% vim: tw=50
% 11/11/2022 11AM

\begin{flashcard}[SimDiagonCommute]
\begin{theorem*}[Simultaneous diagonalisation]
\begin{itemize}
\item $\dim_F V < \infty$
\item $\alpha, \beta \in \mathcal{L}(V)$ diagonalisable
\end{itemize}
Then $\alpha, \beta$ are simultaneously diagonalisable ($\exists \mathcal{B}$ basis of $V$ in which both $[\alpha]_{\mathcal{B}}$, $[\beta]_{\mathcal{B}}$ are diagonal) if and only if \cloze{$\alpha$ and $\beta$ commute.}
\end{theorem*}
\end{flashcard}

\begin{proof}
\begin{enumerate}
\item[$\Rightarrow$] There exists a basis $\mathcal{B}$ of $V$ in which
\[ [\alpha]_{\mathcal{B}} = D_1, \qquad [\beta]_{\mathcal{B}} = D_2 \]
with $D_1, D_2$ both diagonal; then $D_1 D_2 = D_2 D_1$, so $\alpha\beta = \beta\alpha$.
\item[$\Leftarrow$] Suppose $\alpha, \beta$ are both diagonalisable \emph{and} $\alpha\beta = \beta\alpha$. Let $\lambda_1, \dots, \lambda_k$ be the $k$ distinct eigenvalues of $\alpha$. We have shown:
\[ \text{$\alpha$ diagonalisable} \iff V = \bigoplus_{i = 1}^k V_{\lambda_i} \]
where $V_{\lambda_i}$ is the eigenspace associated to $\lambda_i$.
\myskip
Claim: $V_{\lambda_i}$ is stable under $\beta$: $\beta(V_{\lambda_i}) \le V_{\lambda_i}$. \\
Indeed, let $v \in V_{\lambda_i}$, then
\[ \alpha\beta(v) = \beta\alpha(v) = \beta(\lambda_i v) = \lambda_i \beta(v) \]
so $\beta(v) \in V_{\lambda_i}$.
\begin{itemize}
\item We use the criterion for diagonalisability: $\beta$ diagonalisable implies that there exists a polynomial $p$ which is a product of distinct linear factors such that $p(\beta) = 0$.
\end{itemize}
Now $\beta|_{V_{\lambda_j}}$ is an endomorphism ($\beta|_{V_{\lambda_j}} : V_{\lambda_j} \to V_{\lambda_j}$, by the claim) and
\[ p(\beta|_{V_{\lambda_j}}) = 0 \]
$p$ has distinct linear factors, so $\beta|_{V_{\lambda_j}}$ is diagonalisable. So there exists a basis $\mathcal{B}_j$ of $V_{\lambda_j}$ in which $\beta|_{V_{\lambda_j}}$ is diagonal. Then
\[ V = \bigoplus_{i = 1}^k V_{\lambda_i} \]
so $\mathcal{B} = (\mathcal{B}_1, \dots, \mathcal{B}_k)$ is a basis of $V$ in which both $\alpha$ and $\beta$ are in diagonal form.
\end{enumerate}
\end{proof}

\begin{hiddenflashcard}[commuting-endomorphisms]
Cool fact about commuting endomorphisms? (i.e. if $\alpha\beta = \beta\alpha$ then\ldots) \\
\cloze{
Then the eigenspaces of $\beta$ are stable under $\alpha$ and vice versa: if $v \in V_{\lambda}$, i.e. $\beta v = \lambda v$, then
\[ \beta\alpha v = \alpha\beta v = \alpha(\lambda v) = \lambda \alpha v \]
so $\alpha v \in V_{\lambda}$. \\
This is the main idea behind simultaneous diagonalisation, but is useful in other contexts too!
}
\end{hiddenflashcard}
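\noindent To illustrate the mechanism of the proof above on a small worked example: take $V = \RR^3$ and (in the standard basis)
\[ A = \begin{pmatrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 2 \end{pmatrix}, \qquad B = \begin{pmatrix} 0 & 1 & 0 \\ 1 & 0 & 0 \\ 0 & 0 & 5 \end{pmatrix} \]
Both are diagonalisable and $AB = BA$. The eigenspace $V_1 = \langle e_1, e_2 \rangle$ of $A$ is stable under $B$, and $B|_{V_1}$ is diagonalised by $(e_1 + e_2, e_1 - e_2)$. Hence in the basis $\mathcal{B} = (e_1 + e_2, e_1 - e_2, e_3)$,
\[ [A]_{\mathcal{B}} = \begin{pmatrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 2 \end{pmatrix}, \qquad [B]_{\mathcal{B}} = \begin{pmatrix} 1 & 0 & 0 \\ 0 & -1 & 0 \\ 0 & 0 & 5 \end{pmatrix} \]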
\subsubsection*{Minimal polynomial of an endomorphism}
\begin{itemize}
\item Reminder (Groups, Rings and Modules). \\
Euclidean algorithm for polynomials: $a, b$ polynomials over $F$, $b \neq 0$, then there exist polynomials $q, r$ over $F$ with:
\[ \deg r < \deg b \]
\[ a = qb + r \]
\end{itemize}

\begin{definition*}[Minimal polynomial]
$V$ vector space over $F$, $\dim_F V < \infty$. Let $\alpha \in \mathcal{L}(V)$. The \emph{minimal polynomial} $m_\alpha$ of $\alpha$ is the (unique up to a constant) non-zero polynomial of \emph{smallest degree} such that
\[ m_\alpha(\alpha) = 0 \]
\end{definition*}

\noindent Existence and uniqueness follow from the following observations:
\begin{itemize}
\item $\dim_F V = n$, $\alpha \in \mathcal{L}(V)$. We know:
\[ \dim_F \mathcal{L}(V) = n^2 \]
\[ \implies \id, \alpha, \dots, \alpha^{n^2} \text{ cannot be linearly independent} \]
\[ \implies a_{n^2} \alpha^{n^2} + \cdots + a_1 \alpha + a_0 \id = 0 \text{ for some $a_i \in F$, not all zero} \]
\[ \implies \exists p \in F[t], p \neq 0, \text{ such that } p(\alpha) = 0 \]
That is, there does exist a non-zero polynomial $p$ that kills $\alpha$.
\item Lemma: $\alpha \in \mathcal{L}(V)$, $p \in F[t]$. Then $p(\alpha) = 0$ if and only if $m_\alpha$ is a factor of $p$. \\
Proof: $p \in F[t]$, $p(\alpha) = 0$, $m_\alpha$ the minimal polynomial of $\alpha$. So $\deg m_\alpha \le \deg p$. By Euclidean division:
\[ p = m_\alpha q + r, \qquad \deg r < \deg m_\alpha \]
Then
\[ 0 = p(\alpha) = m_\alpha(\alpha) q(\alpha) + r(\alpha) = r(\alpha) \]
so $r(\alpha) = 0$. If $r \neq 0$, this would contradict the minimality of $\deg m_\alpha$. So $r \equiv 0$, hence $p = m_\alpha q$, that is, $m_\alpha$ divides $p$.
\item If $m_1, m_2$ are both polynomials of smallest degree which kill $\alpha$, then by the above lemma $m_1 \mid m_2$ and $m_2 \mid m_1$, so $m_2 = c m_1$, $c \in F$. That is, the minimal polynomial is unique up to a constant.
\end{itemize}

\begin{example*}
$V = \RR^2$
\[ A = \begin{pmatrix} 1 & 0 \\ 0 & 1 \end{pmatrix} \quad B = \begin{pmatrix} 1 & 1 \\ 0 & 1 \end{pmatrix} \]
\begin{itemize}
\item Let $p(t) = (t - 1)^2$, then $p(A) = p(B) = 0$. So each minimal polynomial is either $t - 1$ or $(t - 1)^2$.
\item Check: $m_A = t - 1$, $m_B = (t - 1)^2$. So $A$ is diagonalisable but $B$ is not.
\end{itemize}
\end{example*}

\subsection{Cayley-Hamilton Theorem and multiplicity of eigenvalues}
\begin{flashcard}[cayley-hamilton]
\begin{theorem*}[Cayley-Hamilton]
Let $V$ be an $F$ vector space, $\dim_F V < \infty$. Let $\alpha \in \mathcal{L}(V)$ with characteristic polynomial $\chi_\alpha(t) = \det(\alpha - t\id)$. Then $\chi_\alpha(\alpha) = 0$.
\end{theorem*}
\fcscrap{
\begin{corollary*}
$m_\alpha \mid \chi_\alpha$.
\end{corollary*}
}
\begin{proof}
$F = \CC$ (general proof is in the notes).
\cloze{
$\alpha \in \mathcal{L}(V)$, $n = \dim_\CC V$. There exists a basis $\mathcal{B} = \{v_1, \dots, v_n\}$ such that
\[ [\alpha]_{\mathcal{B}} = \begin{pmatrix} a_1 & \cdots & * \\ \vdots & \ddots & \vdots \\ 0 & \cdots & a_n \end{pmatrix} \]
(triangulable). Let $U_j = \langle v_1, \dots, v_j\rangle$. Then because of the triangular form, $(\alpha - a_j \id) U_j \le U_{j - 1}$.
\[ \chi_\alpha(t) = \prod_{i = 1}^n (a_i - t) \]
\[ (\alpha - a_1 \id) \cdots (\alpha - a_{n - 1} \id)(\alpha - a_n \id) V \]
\[ \le (\alpha - a_1\id) \cdots (\alpha - a_{n - 1} \id) U_{n - 1} \]
\[ \vdots \]
\[ \le 0 \]
So $\chi_\alpha(\alpha) = 0$. For the general case, see the notes.
}
\end{proof}
\end{flashcard}
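\noindent As a quick check of the theorem on a concrete matrix: let
\[ M = \begin{pmatrix} 1 & 2 \\ 3 & 4 \end{pmatrix}, \qquad \chi_M(t) = \det(M - tI) = (1 - t)(4 - t) - 6 = t^2 - 5t - 2 \]
and indeed
\[ \chi_M(M) = M^2 - 5M - 2I = \begin{pmatrix} 7 & 10 \\ 15 & 22 \end{pmatrix} - \begin{pmatrix} 5 & 10 \\ 15 & 20 \end{pmatrix} - \begin{pmatrix} 2 & 0 \\ 0 & 2 \end{pmatrix} = 0 \]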
\begin{definition*}[algebraic / geometric multiplicity]
$\dim_F V < \infty$, $\alpha \in \mathcal{L}(V)$. Let $\lambda$ be an eigenvalue of $\alpha$. Then
\[ \chi_\alpha(t) = (t - \lambda)^{a_\lambda} q(t), \qquad q \in F[t], \quad q(\lambda) \neq 0 \]
\begin{itemize}
\item $a_\lambda$ is the algebraic multiplicity of $\lambda$.
\item $g_\lambda$ is the geometric multiplicity of $\lambda$, and $g_\lambda = \dim \Ker(\alpha - \lambda \id)$.
\end{itemize}
\end{definition*}

\begin{remark*}
$\lambda$ eigenvalue $\iff$ $\alpha - \lambda\id$ singular $\iff$ $\det(\alpha - \lambda \id) = \chi_\alpha(\lambda) = 0$.
\end{remark*}

\begin{lemma*}
$\lambda$ eigenvalue of $\alpha \in \mathcal{L}(V)$, then $1 \le g_\lambda \le a_\lambda$.
\end{lemma*}

\begin{proof}
\begin{itemize}
\item $g_\lambda = \dim \Ker(\alpha - \lambda \id) \ge 1$ since $\lambda$ is an eigenvalue.
\item Let us show that $g_\lambda \le a_\lambda$.
Indeed, let $(v_1, \dots, v_{g_\lambda})$ be a basis of $V_\lambda = \Ker(\alpha - \lambda\id)$, and complete it to a basis $\mathcal{B} = (v_1, \dots, v_{g_\lambda}, v_{g_\lambda + 1}, \dots, v_n)$ of $V$. Then
\[ [\alpha]_{\mathcal{B}} = \begin{pmatrix} \lambda \id_{g_{\lambda}} & * \\ 0 & A_1 \end{pmatrix} \]
\[ \implies \det([\alpha]_{\mathcal{B}} - t\id) = \det \begin{pmatrix} (\lambda - t)\id_{g_\lambda} & * \\ 0 & A_1 - t\id \end{pmatrix} = (\lambda - t)^{g_\lambda} \chi_{A_1}(t) \]
\[ \implies g_\lambda \le a_\lambda \]
\end{itemize}
\end{proof}

\begin{lemma*}
$\lambda$ eigenvalue of $\alpha \in \mathcal{L}(V)$. Let:
\[ c_\lambda \equiv \text{multiplicity of $\lambda$ as a root of $m_\alpha$ (minimal polynomial)} \]
Then $1 \le c_\lambda \le a_\lambda$.
\end{lemma*}

\begin{proof}
\begin{itemize}
\item Cayley-Hamilton implies $m_\alpha \mid \chi_\alpha$. So $c_\lambda \le a_\lambda$.
\item $c_\lambda \ge 1$. Indeed, there exists $v \neq 0$ such that $\alpha(v) = \lambda v$, and then for all $p \in F[t]$, $p(\alpha)(v) = p(\lambda) v$ (since $\alpha^n(v) = \lambda^n v$), so $m_\alpha(\alpha)(v) = m_\alpha(\lambda) v = 0$, so $m_\alpha(\lambda) = 0$, so $c_\lambda \ge 1$.
\end{itemize}
\end{proof}

\begin{example*}
\[ A = \begin{pmatrix} 1 & 0 & -2 \\ 0 & 1 & 1 \\ 0 & 0 & 2 \end{pmatrix} \]
$m_A$?
\begin{itemize}
\item $\chi_A(t) = (t - 1)^2(t - 2)$
\item Since $m_A \mid \chi_A$ and every eigenvalue of $A$ is a root of $m_A$, $m_A$ is either $(t - 1)^2(t - 2)$ or $(t - 1)(t - 2)$. Check: $(A - I)(A - 2I) = 0$, so $m_A = (t - 1)(t - 2)$, so $A$ is diagonalisable.
\end{itemize}
\end{example*}
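\noindent To see this concretely, and to tie the example back to the multiplicities above: here $a_1 = g_1 = 2$ and $a_2 = g_2 = 1$, since
\[ \Ker(A - I) = \langle e_1, e_2 \rangle, \qquad \Ker(A - 2I) = \left\langle \begin{pmatrix} -2 \\ 1 \\ 1 \end{pmatrix} \right\rangle \]
so in the basis $\left( e_1, e_2, \begin{pmatrix} -2 \\ 1 \\ 1 \end{pmatrix} \right)$ the matrix of $A$ is diagonal with entries $1, 1, 2$, while $c_1 = 1 < a_1 = 2$.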