% vim: tw=50
% 14/03/2023 12AM

\begin{lemma}
The primes in $\CC[X]$ (up to associates) are the polynomials $X - \lambda$, for $\lambda \in \CC$.
\end{lemma}

\begin{proof}
By the fundamental theorem of algebra, any non-constant polynomial in $\CC[X]$ has a root in $\CC$, hence a factor $X - \lambda$. So the irreducibles are exactly the polynomials of degree 1.
\end{proof}

\begin{flashcard}[jordan-normal-form]
\begin{theorem}[Jordan Normal Form]
\cloze{
Let $\alpha : V \to V$ be an endomorphism of a finite dimensional $\CC$-vector space. Let $V_\alpha$ be $V$ regarded as a $\CC[X]$-module with $X$ acting as $\alpha$. There is an isomorphism of $\CC[X]$-modules
\[ V_\alpha \cong \CC[X] / ((X - \lambda_1)^{n_1}) \oplus \cdots \oplus \CC[X] / ((X - \lambda_t)^{n_t}) \]
where $\lambda_1, \ldots, \lambda_t \in \CC$ (not necessarily distinct). In particular there exists a basis for $V$ such that $\alpha$ has matrix
\[ \begin{pmatrix}
J_{n_1}(\lambda_1) & 0 & \cdots & 0 \\
0 & J_{n_2}(\lambda_2) & \cdots & 0 \\
\vdots & \vdots & \ddots & \vdots \\
0 & 0 & \cdots & J_{n_t}(\lambda_t)
\end{pmatrix} \]
where
\[ J_n(\lambda) = \begin{pmatrix}
\lambda & 0 & 0 & \cdots & 0 & 0 \\
1 & \lambda & 0 & \cdots & 0 & 0 \\
0 & 1 & \lambda & \cdots & 0 & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots & \vdots \\
0 & 0 & 0 & \cdots & \lambda & 0 \\
0 & 0 & 0 & \cdots & 1 & \lambda
\end{pmatrix} \]
}
\end{theorem}
\begin{proof}
\cloze{
$\CC[X]$ is a Euclidean Domain and $V_\alpha$ is finitely generated by Lemma 16.7. We apply the \fcemph{primary decomposition}, noting that the primes in $\CC[X]$ are as in Lemma 16.10. Since $V$ is finite dimensional, we get no copies of $\CC[X]$ itself. Finally, $J_n(\lambda)$ represents multiplication by $X$ on $\CC[X] / ((X - \lambda)^n)$ with respect to the basis $1, X - \lambda, (X - \lambda)^2, \ldots, (X - \lambda)^{n - 1}$.
}
\end{proof}
\end{flashcard}

\begin{remark*}
\begin{enumerate}[(i)]
\item If $\alpha$ is represented by a matrix $A$, then the theorem says that $A$ is similar to a matrix in JNF.
\item The Jordan blocks are uniquely determined up to reordering. This can be proved by considering the dimensions of the generalised eigenspaces $\Ker((\alpha - \lambda \id)^m)$ for $m = 1, 2, 3, \ldots$ (omitted).
\item The minimal polynomial of $\alpha$ is $\prod_\lambda (X - \lambda)^{c_\lambda}$ where $c_\lambda$ is the size of the largest $\lambda$-block.
\item The \emph{characteristic polynomial} of $\alpha$ is $\prod_\lambda (X - \lambda)^{a_\lambda}$ where $a_\lambda$ is the sum of the sizes of the $\lambda$-blocks.
\item The number of $\lambda$-blocks is the dimension of the $\lambda$-eigenspace.
\end{enumerate}
\end{remark*}
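To illustrate (iii)--(v), here is a small worked example (with illustrative eigenvalues $2$ and $5$, not from the lectures). Suppose
\[ V_\alpha \cong \CC[X] / ((X - 2)^2) \oplus \CC[X] / ((X - 2)) \oplus \CC[X] / ((X - 5)) \]
Then $\alpha$ has JNF with blocks $J_2(2)$, $J_1(2)$, $J_1(5)$. Its minimal polynomial is $(X - 2)^2 (X - 5)$ (the largest $2$-block has size $2$), its characteristic polynomial is $(X - 2)^3 (X - 5)$ (the $2$-blocks have total size $3$), and the $2$-eigenspace has dimension $2$ (one dimension per $2$-block).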
\newpage
\section{Modules over a PID (non-examinable)}
The \emph{structure theorem} holds for PIDs. We illustrate some of the ideas which go into the proof.

\begin{theorem}
Let $R$ be a PID. Then any finitely generated torsion-free $R$-module is free. (For $R$ a Euclidean Domain, this is Corollary 16.5.)
\end{theorem}

\begin{lemma}
Let $R$ be a PID and $M$ an $R$-module. Let $r_1, r_2 \in R$, not both zero, and let $d = \gcd(r_1, r_2)$.
\begin{enumerate}[(i)]
\item There exists $A \in \SL_2(R)$ such that
\[ A \begin{pmatrix} r_1 \\ r_2 \end{pmatrix} = \begin{pmatrix} d \\ 0 \end{pmatrix} \]
\item If $x_1, x_2 \in M$ then there exist $x_1', x_2' \in M$ such that $R x_1 + R x_2 = R x_1' + R x_2'$ and $r_1 x_1 + r_2 x_2 = d x_1' + 0 x_2'$.
\end{enumerate}
\end{lemma}

\begin{proof}
$R$ a PID implies $(r_1, r_2) = (d)$, hence there exist $\alpha, \beta \in R$ such that $\alpha r_1 + \beta r_2 = d$. Write $r_1 = s_1 d$, $r_2 = s_2 d$ for some $s_1, s_2 \in R$. Then $\alpha s_1 + \beta s_2 = 1$.
\begin{enumerate}[(i)]
\item \[ \ub{ \begin{pmatrix} \alpha & \beta \\ -s_2 & s_1 \end{pmatrix} }_{\det = \alpha s_1 + \beta s_2 = 1} \begin{pmatrix} r_1 \\ r_2 \end{pmatrix} = \begin{pmatrix} d \\ 0 \end{pmatrix} \]
\item Let $x_1' = s_1 x_1 + s_2 x_2$, $x_2' = -\beta x_1 + \alpha x_2$. Then $R x_1' + R x_2' \subseteq R x_1 + R x_2$. To prove the reverse inclusion we solve for $x_1$ and $x_2$ in terms of $x_1'$ and $x_2'$. This is possible since
\[ \det \begin{pmatrix} s_1 & s_2 \\ -\beta & \alpha \end{pmatrix} = \alpha s_1 + \beta s_2 = 1 \]
Finally
\begin{align*}
r_1 x_1 + r_2 x_2 &= d(s_1 x_1 + s_2 x_2) \\
&= d x_1' \qedhere
\end{align*}
\end{enumerate}
\end{proof}

\begin{proof}[Proof of Theorem 17.1]
Let $M = R x_1 + \cdots + R x_n$ with $n$ as small as possible. If $x_1, \ldots, x_n$ are independent then $M$ is free, and we're done. Otherwise, there exist $r_1, \ldots, r_n \in R$, not all zero, with $\sum_{i = 1}^n r_i x_i = 0$. WLOG $r_1 \neq 0$. Lemma 17.2(ii) shows that after replacing $x_1$ and $x_2$ by suitable $x_1'$ and $x_2'$, we may assume $r_1 \neq 0$ and $r_2 = 0$. Repeating this process (changing $x_1$ and $x_3$, then $x_1$ and $x_4$, and so on), we may assume $r_1 \neq 0$ and $r_2 = \cdots = r_n = 0$. Now $r_1 x_1 = 0 \implies x_1 = 0$ (since $M$ is torsion-free). Thus $M = R x_2 + \cdots + R x_n$, which contradicts our choice of $n$.
\end{proof}
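To make Lemma 17.2(i) concrete, here is a small worked instance over $R = \ZZ$ (the numbers are illustrative). Take $r_1 = 6$, $r_2 = 4$, so $d = 2$; we may take $\alpha = 1$, $\beta = -1$ (since $1 \cdot 6 + (-1) \cdot 4 = 2$), giving $s_1 = 3$, $s_2 = 2$. Then
\[ \begin{pmatrix} 1 & -1 \\ -2 & 3 \end{pmatrix} \begin{pmatrix} 6 \\ 4 \end{pmatrix} = \begin{pmatrix} 2 \\ 0 \end{pmatrix} \]
and indeed $\det \begin{pmatrix} 1 & -1 \\ -2 & 3 \end{pmatrix} = 3 - 2 = 1$, so the matrix lies in $\SL_2(\ZZ)$.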