% vim: tw=50 % 15/02/2023 11AM \noindent Recall FFF $I_p : T_p \Sigma \times T_p \Sigma \to \RR$, $I_p(v, w) = \langle v, w \rangle_{\RR^3}$. \begin{flashcard}[proof-FFF-self-adjoint] \begin{lemma} $Dn|_p : T_p \Sigma \to T_p\Sigma$ is self-adjoint with respect to $I_p$, i.e. \[ I_p(Dn|_p (v), w) = I_p(v, Dn|_p(w)) \] \end{lemma} \prompt{ \begin{proof} \cloze{ Note that $\{\sigma_u, \sigma_v\}$ is a basis of $T_p\Sigma$, so just check that $\langle Dn|_p(\sigma_u), \sigma_v \rangle = \langle \sigma_u, Dn|_p(\sigma_v) \rangle$. (Note that these expressions are the same as $\langle n_u, \sigma_v \rangle = \langle \sigma_u, n_v \rangle$). Do this by considering the derivative of $\langle n, \sigma_v \rangle$ and similarly for $u$. } \end{proof} } \end{flashcard} \begin{proof} Take $\sigma$ a parametrisation with $\sigma(0) = p$. Then $\{\sigma_u, \sigma_v\}$ is a basis of $T_p \Sigma$. To prove self-adjointness it suffices to check that \[ \langle n_u, \sigma_v \rangle = \langle \sigma_u, n_v \rangle \] (because $n_u = Dn|_p(\sigma_u)$ and $n_v = Dn|_p(\sigma_v)$). Note that \[ \langle n, \sigma_u \rangle = \langle n, \sigma_v \rangle = 0 \] Differentiate the first term with respect to $v$: \[ \langle n_v, \sigma_u \rangle + \langle n, \sigma_{uv} \rangle = 0 \] Similarly differentiate the second with respect to $u$: \[ \langle n_u, \sigma_v \rangle + \langle n, \sigma_{uv} \rangle = 0 \] So $\langle n_u, \sigma_v \rangle = \langle n_v, \sigma_u \rangle$ as desired. \end{proof} \myskip (Recall $M = -\langle n_v, \sigma_u \rangle = -\langle n_u, \sigma_v \rangle$). \myskip Let's try to find the matrix of $Dn|_p$ in the basis $\{\sigma_u, \sigma_v\}$. 
\begin{align*} n_u &= Dn|_p(\sigma_u) = a_{11} \sigma_u + a_{21} \sigma_v \\ n_v &= Dn|_p(\sigma_v) = a_{12} \sigma_u + a_{22} \sigma_v \end{align*} Taking products of the above with $\sigma_u$ and $\sigma_v$ (check!): \[ - \ub{ \begin{pmatrix} L & M \\ M & N \end{pmatrix} }_{Q} = \ub{ \begin{pmatrix} E & F \\ F & G \end{pmatrix} }_{P} \ub{ \begin{pmatrix} a_{11} & a_{12} \\ a_{21} & a_{22} \end{pmatrix} }_{A} \] \[ -Q = PA = A^\top P \] (the last equality holds since $PA = -Q$ is symmetric). If $v = D\sigma|_0(\hat{v})$, $w = D\sigma|_0(\hat{w})$, then \begin{align*} \hat{v}^\top \begin{pmatrix} L & M \\ M & N \end{pmatrix} \hat{w} &= -\hat{v}^\top \begin{pmatrix} E & F \\ F & G \end{pmatrix} \begin{pmatrix} a_{11} & a_{12} \\ a_{21} & a_{22} \end{pmatrix} \hat{w} \\ &= I_p(v, -Dn|_p(w)) \\ &= I_p(-Dn|_p(v), w) \end{align*} Then the second FF has an intrinsic form given by the \emph{symmetric} bilinear form \[ \II : T_p\Sigma \times T_p\Sigma \to \RR \] given by \[ \II(v, w) = I_p(-Dn|_p(v), w) \] \begin{flashcard}[Gauss-curvature] \begin{definition*} Let $\Sigma \subset \RR^3$ be a smooth surface. The \emph{Gauss curvature} \cloze{$\kappa : \Sigma \to \RR$ of $\Sigma$ is the function \[ p \mapsto \det(Dn|_p) \]} \end{definition*} \end{flashcard} \begin{remark*} This is always \emph{well-defined} even if $\Sigma$ is not oriented. We can always choose a local expression for $n$. If we replace it by $-n$, the determinant will \emph{not} change (this is because $\det(A) = \det(-A)$ if $A$ is $2 \times 2$). \end{remark*} \noindent Computing $\kappa$: If we pick $\sigma$ using (\dag) we see that taking $\det$: \[ LN - M^2 = (EG - F^2) \kappa \] hence \begin{flashcard}[curvature-from-FFF-and-second-FF] \[ \kappa \fcscrap{= \det(A)} = \cloze{\frac{LN - M^2} {EG - F^2}} \] \end{flashcard} \begin{example*} The cylinder we saw last time, parametrised by \[ \sigma(u, v) = (a\cos u, a\sin u, v) \] We computed its second fundamental form to be \[ \begin{pmatrix} -a & 0 \\ 0 & 0 \end{pmatrix} \implies \kappa(p) = 0 \] for all $p$. 
\begin{center} \includegraphics[width=0.3\linewidth] {images/1d76a19aad2511ed.png} \end{center} $n : \Sigma \to \text{Equator} \subset S^2$. So if $\gamma : (-\eps, \eps) \to \Sigma$ is a vertical curve, then \[ Dn|_p(\gamma'(0)) = (n \circ \gamma)'(0) = 0 \] since $n$ is constant along vertical lines, so $Dn|_p$ has non-trivial kernel, and hence \[ \det(Dn|_p) = 0 \] \end{example*} \begin{flashcard}[flat-surface-in-R3] \begin{definition*} $\Sigma$ is said to be \emph{flat} if \cloze{$\kappa \equiv 0$ on $\Sigma$.} \end{definition*} \end{flashcard} \begin{example*} If $\Sigma$ is the graph of a smooth function $f$, then it is easy to check that \begin{align*} E &= 1 + f_u^2 \\ G &= 1 + f_v^2 \\ F &= f_u f_v \\ EG - F^2 &= 1 + f_u^2 + f_v^2 \\ L &= \frac{f_{uu}}{\sqrt{EG - F^2}} \\ M &= \frac{f_{uv}}{\sqrt{EG - F^2}} \\ N &= \frac{f_{vv}}{\sqrt{EG - F^2}} \\ \kappa &= \frac{f_{uu} f_{vv} - f_{uv}^2}{(1 + f_u^2 + f_v^2)^2} \end{align*} So $\kappa$ depends on the \emph{Hessian} of $f$. \end{example*} \begin{flashcard}[elliptic-hyperbolic-parabolic] \begin{definition*} $\Sigma \subset \RR^3$, $p \in \Sigma$. We say that $p$ is:\prompt{ (classifying curvatures)} \begin{itemize} \item \cloze{\emph{elliptic} if $\kappa(p) > 0$} \item \cloze{\emph{hyperbolic} if $\kappa(p) < 0$} \item \cloze{\emph{parabolic} if $\kappa(p) = 0$} \end{itemize} \end{definition*} \end{flashcard} \subsubsection*{Graphs} \begin{enumerate}[(1)] \item $f(u, v) = \frac{u^2 + v^2}{2}$ at $(0, 0)$, $\kappa(0, 0, 0) = 1$. 
\item $f(u, v) = \frac{(u^2 - v^2)}{2}$, $\kappa(0, 0, 0) = -1$ \end{enumerate} \begin{flashcard}[planes-elliptic-hyperbolic] \prompt{Surface lying on sides of tangent plane} \begin{lemma} \begin{enumerate}[(a)] \item \cloze{In a sufficiently small neighbourhood of an elliptic point $p$, $\Sigma$ lies entirely on one side of the affine tangent plane $p + T_p\Sigma$.} \item \cloze{In a sufficiently small neighbourhood of a hyperbolic point, $\Sigma$ meets both sides of its affine tangent plane.} \fcscrap{ \begin{center} \includegraphics[width=0.6\linewidth] {images/dbb61a5ead2611ed.png} \end{center} } \end{enumerate} \end{lemma} \prompt{ \begin{proof} \cloze{ Recall that if $w = h\sigma_u + l\sigma_v$ then $\half \II_p(w, w)$ measures the signed distance from $\sigma(h, l)$ to $p + T_p\Sigma$ ($\sigma(0) = p$), and gives \[ \half(Lh^2 + 2Mhl + Nl^2) + O(h^3, l^3) \] If $p$ elliptic, then this quadratic form has eigenvalues of the same sign so is either always positive or always negative for small $h, l$, so $\Sigma$ locally lies on one side only. If $p$ hyperbolic, then the quadratic form has eigenvalues of opposite sign, so for any neighbourhood, the quadratic form is negative in places and positive in others, so $\Sigma$ always takes values on each side. } \end{proof} } \end{flashcard} \begin{proof} Take a parametrisation $\sigma$ near $p$. \[ \kappa = \frac{LN - M^2}{EG - F^2} \] and $EG - F^2 > 0$. Recall also that if \[ w = h\sigma_u + l\sigma_v \in T_p\Sigma ,\] then $\half \II_p(w, w)$ measures the signed distance from $\sigma(h, l)$ to $p + T_p\Sigma$ ($\sigma(0) = p$), measured via the inner product with the positive normal: \[ \half (Lh^2 + 2M hl + Nl^2) + O(h^3, l^3) \] $p$ elliptic implies \[ \begin{pmatrix} L & M \\ M & N \end{pmatrix} \] has eigenvalues of the same sign, so it is positive or negative definite at $p$; hence in a neighbourhood of $p$ the signed distance has only one sign, so $\Sigma$ lies locally on one side of $p + T_p\Sigma$. 
\myskip If $p$ is hyperbolic, then $\II_p$ is indefinite, so $\Sigma$ meets both sides of $p + T_p\Sigma$. \end{proof} \begin{remark*} If $p$ is parabolic we cannot conclude anything either way. \end{remark*}