%! TEX root = PM.tex
% vim: tw=50
% 24/10/2023 10AM
\begin{theorem*}
If $f_n \stackrel{\mu}{\longrightarrow} 0$, then $\exists$ a subsequence $(n_k)$ such that $f_{n_k} \to 0$ \glsref[al_ev]{$\mu$.a.e}.
\end{theorem*}

\begin{proof}
Suppose $f_n \stackrel{\mu}{\longrightarrow} 0$. For each $k$, choosing $\eps = \frac{1}{k}$ gives $\mu\left(|f_n| > \frac{1}{k}\right) \to 0$ as $n \to \infty$, so we can find $n_k$ such that $\mu\left(|f_{n_k}| > \frac{1}{k}\right) < \frac{1}{k^2}$. We can choose $n_{k + 1}$ in the same way (i.e. $\mu \left( |f_{n_{k + 1}}| > \frac{1}{k + 1} \right) < \frac{1}{(k + 1)^2}$, and such that $n_{k + 1} > n_k$). So we get a subsequence $(n_k)$ such that
\[ \mu \left( |f_{n_k}| > \frac{1}{k} \right) < \frac{1}{k^2} .\]
Also, $\sum_k \frac{1}{k^2} < \infty$, so $\sum_k \mu \left( |f_{n_k}| > \frac{1}{k} \right) < \infty$. So by \nameref{borel_cantelli_1},
\[ \mu \left( \ub{|f_{n_k}| > \frac{1}{k} \text{ \gls{io}}}_{\supseteq \{f_{n_k} \not\to 0\}} \right) = 0 \implies \mu(f_{n_k} \not\to 0) = 0 ,\]
i.e. $f_{n_k} \to 0$ \glsref[al_ev]{$\mu$.a.e}.
\end{proof}

\begin{remark*}
Going to a subsequence is necessary, i.e. convergence in $\mu$-measure $\not\implies$ \glsref[al_ev]{$\mu$.a.e} convergence. For example, let $(A_n)_{n \in \NN}$ be \gls{indep_event} events such that $\PP(A_n) = \frac{1}{n}$, and let $X_n = \indicator{A_n}$. Then $X_n \stackrel{\PP}{\longrightarrow} 0$ (as $\PP(|X_n| > \eps) \le \PP(A_n) = \frac{1}{n} \to 0 ~\forall \eps > 0$). But $\sum_n \PP(A_n) = \infty$, i.e. $\sum_n \PP(|X_n| > \eps) = \infty$ for $\eps \in (0, 1)$, and the events $\{|X_n| > \eps\} = A_n$ are \gls{indep_event}. So \nameref{borel_cantelli_2} $\implies$ $\PP(|X_n| > \eps \text{ \gls{io}}) = 1$, hence $X_n \not\to 0$ \gls{al_ev}.
\end{remark*}

\vspace{-1em}
\begin{flashcard}[conv-dist-defn]
\begin{definition*}[Convergence in distribution]
\glsnoundefn{conv_dist}{converges in distribution}{N/A}
\cloze{
For $X, (X_n)_n$ random variables, we say $X_n \convdist X$ (\emph{$X_n$ converges to $X$ in distribution}) if
\[ F_{X_n}(t) \stackrel{n \to \infty}{\longrightarrow} F_X(t) ~\forall \text{$t$ such that $F_X$ is continuous at $t$} \]
(this definition does not require $X$ and the $(X_n)$ to be defined on the same probability space).
}
\end{definition*}
\end{flashcard}

\begin{remark*}
If $X_n \stackrel{\PP}{\longrightarrow} X$, then $X_n \convdist X$ (proof is on \es{2}).
\end{remark*}
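\begin{remark*}
The converse fails in general. A standard counterexample (not from lectures): take $X \sim \mathcal{N}(0, 1)$ and $X_n = -X$ for all $n$. Then $F_{X_n} = F_X$ by symmetry, so $X_n \convdist X$ trivially, but for $\eps > 0$,
\[ \PP(|X_n - X| > \eps) = \PP(2|X| > \eps) \not\to 0 ,\]
so $X_n \stackrel{\PP}{\not\longrightarrow} X$.
\end{remark*}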
\begin{example*}
Let $(X_n)_{n \in \NN}$ be IID $\Exp(1)$, i.e. $\PP(X_n > x) = e^{-x} ~\forall n \in \NN, x \ge 0$. Find a deterministic function $g : \NN \to \RR$ such that \gls{al_surely}
\[ \limsup \frac{X_n}{g(n)} = 1 .\]
For $\alpha > 0$, $\PP(X_n > \alpha \log n) = e^{-\alpha \log n} = n^{-\alpha}$, so $\sum_n \PP(X_n > \alpha \log n) < \infty$ if and only if $\alpha > 1$.
In particular, for any $\eps > 0$, $\sum_n \PP(X_n > (1 + \eps) \log n) < \infty$, so by \nameref{borel_cantelli_1},
\[ \PP \left( \frac{X_n}{\log n} > 1 + \eps \text{ \gls{io}} \right) = 0 .\]
Also, $\sum_n \PP(X_n > \log n) = \infty$, and the events $\{X_n > \log n\}$ are \gls{indep_event} (as the $(X_n)$ are \gls{indep_rv}), so by \nameref{borel_cantelli_2},
\[ \PP \left( \frac{X_n}{\log n} > 1 \text{ \gls{io}} \right) = 1 .\]
Taking $\eps = \frac{1}{m}$ and intersecting the resulting almost sure events over $m \in \NN$,
\[ \PP \left( \limsup \frac{X_n}{\log n} = 1 \right) = 1 ,\]
i.e. $g(n) = \log n$ works.
\end{example*}

\begin{flashcard}[tail-event-defn]
\begin{definition*}[Tail events]
\glsnoundefn{tail_sig}{tail $\sigma$-algebra}{tail $\sigma$-algebras}
\glssymboldefn{tail_sig}{tail $\sigma$-algebra}{$\tau$}
\cloze{
Let $(X_n : n \in \NN)$ be a sequence of random variables. Define
\[ \tau_n = \sigma\{X_{n + 1}, X_{n + 2}, \ldots\} \]
and define $\tau = \bigcap_{n \in \NN} \tau_n$. Then $\tau$ is a \sigalg\ called the \emph{tail \sigalg} (it contains the events that depend only on the ``limiting behaviour'' of the sequence).
}
\end{definition*}
\end{flashcard}

\begin{flashcard}[kol-0-1-law]
\begin{theorem*}[Kolmogorov $0-1$ law]
\label{kol_0_1}
\cloze{
Let $(X_n)_n$ be a sequence of \gls{indep_rv} random variables. Then for the \gls{tail_sig} $\tailsig$, if $A \in \tailsig$, then $\PP(A) = 0$ or $\PP(A) = 1$. Moreover, if $Y : (\Omega, \tailsig) \to \RR$ is measurable, then $Y$ is \gls{al_surely} constant.
}
\end{theorem*}
\begin{proof}
Let $\mathcal{F}_n = \sigma(X_1, \ldots, X_n)$. Then $\mathcal{F}_n$ is generated by the \pisys\ of sets
\[ A = \{X_1 \le x_1, X_2 \le x_2, \ldots, X_n \le x_n\}, \qquad x_1, \ldots, x_n \in \RR ,\]
and $\tailsig_n = \sigma(X_{n + 1}, \ldots)$ is generated by the \pisys\ of events
\[ B = \{X_{n + 1} \le x_{n + 1}, \ldots, X_{n + k} \le x_{n + k}\}, \qquad x_{n + 1}, \ldots, x_{n + k} \in \RR, k \in \NN .\]
By independence, $\PP(A \cap B) = \PP(A) \PP(B)$ for all such $A$ and $B$. Hence, by an earlier theorem, $\mathcal{F}_n$ and $\tailsig_n$ are \gls{indep_sigma}. But $\tailsig \subseteq \tailsig_n$, so $\mathcal{F}_n$ and $\tailsig$ are \gls{indep_sigma} for all $n$.

Now, $\bigcup_n \mathcal{F}_n$ is a \pisys\ (as $\mathcal{F}_1 \subseteq \mathcal{F}_2 \subseteq \cdots$) that generates $\mathcal{F}_\infty \defeq \sigma(X_n, n \in \NN)$, and $\bigcup_n \mathcal{F}_n$ and $\tailsig$ are independent, so by the theorem again, $\mathcal{F}_\infty$ and $\tailsig$ are \gls{indep_sigma}. But $\tailsig \subseteq \mathcal{F}_\infty$, so any $A \in \tailsig$ also satisfies $A \in \mathcal{F}_\infty$, hence
\[ \PP(A) = \PP(\ub{A}_{\in \tailsig} \cap \ub{A}_{\in \mathcal{F}_\infty}) = \PP(A) \PP(A) = \PP(A)^2 ,\]
i.e. $\PP(A) = 0$ or $\PP(A) = 1$.

Finally, if $Y$ is $\tailsig$-measurable, then for any $y \in \RR$, $\{Y \le y\} \in \tailsig$, so $\PP(Y \le y) = 0$ or $1$. Let $c = \inf\{y : \PP(Y \le y) = 1\}$; then $\PP(Y = c) = 1$.
\end{proof}
\end{flashcard}

\begin{example*}
If the $X_i$ are IID with $\EE|X_1| < \infty$, then
\[ \limsup \frac{\sum_{i = 1}^n X_i}{n}, \qquad \liminf \frac{\sum_{i = 1}^n X_i}{n} \]
are \gls{tail_sig}-measurable (changing finitely many of the $X_i$ does not affect them), hence constant \gls{al_surely}.
\end{example*}
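\begin{remark*}
(A sanity check, not from lectures.) This is consistent with the $\Exp(1)$ example above: $\limsup \frac{X_n}{\log n}$ is \gls{tail_sig}-measurable, since changing finitely many of the $X_n$ does not affect it, so by the \nameref{kol_0_1} it must be \gls{al_surely} constant; the Borel--Cantelli computation identifies that constant as $1$.
\end{remark*}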
\newpage
\section{Integration}

For $(E, \mathcal{E}, \mu)$ a measure space and $f : E \to \RR$ measurable with $f \ge 0$, we shall define the integral of $f$ and write it as
\[ \mu(f) = \int_E f \dd \mu = \int_E f(x) \dd \mu(x) .\]
When $(E, \mathcal{E}, \mu) = (\RR, \mathcal{B}, \lambda)$, we write it as $\int f(x) \dd x$.
For $(E, \mathcal{E}, \mu) = (\Omega, \mathcal{F}, \PP)$ and $X$ a random variable, we define its \emph{expectation}
\[ \EE(X) = \int_\Omega X \dd \PP = \int_\Omega X(\omega) \dd \PP(\omega) .\]
\glsadjdefn{simple}{simple}{function}
To start, we say $f : E \to \RR$ is \emph{simple} if $f = \sum_{k = 1}^m a_k \indicator{A_k}$ for some $m \in \NN$, $0 \le a_k < \infty$ and $A_k \in \mathcal{E}$ for all $k$. Define for such simple $f$,
\[ \mu(f) = \sum_{k = 1}^m a_k \mu(A_k) \]
(where $0 \cdot \infty = 0$). This is well defined (see \es{2}, and the example below). Check that for $f, g$ simple and $\alpha, \beta \ge 0$: \refsteplabel[properties]{lec9_int_properties}
\begin{enumerate}[(a)]
\item $\mu(\alpha f + \beta g) = \alpha \mu(f) + \beta \mu(g)$;
\item $f \le g \implies \mu(f) \le \mu(g)$;
\item $f = 0$ \gls{al_ev} $\implies$ $\mu(f) = 0$.
\end{enumerate}
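\begin{example*}
(A quick check of well-definedness, not from lectures.) On $(\RR, \mathcal{B}, \lambda)$, the same simple function can have several representations, e.g.
\[ f = 2 \indicator{[0, 1]} + 3 \indicator{[0, 2]} = 5 \indicator{[0, 1]} + 3 \indicator{(1, 2]} ,\]
and both give the same integral:
\[ \lambda(f) = 2 \lambda([0, 1]) + 3 \lambda([0, 2]) = 2 + 6 = 8, \qquad 5 \lambda([0, 1]) + 3 \lambda((1, 2]) = 5 + 3 = 8 .\]
\end{example*}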