% vim: tw=50
% 08/02/2022 11AM

``$\mathrm{Bin} \left( n, \frac{\lambda}{n} \right)$ converges to $\mathrm{Po}(\lambda)$''. (Note that ``converges'' has not been given a precise meaning here.)

\subsubsection*{Expectation}
Let $(\Omega, \mathcal{F}, \PP)$ be a probability space and $X$ a discrete random variable. For now, suppose $X$ only takes non-negative values: ``$X \ge 0$''.

\begin{definition*}
The \emph{expectation of $X$} (or \emph{expected value}, or \emph{mean}) is
\[ \EE[X] = \sum_{x \in \mathrm{Im}(X)} x \PP(X = x) = \sum_{\omega \in \Omega} X(\omega) \PP(\{\omega\}) \]
``the average of the values taken by $X$, weighted by $p_X$''.
\end{definition*}

\setcounter{customexample}{0}

\begin{example}
$X$ uniform on $\{1, 2, \dots, 6\}$ (i.e. a fair die). Then
\[ \EE[X] = \frac{1}{6} \times 1 + \frac{1}{6} \times 2 + \cdots + \frac{1}{6} \times 6 = 3.5 \]
\begin{note*}
$\EE[X] \not\in \mathrm{Im}(X)$.
\end{note*}
\end{example}

\begin{example}
$X \sim \mathrm{Binomial}(n, p)$.
\[ \EE[X] = \sum_{k = 0}^n k \PP(X = k) = \sum_{k = 0}^n k {n \choose k} p^k (1 - p)^{n - k} \]
Trick:
\begin{align*}
k {n \choose k} &= \frac{k \times n!}{k! \times (n - k)!} \\
&= \frac{n!}{(k - 1)! (n - k)!} \\
&= \frac{n \times (n - 1)!}{(k - 1)! \times (n - k)!} \\
&= n {n - 1 \choose k - 1}
\end{align*}
Hence
\begin{align*}
\EE[X] &= n \sum_{k = 1}^n {n - 1 \choose k - 1} p^k (1 - p)^{n - k} \\
&= np \sum_{k = 1}^n {n - 1 \choose k - 1} p^{k - 1} (1 - p)^{(n - 1) - (k - 1)} \\
&= np \sum_{l = 0}^{n - 1} {n - 1 \choose l} p^l (1 - p)^{(n - 1) - l} \\
&= np (p + (1 - p))^{n - 1} \\
&= np
\end{align*}
\end{example}

\begin{note*}
Would like to say:
\[ \EE[\mathrm{Bin}(n, p)] = \EE[\mathrm{Bern}(p)] + \cdots + \EE[\mathrm{Bern}(p)] \]
(This will be justified by linearity of expectation below; see the sketch at the end of this section.)
\end{note*}

\begin{example}
$X \sim \mathrm{Poisson}(\lambda)$.
\begin{align*}
\EE[X] &= \sum_{k \ge 0} k \PP(X = k) \\
&= \sum_{k \ge 0} k \cdot e^{-\lambda} \frac{\lambda^k}{k!} \\
&= \sum_{k \ge 1} e^{-\lambda} \frac{\lambda^k}{(k - 1)!} \\
&= \lambda \sum_{k \ge 1} e^{-\lambda} \frac{\lambda^{k - 1}}{(k - 1)!} \\
&= \lambda \sum_{l \ge 0} e^{-\lambda} \frac{\lambda^l}{l!} \\
&= \lambda
\end{align*}
\end{example}

\begin{note*}
Would like to say
\[ \EE[\mathrm{Poisson}(\lambda)] \approx \EE\left[\mathrm{Bin} \left( n, \frac{\lambda}{n} \right) \right] = \lambda \]
Can't say this: it is not true in general that
\[ \PP(X_n = k) \approx \PP(X = k) \implies \EE[X_n] \approx \EE[X] \]
(For instance, if $\PP(X_n = n) = \frac{1}{n}$ and $\PP(X_n = 0) = 1 - \frac{1}{n}$, then $\PP(X_n = k) \to \PP(X = k)$ for every $k$, where $\PP(X = 0) = 1$, but $\EE[X_n] = 1 \neq 0 = \EE[X]$.)
\end{note*}

\begin{example}
$X \sim \mathrm{Geometric}(p)$. Exercise. (A solution sketch appears at the end of this section.)
\end{example}

\noindent Positive and negative: general $X$ (not necessarily $X \ge 0$). We still set
\[ \EE[X] = \sum_{x \in \mathrm{Im}(X)} x \PP(X = x) \]
unless both
\[ \sum_{\substack{x > 0 \\ x \in \mathrm{Im}(X)}} x \PP(X = x) = +\infty \]
and
\[ \sum_{\substack{x < 0 \\ x \in \mathrm{Im}(X)}} x \PP(X = x) = -\infty, \]
in which case we say that $\EE[X]$ is not defined. \\
\ul{Summary}:
\begin{itemize}
\item both infinite: not defined
\item first infinite, second not: $\EE[X] = +\infty$
\item second infinite, first not: $\EE[X] = -\infty$
\item neither infinite: $X$ is \emph{integrable}, i.e.
\[ \sum_{x \in \mathrm{Im}(X)} |x| \PP(X = x) \]
converges.
\end{itemize}
Note that some people say that in cases 2 and 3 the expectation is also undefined.

\begin{example}
Most examples in the course are integrable, \emph{except}:
\begin{itemize}
\item $\PP(X = n) = \frac{6}{\pi^2} \times \frac{1}{n^2}$ for $n \ge 1$. (Note $\sum_n \PP(X = n) = 1$.) Then
\[ \EE[X] = \sum_{n \ge 1} \frac{6}{\pi^2} \times \frac{1}{n} = +\infty \]
\item $\PP(X = n) = \frac{3}{\pi^2} \times \frac{1}{n^2}$ for $n \in \ZZ \setminus \{0\}$; then $\EE[X]$ is not defined. (``It's symmetric, so $\EE[X] = 0$'' is considered wrong for us.)
\end{itemize}
\end{example}
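\begin{note*}
To see why the second example falls into the ``both infinite'' case of the summary above (a quick check, using the divergence of the harmonic series):
\[ \sum_{n \ge 1} n \cdot \frac{3}{\pi^2 n^2} = \frac{3}{\pi^2} \sum_{n \ge 1} \frac{1}{n} = +\infty, \qquad \sum_{n \le -1} n \cdot \frac{3}{\pi^2 n^2} = -\frac{3}{\pi^2} \sum_{n \ge 1} \frac{1}{n} = -\infty \]
\end{note*}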
\begin{example*}
$\EE[\mathbbm{1}_A] = \PP(A)$. Important! (Indeed, $\EE[\mathbbm{1}_A] = 1 \times \PP(A) + 0 \times \PP(A^c) = \PP(A)$.)
\end{example*}

\subsubsection*{Properties of Expectation}
($X$ discrete.)
\begin{enumerate}[(1)]
\item If $X \ge 0$, then $\EE[X] \ge 0$, with equality if and only if $\PP(X = 0) = 1$. Why?
\[ \EE[X] = \sum_{\substack{x \in \mathrm{Im}(X) \\ x \neq 0}} x \PP(X = x) \]
and every term on the right is strictly positive unless $\PP(X = x) = 0$.
\item If $\lambda, c \in \RR$ then:
\begin{enumerate}[(i)]
\item $\EE[X + c] = \EE[X] + c$
\item $\EE[\lambda X] = \lambda \EE[X]$
\end{enumerate}
\item
\begin{enumerate}[(i)]
\item $X$, $Y$ random variables (both integrable) on the same probability space. Then
\[ \EE[X + Y] = \EE[X] + \EE[Y] \]
\item In fact, for $\lambda, \mu \in \RR$,
\[ \EE[\lambda X + \mu Y] = \lambda \EE[X] + \mu \EE[Y] \]
and similarly
\[ \EE[\lambda_1 X_1 + \cdots + \lambda_n X_n] = \lambda_1 \EE[X_1] + \cdots + \lambda_n \EE[X_n] \]
\end{enumerate}
\end{enumerate}

\begin{proof}[of (3)(ii)]
\begin{align*}
\EE[\lambda X + \mu Y] &= \sum_{\omega \in \Omega} (\lambda X(\omega) + \mu Y(\omega)) \PP(\{\omega\}) \\
&= \lambda \sum_{\omega \in \Omega} X(\omega) \PP(\{\omega\}) + \mu \sum_{\omega \in \Omega} Y(\omega) \PP(\{\omega\}) \\
&= \lambda \EE[X] + \mu \EE[Y]
\end{align*}
Note that this proof only works for countable $\Omega$, but there is also a proof for general $\Omega$.
\end{proof}

\begin{note*}
Independence is \emph{not} required for linearity of expectation to hold. (\emph{Linearity of expectation} is the name for property (3)(ii).)
\end{note*}
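\begin{example*}
Linearity now makes the earlier note about $\mathrm{Bin}(n, p)$ rigorous (a sketch, using the standard decomposition into indicators). If $X \sim \mathrm{Bin}(n, p)$ counts successes in $n$ trials, write $X = \mathbbm{1}_{A_1} + \cdots + \mathbbm{1}_{A_n}$, where $A_i$ is the event that the $i$-th trial succeeds. Then
\[ \EE[X] = \EE[\mathbbm{1}_{A_1}] + \cdots + \EE[\mathbbm{1}_{A_n}] = \PP(A_1) + \cdots + \PP(A_n) = np, \]
recovering $\EE[X] = np$ without the binomial-coefficient computation. Note that no independence of the trials is needed.
\end{example*}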
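\begin{example*}
Solution sketch for the $\mathrm{Geometric}(p)$ exercise, under the convention that $X$ counts trials up to and including the first success (conventions differ). With $q = 1 - p$,
\[ \EE[X] = \sum_{k \ge 1} k p q^{k - 1} = p \sum_{k \ge 1} k q^{k - 1} = p \cdot \frac{1}{(1 - q)^2} = \frac{1}{p}, \]
using the differentiated geometric series $\sum_{k \ge 1} k q^{k - 1} = \frac{1}{(1 - q)^2}$. Under the other convention ($X$ counts failures before the first success), the answer is $\frac{1 - p}{p}$.
\end{example*}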