% vim: tw=50 % 01/11/2022 10AM \vspace{-1\baselineskip} \begin{enumerate} \item[$d = 3$] We will prove that $\sum_n p_{00}(n) < \infty$, which will imply it is transient. Let's compute $p_{00}(2n)$. In order to be back at 0 after $2n$ steps it must make $i$ steps to the right, $i$ to the left, $j$ north and $j$ south, $k$ west and $k$ east for some $i, j, k \ge 0$ and $i + j + k = n$. So \begin{align*} p_{00}(2n) &= \sum_{\substack{i, j, k \ge 0\\i + j + k = n}} {2n \choose i, i, j, j, k, k} \left( \frac{1}{6} \right)^{2n} \\ &= {2n \choose n} \left( \half \right)^{2n} \sum_{\substack{i, j, k \ge 0\\i + j + k = n}} {n \choose i, j, k}^2 \left( \frac{1}{3} \right)^{2n} \end{align*} Note that by the multinomial theorem, \[ \sum_{\substack{i, j, k \ge 0\\i + j + k=n}} {n \choose i, j, k} \left( \frac{1}{3} \right)^n = 1 \] Let $n = 3m$. We claim that \[ {n \choose i, j, k} \le {n \choose m, m, m} \] To prove this, suppose the maximum over $i, j, k$ is attained at some $i, j, k$ with $i > j + 1$. Then \[ {n \choose i, j, k} < {n \choose i - 1, j + 1, k} \] because $i! j! > (i - 1)!(j + 1)!$, contradicting maximality. So at the maximum the indices differ by at most 1, i.e.\ for $n = 3m$ the maximum is attained at $i = j = k = m$. Hence for $n = 3m$, \[ p_{00}(2n) \le {2n \choose n} \left( \half \right)^{2n} \left( \frac{1}{3} \right)^n {n \choose m, m, m} \] Stirling's formula gives \[ p_{00}(2n) \le \frac{A}{n^{3/2}} \] for some $A > 0$. So $\sum_m p_{00}(6m) < \infty$. But also $p_{00}(6m) \ge p_{00}(6m - 2) \left( \frac{1}{6} \right)^2$ and $p_{00}(6m) \ge p_{00}(6m - 4) \left( \frac{1}{6} \right)^4$. So $\sum_n p_{00}(2n) < \infty$ so it's transient. \hfill\qedsymbol \end{enumerate} \subsubsection*{Invariant distribution} \begin{flashcard}[probability-distribution] \begin{definition*} $I$ discrete (countable / finite) set. $\lambda = (\lambda_i : i \in I)$ is a probability distribution if \cloze{$\lambda_i \ge 0$ for all $i$ and $\sum_{i \in I} \lambda_i = 1$.} \end{definition*} \end{flashcard} \begin{example*} \begin{center} \includegraphics[width=0.6\linewidth] {images/dee919c859d011ed.png} \end{center} Then $p_{11}(n) \to \half$. 
So the Markov chain will converge to $(\half, \half)$. \end{example*} \noindent Want to find a distribution $\pi$ such that if $X_0 \sim \pi$, then $X_n \sim \pi$ for all $n$. Assuming $X_0 \sim \pi$, \begin{align*} \PP(X_1 = j) &= \sum_i \PP(X_0 = i, X_1 = j) \\ &= \sum_i \PP(X_1 = j \mid X_0 = i) \PP(X_0 = i) \\ &= \sum_i P(i, j) \pi(i) \end{align*} so $\pi(j) = \sum_i \pi(i) P(i, j)$ for all $j$, i.e.\ $\pi = \pi P$ ($\pi$ as a row vector). \begin{flashcard}[InvDist] \begin{definition*} A probability distribution $\pi$ is called \emph{invariant} / \emph{stationary} / \emph{equilibrium} if \cloze{$\pi = \pi P$.} \end{definition*} \end{flashcard} \begin{theorem*} Let $\pi$ be invariant and $X_0 \sim \pi$. Then $X_n \sim \pi$ for all $n$. \end{theorem*} \begin{proof} $n = 0$ is done. By induction, suppose $X_n \sim \pi$. Then \begin{align*} \PP(X_{n + 1} = j) &= \sum_i \PP(X_{n + 1} = j, X_n = i) \\ &= \sum_i \PP(X_{n + 1} = j \mid X_n = i) \PP(X_n = i) \\ &= \sum_i P(i, j) \pi(i) \\ &= \pi(j) &&(\pi = \pi P) \qedhere \end{align*} \end{proof} \begin{flashcard}[FiniteLimitIsInvDist] \begin{theorem*} Let $I$ be a \cloze{\emph{finite} set} and $\exists i \in I$ such that $p_{ij}(n) \to \pi(j)$ as $n \to \infty$ for all $j \in I$. Then \cloze{$\pi = (\pi_i : i \in I)$ is an invariant distribution.} \end{theorem*} \end{flashcard} \begin{proof} \begin{align*} \sum_{j \in I} \pi_j &= \sum_{j \in I} \lim_{n \to \infty} p_{ij}(n) \\ &= \lim_{n \to \infty} \sum_{j \in I} p_{ij}(n) \\ &= 1 \end{align*} (we changed the order of sum and limit because the sum is finite). So $\pi$ is a distribution. \begin{align*} \pi_j &= \lim_{n \to \infty} p_{ij}(n) \\ &= \lim_{n \to \infty} \sum_{k \in I} p_{ik}(n - 1) P(k, j) \\ &= \sum_{k \in I} \lim_{n \to \infty} p_{ik}(n - 1) P(k, j) \\ &= \sum_{k \in I} \pi_k P(k, j) \end{align*} i.e. $\pi = \pi P$. \end{proof} \begin{remark*} $I$ finite is essential: Consider a simple random walk on $\ZZ$. Then $p_{00}(2n) \sim \frac{A}{\sqrt{n}} \to 0$. Similarly $p_{0x}(n) \to 0$ as $n \to \infty$ for every $x$, so the limit is identically $0$, which is not a distribution. 
\end{remark*} \begin{remark*} $P$ is a stochastic matrix, so 1 is always an eigenvalue. If $P$ is irreducible, on a finite state space, then the Perron-Frobenius theorem from linear algebra ensures the existence of the invariant distribution. \end{remark*} \begin{definition*} For $k \in I$, let $T_k = \inf\{n \ge 1 : X_n = k\}$ be the first return time to $k$. For $i \in I$, define \[ \nu_k(i) = \EE_k \left[ \sum_{l = 0}^{T_k - 1} \mathbbm{1}(X_l = i) \right] \] \begin{center} \includegraphics[width=0.6\linewidth] {images/e0dbbb4859d311ed.png} \end{center} So $\nu_k(i)$ is the expected number of visits to $i$ during an excursion from $k$. In particular, $\nu_k$ is a measure on $I$. \end{definition*}