% vim: tw=50
% 16/02/2023 11AM

\begin{lemma*}
If $C$ is an LRT with size $\alpha$, and $C^*$ is
another test of size $\le \alpha$, then $C$ is at
least as powerful as $C^*$, i.e.
\[ \beta = \PP_{H_1}(x \not\in C) \le \PP_{H_1}(x \not\in C^*) = \beta^* \]
\end{lemma*}

\begin{example*}
$X_1, \ldots, X_n \iidsim \normaldist(\mu, \sigma_0^2)$,
where $\sigma_0^2$ is known. We want the best size
$\alpha$ test of $H_0$: $\mu = \mu_0$ against $H_1$:
$\mu = \mu_1$, for some fixed $\mu_1 > \mu_0$.
\begin{align*}
\Lambda_x(H_0; H_1)
&= \frac{\cancel{(2\pi \sigma_0^2)^{-n/2}} \exp \left( -\frac{1}{2\sigma_0^2} \sum (x_i - \mu_1)^2 \right)}
{\cancel{(2\pi \sigma_0^2)^{-n/2}} \exp \left( -\frac{1}{2\sigma_0^2} \sum (x_i - \mu_0)^2 \right)} \\
&= \exp \left( \frac{(\mu_1 - \mu_0)}{\sigma_0^2} n\ol{x} + \frac{n(\mu_0^2 - \mu_1^2)}{2\sigma_0^2} \right)
\end{align*}
$\Lambda_x(H_0; H_1)$ is monotone increasing in
$\ol{x} = \frac{1}{n} \sum x_i$. Hence, for any $k$,
there is a $c$ such that
$\Lambda_x(H_0; H_1) > k \iff \ol{x} > c$. Thus the
LRT critical region is $\{x : \ol{x} > c\}$ for some
constant $c$. By the same logic, the LRT is of the
form
\[ C = \left\{ x : \sqrt{n} \frac{(\ol{x} - \mu_0)}{\sigma_0} > c' \right\} \]
We want to pick $c'$ such that
\[ \PP_{H_0} \left( \sqrt{n} \frac{(\ol{x} - \mu_0)}{\sigma_0} > c' \right) = \alpha \]
But under $H_0$,
$\sqrt{n} \frac{(\ol{x} - \mu_0)}{\sigma_0} \sim \normaldist(0, 1)$
(this is a pivot). So we take
$c' = \Phi^{-1}(1 - \alpha)$, which we denote
$z_\alpha$. Finally, the LRT has critical region
\[ \left\{ x : \frac{\sqrt{n}(\ol{x} - \mu_0)}{\sigma_0} > z_\alpha \right\} \]
By the Neyman--Pearson (N-P) lemma, this is the most
powerful test of size $\alpha$. This is called a
``$z$-test'' because we use the $z$ statistic
$z = \sqrt{n} \left( \frac{\ol{x} - \mu_0}{\sigma_0} \right)$
to define the critical region.
\end{example*}

\subsubsection*{P-value}

For any test with critical region of the form
$\{x : T(x) > k\}$ for some statistic $T$, a
\emph{$p$-value}, or observed significance level, is
\[ p = \PP_{H_0}(T(X) > T(x^*)) \]
where $x^*$ is the observed data.

In the example we just saw, let $\mu_0 = 5$,
$\mu_1 = 6$, $\sigma_0 = 1$, $\alpha = 0.05$, and
suppose we observe
\[ x^* = (5.1, 5.5, 4.9, 5.3) \]
Then $\ol{x^*} = 5.2$ and $z^* = 0.4$, while
$z_\alpha = \Phi^{-1}(1 - \alpha) = 1.645$.
\begin{center}
\includegraphics[width=0.6\linewidth] {images/21f565e8adee11ed.png}
\end{center}
Here $z^* < z_\alpha$, so we fail to reject $H_0$:
$\mu = 5$; the $p$-value is
$p = 1 - \Phi(z^*) = 1 - \Phi(0.4) \approx 0.35$.

\begin{proposition*}
Under $H_0$, $p$ has a $\Unif(0, 1)$ distribution
(assuming $T$ has a continuous distribution under
$H_0$). Note $p$ is a function of $x^*$; the null
distribution assumes $x^* \sim \PP_{H_0}$.
\end{proposition*}

\begin{proof}
Let $F$ be the cdf of $T$ under $H_0$. Then for all
$u \in [0, 1]$,
\begin{align*}
\PP_{H_0}(p < u) &= \PP_{H_0}(1 - F(T) < u) \\
&= \PP_{H_0}(F(T) > 1 - u) \\
&= \PP_{H_0}(T > F^{-1}(1 - u)) \\
&= 1 - F(F^{-1}(1 - u)) \\
&= u
\end{align*}
Thus $p \sim \Unif(0, 1)$.
\end{proof}

\subsubsection*{Composite Hypotheses}

$X \sim f_X(\bullet \mid \theta)$, $\theta \in \Theta$.
$H_0$: $\theta \in \Theta_0 \subset \Theta$,
$H_1$: $\theta \in \Theta_1 \subset \Theta$. The Type
I and Type II error probabilities now depend on the
value of $\theta$ within $\Theta_0$ or $\Theta_1$
respectively. Let $C$ be some critical region.
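As a quick illustration (not from the lecture), the
following Python sketch uses Monte Carlo to estimate
the rejection probability $\PP_\mu(x \in C)$ of the
$z$-test region $C = \{x : z > z_\alpha\}$ at several
values of $\mu$, with the illustrative values $n = 4$,
$\sigma_0 = 1$, $\mu_0 = 5$, $\alpha = 0.05$ from the
example above; it assumes \texttt{numpy} and
\texttt{scipy} are available.
\begin{verbatim}
import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(0)
n, sigma0, mu0, alpha = 4, 1.0, 5.0, 0.05
z_alpha = norm.ppf(1 - alpha)  # upper alpha point of N(0,1), ~1.645

def rejection_prob(mu, reps=100_000):
    # Monte Carlo estimate of P_mu(x in C) for C = {z > z_alpha}
    x = rng.normal(mu, sigma0, size=(reps, n))
    z = np.sqrt(n) * (x.mean(axis=1) - mu0) / sigma0
    return np.mean(z > z_alpha)

# Type I error varies over Theta_0 = {mu <= mu_0};
# Type II error (1 - rejection prob) varies over Theta_1:
for mu in [4.5, 5.0, 5.5, 6.0]:
    print(mu, rejection_prob(mu))  # roughly 0.004, 0.05, 0.26, 0.64
\end{verbatim}
The rejection probability, viewed as a function of the
true $\mu$, is precisely the power function defined
next.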
\begin{flashcard}[power-func-and-ump-test]
\begin{definition*}[Power Function and UMP test]
The \emph{power function} of the test $C$ is\cloze{
\[ W(\theta) = \PP_\theta(\ub{x \in C}_{\text{$H_0$ rejected}}) \]}
The \emph{size} of $C$ is \cloze{the worst case Type
I error probability:
\[ \alpha = \sup_{\theta \in \Theta_0} W(\theta) \]}
We say that $C$ is \emph{uniformly most powerful}
(UMP) \cloze{of size $\alpha$ for $H_0$ against $H_1$
if:}
\begin{enumerate}[(1)]
\item \cloze{$\sup_{\theta \in \Theta_0} W(\theta) = \alpha$}
\item \cloze{For any other test $C^*$ of size
$\le \alpha$, with power function $W^*$, we have
$W(\theta) \ge W^*(\theta)$ for all
$\theta \in \Theta_1$.}
\end{enumerate}
\end{definition*}
\end{flashcard}

\begin{note*}
A UMP test need not exist. But, in some simple cases,
the LRT is UMP.
\end{note*}

\begin{example*}
$X_1, \ldots, X_n \iidsim \normaldist(\mu, \sigma_0^2)$,
with $\sigma_0^2$ known. We wish to test $H_0$:
$\mu \le \mu_0$ against $H_1$: $\mu > \mu_0$ for some
fixed $\mu_0$. We just studied the simple hypotheses:
\[ H'_0: \mu = \mu_0, \qquad H'_1 : \mu = \mu_1 \qquad (\mu_1 > \mu_0) \]
The LRT was:
\[ C = \left\{ x : z = \frac{\sqrt{n} (\ol{x} - \mu_0)}{\sigma_0} > z_\alpha \right\} \]
Claim: the same test $C$ is UMP for $H_0$ against
$H_1$. The power function for $C$ is
\begin{align*}
W(\mu) &= \PP_\mu(X \in C) = \PP_\mu \left( \frac{\sqrt{n} (\ol{x} - \mu_0)}{\sigma_0} > z_\alpha \right) \\
&= \PP_\mu \left( \frac{\sqrt{n}(\ol{x} - \mu)}{\sigma_0} > z_\alpha + \frac{\sqrt{n}(\mu_0 - \mu)}{\sigma_0} \right) \\
&= 1 - \Phi \left( z_\alpha + \frac{\sqrt{n} (\mu_0 - \mu)}{\sigma_0} \right)
\end{align*}
This is monotone increasing in
$\mu \in (-\infty, \infty)$ (see the numerical sketch
after this example).
\begin{center}
\includegraphics[width=0.6\linewidth] {images/63609b52adf211ed.png}
\end{center}
Since $W$ is increasing and
$\Theta_0 = \{\mu : \mu \le \mu_0\}$, we have
$\sup_{\mu \in \Theta_0} W(\mu) = W(\mu_0) = 1 - \Phi(z_\alpha) = \alpha$,
so the test has size $\alpha$. It remains to show
that if $C^*$ is another test of size $\le \alpha$
with power function $W^*$, then
$W(\mu_1) \ge W^*(\mu_1)$ for all $\mu_1 > \mu_0$.
Main observation: the critical region $C$ depends
only on $\mu_0$, and $C$ is the LRT for the simple
hypotheses $H_0'$: $\mu = \mu_0$ against $H_1'$:
$\mu = \mu_1$. Any test $C^*$ of $H_0$ vs $H_1$ of
size $\le \alpha$ also has size $\le \alpha$ for
$H_0'$ vs $H_1'$, since
\[ W^*(\mu_0) \le \sup_{\mu \in \Theta_0} W^*(\mu) \le \alpha \]
Hence by the N-P lemma, $W(\mu_1) \ge W^*(\mu_1)$. As
we can apply this argument for any $\mu_1 > \mu_0$,
we have
\[ W^*(\mu_1) \le W(\mu_1) \qquad \forall \mu_1 > \mu_0 \]
\end{example*}
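To close the loop on the numbers quoted in this
section, here is a minimal Python sketch (again
assuming \texttt{numpy}/\texttt{scipy} and the same
illustrative values; the function name \texttt{W}
mirrors the notation above) evaluating the
closed-form power function just derived, together
with the $p$-value for the observed data from the
$p$-value example.
\begin{verbatim}
import numpy as np
from scipy.stats import norm

n, sigma0, mu0, alpha = 4, 1.0, 5.0, 0.05
z_alpha = norm.ppf(1 - alpha)          # ~1.645

def W(mu):
    # W(mu) = 1 - Phi(z_alpha + sqrt(n)(mu_0 - mu)/sigma_0)
    return 1 - norm.cdf(z_alpha + np.sqrt(n) * (mu0 - mu) / sigma0)

print(W(5.0))   # = alpha = 0.05: the size is attained at mu = mu_0
print(W(6.0))   # ~0.64: power against mu_1 = 6, matching the simulation

# p-value for the observed data x* = (5.1, 5.5, 4.9, 5.3):
x_star = np.array([5.1, 5.5, 4.9, 5.3])
z_star = np.sqrt(n) * (x_star.mean() - mu0) / sigma0   # = 0.4
print(1 - norm.cdf(z_star))   # ~0.345, so H_0 is not rejected at alpha = 0.05
\end{verbatim}
The values $W(\mu_0) = \alpha$ and $W$ increasing in
$\mu$ agree with the size calculation and the plot
above.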