%! TEX root = ABF.tex
% vim: tw=80 ft=tex
% 19/02/2026 12PM

\subsubsection*{Noise and stability}

Fix $p$, so that we don't have to write subscripts everywhere.

\glssymboldefn{pN}%
Let $x \in \{-1, 1\}^n$ and let $\rho \in [0, 1]$. We say that $y \sim N_\rho(x)$ if, independently for each $i$, with probability $\rho$ we have $y_i = x_i$ and with probability $1 - \rho$ the coordinate $y_i$ is $\mup$-random. If $x \sim \mup$, then
\[ \Pbb[y_i = 1] = \rho q + (1 - \rho)q = q .\]
So $y \sim \mup$. Then we say that $x$ and $y$ are $\rho$-correlated, and write $x \sim_\rho y$. Note: $\sim_\rho$ depends on $p$, but we don't write $\sim_{\rho, p}$ (i.e. we omit the $p$ for convenience).

\begin{fcdefn}[]
  \glssymboldefn{pnoise}%
  The \emph{$p$-biased noise operator} $T_\rho$ is given by the formula
  \[ T_\rho f(x) = \Ebb_{y \sim \pN_\rho(x)} f(y) .\]
\end{fcdefn}

\begin{fclemma}[] % Lemma 6.2
  \label{lemma:6.2}
  For every $A \subset [n]$, $\pnoise_\rho \pphi_A = \rho^{|A|} \pphi_A$.
\end{fclemma}

Reminders:
\begin{align*}
  \pphi(t) &= \frac{t - \mu}{\sigma} \\
  \mu &= q - p = 2q - 1 = 1 - 2p \\
  \sigma &= 2\sqrt{pq} \\
  \pphi(1) &= \sqrt{\frac{p}{q}} \\
  \pphi(-1) &= -\sqrt{\frac{q}{p}} \\
  \pphi_A(x) &= \prod_{i \in A} \pphi(x_i)
\end{align*}

\begin{proof}
  \begin{align*}
    \pnoise_\rho \pphi_A(x) &= \Ebb_{y \sim \pN_\rho(x)} \prod_{i \in A} \pphi(y_i) \\
    &= \prod_{i \in A} \Ebb_{y \sim \pN_\rho(x)} \pphi(y_i) &&\text{(by independence of the $y_i$)} \\
    &= \prod_{i \in A} (\rho \pphi(x_i) + (1 - \rho) \Ebb \pphi) \\
    &= \rho^{|A|} \pphi_A(x) &&\text{(since $\Ebb \pphi = 0$)} \qedhere
  \end{align*}
\end{proof}

\begin{fccoro}[] % Corollary 6.3
  \label{coro:6.3}
  $\pft{\pnoise_\rho f}(A) = \rho^{|A|} \pft{f}(A)$.
\end{fccoro}

\begin{proof}
  $\pnoise_\rho f = \sum_A \pft{f}(A) \pnoise_\rho \pphi_A = \sum_A \rho^{|A|} \pft{f}(A) \pphi_A$.
\end{proof}

\begin{fcdefn}[]
  \glssymboldefn{pstab}%
  Let $\rho \in [0, 1]$, $f : \{-1, 1\}^n \to \Rbb$.
Then
\[ \Stabinternal_\rho f = \pip \langle f, \pnoise_\rho f \rangle = \Ebb_{x \sim_\rho y} f(x) f(y) .\]
\end{fcdefn}

\begin{remark*}
  By \cref{coro:6.3}, $\pstab_\rho f = \sum_A \rho^{|A|} \pft{f}(A)^2$, as in the unbiased case.
\end{remark*}

\subsubsection*{The Margulis--Russo formula}

We shall be considering more than one value of $p$.

\textbf{Convention:} Let $f : \{-1, 1\}^n \to \Rbb$. If we write $f^{(p)}$, then all definitions should be understood to be $p$-biased.

\begin{fclemma}[] % Lemma 6.4
  \label{lemma:6.4}
  Let $f : \Rbb^n \to \Rbb$ be a multilinear function, and write $f$ also for its restriction to $\{-1, 1\}^n$. Then $\Ebb_{x \sim \mup} f^{(p)}(x) = f(\mu, \mu, \ldots, \mu)$.
\end{fclemma}

Note: whenever we write $f^{(p)}$, it is implicit that we are restricting to $\{-1, 1\}^n$, since $\mup$ is only defined there.

We will give 3 proofs!

\begin{proof}[Proof 1]
  Write $f = \sum_A \theta_A x_A$. Then $\Ebb_{x \sim \mup} x_A = \prod_{i \in A} \Ebb_{x \sim \mup} x_i = \prod_{i \in A} (q - p) = \mu^{|A|} = x_A(\mu, \mu, \ldots, \mu)$. Then by linearity, we're done.
\end{proof}

You might think the above proof is a bit odd, since it uses the $x_A$ even though we're in the $p$-biased case. I would agree with you!

\begin{proof}[Proof 2]
  Write $f^{(p)} = \sum_A \pft{f}(A) \pphi_A$. Then
  \[ \Ebb_{x \sim \mup} \pphi_A = \prod_{i \in A} \Ebb_{x \sim \mup} \pphi(x_i) = \begin{cases} 0 & A \neq \emptyset \\ 1 & A = \emptyset \end{cases} = \pphi_A(\mu, \mu, \ldots, \mu) \qedhere \]
\end{proof}

\begin{proof}[Proof 3]
  Induction on $n$.
  \[ \Ebb f^{(p)}(x) = \Ebb (q f^{(p)} (x_{n \subst 1}) + p f^{(p)} (x_{n \subst -1})) = \Ebb f(x_{n \subst \mu}) = f(\mu, \ldots, \mu) .\]
  Here the second equality used linearity (of the multilinear extension $f$) in the last coordinate, and the last equality is by the induction hypothesis.
\end{proof}

\begin{fcthm}[The Margulis--Russo formula] % Theorem 6.5
  \label{thm:6.5}
  Let $f$ be as above.
Then
\[ \frac{\dd}{\dd \mu} \Ebb f^{(p)} = \frac{1}{\sigma} \sum_{i = 1}^n \pft{f^{(p)}}(i) .\]
\end{fcthm}

\begin{remark*}
  Later we will care instead about $\frac{\dd}{\dd p}$. But for now it is easier to work with $\frac{\dd}{\dd \mu}$.
\end{remark*}

\begin{proof}
  By \cref{lemma:6.4},
  \begin{align*}
    \frac{\dd}{\dd \mu} \Ebb f^{(p)} &= \frac{\dd}{\dd \mu} f(\mu, \mu, \ldots, \mu) \\
    &= \sum_{i = 1}^{n} \frac{\partial}{\partial x_i} f(\mu, \mu, \ldots, \mu) \\
    &= \sum_{i = 1}^{n} \half (f(\bm{\mu}_{i \subst 1}) - f(\bm{\mu}_{i \subst -1})) &&\text{(by multilinearity)} \\
    &= \frac{1}{\sigma} \sum_{i = 1}^{n} \pD_i f(\bm{\mu}) \\
    &= \frac{1}{\sigma} \sum_{i = 1}^{n} \Ebb_{x \sim \mup} \pD_i f^{(p)}(x)
  \end{align*}
  But $\pD_i f = \sum_{A \ni i} \pft{f}(A) \pphi_{A \setminus \{i\}}$, so $\Ebb \pD_i f = \pft{f}(i)$. The result follows.
\end{proof}

\begin{fccoro}[] % Corollary 6.6
  \label{coro:6.6}
  Let $f : \{-1, 1\}^n \to \{-1, 1\}$ be a monotone Boolean function. Then
  \[ \frac{\dd}{\dd p} \Pbb[f^{(p)}(x) = -1] = \frac{1}{\sigma^2} \totinf(f^{(p)}) .\]
\end{fccoro}

\begin{proof}
  $\Ebb f^{(p)} = 1 - 2\Pbb[f^{(p)}(x) = -1]$, so
  \[ \Pbb[f^{(p)}(x) = -1] = \frac{1 - \Ebb f^{(p)}}{2} .\]
  Therefore,
  \begin{align*}
    \frac{\dd}{\dd p} \Pbb[f^{(p)}(x) = -1] &= -\half \frac{\dd}{\dd \mu} \Ebb f^{(p)} \frac{\dd \mu}{\dd p} \\
    &= \frac{1}{\sigma} \sum_{i = 1}^{n} \pft{f^{(p)}}(i) &&\text{(by \cref{thm:6.5}, using $\frac{\dd \mu}{\dd p} = -2$)}
  \end{align*}
  From the proof of \cref{thm:6.5}, this is $\frac{1}{\sigma} \sum_{i = 1}^{n} \Ebb \pD_i f^{(p)}$. Since $f$ is monotone, $\pD_i f^{(p)}(x) \in \{0, \sigma\}$, so this equals
  \[ \frac{1}{\sigma^2} \sum_{i = 1}^{n} \Ebb(\pD_i f^{(p)})^2 = \frac{1}{\sigma^2} \sum_i \|\pD_i f^{(p)}\|_2^2 = \frac{1}{\sigma^2} \totinf(f^{(p)}) . \qedhere \]
\end{proof}

\begin{remark*}
  Suppose that $p_1 < p_2$ are such that $\Pbb[f^{(p_1)}(x) = -1] = \eps$, $\Pbb[f^{(p_2)} = -1] = 1 - \eps$.
Then by the mean value theorem there exists $p \in (p_1, p_2)$ such that
\[ \frac{\dd}{\dd p} \Pbb[f^{(p)}(x) = -1] = \frac{1 - 2\eps}{p_2 - p_1} .\]
So by \cref{coro:6.6}, there exists $p \in (p_1, p_2)$ such that $\totinf(f^{(p)}) = \sigma^2 \left( \frac{1 - 2\eps}{p_2 - p_1} \right)$. So if $p_2 - p_1$ isn't small, then there exists $p$ such that $\eps \le \Pbb[f^{(p)} = -1] \le 1 - \eps$ and $\totinf(f^{(p)})$ isn't too large.
\end{remark*}