%! TEX root = PC.tex
% vim: tw=80 ft=tex
% 27/10/2025 09AM
Let $X_v$, for $v \in V(G)$, be defined by
\[ X_v = d \cdot \indicator{v \in I} + |N(v) \cap I| .\]
Note that, for fixed $I$,
\[ \sum_{v \in V(G)} X_v = \sum_{v \in V(G)} \bigl( d \cdot \indicator{v \in I} + |N(v) \cap I| \bigr) \le d|I| + d|I| = 2d|I| ,\]
since $\sum_{v \in V(G)} |N(v) \cap I| = \sum_{u \in I} d(u) \le d|I|$.
So
\[ \sum_v \Ebb X_v \le 2d \cdot \Ebb |I| ,\]
and so it is enough to show
\[ \Ebb X_v \ge c \log d ,\]
for all $v \in V(G)$ and some constant $c > 0$.
\begin{center}
\includegraphics[width=0.6\linewidth]{images/6375d7146ab04f12.png}
\end{center}
Now fix $v \in V(G)$ and define
\[ L = N(v) \cup \{v\}, \qquad F = V(G) \setminus (N(v) \cup \{v\}) .\]
By the law of total expectation (averaging over the possible values of $I \cap F$), we may bound
\[ \Ebb X_v \ge \min_{J \subset F} \Ebb(X_v \mid I \cap F = J) .\]
So now fix some $J \subset F$.
We consider $I \cap L'$ where $L' = L \setminus N(J)$ and $N(J)$ means $\{y : \exists x \in J, x \sim y\}$.
Let $l = |L'|$.
Now observe that $I \cap L'$ is uniform over all independent sets in $G[L']$.
So since $I \cap L'$ is either $\{v\}$ or any subset of $L' \setminus \{v\}$, we have
\[ \Ebb(X_v \mid I \cap F = J) = \frac{d}{2^{l - 1} + 1} + \left( \frac{l - 1}{2} \right) \left( \frac{2^{l - 1}}{2^{l - 1} + 1} \right) .\]
We are happy with this expression, because if $l$ is large, then the second term is large, and if $l$ is small then the first term is large.
Continuing the above calculation (using $\frac{2^{l - 1}}{2^{l - 1} + 1} \ge \frac{1}{2}$), we have
\[ \Ebb(X_v \mid I \cap F = J) \ge \max \left\{ \frac{d}{2^{l - 1} + 1}, \frac{l - 1}{4} \right\} .\]
Now solve
\[ \frac{d}{2^{l - 1} + 1} = \frac{l - 1}{4} ,\]
which is $4d = (l - 1)(2^{l - 1} + 1)$, so the two terms balance when $l = \Theta(\log d)$; hence the maximum is at least $c \log d$ for every value of $l$.
So
\[ \Ebb(X_v \mid I \cap F = J) \ge c \log d ,\]
and thus $\Ebb X_v \ge c \log d$ as desired.
\end{proof}
\subsection{Recap of $R(3, k)$ bounds proved in this course}
We have shown
\[ c \frac{k^2}{(\log k)^2} \le R(3, k) \le (1 + o(1)) \frac{k^2}{\log k} .\]
Kim showed $R(3, k) = \Theta \left( \frac{k^2}{\log k} \right)$.
\textbf{Triangle-free process:} Define the random graph process $(G_i)_i$.
Start from $G_0$, the empty graph on vertex set $[n]$.
Given $G_i$, define
\[ O_i = \{e \in [n]^{(2)} : G_i + e \not\supset K_3\} .\]
Now sample $e_{i + 1} \sim O_i$ uniformly at random, and define $G_{i + 1} = G_i + e_{i + 1}$.
This method is not what Kim used, but it was used by others later to improve the lower bound.
\begin{center}
\includegraphics[width=0.6\linewidth]{images/17e0ea7ee8e84b27.png}
\end{center}
Up until very recently, every proof of a lower bound on $R(3, k)$ has used a variant of the triangle-free process.
\begin{remark*}
There is some similarity between the triangle-free process and the first proof of \cref{thm:shearermaxdegtrianglefree} that we saw: both are about a process where each step is uniformly random once we condition on a desired property.
So we might expect a related theorem to hold for the triangle-free process, and indeed this is the case.
\end{remark*}
The Ramsey numbers $R(3, k)$ are still actively studied: attention has now turned to determining the right constant.
We now finish this section by using \cref{thm:shearermaxdegtrianglefree} to deduce the upper bound on $R(3, k)$:
\begin{fcthm}[Shearer upper bound on $R(3, k)$]
$R(3, k) \le (1 + o(1)) \frac{k^2}{\log k}$.
\end{fcthm}
\begin{proof}
Let $n = (1 + \delta) \frac{k^2}{\log k}$.
If the blue graph contains a triangle, we are done; so we may assume it is triangle-free.
If the blue graph has a vertex of degree $\ge k$, then its neighbourhood is an independent set of size $\ge k$ (since the blue graph is triangle-free), and again we are done.
So we may assume the max degree of the blue graph is $\le k$.
So apply \nameref{thm:shearermaxdegtrianglefree} to get (for large $k$) that
\begin{align*}
\indnum(G) &\ge (1 + o(1)) \frac{n}{k} \log k \\
&= (1 + o(1))(1 + \delta) \frac{k^2}{(\log k) k} \log k \\
&= (1 + o(1))(1 + \delta) k \\
&\ge k
\end{align*}
(where $G$ is the blue graph).
\end{proof}
\newpage
\section{Dependent Random Choice}
\begin{fcdefn}[$(s, k)$-rich]
\glsadjdefn{skrich}{$(s, k)$-rich}{set}%
Let $G$ be a graph.
Say that $R \subset V(G)$ is $(s, k)$-rich if for every $x_1, \ldots, x_s \in R$ we have
\[ |N(x_1) \cap \cdots \cap N(x_s)| \ge k .\]
\end{fcdefn}
\begin{center}
\includegraphics[width=0.6\linewidth]{images/250be422cde94c5c.png}
\end{center}
\begin{fcthm}[Dependent Random Choice]
\label{lemma:drc}
Assuming:
- $G$ a graph on $n$ vertices with $e(G) = m$
- $s, t, r, k \ge 0$
- $\frac{(2m)^t}{n^{2t - 1}} - \binom{n}{s} \left( \frac{k}{n} \right)^t \ge r$
Then: there exists $R \subset V(G)$, $|R| \ge r$ which is \gls{skrich}.
\end{fcthm}
Note that $t$ doesn't appear in the conclusion.
It is a parameter that we can tune appropriately in applications.
\begin{proof}
We sample $x_1, \ldots, x_t \in V(G)$ uniformly at random (with replacement).
Then
\begin{align*}
\Ebb_{x_1, \ldots, x_t} |N(x_1) \cap \cdots \cap N(x_t)|
&= \Ebb_{x_1, \ldots, x_t} \sum_{y \in V(G)} \indicator{x_1 \sim y \wedge x_2 \sim y \wedge \cdots \wedge x_t \sim y} \\
&= \sum_{y \in V(G)} [\Pbb_{x_1}(x_1 \sim y)]^t \\
&= \sum_{y \in V(G)} \left( \frac{d(y)}{n} \right)^t \\
&\ge n \left( \frac{2m}{n^2} \right)^t \\
&= \frac{(2m)^t}{n^{2t - 1}}
\end{align*}
where the second equality uses independence of the $x_i$, and the inequality is Jensen's inequality applied to the convex function $z \mapsto z^t$, using $\sum_{y \in V(G)} d(y) = 2m$.
Let $Y$ be the random variable that counts the number of $\{y_1, \ldots, y_s\} \in (N(x_1) \cap \cdots \cap N(x_t))^{(s)}$ with
\[ |N(y_1) \cap \cdots \cap N(y_s)| < k .\]
We want $Y$ not to be too big.
This will be the case (on average) because $\Pbb(y_1, \ldots, y_s \in N(x_1) \cap \cdots \cap N(x_t))$ is an increasing function of $|N(y_1) \cap \cdots \cap N(y_s)|$ (so our probability distribution is biased away from fully selecting tuples that will increase $Y$).
We make this precise as follows:
% We show
% \[
% \Ebb Y
% \le \binom{n}{s} \left( \frac{k}{n} \right)^t
% .\]
% Note that if we sample $x_1, \ldots, x_t$ as above and delete a vertex from
% each $y_1, \ldots, y_s$ that violates the rich condition, what remains has
% size at least
% \[
% |N(x_1) \cap \cdots \cap N(x_t)|
% - Y(x_1, \ldots, x_t)
% \]
% and is \gls{skrich}.
% \end{proof}