diff --git "a/data_all_eng_slimpj/shuffled/split2/finalzzgrxt" "b/data_all_eng_slimpj/shuffled/split2/finalzzgrxt" new file mode 100644--- /dev/null +++ "b/data_all_eng_slimpj/shuffled/split2/finalzzgrxt" @@ -0,0 +1,5 @@ +{"text":"\\section{Additional Comments}\n\n\\begin{lemma} \n\tFor any $K \\in \\mathbb{N}, p \\in [0, 0.5],$ we have that \n\t\\begin{align} \n\t& I(X \\; ; \\; X + \\sum_{i=1}^K Y_i) \\\\\n\t& = H(p) - \\sum_{i=0}^{K-1} p^{K-i} (1-p)^{i+1} \\binom{K+1}{i+1} H \\left(\\frac{i+1}{K+1}\\right).\n\t\\end{align}\n\\end{lemma}\n\n\\begin{lemma}\n\tFor any $K \\in \\mathbb{N},$ $p = c(1 - e^{-\\frac{1}{K+1}}),$ and $L \\in \\mathbb{N}$ such that $L \\leq K$ and $c \\in \\mathbb{R}, \\; \\; 0 \\leq c \\leq 1,$ we have that \n\t\\begin{align} \n\t& I(X \\; ; \\; X + \\sum_{i=1}^K Y_i)\n\t\\\\ & \\leq c \\left( \\frac{1}{K+1} \\right) \\log\\left(c \\left(K+2 \\right) \\right) \n\t\\\\ & + \\left( \\frac{K+2-c}{K+2} \\right) \\log\\left(\\frac{K+1}{K+1-c} \\right)\n\t\\\\ & - c\\left( \\frac{K-L+2}{K+2} \\right)^{L} \\left( \\frac{1}{K+1} \\right) \\log \\left( \\frac{K+1}{L} \\right) \n\t\\\\ & \\times \\left(1 - \\frac{c}{K+1} \\right)^{K} e^c \\left(1 - \\frac{1}{\\sqrt{2 \\pi L}} \\left(\\frac{ce}{L}\\right)^L \\right) \n\t\\end{align}\n\\end{lemma}\n\nSubstituting $L = \\ln(K)$ into the bound above, we have that \n\nThus, the perfomance of the uniform scheme is upper bounded by \n\\begin{align}\n& c \\left( \\frac{1}{K+1} \\right) \\log\\left(c \\left(K+2 \\right) \\right) \n\\\\ & + \\left( \\frac{K+2-c}{K+2} \\right) \\log\\left(\\frac{K+1}{K+1-c} \\right)\n\\\\ & - c\\left( \\frac{K-\\ln(K)+2}{K+2} \\right)^{\\ln(K)} \\left( \\frac{1}{K+1} \\right) \\log \\left( \\frac{K+1}{\\ln(K)} \\right) \n\\\\ & \\left(1 - \\frac{c}{K+1} \\right)^{K} e^c \\left(1 - \\frac{1}{\\sqrt{2 \\pi \\ln(K)}} \\left(\\frac{ce}{\\ln(K)}\\right)^{\\ln(K)} \\right) \n\\\\ & \\sim \\frac{c\\ln(\\ln(K))}{\\ln(2)K}\n\\end{align}\n\nWe also have the lower bound at $p = c (1 - 
e^{-\\frac{1}{K+1}})$ is given by \n\\begin{align}\n& -\\left(1 - c(1 - e^{-\\frac{1}{K+1}}) \\right)^{K+1} \\log(1 - c(1 - e^{-\\frac{1}{K+1}}))\n\\\\ & - \\left(1 - c(1 - e^{-\\frac{1}{K+1}}) \\right)^{K+1} \\log(1 - (1 - c(1 - e^{-\\frac{1}{K+1}}))^K)\n\\\\ & + \\left(1 - c(1 - e^{-\\frac{1}{K+1}}) \\right)^{K+1} \\log(1 - (1 - c(1 - e^{-\\frac{1}{K+1}}))^{K+1})\n\\\\ & + \\left(1 - c(1 - e^{-\\frac{1}{K+1}}) \\right) \\log(1 - \\left(1 - c \\left(1 - e^{-\\frac{1}{K+1}} \\right) \\right)^{K})\n\\\\ & -\\log(1 - (1 - c(1 - e^{-\\frac{1}{K+1}}))^{K+1})\n\\\\ & \\sim \\frac{c e^{-c}}{K \\ln(2)}\n\\\\ & - \\frac{(c-3)c e^{-c}}{2(e^c-1)K \\ln(2)} + \\frac{(c-1)c e^{-c} \\ln(1 - e^{-c})}{2K \\ln(2)} \n\\\\ & + \\frac{(c-1)c e^{-c}}{2(e^c-1)K \\ln(2)} - \\frac{(c-1)c e^{-c} \\ln(1-e^{-c})}{2K \\ln(2)} \n\\\\ & - \\frac{c \\ln(1 - e^{-c})}{K \\ln(2)} + \\frac{(c-3)c}{2(e^c-1)K \\ln(2)}\n\\\\ & - \\frac{(c-1)c}{2(e^c-1)K \\ln(2)}\n\\\\ & = \\Theta \\left( \\frac{1}{K} \\right)\n\\end{align}\n\nThis proves that the ratio between the performance of the uniform scheme and the lower bound for $p = c(1 - e^{-\\frac{1}{K+1}})$ is upper bounded by a function that is $O(\\ln(\\ln(K)))$ for $0 \\leq c \\leq 1.$\n\n\\section{Appendix}\n\n\\begin{lemma} \n\tFor any $K \\in \\mathbb{N}, p \\in [0, 0.5],$ we have that \n\t\\begin{align*} \n\t& I(X \\; ; \\; X + \\sum_{i=1}^K Z_i)\n\t\\\\ & = H(p) - \\sum_{i = 1}^{K} p^{K+1-i} (1-p)^{i} \\binom{K+1}{i} H\\left(\\frac{i}{K+1}\\right)\n\t\\end{align*}\n\t\\label{lem:uniform_schem_formula}\n\\end{lemma}\n\n\\begin{lemma} \\label{lem:binary_scheme_performance}\n For any $K \\in \\mathbb{N}, p \\in [0, 0.5],$ we have that \n\t\\begin{align*} \n\t& I(X \\; ; \\; X + \\sum_{i=1}^{K} 2^{i-1} Z_i) \\\\ \n\t& = H(p) - \\sum_{i=1}^K (p^i (1-p) + p (1-p)^i) H \\left(\\frac{1}{1 + \\frac{p (1-p)^i}{p^i (1-p)}}\\right).\n\t\\end{align*}\n\\end{lemma}\n\n\\begin{lemma} \\label{lem:lower_bound}\n\tFor any $K\\in \\mathbb{N},$ $p \\in [0, 0.5],$ and 
$\\alpha_i \\in \\mathbb{N}, \\; i \\in [K],$ we have that \n\t\\begin{align*} \n\tI(X \\; ; \\; X + \\sum_{i=1}^{K} \\alpha_i Z_i) \\geq H(p) - 1 + p^{K+1} + (1-p)^{K+1}.\n\t\\end{align*}\n\\end{lemma}\n\n\\begin{theorem} \\label{theorem:main_recursion}\n\tLet\n \\begin{align*} \n T_{M, N} = X + \\sum_{i=1}^M Z_i + \\sum_{j=1}^{N} 2^{j-1} Z_{M+j}\n \\end{align*}\n and\n \\begin{align*} \n S_{M,N} = 1 + M + \\sum_{i=1}^{N} 2^{i-1}.\n \\end{align*}\n For $M,N \\in \\mathbb{N}$ such that $M>0$ and $N>1,$ we have that \n\t\\begin{align*} \n\t\t& H(X \\; | \\; T_{M,N}) \n\t\t\\\\ & = H(X \\; | \\; T_{M,N-1}) \n\t\t\\\\ & - (1-p) \\!\\!\\!\\! \\sum_{t=-S_{M,N-1}}^{-S_{M,N-1}+2M} \\!\\!\\!\\!\\!\\!\\!\\!\\!\\! \\Pr(T_{M,N-1} = t) H(X \\; | \\; T_{M,N-1} = t)\n\t\t\\\\ & - \tp \\!\\!\\!\\! \\sum_{t=S_{M,N-1}-2M}^{S_{M,N-1}} \\!\\!\\!\\!\\!\\!\\!\\!\\!\\! \\Pr(T_{M,N-1} = t) H(X \\; | \\; T_{M,N-1} = t)\n\t\t\\\\ & + \\sum_{t=-M}^{M} \\Pr(T_{M,N} = t) H(X \\; | \\; T_{M,N} = t). \n\t\\end{align*}\n\\end{theorem}\n\n\\begin{algorithm} \\label{alg:uniform_binary_polytime}\n\t\\SetAlgoLined\n\t\\KwData{$N,$ $M,$ $p$}\n\t\\KwResult{$I_{M,N}$}\n\t$n_1 \\leftarrow \\min(N, \\ceil{\\log(2M+2)})$\\;\n\t$D_{n_1} \\leftarrow$ array containing PMF for $\\sum_{i=1}^M Z_i + \\sum_{j=1}^{n_1} 2^{j-1} Z_{M+j}$ starting at the lowest support value and ending at highest support value\\;\n\t$D_{\\text{low}} \\leftarrow$ array containing first $2M+1$ entries of $D_{n_1}$\\;\n\t$D_{\\text{high}} \\leftarrow$ array containing last $2M+1$ entries of $D_{n_1}$\\;\n\t$H_{n_1} \\leftarrow $ $\\sum_{t=-S_{M,n_1}}^{S_{M,n_1}} \\Pr(T_{M, n_1} = t) H(X \\; | \\; T_{M, n_1} = t)$ computed using $D_{n_1}$\\; \n\t$H_{\\text{low}} \\leftarrow $ \n\t$\\sum_{t=-S_{M,n_1}}^{-S_{M,n_1}+2M} \\Pr(T_{M, n_1} = t) H(X \\; | \\; T_{M, n_1} = t)$ computed using $D_{\\text{low}}$\\; \n\t$H_{\\text{high}} \\leftarrow $ $\\sum_{t=S_{M,n_1}-2K}^{S_{M,n_1}} \\Pr(T_{M, n_1} = t) H(X \\; | \\; T_{M, n_1} = t)$ computed 
using $D_{\\text{high}}$\\; \t\n\t$n \\leftarrow n_1 + 1$\\;\n\t\\While{$n \\leq N$}{\n\t\n\t\t$H_{n} \\leftarrow H_{n-1}$\\;\n\t\t$H_{n} \\leftarrow H_{n} - (1-p) H_{\\text{low}} - p H_{\\text{high}}$\\;\n\t\t$D_{\\text{low ext}} \\leftarrow $ $D_{\\text{low}}$ appended with $[0, \\; 0]$\\;\n\t\t$D_{\\text{high ext}} \\leftarrow $ $D_{\\text{high}}$ prepended with $[0,\\; 0]$\\;\n\t\t$H_{\\text{center}} \\leftarrow$ \t$\\sum_{t=-M}^{M} \\Pr(T_{M, n} = t) H(X \\; | \\; T_{M, n} = t)$ computed using $p D_{\\text{high ext}} + (1-p) D_{\\text{low ext}}$\\;\n\t\t$H_{n} \\leftarrow H_{n} + H_{\\text{center}}$\\;\n\t\t$H_{\\text{low}} \\leftarrow p H_{\\text{low}}$\\; \n\t\t$H_{\\text{high}} \\leftarrow (1-p) H_{\\text{high}}$\\; \n\t\t$D_{\\text{low}} \\leftarrow p D_{\\text{low}}$\\;\n\t\t$D_{\\text{high}} \\leftarrow (1-p) D_{\\text{high}}$\\;\n\t\t$n \\leftarrow n + 1$\\;\n\t}\n\t$I_{M,N} \\leftarrow H(p) - H_N$\n\t\n\t\n\t\\caption{Computation of $I(X \\; ; \\; X + \\sum_{i=1}^M Z_i + \\sum_{j=1}^{N} 2^{j-1} Z_{M+j})$}\n\\end{algorithm}\n\n\\subsection{Proof of Lemma \\ref{lem:uniform_schem_formula}}\n We have that \n \\begin{align*} \n\t& I\\left(X \\; ; \\; X + \\sum_{i=1}^K Z_i\\right) \n\t\\\\ & = H(p) - H\\left(X \\; | \\; X + \\sum_{i=1}^K Z_i\\right) \n\t\\\\ & = H(p) \n\t\\\\ & - \\sum_{t = 0}^{K+1} \\Pr\\left(X + \\sum_{i=1}^K Z_i = t\\right) H\\left(X \\; | \\; X + \\sum_{i=1}^K Z_i = t\\right).\n\t\\end{align*}\n Thus,\n \\[\\Pr\\left(X + \\sum_{i=1}^K Z_i = t\\right) = p^{K+1-t} (1-p)^{t} \\binom{K+1}{t}.\\] Due to Bayes rule, we have \n\t\\begin{align*}\n\t \\Pr\\left(X = 1 \\; | \\; X + \\sum_{i=1}^K Z_i = t\\right) = \\frac{t}{K+1},\n\t\\end{align*}\n and thus, \n\t\\begin{align}\n\t & H\\left(X \\; | \\; X + \\sum_{i=1}^K Z_i = t\\right)\n\t = H\\left(\\frac{t}{K+1}\\right).\n\t\\end{align}\n Thus,\n \\begin{align*} \n\t& I(X \\; ; \\; X + \\sum_{i=1}^K Z_i) \n\t\\\\ & = H(p) \n\t\\\\ & - \\sum_{t = 0}^{K+1} \\Pr\\left(X + \\sum_{i=1}^K Z_i = 
t\\right) H\\left(X \\; | \\; X + \\sum_{i=1}^K Z_i = t\\right)\n\t\\\\ & = H(p) - \\sum_{t = 0}^{K+1} p^{K+1-t} (1-p)^{t} \\binom{K+1}{t} H\\left(\\frac{t}{K+1}\\right)\n \\\\ & = H(p) - \\sum_{t = 1}^{K} p^{K+1-t} (1-p)^{t} \\binom{K+1}{t} H\\left(\\frac{t}{K+1}\\right)\n\t\\end{align*}\n\n \n\n\t\n\n\\subsection{Proof of Lemma \\ref{lem:optimality_of_uniform}}\n\tLet $K \\in \\mathbb{N}$ and\n\t\\begin{align*}\n\t&S_K = 1 + \\sum_{i=1}^K \\alpha_i, \\quad \\quad T(K) = X + \\sum_{i=1}^K \\alpha_i Z_i.\n\t\\end{align*}\n\tWe have that \n\t\\begin{align*} \n\tI(X \\; ; \\; X + \\sum_{i=1}^K \\alpha_i Z_i ) = H(p) - H(X \\; | \\; X + \\sum_{i=1}^K \\alpha_i Z_i)\n\t\\end{align*} \n\tand\n\t\\begin{align*} \n\t&H(X \\; | \\; X + \\sum_{i=1}^K \\alpha_i Z_i) \\\\\n\t& = \\sum_{t=-S_K}^{S_K} \\Pr(T(K) = t) H(X \\; | \\; T(K) = t)\n\t\\end{align*} \n\tObserve that as $p \\to 0,$ the terms of $ H(X \\; | \\; X + \\sum_{i=1}^K \\alpha_i Z_i)$ with the lowest powers of $p$ dominate.\n\tThe only term containing no power of $p$ is \n\t\\begin{align*} & \\Pr(T(K) = S_K) H(X \\; | \\; T(K) = S_K) \\\\ & = (1-p)^{K+1} H(X \\; | \\; T(K) = S_K) = 0. \n\t\\end{align*}\n\tNext, consider the term $\\Pr(T(K) = S_K-2) H(X \\; | \\; T(K) = S_K-2).$ Observe that \n\t\\begin{align*} & \\Pr(T(K) = S_K-2) \\\\& = (1-p) \\Pr(T(K)-X = S_K-3) \\\\ & + p \\Pr(T(K)-X = S_K-1) \n\t\\end{align*}\n\tand\n\t\\begin{align*} \n\t&\\Pr(T(K)-X = S_K-3) = p (1-p)^{K-1}|\\{i : \\alpha_i = 1\\}|, \\\\\n\t& \\Pr(T(K)-X = S_K-1) = (1-p)^K. 
\n\t\\end{align*}\n\tWe than have that \n\t\\begin{align*} \n\t&\\Pr(T(K) = S_K-2) H(X \\; | \\; T(K) = S_K-2) \\\\\n\t& = p (1-p)^{K}(1 + |\\{i : \\alpha_i = 1\\}|) H(X \\; | \\; T(K) = S_K-2) \\\\\n\t& = p (1-p)^{K}(\\{i : \\alpha_i = 1\\}| \\\\\n\t& \\times \\log(\\frac{p (1-p)^{K}(1+|\\{i : \\alpha_i = 1\\}|)}{p (1-p)^{K}|\\{i : \\alpha_i = 1\\}|})) \\\\ \n\t& + p (1-p)^{K} \\log(\\frac{p (1-p)^{K}(1+|\\{i : \\alpha_i = 1\\}|)}{p (1-p)^{K}}) \\\\\n\t& = p (1-p)^{K}(\\{i : \\alpha_i = 1\\}|\\log(\\frac{1+|\\{i : \\alpha_i = 1\\}|}{|\\{i : \\alpha_i = 1\\}|})) \\\\\n\t& + p (1-p)^{K} \\log(1+|\\{i : \\alpha_i = 1\\}|)\n\t\\end{align*}\n\tThe above function is maximized when $|\\{i : \\alpha_i = 1\\}| = K.$ This is because \n\t\\begin{align*} \n\t\\frac{d}{d x} (x \\log(1 + \\frac{1}{x}) + \\log(1 + x)) > 0\n\t\\end{align*} \n\tfor all $x > 0.$\n\t\n\tNow consider any $t$ such that $t < S_K-2.$ For such a value of $t,$ we have that \\begin{align*} \\Pr(T(K) - X = t-1) = d (1-p)^{K-c} p^c + o(p^c)\n\t\\end{align*} \n\tfor some $c \\in \\mathbb{N}, \\; c \\geq 1$ and $d \\in \\mathbb{N}$ because there must be at least one $i \\in [K]$ such that $Z_i=-1$ in this case. Furthermore, we have \n\t\\begin{align*} \n\t\\Pr(T(K) - X = t+1) = b (1-p)^{K-a} p^a + o(p^a)\n\t\\end{align*} \n\tfor some $a \\in \\mathbb{N}, \\; a \\geq 1$ and $b \\in \\mathbb{N}$ because there must be at least one $i \\in [K]$ such that $Z_i=-1$ in this case. 
Thus, \n\t\\begin{align*} \n\t&\\Pr(T(K) = t) H(X \\; | \\; T(K) = t) \\\\ \n\t&= (d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\\n\t& \\times \\log(1 + \\frac{ b (1-p)^{K-a} p^{a+1} ) + o(p^{a+1})}{d (1-p)^{K-c+1} p^{c} + o(p^{c})}) \\\\\n\t& + (b (1-p)^{K-a} p^{a+1} + o(p^{a+1})) \\\\\n\t& \\times \\log(1 + \\frac{d (1-p)^{K-c+1} p^{c} + o(p^{c})}{b (1-p)^{K-a} p^{a+1} + o(p^{a+1})}) \\\\\n\t& = o(p).\n\t\\end{align*}\n\n\tThus, there exists some $\\epsilon > 0$ such that the uniform scheme is optimal for $p < \\epsilon.$\n\t\n\tWe now justify the last line in the equation above. \n\tConsider the first term above in the second line. If $c < a+1,$ it follows that \n\t\\begin{align*}\n\t&(d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\\n\t& \\times \\log(1 + \\frac{ b (1-p)^{K-a} p^{a+1} ) + o(p^{a+1})}{d (1-p)^{K-c+1} p^{c} + o(p^{c})}) \\\\\n\t& = (d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\\n\t&\\times \\log(1 + \\frac{ b (1-p)^{K-a} p^{a+1-c} + o(p^{a+1-c})}{d (1-p)^{K-c+1} + o(1)}) \\\\\n\t& = (d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\ \n\t& \\times \\frac{ b (1-p)^{K-a} p^{a+1-c} + o(p^{a+1-c})}{d (1-p)^{K-c+1} + o(1)} \\\\\n\t& = o(p).\n\t\\end{align*}\n\tIf $c \\geq a+1,$ it follows that \n\t\\begin{align*}\n\t& (d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\\n\t& \\times \\log(1 + \\frac{ b (1-p)^{K-a} p^{a+1} + o(p^{a+1})}{d (1-p)^{K-c+1} p^{c} + o(p^{c})})\\\\\n\t& \\leq (d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\\n\t& \\times \\log(\\frac{1}{d (1-p)^{K-c+1} p^{c} + o(p^{c})})\\\\\n\t& \\leq (d (1-p)^{K-c+1} p^{c} + o(p^{c}))\\log(\\frac{1}{(1-p)^{K} p^{K}})\\\\\n\t& = K (d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\\n\t& \\times \\log(\\frac{1}{p(1-p)})\\\\\n\t& = o(p).\n\t\\end{align*}\n\tFinally, consider the second term. 
It follows that\n\t\\begin{align*}\t\n\t& (b (1-p)^{K-a} p^{a+1} + o(p^{a+1})) \\\\\n\t& \\times \\log(1 + \\frac{d (1-p)^{K-c+1} p^{c} + o(p^{c})}{b (1-p)^{K-a} p^{a+1} + o(p^{a+1})}) \\\\\n\t& \\leq (b (1-p)^{K-a} p^{a+1} + o(p^{a+1})) \\\\\n\t& \\times \\log(\\frac{1}{b (1-p)^{K-a} p^{a+1} + o(p^{a+1})}) \\\\\n\t& \\leq (b (1-p)^{K-a} p^{a+1} + o(p^{a+1})) \\\\\n\t& \\times \\log(\\frac{1}{ (1-p)^{K} p^{K} }) \\\\\n\t& = K(b (1-p)^{K-a} p^{a+1} + o(p^{a+1})) \\\\\n\t& \\times \\log(\\frac{1}{ p (1-p) }) \\\\\n\t& = o(p).\n\t\\end{align*}\n\n\n\n\n\\subsection{Proof of Lemma \\ref{lem:binary_scheme_performance}}\n\nWe will prove this by induction on $K \\in \\mathbb{N}.$ Let $p \\in [0, 0.5].$ For $K = 1,$ we have that \n\\begin{align*} \n& I(X \\; ; \\; X + Z_1) \\\\ \n& = H(p) - H(X \\; | \\; X + Z_1) \\\\\n& = H(p) - 2p(1-p)H(0.5)\n\\end{align*}\t\t\nwhich matches the formula.\nAssume the formula holds for the $(K-1)$th case where $K > 1.$ Consider the $K$th case: \n\\begin{align*} \n& I(X \\; ; \\; X + \\sum_{i=1}^{K} 2^{i-1} Z_i) \\\\\n& = H(p) - H(X \\; | \\; X + \\sum_{i=1}^{K} 2^{i-1} Z_i) \\\\\n& = H(p) - (1-p)H(X \\; | \\; X + \\sum_{i=1}^{K-1} 2^{i-1} Z_i) \\\\\n& - p H(X \\; | \\; X + \\sum_{i=1}^{K-1} 2^{i-1} Z_i) \\\\\n& - (p^{K}(1-p) + p(1-p)^{K}) H(\\frac{p^{K} (1-p)}{p^{K} (1-p) + p (1-p)^{K}}) \\\\\n& = H(p) - H(X \\; | \\; X + \\sum_{i=1}^{K-1} 2^{i-1} Z_i) \\\\ \n& - (p^{K}(1-p) + p(1-p)^{K}) H(\\frac{1}{1 + \\frac{p(1-p)^K}{p^K(1-p)}}) \\\\\n& = H(p) - \\sum_{i=1}^K (p^i(1-p) + p(1-p)^i) H(\\frac{1}{1 + \\frac{p(1-p)^i}{p^i(1-p)}}).\n\\end{align*}\n\n\\subsection{Proof of Lemma \\ref{lem:lower_bound}}\n\n\tDefine $S_K = 1 + \\sum_{i=1}^{K} \\alpha_i.$ For any $K\\in \\mathbb{N},$ $p \\in [0, 0.5],$ and $\\alpha_i \\in \\mathbb{N}, \\; i \\in [K],$ we have that \n\t\\begin{align*} \n\t& I(X \\; ; \\; X + \\sum_{i=1}^{K} \\alpha_i Z_i) \\\\\n\t& = H(p) - H(X \\; | \\; X + \\sum_{i=1}^{K} \\alpha_i Z_i) \\\\\n\t& \\geq H(p) - (1 - p^{K+1} - 
(1-p)^{K+1}) \\\\\n\t& = H(p) - 1 + p^{K+1} + (1-p)^{K+1}\n\t\\end{align*}\n\twhere the third line follows because\n\t\\begin{align*} \n\t& H(X \\; | \\; X + \\sum_{i=1}^{K} \\alpha_i Z_i) \\\\\n\t& = \\sum_{t = -S_K}^{S_K} \\Pr(X + \\sum_{i=1}^{K} \\alpha_i Z_i = t) H(X \\; | \\; X + \\sum_{i=1}^{K} \\alpha_i Z_i = t) \\\\\n\t& = p^{K+1} \\cdot 0 + (1-p)^{K+1} \\cdot 0 \\\\ \n\t& + \\sum_{t = -S_K+2}^{S_K-2} \\Pr(X + \\sum_{i=1}^{K} \\alpha_i Z_i = t) H(X \\; | \\; X + \\sum_{i=1}^{K} \\alpha_i Z_i = t) \\\\\n\t& \\leq \\sum_{t = -S_K+2}^{S_K-2} \\Pr(X + \\sum_{i=1}^{K} \\alpha_i Z_i = t) \\\\\n\t& \\leq 1 - p^{K+1} - (1-p)^{K+1}.\n\t\\end{align*}\n\n\n\\subsection{Proof of Lemma \\ref{lem:optimality_of_binary}}\n\nFor any $K \\in \\mathbb{N}$ and $p = 0.5,$ the performance of the binary scheme is given by\n\\begin{align*}\n\t& I(X \\; ; \\; X + \\sum_{i=1}^{K} 2^{i-1} Z_i) \\\\ \n\t& = H(0.5) - 2\\sum_{i=1}^K (0.5)^{i+1} H(0.5) \\\\\n\t& = 1 - \\sum_{i=1}^K (0.5)^{i} \\\\\n\t& = 1 - (\\frac{1 - (0.5)^{K+1}}{0.5} - 1) \\\\\n\t& = (0.5)^{K} \\\\\n\t& = 1 - 1 + (0.5)^{K+1} + (0.5)^{K+1}\n\\end{align*}\nThus, the performance of the binary scheme matches the lower bound for $p = 0.5.$\n\n\n\\subsection{Proof of Theorem \\ref{theorem:main_recursion}}\nFor $N \\geq 2,$ we have that \n\\begin{align*} \n& H(X \\; | \\; T_{M, N}) \n\\\\ & = \n\\sum_{t=-S_{M,N}}^{S_{M,N}} \\Pr(T_{M, N} = t) H(X \\; | \\; T_{M, N} = t) \n\\\\ & = \n\\sum_{t=-S_{M,N}}^{-M-1} \\Pr(T_{M, N} = t) H(X \\; | \\; T_{M, N} = t) \n\\\\ & + \n\\sum_{t=M+1}^{S_{M,N}} \\Pr(T_{M, N} = t) H(X \\; | \\; T_{M, N} = t) \n\\\\ & + \n\\sum_{t=-M}^{M} \\Pr(T_{M, N} = t) H(X \\; | \\; T_{M, N} = t) \n\\\\ & = \\sum_{t=-S_{M,N-1}}^{S_{M,N-1}-2M-1} p \\Pr(T_{M, N-1} = t) H(X \\; | \\; T_{M, N-1} = t)\n\\\\ & + \\sum_{t=-S_{M,N-1}+2M+1}^{S_{M,N-1}} (1-p) \\Pr(T_{M, N-1} = t) H(X \\; | \\; T_{M, N-1} = t) \n\\\\ & + \n\\sum_{t=-M}^{M} \\Pr(T_{M, N} = t) H(X \\; | \\; T_{M, N} = t) \n\\\\ & = 
\n\\sum_{t=-S_{M,N-1}+2M+1}^{S_{M,N-1}-2M-1} \\Pr(T_{M, N-1} = t) H(X \\; | \\; T_{M, N-1} = t)\n\\\\ & + \n\\sum_{t=-S_{M,N-1}}^{-S_{M,N-1}+2M} p \\Pr(T_{M, N-1} = t) H(X \\; | \\; T_{M, N-1} = t)\n\\\\ & + \n\\sum_{t=S_{M,N-1}-2M}^{S_{M,N-1}} (1-p)\\Pr(T_{M, N-1} = t) H(X \\; | \\; T_{M, N-1} = t)\n\\\\ & + \n\\sum_{t=-M}^{M} \\Pr(T_{M, N} = t) H(X \\; | \\; T_{M, N} = t) \n\\\\ & = \nH(X \\; | \\; T_{M, N-1}) \n\\\\ & - \n(1-p) \\sum_{t=-S_{M,N-1}}^{-S_{M,N-1}+2M} \\Pr(T_{M, N-1} = t) H(X \\; | \\; T_{M, N-1} = t)\n\\\\ & - \np \\sum_{t=S_{M,N-1}-2M}^{S_{M,N-1}} \\Pr(T_{M, N-1} = t) H(X \\; | \\; T_{M, N-1} = t)\n\\\\ & + \n\\sum_{t=-M}^{M} \\Pr(T_{M, N} = t) H(X \\; | \\; T_{M, N} = t)\n\\end{align*}\nwhere the third equality follows from the fact that\n\n\n\\begin{align*} \n& H(X \\; | \\; T_{M, N} = t)\n\\\\ & = \nH(\\Pr(X = 1 \\; | \\; T_{M, N} = t))\n\\\\ & = \nH(\\frac{(1-p)\\Pr(T_{M, N} = t \\; | \\; X = 1) }{\\Pr(T_{M, N} = t)})\n\\\\ & = \nH(\\frac{1}{1 + \\frac{p\\Pr(T_{M, N}-X = t +1)}{(1-p)\\Pr(T_{M, N}-X = t -1)}}) \n\\\\ & = \nH(\\frac{1}{1 + \\frac{p^2\\Pr(T_{M, N-1}-X = t +1+2^{N-1})}{p(1-p)\\Pr(T_{M, N-1}-X = t -1+2^{N-1})}}) \n\\\\ & = \nH(\\frac{1}{1 + \\frac{p\\Pr(T_{M, N-1}-X = t +1+2^{N-1})}{(1-p)\\Pr(T_{M, N-1}-X = t -1+2^{N-1})}}) \n\\\\ & = \nH(\\frac{(1-p)\\Pr(T_{M, N-1} = t + 2^{N-1} \\; | \\; X = 1) }{\\Pr(T_{M, N-1} = t + 2^{N-1})})\n\\\\ & =\t \nH(X \\; | \\; T_{M, N-1} = t+ 2^{N-1})\n\\end{align*}\nif $t < -M,$ and the fact that \n\\begin{align*} \nH(X \\; | \\; T_{M, N} = t) = \tH(X \\; | \\; T_{M, N-1} = t - 2^{N-1})\n\\end{align*}\nif $t > M.$\n\n\n\n\n\\subsection{Explanation of Algorithm}\n\nLet us analyze the recursion\n\\begin{align*} \n\t& H(X \\; | \\; T_{M,N}) \n\t\\\\ & = H(X \\; | \\; T_{M,N-1}) \n\t\\\\ & - (1-p) \\!\\!\\!\\! \\sum_{t=-S_{M,N-1}}^{-S_{M,N-1}+2M} \\!\\!\\!\\!\\!\\!\\!\\!\\!\\! \\Pr(T_{M,N-1} = t) H(X \\; | \\; T_{M,N-1} = t)\n\t\\\\ & - \tp \\!\\!\\!\\! 
\\sum_{t=S_{M,N-1}-2M}^{S_{M,N-1}} \\!\\!\\!\\!\\!\\!\\!\\!\\!\\! \\Pr(T_{M,N-1} = t) H(X \\; | \\; T_{M,N-1} = t)\n\t\\\\ & + \\sum_{t=-M}^{M} \\Pr(T_{M,N} = t) H(X \\; | \\; T_{M,N} = t). \n\\end{align*}\n\nThe algorithm computes $H(X \\; | \\; T_{M, N-1})$ starting from $N=2$ up to the desired $N$ value using the recursion above along with the following properties of the summation terms. \n\nObserve that if $N > \\ceil{\\log(2M+2)},$ then\n\\begin{align*}\n& \\sum_{t=-S_{M,N-1}}^{-S_{M,N}+2M} \\Pr(T_{M, N} = t) H(X \\; | \\; T_{M, N} = t)\n\\\\ & = p\\sum_{t=-S_{M,N-1}}^{-S_{M,N-1}+2M} \\Pr(T_{M, N-1} = t) H(X \\; | \\; T_{M, N-1} = t)\n\\end{align*}\nand \n\\begin{align*} \n& \\sum_{t=S_{M,N}-2M}^{S_{M,N}} \\Pr(T_{M, N} = t) H(X \\; | \\; T_{M, N} = t)\n\\\\ & = \n(1-p) \\sum_{t=S_{M,N-1}-2M}^{S_{M,N-1}} \\Pr(T_{M, N-1} = t) H(X \\; | \\; T_{M, N-1} = t).\n\\end{align*}\nThus, both of these summations can be computer recursively once $N > \\ceil{\\log(2M+2)}.$\n\nFinally, observe that \n\\begin{align*} \t\\sum_{t=-M}^{M} \\Pr(T_{M, N} = t) H(X \\; | \\; T_{M, N} = t). \n\\end{align*}\ncan be computed in $O(M)$ time for each value of $N$ if the first $2M+1$ values of the PMF of $ \\sum_{j=1}^M Z_j + \\sum_{k=1}^{N-1} 2^{k-1} Z_{M+k}$ (starting with the smallest support value) and the last $2M+1$ values of the PMF of $ \\sum_{j=1}^M Z_j + \\sum_{k=1}^{N-1} 2^{k-1} Z_{M+k}$ (ending with the largest support value) are available. This computation is shown in Algorithm \\ref{alg:uniform_binary_polytime}, and requires $O(M)$ space and $O(M)$ time. \n\n\nThe algorithm performs the steps above $O(N)$ times. The algorithm must also build the PMF for $\\sum_{j=1}^M Z_j$ which requires $O(M^2)$ time and $O(M)$ space. Therefore, the algorithm runs in $O(N M + M^2)$ time and $O(M)$ space. 
\n\n\n\n\\section{Appendix}\n\n\\begin{lemma} \n\tFor any $K \\in \\mathbb{N}, p \\in [0, 0.5],$ we have that \n\t\\begin{align*} \n\t& I\\left(X \\; ; \\; X + \\sum_{i=1}^K Y_i\\right)\n\t\\\\ & = H\\left(p\\right) - \\sum_{i = 1}^{K} p^{K+1-i} \\left(1-p\\right)^{i} \\binom{K+1}{i} H\\left(\\frac{i}{K+1}\\right)\n\t\\end{align*}\n\t\\label{lem:uniform_schem_formula}\n\\end{lemma}\n\n\\begin{lemma} \\label{lem:binary_scheme_performance}\n\tFor any $K \\in \\mathbb{N}, p \\in [0, 0.5],$ we have that \n\t\\begin{align*} \n\t& I\\left(X \\; ; \\; X + \\sum_{i=1}^{K} 2^{i-1} Y_i\\right) \\\\ \n\t& = H\\left(p\\right) - \\sum_{i=1}^K \\left(p^i \\left(1-p\\right) + p \\left(1-p\\right)^i\\right) H \\left(\\frac{1}{1 + \\frac{p \\left(1-p\\right)^i}{p^i \\left(1-p\\right)}}\\right).\n\t\\end{align*}\n\\end{lemma}\n\n\\begin{lemma} \\label{lem:lower_bound}\n\tFor any $K\\in \\mathbb{N},$ $p \\in [0, 0.5],$ and $\\alpha_i \\in \\mathbb{N}, \\; i \\in [K],$ we have that \n\t\\begin{align*} \n\tI\\left(X \\; ; \\; X + \\sum_{i=1}^{K} \\alpha_i Y_i\\right) \\geq H\\left(p\\right) - 1 + p^{K+1} + \\left(1-p\\right)^{K+1}.\n\t\\end{align*}\n\\end{lemma}\n\n\\begin{theorem} \\label{theorem:main_recursion}\n\tLet\n\t\\begin{align*} \n\tT_{M, N} = X + \\sum_{i=1}^M Y_i + \\sum_{j=1}^{N} 2^{j-1} Y_{M+j}\n\t\\end{align*}\n\tand\n\t\\begin{align*} \n\tS_{M,N} = 1 + M + \\sum_{i=1}^{N} 2^{i-1}.\n\t\\end{align*}\n\tFor $M,N \\in \\mathbb{N}$ such that $M>0$ and $N>1,$ we have that \n\t\\begin{align*} \n\t& H\\left(X \\; | \\; T_{M,N}\\right) \n\t\\\\ & = H\\left(X \\; | \\; T_{M,N-1}\\right) \n\t\\\\ & - \\left(1-p\\right) \\!\\!\\!\\! \\sum_{t=-S_{M,N-1}}^{-S_{M,N-1}+2M} \\!\\!\\!\\!\\!\\!\\!\\!\\!\\! \\Pr\\left(T_{M,N-1} = t\\right) H\\left(X \\; | \\; T_{M,N-1} = t\\right)\n\t\\\\ & - \tp \\!\\!\\!\\! \\sum_{t=S_{M,N-1}-2M}^{S_{M,N-1}} \\!\\!\\!\\!\\!\\!\\!\\!\\!\\! 
\\Pr\\left(T_{M,N-1} = t\\right) H\\left(X \\; | \\; T_{M,N-1} = t\\right)\n\t\\\\ & + \\sum_{t=-M}^{M} \\Pr\\left(T_{M,N} = t\\right) H\\left(X \\; | \\; T_{M,N} = t\\right). \n\t\\end{align*}\n\\end{theorem}\n\n\\begin{algorithm} \\label{alg:uniform_binary_polytime}\n\t\\SetAlgoLined\n\t\\KwData{$N,$ $M,$ $p$}\n\t\\KwResult{$I_{M,N}$}\n\t$n_1 \\leftarrow \\min\\left(N, \\ceil{\\log\\left(2M+2\\right)}\\right)$\\;\n\t$D_{n_1} \\leftarrow$ array containing PMF for $\\sum_{i=1}^M Y_i + \\sum_{j=1}^{n_1} 2^{j-1} Y_{M+j}$ starting at the lowest support value and ending at highest support value\\;\n\t$D_{\\text{low}} \\leftarrow$ array containing first $2M+1$ entries of $D_{n_1}$\\;\n\t$D_{\\text{high}} \\leftarrow$ array containing last $2M+1$ entries of $D_{n_1}$\\;\n\t$H_{n_1} \\leftarrow $ $\\sum_{t=-S_{M,n_1}}^{S_{M,n_1}} \\Pr\\left(T_{M, n_1} = t\\right) H\\left(X \\; | \\; T_{M, n_1} = t\\right)$ computed using $D_{n_1}$\\; \n\t$H_{\\text{low}} \\leftarrow $ \n\t$\\sum_{t=-S_{M,n_1}}^{-S_{M,n_1}+2M} \\Pr\\left(T_{M, n_1} = t\\right) H\\left(X \\; | \\; T_{M, n_1} = t\\right)$ computed using $D_{\\text{low}}$\\; \n\t$H_{\\text{high}} \\leftarrow $ $\\sum_{t=S_{M,n_1}-2K}^{S_{M,n_1}} \\Pr\\left(T_{M, n_1} = t\\right) H\\left(X \\; | \\; T_{M, n_1} = t\\right)$ computed using $D_{\\text{high}}$\\; \t\n\t$n \\leftarrow n_1 + 1$\\;\n\t\\While{$n \\leq N$}{\n\t\n\t\t$H_{n} \\leftarrow H_{n-1}$\\;\n\t\t$H_{n} \\leftarrow H_{n} - \\left(1-p\\right) H_{\\text{low}} - p H_{\\text{high}}$\\;\n\t\t$D_{\\text{low ext}} \\leftarrow $ $D_{\\text{low}}$ appended with $[0, \\; 0]$\\;\n\t\t$D_{\\text{high ext}} \\leftarrow $ $D_{\\text{high}}$ prepended with $[0,\\; 0]$\\;\n\t\t$H_{\\text{center}} \\leftarrow$ \t$\\sum_{t=-M}^{M} \\Pr\\left(T_{M, n} = t\\right) H\\left(X \\; | \\; T_{M, n} = t\\right)$ computed using $p D_{\\text{high ext}} + \\left(1-p\\right) D_{\\text{low ext}}$\\;\n\t\t$H_{n} \\leftarrow H_{n} + H_{\\text{center}}$\\;\n\t\t$H_{\\text{low}} \\leftarrow p 
H_{\\text{low}}$\\; \n\t\t$H_{\\text{high}} \\leftarrow \\left(1-p\\right) H_{\\text{high}}$\\; \n\t\t$D_{\\text{low}} \\leftarrow p D_{\\text{low}}$\\;\n\t\t$D_{\\text{high}} \\leftarrow \\left(1-p\\right) D_{\\text{high}}$\\;\n\t\t$n \\leftarrow n + 1$\\;\n\t}\n\t$I_{M,N} \\leftarrow H\\left(p\\right) - H_N$\n\t\n\t\n\t\\caption{Computation of $I\\left(X \\; ; \\; X + \\sum_{i=1}^M Y_i + \\sum_{j=1}^{N} 2^{j-1} Y_{M+j}\\right)$}\n\\end{algorithm}\n\n\\subsection{Proof of Lemma \\ref{lem:uniform_schem_formula}}\nWe have that \n\\begin{align*} \n& I\\left(X \\; ; \\; X + \\sum_{i=1}^K Y_i\\right) \n\\\\ & = H\\left(p\\right) - H\\left(X \\; | \\; X + \\sum_{i=1}^K Y_i\\right) \n\\\\ & = H\\left(p\\right) \n\\\\ & - \\sum_{t = -K-1}^{K+1} \\Pr\\left(X + \\sum_{i=1}^K Y_i = t\\right) H\\left(X \\; | \\; X + \\sum_{i=1}^K Y_i = t\\right)\n\\\\ & = H\\left(p\\right) - \\sum_{t = 0}^{K+1} \\Pr\\left(\\sum_{i=1}^{K+1} Z_i = t\\right) H\\left(Z_1 \\; | \\; \\sum_{i=1}^{K+1} Z_i = t\\right)\n\\end{align*}\nwhere $Z_i \\sim \\text{Bern}\\left(1-p\\right).$ Thus, $\\sum_{i=1}^{K+1} Z_i \\sim \\text{Bin}\\left(1-p, K+1\\right)$ and \\[\\Pr\\left(\\sum_{i=1}^{K+1} Z_i = t\\right) = p^{K+1-t} \\left(1-p\\right)^{t} \\binom{K+1}{t}.\\] Due to Bayes rule, we have \n\\begin{align*}\n\\Pr\\left(Z_1 = 1 \\; | \\; \\sum_{i=1}^{K+1} Z_i = t\\right) = \\frac{t}{K+1},\n\\end{align*}\nand thus, \n\\begin{align}\n& H\\left(Z_1 \\; | \\; \\sum_{i=1}^{K+1} Z_i = t\\right)\n= H\\left(\\frac{t}{K+1}\\right).\n\\end{align}\nThus,\n\\begin{align*} \n& I\\left(X \\; ; \\; X + \\sum_{i=1}^K Y_i\\right) \n\\\\ & = H\\left(p\\right) - \\sum_{t = 0}^{K+1} \\Pr\\left(\\sum_{i=1}^{K+1} Z_i = t\\right) H\\left(Z_1 \\; | \\; \\sum_{i=1}^{K+1} Z_i = t\\right)\n\\\\ & = H\\left(p\\right) - \\sum_{t = 0}^{K+1} p^{K+1-t} \\left(1-p\\right)^{t} \\binom{K+1}{t} H\\left(\\frac{t}{K+1}\\right)\n\\\\ & = H\\left(p\\right) - \\sum_{t = 1}^{K} p^{K+1-t} \\left(1-p\\right)^{t} \\binom{K+1}{t} 
H\\left(\\frac{t}{K+1}\\right)\n\\end{align*}\n\n\n\n\n\\subsection{Proof of Lemma \\ref{lem:optimality_of_uniform}}\nLet $K \\in \\mathbb{N}$ and\n\\begin{align*}\n&S_K = 1 + \\sum_{i=1}^K \\alpha_i, \\quad \\quad T\\left(K\\right) = X + \\sum_{i=1}^K \\alpha_i Y_i.\n\\end{align*}\nWe have that \n\\begin{align*} \nI\\left(X \\; ; \\; X + \\sum_{i=1}^K \\alpha_i Y_i \\right) = H\\left(p\\right) - H\\left(X \\; | \\; X + \\sum_{i=1}^K \\alpha_i Y_i\\right)\n\\end{align*} \nand\n\\begin{align*} \n&H\\left(X \\; | \\; X + \\sum_{i=1}^K \\alpha_i Y_i\\right) \\\\\n& = \\sum_{t=-S_K}^{S_K} \\Pr\\left(T\\left(K\\right) = t\\right) H\\left(X \\; | \\; T\\left(K\\right) = t\\right)\n\\end{align*} \nObserve that as $p \\to 0,$ the terms of $ H\\left(X \\; | \\; X + \\sum_{i=1}^K \\alpha_i Y_i\\right)$ with the lowest powers of $p$ dominate.\nThe only term containing no power of $p$ is \n\\begin{align*} & \\Pr\\left(T\\left(K\\right) = S_K\\right) H\\left(X \\; | \\; T\\left(K\\right) = S_K\\right) \\\\ & = \\left(1-p\\right)^{K+1} H\\left(X \\; | \\; T\\left(K\\right) = S_K\\right) = 0. \n\\end{align*}\nNext, consider the term $\\Pr\\left(T\\left(K\\right) = S_K-2\\right) H\\left(X \\; | \\; T\\left(K\\right) = S_K-2\\right).$ Observe that \n\\begin{align*} & \\Pr\\left(T\\left(K\\right) = S_K-2\\right) \\\\& = \\left(1-p\\right) \\Pr\\left(T\\left(K\\right)-X = S_K-3\\right) \\\\ & + p \\Pr\\left(T\\left(K\\right)-X = S_K-1\\right) \n\\end{align*}\nand\n\\begin{align*} \n&\\Pr\\left(T\\left(K\\right)-X = S_K-3\\right) = p \\left(1-p\\right)^{K-1}|\\{i : \\alpha_i = 1\\}|, \\\\\n& \\Pr\\left(T\\left(K\\right)-X = S_K-1\\right) = \\left(1-p\\right)^K. 
\n\\end{align*}\nWe than have that \n\\begin{align*} \n&\\Pr\\left(T\\left(K\\right) = S_K-2\\right) H\\left(X \\; | \\; T\\left(K\\right) = S_K-2\\right) \\\\\n& = p \\left(1-p\\right)^{K}\\left(1 + |\\{i : \\alpha_i = 1\\}|\\right) H\\left(X \\; | \\; T\\left(K\\right) = S_K-2\\right) \\\\\n& = p \\left(1-p\\right)^{K}\\left(\\{i : \\alpha_i = 1\\}| \\\\\n& \\times \\log\\left(\\frac{p \\left(1-p\\right)^{K}\\left(1+|\\{i : \\alpha_i = 1\\}|\\right)}{p \\left(1-p\\right)^{K}|\\{i : \\alpha_i = 1\\}|}\\right)\\right) \\\\ \n& + p \\left(1-p\\right)^{K} \\log\\left(\\frac{p \\left(1-p\\right)^{K}\\left(1+|\\{i : \\alpha_i = 1\\}|\\right)}{p \\left(1-p\\right)^{K}}\\right) \\\\\n& = p \\left(1-p\\right)^{K}\\left(\\{i : \\alpha_i = 1\\}|\\log\\left(\\frac{1+|\\{i : \\alpha_i = 1\\}|}{|\\{i : \\alpha_i = 1\\}|}\\right)\\right) \\\\\n& + p \\left(1-p\\right)^{K} \\log\\left(1+|\\{i : \\alpha_i = 1\\}|\\right)\n\\end{align*}\nThe above function is maximized when $|\\{i : \\alpha_i = 1\\}| = K.$ This is because \n\\begin{align*} \n\\frac{d}{d x} \\left(x \\log\\left(1 + \\frac{1}{x}\\right) + \\log\\left(1 + x\\right)\\right) > 0\n\\end{align*} \nfor all $x > 0.$\n\nNow consider any $t$ such that $t < S_K-2.$ For such a value of $t,$ we have that \\begin{align*} \\Pr\\left(T\\left(K\\right) - X = t-1\\right) = d \\left(1-p\\right)^{K-c} p^c + o\\left(p^c\\right)\n\\end{align*} \nfor some $c \\in \\mathbb{N}, \\; c \\geq 1$ and $d \\in \\mathbb{N}$ because there must be at least one $i \\in [K]$ such that $Y_i=-1$ in this case. Furthermore, we have \n\\begin{align*} \n\\Pr\\left(T\\left(K\\right) - X = t+1\\right) = b \\left(1-p\\right)^{K-a} p^a + o\\left(p^a\\right)\n\\end{align*} \nfor some $a \\in \\mathbb{N}, \\; a \\geq 1$ and $b \\in \\mathbb{N}$ because there must be at least one $i \\in [K]$ such that $Y_i=-1$ in this case. 
Thus, \n\\begin{align*} \n&\\Pr\\left(T\\left(K\\right) = t\\right) H\\left(X \\; | \\; T\\left(K\\right) = t\\right) \\\\ \n&= \\left(d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)\\right) \\\\\n& \\times \\log\\left(1 + \\frac{ b \\left(1-p\\right)^{K-a} p^{a+1} \\right) + o\\left(p^{a+1}\\right)}{d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)}\\right) \\\\\n& + \\left(b \\left(1-p\\right)^{K-a} p^{a+1} + o\\left(p^{a+1}\\right)\\right) \\\\\n& \\times \\log\\left(1 + \\frac{d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)}{b \\left(1-p\\right)^{K-a} p^{a+1} + o\\left(p^{a+1}\\right)}\\right) \\\\\n& = o\\left(p\\right).\n\\end{align*}\n\nThus, there exists some $\\epsilon > 0$ such that the uniform scheme is optimal for $p < \\epsilon.$\n\nWe now justify the last line in the equation above. \nConsider the first term above in the second line. If $c < a+1,$ it follows that \n\\begin{align*}\n&\\left(d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)\\right) \\\\\n& \\times \\log\\left(1 + \\frac{ b \\left(1-p\\right)^{K-a} p^{a+1} \\right) + o\\left(p^{a+1}\\right)}{d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)}\\right) \\\\\n& = \\left(d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)\\right) \\\\\n&\\times \\log\\left(1 + \\frac{ b \\left(1-p\\right)^{K-a} p^{a+1-c} + o\\left(p^{a+1-c}\\right)}{d \\left(1-p\\right)^{K-c+1} + o\\left(1\\right)}\\right) \\\\\n& = \\left(d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)\\right) \\\\ \n& \\times \\frac{ b \\left(1-p\\right)^{K-a} p^{a+1-c} + o\\left(p^{a+1-c}\\right)}{d \\left(1-p\\right)^{K-c+1} + o\\left(1\\right)} \\\\\n& = o\\left(p\\right).\n\\end{align*}\nIf $c \\geq a+1,$ it follows that \n\\begin{align*}\n& \\left(d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)\\right) \\\\\n& \\times \\log\\left(1 + \\frac{ b \\left(1-p\\right)^{K-a} p^{a+1} + o\\left(p^{a+1}\\right)}{d \\left(1-p\\right)^{K-c+1} p^{c} + 
o\\left(p^{c}\\right)}\\right)\\\\\n& \\leq \\left(d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)\\right) \\\\\n& \\times \\log\\left(\\frac{1}{d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)}\\right)\\\\\n& \\leq \\left(d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)\\right)\\log\\left(\\frac{1}{\\left(1-p\\right)^{K} p^{K}}\\right)\\\\\n& = K \\left(d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)\\right) \\\\\n& \\times \\log\\left(\\frac{1}{p\\left(1-p\\right)}\\right)\\\\\n& = o\\left(p\\right).\n\\end{align*}\nFinally, consider the second term. It follows that\n\\begin{align*}\t\n& \\left(b \\left(1-p\\right)^{K-a} p^{a+1} + o\\left(p^{a+1}\\right)\\right) \\\\\n& \\times \\log\\left(1 + \\frac{d \\left(1-p\\right)^{K-c+1} p^{c} + o\\left(p^{c}\\right)}{b \\left(1-p\\right)^{K-a} p^{a+1} + o\\left(p^{a+1}\\right)}\\right) \\\\\n& \\leq \\left(b \\left(1-p\\right)^{K-a} p^{a+1} + o\\left(p^{a+1}\\right)\\right) \\\\\n& \\times \\log\\left(\\frac{1}{b \\left(1-p\\right)^{K-a} p^{a+1} + o\\left(p^{a+1}\\right)}\\right) \\\\\n& \\leq \\left(b \\left(1-p\\right)^{K-a} p^{a+1} + o\\left(p^{a+1}\\right)\\right) \\\\\n& \\times \\log\\left(\\frac{1}{ \\left(1-p\\right)^{K} p^{K} }\\right) \\\\\n& = K\\left(b \\left(1-p\\right)^{K-a} p^{a+1} + o\\left(p^{a+1}\\right)\\right) \\\\\n& \\times \\log\\left(\\frac{1}{ p \\left(1-p\\right) }\\right) \\\\\n& = o\\left(p\\right).\n\\end{align*}\n\n\n\n\n\\subsection{Proof of Lemma \\ref{lem:binary_scheme_performance}}\n\nWe will prove this by induction on $K \\in \\mathbb{N}.$ Let $p \\in [0, 0.5].$ For $K = 1,$ we have that \n\\begin{align*} \n& I\\left(X \\; ; \\; X + Y_1\\right) \\\\ \n& = H\\left(p\\right) - H\\left(X \\; | \\; X + Y_1\\right) \\\\\n& = H\\left(p\\right) - 2p\\left(1-p\\right)H\\left(0.5\\right)\n\\end{align*}\t\t\nwhich matches the formula.\nAssume the formula holds for the $\\left(K-1\\right)$th case where $K > 1.$ Consider the $K$th case: \n\\begin{align*} 
\n& I\\left(X \\; ; \\; X + \\sum_{i=1}^{K} 2^{i-1} Y_i\\right) \\\\\n& = H\\left(p\\right) - H\\left(X \\; | \\; X + \\sum_{i=1}^{K} 2^{i-1} Y_i\\right) \\\\\n& = H\\left(p\\right) - \\left(1-p\\right)H\\left(X \\; | \\; X + \\sum_{i=1}^{K-1} 2^{i-1} Y_i\\right) \\\\\n& - p H\\left(X \\; | \\; X + \\sum_{i=1}^{K-1} 2^{i-1} Y_i\\right) \\\\\n& - \\left(p^{K}\\left(1-p\\right) + p\\left(1-p\\right)^{K}\\right) H\\left(\\frac{p^{K} \\left(1-p\\right)}{p^{K} \\left(1-p\\right) + p \\left(1-p\\right)^{K}}\\right) \\\\\n& = H\\left(p\\right) - H\\left(X \\; | \\; X + \\sum_{i=1}^{K-1} 2^{i-1} Y_i\\right) \\\\ \n& - \\left(p^{K}\\left(1-p\\right) + p\\left(1-p\\right)^{K}\\right) H\\left(\\frac{1}{1 + \\frac{p\\left(1-p\\right)^K}{p^K\\left(1-p\\right)}}\\right) \\\\\n& = H\\left(p\\right) - \\sum_{i=1}^K \\left(p^i\\left(1-p\\right) + p\\left(1-p\\right)^i\\right) H\\left(\\frac{1}{1 + \\frac{p\\left(1-p\\right)^i}{p^i\\left(1-p\\right)}}\\right).\n\\end{align*}\n\n\\subsection{Proof of Lemma \\ref{lem:lower_bound}}\n\nDefine $S_K = 1 + \\sum_{i=1}^{K} \\alpha_i.$ For any $K\\in \\mathbb{N},$ $p \\in [0, 0.5],$ and $\\alpha_i \\in \\mathbb{N}, \\; i \\in [K],$ we have that \n\\begin{align*} \n& I\\left(X \\; ; \\; X + \\sum_{i=1}^{K} \\alpha_i Y_i\\right) \\\\\n& = H\\left(p\\right) - H\\left(X \\; | \\; X + \\sum_{i=1}^{K} \\alpha_i Y_i\\right) \\\\\n& \\geq H\\left(p\\right) - \\left(1 - p^{K+1} - \\left(1-p\\right)^{K+1}\\right) \\\\\n& = H\\left(p\\right) - 1 + p^{K+1} + \\left(1-p\\right)^{K+1}\n\\end{align*}\nwhere the third line follows because\n\\begin{align*} \n& H\\left(X \\; | \\; X + \\sum_{i=1}^{K} \\alpha_i Y_i\\right) \\\\\n& = \\sum_{t = -S_K}^{S_K} \\Pr\\left(X + \\sum_{i=1}^{K} \\alpha_i Y_i = t\\right) H\\left(X \\; | \\; X + \\sum_{i=1}^{K} \\alpha_i Y_i = t\\right) \\\\\n& = p^{K+1} \\cdot 0 + \\left(1-p\\right)^{K+1} \\cdot 0 \\\\ \n& + \\sum_{t = -S_K+2}^{S_K-2} \\Pr\\left(X + \\sum_{i=1}^{K} \\alpha_i Y_i = t\\right) H\\left(X \\; | \\; X + 
\\sum_{i=1}^{K} \\alpha_i Y_i = t\\right) \\\\\n& \\leq \\sum_{t = -S_K+2}^{S_K-2} \\Pr\\left(X + \\sum_{i=1}^{K} \\alpha_i Y_i = t\\right) \\\\\n& \\leq 1 - p^{K+1} - \\left(1-p\\right)^{K+1}.\n\\end{align*}\n\n\n\\subsection{Proof of Lemma \\ref{lem:optimality_of_binary}}\n\nFor any $K \\in \\mathbb{N}$ and $p = 0.5,$ the performance of the binary scheme is given by\n\\begin{align*}\n& I\\left(X \\; ; \\; X + \\sum_{i=1}^{K} 2^{i-1} Y_i\\right) \\\\ \n& = H\\left(0.5\\right) - 2\\sum_{i=1}^K \\left(0.5\\right)^{i+1} H\\left(0.5\\right) \\\\\n& = 1 - \\sum_{i=1}^K \\left(0.5\\right)^{i} \\\\\n& = 1 - \\left(\\frac{1 - \\left(0.5\\right)^{K+1}}{0.5} - 1\\right) \\\\\n& = \\left(0.5\\right)^{K} \\\\\n& = 1 - 1 + \\left(0.5\\right)^{K+1} + \\left(0.5\\right)^{K+1}\n\\end{align*}\nThus, the performance of the binary scheme matches the lower bound for $p = 0.5.$\n\n\n\\subsection{Proof of Theorem \\ref{theorem:main_recursion}}\nFor $N \\geq 2,$ we have that \n\\begin{align*} \n& H\\left(X \\; | \\; T_{M, N}\\right) \n\\\\ & = \n\\sum_{t=-S_{M,N}}^{S_{M,N}} \\Pr\\left(T_{M, N} = t\\right) H\\left(X \\; | \\; T_{M, N} = t\\right) \n\\\\ & = \n\\sum_{t=-S_{M,N}}^{-M-1} \\Pr\\left(T_{M, N} = t\\right) H\\left(X \\; | \\; T_{M, N} = t\\right) \n\\\\ & + \n\\sum_{t=M+1}^{S_{M,N}} \\Pr\\left(T_{M, N} = t\\right) H\\left(X \\; | \\; T_{M, N} = t\\right) \n\\\\ & + \n\\sum_{t=-M}^{M} \\Pr\\left(T_{M, N} = t\\right) H\\left(X \\; | \\; T_{M, N} = t\\right) \n\\\\ & = \\sum_{t=-S_{M,N-1}}^{S_{M,N-1}-2M-1} p \\Pr\\left(T_{M, N-1} = t\\right) H\\left(X \\; | \\; T_{M, N-1} = t\\right)\n\\\\ & + \\sum_{t=-S_{M,N-1}+2M+1}^{S_{M,N-1}} \\left(1-p\\right) \\Pr\\left(T_{M, N-1} = t\\right) H\\left(X \\; | \\; T_{M, N-1} = t\\right) \n\\\\ & + \n\\sum_{t=-M}^{M} \\Pr\\left(T_{M, N} = t\\right) H\\left(X \\; | \\; T_{M, N} = t\\right) \n\\\\ & = \n\\sum_{t=-S_{M,N-1}+2M+1}^{S_{M,N-1}-2M-1} \\Pr\\left(T_{M, N-1} = t\\right) H\\left(X \\; | \\; T_{M, N-1} = t\\right)\n\\\\ & + 
\n\\sum_{t=-S_{M,N-1}}^{-S_{M,N-1}+2M} p \\Pr\\left(T_{M, N-1} = t\\right) H\\left(X \\; | \\; T_{M, N-1} = t\\right)\n\\\\ & + \n\\sum_{t=S_{M,N-1}-2M}^{S_{M,N-1}} \\left(1-p\\right)\\Pr\\left(T_{M, N-1} = t\\right) H\\left(X \\; | \\; T_{M, N-1} = t\\right)\n\\\\ & + \n\\sum_{t=-M}^{M} \\Pr\\left(T_{M, N} = t\\right) H\\left(X \\; | \\; T_{M, N} = t\\right) \n\\\\ & = \nH\\left(X \\; | \\; T_{M, N-1}\\right) \n\\\\ & - \n\\left(1-p\\right) \\sum_{t=-S_{M,N-1}}^{-S_{M,N-1}+2M} \\Pr\\left(T_{M, N-1} = t\\right) H\\left(X \\; | \\; T_{M, N-1} = t\\right)\n\\\\ & - \np \\sum_{t=S_{M,N-1}-2M}^{S_{M,N-1}} \\Pr\\left(T_{M, N-1} = t\\right) H\\left(X \\; | \\; T_{M, N-1} = t\\right)\n\\\\ & + \n\\sum_{t=-M}^{M} \\Pr\\left(T_{M, N} = t\\right) H\\left(X \\; | \\; T_{M, N} = t\\right)\n\\end{align*}\nwhere the third equality follows from the fact that\n\n\n\\begin{align*} \n& H\\left(X \\; | \\; T_{M, N} = t\\right)\n\\\\ & = \nH\\left(\\Pr\\left(X = 1 \\; | \\; T_{M, N} = t\\right)\\right)\n\\\\ & = \nH\\left(\\frac{\\left(1-p\\right)\\Pr\\left(T_{M, N} = t \\; | \\; X = 1\\right) }{\\Pr\\left(T_{M, N} = t\\right)}\\right)\n\\\\ & = \nH\\left(\\frac{1}{1 + \\frac{p\\Pr\\left(T_{M, N}-X = t +1\\right)}{\\left(1-p\\right)\\Pr\\left(T_{M, N}-X = t -1\\right)}}\\right) \n\\\\ & = \nH\\left(\\frac{1}{1 + \\frac{p^2\\Pr\\left(T_{M, N-1}-X = t +1+2^{N-1}\\right)}{p\\left(1-p\\right)\\Pr\\left(T_{M, N-1}-X = t -1+2^{N-1}\\right)}}\\right) \n\\\\ & = \nH\\left(\\frac{1}{1 + \\frac{p\\Pr\\left(T_{M, N-1}-X = t +1+2^{N-1}\\right)}{\\left(1-p\\right)\\Pr\\left(T_{M, N-1}-X = t -1+2^{N-1}\\right)}}\\right) \n\\\\ & = \nH\\left(\\frac{\\left(1-p\\right)\\Pr\\left(T_{M, N-1} = t + 2^{N-1} \\; | \\; X = 1\\right) }{\\Pr\\left(T_{M, N-1} = t + 2^{N-1}\\right)}\\right)\n\\\\ & =\t \nH\\left(X \\; | \\; T_{M, N-1} = t+ 2^{N-1}\\right)\n\\end{align*}\nif $t < -M,$ and the fact that \n\\begin{align*} \nH\\left(X \\; | \\; T_{M, N} = t\\right) = \tH\\left(X \\; | \\; T_{M, N-1} = t - 
2^{N-1}\\right)\n\\end{align*}\nif $t > M.$\n\n\n\n\n\\subsection{Explanation of Algorithm}\n\nLet us analyze the recursion\n\\begin{align*} \n& H\\left(X \\; | \\; T_{M,N}\\right) \n\\\\ & = H\\left(X \\; | \\; T_{M,N-1}\\right) \n\\\\ & - \\left(1-p\\right) \\!\\!\\!\\! \\sum_{t=-S_{M,N-1}}^{-S_{M,N-1}+2M} \\!\\!\\!\\!\\!\\!\\!\\!\\!\\! \\Pr\\left(T_{M,N-1} = t\\right) H\\left(X \\; | \\; T_{M,N-1} = t\\right)\n\\\\ & - \tp \\!\\!\\!\\! \\sum_{t=S_{M,N-1}-2M}^{S_{M,N-1}} \\!\\!\\!\\!\\!\\!\\!\\!\\!\\! \\Pr\\left(T_{M,N-1} = t\\right) H\\left(X \\; | \\; T_{M,N-1} = t\\right)\n\\\\ & + \\sum_{t=-M}^{M} \\Pr\\left(T_{M,N} = t\\right) H\\left(X \\; | \\; T_{M,N} = t\\right). \n\\end{align*}\n\nThe algorithm computes $H\\left(X \\; | \\; T_{M, N-1}\\right)$ starting from $N=2$ up to the desired $N$ value using the recursion above along with the following properties of the summation terms. \n\nObserve that if $N > \\ceil{\\log\\left(2M+2\\right)},$ then\n\\begin{align*}\n& \\sum_{t=-S_{M,N-1}}^{-S_{M,N}+2M} \\Pr\\left(T_{M, N} = t\\right) H\\left(X \\; | \\; T_{M, N} = t\\right)\n\\\\ & = p\\sum_{t=-S_{M,N-1}}^{-S_{M,N-1}+2M} \\Pr\\left(T_{M, N-1} = t\\right) H\\left(X \\; | \\; T_{M, N-1} = t\\right)\n\\end{align*}\nand \n\\begin{align*} \n& \\sum_{t=S_{M,N}-2M}^{S_{M,N}} \\Pr\\left(T_{M, N} = t\\right) H\\left(X \\; | \\; T_{M, N} = t\\right)\n\\\\ & = \n\\left(1-p\\right) \\sum_{t=S_{M,N-1}-2M}^{S_{M,N-1}} \\Pr\\left(T_{M, N-1} = t\\right) H\\left(X \\; | \\; T_{M, N-1} = t\\right).\n\\end{align*}\nThus, both of these summations can be computer recursively once $N > \\ceil{\\log\\left(2M+2\\right)}.$\n\nFinally, observe that \n\\begin{align*} \t\\sum_{t=-M}^{M} \\Pr\\left(T_{M, N} = t\\right) H\\left(X \\; | \\; T_{M, N} = t\\right). 
\n\\end{align*}\ncan be computed in $O\\left(M\\right)$ time for each value of $N$ if the first $2M+1$ values of the PMF of $ \\sum_{j=1}^M Y_j + \\sum_{k=1}^{N-1} 2^{k-1} Y_{M+k}$ \\left(starting with the smallest support value\\right) and the last $2M+1$ values of the PMF of $ \\sum_{j=1}^M Y_j + \\sum_{k=1}^{N-1} 2^{k-1} Y_{M+k}$ \\left(ending with the largest support value\\right) are available. This computation is shown in Algorithm \\ref{alg:uniform_binary_polytime}, and requires $O\\left(M\\right)$ space and $O\\left(M\\right)$ time. \n\n\nThe algorithm performs the steps above $O\\left(N\\right)$ times. The algorithm must also build the PMF for $\\sum_{j=1}^M Y_j$ which requires $O\\left(M^2\\right)$ time and $O\\left(M\\right)$ space. Therefore, the algorithm runs in $O\\left(N M + M^2\\right)$ time and $O\\left(M\\right)$ space. \n\\subsection{Proof of Lemma \\ref{lem:lower_bound}}\n\n\\begin{IEEEproof}\n\tDefine $S_\\alpha = \\sum_{i=0}^{K} \\alpha_i,$ and let $T_Y$ be the set of support values of $Y = \\alpha_0 X + \\sum_{i=1}^{K} \\alpha_i Z_i.$ Clearly, $S_\\alpha$ is the maximum element in $T_Y.$ We have that \n\t\\begin{align*} \n\t& I(X \\; ; \\; Y) \\\\\n\t& = H(p) - H(X \\; | \\; Y) \\\\\n\t& \\geq H(p) - (1 - p^{K+1} - (1-p)^{K+1}) \\\\\n\t& = H(p) - 1 + p^{K+1} + (1-p)^{K+1}\n\t\\end{align*}\n\twhere the third line follows because\n\t\\begin{align*} \n\t& H(X \\; | \\; Y) \\\\\n\t& = \\sum_{t \\in T_Y} \\Pr(Y = t) H(X \\; | \\; Y = t) \\\\\n\t& = \\Pr(Y = 0) \\cdot H(X \\; | \\; Y = 0) \n\t\\\\ & + \\Pr(Y = S_\\alpha) \\cdot H(X \\; | \\; Y = S_\\alpha) \\\\ \n\t& + \\sum_{t \\in T_Y \\setminus \\{0, S_\\alpha\\}} \\Pr(Y = t) H(X \\; | \\; Y = t) \\\\\n\t& = (1-p)^{K+1} \\cdot 0 + p^{K+1} \\cdot 0 \\\\ \n\t& + \\sum_{t \\in T_Y \\setminus \\{0, S_\\alpha\\}} \\Pr(Y = t) H(X \\; | \\; Y = t) \\\\\n\t& \\leq \\sum_{t \\in T_Y \\setminus \\{0, S_\\alpha\\}} \\Pr(X + \\sum_{i=1}^{K} \\alpha_i Z_i = t) \\\\\n\t& = 1 - p^{K+1} - 
(1-p)^{K+1}.\n\t\\end{align*}\n\\end{IEEEproof}\n\n\\begin{lemma} \\label{lem:binary_scheme_performance}\n For any $K \\in \\mathbb{N}, p \\in [0, 0.5],$ we have that \n\t\\begin{align*} \n\t& I(X \\; ; \\; X + \\sum_{i=1}^{K} 2^{i-1} Z_i) \\\\ \n\t& = H(p) - \\sum_{i=1}^K (p^i (1-p) + p (1-p)^i) H \\left(\\frac{1}{1 + \\frac{p (1-p)^i}{p^i (1-p)}}\\right).\n\t\\end{align*}\n\\end{lemma}\n\n\n\\begin{IEEEproof}\nWe will prove this by induction on $K \\in \\mathbb{N}.$ Let $p \\in [0, 0.5].$ For $K = 1,$ we have that \n\\begin{align*} \n& I(X \\; ; \\; X + Z_1) \\\\ \n& = H(p) - H(X \\; | \\; X + Z_1) \\\\\n& = H(p) - 2p(1-p)H(0.5)\n\\end{align*}\t\t\nwhich matches the formula.\nAssume the formula holds for the $(K-1)$th case where $K > 1.$ Consider the $K$th case: \n\\begin{align*} \n& I\\left(X \\; ; \\; X + \\sum_{i=1}^{K} 2^{i-1} Z_i\\right) \\\\\n& = H(p) - H\\left(X \\; | \\; X + \\sum_{i=1}^{K} 2^{i-1} Z_i\\right) \\\\\n& = H(p) - (1-p)H\\left(X \\; | \\; X + \\sum_{i=1}^{K-1} 2^{i-1} Z_i\\right) \\\\\n& - p H\\left(X \\; | \\; X + \\sum_{i=1}^{K-1} 2^{i-1} Z_i\\right) \\\\\n& - (p^{K}(1-p) + p(1-p)^{K}) H\\left(\\frac{p^{K} (1-p)}{p^{K} (1-p) + p (1-p)^{K}}\\right) \\\\\n& = H(p) - H\\left(X \\; | \\; X + \\sum_{i=1}^{K-1} 2^{i-1} Z_i\\right) \\\\ \n& - (p^{K}(1-p) + p(1-p)^{K}) H\\left(\\frac{1}{1 + \\frac{p(1-p)^K}{p^K(1-p)}}\\right) \\\\\n& = H(p) - \\sum_{i=1}^K (p^i(1-p) + p(1-p)^i) H\\left(\\frac{1}{1 + \\frac{p(1-p)^i}{p^i(1-p)}}\\right).\n\\end{align*}\n\\end{IEEEproof}\n\n\n\\begin{lemma} \\label{lem:optimality_of_binary}\n\tFor any $K \\in \\mathbb{N},$ the binary scheme is optimal for $p = 0.5.$\n\\end{lemma}\n\n\n\\begin{IEEEproof}\nFor any $K \\in \\mathbb{N}$ and $p = 0.5,$ the performance of the binary scheme is given by\n\\begin{align*}\n\t& I\\left(X \\; ; \\; X + \\sum_{i=1}^{K} 2^{i-1} Z_i\\right) \\\\ \n\t& = H(0.5) - 2\\sum_{i=1}^K (0.5)^{i+1} H(0.5) \\\\\n\t& = 1 - \\sum_{i=1}^K (0.5)^{i} \\\\\n\t& = 1 - \\left(\\frac{1 - 
(0.5)^{K+1}}{0.5} - 1\\right) \\\\\n\t& = (0.5)^{K} \\\\\n\t& = 1 - 1 + (0.5)^{K+1} + (0.5)^{K+1}\n\\end{align*}\nThus, the performance of the binary scheme matches the lower bound for $p = 0.5.$\n\\end{IEEEproof}\n\n\\begin{lemma} \n\t\\label{lem:uniform_schem_formula}\n\tFor any $K \\in \\mathbb{N}, p \\in [0, 0.5],$ we have that \n\t\\begin{align*} \n\t& I\\left(X \\; ; \\; X + \\sum_{i=1}^K Z_i\\right)\n\t\\\\ & = H(p) - \\sum_{i = 1}^{K} (1-p)^{K+1-i} p^{i} \\binom{K+1}{i} H\\left(\\frac{i}{K+1}\\right)\n\t\\end{align*}\n\\end{lemma}\n\n\\begin{IEEEproof}\n We have that \n \\begin{align*} \n\t& I\\left(X \\; ; \\; X + \\sum_{i=1}^K Z_i\\right) \n\t\\\\ & = H(p) - H\\left(X \\; | \\; X + \\sum_{i=1}^K Z_i\\right) \n\t\\\\ & = H(p) \n\t\\\\ & - \\sum_{t = 0}^{K+1} \\Pr\\left(X + \\sum_{i=1}^K Z_i = t\\right) H\\left(X \\; | \\; X + \\sum_{i=1}^K Z_i = t\\right)\n\t\\end{align*}\n and\n \\[\\Pr\\left(X + \\sum_{i=1}^K Z_i = t\\right) = (1-p)^{K+1-t} p^{t} \\binom{K+1}{t}.\\] Due to Bayes rule, we have \n\t\\begin{align*}\n\t \\Pr\\left(X = 1 \\; | \\; X + \\sum_{i=1}^K Z_i = t\\right) = \\frac{t}{K+1},\n\t\\end{align*}\n and thus, \n\t\\begin{align}\n\t & H\\left(X \\; | \\; X + \\sum_{i=1}^K Z_i = t\\right)\n\t = H\\left(\\frac{t}{K+1}\\right).\n\t\\end{align}\n Thus,\n \\begin{align*} \n\t& I\\left(X \\; ; \\; X + \\sum_{i=1}^K Z_i\\right) \n\t\\\\ & = H(p) \n\t\\\\ & - \\sum_{t = 0}^{K+1} \\Pr\\left(X + \\sum_{i=1}^K Z_i = t\\right) H\\left(X \\; | \\; X + \\sum_{i=1}^K Z_i = t\\right)\n\t\\\\ & = H(p) - \\sum_{t = 0}^{K+1} (1-p)^{K+1-t} p^{t} \\binom{K+1}{t} H\\left(\\frac{t}{K+1}\\right)\n \\\\ & = H(p) - \\sum_{t = 1}^{K} (1-p)^{K+1-t} p^{t} \\binom{K+1}{t} H\\left(\\frac{t}{K+1}\\right).\n\t\\end{align*}\n\\end{IEEEproof}\n\n\\begin{lemma}\n\\label{lem:optimality_of_uniform}\nFor any $K \\in \\mathbb{N},$ there exists some $p^* > 0$ such that the uniform scheme is optimal for $p < p^*.$\n\\end{lemma}\n\n\\begin{IEEEproof}\n Consider the optimization 
problem (\\ref{eq:main}) with $\\alpha_0$ fixed to $1.$ We can assume this without loss of generality since scaling all $\\alpha_i$s by the same constant does not change the mutual information.\n\tWe have that \n\t\\begin{align*} \n\tI(X \\; ; \\; X + \\sum_{i=1}^K \\alpha_i Z_i ) = H(p) - H(X \\; | \\; X + \\sum_{i=1}^K \\alpha_i Z_i)\n\t\\end{align*} \n\tand\n\t\\begin{align} \\label{eq:cond_ent_sum} \n\t&H(X \\; | \\; X + \\sum_{i=1}^K \\alpha_i Z_i) \\nonumber \\\\\n\t& = \\sum_{t \\in \\mathbb{N}_0} \\Pr(X + Z_\\alpha = t) H(X \\; | \\; X + Z_\\alpha = t)\n\t\\end{align} \n\tObserve that as $p \\to 0,$ we have that $H(p) \\to 0,$ and $H(X \\; | \\; X + \\sum_{i=1}^K \\alpha_i Z_i) \\to 0$ since $0 \\leq H(X \\; | \\; X + \\sum_{i=1}^K \\alpha_i Z_i) \\leq H(p).$ Thus, $I(X \\; ; \\; X + \\sum_{i=1}^K \\alpha_i Z_i ) \\to 0$ as $p \\to 0.$ We will show that each nonzero term in (\\ref{eq:cond_ent_sum}) decreases like $O(p),$ except the $t=1$ term which decreases like $\\Theta(p)$ if there exists some $\\alpha_i = 1$ for $i \\geq 1.$ Therefore, we will show that the uniform scheme maximizes the coefficient in the asymptotic expression for the $t=1$ term, proving that the uniform scheme is optimal as $p \\to 0.$ \n\t\n\tFor any $t$ such that $0 \\leq t < 1,$ we have that $H(X \\; | \\; X + Z_\\alpha = t) = 0$ because if $X = 1,$ then $X+Z_\\alpha \\geq 1.$\n\t\n\tNext, consider the term $\\Pr(X + Z_\\alpha = 1) H(X \\; | \\; X + Z_\\alpha = 1).$ Observe that \n\t\\begin{align*} & \\Pr(X + Z_\\alpha = 1) \\\\& = (1-p) \\Pr(Z_\\alpha = 1) \\\\ & + p \\Pr(Z_\\alpha = 0) \n\t\\end{align*}\n\tand\n\t\\begin{align*} \n\t&\\Pr(Z_\\alpha = 1) = p (1-p)^{K-1}|\\{i : \\alpha_i = 0\\}| + O(p^2), \\\\\n\t& \\Pr(Z_\\alpha = 0) = (1-p)^K,\n\t\\end{align*}\n\twhere the $O(p^2)$ term in expression for $\\Pr(Z_\\alpha = 1)$ follows because it is possible for 2 or more $Z_i$s to equal 1 such that the corresponding $\\alpha_i$s add up to $1$.\tIf $|\\{i : \\alpha_i = 0\\}| \\geq 
1,$ we have that \n\t\\begin{align} \n\t&\\Pr(X+Z_\\alpha = 1) H(X \\; | \\; X+Z_\\alpha = 1) \\nonumber \\\\\n\t& = (p (1-p)^{K}(1 + |\\{i : \\alpha_i = 1\\}|) + o(p)) \n\t\\nonumber \\\\& \\times H(X \\; | \\; X+Z_\\alpha = 1) \\nonumber \\\\\n\t& = (p (1-p)^{K}(|\\{i : \\alpha_i = 1\\}|) + o(p)) \\nonumber \\\\\n\t& \\times \\log\\left(\\frac{p (1-p)^{K}(1+|\\{i : \\alpha_i = 1\\}|)+ o(p)}{p (1-p)^{K}|\\{i : \\alpha_i = 1\\}| + o(p)} \\right) \\nonumber \\\\ \n\t& + p (1-p)^{K} \\log\\left(\\frac{p (1-p)^{K}(1+|\\{i : \\alpha_i = 1\\}|)+ o(p)}{p (1-p)^{K}}\\right) \\nonumber \\\\\n\t& = (p (1-p)^{K}(|\\{i : \\alpha_i = 1\\}|) + o(p)) \\nonumber \\\\\n\t& \\times \\log\\left(\\frac{(1+|\\{i : \\alpha_i = 1\\}|)+ o(1)}{|\\{i : \\alpha_i = 1\\}| + o(1)} \\right) \\nonumber \\\\ \n\t& + p (1-p)^{K} \\log((1+|\\{i : \\alpha_i = 1\\}|)+ o(1)) \\nonumber \\\\\n\t& \\sim p \\bigg(|\\{i : \\alpha_i = 1\\}|\\log \\left(1 + \\frac{1}{|\\{i : \\alpha_i = 1\\}|}\\right) \\nonumber \\\\\n\t& + \\log(1+|\\{i : \\alpha_i = 1\\}|)\\bigg)\n\t\\end{align}\n\tThe coefficient of $p$ in the asymptotic expression for $\\Pr(T(K) = 1) H(X \\; | \\; T(K) = 1)$ above is maximized when $|\\{i : \\alpha_i = 1\\}| = K.$ This is because \n\t\\begin{align*} \n\t\\frac{d}{d x} \\left(x \\log \\left(1 + \\frac{1}{x}\\right) + \\log(1 + x)\\right) > 0\n\t\\end{align*} \n\tfor all $x > 0.$\n\t\tIn contrast, suppose that $|\\{i : \\alpha_i = 0\\}| = 0.$ Then, we have that \n \\begin{align} \n\t&\\Pr(Z_\\alpha = 1) \\nonumber \\\\\n\t& = b (1-p)^{K-a} p^a + o(p^a)\t\\end{align}\n\tfor some $a \\in \\mathbb{N}$ such that $a \\geq 2,$ and therefore, \n\t\\begin{align} \n\t&\\Pr(X+Z_\\alpha = 1) H(X \\; | \\; X+Z_\\alpha = 1) \\nonumber \\\\\n\t& = (p(1-p)^K +b (1-p)^{K+1-a} p^a + o(p^a)) \\nonumber \\\\ & \\times H(X \\; | \\; X+Z_\\alpha = 1) \\nonumber \\\\\n\t& = (b (1-p)^{K+1-a} p^a + o(p^a)) \\nonumber \\\\ & \\times \\log\\left(\\frac{(p(1-p)^K +b (1-p)^{K+1-a} p^a + o(p^a))}{b (1-p)^{K+1-a} p^a 
+ o(p^a))} \\right) \\nonumber \\\\ \n\t& + p (1-p)^{K} \\log\\left(\\frac{(p(1-p)^K +b (1-p)^{K+1-a} p^a + o(p^a))}{p (1-p)^{K}}\\right) \\nonumber \\\\\n\t& = (b (1-p)^{K+1-a} p^a + o(p^a)) \\log\\left(1 + \\frac{(1-p)^{a-1}}{b p^{a-1} + o(p^{a-1}))} \\right) \\nonumber \\\\\n\t& + p (1-p)^{K} \\log\\left((1 +b (1-p)^{1-a} p^{a-1} + o(p^{a-1}))\\right) \\nonumber \\\\\n\t& \\sim b p^a \\log\\left( \\frac{1}{b p^{a-1}} \\right) + p (1-p)^{K} \\frac{b p^{a-1} }{\\ln(2)} \\nonumber \\\\\n\t& = o(p).\n\t\\end{align}\n\t\n\t\n\tNow consider any $t>1.$ For such a value of $t,$ we have that \\begin{align*} \\Pr(Z_\\alpha = t-1) = d (1-p)^{K-c} p^c + o(p^c)\n\t\\end{align*} \n\tfor some $c \\in \\mathbb{N}, \\; c \\geq 1$ and $d \\in \\mathbb{N}$ because there must be at least one $i \\in [K]$ such that $Z_i=1$ in this case. Furthermore, we have \n\t\\begin{align*} \n\t\\Pr(Z_\\alpha = t) = b (1-p)^{K-a} p^a + o(p^a)\n\t\\end{align*} \n\tfor some $a \\in \\mathbb{N}, \\; a \\geq 1$ and $b \\in \\mathbb{N}$ because there must be at least one $i \\in [K]$ such that $Z_i=1$ in this case. Thus, \n\t\\begin{align}\n\t\\label{eq:entropy_term_t_bigger_1}\n\t&\\Pr(X + Z_\\alpha = t) H(X \\; | \\; X + Z_\\alpha = t) \\nonumber \\\\\n\t&= (d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\nonumber \\\\\n\t& \\times \\log(1 + \\frac{ b (1-p)^{K-a} p^{a+1} ) + o(p^{a+1})}{d (1-p)^{K-c+1} p^{c} + o(p^{c})}) \\nonumber \\\\\n\t& + (b (1-p)^{K-a} p^{a+1} + o(p^{a+1})) \\nonumber \\\\\n\t& \\times \\log(1 + \\frac{d (1-p)^{K-c+1} p^{c} + o(p^{c})}{b (1-p)^{K-a} p^{a+1} + o(p^{a+1})}) \\nonumber \\\\\n\t& = o(p).\n\t\\end{align}\n Thus, the only term in (\\ref{eq:cond_ent_sum}) that decays like $\\Theta(p)$ as $p \\to 0$ is the $t=1$ term when there is at least one $i \\in [K]$ such that $\\alpha_i = 1$. 
Furthermore, the uniform scheme maximizes the coefficient for this term.\n Thus, there exists some $p^* > 0$ such that the uniform scheme is optimal for $p < p^*.$\n\t\n\tWe now justify the last line in (\\ref{eq:entropy_term_t_bigger_1}). \n\tConsider the first term above in the second line of (\\ref{eq:entropy_term_t_bigger_1}). If $c < a+1,$ it follows that \n\t\\begin{align*}\n\t&(d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\\n\t& \\times \\log\\left(1 + \\frac{ b (1-p)^{K-a} p^{a+1} ) + o(p^{a+1})}{d (1-p)^{K-c+1} p^{c} + o(p^{c})}\\right) \\\\\n\t& = (d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\\n\t&\\times \\log\\left(1 + \\frac{ b (1-p)^{K-a} p^{a+1-c} + o(p^{a+1-c})}{d (1-p)^{K-c+1} + o(1)}\\right) \\\\\n\t& \\sim (d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\ \n\t& \\times \\frac{ b (1-p)^{K-a} p^{a+1-c} + o(p^{a+1-c})}{\\ln(2)(d (1-p)^{K-c+1} + o(1))} \\\\\n\t& = o(p).\n\t\\end{align*}\n\tIf $c \\geq a+1,$ it follows that \n\t\\begin{align*}\n\t& (d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\\n\t& \\times \\log\\left(1 + \\frac{ b (1-p)^{K-a} p^{a+1} + o(p^{a+1})}{d (1-p)^{K-c+1} p^{c} + o(p^{c})}\\right)\\\\\n\t& \\leq (d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\\n\t& \\times \\log\\left(\\frac{1}{d (1-p)^{K-c+1} p^{c} + o(p^{c})}\\right)\\\\\n\t& \\leq (d (1-p)^{K-c+1} p^{c} + o(p^{c}))\\log\\left(\\frac{1}{(1-p)^{K} p^{K}}\\right)\\\\\n\t& = K (d (1-p)^{K-c+1} p^{c} + o(p^{c})) \\\\\n\t& \\times \\log\\left(\\frac{1}{p(1-p)}\\right)\\\\\n\t& = o(p).\n\t\\end{align*}\n\tFinally, consider the second term. 
It follows that\n\t\\begin{align*}\t\n\t& (b (1-p)^{K-a} p^{a+1} + o(p^{a+1})) \\\\\n\t& \\times \\log\\left(1 + \\frac{d (1-p)^{K-c+1} p^{c} + o(p^{c})}{b (1-p)^{K-a} p^{a+1} + o(p^{a+1})}\\right) \\\\\n\t& \\leq (b (1-p)^{K-a} p^{a+1} + o(p^{a+1})) \\\\\n\t& \\times \\log\\left(\\frac{1}{b (1-p)^{K-a} p^{a+1} + o(p^{a+1})}\\right) \\\\\n\t& \\leq (b (1-p)^{K-a} p^{a+1} + o(p^{a+1})) \\\\\n\t& \\times \\log\\left(\\frac{1}{ (1-p)^{K} p^{K} }\\right) \\\\\n\t& = K(b (1-p)^{K-a} p^{a+1} + o(p^{a+1})) \\\\\n\t& \\times \\log\\left(\\frac{1}{ p (1-p) }\\right) \\\\\n\t& = o(p).\n\t\\end{align*}\n\\end{IEEEproof}\n\n\\longversion{\n\\subsection{Explicit computation of (\\ref{eq:closedform})}\n\\iscomment{add this in long version}\n}\t\n\t\n\n\n\n\n\\section{Concluding Remarks}\n\n\n\n\n\\iscomment{we should probably remove this section. it feels out of place out of so much discussion}\n\nWe derived a lower bound on the best possible mutual information achievable by a private DNA sequencing scheme, and showed that it is tight by comparing it with the scheme generated by a greedy algorithm. 
Directions for follow-up work include considering multiple, possibly correlated, genetic loci simultaneously, and characterizing the optimal tradeoffs between privacy and the ability to correctly recover the genotype $X$ from the data received from the lab.\n\\section{Discussion of Model Assumptions} \n\\label{sec:discussion}\n\nThe model proposed in this paper represents an initial attempt at studying the problem of providing privacy to genetic information through the physical mixing of samples prior to sequencing.\nIn this section we provide additional discussion and motivation for some of the modeling assumptions made.\n\n\n\n\nWhile the idea of achieving privacy by forcing the sequencing lab to sequence a mixture of DNA samples may seem odd at first, to a certain extent, it already occurs in standard DNA sequencing pipelines.\nA person's DNA is made up of two copies of each chromosome: a paternal chromosome and a maternal chromosome.\nHence, an individual's DNA can be thought of as a 50-50 mixture of the DNA of two unrelated individuals: the father and the mother.\nOnce a person's DNA is sequenced using next-generation sequencing technologies, the sequencing reads are aligned to the human reference genome, and genetic variants are identified.\nThese reads are equally likely to have come from the paternal and maternal chromosomes.\nIn Figure~\\ref{fig:seqdata}, we show the alignment of Illumina sequencing data from one individual to the BRCA2 gene (in chromosome 13) \\cite{brca2}.\nIn the highlighted position, out of the eight reads that cover it, four have the reference allele and four have the alternative allele.\nThis can be seen as a real-life illustration of the privacy strategy discussed in our paper, since it is not possible to know whether it is the person's father or mother that has the minor allele.\nIf the DNA samples of $K$ individuals were mixed, the picture will be similar to the one in Figure~\\ref{fig:seqdata}, except that the minor allele counts 
will correspond to the mixture of $2K$ chromosomes.\n\n\n\\begin{figure}[b]\n\\centering\n\t\\includegraphics[width=\\linewidth]{data\/SNP_example_image.png}\n\t\\caption{\nAlignment of Illumina sequencing data from one individual to the BRCA2 gene (in chromosome 13) on the human reference genome, visualized with the Integrative Genome Viewer \\cite{igv}.\nIn the dashed column ($\\sim$13,906,980), out of the eight reads that cover it, four have the reference allele and four have the alternative allele.\t\nGiven this data, one cannot infer whether it is the individual's father or mother chromosome that carries the minor allele.\n \n\t}\n\t\\label{fig:seqdata}\n\\end{figure}\n\n\nOne drawback of the model we study (and also the model in \\cite{Maddah-Ali}) is that the allele at different loci are implicitly assumed to be independent.\nIn reality, genetic variants that are close in the genome are more likely to be inherited together, leading to what is known as \\emph{linkage disequilibrium} \\cite{Slatkin}.\nThis creates dependence across different the alleles at different loci, which tends to reduce the privacy at any given locus.\nOne way to deal with this would be to extend \nour problem setup to simultaneously consider the privacy of a group of nearby correlated locations.\n\n\nOur problem setup also relies on the fact that the locations that admit more than one allele are observed in separate reads. \nIn the context of shotgun sequencing, this is equivalent to the locations being far enough apart in the genome that a single read (150bp for standard Illumina platforms) cannot simultaneously cover two loci. 
\nThis is often the case, as the number of single-nucleotide polymorphisms (SNPs) analyzed by standard genomic services is around one million, while the length of the human genome is roughly 3 billion.\nFurthermore, standard direct-to-consumer genomic services utilize SNP arrays for sequencing, which essentially probe specific SNPs the genome, rather than obtaining shotgun sequencing reads that could simultaneously cover multiple variants \\cite{imai2011concordance}.\n\n\n\n\n\n\n\n\nIt should be noted that our current results do not take into account the noise from the sequencing machine itself.\nMoreover, by assuming that the observation $Y$ is the precise proportion of the minor allele we are essentially assuming that a very high coverage is used for sequencing.\nTo make the setup more realistic, a noise term should be added to the observation $Y$.\nAnother practical challenge is that Alice would not be able to choose the proportions $\\alpha_0, \\alpha_1, ..., \\alpha_K$ with arbitrary precision.\nThe inclusion of noise terms in the observation $Y$ would also mitigate the unrealistic nature of this assumption.\n\n\n\n\n\n\\section{Efficiently Computable Schemes}\n\nTheorem~\\ref{thm:main_lower_bound} provides a lower bound to the \nmutual information between $X$ and the lab's observation $Y$ for any mixing coefficients $\\alpha_0,...,\\alpha_K$, thus providing a bound to the privacy levels that can be achieved.\nHowever, \n\n\n\\rdcomment{this may sound stupid, but i think it warrants a sentence here. `Theorem 1 gives us a lower bound on the mutual information between $X$ and ..., which would be the mixing ratio that provides the highest level of privacy. However, it does not provide any mixing scheme for approaching this lower bound.'}\n\nWhile much faster than brute force search, the greedy algorithm used to obtain the upper bound requires $\\Omega(2^K)$ time in the worst case. 
At a high level, this is due to the fact that at the $j$th step in the algorithm, computation of $I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)$ requires use of each support value in the pmf of $\\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i,$ and there are $\\Omega(2^j)$ support values in the worst case. This motivates the design of efficiently computable schemes, which can then be evaluated using the lower bound in Theorem \\ref{thm:main_lower_bound}.\n\nPerhaps the most natural approach is given by the uniform scheme, which we define to set $\\alpha_i = 1$ for all $i \\in [K]\\cup\\{0\\}.$ This is the scheme used by the authors of \\cite{Maddah-Ali} to obtain their results. The scheme is explicitly defined given $K,$ so no algorithm is needed to compute it. The performance of the scheme for a given $p$ and $K$ can be computed in $O(K)$ time using the formula in Lemma \\ref{lem:uniform_schem_formula} which is given in the appendix.\n\nThe uniform scheme is important because it is optimal as $p \\to 0$ as proved in Lemma \\ref{lem:optimality_of_uniform}\nand exemplified by the optimal curve in Figure \\ref{fig:optimal_scheme_K5_minor_allele}.\n\n\\begin{lemma}\n\\label{lem:optimality_of_uniform}\nFor any $K \\in \\mathbb{N},$ there exists some $p^* > 0$ such that the uniform scheme is optimal for $p < p^*.$\n\\end{lemma}\n\n\nAt $p= 0.5,$ in Figure \\ref{fig:optimal_scheme_K5_minor_allele}, the optimal solution is given by the binary scheme, which we define to set $\\alpha_0 = 1$ and $\\alpha_i = 2^{i-1}$ for all $i \\in [K].$ Similar to the uniform scheme, the binary scheme is explicitly defined given $K.$ Also, similar to the uniform scheme, there exists a formula for computing its performance given $p$ and $K$ in $O(K)$ time. This formula is given in Lemma \\ref{lem:binary_scheme_performance} in the appendix. It is interesting that such a formula exists because the pmf of $\\sum_{i=1}^K 2^{i-1} Z_i$ has $2^K$ support values. 
\n\nMost importantly, the optimality of the binary scheme at $p = 0.5$ for $K=5$ shown in Figure \\ref{fig:optimal_scheme_K5_minor_allele} generalizes to all $K$ as proved in Lemma \\ref{lem:optimality_of_binary}.\n\n\\begin{lemma} \\label{lem:optimality_of_binary}\n\tFor any $K \\in \\mathbb{N},$ the binary scheme is optimal for $p = 0.5.$\n\\end{lemma}\n\nBecause the uniform scheme is optimal for $p \\to 0$ and the binary scheme is optimal for $p = 0.5,$ it is natural to try and combine these schemes to interpolate the performance for $p$ in the range $0 0, \\; \\sum_{i=0}^K \\alpha_i = 1} \nI\\left( X; \\alpha_0X+Z_\\alpha\\right),\n\\label{eq:main}\n}\ni.e., choose the mixing coefficients $\\alpha_0,...,\\alpha_K$ to minimize the mutual information between $X$ and the lab's observation. Here, $\\alpha>0 $ means that all entries of $\\alpha$ are positive.\nWe discuss the connection between our problem formulation and the work in \\cite{Maddah-Ali, Maddah-Ali2} in more detail in Section~\\ref{sec:related}.\n\n\n\nThe problem in (\\ref{eq:main}) is equivalent to maximizing the conditional entropy $H(X|Y)$; i.e., the residual uncertainty in $X$ after observing $Y = \\alpha_0 X+Z_\\alpha$, and can thus be understood as maximizing the privacy of $X$.\nIt can also be thought of as the problem of finding a worst-case noise $Z_\\alpha$ among those of the form $\\sum_k \\alpha_k Z_k$, with $Z_k$ being i.i.d.~${\\rm Ber}(p)$.\nOur main result is that the solution to (\\ref{eq:main}) is \nlower-bounded as\n\\alpha{\n\\min_{\\alpha \\in \\mathbb{R}^{K+1} : \\; \\alpha > 0, \\; \\sum_{i=0}^K \\alpha_i = 1} I\\left(X;\\alpha_0 X + Z_\\alpha\\right)\n\\geq I\\left(X;X + G\\right),\n\\label{eq:lower}\n}\nwhere $G \\sim {\\rm Geom}\\left( (1-p)^K \\right)$ and $G$ is independent of $X$.\nThe right-hand side of (\\ref{eq:lower}) can be computed explicitly as a function of $p$ and $K$.\nMoreover, we verify empirically that this lower bound is very close to an upper bound provided 
by a greedy algorithm that sets $\\alpha_0 = 1$ and selects $\\alpha_1,\\alpha_2,...,\\alpha_K$ sequentially to minimize the resulting mutual information, establishing $I\\left(X;X + G\\right)$ as a good approximation to the solution of (\\ref{eq:main}).\nWe derive (\\ref{eq:lower}) via a convex relaxation of (\\ref{eq:main}), and \nuse KKT conditions to show the lower bound.\n\nWe note that some of the assumptions in our problem setup such as Alice choosing $\\alpha_i$s exactly are not very realistic, and we discuss these assumptions in Section VII.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\\section{Introduction}\n\n\nAdvances in DNA sequencing technologies have led to the generation of human genetic data at an unprecedented rate \\cite{astronomical}. \nThis offers exciting prospects for biomedical research, and recent studies have leveraged the genetic data of hundreds of thousands of individuals to identify genetic markers associated with many traits and diseases \\cite{cad,diabetes,alzheimer,cirulli_rare}.\n\n\nGenetic testing for disease predisposition \\cite{diseaserisk} and popular direct-to-consumer genomics services \\cite{directtoconsumer,directtoconsumer2} can provide us with important and actionable information about our health. 
\nHowever, these services require the submission of a blood or saliva sample, making an individual's entire DNA available to the testing center.\nThis raises significant privacy concerns surrounding genetic data \\cite{erlich}, particularly regarding the potential use of this information by insurance companies \\cite{23andmepharma}.\n\n\nGiven the potential privacy risks of DNA sequencing, an important question is whether it is possible to alter a physical DNA sample prior to submitting it to a laboratory, in order to ``hide'' some of its genetic information.\nOne possible way to alter a sample could be to \n\\emph{mix} it with the DNA of other individuals.\nUpon sequencing, the lab would then observe \na mixture of \nthe data from the different samples, \nwhich would hinder its ability to retrieve individual genetic variants.\n\nThe general idea of mixing samples to attain genetic privacy\nwas proposed in \\cite{Maddah-Ali} (and later extended in \\cite{Maddah-Ali2}).\nSuppose Alice wants to have her DNA\nsequenced and has at her disposal the\nDNA samples of $K$ other people\nwho \\emph{already know} their DNA sequence.\nAlice can then mix all $K+1$ DNA samples and send them to the sequencing lab.\nFrom the lab's perspective, the DNA of the $K$ additional individuals plays the role of noise, impairing the lab's ability to recover Alice's DNA.\nHowever, upon receiving the sequencing data back, the contribution of the ``noise individuals'' can be removed, and Alice can recover her DNA sequence information.\n\\iscomment{need to mention figure}\n\n\n\nMotivated by this idea, \nwe study how to \\emph{optimally mix} DNA samples in order to maximize the privacy achieved.\nWe focus on a single \\emph{biallelic site} $s$ on the genome; i.e., a location on the human genome that admits two possible alleles\\footnote{\\rdcomment{this is a big part of justifying the model, so i prefer it get more discussion; perhaps even promote it out of a footnote. 
proposed change: Many mutations and disease indicators can be identified by a presence\/absence of a single allele. For example, TODO. In future work, we hope to generalize these results to sequences of coupled biallelic sites.}This could model, for example, the presence of the mutation on the BRCA2 gene that increases the likelihood of breast cancer \\cite{brca}}, and can thus be modeled as a single variable $X \\in \\{-1,+1\\}$.\nIn order to hide her value of $X$ from the sequencing lab, Alice mixes into her sample the samples of $K$ individuals with amounts $\\alpha_1,...,\\alpha_K$.\nWe model the lab's observation of site $s$ as\n$Y = X + \\sum_{i=1}^K \\alpha_i Z_i$,\nwhere $Z_i \\in \\{-1,1\\}$ is the allele value of the $i$th noise individual.\nThis is motivated by the fact that, if the lab uses shotgun sequencing technologies \\cite{illumina}, each reading of site $s$ is effectively a ${\\rm Ber}\\left(Y\/(1+\\sum \\alpha_i)\\right)$ random variable \\iscomment{only if $X \\in \\{0,1\\}$}.\nVia repeated readings of $s$, $Y$ can thus be obtained with arbitrary accuracy, so we assume for simplicity that $Y$ is observed exactly.\nWe refer to the long version of this manuscript \\cite{privateDNAlong} for a more detailed discussion on this model.\n\n\n\n\n\n\nFollowing \\cite{Maddah-Ali}, we model $X,Z_1,...,Z_K$ as i.i.d.~random \nvariables with $\\Pr(X=1) = p \\in [0,0.5]$.\nWe refer to $p$ as the minor allele frequency, a parameter that is known in practice for genetic loci of interest.\nAs in \\cite{Maddah-Ali}, \nwe utilize the mutual information as our privacy metric.\nOur goal is to solve \n\\alpha{\n\\min_{\\alpha_1,...,\\alpha_K} I\\left(X;X+\\sum_{k=1}^K \\alpha_k Z_k\\right),\n\\label{eq:main}\n}\ni.e., choose the mixing coefficients $\\alpha_1,...,\\alpha_K \\geq 0$ to minimize the mutual information between $X$ and the lab's observation.\n\n\\rdcomment{i would like more privacy interpretation here. 
for example, comment on the adversary's information recovery problem. in particular, as a reader, i imagine many questions. \"are the alphas known by the adversary?\" we don't need to go into to much detail, but here is where we would usually place some discussion on privacy and the adversary}\n\n\\rdcomment{re: worst case distribution. same note as in the intro: let's connect it to privacy here again}\n\nThis problem can be thought of as characterizing a worst-case noise distribution among those of the form $Z = \\sum_k \\alpha_k Z_k$, with $Z_k$ being i.i.d.~${\\rm Ber}(p)$.\nOur main result is that the solution to (\\ref{eq:main}) is well approximated as \n\\alpha{\n\\min_{\\alpha_1,...,\\alpha_K} I\\left(X;X + Z\\right)\n\\approx I\\left(X;X - 2G\\right),\n}\nwhere $G \\sim {\\rm Geom}\\left( (1-p)^K \\right)$ and $G$ is independent of $X$.\nThis result is surprising given that it is not possible in general to choose $\\alpha_1,...,\\alpha_K$ so that $Z_\\alpha \\sim {\\rm Geom}\\left( (1-p)^K \\right)$.\n\n\\rdcomment{when i read this part, the emotions i feel are skeptical, unsure, confused, and, as a result, mildly frustrated. it's not too big of a deal, but i think the \"approx\" part feels too vague to me at this point, so i prefer some foreshadowing. (in particular, you run the risk of a reader making up one set of expectations and being irrationally unhappy when these expectations are not met. my personal advice for writing these things is to always be calibrating and controlling the reader's expectations) proposed change: more details as to exactly what approx means, e.g.we say approx in the sense that the this mutual information serves as a lower bound and is very very close to an upper bound. (can we quantify how close it is to the upper bound?) 
maybe even say \"a more formal statement of this is in Theorem 1\" or something.}\n\nTo establish this result, we introduce a continuous (and convex) relaxation of (\\ref{eq:main}), and formally show that $I\\left(X;X - 2G\\right)$ is a lower bound to the discrete optimization problem.\nThis lower bound is then empirically shown to be very close a greedy solution to (\\ref{eq:main}), which chooses $\\alpha_1,...,\\alpha_K$ sequentially, to minimize the resulting mutual information.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\\begin{figure}\\label{fig:overview}\n\t\\centering\n\t\\includegraphics[width=0.75\\linewidth]{Overview}\n\t\\caption{A high level diagram of the problem \\rdcomment{my personal style is that the caption describes the figure in a fashion that it can be understood without reading the text in the body. i am not sure if IS agrees with this though. either way, i think this caption needs a little polishing.}}\n\t\\label{fig:Overview}\n\\end{figure}\n\\section{Introduction}\n\n\nAdvances in DNA sequencing technologies have led to the generation of human genetic data at an unprecedented rate \\cite{astronomical}. \nThis offers exciting prospects for biomedical research, and recent studies have leveraged the genetic data of hundreds of thousands of individuals to identify genetic markers associated with many traits and diseases \\cite{cad,diabetes,alzheimer,cirulli_rare}.\n\n\nGenetic testing for disease predisposition \\cite{diseaserisk} and popular direct-to-consumer genomics services \\cite{directtoconsumer,directtoconsumer2} can provide us with important and actionable information about our health. 
\nHowever, these services require the submission of a blood or saliva sample, making an individual's entire DNA available to the testing center.\nThis raises significant privacy concerns surrounding genetic data \\cite{erlich}, particularly regarding the potential use of this information by insurance companies \\cite{23andmepharma}.\n\n\nGiven the potential privacy risks of DNA sequencing, an important question is whether it is possible to alter a physical DNA sample prior to submitting it to a laboratory, in order to ``hide'' some of its genetic information.\nOne possible way to alter a sample could be to \n\\emph{mix} it with the DNA of other individuals.\nUpon sequencing, the lab would then observe \na mixture of \nthe data from the different samples, \nwhich would hinder its ability to retrieve individual genetic variants.\n\nThe general idea of mixing samples to attain genetic privacy\nwas proposed in \\cite{Maddah-Ali} (and later extended in \\cite{Maddah-Ali2}).\nSuppose Alice wants to have her DNA\nsequenced and has at her disposal the\nDNA samples of $K$ other people\nwho \\emph{already know} their DNA sequence.\nAlice can then mix all $K+1$ DNA samples and send them to the sequencing lab.\nFrom the lab's perspective, the DNA of the $K$ additional individuals plays the role of noise, impairing the lab's ability to recover Alice's DNA.\nHowever, upon receiving the sequencing data back, the contribution of the ``noise individuals'' can be removed, and Alice can recover her DNA sequence information.\nThis approach is illustrated in Figure~\\ref{fig:Overview}.\n\n\n\nMotivated by this idea, \nwe study how to \\emph{optimally mix} DNA samples in order to maximize the privacy achieved.\nWe focus on a single \\emph{biallelic site} $s$ on the genome; i.e., a location on the human genome that admits two possible alleles, and can thus be modeled as a single variable $X \\in \\{0,1\\}$.\nThis could model, for example, the presence of the mutation on the BRCA2 gene 
that increases the likelihood of breast cancer \\cite{brca} and many other disease genetic markers.\n\\iscomment{mention diploid in long}\n\n\n\n\\begin{figure}[b]\n\t\\centering\n\t\\includegraphics[width=0.77\\linewidth]{data\/alice_fig.pdf}\n\t\\caption{\n\tIn order to hide her genotype $X$ at a given locus $s$, Alice mixes her DNA sample with that of $K$ individuals in amounts $\\alpha_1,...,\\alpha_K$.\n\tUpon receiving the sequencing data from the lab, Alice can remove the contribution from the ``noise individuals'' (whose genotype at $s$ is known) to recover $X$.\n\t}\n\t\\label{fig:Overview}\n\\end{figure}\n\n\nIn order to hide her genotype $X$ from the sequencing lab, Alice mixes into her sample the samples of $K$ individuals using proportions $\\alpha_0,\\alpha_1,...,\\alpha_K$, where $\\sum_{i=0}^K \\alpha_i = 1$.\nWe model the lab's observation of site $s$ as\n$Y = \\alpha_0 X + \\sum_{i=1}^K \\alpha_i Z_i$,\nwhere \n$Z_i \\in \\{0,1\\}$ is the allele value of the $i$th noise individual.\nThis is motivated by the fact that, if the lab uses shotgun sequencing technologies \\cite{illumina}, each reading of site $s$ is effectively a ${\\rm Ber}\\left(\\alpha_0 X + \\sum_{i=1}^K \\alpha_i Z_i\\right)$ random variable.\nVia repeated readings of $s$, $Y$ can thus be obtained with arbitrary accuracy, so we assume for simplicity that $Y$ is observed exactly.\nWe refer to the long version of this manuscript \\cite{privateDNAlong} for a more detailed discussion on this model.\n\n\n\n\n\n\nFollowing \\cite{Maddah-Ali}, we model $X,Z_1,...,Z_K$ as i.i.d.~random \nvariables with $\\Pr(X=0) = p \\in [0,0.5]$.\n\\kmcomment{I changed this to $\\Pr(X=0) = p $ since we have been using $0$ as minor allele}\nWe refer to $p$ as the minor allele frequency, a parameter that is known in practice for genetic loci of interest.\nAs in \\cite{Maddah-Ali}, \nwe utilize the mutual information as our privacy metric.\nIf we let $\\alpha = (\\alpha_0,...,\\alpha_K)$ and $Z_\\alpha = 
\\sum_{k=1}^K \\alpha_k Z_k$, our goal is thus to solve\n\\alpha{\n\\min_{\\alpha} \nI\\left( X; \\alpha_0X+Z_\\alpha\\right),\n\\label{eq:main}\n}\ni.e., choose the mixing coefficients $\\alpha_0,...,\\alpha_K$ to minimize the mutual information between $X$ and the lab's observation. \n\n\nThe problem in (\\ref{eq:main}) is equivalent to maximizing the conditional entropy $H(X|Y)$; i.e., the residual uncertainty in $X$ after observing $Y = \\alpha_0 X+Z_\\alpha$, and can thus be understood as maximizing the privacy of $X$.\nIt can also be thought of as the problem of finding a worst-case noise $Z_\\alpha$ among those of the form $\\sum_k \\alpha_k Z_k$, with $Z_k$ being i.i.d.~${\\rm Ber}(p)$.\nOur main result is that the solution to (\\ref{eq:main}) is \nlower-bounded as\n\\alpha{\n\\min_{\\alpha} I\\left(X;\\alpha_0 X + Z_\\alpha\\right)\n\\geq I\\left(X;X - G\\right),\n\\label{eq:lower}\n}\nwhere $G \\sim {\\rm Geom}\\left( (1-p)^K \\right)$ and $G$ is independent of $X$.\nThe right-hand side of (\\ref{eq:lower}) can be computed explicitly as a function of $p$ and $K$.\nMoreover, we verify empirically that this lower bound is very close to an upper bound provided by a greedy algorithm that selects $\\alpha_1,\\alpha_2,...,\\alpha_K$ sequentially to minimize the resulting mutual information, establishing $I\\left(X;X - G\\right)$ as a good approximation to the solution of (\\ref{eq:main}).\nWe derive our lower bound via a convex relaxation of (\\ref{eq:main}), and \nuse the KKT conditions to show that its solution is given by\n$I\\left(X;X - G\\right)$.\n\\kmcomment{I guess we don't formally show that the solution to the infinite dimensional relaxation is $- G$}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\\section{Relationship with worst-case noise}\n\nWhen is geometric worst-case noise from capacity standpoint?\nFor a fixed channel standpoint?\n}\n\n\n\n\n\\section{Auxiliary Lemmas} 
\\label{sec:lemmas}\n\\input{appendix_proofs}\n\n\\input{discussion_of_model}\n\n\n\n\n{\\footnotesize\n\n\\bibliographystyle{ieeetr}\n\n\n\\section{Proof of Theorem~\\ref{thm:main_lower_bound}}\n\n\n\n\nWe obtain the lower bound on $I(X \\, ; \\, \\alpha_0 X + Z_{\\alpha})= H(p) - H(X \\, | \\, \\alpha_0 X+Z_{\\alpha})$ \nby finding a lower bound on $-H(X \\, | \\, \\alpha_0 X+Z_{\\alpha})$. \nTherefore, we consider\n\\begin{align}\n \\label{eq:integral_alpha_opt_prob}\n & \\min_{\\alpha \\in \\mathbb{R}^{K+1} : \\: \\alpha >0} -H\\left(X \\; \\left| \\; \\alpha_{0} X+\\sum_{k=1}^K \\alpha_k Z_k \\right. \\right).\n\\end{align}\nObserve that the pmf of the random variable $Z_{\\alpha} = \\sum_{i = 1}^K \\alpha_i Z_i$ has probability $(1-p)^{K}$ at its lowest support value. \nMore precisely, $0$ is the minimum value that $Z_\\alpha$ can take, which occurs with probability $(1-p)^{K}$.\nA relaxation to (\\ref{eq:integral_alpha_opt_prob}) can then be obtained by ignoring all constraints on the pmf of $Z_\\alpha$ except the constraint on the minimum pmf value.\nThus, for a fixed value of $\\alpha_0$, a relaxation to (\\ref{eq:integral_alpha_opt_prob}) is given by\n\\begin{align}\n \\label{eq:relax_inf1}\n \\min_{Q} \\; & -H\\left(X \\; | \\; \\alpha_0 X+Q\\right)\n \\\\\\text{subject to:} \\; & \n \\nonumber \n \n Q \\text{ is a discrete random variable} \\nonumber\n \\\\ & \\text{$Q$ is independent of $X$} \\nonumber\n \n \\\\ & \\Pr(Q = 0) = (1-p)^K \\nonumber\n \\\\ & \\Pr(Q = i) = 0 \\; \\text{ for $i < 0$}.\n \\nonumber\n\\end{align}\nFurthermore, as we prove in Lemma~\\ref{lem:alpha0_condition} in Section~\\ref{sec:lemmas}, \nfixing $\\alpha_0 = 1$ in (\\ref{eq:relax_inf1}) and constraining the support of $Q$ to be integer \ndoes not change the optimal value. 
We assume these additional constraints throughout.\n \n\nLet $q_{(i)}$ be the pmf of $Q$; i.e., $q_{(i)} = \\Pr(Q=i)$ for $i \\geq 0$.\nIn order to write (\\ref{eq:relax_inf1}) explicitly in terms of $q$ under the assumption that $Q$ has integer support and $\\alpha_0 = 1$, we define \n\\begin{align}\n g_j(q) & \\triangleq -H(X|X+Q=j)\\Pr(X+Q=j) \\nonumber \\\\\n & = \\Pr(Q+X=j) \\nonumber \\\\\n & \\quad \\times \\bigg[ \\frac{(1-p)q_{(j)}}{\\Pr(Q+X=j)} \\log\\left(\\frac{(1-p)q_{(j)}}{\\Pr(Q+X=j)}\\right) \\nonumber \\\\ & \\quad + \\frac{p q_{(j-1)}}{\\Pr(Q+X=j)} \\log\\left(\\frac{p q_{(j-1)}}{\\Pr(Q+X=j)}\\right) \\bigg] \\nonumber \\\\ \n & = (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j-1)}} \\right) \n \\nonumber \n \\\\ & \\quad + p q_{(j-1)} \\log \\left(\\frac{p q_{(j-1)}}{(1-p)q_{(j)} + p q_{(j-1)}} \\right).\n\\end{align}\nAssuming integer support for $Q$ and fixing $\\alpha_0 = 1,$ we have that (\\ref{eq:relax_inf1}) written in terms of $g_j(q)$ is given by\n\\begin{align}\n\\label{eq:relax_inf2}\n\\min_{q_{(i)} \\geq 0 : \\; i \\in \\mathbb{N}_0 } \\; & \\quad \\sum_{j \\in \\mathbb{N}} g_j(q)\n\\\\ \\text{subject to:} \\; & \\quad q_{(0)} = (1-p)^K \\nonumber \\\\ & \\quad \\sum_{j=0}^\\infty q_{(j)} = 1. \\nonumber\n\\end{align} \n\n\nObserve that (\\ref{eq:relax_inf2}) is a convex minimization problem with infinitely many variables. 
\nFor such problems, to the best of our knowledge, a solution to the KKT conditions is not in general guaranteed to yield an optimal solution.\nFor that reason, we do not seek to directly solve the KKT conditions and, instead, we consider a \\emph{support-constrained} version of (\\ref{eq:relax_inf2}), where the support of the pmf of $Q$ is restricted to $\\{0,...,n\\}$, and let $n \\to \\infty$.\nThe support-constrained version of (\\ref{eq:relax_inf2}) is given by \n\\begin{align}\n\\label{eq:relax_const}\n\\min_{q_{(i)} \\geq 0 : \\; i \\in \\{0,...,n+1\\} } \\; & \n\\sum_{j =1}^{n+1} g_j(q) \\\\ \\text{subject to:} \\; & \n\\; q_{(0)} = (1-p)^K, \\quad q_{(n+1)} = 0 \\nonumber \\\\\n\\quad & \\; \\sum_{j=0}^{n+1} q_{(j)} = 1. \\nonumber\n\\end{align} \nNote that this is no longer a relaxation to the original problem.\n\n\nIn order to ensure that the derivative of the objective function exists at all feasible $q_{(i)},$ and ultimately find a lower bound on the optimal value of (\\ref{eq:relax_const}) through perturbation analysis, we change the constraints in (\\ref{eq:relax_const}) from $q_{(i)} \\geq 0$ to $q_{(i)} > 0$ and from $q_{(n+1)} = 0$ to $q_{(n+1)} = \\epsilon$ where \n$\\epsilon > 0$, obtaining\n\\begin{align}\n\\label{eq:relax_const_posq}\n\\min_{q_{(i)} > 0 : \\; i \\in \\{0,...,n+1\\} } \\; &\n\\sum_{j =1}^{n+1} g_j(q)\n\\\\ \\text{subject to:} \\; & \n\\; q_{(0)} = (1-p)^K, \\quad q_{(n+1)} = \\epsilon \\nonumber \\\\ \n\\quad &\\; \\sum_{j=0}^{n+1} q_{(j)} = 1. 
\\nonumber\n\\end{align} \n\nLet $V_n^*$ be the optimal value of (\\ref{eq:relax_const}) and let $V_{n,\\epsilon}^*$ be the optimal value of (\\ref{eq:relax_const_posq}) for a given $\\epsilon.$ Due to the continuity of the objective function in (\\ref{eq:relax_const}), we have $ V_n^* \\geq \\inf_{\\epsilon >0} V_{n,\\epsilon}^*.$ More precisely, for any solution to (\\ref{eq:relax_const}), it follows that (\\ref{eq:relax_const_posq}) can get arbitrarily close to the corresponding value as $\\epsilon \\to 0$ due to the continuity of the objective function. \n\n\nWhile we do not know the solution to (\\ref{eq:relax_const}) or (\\ref{eq:relax_const_posq}), we will perturb (\\ref{eq:relax_const_posq}) to form a problem we can solve analytically, then use perturbation analysis to lower bound the optimal value of (\\ref{eq:relax_const_posq}), and ultimately lower bound the optimal value of (\\ref{eq:relax_const}). Let $\\beta \\triangleq (1-p)^K.$ The perturbed version of (\\ref{eq:relax_const_posq}) is given by \n\\begin{align}\n\\label{eq:relax_const_pert_posq}\n\\min_{q_{(i)} > 0 : \\; i \\in \\{0,...,n+1\\} } & \\;\n\\sum_{j =1}^{n+1} g_j(q) \\\\\t\n\\text{subject to:} \n& \\; \\; q_{(0)} = \\beta , \\quad q_{(n+1)} = \\beta (1 - \\beta )^{n+1}\n\\nonumber \\\\ & \\; \\sum_{j=0}^{n+1} q_{(j)} = 1 - (1 - \\beta )^{n+2}. \\nonumber\n\\end{align} \nObserve that as $n$ increases, we have that $\\beta (1 - \\beta )^{n+1}\\to 0$ and $1 - (1 - \\beta )^{n+2} \\to 1.$ In other words, the constraints in (\\ref{eq:relax_const_pert_posq}) approach the constraints in (\\ref{eq:relax_const}).\nWe can solve (\\ref{eq:relax_const_pert_posq}) by solving the KKT conditions since the problem is convex, Slater's condition holds, and the objective function and constraint functions are differentiable on the domain $q_{(i)} > 0.$ \nLet $f_0(q)$ be the objective function of (\\ref{eq:relax_const_pert_posq}). 
The Lagrangian is given by \n\\begin{align} \\label{eq:lagrangian}\nL(q, v, \\lambda) = f_0(q) \n& + v_1 \\left(\\sum_i q_{(i)} - 1 + (1 - \\beta)^{n+2} \\right) \n\\nonumber \\\\ & + v_2 \\left(q_{(0)} - \\beta \\right) \n\\nonumber \\\\ & + v_3 \\left(q_{(n+1)} - \\beta (1 - \\beta)^{n+1} \\right). \n\\end{align}\n\\longversion{\n\\iscomment{what is $\\lambda$ here?}\n}\nThe perturbation values in (\\ref{eq:relax_const_pert_posq}) are carefully chosen so that the KKT conditions yield an optimal solution given by \n\\begin{align}\n\\label{eq:KKT_conditions_sol}\n& q_{(i)} = \\beta (1 - \\beta)^{i} \\text{ for } i \\in \\{0,...,n+1\\}.\n\\end{align}\nThis corresponds to the first $n+2$ terms of the pmf of a Geometric distribution.\nThe derivation of (\\ref{eq:KKT_conditions_sol}) and the optimal Lagrange multipliers $v_1^*,v_2^*,v_3^*$ are provided in Lemma~\\ref{lem:solve_KKT}.\n\n\nLet ${U}_{n}^*$ be the solution to (\\ref{eq:relax_const_pert_posq}), obtained by plugging in (\\ref{eq:KKT_conditions_sol}).\n\\longversion{\nApplying the geometric sum formula and simplifying, the optimal value for (\\ref{eq:relax_const_pert_posq}) is \n\\begin{align} \n& -(1-p) \\beta (1-\\beta) \\frac{1 - (1-\\beta)^{n+1}}{1 - (1-\\beta)} \\log \\left( \\frac{(1-p) (1-\\beta) + p }{(1-p) (1-\\beta)} \\right) \n\\nonumber \\\\ & - p \\beta \\frac{1 - (1-\\beta)^{n+1}}{1 - (1-\\beta)} \\log \\left( \\frac{(1-p) (1-\\beta) + p }{ p } \\right).\n\\end{align}\n}\nUsing the perturbation analysis from Section 5.6.1 of \\cite{boyd}, we see that the optimal value of (\\ref{eq:relax_const_posq}), $V_{n,\\epsilon}^*$, is lower bounded as\n\\begin{align}\n\\label{eq:relax_const_posq_bound}\nV_{n,\\epsilon}^* \\geq\nU_n^*\n- v_1^* \\left( (1 - \\beta)^{n+2} \\right) - v_3^* \\left(- \\beta (1 - \\beta)^{n+1} +\\epsilon \\right),\n\\end{align} \nwhere $v_1^*$ and $v_3^*$ are the optimal Lagrange multipliers for (\\ref{eq:relax_const_pert_posq})\ndescribed in Lemma~\\ref{lem:solve_KKT} in 
Section~\\ref{sec:lemmas}.\nTaking the infimum of (\\ref{eq:relax_const_posq_bound}) over \n$\\epsilon > 0$ then\nyields \n\\begin{align}\n\\label{eq:relax_const_bound}\nV_n^* \\geq\nU_n^*\n- v_1^* \\left( (1 - \\beta)^{n+2} \\right) - v_3^* \\left(- \\beta (1 - \\beta)^{n+1} \\right).\n\\end{align} \n\nThe sequence $V_n^*$ of optimal values returned by (\\ref{eq:relax_const}) is non-increasing in $n$. \nThus, letting $n \\to \\infty$ in (\\ref{eq:relax_const_bound}) implies that $\\lim_{n\\to\\infty}U_{n}^*$ is a lower bound to $-H(X \\, | \\, \\alpha_0 X + Z_{\\alpha})$ for any choice of $\\alpha_i$'s.\nNotice that, as $n \\to \\infty$, $q_{(j)}$ in (\\ref{eq:KKT_conditions_sol}) converges to the pmf of a Geometric random variable $G$ with \n$\\Pr(G = i) = \\beta (1-\\beta)^i = (1-p)^K(1 - (1-p)^K)^i$ for $i \\in \\mathbb{N}_0$.\nSince the objective function of (\\ref{eq:relax_const_pert_posq}) is $-H(X|X+Q)$ where $Q$ has pmf $q_{(j)}$, this concludes our proof.\n\n\n\n\n\\longversion{\nThus, taking the limit of the lower bound (\\ref{eq:relax_const_bound}) as $n \\to \\infty$ and simplifying yields the following lower bound on $-H(X \\; | \\; \\alpha_0 X + Z_{\\alpha})$ for any choice of $\\alpha_i$'s: \\begin{align} \n\\label{eq:lower_bound_neg_cond_ent}\n& -(p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{1 - (1-p)^{K+1} }{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p \\log \\left( \\frac{1 - (1-p)^{K+1} }{p} \\right).\n\\end{align}\n\nFinally, observe that\n$-H(X \\; ; \\; X - G)$ is equal to (\\ref{eq:lower_bound_neg_cond_ent})\nwhere $G$ is a geometric random variable with\n$\\Pr(G = i) = (1-p)^K(1 - (1-p)^K)^i$ for $i \\in \\mathbb{N}_0.$\n}\n\n\n\\section{Proof of Theorem~\\ref{thm:main_lower_bound}}\n\n\\iscomment{No need for the proof environment here}\n\n\n\nWe obtain the lower bound on $I(X \\, ; \\, \\alpha_0 X + Z_{\\alpha})= H(p) - H(X \\, | \\, \\alpha_0 X+Z_{\\alpha})$ in Theorem~\\ref{thm:main_lower_bound} by finding 
a lower bound on $-H(X \\, | \\, \\alpha_0 X+Z_{\\alpha}).$ The optimization problem we consider is therefore\n\\begin{align}\n \\label{eq:integral_alpha_opt_prob}\n & \\min_{\\alpha_0,...,\\alpha_K \\in \\mathbb{N}} -H\\left(X \\; \\left| \\; \\alpha_{0} X+\\sum_{k=1}^K \\alpha_k Z_k \\right. \\right).\n\\end{align}\nObserve that the pmf of the random variable $Z_{\\alpha} = \\sum_{i = 1}^K \\alpha_i Z_i$ has probability $(1-p)^{K}$ at its highest support value. \nMore precisely, $\\sum_{i=1}^k \\alpha_K$ is the maximum value that $Z_\\alpha$ can take, which occurs with probability $(1-p)^{K}$.\nA relaxation to (\\ref{eq:integral_alpha_opt_prob}) can be then obtained by ignoring all constraint on the pmf of $Z_\\alpha$ except for the maximum pmf value.\nThus, for a fixed value of $\\alpha_0$, a relaxation to (\\ref{eq:integral_alpha_opt_prob}) is given by\n\\begin{align}\n \\label{eq:relax_inf1}\n \\min_{Q} \\; & -H\\left(X \\; | \\; \\alpha_0 X+Q\\right)\n \\\\\\text{subject to:} \\; & \n \\nonumber \n \n Q \\text{ is an integer-valued random variable} \\nonumber\n \\\\ & \\text{$Q$ is independent of $X$} \\nonumber\n \n \\\\ & \\Pr(Q = 0) = (1-p)^K \\nonumber\n \\\\ & \\Pr(Q = i) = 0 \\; \\text{ for $i > 0$}.\n \\nonumber\n\\end{align}\n\\iscomment{Should we use $Q$ instead of $Q$ to match pmf?}\nNotice that the last constraint is equivalent to saying that $0$ is the maximum value of $Q$.\nThis can be assumed without loss of generality, since adding a constant to $Q$ does not change the value of $H(X\\,|\\,\\alpha_0X+Q)$.\nFurthermore, as we prove in Appendix~\\ref{app:alpha0}, \nfixing $\\alpha_0 = 1$ in (\\ref{eq:relax_inf1}) above \ndoes not change the optimal value, and we assume throughout that $\\alpha_0=1$.\n \n\nLet $q_{(i)}$ be the pmf of $Q$; i.e., $q_{(i)} = \\Pr(Q=i)$ for $i \\leq 0$.\nIn order to write (\\ref{eq:relax_inf1}) explicitly in terms of $q$, we define \n\\begin{align}\n g_j(q) & \\triangleq -H(X|X+Q=j+1)\\Pr(X+Q=j+1) \\nonumber \\\\\n & = 
\\Pr(Q+X=j+1) \\nonumber \\\\\n & \\quad \\times \\bigg[ \\frac{(1-p)q_{(j)}}{\\Pr(Q+X=j+1)} \\log\\left(\\frac{(1-p)q_{(j)}}{\\Pr(Q+X=j+1)}\\right) \\nonumber \\\\ & \\quad + \\frac{p q_{(j+1)}}{\\Pr(Q+X=j+1)} \\log\\left(\\frac{p q_{(j+1)}}{\\Pr(Q+X=j+1)}\\right) \\bigg] \\nonumber \\\\ \n \n \n \n & = (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n \\nonumber \n \\\\ & \\quad + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right).\n\\end{align}\nWritten in terms of $g_j(q)$ and with $\\alpha_0 = 1$, (\\ref{eq:relax_inf1}) is given by\n\\begin{align}\n\\label{eq:relax_inf2}\n\\min_{q_{(-i)} \\geq 0 : \\; i \\in \\mathbb{N}\\cup\\{0\\} } \\; & \\sum_{j \\in \\{-n : \\; n \\in \\mathbb{N}\\}} g_j(q)\n\\\\ \\text{subject to:} \\; & \\quad q_{(0)} = (1-p)^K \\nonumber \\\\ & \\quad \\sum_{j=0}^\\infty q_{(-j)} = 1. \\nonumber\n\\end{align} \nNote that the conditional entropy in the objective function is in a simplified form. \n\\iscomment{What does this mean?}\n\n\\iscomment{I rewrote the following a bit:}\n\nObserve that (\\ref{eq:relax_inf2}) is a convex minimization problem with an infinitely many variables. \nFor such problems, to the best of our knowledge, a solution to the KKT conditions is not in general guaranteed to yield an optimal solution.\nFor that reason, we do not seek to directly solve the KKT conditions.\nInstead, we first consider a \\emph{support-constrained} version of (\\ref{eq:relax_inf2}, where the support of the pmf of $Q$ is restricted to $\\{-n,...,0\\}$, and then consider letting $n \\to \\infty$.\nThe support-constrained version of (\\ref{eq:relax_inf2}) is given by \n\\begin{align}\n\\label{eq:relax_const}\n\\min_{q_{(i)} \\geq 0 : \\; i \\in \\{-n,...,0\\} } \\; & \n\\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} g_j(q) \\\\ \\text{subject to:} \\; & \nq_{(0)} = (1-p)^K, \\quad q_{(-n-1)} = 0 \\nonumber \\\\\n\\quad & \\; \\sum_{j} q_{(j)} = 1. 
\\nonumber\n\\end{align} \n\\iscomment{say this is no longer a relaxation}\n\nChanging the implicit constraint in (\\ref{eq:relax_const}) from $q_{(i)} \\geq 0$ to $q_{(i)} > 0$ and the explicit constraint $q_{(-n-1)} = 0$ to $q_{(-n-1)} = \\epsilon$ where $\\epsilon \\in \\mathbb{R}_{>0}$ gives us\n\\begin{align}\n\\label{eq:relax_const_posq}\n& \\min_{q_{(-i)} > 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n\\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} g_j(q)\n\\\\ & \\text{subject to:} \\nonumber \\\\ & \n\\quad q_{(0)} = (1-p)^K, \\quad q_{(-n-1)} = \\epsilon, \\quad \\sum_{j} q_{(j)} = 1. \\nonumber\n\\end{align} \n\nLet $p^*$ be the optimal value of \\ref{eq:relax_const} and let $p_{\\epsilon}^*$ be the optimal value of \\ref{eq:relax_const_posq} for a given $\\epsilon.$ Due to the continuity of the objective function in (\\ref{eq:relax_const}), it is not hard to show that $ p^* \\geq \\inf_{\\epsilon \\in \\mathbb{R}_{>0}} p_{\\epsilon}^*.$ At a high level, this is due to the fact that for any solution to (\\ref{eq:relax_const}), it follows that (\\ref{eq:relax_const_posq}) can get arbitrarily close to it as $\\epsilon \\to 0$ due to the continuity of the objective function. \n\n\n\nWhile we do not know the solution to (\\ref{eq:relax_const}) or (\\ref{eq:relax_const_posq}), we will perturb (\\ref{eq:relax_const_posq}) to form a problem we can solve analytically, then use perturbation analysis to lower bound the optimal value of (\\ref{eq:relax_const_posq}), and ultimately lower bound the optimal value of (\\ref{eq:relax_const}). 
The perturbed version of (\\ref{eq:relax_const_posq}) is given by \n\\begin{align}\n\\label{eq:relax_const_pert_posq}\n& \\min_{q_{(-i)} > 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n\\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} g_j(q) \\\\\t& \n\\text{subject to:} \\nonumber \\\\\n& q_{(0)} = (1-p)^K, \\quad q_{(-n-1)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\\nonumber \\\\ & \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2} \\nonumber\n\\end{align} \n\\iscomment{change to $\\beta$ notation. emphasize perturbation goes to $0$ as $n \\to \\infty$}\nwhere we have changed the constraint on the probability at the minimum support value, and the constraint on the sum of all the probabilities.\nWe can solve this by solving the KKT conditions since the problem is convex, Slater's condition holds, and the objective function and constraint functions are differentiable on the domain $q_{(i)} > 0.$ \n\nWe proceed to find the solution to the KKT conditions for (\\ref{eq:relax_const_pert_posq}). Let $f_0(q)$ be the objective function of (\\ref{eq:relax_const_pert_posq}). The Lagrangian is given by \n\\begin{align} \n&L(q, v, \\lambda) = f_0(q) \n\\nonumber \\\\ & + v_1 \\left(\\sum_i q_{(i)} - 1 + (1 - (1-p)^K)^{n+2} \\right) \n\\nonumber \\\\ & + v_2 \\left(q_{(0)} - (1-p)^K \\right) \n\\nonumber \\\\ & + v_3 \\left(q_{(-n-1)} - (1-p)^K (1 - (1-p)^K)^{n+1} \\right). 
\n\\end{align}\nBy Lemma \\ref{lem:solve_KKT}, a solution to the KKT conditions is given by \n\\begin{align}\n\\label{eq:KKT_conditions_sol}\n& q_{(-i)} = (1-p)^K (1 - (1-p)^K)^{i} \\text{ for } i \\in [n+1] \\cup \\{0\\} \n\\nonumber \\\\ & v_1 = - p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right) \n\\nonumber \\\\ & - (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right)\n\\nonumber \\\\ & v_2 = (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right) \n\\nonumber \\\\ & v_3 = p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right),\n\\end{align}\nwhich is then an optimal solution to (\\ref{eq:relax_const_pert_posq}). \nApplying the geometric sum formula and simplifying, the optimal value for (\\ref{eq:relax_const_pert_posq}) is \n\\begin{align} \n& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\\end{align}\nwhere $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K.$ \n\nUsing the perturbation analysis from Section 5.6.1 of \\cite{boyd}, we see that the optimal value of (\\ref{eq:relax_const_posq}) is lower bounded by\n\\begin{align}\n\\label{eq:relax_const_posq_bound}\n& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\\nonumber \\\\ & - v_1^* \\left( (1 - (1-p)^K)^{n+2} \\right) \n\\nonumber \\\\ & - v_3^* \\left(- (1-p)^K (1 - (1-p)^K)^{n+1} +\\epsilon \\right) \n\\end{align} \nwhere $v_1^*$ and $v_3^*$ are the optimal lagrange multipliers for (\\ref{eq:relax_const_pert_posq})\nwe determined through the KKT conditions.\n\nTaking the infimum of (\\ref{eq:relax_const_posq_bound}) over $\\epsilon \\in 
\\mathbb{R}_{>0}$ yields the following lower bound on (\\ref{eq:relax_const}):\n\n\\begin{align}\n\\label{eq:relax_const_bound}\n& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\\nonumber \\\\ & - v_1^* \\left( (1 - (1-p)^K)^{n+2} \\right) \n\\nonumber \\\\ & - v_3^* \\left(- (1-p)^K (1 - (1-p)^K)^{n+1} \\right). \n\\end{align} \n\nThe sequence of optimal values returned by (\\ref{eq:relax_const}) is monotonically decreasing in $n$ clearly. Thus, taking the limit of the lower bound (\\ref{eq:relax_const_bound}) as $n \\to \\infty$ and simplifying yields the following lower bound on $-H(X \\; | \\; \\alpha_0 X + Z_{\\alpha})$ for any choice of $\\alpha_i$'s: \\begin{align} \n\\label{eq:lower_bound_neg_cond_ent}\n& -(p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right).\n\\end{align}\n\nFinally, observe that\n$-H(X \\; ; \\; X - G)$ is equal to (\\ref{eq:lower_bound_neg_cond_ent})\nwhere $G$ is a geometric random variable with\n$\\Pr(G = i) = (1-p)^K(1 - (1-p)^K)^i$ for $i \\in \\mathbb{N} \\cup \\{0\\}.$\nThis concludes the proof of Theorem~\\ref{thm:main_lower_bound}.\n\n\n\\section{Proof of Theorem~\\ref{thm:main_lower_bound}}\n\n\\iscomment{No need for the proof environment here}\n\n\n\nWe obtain the lower bound on $I(X \\, ; \\, \\alpha_0 X + Z_{\\alpha})= H(p) - H(X \\, | \\, \\alpha_0 X+Z_{\\alpha})$ in Theorem~\\ref{thm:main_lower_bound} by finding a lower bound on $-H(X \\, | \\, \\alpha_0 X+Z_{\\alpha}).$ The optimization problem we consider is therefore\n\\begin{align}\n \\label{eq:integral_alpha_opt_prob}\n & \\min_{\\alpha_0,...,\\alpha_K \\in \\mathbb{N}} -H\\left(X \\; \\left| \\; \\alpha_{0} X+\\sum_{k=1}^K 
\\alpha_k Z_k \\right. \\right).\n\\end{align}\nObserve that the pmf of the random variable $Z_{\\alpha} = \\sum_{i = 1}^K \\alpha_i Z_i$ has probability $(1-p)^{K}$ at its highest support value. \nMore precisely, $\\sum_{i=1}^K \\alpha_i$ is the maximum value that $Z_\\alpha$ can take, which occurs with probability $(1-p)^{K}$.\nA relaxation to (\\ref{eq:integral_alpha_opt_prob}) can be then obtained by ignoring all constraints on the pmf of $Z_\\alpha$ except the constraint on the maximum pmf value.\nThus, for a fixed value of $\\alpha_0$, a relaxation to (\\ref{eq:integral_alpha_opt_prob}) is given by\n\\begin{align}\n \\label{eq:relax_inf1}\n \\min_{Q} \\; & -H\\left(X \\; | \\; \\alpha_0 X+Q\\right)\n \\\\\\text{subject to:} \\; & \n \\nonumber \n \n Q \\text{ is an integer-valued random variable} \\nonumber\n \\\\ & \\text{$Q$ is independent of $X$} \\nonumber\n \n \\\\ & \\Pr(Q = 0) = (1-p)^K \\nonumber\n \\\\ & \\Pr(Q = i) = 0 \\; \\text{ for $i > 0$}.\n \\nonumber\n\\end{align}\n\\iscomment{Should we use $Q$ instead of $Q$ to match pmf?}\nNotice that the last constraint is equivalent to saying that $0$ is the maximum value of $Q$.\nThis can be assumed without loss of generality, since adding a constant to $Q$ does not change the value of $H(X\\,|\\,\\alpha_0X+Q)$.\nFurthermore, as we prove in Appendix~\\ref{app:alpha0}, \nfixing $\\alpha_0 = 1$ in (\\ref{eq:relax_inf1}) above \ndoes not change the optimal value, and we assume throughout that $\\alpha_0=1$.\n \n\nLet $q_{(i)}$ be the pmf of $Q$; i.e., $q_{(i)} = \\Pr(Q=i)$ for $i \\leq 0$.\nIn order to write (\\ref{eq:relax_inf1}) explicitly in terms of $q$, we define \n\\begin{align}\n g_j(q) & \\triangleq -H(X|X+Q=j+1)\\Pr(X+Q=j+1) \\nonumber \\\\\n & = \\Pr(Q+X=j+1) \\nonumber \\\\\n & \\quad \\times \\bigg[ \\frac{(1-p)q_{(j)}}{\\Pr(Q+X=j+1)} \\log\\left(\\frac{(1-p)q_{(j)}}{\\Pr(Q+X=j+1)}\\right) \\nonumber \\\\ & \\quad + \\frac{p q_{(j+1)}}{\\Pr(Q+X=j+1)} \\log\\left(\\frac{p 
q_{(j+1)}}{\\Pr(Q+X=j+1)}\\right) \\bigg] \\nonumber \\\\ \n \n \n \n & = (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n \\nonumber \n \\\\ & \\quad + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right).\n\\end{align}\nWritten in terms of $g_j(q)$ and with $\\alpha_0 = 1$, (\\ref{eq:relax_inf1}) is given by\n\\begin{align}\n\\label{eq:relax_inf2}\n\\min_{q_{(-i)} \\geq 0 : \\; i \\in \\mathbb{N}_0 } \\; & \\sum_{j \\in \\{-n : \\; n \\in \\mathbb{N}\\}} g_j(q)\n\\\\ \\text{subject to:} \\; & \\quad q_{(0)} = (1-p)^K \\nonumber \\\\ & \\quad \\sum_{j=0}^\\infty q_{(-j)} = 1. \\nonumber\n\\end{align} \n\\kmcomment{I removed ``Note that the conditional entropy in the objective function is in a simplified form.'' It meant we used Bayes rule in the conditional entropy }\n\\iscomment{What does this mean?}\n\n\\iscomment{I rewrote the following a bit:}\n\nObserve that (\\ref{eq:relax_inf2}) is a convex minimization problem with infinitely many variables. \nFor such problems, to the best of our knowledge, a solution to the KKT conditions is not in general guaranteed to yield an optimal solution.\nFor that reason, we do not seek to directly solve the KKT conditions.\nInstead, we first consider a \\emph{support-constrained} version of (\\ref{eq:relax_inf2}), where the support of the pmf of $Q$ is restricted to $\\{-n,...,0\\}$, and then consider letting $n \\to \\infty$.\nThe support-constrained version of (\\ref{eq:relax_inf2}) is given by \n\\begin{align}\n\\label{eq:relax_const}\n\\min_{q_{(i)} \\geq 0 : \\; i \\in \\{-n-1,...,0\\} } \\; & \n\\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} g_j(q) \\\\ \\text{subject to:} \\; & \nq_{(0)} = (1-p)^K, \\quad q_{(-n-1)} = 0 \\nonumber \\\\\n\\quad & \\; \\sum_{j=0}^\\infty q_{(-j)} = 1. 
\\nonumber\n\\end{align} \nNote that this is no longer a relaxation to the original problem.\n\\iscomment{say this is no longer a relaxation}\n\n\nIn order to ensure that the derivative of the objective function exists at all feasible $q_{(i)},$ and ultimately find a lower bound on the optimal value of (\\ref{eq:relax_const}) through perturbation analysis, we consider the problem below where we change the constraint in (\\ref{eq:relax_const}) from $q_{(i)} \\geq 0$ to $q_{(i)} > 0$ and the constraint $q_{(-n-1)} = 0$ to $q_{(-n-1)} = \\epsilon$ where $\\epsilon \\in \\mathbb{R}_{>0}:$\n\\begin{align}\n\\label{eq:relax_const_posq}\n& \\min_{q_{(i)} > 0 : \\; i \\in \\{-n-1,...,0\\} }\n\\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} g_j(q)\n\\\\ & \\text{subject to:} \\nonumber \\\\ & \n\\quad q_{(0)} = (1-p)^K, \\quad q_{(-n-1)} = \\epsilon, \\quad \\sum_{j=0}^\\infty q_{(-j)} = 1. \\nonumber\n\\end{align} \n\nLet $v^*$ be the optimal value of (\\ref{eq:relax_const}) and let $v_{\\epsilon}^*$ be the optimal value of (\\ref{eq:relax_const_posq}) for a given $\\epsilon.$ Due to the continuity of the objective function in (\\ref{eq:relax_const}), we have $ v^* \\geq \\inf_{\\epsilon \\in \\mathbb{R}_{>0}} v_{\\epsilon}^*.$ More precisely, for any solution to (\\ref{eq:relax_const}), it follows that (\\ref{eq:relax_const_posq}) can get arbitrarily close to the corresponding value as $\\epsilon \\to 0$ due to the continuity of the objective function. \n\n\n\nWhile we do not know the solution to (\\ref{eq:relax_const}) or (\\ref{eq:relax_const_posq}), we will perturb (\\ref{eq:relax_const_posq}) to form a problem we can solve analytically, then use perturbation analysis to lower bound the optimal value of (\\ref{eq:relax_const_posq}), and ultimately lower bound the optimal value of (\\ref{eq:relax_const}). 
Let $\\beta \\triangleq (1-p)^K.$ The perturbed version of (\\ref{eq:relax_const_posq}) is given by \n\\begin{align}\n\\label{eq:relax_const_pert_posq}\n& \\min_{q_{(i)} > 0 : \\; i \\in \\{-n-1,...,0\\} }\n\\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} g_j(q) \\\\\t& \n\\text{subject to:} \\nonumber \\\\\n& q_{(0)} = \\beta , \\quad q_{(-n-1)} = \\beta (1 - \\beta )^{n+1}, \n\\nonumber \\\\ & \\sum_{j=0}^\\infty q_{(-j)} = 1 - (1 - \\beta )^{n+2} \\nonumber\n\\end{align} \n\\iscomment{change to $\\beta$ notation. emphasize perturbation goes to $0$ as $n \\to \\infty$} \nwhere we have changed the constraint on the probability at the minimum support value, and the constraint on the sum of all the probabilities. Observe that as $n$ increases, we have that $\\beta (1 - \\beta )^{n+1}\\to 0$ and $1 - (1 - \\beta )^{n+2} \\to 1.$ In other words, the constraints in (\\ref{eq:relax_const_pert_posq}) approach the constraints in (\\ref{eq:relax_const}).\nWe can solve (\\ref{eq:relax_const_pert_posq}) by solving the KKT conditions since the problem is convex, Slater's condition holds, and the objective function and constraint functions are differentiable on the domain $q_{(i)} > 0.$ \n\nWe proceed to find the solution to the KKT conditions for (\\ref{eq:relax_const_pert_posq}). Let $f_0(q)$ be the objective function of (\\ref{eq:relax_const_pert_posq}). The Lagrangian is given by \n\\begin{align} \n&L(q, v, \\lambda) = f_0(q) \n\\nonumber \\\\ & + v_1 \\left(\\sum_i q_{(i)} - 1 + (1 - \\beta)^{n+2} \\right) \n\\nonumber \\\\ & + v_2 \\left(q_{(0)} - \\beta \\right) \n\\nonumber \\\\ & + v_3 \\left(q_{(-n-1)} - \\beta (1 - \\beta)^{n+1} \\right). 
\n\\end{align}\nBy Lemma \\ref{lem:solve_KKT}, a solution to the KKT conditions is given by \n\\begin{align}\n\\label{eq:KKT_conditions_sol}\n& q_{(-i)} = \\beta (1 - \\beta)^{i} \\text{ for } i \\in [n+1] \\cup \\{0\\} \n\\nonumber \\\\ & v_1 = - p \\log \\left( \\frac{p}{(1-p)(1 - \\beta) + p} \\right) \n\\nonumber \\\\ & - (1-p)\\log \\left( \\frac{(1-p)(1 - \\beta)}{(1-p)(1 - \\beta) + p} \\right)\n\\nonumber \\\\ & v_2 = (1-p)\\log \\left( \\frac{(1-p)(1 - \\beta)}{(1-p)(1 - \\beta) + p} \\right) \n\\nonumber \\\\ & v_3 = p \\log \\left( \\frac{p}{(1-p)(1 - \\beta) + p} \\right),\n\\end{align}\nwhich is then an optimal solution to (\\ref{eq:relax_const_pert_posq}). \nApplying the geometric sum formula and simplifying, the optimal value for (\\ref{eq:relax_const_pert_posq}) is \n\\begin{align} \n& -(1-p) \\beta (1-\\beta) \\frac{1 - (1-\\beta)^{n+1}}{1 - (1-\\beta)} \\log \\left( \\frac{(1-p) (1-\\beta) + p }{(1-p) (1-\\beta)} \\right) \n\\nonumber \\\\ & - p \\beta \\frac{1 - (1-\\beta)^{n+1}}{1 - (1-\\beta)} \\log \\left( \\frac{(1-p) (1-\\beta) + p }{ p } \\right).\n\\end{align}\n\nUsing the perturbation analysis from Section 5.6.1 of \\cite{boyd}, we see that the optimal value of (\\ref{eq:relax_const_posq}) is lower bounded by\n\\begin{align}\n\\label{eq:relax_const_posq_bound}\n& -(1-p) \\beta (1-\\beta) \\frac{1 - (1-\\beta)^{n+1}}{1 - (1-\\beta)} \\log \\left( \\frac{(1-p) (1-\\beta) + p }{(1-p) (1-\\beta)} \\right) \n\\nonumber \\\\ & - p \\beta \\frac{1 - (1-\\beta)^{n+1}}{1 - (1-\\beta)} \\log \\left( \\frac{(1-p) (1-\\beta) + p }{ p } \\right)\n\\nonumber \\\\ & - v_1^* \\left( (1 - \\beta)^{n+2} \\right) \n\\nonumber \\\\ & - v_3^* \\left(- \\beta (1 - \\beta)^{n+1} +\\epsilon \\right) \n\\end{align} \nwhere $v_1^*$ and $v_3^*$ are the optimal lagrange multipliers for (\\ref{eq:relax_const_pert_posq})\nwe determined through the KKT conditions.\n\nTaking the infimum of (\\ref{eq:relax_const_posq_bound}) over $\\epsilon \\in \\mathbb{R}_{>0}$ yields 
the following lower bound on (\\ref{eq:relax_const}):\n\n\\begin{align}\n\\label{eq:relax_const_bound}\n& -(1-p) \\beta (1-\\beta) \\frac{1 - (1-\\beta)^{n+1}}{1 - (1-\\beta)} \\log \\left( \\frac{(1-p) (1-\\beta) + p }{(1-p) (1-\\beta)} \\right) \n\\nonumber \\\\ & - p \\beta \\frac{1 - (1-\\beta)^{n+1}}{1 - (1-\\beta)} \\log \\left( \\frac{(1-p) (1-\\beta) + p }{ p } \\right)\n\\nonumber \\\\ & - v_1^* \\left( (1 - \\beta)^{n+2} \\right) \n\\nonumber \\\\ & - v_3^* \\left(- \\beta (1 - \\beta)^{n+1} \\right). \n\\end{align} \n\nThe sequence of optimal values returned by (\\ref{eq:relax_const}) is monotonically decreasing in $n$ clearly. Thus, taking the limit of the lower bound (\\ref{eq:relax_const_bound}) as $n \\to \\infty$ and simplifying yields the following lower bound on $-H(X \\; | \\; \\alpha_0 X + Z_{\\alpha})$ for any choice of $\\alpha_i$'s: \\begin{align} \n\\label{eq:lower_bound_neg_cond_ent}\n& -(p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{1 - (1-p)^{K+1} }{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p \\log \\left( \\frac{1 - (1-p)^{K+1} }{p} \\right).\n\\end{align}\n\nFinally, observe that\n$-H(X \\; ; \\; X - G)$ is equal to (\\ref{eq:lower_bound_neg_cond_ent})\nwhere $G$ is a geometric random variable with\n$\\Pr(G = i) = (1-p)^K(1 - (1-p)^K)^i$ for $i \\in \\mathbb{N}_0.$\nThis concludes the proof of Theorem~\\ref{thm:main_lower_bound}.\n\n\n\\section{Main Results}\n\n\n\nIn order to tackle the discrete optimization problem in (\\ref{eq:main2}) in the entire interval $p \\in [0,0.5]$, we seek to bound its optimal solution.\nOur main result is the following lower bound.\n\n\n\n\\begin{theorem} \\label{thm:main_lower_bound}\n\tFor any $K\\in \\mathbb{N}$ and $p \\in [0, 0.5],$ we have \n\t\\begin{align} \n\n\t& \\min_{\\alpha \\in \\mathbb{R}^{K+1} \\; : \\; \\alpha > 0}\n\tI(X \\; ; \\; \\alpha_0 X +Z_\\alpha) \n\t\\geq \n\tI(X \\; ; \\; X + G),\n\t\\label{eq:mainthm}\n\t\\end{align}\n\twhere $G$ is a geometric 
random variable, independent of $X$, with\n\t$\\Pr(G = i) = (1-p)^K(1 - (1-p)^K)^i$ for $i \\in \\mathbb{N}_0$.\n\\end{theorem}\n\nIntuitively, Theorem~\\ref{thm:main_lower_bound} says that the noise distribution of $G$ is worse than the worst-case noise $Z_\\alpha$.\nThis lower bound can in fact be explicitly computed as\n\\begin{align}\n& I(X \\; ; \\; X + G)\n= H(p) \n\\nonumber \\\\ & - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{1-(1-p)^{K+1} }{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p \\log \\left( \\frac{1-(1-p)^{K+1}}{p} \\right).\n\\label{eq:closedform}\n\\end{align}\nObserve that this formula is quickly computable for any value of $K$ and $p,$ making it attractive from a computational standpoint. To assess how tight the lower bound is, we empirically compare it to an upper bound that is computed with a greedy algorithm in Figure \\ref{fig:greedy_versus_bound_K15}. For a given $p$ and $K,$ the greedy algorithm chooses $\\alpha_0 = 1, \\; \\alpha_1 = 1,$ and sequentially chooses \\begin{align} \\alpha_{j} = \\argmin_{a \\in \\mathbb{N} \\; : \\; 1 \\leq a \\leq 1 + \\sum_{i = 1}^{j-1} \\alpha_i} I(X \\; ; \\; X + a Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)\n\\end{align}\nfor $2 \\leq j \\leq K$. At the $j$th step we consider all values of $\\alpha_{j}$ between $1$ and $1 + \\sum_{i = 1}^{j-1} \\alpha_i$ because setting $\\alpha_j > 1 + \\sum_{i = 1}^{j-1} \\alpha_i$ can not decrease the mutual information from when $\\alpha_j = 1 + \\sum_{i = 1}^{j-1} \\alpha_i.$\n\n\nAs seen in Figure \\ref{fig:greedy_versus_bound_K15}, $I(X \\; ; \\; X + G)$ serves as a tight lower bound when compared with the upper bound. \nA similar picture can be obtained for other values of $K$.\n\\longversion{\nTherefore, we can think of $I(X \\; ; \\; X - G)$ and its closed-form expression (\\ref{eq:closedform}) as an approximation to the solution of (\\ref{eq:main2}). 
}\nThis is surprising, because for a given $K$, it is not possible in general to choose $\\alpha_i$s to make the pmf of $\\sum_{i = 1}^{j-1} \\alpha_i Z_i$ look like \nthe pmf of $G$ (or a shifted version of it), as illustrated in Figure~\\ref{fig:greedy_versus_geom_pmf_K15}.\n\n\n\n\\begin{figure}[t]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\nlegend cell align={left}, \nylabel = mutual information, \nxlabel = minor allele frequency (p),\nymax = .06]\n\n\\addplot [\nline width=1.5pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt2_greedy.dat};\n\\addlegendentry{upper bound (greedy)}\n\n\\addplot [\nline width=1.5pt,\ncolor=blue!80!black,\ndashed\n] table[x index=0,y index=1] {data\/plt2_bound.dat};\n\\addlegendentry{lower bound}\n\n\\end{axis} \n\\end{tikzpicture}\n\\caption{Comparison between the lower bound from (\\ref{eq:mainthm}) and the upper bound provided by the greedy algorithm for $K = 15$.}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\n\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\nlegend cell align={left}, \nlegend pos = north east,\nymax = .05]\n\n\n\\addplot +[only marks,\n\t\tpoint meta=explicit symbolic,\n\t\tcolor=black!80!black]\ntable[x index=0,y index=1] {data\/plt5_greedy_pmf_K15.dat};\n\n\\addlegendentry{greedy pmf}\n\n\\addplot +[only marks,\n\t\tpoint meta=explicit symbolic,\n\tcolor=black!80!black]\ntable[x index=0,y index=1] {data\/plt5_geom_pmf_K15.dat};\n\\addlegendentry{geometric pmf}\n\n\\end{axis} \n\\end{tikzpicture}\n\n\n\\caption{\nComparison of the pmf of $Z_\\alpha$ produced by the greedy algorithm ($\\alpha = [1, 1, 1, 1, 2, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 19]$) and the (truncated) Geometric pmf in the lower bound (\\ref{eq:mainthm}), for $K=15$ and $p=0.25$.\n}\n\\label{fig:greedy_versus_geom_pmf_K15}\n\\end{figure}\n\n\n\n\nWhile finding the 
greedy solution $(\\alpha_0,...,\\alpha_K)$ requires $\\Omega(2^K)$ time in the worst case, similar plots to Figure \\ref{fig:greedy_versus_bound_K15} can be obtained for larger values of $K$ using a more computationally efficient variation of the greedy algorithm.\n\n\nAt a high level, we prove the lower bound\nin Theorem \\ref{thm:main_lower_bound} by forming a convex relaxation of the minimization problem, perturbing the relaxation to form a problem that is analytically solvable using KKT conditions, and using perturbation analysis to find a lower bound on the relaxation. \n\n\n\n\n\n\\longversion{\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Ratio: Upper Bound to Lower Bound},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nlegend pos = north west]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K15.dat};\n\\addlegendentry{K = 15}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K10.dat};\n\\addlegendentry{K = 10}\n\n\\addplot [\nline width=1pt,\ncolor=green!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K5.dat};\n\\addlegendentry{K = 5}\n\n\\end{axis} \n\\end{tikzpicture}\n\n\n\\caption{}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\\section{Main Result}\n\n\n\nOur main result is stated below.\n\n\\begin{theorem} \\label{thm:main_lower_bound}\n\tFor any $K\\in \\mathbb{N},$ $p \\in [0, 0.5],$ we have that \n\t\\begin{align} \n\t& \\min_{\\alpha_i \\in \\mathbb{N}, \\; i \\in [K]\\cup\\{0\\}} I(X \\; ; \\; \\alpha_0 X + \\sum_{i=1}^{K} \\alpha_i Z_i) \n\t\\geq \n\tI(X \\; ; \\; X - 2G)\n\t\\end{align}\n\twhere $G$ is a geometric random variable with success probability equal to $(1-p)^K$ and support equal to $\\mathbb{N} \\cup 
\\{0\\}.$\n\\end{theorem}\nThe formula for $I(X \\; ; \\; X - 2G)$ is given by \n\\begin{align}\n& I(X \\; ; \\; X - 2G)\n\\nonumber \\\\ & = H(p) \n\\nonumber \\\\ & - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\\end{align}\nObserve that the formula is quickly computable for any value of $K$ and $p,$ making it attractive from a computational standpoint. To assess how tight the lower bound is, we empirically compare it to an upper bound that is computed with a greedy algorithm in Figure \\ref{fig:greedy_versus_bound_K15}. For a given $p$ and $K,$ the greedy algorithm chooses $\\alpha_1 = 1,$ and sequentially chooses \\begin{align} \\alpha_{j} = \\argmin_{\\alpha \\in \\mathbb{N} \\; : \\; 1 \\leq \\alpha \\leq 1 + \\sum_{i = 1}^{j-1} \\alpha_i} I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)\n\\end{align}\nfor $2 \\leq j \\leq K.$ As seen in Figure \\ref{fig:greedy_versus_bound_K15}, $I(X \\; ; \\; X - 2G)$ serves as a very tight lower bound when compared with the upper bound. We therefore view $I(X \\; ; \\; X - 2G)$ as an approximation to the original problem.\n\nWhile the greedy algorithm requires $\\Omega(2^K)$ time in the worst case, similar plots to Figure \\ref{fig:greedy_versus_bound_K15} can be obtained for larger values of $K$ using a more computationally efficient variation of the greedy algorithm that is discussed in the longer version of this paper.\n\nIn order to prove the lower bound on the minimization of $I(X \\; ; \\; \\alpha_0 X + \\sum_{i=1}^K \\alpha_i Z_i)$ in Theorem \\ref{thm:main_lower_bound}, we form a convex relaxation of the minimization problem, perturb the relaxation to form a problem that is analytically solvable using KKT conditions, and use perturbation analysis to find a lower bound on the relaxation. This is then a lower bound on the original problem. 
The full proof is in the Section V.\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Comparison of Bounds: $K = 15$},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nymax = .06]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt2_greedy.dat};\n\\addlegendentry{upper bound (greedy)}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black,\ndotted\n] table[x index=0,y index=1] {data\/plt2_bound.dat};\n\\addlegendentry{lower bound}\n\n\\end{axis} \n\\end{tikzpicture}\n\\caption{}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\n\n\n\n\n\n\n\n\\section{Efficiently Computable Schemes}\n\n\\rdcomment{this may sound stupid, but i think it warrants a sentence here. `Theorem 1 gives us a lower bound on the mutual information between $X$ and ..., which would be the mixing ratio that provides the highest level of privacy. However, it does not provide any mixing scheme for approaching this lower bound.'}\n\nWhile much faster than brute force search, the greedy algorithm used to obtain the upper bound requires $\\Omega(2^K)$ time in the worst case. At a high level, this is due to the fact that at the $j$th step in the algorithm, computation of $I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)$ requires use of each support value in the PMF of $\\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i,$ and there are $\\Omega(2^j)$ support values in the worst case. This motivates the design of efficiently computable schemes, which can then be evaluated using the lower bound in Theorem \\ref{thm:main_lower_bound}.\n\nPerhaps the most natural approach is given by the uniform scheme, which we define to set $\\alpha_i = 1$ for all $i \\in [K]\\cup\\{0\\}.$ This is the scheme used by the authors of \\cite{Maddah-Ali} to obtain their results. 
The scheme is explicitly defined given $K,$ so no algorithm is needed to compute it. The performance of the scheme for a given $p$ and $K$ can be computed in $O(K)$ time using the formula in Lemma \\ref{lem:uniform_schem_formula} which is given in the appendix.\n\nThe uniform scheme is important because it is optimal as $p \\to 0$ as proved in Lemma \\ref{lem:optimality_of_uniform}\nand exemplified by the optimal curve in Figure \\ref{fig:optimal_scheme_K5_minor_allele}.\n\n\\begin{lemma}\n\\label{lem:optimality_of_uniform}\nFor any $K \\in \\mathbb{N},$ there exists some $p^* > 0$ such that the uniform scheme is optimal for $p < p^*.$\n\\end{lemma}\n\n\\rdcomment{i changed epsilon to $p^*$ here. epsilon is typically taken to mean a very small constant, which makes this result feel weak, i.e. this result is only true when p is arbitrarily small.}\n\nAt $p= 0.5,$ in Figure \\ref{fig:optimal_scheme_K5_minor_allele}, the optimal solution is given by the binary scheme, which we define to set $\\alpha_0 = 1$ and $\\alpha_i = 2^{i-1}$ for all $i \\in [K].$ Similar to the uniform scheme, the binary scheme is explicitly defined given $K.$ Also, similar to the uniform scheme, there exists a formula for computing its performance given $p$ and $K$ in $O(K)$ time. This formula is given in Lemma \\ref{lem:binary_scheme_performance} in the appendix. It is interesting that such a formula exists because the PMF of $\\sum_{i=1}^K 2^{i-1} Z_i$ has $2^K$ support values. 
\n\nMost importantly, the optimality of the binary scheme at $p = 0.5$ for $K=5$ shown in Figure \\ref{fig:optimal_scheme_K5_minor_allele} generalizes to all $K$ as proved in Lemma \\ref{lem:optimality_of_binary}.\n\n\\begin{lemma} \\label{lem:optimality_of_binary}\n\tFor any $K \\in \\mathbb{N},$ the binary scheme is optimal for $p = 0.5.$\n\\end{lemma}\n\nBecause the uniform scheme is optimal for $p \\to 0$ and the binary scheme is optimal for $p = 0.5,$ it is natural to try and combine these schemes to interpolate the performance for $p$ in the range $0 < p < 0.5.$ We will obtain a lower bound for this support constrained optimization problem, and use it to obtain a lower bound on the original optimization problem. \n\t\\iscomment{unclear why this can be done}\n\t\n\tTo lower bound the support constrained optimization problem, we work with a relaxation of the support constrained optimization problem. Instead of requiring that the PMF for the noise random variable $D$ be generated by adding the sum of scaled random variables with support $\\{-1, 1\\},$ we relax the problem to only require that the probability at the highest support value of $D$'s PMF is equal to $(1-p)^K$ and the distance between each pair of support values in $D$'s PMF is a multiple of two (this holds for any valid PMF of $Z_{\\alpha}$). \n\tDue to Lemma \\ref{lem:alpha0_condition}, given $\\alpha_0,$ any solution can be transformed into a solution whose PMF has support values in $\\{2t \\alpha_0 : \\; t \\in \\mathbb{Z}\\}$ without increasing the mutual information. Therefore, for a fixed $\\alpha_0,$ it suffices to only consider PMFs that have support values in $\\{2t \\alpha_0 : \\; t \\in \\mathbb{Z}\\}$ in the optimization. 
If $\\alpha_0 = a \\in \\mathbb{N},$ any solution random variable $D$ with support values in $\\{2t a : \\; t \\in \\mathbb{Z}\\}$ can be transformed into a solution with the same mutual information, $\\alpha_0 = 1,$ and support values in $\\{2t : \\; t \\in \\mathbb{Z}\\}.$ This is accomplished by dividing $D$ by $a.$ Therefore, we analyze the relaxation with $\\alpha_0 = 1.$\n\t\n\tWe then obtain a lower bound on this relaxation of the support constrained optimization problem. Finally, we take the limit of the lower bound as $n$ goes to infinity in order to obtain a lower bound on the original minimization problem.\n\t\n\tThe support-constrained relaxation of the original problem with $\\alpha_0 = 1$ is given below. The objective function is a simplified form of $-H(X \\; | \\; X+D)$ where $D$ is the noise distribution we are optimizing over that is constrained to have probability of $(1-p)^K$ at the maximum support value, and each pair of support values separated by a multiple of two. Here, $q_{(0)}$ is the probability at the maximum support value of $D$ and $q_{(-i)}$ is the probability at the $i$th integer below the maximum support value of $D.$ Observe that the problem is a convex minimization problem.\n\n\t\\iscomment{not clear how this was obtained:}\n\t\\begin{align}\n\t& \\min_{q_{(-2n-2)}, \\; ..., \\; q_{(0)}} \\sum_{j \\in \\{-2n-2, \\; ...,\\; -2\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+2)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+2)} \\log \\left(\\frac{p q_{(j+2)}}{(1-p)q_{(j)} + p q_{(j+2)}} \\right) \\\\\n\t\\nonumber \\\\ & \\text{subject to:} \\quad q_{(j)} \\geq 0 \\; \\; \\forall j, \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & \\quad q_{(-2n-2)} = 0, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1. \\nonumber\n\t\\end{align} \n\t\n\t\\iscomment{do we need the full formula for the conditional entropy above? 
makes it harder to parse}\n\tWhile we cannot solve this problem analytically, we will perturb it to form an optimization problem that we can solve analytically. We will then use perturbation analysis to obtain a lower bound on the problem above.\n\t\n\t\n\tThe perturbed problem is given below where we have changed the constraint on the probability at the minimum support value, and the constraint on the sum of all the probabilities.\n\t\n\t\\begin{align}\n\t& \\min_{q_{(-2n-2)}, \\; ..., \\; q_{(0)}} \\sum_{j \\in \\{-2n-2, \\; ...,\\; -2\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+2)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+2)} \\log \\left(\\frac{p q_{(j+2)}}{(1-p)q_{(j)} + p q_{(j+2)}} \\right) \\\\\n\t\\nonumber \\\\\t& \\text{subject to:} \\quad q_{(j)} \\geq 0 \\; \\; \\forall j, \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & \\quad q_{(-2n-2)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2} \\nonumber\n\t\\end{align} \n\t\n\t\n\tLet $f_0(q)$ be the objective function above. The Lagrangian is given by \n\t\n\t\\begin{align} \n\t&L(q, v, \\lambda) = f_0(q) \n\t\\nonumber \\\\ & + v_1 \\left(\\sum_i q_{(i)} - 1 + (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & + v_2 \\left(q_{(0)} - (1-p)^K \\right) \n\t\\nonumber \\\\ & + v_3 \\left(q_{(-2n-2)} - (1-p)^K (1 - (1-p)^K)^{n+1} \\right) \n\t\\nonumber \\\\ & - \\sum_i \\lambda_i q_{(i)}. 
\n\t\\end{align}\n\tThe derivative of $f_0(q)$ with respect to $q_{(j)}$ is given by \n\t\\begin{align}\n\t& p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-2)} + p q_{(j)}} \\right) \\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+2)} + (1-p) q_{(j)}} \\right).\n\t\\end{align} \n\tfor $j \\in \\{-2n, \\; ..., \\; -2\\}$ and the KKT conditions are therefore given by \n\t\\begin{align}\n\t& q_{(j)} \\geq 0 \\; \\; \\forall j, \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & q_{(-2n-2)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2}\n\t\\nonumber \\\\ & \\lambda_j \\geq 0 \\quad \\lambda_j q_{(j)} = 0 \\quad \\forall j \n\t\\nonumber \\\\ & (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+2)} + (1-p) q_{(j)}} \\right) - \\lambda_j + v_1 + v_3 = 0 \\quad \n\t\\nonumber \\\\ & \\text{for } j = - 2n -2\n\t\\nonumber \\\\ & \tp \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-2)} + p q_{(j)}} \\right) \n\t\\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+2)} + (1-p) q_{(j)}} \\right) - \\lambda_j + v_1 = 0 \n\t\\nonumber \\\\ & \\forall j \\in \\{-2n, \\; ..., \\; -2\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-2)} + p q_{(j)}} \\right) - \\lambda_j + v_1 + v_2 = 0 \n\t\\nonumber \\\\ & \\text{for } j = 0\n\t\\end{align}\n\t\n\t\\rdcomment{stupid question: can you tell me a little more why (12) is this derivative? at a glance, it seems to be that there's a ton of term cancellations... (it's like the terms inside the log don't depend on $q_j$ with the way the derivative is written, so all those terms cancel?) i fear that a unfastidious reviewer may just claim this is a fundamental error and give a poor review without analyzing it deeply. 
(this happens sometimes to me.)}\n\t\n\tThe last three conditions can be rewritten as \n\t\\begin{align} \n\t& (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+2)}}{q_{(j)}} + (1-p) } \\right) - \\lambda_j + v_1 + v_3 = 0 \n\t\\nonumber \\\\ & \\text{for } j = - 2n - 2\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p }{(1-p)\\frac{q_{(j-2)}}{q_{(j)}} + p } \\right) + (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+2)}}{q_{(j)}} + (1-p) } \\right) \n\t\\nonumber \\\\ & - \\lambda_j + v_1 = 0 \\quad \\forall j \\in \\{-2n, \\; ..., \\; -2\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p }{(1-p) \\frac{q_{(j-2)}}{q_{(j)}} + p } \\right) - \\lambda_j + v_1 + v_2 = 0 \\quad \\text{for } j = 0\n\t\\end{align}\n\twhich shows that if the ratio $\\frac{q_{(j-2)}}{q_{(j)}}$ between consecutive variables is the same for all $j$ and $\\lambda_j = 0 $ for all $j,$ then $v_1,$ $v_2,$ and $v_3$ can be chosen so that all of these derivatives equal $0$ for all $j.$ Picking \\[\\frac{q_{(j-2)}}{q_{(j)}} = (1 - (1-p)^K),\\] a solution to these equations is then\n\t\\begin{align}\n\t& q_{(-2i)} = (1-p)^K (1 - (1-p)^K)^{i} \n\t\\nonumber \\\\ & \\quad \\quad \\text{for } i \\in \\{0, \\; 1, \\; ..., \\; n+1 \\} \n\t\\nonumber \\\\ & \\lambda_i = 0 \\quad \\forall i\n\t\\nonumber \\\\ & v_1 = - p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & - (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\nonumber \\\\ & v_2 = (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & v_3 = p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\end{align}\n\t\n\tBecause this is a convex optimization problem, this solution is optimal since it satisfies the KKT conditions.\n\t\n\tThe optimal value for the optimization problem is therefore given by \n\t\\begin{align} \n\t& \\sum_{i = 0}^{n} -(1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{(1-p) \\beta 
\\alpha^{i+1}} \\right) \n\t\\\\ & - p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{ p \\beta \\alpha^i} \\right)\n\t\\\\ & = \\sum_{i = 0}^{n} -(1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\\\ & - p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\end{align}\n\twhere $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K.$ \n\tApplying the geometric sum formula and simplifying, we obtain \n\t\\begin{align} \n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right).\n\t\\end{align}\n\t\n\t\n\t\n\n\n\n\n\n\n\t\n\tUsing the perturbation analysis from Section 5.6.1 of \\cite{boyd}, we see that the optimal value of the support constrained relaxation of the original problem is lower bounded by\n\t\\begin{align}\n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\nonumber \\\\ & - v_1^* \\left( (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & - v_3^* \\left(- (1-p)^K (1 - (1-p)^K)^{n+1} \\right) \n\t\\end{align} \n\twhere $v_1^*$ and $v_3^*$ are the optimal lagrange multipliers for the perturbed problem which we determined above through the KKT conditions.\n\t\n\tThe sequence of optimal values returned by the support constrained relaxation of the original problem is monotonically decreasing in $n$ clearly. 
Thus, taking the limit of the bound as $n \\to \\infty$ and simplifying yields the following lower bound on $-H(X \\; | \\; \\alpha_0 X + Z_{\\alpha})$ for any choice of $\\alpha_i$'s: \t\\begin{align} \n\t& -(p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right).\n\t\\end{align}\n\t\n\tThus, a lower bound on mutual information is\n\t\\begin{align} \n\t& H(p) - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\t\\end{align}\n\tfor any choice of $\\alpha_i$'s.\n\t\n\tFinally, observe that\n\t\\begin{align}\n\t& H(X \\; | \\; X - 2G) \n\t\\nonumber \\\\ & =\\sum_{i = 0}^{\\infty} (1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{(1-p) \\beta \\alpha^{i+1}} \\right) \n\t\\nonumber \\\\ & + p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{ p \\beta \\alpha^i} \\right)\n\t\\nonumber \\\\ & = (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & + p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\t\\end{align}\n\twhere $G$ is a geometric random variable with success probability $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\},$ and $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K$ as before.\n\\end{IEEEproof}\n\n\\begin{lemma}\n \\label{lem:alpha0_condition}\n\tLet $X$ be a random variable that equals $1$ with probability $(1-p)$ and equals $-1$ with probability $p.$ Let $\\alpha_0 \\in \\mathbb{N}.$ \n\tLet $Z$ be any random variable such that its PMF has integral support and maximum support value at $t = 0.$ Let \n\t\\begin{align}\n\t& \\hat{Z} = Z - (Z \\mod{2\\alpha_0}) \n\t\\end{align} \n\n\n\tWe then have 
that \n\t\\begin{align}\n\t& I(X ; \\alpha_0 X + \tZ) \\geq I(X ; \\alpha_0 X + \\hat{Z}). \n\t\\end{align}\t\n\\end{lemma}\n\n\\begin{IEEEproof}\n\tLet $S = Z \\mod{2\\alpha_0}.$ We then have that \n\t\\begin{align} \n\t\t& I(X ; \\; \\alpha_0 X + \tZ)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z} + S)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z}, \\; S)\n\t\t\\nonumber \\\\ & = I(X ; \\; S) + I(X ; \\; \\alpha_0 X + \\hat{Z} \\; | \\; S)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z} \\; | \\; S)\n\t\t\\nonumber \\\\ & = H(X \\; | \\; S) - H(X \\; | \\; \\alpha_0 X + \\hat{Z}, \\; S) \n\t\t\\nonumber \\\\ & = H(X) - H(X \\; | \\; \\alpha_0 X + \\hat{Z}, \\; S)\n\t\t\\nonumber \\\\ & \\geq H(X) - H(X \\; | \\; \\alpha_0 X + \\hat{Z})\n\t\t\\nonumber \\\\ & = I(X \\; ; \\; \\alpha_0 X + \\hat{Z})\n\t\\end{align} \n\twhere the third line follows because given $\\alpha_0 X + \\hat{Z} + S,$ we have that \n\t\\begin{align}\n\t S = ((\\alpha_0 X + \\hat{Z} + S) - \\alpha_0) \\mod 2 \\alpha_0 \n\t\\end{align}\n\tand\n \\begin{align}\n\t \\alpha_0 X + \\hat{Z} = (\\alpha_0 X + \\hat{Z} + S) - S.\n\t\\end{align}\n\t\n\t\\end{IEEEproof}\n\n\\section{Main Result}\n\n\n\nOur main result is stated below.\n\n\\begin{theorem} \\label{thm:main_lower_bound}\n\tFor any $K\\in \\mathbb{N},$ $p \\in [0, 0.5],$ we have that \n\t\\begin{align} \n\t& \\min_{\\alpha_i \\in \\mathbb{N}, \\; i \\in [K]\\cup\\{0\\}} I(X \\; ; \\; \\alpha_0 X + \\sum_{i=1}^{K} \\alpha_i Z_i) \n\t\\geq \n\tI(X \\; ; \\; X - G)\n\t\\end{align}\n\twhere $G$ is a geometric random variable with success probability equal to $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\}.$\n\\end{theorem}\nThe formula for $I(X \\; ; \\; X - G)$ is given by \n\\begin{align}\n& I(X \\; ; \\; X - G)\n\\nonumber \\\\ & = H(p) \n\\nonumber \\\\ & - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p 
\\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\\end{align}\nObserve that the formula is quickly computable for any value of $K$ and $p,$ making it attractive from a computational standpoint. To assess how tight the lower bound is, we empirically compare it to an upper bound that is computed with a greedy algorithm in Figure \\ref{fig:greedy_versus_bound_K15}. For a given $p$ and $K,$ the greedy algorithm chooses $\\alpha_0 = 1, \\; \\alpha_1 = 1,$ and sequentially chooses \\begin{align} \\alpha_{j} = \\argmin_{\\alpha \\in \\mathbb{N} \\; : \\; 1 \\leq \\alpha \\leq 1 + \\sum_{i = 1}^{j-1} \\alpha_i} I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)\n\\end{align}\nfor $2 \\leq j \\leq K.$ As seen in Figure \\ref{fig:greedy_versus_bound_K15}, $I(X \\; ; \\; X - G)$ serves as a very tight lower bound when compared with the upper bound. We therefore view $I(X \\; ; \\; X - G)$ as an approximation to the original problem.\n\nWhile the greedy algorithm requires $\\Omega(2^K)$ time in the worst case, similar plots to Figure \\ref{fig:greedy_versus_bound_K15} can be obtained for larger values of $K$ using a more computationally efficient variation of the greedy algorithm that is discussed in the longer version of this paper.\n\nIn order to prove the lower bound on the minimization of $I(X \\; ; \\; \\alpha_0 X + \\sum_{i=1}^K \\alpha_i Z_i)$ in Theorem \\ref{thm:main_lower_bound}, we form a convex relaxation of the minimization problem, perturb the relaxation to form a problem that is analytically solvable using KKT conditions, and use perturbation analysis to find a lower bound on the relaxation. This is then a lower bound on the original problem. 
The full proof is in the Section V.\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Comparison of Bounds: $K = 15$},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nymax = .06]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt2_greedy.dat};\n\\addlegendentry{upper bound (greedy)}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black,\ndotted\n] table[x index=0,y index=1] {data\/plt2_bound.dat};\n\\addlegendentry{lower bound}\n\n\\end{axis} \n\\end{tikzpicture}\n\\caption{}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\n\n\n\n\n\n\n\n\\section{Efficiently Computable Schemes}\n\n\\rdcomment{this may sound stupid, but i think it warrants a sentence here. `Theorem 1 gives us a lower bound on the mutual information between $X$ and ..., which would be the mixing ratio that provides the highest level of privacy. However, it does not provide any mixing scheme for approaching this lower bound.'}\n\nWhile much faster than brute force search, the greedy algorithm used to obtain the upper bound requires $\\Omega(2^K)$ time in the worst case. At a high level, this is due to the fact that at the $j$th step in the algorithm, computation of $I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)$ requires use of each support value in the PMF of $\\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i,$ and there are $\\Omega(2^j)$ support values in the worst case. This motivates the design of efficiently computable schemes, which can then be evaluated using the lower bound in Theorem \\ref{thm:main_lower_bound}.\n\nPerhaps the most natural approach is given by the uniform scheme, which we define to set $\\alpha_i = 1$ for all $i \\in [K]\\cup\\{0\\}.$ This is the scheme used by the authors of \\cite{Maddah-Ali} to obtain their results. 
The scheme is explicitly defined given $K,$ so no algorithm is needed to compute it. The performance of the scheme for a given $p$ and $K$ can be computed in $O(K)$ time using the formula in Lemma \\ref{lem:uniform_schem_formula} which is given in the appendix.\n\nThe uniform scheme is important because it is optimal as $p \\to 0$ as proved in Lemma \\ref{lem:optimality_of_uniform}\nand exemplified by the optimal curve in Figure \\ref{fig:optimal_scheme_K5_minor_allele}.\n\n\\begin{lemma}\n\\label{lem:optimality_of_uniform}\nFor any $K \\in \\mathbb{N},$ there exists some $p^* > 0$ such that the uniform scheme is optimal for $p < p^*.$\n\\end{lemma}\n\n\\rdcomment{i changed epsilon to $p^*$ here. epsilon is typically taken to mean a very small constant, which makes this result feel weak, i.e. this result is only true when p is arbitrarily small.}\n\nAt $p= 0.5,$ in Figure \\ref{fig:optimal_scheme_K5_minor_allele}, the optimal solution is given by the binary scheme, which we define to set $\\alpha_0 = 1$ and $\\alpha_i = 2^{i-1}$ for all $i \\in [K].$ Similar to the uniform scheme, the binary scheme is explicitly defined given $K.$ Also, similar to the uniform scheme, there exists a formula for computing its performance given $p$ and $K$ in $O(K)$ time. This formula is given in Lemma \\ref{lem:binary_scheme_performance} in the appendix. It is interesting that such a formula exists because the PMF of $\\sum_{i=1}^K 2^{i-1} Z_i$ has $2^K$ support values. 
\n\nMost importantly, the optimality of the binary scheme at $p = 0.5$ for $K=5$ shown in Figure \\ref{fig:optimal_scheme_K5_minor_allele} generalizes to all $K$ as proved in Lemma \\ref{lem:optimality_of_binary}.\n\n\\begin{lemma} \\label{lem:optimality_of_binary}\n\tFor any $K \\in \\mathbb{N},$ the binary scheme is optimal for $p = 0.5.$\n\\end{lemma}\n\nBecause the uniform scheme is optimal for $p \\to 0$ and the binary scheme is optimal for $p = 0.5,$ it is natural to try and combine these schemes to interpolate the performance for $p$ in the range $0 < p < 0.5$.\n\nWe next constrain the noise distribution to have support size at most $n$ for some $n > 0.$ We will obtain a lower bound for this support constrained optimization problem, and use it to obtain a lower bound on the original optimization problem. \n\t\\iscomment{unclear why this can be done}\n\t\n\tTo lower bound the support constrained optimization problem, we work with a relaxation of the support constrained optimization problem. Instead of requiring that the PMF for the noise random variable $D$ be generated by adding the sum of scaled $\\text{Ber}(1-p)$ random variables, we relax the problem to only require that the probability at the highest support value of $D$'s PMF is equal to $(1-p)^K$ (this holds for any valid PMF of $Z_{\\alpha}$). \n\tDue to Lemma \\ref{lem:alpha0_condition}, given $\\alpha_0,$ any solution can be transformed into a solution whose PMF has support values in $\\{t \\alpha_0 : \\; t \\in \\mathbb{Z}\\}$ without increasing the mutual information. Therefore, for a fixed $\\alpha_0,$ it suffices to only consider PMFs that have support values in $\\{t \\alpha_0 : \\; t \\in \\mathbb{Z}\\}$ in the optimization. 
If $\\alpha_0 = a \\in \\mathbb{N}$ any solution random variable $D$ with support values in $\\{t a : \\; t \\in \\mathbb{Z}\\}$ can be transformed into a solution with the same mutual information, $\\alpha_0 = 1,$ and support values in $\\{t : \\; t \\in \\mathbb{Z}\\}.$ This is accomplished by dividing $D$ by $a.$ Therefore, we analyze the relaxation with $\\alpha_0 = 1.$\n\t\n\tWe then obtain a lower bound on this relaxation of the support constrained optimization problem . Finally, we take the limit of the lower bound as $n$ goes to infinity in order to obtain a lower bound on the original minimization problem.\n\t\n\tThe support-constrained relaxation of the original problem with $\\alpha_0 = 1$ is given below. The objective function is a simplified form of $-H(X \\; | \\; X+D)$ where $D$ is the noise distribution we are optimizing over that is constrained to have probability of $(1-p)^K$ at the maximum support value. Here, $q_{(0)}$ is the probability at the maximum support value of $D$ and $q_{(-i)}$ is the probability at the $i$th integer below the maximum support value of $D.$ Observe that the problem is a convex minimization problem.\n\t\\iscomment{not clear how this was obtained:}\n\t\\begin{align}\n\t& \\min_{q_{(-n-1)}, \\; ..., \\; q_{(0)}} \\sum_{j \\in \\{-n-1, \\; ...,\\; -1\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \\\\\n\t\\nonumber \\\\ & \\text{subject to:} \\quad q_{(j)} \\geq 0 \\; \\; \\forall j, \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & \\quad q_{(-n-1)} = 0, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1. \\nonumber\n\t\\end{align} \n\t\n\t\\iscomment{do we need the full formula for the conditional entropy above? 
makes it harder to parse}\n\tWhile we cannot solve this problem analytically, we will perturb it to form an optimization problem that we can solve analytically. We will then use perturbation analysis to obtain a lower bound on the problem above.\n\t\n\t\n\tThe perturbed problem is given below where we have changed the constraint on the probability at the minimum support value, and the constraint on the sum of all the probabilities.\n\t\n\t\\begin{align}\n\t& \\min_{q_{(-n-1)}, \\; ..., \\; q_{(0)}} \\sum_{j \\in \\{-n-1, \\; ...,\\; -1\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \\\\\n\t\\nonumber \\\\\t& \\text{subject to:} \\quad q_{(j)} \\geq 0 \\; \\; \\forall j, \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & \\quad q_{(-n-1)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2} \\nonumber\n\t\\end{align} \n\t\n\t\n\tLet $f_0(q)$ be the objective function above. The Lagrangian is given by \n\t\n\t\\begin{align} \n\t&L(q, v, \\lambda) = f_0(q) \n\t\\nonumber \\\\ & + v_1 \\left(\\sum_i q_{(i)} - 1 + (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & + v_2 \\left(q_{(0)} - (1-p)^K \\right) \n\t\\nonumber \\\\ & + v_3 \\left(q_{(-n-1)} - (1-p)^K (1 - (1-p)^K)^{n+1} \\right) \n\t\\nonumber \\\\ & - \\sum_i \\lambda_i q_{(i)}. 
\n\t\\end{align}\n\tThe derivative of $f_0(q)$ with respect to $q_{(j)}$ is given by \n\t\\begin{align}\n\t& p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) \\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right).\n\t\\end{align} \n\tfor $j \\in \\{-n, \\; ..., \\; -1\\}$ and the KKT conditions are therefore given by \n\t\\begin{align}\n\t& q_{(j)} \\geq 0 \\; \\; \\forall j, \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & q_{(-n-1)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2}\n\t\\nonumber \\\\ & \\lambda_j \\geq 0 \\quad \\lambda_j q_{(j)} = 0 \\quad \\forall j \n\t\\nonumber \\\\ & (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right) - \\lambda_j + v_1 + v_3 = 0 \\quad \n\t\\nonumber \\\\ & \\text{for } j = - n -1\n\t\\nonumber \\\\ & \tp \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) \n\t\\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right) - \\lambda_j + v_1 = 0 \n\t\\nonumber \\\\ & \\forall j \\in \\{-n, \\; ..., \\; -1\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) - \\lambda_j + v_1 + v_2 = 0 \n\t\\nonumber \\\\ & \\text{for } j = 0\n\t\\end{align}\n\t\n\t\\rdcomment{stupid question: can you tell me a little more why (12) is this derivative? at a glance, it seems to be that there's a ton of term cancellations... (it's like the terms inside the log don't depend on $q_j$ with the way the derivative is written, so all those terms cancel?) i fear that a unfastidious reviewer may just claim this is a fundamental error and give a poor review without analyzing it deeply. 
(this happens sometimes to me.)}\n\t\n\tThe last three conditions can be rewritten as \n\t\\begin{align} \n\t& (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+1)}}{q_{(j)}} + (1-p) } \\right) - \\lambda_j + v_1 + v_3 = 0 \n\t\\nonumber \\\\ & \\text{for } j = - n - 1\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p }{(1-p)\\frac{q_{(j-1)}}{q_{(j)}} + p } \\right) + (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+1)}}{q_{(j)}} + (1-p) } \\right) \n\t\\nonumber \\\\ & - \\lambda_j + v_1 = 0 \\quad \\forall j \\in \\{-n, \\; ..., \\; -1\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p }{(1-p) \\frac{q_{(j-1)}}{q_{(j)}} + p } \\right) - \\lambda_j + v_1 + v_2 = 0 \\quad \\text{for } j = 0\n\t\\end{align}\n\twhich shows that if the ratio $\\frac{q_{(j-1)}}{q_{(j)}}$ between consecutive variables is the same for all $j$ and $\\lambda_j = 0 $ for all $j,$ then $v_1,$ $v_2$ can be chosen so that all of these derivatives equal $0$ for all $j.$ Picking \\[\\frac{q_{(j-1)}}{q_{(j)}} = (1 - (1-p)^K),\\] a solution to these equations is then\n\t\\begin{align}\n\t& q_{(-i)} = (1-p)^K (1 - (1-p)^K)^{i} \n\t\\nonumber \\\\ & \\quad \\quad \\text{for } i \\in \\{0, \\; 1, \\; ..., \\; n+1 \\} \n\t\\nonumber \\\\ & \\lambda_i = 0 \\quad \\forall i\n\t\\nonumber \\\\ & v_1 = - p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & - (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\nonumber \\\\ & v_2 = (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & v_3 = p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\end{align}\n\t\n\tBecause this is a convex optimization problem, this solution is optimal since it satisfies the KKT conditions.\n\t\n\tThe optimal value for the optimization problem is therefore given by \n\t\\begin{align} \n\t& \\sum_{i = 0}^{n} -(1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{(1-p) \\beta 
\\alpha^{i+1}} \\right) \n\t\\\\ & - p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{ p \\beta \\alpha^i} \\right)\n\t\\\\ & = \\sum_{i = 0}^{n} -(1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\\\ & - p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\end{align}\n\twhere $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K.$ \n\tApplying the geometric sum formula and simplifying, we obtain \n\t\\begin{align} \n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right).\n\t\\end{align}\n\t\n\t\n\t\n\n\n\n\n\n\n\t\n\tUsing the perturbation analysis from Section 5.6.1 of \\cite{boyd}, we see that the optimal value of the support constrained relaxation of the original problem is lower bounded by\n\t\\begin{align}\n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\nonumber \\\\ & - v_1^* \\left( (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & - v_3^* \\left(- (1-p)^K (1 - (1-p)^K)^{n+1} \\right) \n\t\\end{align} \n\twhere $v_1^*$ and $v_3^*$ are the optimal lagrange multipliers for the perturbed problem which we determined above through the KKT conditions.\n\t\n\tThe sequence of optimal values returned by the support constrained relaxation of the original problem is monotonically decreasing in $n$ clearly. 
Thus, taking the limit of the bound as $n \\to \\infty$ and simplifying yields the following lower bound on $-H(X \\; | \\; \\alpha_0 X + Z_{\\alpha})$ for any choice of $\\alpha_i$'s: \t\\begin{align} \n\t& -(p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right).\n\t\\end{align}\n\t\n\tThus, a lower bound on mutual information is\n\t\\begin{align} \n\t& H(p) - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\t\\end{align}\n\tfor any choice of $\\alpha_i$'s.\n\t\n\tFinally, observe that\n\t\\begin{align}\n\t& H(X \\; | \\; X - G) \n\t\\nonumber \\\\ & =\\sum_{i = 0}^{\\infty} (1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{(1-p) \\beta \\alpha^{i+1}} \\right) \n\t\\nonumber \\\\ & + p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{ p \\beta \\alpha^i} \\right)\n\t\\nonumber \\\\ & = (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & + p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\t\\end{align}\n\twhere $G$ is a geometric random variable with success probability $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\},$ and $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K$ as before.\n\\end{IEEEproof}\n\n\\begin{lemma}\n \\label{lem:alpha0_condition}\n\tLet $X$ be a $\\text{Ber}(1-p)$ random variable. 
Let $\\alpha_0 \\in \\mathbb{N}.$ \n\tLet $Z$ be any random variable such that its PMF has integral support and maximum support value at $t = 0.$ Let \n\t\\begin{align}\n\t& \\hat{Z} = Z - (Z \\mod{\\alpha_0}) \n\t\\end{align} \n\n\n\tWe then have that \n\t\\begin{align}\n\t& I(X ; \\alpha_0 X + \tZ) \\geq I(X ; \\alpha_0 X + \\hat{Z}). \n\t\\end{align}\t\n\\end{lemma}\n\n\\begin{IEEEproof}\n\tLet $S = Z \\mod{\\alpha_0}.$ We then have that \n\t\\begin{align} \n\t\t& I(X ; \\; \\alpha_0 X + \tZ)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z} + S)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z}, \\; S)\n\t\t\\nonumber \\\\ & = I(X ; \\; S) + I(X ; \\; \\alpha_0 X + \\hat{Z} \\; | \\; S)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z} \\; | \\; S)\n\t\t\\nonumber \\\\ & = H(X \\; | \\; S) - H(X \\; | \\; \\alpha_0 X + \\hat{Z}, \\; S) \n\t\t\\nonumber \\\\ & = H(X) - H(X \\; | \\; \\alpha_0 X + \\hat{Z}, \\; S)\n\t\t\\nonumber \\\\ & \\geq H(X) - H(X \\; | \\; \\alpha_0 X + \\hat{Z})\n\t\t\\nonumber \\\\ & = I(X \\; ; \\; \\alpha_0 X + \\hat{Z})\n\t\\end{align} \n\twhere the third line follows because given $\\alpha_0 X + \\hat{Z} + S,$ we have that \n\t\\begin{align}\n\t S = ((\\alpha_0 X + \\hat{Z} + S) - \\alpha_0) \\mod \\alpha_0 \n\t\\end{align}\n\tand\n \\begin{align}\n\t \\alpha_0 X + \\hat{Z} = (\\alpha_0 X + \\hat{Z} + S) - S.\n\t\\end{align}\n\t\n\t\\end{IEEEproof}\n\n\\section{Main Result}\n\n\n\nOur main result is stated below.\n\n\\begin{theorem} \\label{thm:main_lower_bound}\n\tFor any $K\\in \\mathbb{N},$ $p \\in [0, 0.5],$ we have that \n\t\\begin{align} \n\n\t& \\min_{\\alpha_1, ..., \\alpha_K \\in \\mathbb{R}_{\\geq 0}}\n\tI(X \\; ; \\; X + \\sum_{i=1}^{K} \\alpha_i Z_i) \n\t\\geq \n\tI(X \\; ; \\; X - G)\n\t\\end{align}\n\twhere $G$ is a geometric random variable with success probability equal to $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\}.$\n\\end{theorem}\nThe formula for $I(X \\; ; \\; X - G)$ is 
given by \n\\begin{align}\n& I(X \\; ; \\; X - G)\n\\nonumber \\\\ & = H(p) \n\\nonumber \\\\ & - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\\end{align}\nObserve that the formula is quickly computable for any value of $K$ and $p,$ making it attractive from a computational standpoint. To assess how tight the lower bound is, we empirically compare it to an upper bound that is computed with a greedy algorithm in Figure \\ref{fig:greedy_versus_bound_K15}. For a given $p$ and $K,$ the greedy algorithm chooses $\\alpha_0 = 1, \\; \\alpha_1 = 1,$ and sequentially chooses \\begin{align} \\alpha_{j} = \\argmin_{\\alpha \\in \\mathbb{N} \\; : \\; 1 \\leq \\alpha \\leq 1 + \\sum_{i = 1}^{j-1} \\alpha_i} I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)\n\\end{align}\nfor $2 \\leq j \\leq K.$ As seen in Figure \\ref{fig:greedy_versus_bound_K15}, $I(X \\; ; \\; X - G)$ serves as a very tight lower bound when compared with the upper bound. We therefore view $I(X \\; ; \\; X - G)$ as an approximation to the original problem.\n\nWhile the greedy algorithm requires $\\Omega(2^K)$ time in the worst case, similar plots to Figure \\ref{fig:greedy_versus_bound_K15} can be obtained for larger values of $K$ using a more computationally efficient variation of the greedy algorithm that is discussed in the longer version of this paper.\n\nIn order to prove the lower bound on the minimization of $I(X \\; ; \\; X + \\sum_{i=1}^K \\alpha_i Z_i)$ in Theorem \\ref{thm:main_lower_bound}, we form a convex relaxation of the minimization problem, perturb the relaxation to form a problem that is analytically solvable using KKT conditions, and use perturbation analysis to find a lower bound on the relaxation. This is then a lower bound on the original problem. 
The full proof is in the Section V.\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Comparison of Bounds: $K = 15$},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nymax = .06]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt2_greedy.dat};\n\\addlegendentry{upper bound (greedy)}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black,\ndotted\n] table[x index=0,y index=1] {data\/plt2_bound.dat};\n\\addlegendentry{lower bound}\n\n\\end{axis} \n\\end{tikzpicture}\n\\caption{}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\n\n\n\n\n\n\n\n\\section{Efficiently Computable Schemes}\n\n\\rdcomment{this may sound stupid, but i think it warrants a sentence here. `Theorem 1 gives us a lower bound on the mutual information between $X$ and ..., which would be the mixing ratio that provides the highest level of privacy. However, it does not provide any mixing scheme for approaching this lower bound.'}\n\nWhile much faster than brute force search, the greedy algorithm used to obtain the upper bound requires $\\Omega(2^K)$ time in the worst case. At a high level, this is due to the fact that at the $j$th step in the algorithm, computation of $I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)$ requires use of each support value in the PMF of $\\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i,$ and there are $\\Omega(2^j)$ support values in the worst case. This motivates the design of efficiently computable schemes, which can then be evaluated using the lower bound in Theorem \\ref{thm:main_lower_bound}.\n\nPerhaps the most natural approach is given by the uniform scheme, which we define to set $\\alpha_i = 1$ for all $i \\in [K]\\cup\\{0\\}.$ This is the scheme used by the authors of \\cite{Maddah-Ali} to obtain their results. 
The scheme is explicitly defined given $K,$ so no algorithm is needed to compute it. The performance of the scheme for a given $p$ and $K$ can be computed in $O(K)$ time using the formula in Lemma \\ref{lem:uniform_schem_formula} which is given in the appendix.\n\nThe uniform scheme is important because it is optimal as $p \\to 0$ as proved in Lemma \\ref{lem:optimality_of_uniform}\nand exemplified by the optimal curve in Figure \\ref{fig:optimal_scheme_K5_minor_allele}.\n\n\\begin{lemma}\n\\label{lem:optimality_of_uniform}\nFor any $K \\in \\mathbb{N},$ there exists some $p^* > 0$ such that the uniform scheme is optimal for $p < p^*.$\n\\end{lemma}\n\n\\rdcomment{i changed epsilon to $p^*$ here. epsilon is typically taken to mean a very small constant, which makes this result feel weak, i.e. this result is only true when p is arbitrarily small.}\n\nAt $p= 0.5,$ in Figure \\ref{fig:optimal_scheme_K5_minor_allele}, the optimal solution is given by the binary scheme, which we define to set $\\alpha_0 = 1$ and $\\alpha_i = 2^{i-1}$ for all $i \\in [K].$ Similar to the uniform scheme, the binary scheme is explicitly defined given $K.$ Also, similar to the uniform scheme, there exists a formula for computing its performance given $p$ and $K$ in $O(K)$ time. This formula is given in Lemma \\ref{lem:binary_scheme_performance} in the appendix. It is interesting that such a formula exists because the PMF of $\\sum_{i=1}^K 2^{i-1} Z_i$ has $2^K$ support values. 
\n\nMost importantly, the optimality of the binary scheme at $p = 0.5$ for $K=5$ shown in Figure \\ref{fig:optimal_scheme_K5_minor_allele} generalizes to all $K$ as proved in Lemma \\ref{lem:optimality_of_binary}.\n\n\\begin{lemma} \\label{lem:optimality_of_binary}\n\tFor any $K \\in \\mathbb{N},$ the binary scheme is optimal for $p = 0.5.$\n\\end{lemma}\n\nBecause the uniform scheme is optimal for $p \\to 0$ and the binary scheme is optimal for $p = 0.5,$ it is natural to try and combine these schemes to interpolate the performance for $p$ in the range $0 < p < 0.5$.\n\nThe solution to (\\ref{eq:relax_inf2}) with $q_{(-i)} > 0$ for $i \\in \\mathbb{N} \\cup \\{0\\}$ is given by $q_{(-i)} = (1-p)^K (1 - (1-p)^K)^i$ for $i \\in \\mathbb{N} \\cup \\{0\\}$ which corresponds to $D = -G$ in (\\ref{eq:relax_inf1}) where $G$ is a geometric random variable with success probability equal to $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\}.$ We do not use this observation directly to establish the result because no reference on KKT conditions in the infinite dimensional case was found. \n \n Instead, we form a new optimization problem where we constrain $D$ to have distance $n$ between its maximum and minimum support values, derive a lower bound on the solution for $n \\in \\mathbb{N}$, and use this sequence of lower bounds to establish the result. The support constrained version of (\\ref{eq:relax_inf2}) is given by \n \\begin{align}\n \\label{eq:relax_const}\n & \\min_{q_{(-i)} \\geq 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n \\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \\\\\n\t\\nonumber \\\\ & \\text{subject to:} \\nonumber \\\\ & \n \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & \\quad q_{(-n-1)} = 0, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1. 
\\nonumber\n\t\\end{align} \n While we do not know the solution to (\\ref{eq:relax_const}), we will perturb it to form a problem we can solve analytically, then use perturbation analysis to lower bound the optimal value of (\\ref{eq:relax_const}).\n \n The perturbed version of (\\ref{eq:relax_const}) is given by \n\t\\begin{align}\n\t\\label{eq:relax_const_pert}\n & \\min_{q_{(-i)} \\geq 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n \\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \\\\\n\t\\nonumber \\\\\t& \\text{subject to:} \\nonumber \\\\\n\t& \\quad q_{(0)} = (1-p)^K,\n\t\\nonumber \\\\ & \\quad q_{(-n-1)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2} \\nonumber\n\t\\end{align} \n where we have changed the constraint on the probability at the minimum support value, and the constraint on the sum of all the probabilities.\n \n Changing the implicit constraint in (\\ref{eq:relax_const_pert}) from $q_{(i)} \\geq 0$ to $q_{(i)} > 0$ gives us\n\t\\begin{align}\n\t\\label{eq:relax_const_pert_posq}\n & \\min_{q_{(-i)} > 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n \\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \\\\\n\t\\nonumber \\\\\t& \\text{subject to:} \\nonumber \\\\\n\t& \\quad q_{(0)} = (1-p)^K,\n\t\\nonumber \\\\ & \\quad q_{(-n-1)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2} \\nonumber\n\t\\end{align} \n which we can solve through KKT conditions since the problem is convex, Slater's condition holds, and the objective function and constraint functions are 
differentiable on their domains. Due to the continuity of the objective function in (\\ref{eq:relax_const_pert}), it is not hard to show that an optimal solution to (\\ref{eq:relax_const_pert_posq}) is also an optimal solution to (\\ref{eq:relax_const_pert}). At a high level, this is due to the fact that if there was a solution to (\\ref{eq:relax_const_pert}) with $q_{(i)} = 0$ for some $i$ and this solution was better than the optimal solution to (\\ref{eq:relax_const_pert_posq}), then this would create a contradiction because (\\ref{eq:relax_const_pert_posq}) could get arbitrarily close to the better solution by the continuity of the objective function. \n \n We proceed to find the solution to the KKT conditions for (\\ref{eq:relax_const_pert_posq}), which is then a solution to (\\ref{eq:relax_const_pert}). Let $f_0(q)$ be the objective function of (\\ref{eq:relax_const_pert_posq}). The Lagrangian is given by \n\t\n\t\\begin{align} \n\t&L(q, v, \\lambda) = f_0(q) \n\t\\nonumber \\\\ & + v_1 \\left(\\sum_i q_{(i)} - 1 + (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & + v_2 \\left(q_{(0)} - (1-p)^K \\right) \n\t\\nonumber \\\\ & + v_3 \\left(q_{(-n-1)} - (1-p)^K (1 - (1-p)^K)^{n+1} \\right). 
\n\t\\end{align}\n\tThe derivative of $f_0(q)$ with respect to $q_{(j)}$ is given by \n\t\\begin{align}\n\t& p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) \\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right)\n\t\\end{align} \n\tfor $j \\in \\{-n, \\; ..., \\; -1\\}$ and the KKT conditions are given by \n\t\\begin{align}\n\t& q_{(0)} = (1-p)^K,\n\t\\nonumber \\\\ & q_{(-n-1)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2}\n\t\\nonumber \\\\ & (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right) + v_1 + v_3 = 0 \\quad \n\t\\nonumber \\\\ & \\text{for } j = - n -1\n\t\\nonumber \\\\ & \tp \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) \n\t\\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right)+ v_1 = 0 \n\t\\nonumber \\\\ & \\forall j \\in \\{-n, \\; ..., \\; -1\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) + v_1 + v_2 = 0 \n\t\\nonumber \\\\ & \\text{for } j = 0.\n\t\\end{align}\n\t\n\n\t\n\tThe last three conditions can be rewritten as \n\t\\begin{align} \n\t& (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+1)}}{q_{(j)}} + (1-p) } \\right) + v_1 + v_3 = 0 \n\t\\nonumber \\\\ & \\text{for } j = - n - 1\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p }{(1-p)\\frac{q_{(j-1)}}{q_{(j)}} + p } \\right) \\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+1)}}{q_{(j)}} + (1-p) } \\right) + v_1 = 0 \n\t\\nonumber \\\\ & \\forall j \\in \\{-n, \\; ..., \\; -1\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p }{(1-p) \\frac{q_{(j-1)}}{q_{(j)}} + p } \\right) + v_1 + v_2 = 0 \\quad \\text{for } j = 0\n\t\\end{align}\n\twhich shows that if the ratio $\\frac{q_{(j-1)}}{q_{(j)}}$ between consecutive variables is the same for all $j,$ then $v_1,$ $v_2$ can be chosen so that of these derivatives equal $0$ for 
all $j.$ Picking \\[\\frac{q_{(j-1)}}{q_{(j)}} = (1 - (1-p)^K),\\] a solution to these equations is then\n\t\\begin{align}\n\t& q_{(-i)} = (1-p)^K (1 - (1-p)^K)^{i} \n\t\\nonumber \\\\ & \\text{for } i \\in \\{0, \\; 1, \\; ..., \\; n+1 \\} \n\t\\nonumber \\\\ & v_1 = - p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & - (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\nonumber \\\\ & v_2 = (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & v_3 = p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\end{align}\n\t\n This is the solution to both (\\ref{eq:relax_const_pert_posq}) and (\\ref{eq:relax_const_pert}) as discussed earlier. The optimal value for (\\ref{eq:relax_const_pert_posq}) and (\\ref{eq:relax_const_pert}) is therefore given by \n\t\\begin{align} \n\t& \\sum_{i = 0}^{n} -(1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{(1-p) \\beta \\alpha^{i+1}} \\right) \n\t\\nonumber \\\\ & - p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{ p \\beta \\alpha^i} \\right)\n\t\\nonumber \\\\ & = \\sum_{i = 0}^{n} -(1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\end{align}\n\twhere $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K.$ \n\tApplying the geometric sum formula and simplifying, the optimal value for (\\ref{eq:relax_const_pert_posq}) and (\\ref{eq:relax_const_pert}) is \n\t\\begin{align} \n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right).\n\t\\end{align}\n\t\n\tUsing the perturbation analysis from 
Section 5.6.1 of \\cite{boyd}, we see that the optimal value of (\\ref{eq:relax_const}) is lower bounded by\n\t\\begin{align}\n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\nonumber \\\\ & - v_1^* \\left( (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & - v_3^* \\left(- (1-p)^K (1 - (1-p)^K)^{n+1} \\right) \n\t\\end{align} \n\twhere $v_1^*$ and $v_3^*$ are the optimal Lagrange multipliers for (\\ref{eq:relax_const_pert_posq}) and (\\ref{eq:relax_const_pert})\n\twe determined through the KKT conditions.\n\t\n\tThe sequence of optimal values returned by (\\ref{eq:relax_const}) is clearly monotonically decreasing in $n.$ Thus, taking the limit of the lower bound as $n \\to \\infty$ and simplifying yields the following lower bound on $-H(X \\; | \\; \\alpha_0 X + Z_{\\alpha})$ for any choice of $\\alpha_i$'s: \t\\begin{align} \n\t& -(p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right).\n\t\\end{align}\n\t\n\tThus, a lower bound on mutual information is\n\t\\begin{align} \n\t& H(p) - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\t\\end{align}\n\tfor any choice of $\\alpha_i$'s.\n\t\n\tFinally, observe that\n\t\\begin{align}\n\t& H(X \\; | \\; X - G) \n\t\\nonumber \\\\ & =\\sum_{i = 0}^{\\infty} (1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{(1-p) \\beta \\alpha^{i+1}} \\right) \n\t\\nonumber \\\\ & + p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{ p \\beta \\alpha^i} 
\\right)\n\t\\nonumber \\\\ & = (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & + p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\t\\end{align}\n\twhere $G$ is a geometric random variable with success probability $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\},$ and $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K$ as before.\n\t\n\n\t\n\\end{IEEEproof}\n\n\\begin{lemma}\n \\label{lem:alpha0_condition}\n Fixing $\\alpha_0 = 1$ in (\\ref{eq:relax_const}) can not increase the optimal value.\n\\end{lemma}\n\n\\begin{IEEEproof}\n Let $\\alpha_0 \\in \\mathbb{N}.$ \n\tLet $Z$ be any random variable such that its PMF has integral support and maximum support value at $t = 0.$ Let \n\t\\begin{align}\n\t& \\hat{Z} = Z - S\n\t\\end{align} \n\twhere $S = Z \\mod{\\alpha_0}.$ \n\tWe then have that \n\t\\begin{align}\n\t& I(X ; \\alpha_0 X + \tZ) \\geq I(X ; \\alpha_0 X + \\hat{Z}). \n\t\\end{align}\n\tbecause \n\t\\begin{align} \n\t\t& I(X ; \\; \\alpha_0 X + \tZ)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z} + S)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z}, \\; S)\n\t\t\\nonumber \\\\ & = I(X ; \\; S) + I(X ; \\; \\alpha_0 X + \\hat{Z} \\; | \\; S)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z} \\; | \\; S)\n\t\t\\nonumber \\\\ & = H(X \\; | \\; S) - H(X \\; | \\; \\alpha_0 X + \\hat{Z}, \\; S) \n\t\t\\nonumber \\\\ & = H(X) - H(X \\; | \\; \\alpha_0 X + \\hat{Z}, \\; S)\n\t\t\\nonumber \\\\ & \\geq H(X) - H(X \\; | \\; \\alpha_0 X + \\hat{Z})\n\t\t\\nonumber \\\\ & = I(X \\; ; \\; \\alpha_0 X + \\hat{Z})\n\t\\end{align} \n\twhere the third line follows because given $\\alpha_0 X + \\hat{Z} + S,$ we have that \n\t\\begin{align}\n\t S = ((\\alpha_0 X + \\hat{Z} + S) - \\alpha_0) \\mod \\alpha_0 \n\t\\end{align}\n\tand\n \\begin{align}\n\t \\alpha_0 X + \\hat{Z} = (\\alpha_0 X + \\hat{Z} + S) - S.\n\t\\end{align}\n\tThus, any solution to 
(\\ref{eq:relax_inf1}) with $\\alpha_0 = a$ can be transformed into a solution whose PMF had support values in $\\{t a : \\; t \\in \\mathbb{Z}\\}$ without increasing the mutual information. Therefore, for a fixed $\\alpha_0,$ it suffices to only consider PMFs that have support values in $\\{t \\alpha_0 : \\; t \\in \\mathbb{Z}\\}$ in the optimization. If $\\alpha_0 = a \\in \\mathbb{N}$ any solution random variable $D$ with support values in $\\{t a : \\; t \\in \\mathbb{Z}\\}$ can be transformed into a solution with the same mutual information, $\\alpha_0 = 1,$ and support values in $\\{t : \\; t \\in \\mathbb{Z}\\}.$ This is accomplished by dividing $D$ by $a.$ Therefore it suffices to fix $\\alpha_0=1$ in the optimization. \n\t\\end{IEEEproof}\n\n\\section{Main Result}\n\n\n\nOur main result is stated below.\n\n\\begin{theorem} \\label{thm:main_lower_bound}\n\tFor any $K\\in \\mathbb{N},$ $p \\in [0, 0.5],$ we have that \n\t\\begin{align} \n\n\t& \\min_{\\alpha_0, ..., \\alpha_K \\in \\mathbb{Q}_{\\geq 0}}\n\tI(X \\; ; \\; \\alpha_0 X + \\sum_{i=1}^{K} \\alpha_i Z_i) \n\t\\geq \n\tI(X \\; ; \\; X - G)\n\t\\end{align}\n\twhere $G$ is a geometric random variable with success probability equal to $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\}.$\n\\end{theorem}\nThe formula for $I(X \\; ; \\; X - G)$ is given by \n\\begin{align}\n& I(X \\; ; \\; X - G)\n\\nonumber \\\\ & = H(p) \n\\nonumber \\\\ & - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\\end{align}\nObserve that the formula is quickly computable for any value of $K$ and $p,$ making it attractive from a computational standpoint. To assess how tight the lower bound is, we empirically compare it to an upper bound that is computed with a greedy algorithm in Figure \\ref{fig:greedy_versus_bound_K15}. 
For a given $p$ and $K,$ the greedy algorithm chooses $\\alpha_0 = 1, \\; \\alpha_1 = 1,$ and sequentially chooses \\begin{align} \\alpha_{j} = \\argmin_{\\alpha \\in \\mathbb{N} \\; : \\; 1 \\leq \\alpha \\leq 1 + \\sum_{i = 1}^{j-1} \\alpha_i} I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)\n\\end{align}\nfor $2 \\leq j \\leq K.$ As seen in Figure \\ref{fig:greedy_versus_bound_K15}, $I(X \\; ; \\; X - G)$ serves as a very tight lower bound when compared with the upper bound. We therefore view $I(X \\; ; \\; X - G)$ as an approximation to the original problem.\n\nWhile the greedy algorithm requires $\\Omega(2^K)$ time in the worst case, similar plots to Figure \\ref{fig:greedy_versus_bound_K15} can be obtained for larger values of $K$ using a more computationally efficient variation of the greedy algorithm that is discussed in the longer version of this paper.\n\nIn order to prove the lower bound on the minimization of $I(X \\; ; \\; X + \\sum_{i=1}^K \\alpha_i Z_i)$ in Theorem \\ref{thm:main_lower_bound}, we form a convex relaxation of the minimization problem, perturb the relaxation to form a problem that is analytically solvable using KKT conditions, and use perturbation analysis to find a lower bound on the relaxation. This is then a lower bound on the original problem. 
The full proof is in the Section V.\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Comparison of Bounds: $K = 15$},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nymax = .06]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt2_greedy.dat};\n\\addlegendentry{upper bound (greedy)}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black,\ndotted\n] table[x index=0,y index=1] {data\/plt2_bound.dat};\n\\addlegendentry{lower bound}\n\n\\end{axis} \n\\end{tikzpicture}\n\\caption{}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\n\n\n\n\n\n\n\n\\section{Efficiently Computable Schemes}\n\n\\rdcomment{this may sound stupid, but i think it warrants a sentence here. `Theorem 1 gives us a lower bound on the mutual information between $X$ and ..., which would be the mixing ratio that provides the highest level of privacy. However, it does not provide any mixing scheme for approaching this lower bound.'}\n\nWhile much faster than brute force search, the greedy algorithm used to obtain the upper bound requires $\\Omega(2^K)$ time in the worst case. At a high level, this is due to the fact that at the $j$th step in the algorithm, computation of $I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)$ requires use of each support value in the PMF of $\\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i,$ and there are $\\Omega(2^j)$ support values in the worst case. This motivates the design of efficiently computable schemes, which can then be evaluated using the lower bound in Theorem \\ref{thm:main_lower_bound}.\n\nPerhaps the most natural approach is given by the uniform scheme, which we define to set $\\alpha_i = 1$ for all $i \\in [K]\\cup\\{0\\}.$ This is the scheme used by the authors of \\cite{Maddah-Ali} to obtain their results. 
The scheme is explicitly defined given $K,$ so no algorithm is needed to compute it. The performance of the scheme for a given $p$ and $K$ can be computed in $O(K)$ time using the formula in Lemma \\ref{lem:uniform_schem_formula} which is given in the appendix.\n\nThe uniform scheme is important because it is optimal as $p \\to 0$ as proved in Lemma \\ref{lem:optimality_of_uniform}\nand exemplified by the optimal curve in Figure \\ref{fig:optimal_scheme_K5_minor_allele}.\n\n\\begin{lemma}\n\\label{lem:optimality_of_uniform}\nFor any $K \\in \\mathbb{N},$ there exists some $p^* > 0$ such that the uniform scheme is optimal for $p < p^*.$\n\\end{lemma}\n\n\\rdcomment{i changed epsilon to $p^*$ here. epsilon is typically taken to mean a very small constant, which makes this result feel weak, i.e. this result is only true when p is arbitrarily small.}\n\nAt $p= 0.5,$ in Figure \\ref{fig:optimal_scheme_K5_minor_allele}, the optimal solution is given by the binary scheme, which we define to set $\\alpha_0 = 1$ and $\\alpha_i = 2^{i-1}$ for all $i \\in [K].$ Similar to the uniform scheme, the binary scheme is explicitly defined given $K.$ Also, similar to the uniform scheme, there exists a formula for computing its performance given $p$ and $K$ in $O(K)$ time. This formula is given in Lemma \\ref{lem:binary_scheme_performance} in the appendix. It is interesting that such a formula exists because the PMF of $\\sum_{i=1}^K 2^{i-1} Z_i$ has $2^K$ support values. 
\n\nMost importantly, the optimality of the binary scheme at $p = 0.5$ for $K=5$ shown in Figure \\ref{fig:optimal_scheme_K5_minor_allele} generalizes to all $K$ as proved in Lemma \\ref{lem:optimality_of_binary}.\n\n\\begin{lemma} \\label{lem:optimality_of_binary}\n\tFor any $K \\in \\mathbb{N},$ the binary scheme is optimal for $p = 0.5.$\n\\end{lemma}\n\nBecause the uniform scheme is optimal for $p \\to 0$ and the binary scheme is optimal for $p = 0.5,$ it is natural to try and combine these schemes to interpolate the performance for $p$ in the range $0 < p < 0.5.$ [\\ldots] The solution with $q_{(-i)} > 0$ for $i \\in \\mathbb{N} \\cup \\{0\\}$ is given by $q_{(-i)} = (1-p)^K (1 - (1-p)^K)^i$ for $i \\in \\mathbb{N} \\cup \\{0\\}$ which corresponds to $D = -G$ in (\\ref{eq:relax_inf1}) where $G$ is a geometric random variable with success probability equal to $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\}.$ We do not use this observation directly to establish Theorem \\ref{theorem:main_recursion} because no reference on the relation between KKT conditions and optimality in the infinite dimensional case was found. \n \n Instead, we form a new optimization problem where we constrain $D$ to have distance $n$ between its maximum and minimum support values, derive a lower bound on the solution for $n \\in \\mathbb{N}$, and use this sequence of lower bounds to establish the result. The support constrained version of (\\ref{eq:relax_inf2}) is given by \n \\begin{align}\n \\label{eq:relax_const}\n & \\min_{q_{(-i)} \\geq 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n \\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \\\\ & \\text{subject to:} \\nonumber \\\\ & \n \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & \\quad q_{(-n-1)} = 0, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1. 
\\nonumber\n\t\\end{align} \n\n Changing the implicit constraint in (\\ref{eq:relax_const}) from $q_{(i)} \\geq 0$ to $q_{(i)} > 0$ and the explicit constraint $q_{(-n-1)} = 0$ to $q_{(-n-1)} = \\epsilon$ where $\\epsilon \\in \\mathbb{R}_{>0}$ gives us\n \\begin{align}\n \\label{eq:relax_const_posq}\n & \\min_{q_{(-i)} > 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n \\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \\\\ & \\text{subject to:} \\nonumber \\\\ & \n \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & \\quad q_{(-n-1)} = \\epsilon,\n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1. \\nonumber\n\t\\end{align} \n\n Let $p^*$ be the optimal value of \\ref{eq:relax_const} and let $p_{\\epsilon}^*$ be the optimal value of \\ref{eq:relax_const_posq} for a given $\\epsilon.$ Due to the continuity of the objective function in (\\ref{eq:relax_const}), it is not hard to show that $ p^* \\geq \\inf_{\\epsilon \\in \\mathbb{R}_{>0}} p_{\\epsilon}^*.$ At a high level, this is due to the fact that for any solution to (\\ref{eq:relax_const}), it follows that (\\ref{eq:relax_const_posq}) can get arbitrarily close to it as $\\epsilon \\to 0$ due to the continuity of the objective function. \n\n\n\n While we do not know the solution to (\\ref{eq:relax_const}) or (\\ref{eq:relax_const_posq}), we will perturb (\\ref{eq:relax_const_posq}) to form a problem we can solve analytically, then use perturbation analysis to lower bound the optimal value of (\\ref{eq:relax_const_posq}), and ultimately lower bound the optimal value of (\\ref{eq:relax_const}). 
The perturbed version of (\\ref{eq:relax_const_posq}) is given by \n\t\\begin{align}\n\t\\label{eq:relax_const_pert_posq}\n & \\min_{q_{(-i)} > 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n \\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \\\\\t& \n\t\\text{subject to:} \\nonumber \\\\\n\t& \\quad q_{(0)} = (1-p)^K,\n\t\\nonumber \\\\ & \\quad q_{(-n-1)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2} \\nonumber\n\t\\end{align} \n where we have changed the constraint on the probability at the minimum support value, and the constraint on the sum of all the probabilities.\n We can solve this by solving the KKT conditions since the problem is convex, Slater's condition holds, and the objective function and constraint functions are differentiable on the domain $q_{(i)} > 0.$ \n \n We proceed to find the solution to the KKT conditions for (\\ref{eq:relax_const_pert_posq}). Let $f_0(q)$ be the objective function of (\\ref{eq:relax_const_pert_posq}). The Lagrangian is given by \n\t\\begin{align} \n\t&L(q, v, \\lambda) = f_0(q) \n\t\\nonumber \\\\ & + v_1 \\left(\\sum_i q_{(i)} - 1 + (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & + v_2 \\left(q_{(0)} - (1-p)^K \\right) \n\t\\nonumber \\\\ & + v_3 \\left(q_{(-n-1)} - (1-p)^K (1 - (1-p)^K)^{n+1} \\right). 
\n\t\\end{align}\n\tThe derivative of $f_0(q)$ with respect to $q_{(j)}$ is given by \n\t\\begin{align}\n\t& p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) \\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right)\n\t\\end{align} \n\tfor $j \\in \\{-n, \\; ..., \\; -1\\}$ and the KKT conditions are given by \n\t\\begin{align}\n\t& q_{(0)} = (1-p)^K,\n\t\\nonumber \\\\ & q_{(-n-1)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2}\n\t\\nonumber \\\\ & (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right) + v_1 + v_3 = 0 \\quad \n\t\\nonumber \\\\ & \\text{for } j = - n -1\n\t\\nonumber \\\\ & \tp \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) \n\t\\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right)+ v_1 = 0 \n\t\\nonumber \\\\ & \\forall j \\in \\{-n, \\; ..., \\; -1\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) + v_1 + v_2 = 0 \n\t\\nonumber \\\\ & \\text{for } j = 0.\n\t\\end{align}\n\t\n\n\t\n\tThe last three conditions can be rewritten as \n\t\\begin{align} \n\t& (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+1)}}{q_{(j)}} + (1-p) } \\right) + v_1 + v_3 = 0 \n\t\\nonumber \\\\ & \\text{for } j = - n - 1\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p }{(1-p)\\frac{q_{(j-1)}}{q_{(j)}} + p } \\right) \\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+1)}}{q_{(j)}} + (1-p) } \\right) + v_1 = 0 \n\t\\nonumber \\\\ & \\forall j \\in \\{-n, \\; ..., \\; -1\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p }{(1-p) \\frac{q_{(j-1)}}{q_{(j)}} + p } \\right) + v_1 + v_2 = 0 \\quad \\text{for } j = 0\n\t\\end{align}\n\twhich shows that if the ratio $\\frac{q_{(j-1)}}{q_{(j)}}$ between consecutive variables is the same for all $j,$ then $v_1,$ $v_2$ can be chosen so that of these derivatives equal $0$ for 
all $j.$ Picking \\[\\frac{q_{(j-1)}}{q_{(j)}} = (1 - (1-p)^K),\\] a solution to these equations is then\n\t\\begin{align}\n\t& q_{(-i)} = (1-p)^K (1 - (1-p)^K)^{i} \n\t\\nonumber \\\\ & \\text{for } i \\in \\{0, \\; 1, \\; ..., \\; n+1 \\} \n\t\\nonumber \\\\ & v_1 = - p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & - (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\nonumber \\\\ & v_2 = (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & v_3 = p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\end{align}\n\t\n This is the solution to (\\ref{eq:relax_const_pert_posq}) as discussed earlier. The optimal value for (\\ref{eq:relax_const_pert_posq}) is therefore given by \n\t\\begin{align} \n\t& \\sum_{i = 0}^{n} -(1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{(1-p) \\beta \\alpha^{i+1}} \\right) \n\t\\nonumber \\\\ & - p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{ p \\beta \\alpha^i} \\right)\n\t\\nonumber \\\\ & = \\sum_{i = 0}^{n} -(1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\end{align}\n\twhere $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K.$ \n\tApplying the geometric sum formula and simplifying, the optimal value for (\\ref{eq:relax_const_pert_posq}) is \n\t\\begin{align} \n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right).\n\t\\end{align}\n\t\n\tUsing the perturbation analysis from Section 5.6.1 of \\cite{boyd}, we see that the optimal value of (\\ref{eq:relax_const_posq}) is lower bounded 
by\n\t\\begin{align}\n\t\\label{eq:relax_const_posq_bound}\n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\nonumber \\\\ & - v_1^* \\left( (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & - v_3^* \\left(- (1-p)^K (1 - (1-p)^K)^{n+1} +\\epsilon \\right) \n\t\\end{align} \n\twhere $v_1^*$ and $v_3^*$ are the optimal lagrange multipliers for (\\ref{eq:relax_const_pert_posq})\n\twe determined through the KKT conditions.\n\t\n\tTaking the infimum of \\ref{eq:relax_const_posq_bound} then over $\\epsilon \\in \\mathbb{R}_{>0}$ yields the following lower bound on (\\ref{eq:relax_const_posq}):\n\t\n\t\\begin{align}\n\t\\label{eq:relax_const_bound}\n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\nonumber \\\\ & - v_1^* \\left( (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & - v_3^* \\left(- (1-p)^K (1 - (1-p)^K)^{n+1} \\right). \n\t\\end{align} \n\t\n\tThe sequence of optimal values returned by (\\ref{eq:relax_const}) is monotonically decreasing in $n$ clearly. 
Thus, taking the limit of the lower bound (\\ref{eq:relax_const_bound}) as $n \\to \\infty$ and simplifying yields the following lower bound on $-H(X \\; | \\; \\alpha_0 X + Z_{\\alpha})$ for any choice of $\\alpha_i$'s: \t\\begin{align} \n\t& -(p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right).\n\t\\end{align}\n\t\n\tThus, a lower bound on mutual information is\n\t\\begin{align} \n\t& H(p) - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\t\\end{align}\n\tfor any choice of $\\alpha_i$'s.\n\t\n\tFinally, observe that\n\t\\begin{align}\n\t& H(X \\; | \\; X - G) \n\t\\nonumber \\\\ & =\\sum_{i = 0}^{\\infty} (1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{(1-p) \\beta \\alpha^{i+1}} \\right) \n\t\\nonumber \\\\ & + p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{ p \\beta \\alpha^i} \\right)\n\t\\nonumber \\\\ & = (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & + p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\t\\end{align}\n\twhere $G$ is a geometric random variable with success probability $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\},$ and $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K$ as before.\n\t\n\n\t\n\\end{IEEEproof}\n\n\\begin{lemma}\n \\label{lem:alpha0_condition}\n Fixing $\\alpha_0 = 1$ in (\\ref{eq:relax_const}) cannot increase the optimal value.\n\\end{lemma}\n\n\\begin{IEEEproof}\n Let $\\alpha_0 \\in \\mathbb{N}.$ \n\tLet $Z$ be any random variable such that its PMF has integral support and maximum support value at $t = 0.$ Let \n\t\\begin{align}\n\t& \\hat{Z} = Z - 
S\n\t\\end{align} \n\twhere $S = Z \\mod{\\alpha_0}.$ \n\tWe then have that \n\t\\begin{align}\n\t& I(X ; \\alpha_0 X + \tZ) \\geq I(X ; \\alpha_0 X + \\hat{Z}). \n\t\\end{align}\n\tbecause \n\t\\begin{align} \n\t\t& I(X ; \\; \\alpha_0 X + \tZ)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z} + S)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z}, \\; S)\n\t\t\\nonumber \\\\ & = I(X ; \\; S) + I(X ; \\; \\alpha_0 X + \\hat{Z} \\; | \\; S)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z} \\; | \\; S)\n\t\t\\nonumber \\\\ & = H(X \\; | \\; S) - H(X \\; | \\; \\alpha_0 X + \\hat{Z}, \\; S) \n\t\t\\nonumber \\\\ & = H(X) - H(X \\; | \\; \\alpha_0 X + \\hat{Z}, \\; S)\n\t\t\\nonumber \\\\ & \\geq H(X) - H(X \\; | \\; \\alpha_0 X + \\hat{Z})\n\t\t\\nonumber \\\\ & = I(X \\; ; \\; \\alpha_0 X + \\hat{Z})\n\t\\end{align} \n\twhere the third line follows because given $\\alpha_0 X + \\hat{Z} + S,$ we have that \n\t\\begin{align}\n\t S = ((\\alpha_0 X + \\hat{Z} + S) - \\alpha_0) \\mod \\alpha_0 \n\t\\end{align}\n\tand\n \\begin{align}\n\t \\alpha_0 X + \\hat{Z} = (\\alpha_0 X + \\hat{Z} + S) - S.\n\t\\end{align}\n\tThus, any solution to (\\ref{eq:relax_inf1}) with $\\alpha_0 = a$ can be transformed into a solution whose PMF had support values in $\\{t a : \\; t \\in \\mathbb{Z}\\}$ without increasing the mutual information. Therefore, for a fixed $\\alpha_0,$ it suffices to only consider PMFs that have support values in $\\{t \\alpha_0 : \\; t \\in \\mathbb{Z}\\}$ in the optimization. If $\\alpha_0 = a \\in \\mathbb{N}$ any solution random variable $D$ with support values in $\\{t a : \\; t \\in \\mathbb{Z}\\}$ can be transformed into a solution with the same mutual information, $\\alpha_0 = 1,$ and support values in $\\{t : \\; t \\in \\mathbb{Z}\\}.$ This is accomplished by dividing $D$ by $a.$ Therefore it suffices to fix $\\alpha_0=1$ in the optimization. 
\n\t\\end{IEEEproof}\n\n\\section{Main Results}\n\n\n\nIn order to tackle the discrete optimization problem in (\\ref{eq:main2}), we seek to bound its optimal solution.\nOur main result is the following lower bound.\n\n\n\n\\begin{theorem} \\label{thm:main_lower_bound}\n\tFor any $K\\in \\mathbb{N}$ and $p \\in [0, 0.5],$ we have \n\t\\begin{align} \n\n\t& \\min_{\\alpha}\n\tI(X \\; ; \\; X +Z_\\alpha) \n\t\\geq \n\tI(X \\; ; \\; X - G),\n\t\\end{align}\n\twhere $G$ is a geometric random variable with success probability equal to $(1-p)^K$.\n\tand support equal to $\\mathbb{N} \\cup \\{0\\}.$\n\t\\iscomment{instead, let's describe the pmf of $G$ to be concrete}\n\\end{theorem}\n\nIntuitively, Theorem~\\ref{thm:main_lower_bound} says that the noise distribution of $-G$ is worse than the worst-case noise $Z_\\alpha$ for all possible $\\alpha$s.\nThis lower bound can in fact be explicitly computed as\n\\begin{align}\n& I(X \\; ; \\; X - G)\n= H(p) \n\\nonumber \\\\ & - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\\end{align}\n\\iscomment{simpler:}\n\\begin{align}\n& I(X \\; ; \\; X - G)\n= H(p) \n\\nonumber \\\\ & - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{1-(1-p)^{K+1} }{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p \\log \\left( \\frac{1-(1-p)^{K+1}}{p} \\right)\n\\end{align}\nObserve that the formula is quickly computable for any value of $K$ and $p,$ making it attractive from a computational standpoint. To assess how tight the lower bound is, we empirically compare it to an upper bound that is computed with a greedy algorithm in Figure \\ref{fig:greedy_versus_bound_K15}. 
For a given $p$ and $K,$ the greedy algorithm chooses $\\alpha_0 = 1, \\; \\alpha_1 = 1,$ and sequentially chooses \\begin{align} \\alpha_{j} = \\argmin_{\\alpha \\in \\mathbb{N} \\; : \\; 1 \\leq \\alpha \\leq 1 + \\sum_{i = 1}^{j-1} \\alpha_i} I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)\n\\end{align}\nfor $2 \\leq j \\leq K.$ As seen in Figure \\ref{fig:greedy_versus_bound_K15}, $I(X \\; ; \\; X - G)$ serves as a very tight lower bound when compared with the upper bound. We therefore view $I(X \\; ; \\; X - G)$ as an approximation to the original problem. This is surprising, because for a given $K,$ it is not possible in general to choose $\\alpha_i$'s to make the PMF of $\\sum_{i = 1}^{j-1} \\alpha_i Z_i$ look like a shifted version of the PMF for $-G.$\n\nWhile the greedy algorithm requires $\\Omega(2^K)$ time in the worst case, similar plots to Figure \\ref{fig:greedy_versus_bound_K15} can be obtained for larger values of $K$ using a more computationally efficient variation of the greedy algorithm that is discussed in the longer version of this paper.\n\nIn order to prove the lower bound on the minimization of $I(X \\; ; \\; X + \\sum_{i=1}^K \\alpha_i Z_i)$ in Theorem \\ref{thm:main_lower_bound}, we form a convex relaxation of the minimization problem, perturb the relaxation to form a problem that is analytically solvable using KKT conditions, and use perturbation analysis to find a lower bound on the relaxation. This is then a lower bound on the original problem. 
The full proof is in the Section V.\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Comparison of Bounds: $K = 15$},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nymax = .06]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt2_greedy.dat};\n\\addlegendentry{upper bound (greedy)}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black,\ndotted\n] table[x index=0,y index=1] {data\/plt2_bound.dat};\n\\addlegendentry{lower bound}\n\n\\end{axis} \n\\end{tikzpicture}\n\\caption{}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Ratio: Upper Bound to Lower Bound},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nlegend pos = north west]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K15.dat};\n\\addlegendentry{K = 15}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K10.dat};\n\\addlegendentry{K = 10}\n\n\\addplot [\nline width=1pt,\ncolor=green!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K5.dat};\n\\addlegendentry{K = 5}\n\n\\end{axis} \n\\end{tikzpicture}\n\\caption{}\n\\label{fig:greedy_versus_bound_ratio}\n\\end{figure}\n\n\n\n\n\n\n\n\n\n\\section{Efficiently Computable Schemes}\n\n\\rdcomment{this may sound stupid, but i think it warrants a sentence here. `Theorem 1 gives us a lower bound on the mutual information between $X$ and ..., which would be the mixing ratio that provides the highest level of privacy. 
However, it does not provide any mixing scheme for approaching this lower bound.'}\n\nWhile much faster than brute force search, the greedy algorithm used to obtain the upper bound requires $\\Omega(2^K)$ time in the worst case. At a high level, this is due to the fact that at the $j$th step in the algorithm, computation of $I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)$ requires use of each support value in the PMF of $\\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i,$ and there are $\\Omega(2^j)$ support values in the worst case. This motivates the design of efficiently computable schemes, which can then be evaluated using the lower bound in Theorem \\ref{thm:main_lower_bound}.\n\nPerhaps the most natural approach is given by the uniform scheme, which we define to set $\\alpha_i = 1$ for all $i \\in [K]\\cup\\{0\\}.$ This is the scheme used by the authors of \\cite{Maddah-Ali} to obtain their results. The scheme is explicitly defined given $K,$ so no algorithm is needed to compute it. The performance of the scheme for a given $p$ and $K$ can be computed in $O(K)$ time using the formula in Lemma \\ref{lem:uniform_schem_formula} which is given in the appendix.\n\nThe uniform scheme is important because it is optimal as $p \\to 0$ as proved in Lemma \\ref{lem:optimality_of_uniform}\nand exemplified by the optimal curve in Figure \\ref{fig:optimal_scheme_K5_minor_allele}.\n\n\\begin{lemma}\n\\label{lem:optimality_of_uniform}\nFor any $K \\in \\mathbb{N},$ there exists some $p^* > 0$ such that the uniform scheme is optimal for $p < p^*.$\n\\end{lemma}\n\n\\rdcomment{i changed epsilon to $p^*$ here. epsilon is typically taken to mean a very small constant, which makes this result feel weak, i.e. 
this result is only true when p is arbitrarily small.}\n\nAt $p= 0.5,$ in Figure \\ref{fig:optimal_scheme_K5_minor_allele}, the optimal solution is given by the binary scheme, which we define to set $\\alpha_0 = 1$ and $\\alpha_i = 2^{i-1}$ for all $i \\in [K].$ Similar to the uniform scheme, the binary scheme is explicitly defined given $K.$ Also, similar to the uniform scheme, there exists a formula for computing its performance given $p$ and $K$ in $O(K)$ time. This formula is given in Lemma \\ref{lem:binary_scheme_performance} in the appendix. It is interesting that such a formula exists because the PMF of $\\sum_{i=1}^K 2^{i-1} Z_i$ has $2^K$ support values. \n\nMost importantly, the optimality of the binary scheme at $p = 0.5$ for $K=5$ shown in Figure \\ref{fig:optimal_scheme_K5_minor_allele} generalizes to all $K$ as proved in Lemma \\ref{lem:optimality_of_binary}.\n\n\\begin{lemma} \\label{lem:optimality_of_binary}\n\tFor any $K \\in \\mathbb{N},$ the binary scheme is optimal for $p = 0.5.$\n\\end{lemma}\n\nBecause the uniform scheme is optimal for $p \\to 0$ and the binary scheme is optimal for $p = 0.5,$ it is natural to try and combine these schemes to interpolate the performance for $p$ in the range $0 < p < 0.5.$ A solution to the KKT conditions with the added implicit constraint that $q_{(-i)} > 0$ for $i \\in \\mathbb{N} \\cup \\{0\\}$ is given by $q_{(-i)} = (1-p)^K (1 - (1-p)^K)^i$ for $i \\in \\mathbb{N} \\cup \\{0\\}$ which corresponds to $D = -G$ in (\\ref{eq:relax_inf1}) where $G$ is a geometric random variable with success probability equal to $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\}.$ We do not use this observation directly to establish Theorem \\ref{theorem:main_recursion} because no reference on the relation between KKT conditions and optimality in the infinite dimensional case was found. 
\n \n Instead, we constrain $D$ to have distance no greater than $n \\in \\mathbb{N}$ between its maximum and minimum support values in (\\ref{eq:relax_inf2}), derive a lower bound on the solution for each $n \\in \\mathbb{N},$ and use the resulting sequence of lower bounds to establish the result. The support constrained version of (\\ref{eq:relax_inf2}) is given by \n \\begin{align}\n \\label{eq:relax_const}\n & \\min_{q_{(-i)} \\geq 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n \\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \\\\ & \\text{subject to:} \\nonumber \\\\ & \n \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & \\quad q_{(-n-1)} = 0, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1. \\nonumber\n\t\\end{align} \n\n Changing the implicit constraint in (\\ref{eq:relax_const}) from $q_{(i)} \\geq 0$ to $q_{(i)} > 0$ and the explicit constraint $q_{(-n-1)} = 0$ to $q_{(-n-1)} = \\epsilon$ where $\\epsilon \\in \\mathbb{R}_{>0}$ gives us\n \\begin{align}\n \\label{eq:relax_const_posq}\n & \\min_{q_{(-i)} > 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n \\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \\\\ & \\text{subject to:} \\nonumber \\\\ & \n \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & \\quad q_{(-n-1)} = \\epsilon,\n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1. 
\\nonumber\n\t\\end{align} \n\n Let $p^*$ be the optimal value of \\ref{eq:relax_const} and let $p_{\\epsilon}^*$ be the optimal value of \\ref{eq:relax_const_posq} for a given $\\epsilon.$ Due to the continuity of the objective function in (\\ref{eq:relax_const}), it is not hard to show that $ p^* \\geq \\inf_{\\epsilon \\in \\mathbb{R}_{>0}} p_{\\epsilon}^*.$ At a high level, this is due to the fact that for any solution to (\\ref{eq:relax_const}), it follows that (\\ref{eq:relax_const_posq}) can get arbitrarily close to it as $\\epsilon \\to 0$ due to the continuity of the objective function. \n\n\n\n While we do not know the solution to (\\ref{eq:relax_const}) or (\\ref{eq:relax_const_posq}), we will perturb (\\ref{eq:relax_const_posq}) to form a problem we can solve analytically, then use perturbation analysis to lower bound the optimal value of (\\ref{eq:relax_const_posq}), and ultimately lower bound the optimal value of (\\ref{eq:relax_const}). The perturbed version of (\\ref{eq:relax_const_posq}) is given by \n\t\\begin{align}\n\t\\label{eq:relax_const_pert_posq}\n & \\min_{q_{(-i)} > 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n \\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \\\\\t& \n\t\\text{subject to:} \\nonumber \\\\\n\t& \\quad q_{(0)} = (1-p)^K,\n\t\\nonumber \\\\ & \\quad q_{(-n-1)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2} \\nonumber\n\t\\end{align} \n where we have changed the constraint on the probability at the minimum support value, and the constraint on the sum of all the probabilities.\n We can solve this by solving the KKT conditions since the problem is convex, Slater's condition holds, and the objective function and constraint functions are differentiable on the domain $q_{(i)} > 
0.$ \n \n We proceed to find the solution to the KKT conditions for (\\ref{eq:relax_const_pert_posq}). Let $f_0(q)$ be the objective function of (\\ref{eq:relax_const_pert_posq}). The Lagrangian is given by \n\t\\begin{align} \n\t&L(q, v, \\lambda) = f_0(q) \n\t\\nonumber \\\\ & + v_1 \\left(\\sum_i q_{(i)} - 1 + (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & + v_2 \\left(q_{(0)} - (1-p)^K \\right) \n\t\\nonumber \\\\ & + v_3 \\left(q_{(-n-1)} - (1-p)^K (1 - (1-p)^K)^{n+1} \\right). \n\t\\end{align}\n\tBy Lemma \\ref{lem:solve_KKT}, a solution to the KKT conditions is given by \n\t\\begin{align}\n\t\\label{eq:KKT_conditions_sol}\n\t& q_{(-i)} = (1-p)^K (1 - (1-p)^K)^{i} \\text{ for } i \\in [n+1] \\cup \\{0\\} \n\t\\nonumber \\\\ & v_1 = - p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & - (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\nonumber \\\\ & v_2 = (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & v_3 = p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right),\n\t\\end{align}\n which is then an optimal solution to (\\ref{eq:relax_const_pert_posq}). 
\n\n\n\n\n\n\n\tApplying the geometric sum formula and simplifying, the optimal value for (\\ref{eq:relax_const_pert_posq}) is \n\t\\begin{align} \n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\end{align}\n\twhere $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K.$ \n\t\n\tUsing the perturbation analysis from Section 5.6.1 of \\cite{boyd}, we see that the optimal value of (\\ref{eq:relax_const_posq}) is lower bounded by\n\t\\begin{align}\n\t\\label{eq:relax_const_posq_bound}\n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\nonumber \\\\ & - v_1^* \\left( (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & - v_3^* \\left(- (1-p)^K (1 - (1-p)^K)^{n+1} +\\epsilon \\right) \n\t\\end{align} \n\twhere $v_1^*$ and $v_3^*$ are the optimal lagrange multipliers for (\\ref{eq:relax_const_pert_posq})\n\twe determined through the KKT conditions.\n\t\n\tTaking the infimum of (\\ref{eq:relax_const_posq_bound}) over $\\epsilon \\in \\mathbb{R}_{>0}$ yields the following lower bound on (\\ref{eq:relax_const}):\n\t\n\t\\begin{align}\n\t\\label{eq:relax_const_bound}\n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\nonumber \\\\ & - v_1^* \\left( (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & - v_3^* \\left(- (1-p)^K (1 - (1-p)^K)^{n+1} \\right). 
\n\t\\end{align} \n\t\n\tThe sequence of optimal values returned by (\\ref{eq:relax_const}) is clearly monotonically decreasing in $n$. Thus, taking the limit of the lower bound (\\ref{eq:relax_const_bound}) as $n \\to \\infty$ and simplifying yields the following lower bound on $-H(X \\; | \\; \\alpha_0 X + Z_{\\alpha})$ for any choice of $\\alpha_i$'s: \t\\begin{align} \n\t& -(p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right).\n\t\\end{align}\n\t\n\n\n\n\n\n\n\t\n\tFinally, observe that\n\t\\begin{align}\n\t& -H(X \\; | \\; X - G) \n\t\\nonumber \\\\ & = -(p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right)\n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\t\\end{align}\n\twhere $G$ is a geometric random variable with success probability $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\},$ and $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K$ as before.\n\\end{IEEEproof}\t\n\n\\begin{lemma}\n \\label{lem:alpha0_condition}\n Fixing $\\alpha_0 = 1$ in (\\ref{eq:relax_const}) can not increase the optimal value.\n\\end{lemma}\n\n\\begin{IEEEproof}\n Let $\\alpha_0 \\in \\mathbb{N}.$ \n\tLet $Z$ be any random variable such that its PMF has integral support and maximum support value at $t = 0.$ Let \n\t\\begin{align}\n\t& \\hat{Z} = Z - S\n\t\\end{align} \n\twhere $S = Z \\mod{\\alpha_0}.$ \n\tWe then have that \n\t\\begin{align}\n\t& I(X ; \\alpha_0 X + \tZ) \\geq I(X ; \\alpha_0 X + \\hat{Z}). 
\n\t\\end{align} \n\tbecause\n\t\\begin{align} \n\t\t& I(X ; \\; \\alpha_0 X + \tZ)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z} + S)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z}, \\; S)\n\t\t\\nonumber \\\\ & = I(X ; \\; S) + I(X ; \\; \\alpha_0 X + \\hat{Z} \\; | \\; S)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z} \\; | \\; S)\n\t\t\\nonumber \\\\ & = H(X \\; | \\; S) - H(X \\; | \\; \\alpha_0 X + \\hat{Z}, \\; S) \n\t\t\\nonumber \\\\ & = H(X) - H(X \\; | \\; \\alpha_0 X + \\hat{Z}, \\; S)\n\t\t\\nonumber \\\\ & \\geq H(X) - H(X \\; | \\; \\alpha_0 X + \\hat{Z})\n\t\t\\nonumber \\\\ & = I(X \\; ; \\; \\alpha_0 X + \\hat{Z})\n\t\\end{align} \n\twhere the third line follows because given $\\alpha_0 X + \\hat{Z} + S,$ we have that \n\t\\begin{align}\n\t S = ((\\alpha_0 X + \\hat{Z} + S) - \\alpha_0) \\mod \\alpha_0 \n\t\\end{align}\n\tand\n \\begin{align}\n\t \\alpha_0 X + \\hat{Z} = (\\alpha_0 X + \\hat{Z} + S) - S.\n\t\\end{align}\n\tThus, any solution to (\\ref{eq:relax_inf1}) with $\\alpha_0 = a$ can be transformed into a solution whose PMF had support values in $\\{t a : \\; t \\in \\mathbb{Z}\\}$ without increasing the mutual information. Therefore, for a fixed $\\alpha_0,$ it suffices to only consider PMFs that have support values in $\\{t \\alpha_0 : \\; t \\in \\mathbb{Z}\\}$ in the optimization. If $\\alpha_0 = a ,$ any solution random variable $D$ with support values in $\\{t a : \\; t \\in \\mathbb{Z}\\}$ can be transformed into a solution with the same mutual information, $\\alpha_0 = 1,$ and support values in $\\{t : \\; t \\in \\mathbb{Z}\\}.$ This is accomplished by dividing $D$ by $a.$ Therefore it suffices to fix $\\alpha_0=1$ in the optimization. 
\n\\end{IEEEproof}\n\t\n\t\\begin{lemma}\n\t\\label{lem:solve_KKT}\n\t A solution to problem (\\ref{eq:relax_const_pert_posq}) is given by (\\ref{eq:KKT_conditions_sol})\n\t\\end{lemma}\n\t\\begin{IEEEproof}\n\tLet $f_0(q)$ be the objective function of (\\ref{eq:relax_const_pert_posq}). The Lagrangian is given by \n\t\\begin{align} \n\t&L(q, v, \\lambda) = f_0(q) \n\t\\nonumber \\\\ & + v_1 \\left(\\sum_i q_{(i)} - 1 + (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & + v_2 \\left(q_{(0)} - (1-p)^K \\right) \n\t\\nonumber \\\\ & + v_3 \\left(q_{(-n-1)} - (1-p)^K (1 - (1-p)^K)^{n+1} \\right). \n\t\\end{align}\n\t\n\tThe derivative of $f_0(q)$ with respect to $q_{(j)}$ is given by \n\t\\begin{align}\n\t& p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) \\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right)\n\t\\end{align} \n\tfor $j \\in \\{-n, \\; ..., \\; -1\\}$ and the KKT conditions are given by \n\t\\begin{align}\n\t& q_{(0)} = (1-p)^K,\n\t\\nonumber \\\\ & q_{(-n-1)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2}\n\t\\nonumber \\\\ & (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right) + v_1 + v_3 = 0 \\quad \n\t\\nonumber \\\\ & \\text{for } j = - n -1\n\t\\nonumber \\\\ & \tp \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) \n\t\\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right)+ v_1 = 0 \n\t\\nonumber \\\\ & \\forall j \\in \\{-n, \\; ..., \\; -1\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) + v_1 + v_2 = 0 \n\t\\nonumber \\\\ & \\text{for } j = 0.\n\t\\end{align}\n\t\n\n\t\n\tThe last three conditions can be rewritten as \n\t\\begin{align} \n\t& (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+1)}}{q_{(j)}} + (1-p) } \\right) + v_1 + v_3 = 0 \n\t\\nonumber \\\\ & \\text{for } j = - n - 1\n\t\\nonumber 
\\\\ & p \\log \\left(\\frac{p }{(1-p)\\frac{q_{(j-1)}}{q_{(j)}} + p } \\right) \\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+1)}}{q_{(j)}} + (1-p) } \\right) + v_1 = 0 \n\t\\nonumber \\\\ & \\forall j \\in \\{-n, \\; ..., \\; -1\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p }{(1-p) \\frac{q_{(j-1)}}{q_{(j)}} + p } \\right) + v_1 + v_2 = 0 \\quad \\text{for } j = 0\n\t\\end{align}\n\twhich shows that if the ratio $\\frac{q_{(j-1)}}{q_{(j)}}$ between consecutive variables is the same for all $j,$ then $v_1,$ $v_2$ can be chosen so that of these derivatives equal $0$ for all $j.$ Picking \\[\\frac{q_{(j-1)}}{q_{(j)}} = (1 - (1-p)^K),\\] a solution to these equations is then\n\t\\begin{align}\n\t& q_{(-i)} = (1-p)^K (1 - (1-p)^K)^{i} \n\t\\nonumber \\\\ & \\text{for } i \\in \\{0, \\; 1, \\; ..., \\; n+1 \\} \n\t\\nonumber \\\\ & v_1 = - p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & - (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\nonumber \\\\ & v_2 = (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & v_3 = p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right).\n\t\\end{align}\n\t\\end{IEEEproof}\n\t\n\t\\rdcomment{this may be similar to the IS comments, but this proof is a bit hard to read for me. i understand the first few paragraphs in words, but it is not trivial to me to see exactly how this formally changes the problem. i would suggest stating the problem, and then how the support-constraint changes the optimization, and so on. to be honest, if you're tight on space, i still think it would be more beneficial to dedicate more space to this `reduction' and then even leave some of the more mechanical parts of derivatives\/KKT condition analysis out.\n\nalso, we arrived at this result through figures and drawings; can we add some of this into the proof? 
i think it would strengthen the appeal of this paper to hint at why the idea is actually intuitive and cute. as written, it feels more a set of mechanical manipulations of KKT conditions, and it loses the sort of elegance i typically associate with info theory papers. (this is not a comment on the result, it's just a comment on the presentation and how it makes me feel.) i really liked the drawings of the convolution of discrete measures, and how we're really chosing a measure that gets shifted to the left by 1 and rescaled by 1-p and shifted to the right by 1 and rescaled by p. can this be put in this section at all?}\n\n\\rdcomment{stupid question: can you tell me a little more why (12) is this derivative? at a glance, it seems to be that there's a ton of term cancellations... (it's like the terms inside the log don't depend on $q_j$ with the way the derivative is written, so all those terms cancel?) i fear that a unfastidious reviewer may just claim this is a fundamental error and give a poor review without analyzing it deeply. 
(this happens sometimes to me.)}\n\n\n\\section{Main Results}\n\n\n\nIn order to tackle the discrete optimization problem in (\\ref{eq:main2}), we seek to bound its optimal solution.\nOur main result is the following lower bound.\n\n\n\n\\begin{theorem} \\label{thm:main_lower_bound}\n\tFor any $K\\in \\mathbb{N}$ and $p \\in [0, 0.5],$ we have \n\t\\begin{align} \n\n\t& \\min_{\\alpha \\in \\mathbb{N}^K}\n\tI(X \\; ; \\; \\alpha_0 X +Z_\\alpha) \n\t\\geq \n\tI(X \\; ; \\; X - G),\n\t\\end{align}\n\twhere $G$ is a geometric random variable with\n\t$\\Pr(G = i) = (1-p)^K(1 - (1-p)^K)^i$ for $i = 0,1,2,...$.\n\\end{theorem}\n\nIntuitively, Theorem~\\ref{thm:main_lower_bound} says that the noise distribution of $-G$ is worse than the worst-case noise $Z_\\alpha$.\nThis lower bound can in fact be explicitly computed as\n\\begin{align}\n& I(X \\; ; \\; X - G)\n= H(p) \n\\nonumber \\\\ & - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{1-(1-p)^{K+1} }{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p \\log \\left( \\frac{1-(1-p)^{K+1}}{p} \\right)\n\\label{eq:closedform}\n\\end{align}\nObserve that the formula is quickly computable for any value of $K$ and $p,$ making it attractive from a computational standpoint. To assess how tight the lower bound is, we empirically compare it to an upper bound that is computed with a greedy algorithm in Figure \\ref{fig:greedy_versus_bound_K15}. For a given $p$ and $K,$ the greedy algorithm chooses $\\alpha_0 = 1, \\; \\alpha_1 = 1,$ and sequentially chooses \\begin{align} \\alpha_{j} = \\argmin_{\\alpha \\in \\mathbb{N} \\; : \\; 1 \\leq \\alpha \\leq 1 + \\sum_{i = 1}^{j-1} \\alpha_i} I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)\n\\end{align}\nfor $2 \\leq j \\leq K$. 
\n\\iscomment{justify the upper bound of $1+\\sum_{i=1}^{j-1}\\alpha_i$ on $\\alpha_j$}\n\n\nAs seen in Figure \\ref{fig:greedy_versus_bound_K15}, $I(X \\; ; \\; X - G)$ serves as a very tight lower bound when compared with the upper bound. \nA similar picture can be obtained for other values of $K$.\nTherefore, we can think of $I(X \\; ; \\; X - G)$ and its closed-form expression (\\ref{eq:closedform}) as an approximation to the solution of (\\ref{eq:main2}). This is surprising, because for a given $K$, it is not possible in general to choose $\\alpha_i$s to make the pmf of $\\sum_{i = 1}^{j-1} \\alpha_i Z_i$ look like \nthe pmf of $-G$ or a shifted version of it.\n\nWhile finding the greedy solution $(\\alpha_0,...,\\alpha_K)$ requires $\\Omega(2^K)$ time in the worst case, similar plots to Figure \\ref{fig:greedy_versus_bound_K15} can be obtained for larger values of $K$ using a more computationally efficient variation of the greedy algorithm that is discussed in the longer version of this paper \\cite{long}.\n\nIn order to prove the lower bound on the minimization of $I(X \\; ; \\; \\alpha_0 X + \\sum_{i=1}^K \\alpha_i Z_i)$ in Theorem \\ref{thm:main_lower_bound}, we form a convex relaxation of the minimization problem, perturb the relaxation to form a problem that is analytically solvable using KKT conditions, and use perturbation analysis to find a lower bound on the relaxation. \nThis is then a lower bound on the original problem. 
\nThe full proof is provided in Section V.\n\n\\kmcomment{if get rid of Figure \\ref{fig:greedy_versus_bound_K15} and move the two lemma proofs in the proof of main result section to the appendix, then the paper will be 5 pages.}\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Comparison of Bounds: $K = 15$},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nymax = .06]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt2_greedy.dat};\n\\addlegendentry{upper bound (greedy)}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black,\ndotted\n] table[x index=0,y index=1] {data\/plt2_bound.dat};\n\\addlegendentry{lower bound}\n\n\\end{axis} \n\\end{tikzpicture}\n\\caption{}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\n\\longversion{\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Ratio: Upper Bound to Lower Bound},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nlegend pos = north west]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K15.dat};\n\\addlegendentry{K = 15}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K10.dat};\n\\addlegendentry{K = 10}\n\n\\addplot [\nline width=1pt,\ncolor=green!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K5.dat};\n\\addlegendentry{K = 5}\n\n\\end{axis} \n\\end{tikzpicture}\n\n\n\\caption{}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\n}\n\n\n\n\n\n\n\n\n\n\\section{Efficiently Computable Schemes}\n\nTheorem~\\ref{thm:main_lower_bound} provides a lower bound to the \nmutual information between $X$ and the lab's observation $Y$ for any mixing coefficients $\\alpha_0,...,\\alpha_K$, thus providing 
a bound to the privacy levels that can be achieved.\nHowever, it does not provide a mixing scheme that approaches this lower bound.\n\n\n\\rdcomment{this may sound stupid, but i think it warrants a sentence here. `Theorem 1 gives us a lower bound on the mutual information between $X$ and ..., which would be the mixing ratio that provides the highest level of privacy. However, it does not provide any mixing scheme for approaching this lower bound.'}\n\nWhile much faster than brute force search, the greedy algorithm used to obtain the upper bound requires $\\Omega(2^K)$ time in the worst case. At a high level, this is due to the fact that at the $j$th step in the algorithm, computation of $I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)$ requires use of each support value in the pmf of $\\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i,$ and there are $\\Omega(2^j)$ support values in the worst case. This motivates the design of efficiently computable schemes, which can then be evaluated using the lower bound in Theorem \\ref{thm:main_lower_bound}.\n\nPerhaps the most natural approach is given by the uniform scheme, which we define to set $\\alpha_i = 1$ for all $i \\in [K]\\cup\\{0\\}.$ This is the scheme used by the authors of \\cite{Maddah-Ali} to obtain their results. The scheme is explicitly defined given $K,$ so no algorithm is needed to compute it. 
The performance of the scheme for a given $p$ and $K$ can be computed in $O(K)$ time using the formula in Lemma \\ref{lem:uniform_schem_formula} which is given in the appendix.\n\nThe uniform scheme is important because it is optimal as $p \\to 0$ as proved in Lemma \\ref{lem:optimality_of_uniform}\nand exemplified by the optimal curve in Figure \\ref{fig:optimal_scheme_K5_minor_allele}.\n\n\\begin{lemma}\n\\label{lem:optimality_of_uniform}\nFor any $K \\in \\mathbb{N},$ there exists some $p^* > 0$ such that the uniform scheme is optimal for $p < p^*.$\n\\end{lemma}\n\n\nAt $p= 0.5,$ in Figure \\ref{fig:optimal_scheme_K5_minor_allele}, the optimal solution is given by the binary scheme, which we define to set $\\alpha_0 = 1$ and $\\alpha_i = 2^{i-1}$ for all $i \\in [K].$ Similar to the uniform scheme, the binary scheme is explicitly defined given $K.$ Also, similar to the uniform scheme, there exists a formula for computing its performance given $p$ and $K$ in $O(K)$ time. This formula is given in Lemma \\ref{lem:binary_scheme_performance} in the appendix. It is interesting that such a formula exists because the pmf of $\\sum_{i=1}^K 2^{i-1} Z_i$ has $2^K$ support values. \n\nMost importantly, the optimality of the binary scheme at $p = 0.5$ for $K=5$ shown in Figure \\ref{fig:optimal_scheme_K5_minor_allele} generalizes to all $K$ as proved in Lemma \\ref{lem:optimality_of_binary}.\n\n\\begin{lemma} \\label{lem:optimality_of_binary}\n\tFor any $K \\in \\mathbb{N},$ the binary scheme is optimal for $p = 0.5.$\n\\end{lemma}\n\nBecause the uniform scheme is optimal for $p \\to 0$ and the binary scheme is optimal for $p = 0.5,$ it is natural to try and combine these schemes to interpolate the performance for $p$ in the range $0 0$}.\n \\nonumber\n \\end{align}\n Due to Lemma \\ref{lem:alpha0_condition}, we can fix $\\alpha_0 = 1$ in (\\ref{eq:relax_inf1}) above without changing the optimal value. 
Let $q_{(0)}$ be the probability at the maximum support value of $D$ and let $q_{(-i)}$ be the probability at the $i$th integer below the maximum support value of $D.$ Define\n \\begin{align}\n & g_j(q) = (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right) \n\t \\nonumber \\\\ & + p q_{(j+1)} \\log \\left(\\frac{p q_{(j+1)}}{(1-p)q_{(j)} + p q_{(j+1)}} \\right).\n \\end{align}\n Written more explicitly with $\\alpha_0 = 1$ fixed, (\\ref{eq:relax_inf1}) is given by\n \\begin{align}\n \\label{eq:relax_inf2}\n\t& \\min_{q_{(-i)} \\geq 0 : \\; i \\in \\mathbb{N}\\cup\\{0\\} } \\sum_{j \\in \\{-n : \\; n \\in \\mathbb{N}\\}} g_j(q)\n \\\\ & \\text{subject to:} \\quad \\nonumber \\\\ & \\quad q_{(0)} = (1-p)^K, \\quad \\sum_{j} q_{(j)} = 1 \\nonumber\n\t\\end{align} \n Note that the conditional entropy in the objective function is in a simplified form. \n \n Observe that (\\ref{eq:relax_inf2}) is an infinite dimensional convex minimization problem. A solution to the KKT conditions with the added implicit constraint that $q_{(-i)}>0$ for $i \\in \\mathbb{N} \\cup \\{0\\}$ is given by $q_{(-i)} = (1-p)^K (1 - (1-p)^K)^i$ for $i \\in \\mathbb{N} \\cup \\{0\\}$ which corresponds to $D = -G$ in (\\ref{eq:relax_inf1}) where $G$ is a geometric random variable with success probability equal to $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\}.$ We do not use this observation directly to establish Theorem \\ref{theorem:main_recursion} because no reference on the relation between KKT conditions and optimality in the infinite dimensional case was found. \n \n Instead, we constrain $D$ to have distance no greater than $n \\in \\mathbb{N}$ between its maximum and minimum support values in (\\ref{eq:relax_inf2}), derive a lower bound on the solution for each $n \\in \\mathbb{N},$ and use the resulting sequence of lower bounds to establish the result. 
The support constrained version of (\\ref{eq:relax_inf2}) is given by \n \\begin{align}\n \\label{eq:relax_const}\n & \\min_{q_{(-i)} \\geq 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n \\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} g_j(q) \\\\ & \\text{subject to:} \\nonumber \\\\ & \n \\quad q_{(0)} = (1-p)^K, \\quad q_{(-n-1)} = 0, \\quad \\sum_{j} q_{(j)} = 1. \\nonumber\n\t\\end{align} \n\n Changing the implicit constraint in (\\ref{eq:relax_const}) from $q_{(i)} \\geq 0$ to $q_{(i)} > 0$ and the explicit constraint $q_{(-n-1)} = 0$ to $q_{(-n-1)} = \\epsilon$ where $\\epsilon \\in \\mathbb{R}_{>0}$ gives us\n \\begin{align}\n \\label{eq:relax_const_posq}\n & \\min_{q_{(-i)} > 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n \\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} g_j(q)\n \\\\ & \\text{subject to:} \\nonumber \\\\ & \n \\quad q_{(0)} = (1-p)^K, \\quad q_{(-n-1)} = \\epsilon, \\quad \\sum_{j} q_{(j)} = 1. \\nonumber\n\t\\end{align} \n\n Let $p^*$ be the optimal value of \\ref{eq:relax_const} and let $p_{\\epsilon}^*$ be the optimal value of \\ref{eq:relax_const_posq} for a given $\\epsilon.$ Due to the continuity of the objective function in (\\ref{eq:relax_const}), it is not hard to show that $ p^* \\geq \\inf_{\\epsilon \\in \\mathbb{R}_{>0}} p_{\\epsilon}^*.$ At a high level, this is due to the fact that for any solution to (\\ref{eq:relax_const}), it follows that (\\ref{eq:relax_const_posq}) can get arbitrarily close to it as $\\epsilon \\to 0$ due to the continuity of the objective function. \n\n\n\n While we do not know the solution to (\\ref{eq:relax_const}) or (\\ref{eq:relax_const_posq}), we will perturb (\\ref{eq:relax_const_posq}) to form a problem we can solve analytically, then use perturbation analysis to lower bound the optimal value of (\\ref{eq:relax_const_posq}), and ultimately lower bound the optimal value of (\\ref{eq:relax_const}). 
The perturbed version of (\\ref{eq:relax_const_posq}) is given by \n\t\\begin{align}\n\t\\label{eq:relax_const_pert_posq}\n & \\min_{q_{(-i)} > 0 : \\; i \\in [n+1]\\cup\\{0\\} }\n \\sum_{j \\in \\{-m : \\; m \\in [n+1]\\}} g_j(q) \\\\\t& \n\t\\text{subject to:} \\nonumber \\\\\n\t& q_{(0)} = (1-p)^K, \\quad q_{(-n-1)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2} \\nonumber\n\t\\end{align} \n where we have changed the constraint on the probability at the minimum support value, and the constraint on the sum of all the probabilities.\n We can solve this by solving the KKT conditions since the problem is convex, Slater's condition holds, and the objective function and constraint functions are differentiable on the domain $q_{(i)} > 0.$ \n \n We proceed to find the solution to the KKT conditions for (\\ref{eq:relax_const_pert_posq}). Let $f_0(q)$ be the objective function of (\\ref{eq:relax_const_pert_posq}). The Lagrangian is given by \n\t\\begin{align} \n\t&L(q, v, \\lambda) = f_0(q) \n\t\\nonumber \\\\ & + v_1 \\left(\\sum_i q_{(i)} - 1 + (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & + v_2 \\left(q_{(0)} - (1-p)^K \\right) \n\t\\nonumber \\\\ & + v_3 \\left(q_{(-n-1)} - (1-p)^K (1 - (1-p)^K)^{n+1} \\right). \n\t\\end{align}\n\tBy Lemma \\ref{lem:solve_KKT}, a solution to the KKT conditions is given by \n\t\\begin{align}\n\t\\label{eq:KKT_conditions_sol}\n\t& q_{(-i)} = (1-p)^K (1 - (1-p)^K)^{i} \\text{ for } i \\in [n+1] \\cup \\{0\\} \n\t\\nonumber \\\\ & v_1 = - p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & - (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\nonumber \\\\ & v_2 = (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & v_3 = p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right),\n\t\\end{align}\n which is then an optimal solution to (\\ref{eq:relax_const_pert_posq}). 
\n\n\n\n\n\n\n\tApplying the geometric sum formula and simplifying, the optimal value for (\\ref{eq:relax_const_pert_posq}) is \n\t\\begin{align} \n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\end{align}\n\twhere $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K.$ \n\t\n\tUsing the perturbation analysis from Section 5.6.1 of \\cite{boyd}, we see that the optimal value of (\\ref{eq:relax_const_posq}) is lower bounded by\n\t\\begin{align}\n\t\\label{eq:relax_const_posq_bound}\n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\nonumber \\\\ & - v_1^* \\left( (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & - v_3^* \\left(- (1-p)^K (1 - (1-p)^K)^{n+1} +\\epsilon \\right) \n\t\\end{align} \n\twhere $v_1^*$ and $v_3^*$ are the optimal lagrange multipliers for (\\ref{eq:relax_const_pert_posq})\n\twe determined through the KKT conditions.\n\t\n\tTaking the infimum of (\\ref{eq:relax_const_posq_bound}) over $\\epsilon \\in \\mathbb{R}_{>0}$ yields the following lower bound on (\\ref{eq:relax_const}):\n\t\n\t\\begin{align}\n\t\\label{eq:relax_const_bound}\n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\nonumber \\\\ & - v_1^* \\left( (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & - v_3^* \\left(- (1-p)^K (1 - (1-p)^K)^{n+1} \\right). 
\n\t\\end{align} \n\t\n\tThe sequence of optimal values of (\\ref{eq:relax_const}) is clearly monotonically non-increasing in $n,$ since increasing $n$ only enlarges the feasible set. Thus, taking the limit of the lower bound (\\ref{eq:relax_const_bound}) as $n \\to \\infty$ and simplifying yields the following lower bound on $-H(X \\; | \\; \\alpha_0 X + Z_{\\alpha})$ for any choice of $\\alpha_i$'s: \\begin{align} \n\t\\label{eq:lower_bound_neg_cond_ent}\n\t& -(p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right).\n\t\\end{align}\n\t\n\tFinally, observe that\n\t$-H(X \\; | \\; X - G)$ is equal to (\\ref{eq:lower_bound_neg_cond_ent})\n\twhere $G$ is a geometric random variable with\n\t$\\Pr(G = i) = (1-p)^K(1 - (1-p)^K)^i$ for $i \\in \\mathbb{N} \\cup \\{0\\}.$\n\n\\begin{lemma}\n \\label{lem:alpha0_condition}\n Fixing $\\alpha_0 = 1$ in (\\ref{eq:relax_inf1}) cannot increase the optimal value.\n\\end{lemma}\n\n\\begin{IEEEproof}\n Let $\\alpha_0 \\in \\mathbb{N}.$ \n\tLet $Z$ be any random variable such that its pmf has integral support and maximum support value at $t = 0.$ Let \n\t\\begin{align}\n\t& \\hat{Z} = Z - S\n\t\\end{align} \n\twhere $S = Z \\mod{\\alpha_0}.$ \n\tWe then have that \n\t\\begin{align}\n\t& I(X ; \\alpha_0 X + \tZ) \\geq I(X ; \\alpha_0 X + \\hat{Z}). 
\n\t\\end{align} \n\tbecause\n\t\\begin{align} \n\t\t& I(X ; \\; \\alpha_0 X + \tZ)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z} + S)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z}, \\; S)\n\t\t\\nonumber \\\\ & = I(X ; \\; S) + I(X ; \\; \\alpha_0 X + \\hat{Z} \\; | \\; S)\n\t\t\\nonumber \\\\ & = I(X ; \\; \\alpha_0 X + \\hat{Z} \\; | \\; S)\n\t\t\\nonumber \\\\ & = H(X \\; | \\; S) - H(X \\; | \\; \\alpha_0 X + \\hat{Z}, \\; S) \n\t\t\\nonumber \\\\ & = H(X) - H(X \\; | \\; \\alpha_0 X + \\hat{Z}, \\; S)\n\t\t\\nonumber \\\\ & \\geq H(X) - H(X \\; | \\; \\alpha_0 X + \\hat{Z})\n\t\t\\nonumber \\\\ & = I(X \\; ; \\; \\alpha_0 X + \\hat{Z})\n\t\\end{align} \n\twhere the third line follows because given $\\alpha_0 X + \\hat{Z} + S,$ we have that \n\t\\begin{align}\n\t S = ((\\alpha_0 X + \\hat{Z} + S) - \\alpha_0) \\mod \\alpha_0 \n\t\\end{align}\n\tand\n \\begin{align}\n\t \\alpha_0 X + \\hat{Z} = (\\alpha_0 X + \\hat{Z} + S) - S.\n\t\\end{align}\n\tThus, any solution to (\\ref{eq:relax_inf1}) with $\\alpha_0 = a$ can be transformed into a solution whose pmf had support values in $\\{t a : \\; t \\in \\mathbb{Z}\\}$ without increasing the mutual information. Therefore, for a fixed $\\alpha_0,$ it suffices to only consider pmfs that have support values in $\\{t \\alpha_0 : \\; t \\in \\mathbb{Z}\\}$ in the optimization. If $\\alpha_0 = a ,$ any solution random variable $D$ with support values in $\\{t a : \\; t \\in \\mathbb{Z}\\}$ can be transformed into a solution with the same mutual information, $\\alpha_0 = 1,$ and support values in $\\{t : \\; t \\in \\mathbb{Z}\\}.$ This is accomplished by dividing $D$ by $a.$ Therefore it suffices to fix $\\alpha_0=1$ in the optimization. 
\n\\end{IEEEproof}\n\t\n\t\\begin{lemma}\n\t\\label{lem:solve_KKT}\n\t A solution to problem (\\ref{eq:relax_const_pert_posq}) is given by (\\ref{eq:KKT_conditions_sol})\n\t\\end{lemma}\n\t\\begin{IEEEproof}\n\tLet $f_0(q)$ be the objective function of (\\ref{eq:relax_const_pert_posq}). The Lagrangian is given by \n\t\\begin{align} \n\t&L(q, v, \\lambda) = f_0(q) \n\t\\nonumber \\\\ & + v_1 \\left(\\sum_i q_{(i)} - 1 + (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & + v_2 \\left(q_{(0)} - (1-p)^K \\right) \n\t\\nonumber \\\\ & + v_3 \\left(q_{(-n-1)} - (1-p)^K (1 - (1-p)^K)^{n+1} \\right). \n\t\\end{align}\n\t\n\tThe derivative of $f_0(q)$ with respect to $q_{(j)}$ is given by \n\t\\begin{align}\n\t& p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) \\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right)\n\t\\end{align} \n\tfor $j \\in \\{-n, \\; ..., \\; -1\\}$ and the KKT conditions are given by \n\t\\begin{align}\n\t& q_{(0)} = (1-p)^K,\n\t\\nonumber \\\\ & q_{(-n-1)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2}\n\t\\nonumber \\\\ & (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right) + v_1 + v_3 = 0 \\quad \n\t\\nonumber \\\\ & \\text{for } j = - n -1\n\t\\nonumber \\\\ & \tp \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) \n\t\\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+1)} + (1-p) q_{(j)}} \\right)+ v_1 = 0 \n\t\\nonumber \\\\ & \\forall j \\in \\{-n, \\; ..., \\; -1\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-1)} + p q_{(j)}} \\right) + v_1 + v_2 = 0 \n\t\\nonumber \\\\ & \\text{for } j = 0.\n\t\\end{align}\n\t\n\n\t\n\tThe last three conditions can be rewritten as \n\t\\begin{align} \n\t& (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+1)}}{q_{(j)}} + (1-p) } \\right) + v_1 + v_3 = 0 \n\t\\nonumber \\\\ & \\text{for } j = - n - 1\n\t\\nonumber 
\\\\ & p \\log \\left(\\frac{p }{(1-p)\\frac{q_{(j-1)}}{q_{(j)}} + p } \\right) \\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+1)}}{q_{(j)}} + (1-p) } \\right) + v_1 = 0 \n\t\\nonumber \\\\ & \\forall j \\in \\{-n, \\; ..., \\; -1\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p }{(1-p) \\frac{q_{(j-1)}}{q_{(j)}} + p } \\right) + v_1 + v_2 = 0 \\quad \\text{for } j = 0\n\t\\end{align}\n\twhich shows that if the ratio $\\frac{q_{(j-1)}}{q_{(j)}}$ between consecutive variables is the same for all $j,$ then $v_1,$ $v_2,$ and $v_3$ can be chosen so that all of these derivatives equal $0$ for all $j.$ Picking \\[\\frac{q_{(j-1)}}{q_{(j)}} = (1 - (1-p)^K),\\] a solution to these equations is then\n\t\\begin{align}\n\t& q_{(-i)} = (1-p)^K (1 - (1-p)^K)^{i} \n\t\\nonumber \\\\ & \\text{for } i \\in \\{0, \\; 1, \\; ..., \\; n+1 \\} \n\t\\nonumber \\\\ & v_1 = - p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & - (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\nonumber \\\\ & v_2 = (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & v_3 = p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right).\n\t\\end{align}\n\t\\end{IEEEproof}\n\t\n\t\\rdcomment{this may be similar to the IS comments, but this proof is a bit hard to read for me. i understand the first few paragraphs in words, but it is not trivial to me to see exactly how this formally changes the problem. i would suggest stating the problem, and then how the support-constraint changes the optimization, and so on. to be honest, if you're tight on space, i still think it would be more beneficial to dedicate more space to this `reduction' and then even leave some of the more mechanical parts of derivatives\/KKT condition analysis out.\n\nalso, we arrived at this result through figures and drawings; can we add some of this into the proof? 
i think it would strengthen the appeal of this paper to hint at why the idea is actually intuitive and cute. as written, it feels more a set of mechanical manipulations of KKT conditions, and it loses the sort of elegance i typically associate with info theory papers. (this is not a comment on the result, it's just a comment on the presentation and how it makes me feel.) i really liked the drawings of the convolution of discrete measures, and how we're really chosing a measure that gets shifted to the left by 1 and rescaled by 1-p and shifted to the right by 1 and rescaled by p. can this be put in this section at all?}\n\n\\rdcomment{stupid question: can you tell me a little more why (12) is this derivative? at a glance, it seems to be that there's a ton of term cancellations... (it's like the terms inside the log don't depend on $q_j$ with the way the derivative is written, so all those terms cancel?) i fear that a unfastidious reviewer may just claim this is a fundamental error and give a poor review without analyzing it deeply. 
(this happens sometimes to me.)}\n\n\n\\section{Main Results}\n\n\n\nIn order to tackle the discrete optimization problem in (\\ref{eq:main2}) in the entire interval $p \\in [0,0.5]$, we seek to bound its optimal solution.\nOur main result is the following lower bound.\n\n\n\n\\begin{theorem} \\label{thm:main_lower_bound}\n\tFor any $K\\in \\mathbb{N}$ and $p \\in [0, 0.5],$ we have \n\t\\begin{align} \n\n\t& \\min_{\\alpha \\in \\mathbb{N}^{K+1}}\n\tI(X \\; ; \\; \\alpha_0 X +Z_\\alpha) \n\t\\geq \n\tI(X \\; ; \\; X - G),\n\t\\end{align}\n\twhere $G$ is a geometric random variable with\n\t$\\Pr(G = i) = (1-p)^K(1 - (1-p)^K)^i$ for $i = 0,1,2,...$.\n\\end{theorem}\n\nIntuitively, Theorem~\\ref{thm:main_lower_bound} says that the noise distribution of $-G$ is worse than the worst-case noise $Z_\\alpha$.\nThis lower bound can in fact be explicitly computed as\n\\begin{align}\n& I(X \\; ; \\; X - G)\n= H(p) \n\\nonumber \\\\ & - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{1-(1-p)^{K+1} }{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p \\log \\left( \\frac{1-(1-p)^{K+1}}{p} \\right)\n\\label{eq:closedform}\n\\end{align}\nObserve that the formula is quickly computable for any value of $K$ and $p,$ making it attractive from a computational standpoint. To assess how tight the lower bound is, we empirically compare it to an upper bound that is computed with a greedy algorithm in Figure \\ref{fig:greedy_versus_bound_K15}. For a given $p$ and $K,$ the greedy algorithm chooses $\\alpha_0 = 1, \\; \\alpha_1 = 1,$ and sequentially chooses \\begin{align} \\alpha_{j} = \\argmin_{\\alpha \\in \\mathbb{N} \\; : \\; 1 \\leq \\alpha \\leq 1 + \\sum_{i = 1}^{j-1} \\alpha_i} I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)\n\\end{align}\nfor $2 \\leq j \\leq K$. 
At the $j$th step we consider all $\\alpha$'s between $1$ and $1 + \\sum_{i = 1}^{j-1} \\alpha_i$ because $1 + \\sum_{i = 1}^{j-1} \\alpha_i$ is the highest choice for $\\alpha$ that does not increase the number of support values $t$ where $H(X \\; | \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i=t) = 0.$ \n\\iscomment{justify the upper bound of $1+\\sum_{i=1}^{j-1}\\alpha_i$ on $\\alpha_j$}\n\n\nAs seen in Figure \\ref{fig:greedy_versus_bound_K15}, $I(X \\; ; \\; X - G)$ serves as a very tight lower bound when compared with the upper bound. \nA similar picture can be obtained for other values of $K$.\nTherefore, we can think of $I(X \\; ; \\; X - G)$ and its closed-form expression (\\ref{eq:closedform}) as an approximation to the solution of (\\ref{eq:main2}). This is surprising, because for a given $K$, it is not possible in general to choose $\\alpha_i$s to make the pmf of $\\sum_{i = 1}^{j-1} \\alpha_i Z_i$ look like \nthe pmf of $-G$ or a shifted version of it.\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Ratio: Upper Bound to Lower Bound},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nlegend pos = north west]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K15.dat};\n\\addlegendentry{K = 15}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K10.dat};\n\\addlegendentry{K = 10}\n\n\\addplot [\nline width=1pt,\ncolor=green!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K5.dat};\n\\addlegendentry{K = 5}\n\n\\end{axis} \n\\end{tikzpicture}\n\n\n\\caption{}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\n\\iscomment{Maybe a nice figure to have here would be one comparing the pmfs of $-G$ and $Z_{\\alpha^*}$.\n}\n\\kmcomment{I'll get this plot as soon as I can}\n\n\nWhile finding 
the greedy solution $(\\alpha_0,...,\\alpha_K)$ requires $\\Omega(2^K)$ time in the worst case, similar plots to Figure \\ref{fig:greedy_versus_bound_K15} can be obtained for larger values of $K$ using a more computationally efficient variation of the greedy algorithm that is discussed in the longer version of this paper \\cite{long}.\n\nAt a high level, we prove the lower bound on the minimization of $I(X \\; ; \\; \\alpha_0 X + \\sum_{i=1}^K \\alpha_i Z_i)$ in Theorem \\ref{thm:main_lower_bound} by forming a convex relaxation of the minimization problem, perturbing the relaxation to form a problem that is analytically solvable using KKT conditions, and using perturbation analysis to find a lower bound on the relaxation. \nThis is then a lower bound on the original problem. \nThe full proof is provided in Section IV.\n\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Comparison of Bounds: $K = 15$},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nymax = .06]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt2_greedy.dat};\n\\addlegendentry{upper bound (greedy)}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black,\ndotted\n] table[x index=0,y index=1] {data\/plt2_bound.dat};\n\\addlegendentry{lower bound}\n\n\\end{axis} \n\\end{tikzpicture}\n\\caption{}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\n\\longversion{\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Ratio: Upper Bound to Lower Bound},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nlegend pos = north west]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K15.dat};\n\\addlegendentry{K = 15}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black\n] table[x 
index=0,y index=1] {data\/plt4_greedy_opt_ratio_K10.dat};\n\\addlegendentry{K = 10}\n\n\\addplot [\nline width=1pt,\ncolor=green!80!black\n] table[x index=0,y index=1] {data\/plt4_greedy_opt_ratio_K5.dat};\n\\addlegendentry{K = 5}\n\n\\end{axis} \n\\end{tikzpicture}\n\n\n\\caption{}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\\section{Main Result}\n\n\n\nOur main result is stated below.\n\n\\begin{theorem} \\label{thm:main_lower_bound}\n\tFor any $K\\in \\mathbb{N},$ $p \\in [0, 0.5],$ and $\\alpha_i \\in \\mathbb{N}, \\; i \\in [K],$ we have that \n\t\\begin{align} \n\t& \\min_{\\alpha_i \\in \\mathbb{N}, \\; i \\in [K]} I(X \\; ; \\; X + \\sum_{i=1}^{K} \\alpha_i Z_i) \n\t\\geq \n\tI(X \\; ; \\; X - 2G)\n\t\\end{align}\n\twhere $G$ is a geometric random variable with success probability equal to $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\}.$\n\\end{theorem}\nThe formula for $I(X \\; ; \\; X - 2G)$ is given by \n\\begin{align}\n& I(X \\; ; \\; X - 2G)\n\\nonumber \\\\ & = H(p) \n\\nonumber \\\\ & - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\\end{align}\nObserve that the formula is quickly computable for any value of $K$ and $p,$ making it attractive from a computational standpoint. To assess how tight the lower bound is, we empirically compare it to an upper bound that is computed with a greedy algorithm in Figure \\ref{fig:greedy_versus_bound_K15}. 
For a given $p$ and $K,$ the greedy algorithm chooses $\\alpha_1 = 1,$ and sequentially chooses \\begin{align} \\alpha_{j} = \\argmin_{\\alpha \\in \\mathbb{N} \\; : \\; 1 \\leq \\alpha \\leq 1 + \\sum_{i = 1}^{j-1} \\alpha_i} I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)\n\\end{align}\nfor $2 \\leq j \\leq K.$ As seen in Figure \\ref{fig:greedy_versus_bound_K15}, $I(X \\; ; \\; X - 2G)$ serves as a very tight lower bound when compared with the upper bound. We therefore view $I(X \\; ; \\; X - 2G)$ as an approximation to the original problem.\n\nWhile the greedy algorithm requires $\\Omega(2^K)$ time in the worst case, similar plots to Figure \\ref{fig:greedy_versus_bound_K15} can be obtained for larger values of $K$ using a more computationally efficient variation of the greedy algorithm that is discussed in the longer version of this paper.\n\nIn order to prove the lower bound on the minimization of $I(X \\; ; \\; X + \\sum_{i=1}^K \\alpha_i Z_i)$ in Theorem \\ref{thm:main_lower_bound}, we form a convex relaxation of the minimization problem, perturb the relaxation to form a problem that is analytically solvable using KKT conditions, and use perturbation analysis to find a lower bound on the relaxation. This is then a lower bound on the original problem. 
The full proof is in the Section V.\n\n\\begin{figure}[h]\n\\centering\n\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.47\\textwidth,\nheight=0.36\\textwidth,\ntitle ={Comparison of Bounds: $K = 15$},\nylabel = mutual information, \nxlabel = minor allele frequency (p),\nymax = .06]\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt2_greedy.dat};\n\\addlegendentry{upper bound (greedy)}\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black,\ndotted\n] table[x index=0,y index=1] {data\/plt2_bound.dat};\n\\addlegendentry{lower bound}\n\n\\end{axis} \n\\end{tikzpicture}\n\\caption{}\n\\label{fig:greedy_versus_bound_K15}\n\\end{figure}\n\n\n\n\n\n\n\n\n\n\\section{Efficiently Computable Schemes}\n\n\\rdcomment{this may sound stupid, but i think it warrants a sentence here. `Theorem 1 gives us a lower bound on the mutual information between $X$ and ..., which would be the mixing ratio that provides the highest level of privacy. However, it does not provide any mixing scheme for approaching this lower bound.'}\n\nWhile much faster than brute force search, the greedy algorithm used to obtain the upper bound requires $\\Omega(2^K)$ time in the worst case. At a high level, this is due to the fact that at the $j$th step in the algorithm, computation of $I(X \\; ; \\; X + \\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i)$ requires use of each support value in the PMF of $\\alpha Z_{j} + \\sum_{i = 1}^{j-1} \\alpha_i Z_i,$ and there are $\\Omega(2^j)$ support values in the worst case. This motivates the design of efficiently computable schemes, which can then be evaluated using the lower bound in Theorem \\ref{thm:main_lower_bound}.\n\nPerhaps the most natural approach is given by the uniform scheme, which we define to set $\\alpha_i = 1$ for all $i \\in [K].$ This is the scheme used by the authors of \\cite{Maddah-Ali} to obtain their results. 
The scheme is explicitly defined given $K,$ so no algorithm is needed to compute it. The performance of the scheme for a given $p$ and $K$ can be computed in $O(K)$ time using the formula in Lemma \\ref{lem:uniform_schem_formula} which is given in the appendix.\n\nThe uniform scheme is important because it is optimal as $p \\to 0$ as proved in Lemma \\ref{lem:optimality_of_uniform}\nand exemplified by the optimal curve in Figure \\ref{fig:optimal_scheme_K5_minor_allele}.\n\n\\begin{lemma}\n\\label{lem:optimality_of_uniform}\nFor any $K \\in \\mathbb{N},$ there exists some $p^* > 0$ such that the uniform scheme is optimal for $p < p^*.$\n\\end{lemma}\n\n\\rdcomment{i changed epsilon to $p^*$ here. epsilon is typically taken to mean a very small constant, which makes this result feel weak, i.e. this result is only true when p is arbitrarily small.}\n\nAt $p= 0.5,$ in Figure \\ref{fig:optimal_scheme_K5_minor_allele}, the optimal solution is given by the binary scheme, which we define to set $\\alpha_i = 2^{i-1}$ for all $i \\in [K].$ Similar to the uniform scheme, the binary scheme is explicitly defined given $K.$ Also, similar to the uniform scheme, there exists a formula for computing its performance given $p$ and $K$ in $O(K)$ time. This formula is given in Lemma \\ref{lem:binary_scheme_performance} in the appendix. It is interesting that such a formula exists because the PMF of $\\sum_{i=1}^K 2^{i-1} Z_i$ has $2^K$ support values. 
\n\nMost importantly, the optimality of the binary scheme at $p = 0.5$ for $K=5$ shown in Figure \\ref{fig:optimal_scheme_K5_minor_allele} generalizes to all $K$ as proved in Lemma \\ref{lem:optimality_of_binary}.\n\n\\begin{lemma} \\label{lem:optimality_of_binary}\n\tFor any $K \\in \\mathbb{N},$ the binary scheme is optimal for $p = 0.5.$\n\\end{lemma}\n\nBecause the uniform scheme is optimal for $p \\to 0$ and the binary scheme is optimal for $p = 0.5,$ it is natural to try to combine these schemes to interpolate the performance for $p$ in the range $0 < p < 0.5$. We will obtain a lower bound for this support constrained optimization problem, and use it to obtain a lower bound on the original optimization problem. \n\t\\iscomment{unclear why this can be done}\n\t\n\tTo lower bound the support constrained optimization problem, we work with a relaxation of the support constrained optimization problem. Instead of requiring that the PMF for $T$ be generated as the sum of scaled Bernoulli random variables, we relax the problem to only require that the probability at the highest support value of the PMF of $T$ is equal to $(1-p)^K$ (this holds for any valid PMF of $T$). \n\t\n\tWe then obtain a lower bound on this relaxation of the support constrained optimization problem. Finally, we take the limit of the lower bound as $n$ goes to infinity in order to obtain a lower bound on the original minimization problem.\n\t\n\tThe support-constrained relaxation of the original problem is given below where the objective function is a simplified form of $-H(X \\; | \\; X+D)$ where $D$ is the noise distribution we are optimizing over. 
Here, $q_{(0)}$ is the probability at the maximum support value of $T$ and $q_{(-i)}$ is the probability at the $i$th integer below the maximum support value of $T.$ Due to the fact that $X, Z_i \\in \\{-1, 1\\},$ we have that all support values are separated by two or more in any valid PMF of $T.$ Observe that the problem is a convex minimization problem.\n\t\\iscomment{not clear how this was obtained:}\n\t\\begin{align}\n\t& \\min_{q_{(-2n-2)}, \\; ..., \\; q_{(0)}} \\sum_{j \\in \\{-2n-2, \\; ...,\\; -2\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+2)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+2)} \\log \\left(\\frac{p q_{(j+2)}}{(1-p)q_{(j)} + p q_{(j+2)}} \\right) \\\\\n\t\\nonumber \\\\ & \\text{subject to:} \\quad q_{(j)} \\geq 0 \\; \\; \\forall j, \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & \\quad q_{(-2n-2)} = 0, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1. \\nonumber\n\t\\end{align} \n\t\n\t\\iscomment{do we need the full formula for the conditional entropy above? makes it harder to parse}\n\tWhile we cannot solve this problem analytically, we will perturb it to form an optimization problem that we can solve analytically. 
We will then use perturbation analysis to obtain a lower bound on the problem above.\n\t\n\t\n\tThe perturbed problem is given below where we have changed the constraint on the probability at the minimum support value, and the constraint on the sum of all the probabilities.\n\t\n\t\\begin{align}\n\t& \\min_{q_{(-2n-2)}, \\; ..., \\; q_{(0)}} \\sum_{j \\in \\{-2n-2, \\; ...,\\; -2\\}} \n\t\\nonumber \\\\ & (1-p)q_{(j)} \\log \\left(\\frac{(1-p)q_{(j)}}{(1-p)q_{(j)} + p q_{(j+2)}} \\right) \n\t\\nonumber \\\\ & + p q_{(j+2)} \\log \\left(\\frac{p q_{(j+2)}}{(1-p)q_{(j)} + p q_{(j+2)}} \\right) \\\\\n\t\\nonumber \\\\\t& \\text{subject to:} \\quad q_{(j)} \\geq 0 \\; \\; \\forall j, \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & \\quad q_{(-2n-2)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\quad \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2} \\nonumber\n\t\\end{align} \n\t\n\t\n\tLet $f_0(q)$ be the objective function above. The Lagrangian is given by \n\t\n\t\\begin{align} \n\t&L(q, v, \\lambda) = f_0(q) \n\t\\nonumber \\\\ & + v_1 \\left(\\sum_i q_{(i)} - 1 + (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & + v_2 \\left(q_{(0)} - (1-p)^K \\right) \n\t\\nonumber \\\\ & + v_3 \\left(q_{(-2n-2)} - (1-p)^K (1 - (1-p)^K)^{n+1} \\right) \n\t\\nonumber \\\\ & - \\sum_i \\lambda_i q_{(i)}. 
\n\t\\end{align}\n\tThe derivative of $f_0(q)$ with respect to $q_{(j)}$ is given by \n\t\\begin{align}\n\t& p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-2)} + p q_{(j)}} \\right) \\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+2)} + (1-p) q_{(j)}} \\right).\n\t\\end{align} \n\tfor $j \\in \\{-2n, \\; ..., \\; -2\\}$ and the KKT conditions are therefore given by \n\t\\begin{align}\n\t& q_{(j)} \\geq 0 \\; \\; \\forall j, \\quad q_{(0)} = (1-p)^K, \n\t\\nonumber \\\\ & q_{(-2n-2)} = (1-p)^K (1 - (1-p)^K)^{n+1}, \n\t\\nonumber \\\\ & \\sum_{j} q_{(j)} = 1 - (1 - (1-p)^K)^{n+2}\n\t\\nonumber \\\\ & \\lambda_j \\geq 0 \\quad \\lambda_j q_{(j)} = 0 \\quad \\forall j \n\t\\nonumber \\\\ & (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+2)} + (1-p) q_{(j)}} \\right) - \\lambda_j + v_1 + v_3 = 0 \\quad \n\t\\nonumber \\\\ & \\text{for } j = - 2n -2\n\t\\nonumber \\\\ & \tp \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-2)} + p q_{(j)}} \\right) \n\t\\nonumber \\\\ & + (1-p) \\log \\left( \\frac{(1-p) q_{(j)}}{p q_{(j+2)} + (1-p) q_{(j)}} \\right) - \\lambda_j + v_1 = 0 \n\t\\nonumber \\\\ & \\forall j \\in \\{-2n, \\; ..., \\; -2\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p q_{(j)}}{(1-p)q_{(j-2)} + p q_{(j)}} \\right) - \\lambda_j + v_1 + v_2 = 0 \n\t\\nonumber \\\\ & \\text{for } j = 0\n\t\\end{align}\n\t\n\t\\rdcomment{stupid question: can you tell me a little more why (12) is this derivative? at a glance, it seems to be that there's a ton of term cancellations... (it's like the terms inside the log don't depend on $q_j$ with the way the derivative is written, so all those terms cancel?) i fear that a unfastidious reviewer may just claim this is a fundamental error and give a poor review without analyzing it deeply. 
(this happens sometimes to me.)}\n\t\n\tThe last three conditions can be rewritten as \n\t\\begin{align} \n\t& (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+2)}}{q_{(j)}} + (1-p) } \\right) - \\lambda_j + v_1 + v_3 = 0 \n\t\\nonumber \\\\ & \\text{for } j = - 2n - 2\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p }{(1-p)\\frac{q_{(j-2)}}{q_{(j)}} + p } \\right) + (1-p) \\log \\left( \\frac{(1-p) }{p \\frac{q_{(j+2)}}{q_{(j)}} + (1-p) } \\right) \n\t\\nonumber \\\\ & - \\lambda_j + v_1 = 0 \\quad \\forall j \\in \\{-2n, \\; ..., \\; -2\\}\n\t\\nonumber \\\\ & p \\log \\left(\\frac{p }{(1-p) \\frac{q_{(j-2)}}{q_{(j)}} + p } \\right) - \\lambda_j + v_1 + v_2 = 0 \\quad \\text{for } j = 0\n\t\\end{align}\n\twhich shows that if the ratio $\\frac{q_{(j-2)}}{q_{(j)}}$ between consecutive variables is the same for all $j$ and $\\lambda_j = 0 $ for all $j,$ then $v_1,$ $v_2$ can be chosen so that of these derivatives equal $0$ for all $j.$ Picking \\[\\frac{q_{(j-2)}}{q_{(j)}} = (1 - (1-p)^K),\\] a solution to these equations is then\n\t\\begin{align}\n\t& q_{(-2i)} = (1-p)^K (1 - (1-p)^K)^{i} \n\t\\nonumber \\\\ & \\quad \\quad \\text{for } i \\in \\{0, \\; 1, \\; ..., \\; n+1 \\} \n\t\\nonumber \\\\ & \\lambda_i = 0 \\quad \\forall i\n\t\\nonumber \\\\ & v_1 = - p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & - (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\nonumber \\\\ & v_2 = (1-p)\\log \\left( \\frac{(1-p)(1 - (1-p)^K)}{(1-p)(1 - (1-p)^K) + p} \\right) \n\t\\nonumber \\\\ & v_3 = p \\log \\left( \\frac{p}{(1-p)(1 - (1-p)^K) + p} \\right)\n\t\\end{align}\n\t\n\tBecause this is a convex optimization problem, this solution is optimal since it satisfies the KKT conditions.\n\t\n\tThe optimal value for the optimization problem is therefore given by \n\t\\begin{align} \n\t& \\sum_{i = 0}^{n} -(1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{(1-p) \\beta 
\\alpha^{i+1}} \\right) \n\t\\\\ & - p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{ p \\beta \\alpha^i} \\right)\n\t\\\\ & = \\sum_{i = 0}^{n} -(1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\\\ & - p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\end{align}\n\twhere $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K.$ \n\tApplying the geometric sum formula and simplifying, we obtain \n\t\\begin{align} \n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right).\n\t\\end{align}\n\t\n\t\n\t\n\n\n\n\n\n\n\t\n\tUsing the perturbation analysis from Section 5.6.1 of \\cite{boyd}, we see that the optimal value of the support constrained relaxation of the original problem is lower bounded by\n\t\\begin{align}\n\t& -(1-p) \\beta \\alpha \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{(1-p) \\alpha} \\right) \n\t\\nonumber \\\\ & - p \\beta \\frac{1 - \\alpha^{n+1}}{1 - \\alpha} \\log \\left( \\frac{(1-p) \\alpha + p }{ p } \\right)\n\t\\nonumber \\\\ & - v_1^* \\left( (1 - (1-p)^K)^{n+2} \\right) \n\t\\nonumber \\\\ & - v_3^* \\left(- (1-p)^K (1 - (1-p)^K)^{n+1} \\right) \n\t\\end{align} \n\twhere $v_1^*$ and $v_3^*$ are the optimal lagrange multipliers for the perturbed problem which we determined above through the KKT conditions.\n\t\n\tThe sequence of optimal values returned by the support constrained relaxation of the original problem is monotonically decreasing in $n$ clearly. 
Thus, taking the limit of the bound as $n \\to \\infty$ and simplifying yields the following lower bound on $-H(X \\; | \\; X + T)$ for any choice of $\\alpha_i$'s: \t\\begin{align} \n\t& -(p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right).\n\t\\end{align}\n\t\n\tThus, a lower bound on mutual information is\n\t\\begin{align} \n\t& H(p) - (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & - p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\t\\end{align}\n\tfor any choice of $\\alpha_i$'s.\n\t\n\tFinally, observe that\n\t\\begin{align}\n\t& H(X \\; ; \\; X - 2G) \n\t\\nonumber \\\\ & =\\sum_{i = 0}^{\\infty} (1-p) \\beta \\alpha^{i+1} \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{(1-p) \\beta \\alpha^{i+1}} \\right) \n\t\\nonumber \\\\ & + p \\beta \\alpha^i \\log \\left( \\frac{(1-p) \\beta \\alpha^{i+1} + p \\beta \\alpha^i }{ p \\beta \\alpha^i} \\right)\n\t\\nonumber \\\\ & = (p-1) \\left( (1-p)^K - 1\\right) \\log \\left( \\frac{(p-1)(1-p)^K + 1}{(p-1) \\left( (1-p)^K -1 \\right)}\\right) \n\t\\nonumber \\\\ & + p \\log \\left( \\frac{(p-1)(1-p)^K + 1}{p} \\right)\n\t\\end{align}\n\twhere $G$ is a geometric random variable with success probability $(1-p)^K$ and support equal to $\\mathbb{N} \\cup \\{0\\},$ and $\\alpha = (1 - (1-p)^K)$ and $\\beta = (1-p)^K$ as before.\n\\end{IEEEproof}\n\n\\section{Exhaustive Search} \n\n\\section{Problem Setting and Preliminaries}\n\nOur goal is to characterize the mixing proportions $\\alpha_0,...,\\alpha_K$ that minimize the mutual information in (\\ref{eq:main}).\nNotice that we \ndo not need to constrain the mixing proportions to add up to $1$, since scaling the observation $Y$\ndoes not change the mutual information.\nAs a result, we can restrict ourselves to solving 
the optimization problem\n\\alpha{\n\\min_{\\alpha \\in \\mathbb{R}^{K+1} : \\; \\alpha > 0} I(X;\\alpha_0 X+Z_\\alpha),\n\\label{eq:main2}\n}\nwhere $\\alpha = (\\alpha_0,...,\\alpha_K)$, $Z_\\alpha = \\sum_{i=1}^K \\alpha_i Z_i$, and $X,Z_1,...,Z_K$ are independent ${\\rm Ber}(p)$ random variables.\n\n\n\nThe optimization problem in (\\ref{eq:main2}) \nis surprisingly complex. \nThe symmetry between the variables $\\alpha_0,...,\\alpha_K$ may suggest that $\\alpha_i = 1$ for $i=0,...,K$ \nwould be an optimal solution.\nHowever, a brute-force solution to (\\ref{eq:main2}) over integer $\\alpha_i$s for small values of $K$ shows that optimal solutions $(\\alpha_0,...,\\alpha_K)$ vary widely for different values of $p$,\nas illustrated in Figure \\ref{fig:optimal_scheme_K5_minor_allele}. \nObserve that the curve appears to be only piecewise smooth. At $p = 0.5,$ the optimal solution is given by $[1,1,2,4,8,16],$ at $p=0.25,$ the optimal solution is given by $[1,1,1,2,3,4],$ and at $p = 0.01,$ the optimal solution is given by $[1,1,1,1,1,1]$. \n\n\n\nAs it turns out, the optimal solution to (\\ref{eq:main2}) can be exactly characterized in the two extremes cases of $p$.\nMore precisely, if we define the \\emph{uniform} scheme to be \n$\\alpha_i = 1$ for $i=0,...,K$,\nand we define the \\emph{binary} scheme to be $\\alpha_0 = 1$ and $\\alpha_i = 2^{i-1}$ for $i=1,...,K$,\nwe have the following result.\n\\begin{theorem}\\label{thm:extremes}\nFix some $K \\in \\mathbb{N}$.\nThen there exists some $p^* > 0$ such that the uniform scheme is optimal for $p < p^*$.\nMoreover, the binary scheme is optimal for $p=0.5$.\n\\end{theorem}\nThe two statements in Theorem~\\ref{thm:extremes} are divided into Lemma \\ref{lem:optimality_of_binary} and Lemma \\ref{lem:optimality_of_uniform}, which are proved in the appendix.\nAside from the cases $p = 0.5$ and $p \\approx 0$, there does not appear to be a simple expression for the optimal solution $\\alpha$. 
\n\n\\vspace{1mm}\n\\noindent\n\\textbf{Notation:} Throughout the paper we use $\\mathbb{N}$ to denote the set of natural numbers excluding $0,$ $\\mathbb{N}_0$ to denote the set of natural numbers including $0,$ and $[N]$ to denote the set of natural numbers in $\\{1, 2, ..., N\\}$ for an integer $N.$\nFor a vector $v,$ $v > 0 $ means that all entries of $v$ are positive.\n\\longversion{\n\\iscomment{\nTo discuss if we have space: convolution of discrete measures, exponential complexity of brute-force search,\n}\n}\n\n\\longversion{\n\nNotice that Equation \\ref{eq:main} behaves very differently from the case where $Z_i$ is Gaussian. In the Gaussian case, each $\\alpha_i$ would be chosen as large as possible for all $i \\in [K].$ However in our problem, if $\\alpha_i > 1$ for all $i \\in [K],$ then \n\\begin{align} \n& I(X \\; ; \\; X + \\sum_{i=1}^{K} \\alpha_i Z_i) \n\\nonumber \\\\ & = H(X) - H(X | X + \\sum_{i=1}^{K} \\alpha_i Z_i) = H(X) \n\\end{align}\ni.e. the scheme does not hide $X$ at all. Therefore, if $\\alpha_0 = 1,$ the solution to Equation \\ref{eq:main} must have $\\alpha_i = 1$ for at least one $i \\in [K].$ \n\n\\rdcomment{i might advocate for a little more discussion here: perhaps talk about how convolution of measures for discrete distributions is fundamentally a different problem than convolutions of continuous measures for this reason. 
i'll defer to IS's opinion on this tho.}\n\n}\n\n\\begin{figure}[t]\n\\centering\n\n\\tikzstyle{every pin}=[fill=white,\n\tdraw=black,\n\tfont=\\footnotesize]\n\t\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.46\\textwidth,\nheight=0.39\\textwidth,\nlegend cell align={left}, \nylabel = mutual information, \nxlabel = minor allele frequency ($p$),\nymax = .28,\nyticklabel style={\n \/pgf\/number format\/precision=3,\n \/pgf\/number format\/fixed},\n]\n\n\\addplot [\nline width=1.5pt,\ndashed,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt1_uniform.dat};\n\\addlegendentry{uniform}\n\n\\addplot [\nline width=1.5pt,\ndashed,\ncolor=green!80!black\n] table[x index=0,y index=1] {data\/plt1_binary.dat};\n\\addlegendentry{binary}\n\n\n\\addplot [\nline width=1.5pt,\ncolor=blue!80!black\n] table[x index=0,y index=1] {data\/plt1_optimal.dat};\n\\addlegendentry{optimal}\n\n\\node[coordinate,pin=right:{$[1,1,1,1,1,1]$}] \n\t\tat (axis cs:.01, 0.0423605362433) {};\n\n\\node[coordinate,pin=5:{$[1,1,1,2,3,4]$}] \n\t\tat (axis cs:.25, 0.0942663435378) {};\n\n\\node[coordinate,pin=192:{$[1,1,2,4,8,16]$}] \n\t\tat (axis cs:.5, 0.03125) {};\n\n\n\n\\end{axis} \n\n\\end{tikzpicture}\n\\caption{\nOptimal value of \n(\\ref{eq:main2}) for integral $\\alpha_i$s compared to the uniform scheme and the binary scheme,\nfor $K=5$ and $p \\in [0,0.5]$.\nAt $p = 0.5,$ the optimal scheme is $\\alpha = [1,1,2,4,8,16].$ At $ p = 0.25,$ the optimal scheme is $\\alpha = [1,1,1,2,3,4].$ At $p = 0.01,$ the optimal scheme is $\\alpha = [1,1,1,1,1,1].$\n}\n\\label{fig:optimal_scheme_K5_minor_allele}\n\\end{figure}\n\n\n\n\n\\section{Exhaustive Search} \n\n\\section{Problem Setting and Preliminaries}\n\nOur goal is to characterize the mixing proportions $\\alpha_0,...,\\alpha_K$ that minimize the mutual information in (\\ref{eq:main}).\nNotice that we \ndo not need to constrain the mixing proportions to add up to $1$, since scaling the observation $Y$\ndoes not change the mutual 
information.\nFurthermore, restricting $\\alpha_0,...,\\alpha_K$ to be positive rational numbers does not change the optimal value (as they can be chosen arbitrarily close to any irrational number).\nFinally, we notice that a rational-valued solution $(\\alpha_0,...,\\alpha_K)$ can be converted onto an integer-valued one with the same value of \\ref{eq:main} by scaling by the least common denominator.\nAs a result, we can restrict ourselves to solving the discrete optimization problem\n\\alpha{\n\\min_{\\alpha \\in \\mathbb{N}^{K+1}} I(X;\\alpha_0 X+Z_\\alpha),\n\\label{eq:main2}\n}\n\\kmcomment{I changed this to $\\alpha \\in \\mathbb{N}^{K+1}$ throughout}\nwhere $\\alpha = (\\alpha_0,...,\\alpha_K)$, $Z_\\alpha = \\sum_{i=1}^K \\alpha_i Z_i$, and $X,Z_1,...,Z_K$ are independent ${\\rm Ber}(p)$ random variables.\n\n\n\nThe optimization problem in (\\ref{eq:main2}) \nis surprisingly complex. \nThe symmetry between the variables $\\alpha_0,...,\\alpha_K$ may suggest that $\\alpha_i = 1$ for $i=0,...,K$ \\kmcomment{I changed this index to start from 0 throughout this section} would be an optimal solution.\nHowever, a brute-force solution to (\\ref{eq:main2}) for small values of $K$ shows that optimal solutions $(\\alpha_0,...,\\alpha_K)$ vary widely for different values of $p$,\nas illustrated in Figure \\ref{fig:optimal_scheme_K5_minor_allele}. \nObserve that the curve appears to be only piecewise smooth. At $p = 0.5,$ the optimal solution is given by $[1,1,2,4,8,16],$ at $p=0.25,$ the optimal solution is given by $[1,1,1,2,3,4],$ and at $p = 0.01,$ the optimal solution is given by $[1,1,1,1,1,1]$. 
\n\n\n\nAs it turns out, the optimal solution to (\\ref{eq:main2}) can be exactly characterized in the two extremes cases of $p$.\nMore precisely, if we define the \\emph{uniform} scheme to be \n$\\alpha_i = 1$ for $i=0,...,K$,\nand we define the \\emph{binary} scheme to be $\\alpha_0 = 1$ and $\\alpha_i = 2^{i-1}$ for $i=1,...,K$,\nwe have the following result.\n\\begin{theorem}\\label{thm:extremes}\nFix some $K \\in \\mathbb{N}$.\nThen there exists some $p^* > 0$ such that the uniform scheme is optimal for $p < p^*$.\nMoreover, the binary scheme is optimal for $p=0.5$.\n\\end{theorem}\nWe prove these results in the longer version of this paper \\cite{long}.\nAside from the cases $p = 0.5$ and $p \\approx 0$, there does not appear to be a simple expression for the optimal solution $\\alpha$. \n\nThroughout the paper we use $\\mathbb{N}$ to denote the set of natural numbers excluding $0,$ $\\mathbb{N}_0$ to denote the set of natural numbers including $0,$ and $[N]$ to denote the set of natural numbers in $\\{1, 2, ..., N\\}$ for an integer $N.$\n\\kmcomment{I changed all the $\\alpha$ vectors to include $\\alpha_0$}\n\\iscomment{we need to be a bit careful because the optimal vectors in Figure 2 do not include $\\alpha_0$}\n\\iscomment{\nTo discuss if we have space: convolution of discrete measures, exponential complexity of brute-force search\n}\n\n\n\\longversion{\n\nNotice that Equation \\ref{eq:main} behaves very differently from the case where $Z_i$ is Gaussian. In the Gaussian case, each $\\alpha_i$ would be chosen as large as possible for all $i \\in [K].$ However in our problem, if $\\alpha_i > 1$ for all $i \\in [K],$ then \n\\begin{align} \n& I(X \\; ; \\; X + \\sum_{i=1}^{K} \\alpha_i Z_i) \n\\nonumber \\\\ & = H(X) - H(X | X + \\sum_{i=1}^{K} \\alpha_i Z_i) = H(X) \n\\end{align}\ni.e. the scheme does not hide $X$ at all. 
Therefore, if $\\alpha_0 = 1,$ the solution to Equation \\ref{eq:main} must have $\\alpha_i = 1$ for at least one $i \\in [K].$ \n\n\\rdcomment{i might advocate for a little more discussion here: perhaps talk about how convolution of measures for discrete distributions is fundamentally a different problem than convolutions of continuous measures for this reason. i'll defer to IS's opinion on this tho.}\n\n}\n\n\\begin{figure}[t]\n\\centering\n\n\\tikzstyle{every pin}=[fill=white,\n\tdraw=black,\n\tfont=\\footnotesize]\n\t\n\\begin{tikzpicture}[scale=0.8]\n\\begin{axis} \n[width=0.46\\textwidth,\nheight=0.36\\textwidth,\nylabel = mutual information, \nxlabel = minor allele frequency ($p$),\nymax = .2,\nyticklabel style={\n \/pgf\/number format\/precision=3,\n \/pgf\/number format\/fixed},\n]\n\n\\addplot [\nline width=1pt,\ncolor=blue!80!black\n] table[x index=0,y index=1] {data\/plt1_optimal.dat};\n\\addlegendentry{optimal}\n\n\\node[coordinate,pin=right:{$[1,1,1,1,1,1]$}] \n\t\tat (axis cs:.01, 0.0423605362433) {};\n\n\\node[coordinate,pin=right:{$[1,1,1,2,3,4]$}] \n\t\tat (axis cs:.25, 0.0942663435378) {};\n\n\\node[coordinate,pin=left:{$[1,1,2,4,8,16]$}] \n\t\tat (axis cs:.5, 0.03125) {};\n\n\n\n\n\\addplot [\nline width=1pt,\ncolor=red!80!black\n] table[x index=0,y index=1] {data\/plt1_uniform.dat};\n\\addlegendentry{uniform}\n\n\n\\end{axis} \n\n\\end{tikzpicture}\n\\caption{\nOptimal value of \n(\\ref{eq:main2}) \nfor $K=5$ and $p \\in [0,0.5]$.\nAt $p = 0.5,$ the optimal scheme is $\\alpha = [1,1,2,4,8,16].$ At $ p = 0.25,$ the optimal scheme is $\\alpha = [1,1,1,2,3,4].$ At $p = 0.01,$ the optimal scheme is $\\alpha = [1,1,1,1,1,1].$\n\\iscomment{modify $\\alpha$s to include $\\alpha_0$? mention uniform scheme in caption}\n\\iscomment{Also, it looks like uniform goes below optimal for small $p$?}\n\\kmcomment{I was missing the p=.01 data point in the uniform curve so it looked wrong. 
I fixed it now.}\n}\n\\label{fig:optimal_scheme_K5_minor_allele}\n\\end{figure}\n\n\n\n\\section{Connection to Prior Work}\n\\label{sec:related}\n\nOur problem formulation (\\ref{eq:main}) is motivated by the problem studied in \\cite{Maddah-Ali}. In \\cite{Maddah-Ali}, the authors consider the same high-level problem of providing privacy to genotype information through the mixture of distinct samples prior to sequencing.\nBut there are several differences in the focus of the analysis in \\cite{Maddah-Ali}.\nIn addition to considering the privacy of \nAlice's genotype information, they also consider the probability of Alice being able to correctly recover her genotype.\nMoreover, they assume the presence of sequencer noise. \nThis means that each reading the sequencer makes is incorrect with some probability. \nThe authors of \\cite{Maddah-Ali} propose a scheme that uses $U \\in \\mathbb{N}$ non-communicating sequencers to sequence $U$ unknown DNA samples with privacy and reconstruction guarantees. Similar to our problem formulation, they also use $K$ noise individuals to generate privacy. However, they do not study the problem of optimizing the proportions of each DNA sample sent to the sequencers to maximize privacy. In contrast, our paper uses only one sequencing laboratory to sequence the DNA of one unknown DNA sample, and optimizes the privacy without considering sequencer noise or the reconstruction condition.\n\nThe proposed solution in \\cite{Maddah-Ali} involves the sequencing of the unknown DNA of $U$ individuals simultaneously using $U$ non-communicating sequencing laboratories and $K$ noise individuals. \nThe mixture of the DNA samples of all $K$ noise individuals and all unknown DNA samples except the $i$th one is sent to the $i$th sequencing laboratory. For each mixture sent to a sequencing laboratory, the included DNA samples are mixed in equal proportion. 
\nObserve that each sequencing laboratory observes a mixture that includes $U-1$ unknown DNA samples. Therefore, the analysis in our paper is directly applicable to the strategy from \\cite{Maddah-Ali} when two unknown samples and two non-communicating sequencing laboratories are used because this is the only case where only one unknown DNA sample is included in the mixture sent to each sequencer.\nIn this case, the scheme used in \\cite{Maddah-Ali} is the uniform scheme in the language of our paper, which we showed generates optimal privacy for $p$ close to $0$.\n\n\nIn \\cite{Maddah-Ali2}, \nthe unknown DNA samples of $U$ individuals \nare mixed with the samples of $K=U$ noise individuals and then sequenced using one sequencing laboratory. \nBoth the $i$th unknown sample and the $i$th noise sample are mixed in with amount $\\alpha_i = 2^i \\alpha_0$ for $i = 0,1, ..., K-1$. \nWhile our analysis only applies to the case $U = K = 1$ of their problem setting, it is interesting that they choose the proportion of each noise individual according to the binary scheme, which we showed is optimal in our problem formulation for $p = 0.5$.\n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{Introduction}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\\section{Introduction}\n\nThree-dimensional human-computer interaction (HCI) is a rich field which is under active development in the robotics and VR\/AR communities.\n\nPointing during HCI often happens via controllers, such as joysticks or different wearable or handheld devices \\cite{Billinghurst_1999, Howard}. For instance, authors in \\cite{Speicher_2018} used HTC Vive controllers to select keys on a virtual keyboard. In \\cite{yuan2019human}, a human-assisted quadcopter navigation system where the user guides the robot through eye-tracker glasses was proposed. Wearable devices are widely used to control remote \\cite{Tsykunov_TOH} or virtual vehicles \\cite{Labazanova_2019}. 
Although different devices proved rich functionality, the user has to hold or wear them during the whole interaction.\n\nOne alternative approach is gesture-based interaction which is widely used in VR\/AR. \\cite{Valentini_2016} showed that Leap Motion is capable of demonstrating suitable accuracy for hand tracking in some operational zones. The drawback is that the finger pointing allows the user to define direction, but not magnitude. Nielsen et al. \\cite{Nielsen_2004} stated that performing certain gestures or long lasting gesturing could be stressful. In addition, these approaches in most cases require a camera setup and provide no haptic or tactile feedback. \n\nMixed Reality (MR) plays an important role for creating new interaction methods, combining VR and robotics. \nHoenig et al. \\cite{HoenigMixedReality2015} used MR to simulate bigger quadrotors with small ones, which followed a virtual human. In this way authors tested human-robot interaction algorithms safely and preserved real flight dynamics. Authors in \\cite{Freund_1999} used MR for industrial robot teleoperation. A human wore a tracked glove and motions from VR were translated to real manipulators.\n\nIn contrast with the discussed works, we introduce a novel MR concept of using a single drone for three-dimensional virtual pointing and interaction. The drone acts as a slingshot and a projectile at the same time. SlingDrone is a selection device which is first used for pointing to an object, then it performs the defined maneuvers to interact with the environment.\n\nThe drone has a hand grip hanging from the bottom. By default, the drone hovers at the same position and the human pulls it in some direction using a hand grip, see Fig.\\ref{teaser_fig}(a). The physical displacement of the drone (which is three-dimensional) is connected to the virtual pointing and visualized in VR (an example of pointing is shown in Fig.\\ref{teaser_fig}(b)). 
A force feedback system with a leash (Fig.\\ref{teaser_fig}(a)) gives the user a better understanding of the interaction. After the pointing phase, the drone is able to perform the defined maneuvers to interact with the surroundings.\n\nIn contrast to the GridDrones project \\cite{Braley_2018}, where the operator grabs one drone to control the motion of the others agent, the interaction with the drone is smooth by its nature, (i) due to the elastic hardware element (leash) in the interface and (ii) due to the fact that the leash is connected to the center of the drone providing zero angular momentum.\n\nThe main novelty of the SlingDrone is that we propose to design and perform maneuvers using one drone, without any additional handheld or wearable control device. For the proof of concept, we tracked the user hand (to be shown in VR to facilitate holder grab) with hand mounted infrared reflective markers, but they can be replaced with Leap Motion attached to the HMD. On the other side, the user also could rely purely on haptic sensation from drone airflow for the hand grab. Since SlingDrone is a flying robot, one of its main advantages is that it can fly to a location that is convenient for a human to perform pointing.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\\section{SlingDrone Technology}\n\n\nA holder allows a human to grab and pull the SlingDrone in any direction in the $X-Y$ plane and downwards (except upwards, which limits the operational zone). 
\nThe drone tries to maintain its desired spatial position \\textbf{p\\textsubscript{des}} while the operator is pulling the hand grip, which causes a slight change in position.\nThe state of the SlingDrone, including the current position \\textbf{p}, is estimated with onboard sensors and a motion capture system.\nThe real displacement vector $\\textbf{D}$ is defined as $\\textbf{p}- \\textbf{p\\textsubscript{des}}$ and connected to the virtual pointing.\n\nThe relation between the real displacement vector and the virtual pointing could have multiple implementations, which incorporate different three-dimensional interaction techniques. For instance, SlingDrone allows the user to select an object that the user's hand cannot reach. We propose to generate a virtual trajectory that starts from the drone position and is defined using the displacement vector $\\textbf{D}$. Vector $\\textbf{D}$ is three-dimensional, therefore it is possible to map it to a point in three-dimensional space.\n\nForce with a vector $\\textbf{F}$ is distributed through the leash from the drone to the human hand. Based on the slingshot analogy, we assume that force feedback potentially helps to improve interaction and make pointing more accurate.\n\nAfter the pointing phase, the drone can be transformed to the projectile mode and is able to fly and perform different cases of 3D interaction with the real or virtual environment.\n\nWhen the drone experiences extreme values in the state parameters (e.g. pitch, roll) during the pointing, the motors shut down. This functionality is introduced as an emergency stop to ensure the safety of the operator.\n\n\\subsection{Pointing During the Slingshot Mode}\n\nPointing or object selection could be defined in multiple ways.\nOne of the most straightforward solutions is a scaling of the displacement vector $\\textbf{D}$ with the coefficient $k$ (usually $k>>1$). 
Then the new vector $\\textbf{R} = -k\\textbf{D}$ defines the point in mid-air (the negative sign can also be omitted, but here it helps to replicate a slingshot - we pull in one direction and the projectile flies in the opposite one).\nBut taking into account that the SlingDrone technology somehow replicates the slingshot operation, we propose to design a ballistic trajectory instead of a straight line. The ballistic trajectory better replicates the projectile flight after the slingshot execution. An intersection of the ballistic trajectory with some surface, line or object could help to define a point in three-dimensional space.\n\nThe proposed ballistic trajectory is generated by an air-drag model defined by a system of second order, nonlinear differential equations:\n\n\n\\begin{equation} \\label{xydir} \n \\Ddot{x} = \\frac{-\\rho C_d A_x\\dot{x}^2}{2m},\n \\Ddot{y} = \\frac{-\\rho C_d A_y\\dot{y}^2}{2m}\n\\end{equation}\n\n\\begin{equation} \\label{zdir} \n \\Ddot{z} = \\frac{-0.5\\rho C_d A_z \\dot{z}^2\\,\\mathrm{sgn}(\\dot{z}) - mg}{m}\n\\end{equation}\nwhere $\\rho$ is the air density, $C_d$ is the coefficient of drag, $A$ is the frontal area of the drone in the respective direction, and $m$ is the mass. An air drag model was used to generate an intuitive trajectory of the free ballistic flight.\n\nEquations (\\ref{xydir}-\\ref{zdir}) are solved using a numerical ODE solver with six initial conditions: the three dimensional vector \\textbf{p\\textsubscript{des}} as the initial position and the three dimensional displacement vector $\\textbf{D}$ as a proxy for initial velocity. $\\textbf{D}$ can be scaled by a coefficient $k$ to achieve a desired distance scale for the trajectory. A value of $k=95$ was used. \nThe three columns representing position are extracted to form a position vector which defines the trajectory. To command the drone to execute the trajectory, polynomial coefficients were generated as in \\cite{richter2016polynomial}. 
\n\nThe coefficients of equations \\eqref{xydir} and \\eqref{zdir} have been selected to model a smooth sphere on a ballistic trajectory. The air density $\\rho$ is 1.23 kg\/m$^3$. The coefficient of drag ($C_d$) is held constant at 0.4. The area is $A_x=A_y=A_z=0.01$m$^2$ and the mass of the virtual body $m$ is 10 kg. Like the trajectory itself, these values can be modified to suit specific use cases.\n\n\n\\subsection{Transition between the Slingshot and the Projectile Modes}\nWhile hovering without interaction with a human, the drone stabilizes itself and experiences small displacements. All displacements under the certain threshold ($\\delta_d=20mm$) are not considered as inputs.\nDuring the pointing, the velocity of the drone displacement is small due to the fact that small displacements cause big change in trajectory (which is defined with the $k$ coefficient, described above). In addition, aiming is almost always performed in a slow manner by the user. Therefore, the drone maintains small velocity $v$ which is mostly under the threshold $\\delta_v$. Based on that fact, we assume that while the following statements are true\n\\begin{equation} \\label{slingshot_mode_condition}\n \\textbf{D}>\\delta_d, v<\\delta_v\n\\end{equation}\nthe drone is in the slingshot mode. We update the trajectory every loop while \\eqref{slingshot_mode_condition} is true. When the user releases the holder, the drone starts to accelerate towards the default hover position and the velocity $v$ becomes bigger than $\\delta_v$ (the trajectory is not updated any more). When the drone approaches the hover position $\\textbf{D}<\\delta_d$, it becomes a projectile and starts to follow the last valid trajectory towards the defined point in space to interact.\n\n\n\\subsection{Human-SlingDrone Interaction Strategy}\nFirst, the SlingDrone approaches the human and starts to hover, entering the slingshot mode (Fig.\\ref{teaser_fig}(a)). 
The user estimates the position of the drone utilizing visual feedback from VR coupled with a haptic sensation from the airflow below the quadrotor, which helps to catch the hand grip. After the human grabs the holder, he or she starts to pull it. When the magnitude of the current displacement vector |$\\textbf{D}$| exceeds the $\\delta_d$ threshold, the trajectory is visualized in VR. Now, the human is able to point the SlingDrone by pulling the holder.\nWhen the user is satisfied with the pointing or the selection, he or she can release the holder. The drone enters the projectile mode (Fig.\\ref{teaser_fig}(c)) and performs the maneuvers and interaction with the surroundings.\n\n\n\\subsection{Implementation}\n\nThe SlingDrone itself, together with the user's hand, and the object of interest are real objects and tracked by a motion capture system with visualization in the virtual scene.\n\n\\subsubsection{Aerial Platform}\nWe used a Crazyflie 2.0 quadrotor to perform the verification of the flight tests.\nTo get the high-quality tracking of the quadrotor during the experiments, we used a Vicon motion capture system with 12 cameras (Vantage V5) covering a $5m \\times 5 m \\times 5 m$ space. We used the Robot Operating System (ROS) Kinetic framework to run the developed software and the ROS stack \\cite{Preiss_2017, HoenigMixedReality2015} for Crazyflie 2.0. The position and attitude update rate was 100 Hz for all drones. Before the verification of the proposed approach, we ensured that we were able to perform a stable and smooth flight, following the desired trajectory. In order to do so, all PID coefficients for the position controller were set to default values for Crazyflie 2.0, according to \\cite{Preiss_2017} (for x,y-axis $k_p$=0.4, $k_d$=0.2, $k_i$=0.05; for z-axis $k_p$=1.25, $k_d$=0.4, $k_i$=0.05). 
Finally, to be able to reach stable displacement of the SlingDrone during interaction, we set all positional integral terms of the PID controller to zero.\n\nAn elastic wire (100 mm in length) is connected to the bottom of the SlingDrone, as shown in Fig.\\ref{teaser_fig}(a), with a holder at the end. \n\n\\subsubsection{VR Development}\nA Virtual Reality application in Unity3D was created to provide the user with a pleasant immersion. Quadcopter, human hand, and remote object models were simulated in VR. The size of each virtual object changes like in real life depending on the distance between the operator and the objects. Vicon Motion Capture cameras were used to transfer positioning and rotation tracking data of each object to the Unity 3D engine. The operator wears HTC Vive VR headset to experience Virtual Reality application.\n \nThe virtual trajectory is updated 30 times per second and the human can get visual feedback in VR about the potential flight path (to avoid obstacles) and about the destination point.\n\n\\begin{figure*}[t]\n\\centering\n\\includegraphics[width=\\textwidth]{fig2.jpg}\\label{true}\n\\caption{Workflow of the user study experiment. (a) No interaction between the human and the drone, (b) User points towards the object of interest, (c) Pink ball represents the projectile.}\n\\label{fig2}\n\\end{figure*}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\\subsection{Application: Grabbing a Remote Object}\nThe goal of the proposed application is to grab a remote object using a magnetic gripper placed on the Crazyflie 2.0 drone. As a gripper for the user, we used a magnet which is also used to attract the remote object of interest.\n\nDuring the pointing, when the end point of the generated trajectory hits the remote object that is intended to be picked, the user releases the holder. The drone enters the projectile mode (Fig.\\ref{teaser_fig}(c)) and starts to follow the defined trajectory representing the slowed flight of passive object. 
When the quadrotor approaches the end of trajectory, to increase the chances of grabbing, it performs a search of object of interest by moving in a square with 0.15 meter side. After that, the drone flies back to the user to perform a delivery.\nAfter the delivery the user is able to experience a tangible sensation of the object of interest.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\\section{User Study}\nSeven right-handed users (22 to 28 years old, six male and one female) took part in the experiments. \nThe protocol of the experiment was approved by a Skolkovo Institute of Science and Technology review board and all participants gave informed consent.\n\n\\subsection{Experimental Methods}\nAs described above, the user's hand, the drone, and the object of interest are visualized in VR. The drone hovered near the human and maintained 1.5 meters in height as shown in Fig. \\ref{fig2}(a). For the experiment we decided to focus on the pointing functionality of the SlingDrone purely in VR. Therefore, we kept the slingshot mode of the drone for the whole experiment. After the user performed aiming (with visualized trajectory as shown in Fig. \\ref{fig2}(b)) and released the hand grip, the drone avoided the transition to the projectile mode. Instead, after the release, the drone just moved to the default hover position. Then, the virtual ball appeared and became a projectile; then, it flied along the desired trajectory in virtual scene (Fig. \\ref{fig2}(c)).\nAll subjects were asked to evaluate the pointing capabilities of SlingDrone technology for 5 minutes. 
Pointing is just a part of the idea behind SlingDrone (another part is the physical flight for interaction), which means that the experimental results cannot be extrapolated to the whole technology.\n\n\n\\subsection{Experimental Results and Discussion}\n\nAll participants positively responded to the device convenience.\nAfter the experiment, we also asked the subjects to answer a questionnaire of 8 questions using bipolar Likert-type seven-point scales. The results are presented in Table \\ref{table}.\n\nAll participants reported that it is was easy to learn of how to use the SlingDrone (in fact it took 10-30 second to understand the workflow), which is supported by question 1 in Table \\ref{table}. \nAccording to the users, it was slightly hard to grab the holder, as far as it is not visualised in VR. For the future work, the holder will be visualised in VR.\nFor the trajectory representation, we used small white balls thrown from the position of the drone. Therefore, when the users change the direction, the trajectory did not change instantly - it takes some time for the balls to perform a flight. That fact was not positively supported by the users. For the future, we plan to replace the balls with a trajectory visualised with dashed line that can be changed instantly.\nSurprisingly, in spite of the small size of the used quadrotor, most of the users reported that they felt some force feedback from the drone, which provides additional information about the magnitude of the input.\n\nIn question 5 users were satisfied with the actual destination of the virtual projectile. Along with that, pulling a drone looks similar to the slingshot operation, which could have an acceptable accuracy in certain cases. Therefore, we propose a hypothesis that the SlingDrone potentially could have a high accuracy. 
During the experiment we did not measure the accuracy of the pointing, but it will be done in the future work.\n\n\n\\begin{table}[]\n\\caption{Evaluation of the participant's experience. Volunteers evaluated these statements, presented in random order, using a 7-point Likert scale (1 = completely disagree, 7 = completely agree). Means and standard deviations are presented.}\n\\label{table}\n\\begin{tabular}{|l|l|c|c|}\n\\hline\n & \\multicolumn{1}{c|}{\\textbf{Questions}} & \\multicolumn{1}{l|}{\\textbf{Mean}} & \\multicolumn{1}{l|}{\\textbf{SD}} \\\\ \\hline\n1 & It was easy to learn how to use SlingDrone. & 6.4 & 0.49 \\\\ \\hline\n2 & \\begin{tabular}[c]{@{}l@{}}It was easy to grab and keep the holder of \\\\ the drone.\\end{tabular} & 5.4 & 1.02 \\\\ \\hline\n3 & \\begin{tabular}[c]{@{}l@{}}The visualized trajectory in VR clearly \\\\ represented the flight path.\\end{tabular} & 5.6 & 0.8 \\\\ \\hline\n4 & \\begin{tabular}[c]{@{}l@{}}I felt the force response from the drone \\\\ when pulling the rope.\\end{tabular} & 5.8 & 1.47 \\\\ \\hline\n5 & \\begin{tabular}[c]{@{}l@{}}I was satisfied with the actual destination \\\\ of the virtual projectile.\\end{tabular} & 5.2 & 0.74 \\\\ \\hline\n6 & I was tired at the end of the experiment. & 1.4 & 0.48 \\\\ \\hline\n7 & I felt comfortable when I used SlingDrone. & 6.0 & 0.89 \\\\ \\hline\n8 & I felt safe when I used SlingDrone. & 7.0 & 0 \\\\ \\hline\n\\end{tabular}\n\\end{table}\n\n\n\n\n\n\n\n\n\n\n\n\n\\section{Conclusion and Future Work}\\label{conclusion}\nIn this paper, we developed SlingDrone - a novel interaction technology that combines the advantages of real and virtual environments. One of the core features is that SlingDrone provides a powerful and easy to use 3D pointing technology.\nTherefore, SlingDrone can also be used for pointing of other robots. 
The intersection between the virtual trajectory and the floor can be used as a desired position for a ground-based robot such as a car.
\n\n\\par\nThe Einstein gyrogroup of dimension three is the pair $(\\mathbf{B}, \\oplus),$ where $\\mathbf{B}=\\{\\mathbf{u}\\in \\mathbb{R}^3 : \\norm{\\mathbf{u}}< 1 \\}$ and $\\oplus$ is the binary operation on $\\mathbf{B}$ given by\n\\begin{equation} \\label{relsum}\n\\oplus: \\mathbf{B} \\times \\mathbf{B} \\rightarrow \\mathbf{B}; \\, (\\mathbf{u},\\mathbf{v})\\mapsto \\mathbf{u} \\oplus \\mathbf{v}:= \\frac{1}{1+\\inner{\\mathbf{u}}{\\mathbf{v}}}\\ler{\\mathbf{u}+\\frac{1}{\\gamma_\\mathbf{u}}\\mathbf{v}+\\frac{\\gamma_\\mathbf{u}}{1+\\gamma_\\mathbf{u}}\\inner{\\mathbf{u}}{\\mathbf{v}} \\mathbf{u}},\n\\end{equation}\nwhere $\\gamma_\\mathbf{u}=\\ler{1-\\norm{\\mathbf{u}}^2}^{-\\frac{1}{2}}$ is the so-called Lorentz factor.\nThe operation $\\oplus$ is called Einstein velocity addition or relativistic sum (cf. \\cite{abe,kim}).\nHere and throughout this paper, $\\inner{\\cdot}{\\cdot}$ stands for the usual Euclidean inner product and $\\norm{\\cdot}$ denotes the induced norm.\n\\par\nThe study of automorphisms (more generally, endomorphisms) of algebraic structures is of special importance in most areas of both mathematics and mathematical physics.\nThe aim of this note is to determine the (continuous) endomorphisms (in particular, automorphisms) of the fundamental structure $(\\mathbf{B}, \\oplus)$ of special relativity theory. \nImportant information on isomorphisms, automorphisms (symmetries) of quantum structures can be found in \\cite{CasVitLahLev04} and other sorts of so-called preservers on similar structures are discussed in \\cite{MB}, Chapter 2.\n\nThe main theorem of this paper is obtained as an application of our recent result on so-called Jordan triple endomorphisms of $2 \\times 2$ positive definite matrices \\cite[Theorem 1]{lmdv}.\nThe other ingredient of our argument is the result \\cite[Theorem 3.4]{kim} of Kim. The discussion below may look rather simple but the mathematical facts and results that we combine are highly nontrivial. 
\n\nOur main result reads as follows.\n\n\\begin{thm} \\label{tmain}\nLet $\\beta: \\mathbf{B} \\rightarrow \\mathbf{B}$ be a continuous map. We have $\\beta$ is an algebraic endomorphism with respect to the operation $\\oplus$, i.e., $\\beta$ satisfies\n$$\n\\beta (\\mathbf{u} \\oplus \\mathbf{v})=\\beta (\\mathbf{u}) \\oplus \\beta(\\mathbf{v}), \\quad \\mathbf{u},\\mathbf{v}\\in \\mathbf{B}\n$$\nif and only if\n\\begin{itemize}\n\\item[(i)]\neither there is an orthogonal matrix $O \\in {\\mathbf{M}}_3(\\mathbb{R})$ such that $$\\beta(\\mathbf{v})=O\\mathbf{v}, \\quad \\mathbf{v}\\in \\mathbf{B};$$\n\\item[(ii)]\nor we have\n$$\n\\beta(\\mathbf{v})=0, \\quad \\mathbf{v}\\in \\mathbf{B}.\n$$\n\\end{itemize}\n\\end{thm}\n\nHere continuity refers to the usual topology on $\\mathbf{B}$ inherited from the Euclidean space $\\mathbb{R}^3$. For a related comment see the remarks at the end of the paper.\n\nBy the above result we have the interesting conclusion that the group of all (continuous) automorphisms of the Einstein gyrogroup $(\\mathbf{B},\\oplus)$ coincides with the orthogonal group of $\\mathbb{R}^3$.\n\n\n\\section{Open Bloch ball, qubit density matrices, and $2 \\times 2$ positive definite matrices of determinant one}\n\nTo prove our main result we need \nan important observation made by Kim what we present below.\n\nWe denote by $\\mathbb{P}_2$ the set of all $2 \\times 2$ positive definite complex matrices.\nLet $\\mathbb{D}$ stand for the set of all $2\\times 2$ regular density matrices, i.e., the collection of all elements of $\\mathbb{P}_2$ with trace 1, $$\\mathbb{D}=\\{A \\in \\mathbb{P}_2\\, | \\, \\mathrm{Tr} A=1\\}.$$ From the quantum theoretical point of view, $\\mathbb{D}$ is the set of all regular density matrices of the $2$-level quantum system.\nOne can define a binary operation $\\odot$ on $\\mathbb{D}$ as\n\\begin{equation*}\n\\odot: \\mathbb{D} \\times \\mathbb{D} \\rightarrow \\mathbb{D}; \\, (A,B) \\mapsto A \\odot B:= 
\\frac{1}{\\mathrm{Tr} AB} A^{\\frac{1}{2}} B A^{\\frac{1}{2}}.\n\\end{equation*}\nFor certain reasons, we call $\\odot$ the normalized sequential product.\n\\par\nThe well-known Bloch parametrization of regular density matrices is the following map:\n\\begin{equation*}\n\\rho: \\mathbb{R}^3 \\supset \\mathbf{B} \\rightarrow {\\mathbf{M}}_2(\\mathbb{C}); \\, \\left[\\begin{array}{c}v_1\\\\v_2\\\\v_3\\end{array}\\right]=\\mathbf{v} \\mapsto \\rho(\\mathbf{v}):=\\frac{1}{2} \\left[\\begin{array}{cc}1+v_3 & v_1- i v_2 \\\\ v_1 + i v_2 & 1-v_3 \\end{array}\\right].\n\\end{equation*}\nThe transformation $\\rho$ is clearly a bijection between $\\mathbf{B}$ and $\\mathbb{D},$ and in fact, by \\cite[Theorem 3.4]{kim}, much more is true.\n\\begin{thm}[S. Kim] \\label{T:kim}\nThe Bloch parametrization $\\rho: (\\mathbf{B}, \\oplus) \\rightarrow (\\mathbb{D}, \\odot); \\, \\mathbf{v} \\mapsto \\rho(\\mathbf{v})$ is an isomorphism.\n\\end{thm}\n\nThroughout this note the word 'isomorphism' refers to a bijective map between algebraic structures which respects (preserves) the relevant algebraic operation(s).\n\\par\nLet us now consider a structure which is similar to the space of $2 \\times 2$ regular density matrices equipped with the normalized sequential product. 
Namely, let $\\mathbb{P}_2^1$ be the set of all $2 \\times 2$ positive definite matrices with determinant $1.$
\nUsing the properties of the determinant, for any $A,B\\in \\mathbb{D}$ we compute\n$$\n\\tau\\ler{A \\odot B}= \\frac{1}{\\sqrt{\\mathrm{Det} \\ler{\\frac{1}{\\mathrm{Tr} AB} A^{\\frac{1}{2}} B A^{\\frac{1}{2}}}}}\\frac{A^{\\frac{1}{2}} B A^{\\frac{1}{2}}}{\\mathrm{Tr} AB}=\\frac{\\mathrm{Tr} AB}{\\sqrt{\\mathrm{Det} \\ler{A^{\\frac{1}{2}} B A^{\\frac{1}{2}}}}}\\frac{A^{\\frac{1}{2}} B A^{\\frac{1}{2}}}{\\mathrm{Tr} AB}\n$$\n$$\n=\\ler{\\frac{A}{\\sqrt{\\mathrm{Det} A}}}^{\\frac{1}{2}}\\frac{B}{\\sqrt{\\mathrm{Det} B}} \\ler{\\frac{A}{\\sqrt{\\mathrm{Det} A}}}^{\\frac{1}{2}}=\\frac{A}{\\sqrt{\\mathrm{Det} A}} \\boxdot \\frac{B}{\\sqrt{\\mathrm{Det} B}}=\\tau(A)\\boxdot\\tau(B).\n$$\nThis completes the proof.\n\\nobreak\\hfill $\\square$\n\\end{proof}\n\nObserve that the inverse of $\\tau$ is given by $\\tau^{-1}(A)=\\frac{1}{\\mathrm{Tr} A} A$, $A\\in \\mathbb{P}_2$.\n\n\n\\section{Proof of the main result}\n\nTo verify the main result of the paper let us recall the following recent result of ours \\cite[Theorem 1]{lmdv} which, beside Kim's observation, is the second main ingredient of the proof of Theorem~\\ref{tmain}. It may look surprising but its content, i.e., the description of the structure of the continuous so-called Jordan triple endomorphisms of $\\mathbb{P}_2$, was an open problem for quite a while and the solution we have finally found is rather complicated resting on highly nontrivial arguments and facts.\n \nBelow continuity of maps on matrix structures refers to any one of the equivalent linear norm topologies on the full matrix algebra. 
\n\n\\begin{thm}\\label{jtrip}\nLet $\\phi: \\mathbb{P}_2 \\rightarrow \\mathbb{P}_2$ be a continuous map.\nAssume that it is Jordan triple endomorphism, i.e., $\\phi$ satisfies\n$$\n\\phi(ABA)=\\phi(A)\\phi(B)\\phi(A),\\quad A,B\\in \\mathbb{P}_2.\n$$\nThen $\\phi$ is of one of the following forms:\n\\begin{itemize}\n\\item[(1)]\nthere is a unitary matrix $U\\in {\\mathbf{M}}_2(\\mathbb{C})$ and a real number $c$ such that $$\\phi(A)=(\\mathrm{Det} A)^c UAU^*, \\quad A\\in \\mathbb{P}_2;$$\n\\item[(2)]\nthere is a unitary matrix $V\\in {\\mathbf{M}}_2(\\mathbb{C})$ and a real number $d$ such that $$\\phi(A)=(\\mathrm{Det} A)^d VA^{-1}V^*, \\quad A\\in \\mathbb{P}_2;$$\n\\item[(3)]\nthere is a unitary matrix $W\\in {\\mathbf{M}}_2(\\mathbb{C})$ and real numbers $c_1,c_2$ such that \n$$\n\\phi(A)=W\\mathrm{Diag} [(\\mathrm{Det} A)^{c_1}, (\\mathrm{Det} A)^{c_2}]W^*, \\quad A\\in \\mathbb{P}_2.\n$$\n\\end{itemize}\n\\end{thm}\n\nUsing this theorem the continuous sequential endomorphisms of $\\mathbb{P}_2^1$ can be described as follows.\n\\begin{cor}\\label{p21}\nLet $\\phi: \\mathbb{P}_2^1 \\rightarrow \\mathbb{P}_2^1$ be a continuous endomorphism with respect to the operation $\\boxdot$ meaning that $\\phi$ satisfies \n$$\n\\phi(A\\boxdot B)=\\phi(A)\\boxdot \\phi(B),\\quad A,B\\in \\mathbb{P}_2^1.\n$$\nThen $\\phi$ is of one of the following forms:\n\\begin{itemize}\n\\item[(1)]\nthere is a unitary matrix $U\\in {\\mathbf{M}}_2(\\mathbb{C})$ such that $$\\phi(A)=UAU^*, \\quad A\\in \\mathbb{P}_2^1;$$\n\\item[(2)]\nthere is a unitary matrix $V\\in {\\mathbf{M}}_2(\\mathbb{C})$ such that $$\\phi(A)=VA^{-1}V^*, \\quad A\\in \\mathbb{P}_2^1;$$\n\\item[(3)] we have\n$$\n\\phi(A)=I, \\quad A\\in \\mathbb{P}_2^1.\n$$\n\\end{itemize}\n\\end{cor}\n\n\\begin{proof}\nIf $\\phi: \\mathbb{P}_2^1 \\rightarrow \\mathbb{P}_2^1$ is a sequential endomorphism, then it is a Jordan triple endomorphism, as well. 
Indeed, $\\phi(A^2)=\\phi(A\\boxdot A)=\\phi(A)\\boxdot \\phi(A)=\\phi(A)^2$ holds for all $A \\in \\mathbb{P}_2^1.$ It follows that $\\phi(ABA)=\\phi(A^2 \\boxdot B)=\\phi(A^2)\\boxdot \\phi(B)=\\ler{\\phi(A)^2}^{\\frac{1}{2}}\\phi(B)\\ler{\\phi(A)^2}^{\\frac{1}{2}}=\\phi(A)\\phi(B)\\phi(A)$ for all $A,B \\in \\mathbb{P}_2^1.$\n\\par\nThe map\n$$\n\\psi: \\mathbb{P}_2 \\rightarrow \\mathbb{P}_2; \\, A\\mapsto\\psi(A):=\\sqrt{\\mathrm{Det} A}\\cdot\\phi\\ler{\\frac{A}{\\sqrt{\\mathrm{Det} A}}}\n$$\nis clearly a continuous Jordan triple endomorphism of $\\mathbb{P}_2$ which extends $\\phi$ (the idea of the definition of $\\psi$ comes from \\cite[proof of Theorem 3]{ML15b}). Now, the statement is an immediate consequence of the previous theorem.\n\\nobreak\\hfill $\\square$ \n\\end{proof}\n\nUsing the isomorphism $\\tau$ defined in Proposition~\\ref{cl1} which is clearly a homeomorphism, too, we can pull back the structural result on the continuous endomorphisms of $(\\mathbb{P}_2^1, \\boxdot)$ to $(\\mathbb{D}, \\odot).$ Namely, the continuous endomorphism of $(\\mathbb{D}, \\odot)$ are exactly the maps of the form\n$$\\tau^{-1} \\circ \\phi \\circ \\tau,$$\nwhere $\\phi$ is a continuous endomorphism of $(\\mathbb{P}_2^1, \\boxdot).$\nThe following corollary can be verified by straightforward computations.\n\\begin{cor} \\label{qubit}\nLet $\\alpha: \\mathbb{D} \\rightarrow \\mathbb{D}$ be a continuous endomorphism with respect to the operation $\\odot$. 
Then $\\alpha$ is of one of the following forms:\n\\begin{itemize}\n\\item[(1)]\nthere is a unitary matrix $U\\in {\\mathbf{M}}_2(\\mathbb{C})$ such that $$\\alpha(A)=UAU^*, \\quad A\\in \\mathbb{D};$$\n\\item[(2)]\nthere is a unitary matrix $V\\in {\\mathbf{M}}_2(\\mathbb{C})$ such that $$\\alpha(A)=\\frac{VA^{-1}V^*}{\\mathrm{Tr} A^{-1}}, \\quad A\\in \\mathbb{D};$$\n\\item[(3)] we have\n$$\n\\alpha(A)=I\/2, \\quad A\\in \\mathbb{D}.\n$$\n\\end{itemize}\n\\end{cor}\n\nPutting all information we have together, the proof of the main result is now easy.\n\n\n\\begin{proofs}\nWe have learned from the result Theorem~\\ref{T:kim} due to Kim that the Bloch parametrization $\\rho$ is an isomorphism between $(\\mathbf{B}, \\oplus)$ and $(\\mathbb{D}, \\odot)$. Clearly, $\\rho$ is a homeomorphism, too. Therefore, the continuous endomorphisms of $(\\mathbf{B}, \\oplus)$ are exactly the maps of the form $\\beta=\\rho^{-1} \\circ \\alpha \\circ \\rho,$ where $\\alpha$ is a continuous endomorphism of $(\\mathbb{D},\\odot).$\n\\par\nBy Corollary~\\ref{qubit} there are three possibilities.\nAssume first that we have a unitary $U\\in {\\mathbf{M}}_2(\\mathbb{C})$ such that \n$\\alpha(A)=UAU^*,$ $A\\in \\mathbb{D}$. Denote by ${\\bf H}_2^0(\\mathbb{C})$ the linear space of all traceless self-adjoint $2\\times 2$ complex matrices and equip this space with the inner product $\\langle A,B\\rangle := \\frac{1}{2} \\mathrm{Tr} AB$, $A,B\\in {\\bf H}_2^0(\\mathbb{C})$. Define\n\\begin{equation*} \n\\gamma: \\mathbb{R}^3 \\rightarrow {\\bf H}_2^0(\\mathbb{C}); \\, \\left[\\begin{array}{c}v_1\\\\v_2\\\\v_3\\end{array}\\right]=\\mathbf{v} \\mapsto \\gamma(\\mathbf{v}):=\\left[\\begin{array}{cc}v_3 & v_1- i v_2 \\\\ v_1 + i v_2 & -v_3 \\end{array}\\right].\n\\end{equation*}\nClearly, $\\gamma$ is a linear isomorphism from $\\mathbb{R}^3$ onto ${\\bf H}_2^0(\\mathbb{C})$ which preserves the inner product. 
Define $\\tilde \\alpha : {\\bf H}_2^0(\\mathbb{C}) \\to {\\bf H}_2^0(\\mathbb{C})$ by $\\tilde \\alpha(A)=UAU^*$, $A\\in {\\bf H}_2^0(\\mathbb{C})$. Then $O:=\\gamma^{-1} \\circ \\tilde \\alpha \\circ \\gamma$ is an orthogonal linear transformation on $\\mathbb{R}^3$ and using the relation $\\gamma (\\mathbf{v})=2\\rho(\\mathbf{v})-I$, $\\mathbf{v}\\in \\mathbf{B}$, we easily deduce that\n$\\rho \\circ \\alpha \\circ \\rho=O$ holds.\n\\par\nIf $\\alpha(A)=\\frac{VA^{-1}V^*}{\\mathrm{Tr} A^{-1}},$ $A\\in \\mathbb{D}$, then\nthe conclusion follows from the previous case. The only thing we have to observe is that\n$\\frac{(\\rho(\\mathbf{v}))^{-1}}{\\mathrm{Tr} (\\rho(\\mathbf{v}))^{-1}}= \\rho(-\\mathbf{v})$ holds which follows from \\cite[Remark 3.5]{kim}.\n\\par \nFinally, if $\\alpha(A)=I\/2$, then we clearly have $\\rho^{-1} \\circ \\alpha \\circ \\rho=0.$\n\\par\nThe converse statement that the formulas in (i) and (ii) \ndefine continuous endomorphisms of the Einstein gyrogroup is just obvious.\n\\nobreak\\hfill $\\square$\n\\end{proofs}\n\n\n\\begin{remark}\nWe conclude our note with some remarks.\n\nAbove we have given the complete description of all continuous endomorphisms of $\\mathbf{B}$ under the operation of Einstein velocity addition.\nIn the recent paper \\cite{abe} Abe have described the automorphisms of the Einstein gyrovector space \n\\cite[Theorem 3.1]{abe}. He concludes that those automorphisms are exactly the restrictions of orthogonal linear transformations onto the open unit ball. To see clearly the content of his result which looks very closely related to ours, one needs to be cautious and look at the definition \\cite[Definition 2.13]{abe} of automorphisms of gyrovector spaces. In fact, that definition includes the assumption about the preservation of the inner product. 
Hence, Abe's result says that every bijective map of $\\mathbf{B}$ which preserves the Einstein addition (plus a sort of scalar multiplication) and also preserves the inner product necessarily originates from an orthogonal linear transformation on $\\mathbb{R}^3$.\n\nWe need to point out that the requirement concerning the inner product preserving property is very strong, it alone implies the above conclusion. Indeed, any inner product preserving map $\\phi:\\mathbf{B} \\to \\mathbf{B}$ easily extends to an inner product preserving map on $\\mathbb{R}^3$, see the proof of \\cite[Lemma 4.4]{abe}. Moreover, it is well-known that on any inner product space the inner product preserving maps are automatically linear. That means that Abe's result carries no information concerning the automorphism of $\\mathbf{B}$ endowed merely with the Einstein addition $\\oplus$ and the usual topology. Let us remark at this point that it is not difficult to see that the usual topology coincides with the topology generated by the Einstein gyrometric, see \\cite[Example 2.10]{abe}.\n\nSo we can tell that our result is much different and in fact much stronger than Abe's but we also have to point out that\nour result is proved only in three dimension. \nThe main reason for this is that Kim's isomorphic identification between the open unit ball and the set of all regular density matrices is valid only in that low dimensional case. Though, for its physical content, the most important case is certainly this one,\nit would very be interesting to know what happens in higher dimensions. 
We propose this as an open problem.\n\\end{remark}\n\n\n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{Introduction}\n\n\n\\input{intro.tex}\n\n\\section{Proposed Methods}\n\\input{methods.tex}\n\n\n\\section{Learning}\n\\input{learning.tex}\n\n\n\\section{Experiments}\n\\input{experiments.tex}\n\n\n\\section{Results}\n\\input{results.tex}\n\n\n\\section{Analysis}\n\\input{analysis.tex}\n\n\\section{Related Work}\n\\input{related_work.tex}\n\n\\section{Conclusion}\n\\input{conclusion.tex}\n\n\n\\section*{Acknowledgments}\nWe would like to thank the anonymous reviewers, NVIDIA for donating GPUs used in this research, Jessy Li for clarifying the experimental setup used in~\\citet{li2015fast}, and Google for a faculty research award to K.~Gimpel that partially supported this research.\n\n\n\\subsection{Sentence Entailment}\n\\citet{vilnis2014word} explored whether their Gaussian word entropies captured the lexical entailment relationship. Here we analyze the extent to which our representations capture sentential entailment.\n\nWe test models on the SNLI test set, assuming that for a given premise $p$ and hypothesis $h$, $p$ is more specific than $h$ for entailing sentence pairs. To avoid effects due to sentence length, we only consider $\\langle p, h\\rangle$ pairs with the same length. After this filtering, entailment\/neural\/contradiction categories have 120\/192\/208 instances respectively. We encode each sentence and calculate the percentage of cases in which the hypothesis has larger entropy (or smaller norm for non-probabilistic models) than the premise. Under an ideal model, this would happen with 100\\% of entailing pairs while showing random results (50\\%) for the other two types of pairs. \n\nAs shown in Table~\\ref{snli-res}, our best paraphrase-trained models show similar trends to InferSent, achieving around 75\\% accuracy in the entailment category and around 50\\% accuracy in other categories. 
Although ELMo can also achieve similar accuracy in the entailment category, it seems to conflate entailment with contradiction, where it shows the highest percentage of \nall models. Other models, including BERT, GloVe, and Skip-thought, are much closer to random (50\\%) for entailing pairs.\n\n\n\n\n\\subsection{Lexical Analysis}\n\n\n\\begin{table}[t]\n\\small\n\\setlength{\\tabcolsep}{2pt}\n\\centering\n\\begin{tabular}{c|c|c|c}\n \\multicolumn{2}{c|}{Small norm} & \\multicolumn{2}{c}{Large norm}\\\\\n\\hline\n small abs. ent. & small ent. & small abs. ent. & small ent.\\\\\n\\hline\n, & addressing & staveb & cenelec\\\\\n\/ & derived & jerusalem & ohim\\\\\nby & decree & trent & placebo\\\\\nan & fundamental & microwave & hydrocarbons\\\\\ngon & beneficiaries & brussels & iec\\\\\nas & tendency & synthetic & paras\\\\\nhaving & detect & christians & allah\\\\\na & reservations & elephants & milan\\\\\non & remedy & seldon & madrid\\\\\nfor & eligibility & burger & $\\pm$\\\\%10\nfrom & film-coated & experimental & ukraine\\\\\n'd & breach & alison & intravenous\\\\\n--- & exceed & 63 & electromagnetic\\\\\nhis & flashing & prophet & 131\\\\\n' & objectives & diego & electrons\\\\\nupon & cue & mallory & northeast\\\\\nunder & commonly & \\\"{o} & blister\\\\\ntowards & howling & natalie & http\\\\\n's & vegetable & hornblower & renal\\\\\nwith & bursting & korea & asteroid\\\\%20\n\\end{tabular}\n\\caption{Examples showing top-20 lists of large-norm or small-norm words ranked based on small absolute entropy or small entropy in WLO\\xspace.}\n\\label{lexical-examples-full}\n\\end{table}\n\n\nWLO\\xspace associates translation and scaling parameters with each word, allowing us to \nanalyze the impact of words on sentence representations. We ranked words under several criteria based on their translation parameter norms and single-word sentence entropies. 
Table~\\ref{lexical-examples-full} shows the top 20 words under each criterion.\n\n\n\\input{table-sentence-examples}\n\n\nWords with small norm and small absolute entropy have little effect, both in terms of meaning and specificity; they are mostly function words. \nWords with large norm and small entropy have a large impact on the sentence while also making it more specific. %\nThey are organization names (\\emph{cenelec}) or technical terms found in medical or scientific literature. When they appear in a sentence, they are very likely to appear in its paraphrase. \n\nWords with large norm and small absolute entropy contribute to the sentence semantics but do not make it more specific. Words like \\emph{microwave} and \\emph{synthetic} appear in many contexts and have multiple senses. Names (\\emph{trent}, \\emph{alison}) also appear in many contexts. Words like these often appear in a sentence's paraphrase, but can also appear in many other sentences in different contexts. \n\nWords with small norm\/entropy make sentences more specific \nbut do not lend themselves to a precise characterization. They affect sentence meaning, but can be expressed in many ways. \nFor example, when \\emph{beneficiaries} appears in a sentence, its paraphrase often has a synonym like \\emph{beneficiary}, \\emph{heirs}, or \\emph{grantees}. These words may have multiple senses, but it appears more that they correspond to concepts with many valid ways of expression. \n\n\n\n\\subsection{Sentential Analysis}\n\nWe subsample the ParaNMT training set and group sentences by length. For each model and length, we pick the sentence with either highest\/lowest entropy or largest\/smallest norm values. Table~\\ref{sentence-examples} shows some examples. %\n\\textsc{Wordsum}\\xspace tends to choose conversational sentences as general and those with many rare words as specific. 
WLO\\xspace favors literary and technical\/scientific sentences as most specific, and bureaucratic\/official language as most general. \n\n\n\n\n\n\\begin{table}[t]\n\\setlength{\\tabcolsep}{5pt}\n\\centering\n\\small\n\\begin{tabular}{l|cc|cc}\n &\\multicolumn{2}{c|}{With Prior} &\\multicolumn{2}{c}{Without Prior} \\\\ \n & Acc. & $F_{1}$ & Acc. & $F_{1}$ \\\\\n\\hline\nWLO\\xspace & 77.4 & 78.4 & 67.9 & 68.2 \\\\\n\n\\end{tabular}\n\\caption{Accuracy (\\%) and $F_{1}$ score (\\%) for specificity News test set with and without prior regularization.}\n\\label{prior-res}\n\\end{table}\n\n\\subsection{Effect of Prior Regularization}\nAs shown in Table~\\ref{prior-res}, there is a large performance improvement after adding prior regularization for avoiding degenerate solutions.\n\n\n\n\\begin{table}[t]\n\\setlength{\\tabcolsep}{5pt}\n\\centering\n\\small\n\\begin{tabular}{l|c}\n & STS Benchmark\\\\\n\\hline\n\\textsc{Wordavg}\\xspace & 73.4 \\\\\n\\textsc{LSTMavg}\\xspace & 73.6 \\\\\n\\textsc{LSTMGaussian}\\xspace & \\textbf{74.3} \\\\\nWLO\\xspace & 73.7 \\\\\n\n\\end{tabular}\n\\caption{Pearson correlation (\\%) for STS benchmark test set. Highest number is in bold.}\n\\label{sts-res}\n\\end{table}\n\n\\subsection{Semantic Textual Similarity}\nAlthough semantic textual similarity is not our target task, we still include the performance of our models on the STS benchmark test set in Table~\\ref{sts-res} to show that our models are competitive with standard strong baselines. \nWhen using probabilistic models to predict sentence similarity during test time, we let $v_1=\\mathit{concat}(\\mu_1,\\Sigma_1)$, $v_2=\\mathit{concat}(\\mu_2,\\Sigma_2)$, where $\\mathit{concat}$ is a concatenation operation, and predict sentence similarity via $\\mathit{cosine}(v_1, v_2)$, since we find it performs better than solely using the mean vectors. 
\nThe two probabilistic models, \\textsc{LSTMGaussian}\\xspace and WLO\\xspace, are able to outperform the baselines slightly.\n\n\n\n\n\\subsection{Baseline Methods}\nWe consider two baselines that have shown strong results on sentence similarity tasks \\citep{para-nmt-acl-18}. The first, word averaging (\\textsc{Wordavg}\\xspace), simply averages the word embeddings in the sentence. The second, long short-term\nmemory~(LSTM;~\\citealp{hochreiter1997long}) averaging (\\textsc{LSTMavg}\\xspace), uses an LSTM to encode the sentence and averages the hidden vectors. Inspired by sentence VAEs~\\cite{bowman16gen}, we consider an LSTM based probabilistic baseline (\\textsc{LSTMGaussian}\\xspace) which builds upon \\textsc{LSTMavg}\\xspace and uses separate linear transformations on the averaged hidden states to produce the mean and variance of a Gaussian distribution. \n\nWe also benchmark several pretrained models, including GloVe~\\cite{glove}, Skip-thought~\\cite{Kiros2015skipthought}, InferSent~\\cite{infersent}, BERT~\\cite{devlin-etal-2019-bert}, and ELMo~\\cite{peters-etal-2018-deep}. When using GloVe, we either sum embeddings (GloVe \\textsc{sum}\\xspace) or average them (GloVe \\textsc{avg}\\xspace) to produce a sentence vector. Similarly, for ELMo, we either sum the outputs from the last layer (ELMo \\textsc{sum}\\xspace) or average them (ELMo \\textsc{avg}\\xspace). For BERT, we take the representation for the ``[CLS]'' token. 
\n\n\n\n\\begin{table}[t]\n \\small\n \\centering\n \\begin{tabular}{l|c|c|c|c}\n Domain & News & Twitter & Yelp & Movie \\\\\\hline\n Number of instances & 900 & 984 & 845 & 920\n \\end{tabular}\n \\caption{Sizes of %\n test sets for sentence specificity.}\n \\label{tab:data}\n\\end{table}\n\n\\subsection{Datasets}\nWe use the preprocessed version of ParaNMT-50M~\\cite{para-nmt-acl-18} as our training set, which consists of 5 million paraphrase pairs.\n\nFor evaluating sentence specificity, we use human-annotated test sets from four domains, including news, Twitter, Yelp reviews, and movie reviews, from \\citet{li2015fast} and \\citet{ko2018domain}. \nFor the news dataset, labels are either ``general'' or ``specific'' and there is additionally a training set. For the other datasets, labels are real values indicating specificity. \nStatistics for these datasets are shown in Table~\\ref{tab:data}.\n\nFor analysis we also use the semantic textual similarity (STS) \nbenchmark test set~\\cite{cer2017semeval} and the Stanford Natural Language Inference (SNLI) dataset~\\citep{bomwn2015large}. \n\n\n\n\n\\subsection{Specificity Prediction Setup}\nFor predicting specificity in the news domain, we threshold the predictions either based on the entropy of Gaussian distributions produced from probabilistic models or based on the norm of vectors produced by deterministic models, which includes all of the pretrained models. The threshold is tuned based on the training set but no other training or tuning is done for this task with any of our models. For prediction in other domains, we simply compute the Spearman correlations between the entropy\/norm and the labels.\n\nIntuitively, when sentences are longer, they tend to be more specific. %\nSo, we report baselines (``Length'') that predict specificity solely based on length, by thresholding the sentence length for news (choosing the threshold using the training set) or simply returning the length for the others. 
The latter results are reported from \\citet{ko2018domain}. %\nWe also consider baselines that average or sum ranks of word frequencies within a sentence (``Word Freq.~\\textsc{avg}\\xspace'' and ``Word Freq.~\\textsc{sum}\\xspace''). \n\n\n\\subsection{Expected Inner Product of Gaussians}%\nLet $\\mu_1$, $\\mu_2$ be mean vectors and $\\Sigma_1$, $\\Sigma_2$ be the variances %\npredicted by models for a pair of input sentences. For the choice of $d$, \nfollowing \\citet{vilnis2014word}, we use the expected inner product of Gaussian distributions: %\n\\begin{equation}\n\\begin{aligned}\n &\\int_{x\\in\\mathbb{R}^k}\\mathcal{N}(x;\\mu_1,\\Sigma_1)\\mathcal{N}(x;\\mu_2,\\Sigma_2)dx\\\\\n &=\\log\\mathcal{N}{(0;\\mu_1-\\mu_2,\\Sigma_1+\\Sigma_2)}\\\\\n &=-\\frac{1}{2}\\log\\det{(\\Sigma_1+\\Sigma_2)}-\\frac{d}{2}\\log(2\\pi)\\\\\n &\\phantom{ = }-\\frac{1}{2}(\\mu_1-\\mu_2)^\\top(\\Sigma_1+\\Sigma_2)^{-1}(\\mu_1-\\mu_2)\n\\end{aligned}\n\\end{equation}\n\\noindent \nFor diagonal matrices $\\Sigma_1$ and $\\Sigma_2$, the equation above can be computed analytically. \n\n\\subsection{Regularization}\n\nTo avoid the mean or variance of the Gaussian distributions from becoming unbounded during training, resulting in degenerate solutions, we impose prior constraints on the operators introduced above. %\nWe force the transformed distribution after each operator to be relatively close to $\\mathcal{N}(0,I_k)$, which can be thought of as our ``prior'' knowledge of the operator. Then our training additionally minimizes \n\\begin{equation}\n\\begin{aligned}\n &\\lambda\\!\\!\\!\\!\\sum_{s\\in\\{s_1,s_2,n_1,n_2\\}}\\sum_{w\\in s}\\mathit{KL}(\\mathcal{N}(\\mu(w),\\Sigma(w))\\Vert\\mathcal{N}(0,I))\\nonumber\n\\end{aligned}\n\\end{equation}\n\\noindent where $\\lambda$ is a hyperparameter tuned based on the performance on the 2017 semantic textual similarity (STS; \\citealp{cer2017semeval}) data. \nWe found prior regularization very important, \nas will be shown in our results. 
\nFor fair comparison, we also add L2 regularization to the baseline models.\n\n\n\n\n\n\\subsection{Sentence Specificity}\n\nTable~\\ref{sent-spe-gen-res} shows results %\non sentence specificity tasks. We compare to the best-performing models reported by\n\\citet{li2015fast} and \\citet{ko2018domain}. %\nTheir models are specifically designed for predicting sentence specificity and they both use labeled training data from the news domain. \n\n\n\n\\begin{table}[t]\n\\setlength{\\tabcolsep}{5pt}\n\\centering\n\\small\n\\begin{tabular}{l|rrrr}\n & \\multicolumn{1}{|c}{News} & \\multicolumn{1}{c}{Twitter} & \\multicolumn{1}{c}{Yelp} & \\multicolumn{1}{c}{Movie} \\\\\\hline\nMajority baseline & 54.6 & \\multicolumn{1}{c}{-} & \\multicolumn{1}{c}{-} & \\multicolumn{1}{c}{-} \\\\\nLength & 73.4 & 44.5 & 67.6 & 58.1 \\\\\nWord Freq.~\\textsc{sum}\\xspace & 55.5 & 10.1 & 54.6 & 22.1 \\\\ \nWord Freq.~\\textsc{avg}\\xspace & 61.5 & 0.0 & 28.5 & 0.0 \\\\\\hline\n\\multicolumn{5}{c}{Prior work trained on labeled sentence specificity data}\n\\\\\\hline\n\\citet{li2015fast} & 81.6 & 55.3 & 63.3 & 57.5 \\\\\n\\citet{ko2018domain} & \\multicolumn{1}{|c}{-} & 67.9 & 75.0 & 70.6 \\\\\\hline\n\\multicolumn{5}{c}{Sentence embeddings from pretrained models} \\\\\n\\hline\nGloVe \\textsc{sum}\\xspace & 70.4 & 32.2 & 62.8 & 49.0 \\\\\nGloVe \\textsc{avg}\\xspace & 54.6 & -49.6 & -59.0 & -38.2 \\\\\nInferSent & 75.0 & 60.5 & 76.6 & 61.2 \\\\\nSkip-thought & 57.7 & 2.9 & 14.1 & 27.2 \\\\\nBERT & 64.5 & 20.8 & 29.5 & 18.1 \\\\\nELMo \\textsc{sum}\\xspace & 65.4 & 46.2 & 72.7 & 59.3 \\\\\nELMo \\textsc{avg}\\xspace & 56.2 & -9.4 & -0.9 & -22.5 \\\\\n\\hline\n\\multicolumn{5}{c}{Our work} \\\\\\hline\n\\textsc{Wordavg}\\xspace & 54.6 & -10.6 & -32.3 & -27.2 \\\\\n\\textsc{Wordsum}\\xspace & 75.8 & 57.9 & 75.4 & 60.0 \\\\ \n\\textsc{LSTMavg}\\xspace & 54.6 & -14.8 & -41.1 & -14.8 \\\\\n\\textsc{LSTMGaussian}\\xspace & 55.5 & 3.2 & 2.2 & 4.1 \\\\\nWLO\\xspace & \\underline{77.4} & 
\\underline{60.5} & \\underline{76.6} & \\underline{61.9}\n\n\\end{tabular}\n\\caption{Sentence specificity results on test sets from four domains (accuracy (\\%) for News and Spearman correlations (\\%) for others). Highest numbers for the models described in this work are underlined.}%\n\\vspace{-0.1in}\n\\label{sent-spe-gen-res}\n\\end{table}\n\nOur averaging-based models (\\textsc{Wordavg}\\xspace, \\textsc{LSTMavg}\\xspace) failed on this task, either giving the majority class accuracy or negative correlations. So, we also evaluate \\textsc{Wordsum}\\xspace, which sums word embeddings instead of averaging and \nshows strong performance compared to the other models. \n\nWhile the model from \\citet{li2015fast} performs quite well in the news domain, its performance drops on other domains, indicating some amount of overfitting. On the other hand, \\textsc{Wordsum}\\xspace and WLO\\xspace, which are trained on a large number of paraphrases, perform consistently across the four domains and both outperform the supervised models on Yelp. Additionally, our %\nWLO\\xspace model outperforms all our other models, achieving comparable performance to the supervised methods. \n\nAmong pretrained models, BERT, Skip-thought, ELMo \\textsc{sum}\\xspace, and GloVe \\textsc{sum}\\xspace show slight correlations with specificity, while InferSent performs strongly across domains. %\nInferSent uses supervised training on a large manually-annotated dataset (SNLI) \nwhile \\textsc{Wordsum}\\xspace and WLO\\xspace are trained on automatically-generated paraphrases and still show results comparable to InferSent.\n\n\n\n\\begin{table}[t]\n\\setlength{\\tabcolsep}{7pt}\n\\centering\n\\small\n\\begin{tabular}{l|c|c}\n & Full & Length norm. 
\\\\ \n\\hline\nMajority baseline & 54.6 & 50.1 \\\\ \\hline\n\\textsc{Wordavg}\\xspace & 54.6 & 69.0 \\\\\n\\textsc{Wordsum}\\xspace & 75.8 & 68.6\\\\ \n\\textsc{LSTMavg}\\xspace & 54.6 & 69.6 \\\\\n\\textsc{LSTMGaussian}\\xspace & 55.5 & 67.0 \\\\\nWLO\\xspace & \\textbf{77.4} & \\textbf{70.1} \\\\\n\n\\end{tabular}\n\\caption{Accuracy (\\%) for the %\nspecificity News test set, in both the original and length normalized conditions. Highest numbers in each column are in bold.}\n\\label{sent-equal-len-spe-gen-res}\n\\end{table}\n\n\nTo control for effects due to \nsentence length, we design another experiment in which sentences from News training and test are grouped by length, and thresholds are tuned on the group of length $k$ and tested on the group of length $k-1$, for all $k$, leading to a pool of 3582 test sentences.\n \nTable~\\ref{sent-equal-len-spe-gen-res} shows the results. In this length-normalized experiment, the averaging models demonstrate much better performance and even outperform \\textsc{Wordsum}\\xspace, but still WLO\\xspace has the best performance. \n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{The \\mobidis System}\\label{sec:architecture}\n\n\\begin{figure*}[t!]\n\\centering{\n \\includegraphics[width=0.6\\columnwidth]{Figures\/Mobidis_Architecture.eps}\n} \\caption{Architecture of the PMS.} \\label{fig:fig_architecture}\n\\end{figure*}\n\nThis section aims at describing the internal structure of PMS.\nFigure~\\ref{fig:fig_architecture} shows its conceptual architecture.\nAt the beginning, a responsible person designs an Activity Diagram\nthrough SPIDE, a \\emph{Process Designer} Graphical tool with\nwhich \\mobidis is equipped. Later, Such a tool translates the\nActivity Diagram in a XML format file. Then, such a XML file is\nloaded into PMS. 
The \\emph{XML-to-\\indigolog\\ Parser} component\ntranslates this specification into a \\emph{Domain Program}, the\n\\indigolog\\ program corresponding to the designed process, and a set\nof \\emph{Domain Axioms}, which is the action theory that comprises the\ninitial situation, the set of available actions with their pre- and\npost-conditions.\n\nWhen the program is translated into the Domain Program and Axioms, a\ncomponent named \\emph{Communication Manager} (CM) starts up all of the\n\\emph{device managers}, which are basically some drivers for making\nPMS communicate with the services and sensors installed on devices.\nFor each real world device PMS holds a device manager. Each device\nmanager is also intended for notifying the associated device about\nevery action performed by the \\mobidis engine as well as for\nnotifying the \\mobidis engine about the actions executed by the\nservices of the associated device.\n\nAfter this initialization process, CM activates the\n\\emph{\\indigolog\\ Engine}, which is in charge of executing\n\\indigolog\\ programs. Then, CM enters into a passive mode where it\nis listening for messages arriving from the devices through the\ndevice managers. In general, a message can be an exogenous event\nharvested by a certain sensor installed on a given device as well as\na message notifying the start or completion of a certain task. When\nCM judges a message as significant, it forwards it to \\indigolog.\nFor instance, relevant messages may be signals of the task\ncompletion or the sudden unavailability of a given device.\n\nIn sum, CM is responsible for deciding which device should\nperform certain actions, instructing the appropriate device\nmanagers to communicate with the device services and collecting the\ncorresponding sensing outcome. The \\indigolog\\ Engine is intended to\nexecute a \\textit{sense-think-act} interleaved\nloop~\\cite{Kowalski95}. 
The cycle repeats at all times the following\nthree steps:\n\\begin{enumerate}\n\\item check for exogenous events that have occurred;\n\\item calculate the next program step; and\n\\item if the step involves an action, \\textit{execute} the action,\ninstructing the Communication Manager.\n\\end{enumerate}\n\nThe \\indigolog\\ Engine relies on two further modules named\n\\emph{Transition System} and \\emph{Temporal Projector}. The former\nis used to compute the evolution of \\indigolog\\ programs according\nto the statements' semantic, whereas the latter is in charge of\nholding the current situations throughout the execution as well as\nletting evaluate the fluent values for taking the right decision of\nthe actions to perform.\n\nThe last module that is worth mentioning is the \\emph{Execution\nMonitor} (MON), which get notifications of exogenous events from the\nCommunication Manager. It decides whether adaptation is needed and\nadapts accordingly the process. Section~\\ref{sec:mobidisMonitoring}\ngives some additional details of the concrete implementation of\nmonitoring and adaptation.\n\n\\section{A Concrete Example from Emergency Management}\n\n\\begin{figure}[t!] \\centering\n \\includegraphics[height=0.6\\textheight]{Figures\/ProcessNutshell.eps}\n\\caption{An activity diagram of a process concerning emergency\nmanagement.} \\label{fig:pms}\n\\end{figure}\n\n\\begin{figure}[t!] 
\\centering\n\\begin{minipage}{0.5\\textwidth}\n\\begin{scriptsize}\n\\begin{verbatim}\nproc(main,\n prioritized_interrupts(\n [interrupt(exogEvent, monitor),\n interrupt(true, process),\n interrupt(neg(finished), wait)]\n)).\nproc(process, [rrobin(processRescue,\n while(or(noPhotos<7,neg(goodPics)),\n [rrobin(\n [manageTasks(\n [workitem((go,id19,loc(5,5)),\n workitem((photo,id20,loc(5,5)),\n workitem((survey,id21,loc(5,5))]),\n manageTasks(\n [workitem((go,id19,loc(15,15)),\n workitem((photo,id20,loc(15,15)),\n workitem((survey,id21,loc(15,15))]),\n manageTasks(\n [workitem((go,id19,loc(50,50)),\n workitem((photo,id20,loc(50,50)),\n workitem((survey,id21,loc(50,50))]),\n ]\n ),\n manageTasks([workitem((evalPics,id28,input)])\n ])\n ),\n manageTasks([workitem((sendData,id29,input)])\n]).\n\nproc(manageTasks(WrkList),\n pi(srvc,\n [?(and(Available(srvc),Capable(srvc,WrkList))),\n manageExecution(WrkList,srvc),\n ]\n)).\nproc(manageExecution([],Srvc),[]).\nproc(manageExecution([workitem(Task,Id,I)|TAIL],Srvc),\n [assign(Task,Id,Srvc,I),\n start(Task,Id,Srvc,I),\n ackTaskCompletion(Task,Id,Srvc),\n release(Task,Id,Srvc,I),\n manageExecution(TAIL,Srvc)\n ]\n )\n\\end{verbatim}\n\\end{scriptsize}\n\\end{minipage}\n\\caption{An example of process management with \\IndiGolog.}\n\\label{fig:pmsIndiGolog}\n\\end{figure}\n\nWe turn to describe the approach by an example concerning emergency\nmanagement in an area affected by an earthquake. The emergency\nresponse process in question comprises various activities that may\nneed to be adapted on-the-fly to react to unexpected exogenous\nevents that could arise during the operation. Figure~\\ref{fig:pms}\ndepicts an Activity Diagram of a process consisting of two\nconcurrent branches; the final task is \\emph{send data} and can only\nbe executed after the branches have successfully completed. 
The left\nbranch, abstracted out from the diagram, is built from several\nconcurrent processes involving tasks \\emph{rescue},\n\\emph{evacuation} and others. The right branch begins with the\nconcurrent execution of three sequences of tasks: \\emph{go},\n\\emph{photo}, and \\emph{survey}. When all survey tasks have been\ncompleted, the task \\emph{evaluate pictures} is executed. Then, a\ncondition is evaluated on the resulting state at a decision point\n(i.e., whether the pictures taken are of sufficient quality). If the\ncondition holds, the right branch is considered finished; otherwise,\nthe whole branch should be repeated.\n\nFigure \\ref{fig:pmsIndiGolog} shows some parts of the \\IndiGolog\\\nprogram representing the process of the example. The code proposes\nhere has been slightly simplified and abstracted for the sake of\nbrevity. The main procedure, called \\texttt{main}, involves three\ninterrupts running at different priorities. The first highest\npriority interrupt fires when an exogenous event occurs (i.e.,\ncondition \\texttt{exogEvent} is true). In such a case, the\n\\texttt{monitor} procedure is executed, evaluating whether or not\nadaptation is required (see Section~\\ref{sec:mobidisMonitoring}).\n\nIf no exogenous event has occurred, the second interrupt triggers\nand execution of the actual emergency response process is attempted.\nProcedure \\texttt{process}, also shown in the figure, encodes the\nActivity Diagram of the example process. It relies, in turn, on\nprocedure \\texttt{manageTasks(WrkLists)}, where \\texttt{WrkLists} is\na sequence of elements \\texttt{workitem(T,I,D)}, each one\nrepresenting a task \\texttt{T}, with identifier \\texttt{I}, and\ninput data \\texttt{D}, which needs to be performed. 
This procedure\nis meant to manage the execution of all tasks in the worklist, and\nit assigns them all to a \\emph{single} service that provides every\ncapability required.\n\nOf course, to assign tasks to a service, \\mobidis needs to reason\nabout the available ones, their current state (e.g., their\nlocation), and their capabilities, as not every service is capable\nof performing any task. In fact, before assigning the first task in\nany task list, procedure \\texttt{manageTasks(WrkLists)} executes a\n\\emph{pick} operation to choose a service \\texttt{srvc} that\nis involved in no task execution (i.e., fluent \\texttt{Free(srvc)}\nholds) and able to execute the whole worklist.\n\nOnce a suitable service has been chosen, PMS assigns the list of\ntasks to it by executing \\linebreak \\texttt{assign(srvc,WrkList)}.\nIn addition to informing the service about the task assignment, such an\naction turns fluent \\texttt{Free(srvc)} to false.\n\nThen, PMS calls procedure \\texttt{manageExecution(WrkList)}, which\nhandles the execution of each task in the list. For each task T in\nthe list (with identifier \\texttt{I} and input data \\texttt{D}), the\nprocedure invokes action \\texttt{start(T,D,I,srvc)} that provides\nthe required information to the chosen service \\texttt{srvc}. In\nthis way, the service is instructed to begin working on the task and\nreceives the required input. When a service finishes executing an\nassigned task, it alerts \\mobidis via action\n\\texttt{finishedTask(T,srvc)}; PMS acknowledges by performing\n\\texttt{ackTaskCompletion(T,D,srvc)}. 
When the whole work-item list\nhas been executed, the PMS releases the service by executing the action\n\\texttt{release(T,D,srvc)}, after which fluent \\texttt{Free(srvc)}\nis turned to true again.\n\nIt is worth mentioning that, if the process being carried out temporarily cannot\nexecute any further, the lowest priority interrupt fires.\nThis interrupt makes PMS wait for the conditions in which some tasks\ncan be executed. The fact that the process gets stuck does not\nnecessarily imply the occurrence of some relevant exogenous events. It\ncould be also caused by the fact that next tasks can be only\nassigned to services that are currently busy performing other\ntasks. The latter situation does not prevent processes from being\ncompleted successfully; indeed, such services will be eventually\nfree to work on those tasks.\n\n\\section{Preliminaries}\n\\label{sec:basics}\n\nIn this section we introduce the Situation Calculus, which we use to\nformalize \\mobidis and its adaptation features. The Situation\nCalculus \\cite{ReiterBook} is a second-order logic targeted\nspecifically for representing a dynamically changing domain of\ninterest (the world). All changes in the world are obtained as\nresult of \\emph{actions}. A possible history of the actions is\nrepresented by a \\emph{situation}, which is a first-order term\ndenoting the current situation of the world. The constant $s_0$\ndenotes the initial situation. A special binary function symbol\n$do(\\alpha,s)$ denotes the next situation after performing the\naction $\\alpha$ in the situation $s$. Actions may be parameterized.\n\nProperties that hold in a situation are called \\emph{fluents}. These\nare predicates taking a situation term as their last argument. 
For\ninstance, we could define the fluent $free(x,s)$ stating whether the\nobject $x$ is free in situation $s$, meaning no object is located on\n$x$ in situation $s$.\n\nChanges in fluents (resulting from executing actions) are specified\nthrough \\emph{successor state axioms}. In particular for each fluent\n$F$ we have a successor state axioms as follows:\n\\[F(\\overrightarrow{x},do(\\alpha,s)) \\Leftrightarrow\n\\Phi_F( \\overrightarrow{x},do(\\alpha,s),s)\\]\nwhere $\\Phi_F(\\overrightarrow{x},do(\\alpha,s),s)$ is a formula with\nfree variables $\\overrightarrow{x}$, $\\alpha$ is an action, and $s$\nis a situation. %\n\nIn order to control the executions of actions we make use of high\nlevel programs expressed in \\indigolog~\\cite{S_DG_L_L@AMAI04}, which\nis equipped with primitives for expressing concurrency.\nTable~\\ref{tab:IndiGolog} summarizes the constructs of \\indigolog\nused in this work. Basically, these constructs allow to define every\nwell-structured process as defined in \\cite{KiepuszewskiHB00}. 
The\nlast table column shows the corresponding statement defined in the\n\\indigolog\\ platform developed at University of Toronto and RMIT\nUniversity.\\footnote{Downloadable at\n\\url{http:\/\/www.cs.toronto.edu\/cogrobo\/main\/systems\/index.html}}\n\n\\begin{table}[t]\n\\centering{\n \\caption{\\indigolog constructs.}\\label{tab:IndiGolog}\n\\begin{small}\n\\begin{tabular}{|l|p{8cm}|p{4.3cm}|}\n \\hline\n \\textbf{Construct} & \\textbf{Meaning} & \\textbf{Platform Statement}\\\\\\hline\n $a$ & A primitive action & \\texttt{a}\\\\\\hline\n $\\phi?$ & Wait while the $\\phi$ condition is false & \\texttt{?(phi)} \\\\\\hline\n $(\\delta_1;\\delta_2)$ & Sequence of two sub-programs $\\delta_1$\n and $\\delta_2$ & \\texttt{[delta1,delta2]} \\\\\\hline\n $proc~P(\\overrightarrow{v})~\\delta$ & Invocation of a procedure\n passing a vector $\\overrightarrow{v}$ of parameters & \\texttt{proc(P,delta)}\\\\\\hline\n $(\\phi;\\delta_1) | (\\neg\\phi;\\delta_2)$ & Exclusive choice between $\\delta_1$ and $\\delta_2$\n according to the condition $\\phi$ & \\texttt{ndet([?(phi);delta1],} \\texttt{[?(neg(phi)),delta2])} \\\\\\hline\n $while~\\phi~do~\\delta$ & Iterative invocation of $\\delta$ & \\texttt{while(phi,delta)}\\\\\\hline\n $(\\delta_1\\parallel\\delta_2)$ & Concurrent execution & \\texttt{rrobin(delta1,delta2)}\\\\\\hline\n $\\delta^*$ & Indeterministic iteration of program execution (The platform statement\n limits the maximum iterations number to \\texttt{n}) & \\texttt{star(delta,n)}\n \\\\\\hline\n $\\Sigma (\\delta)$ & Emulating off-line execution & \\texttt{searchn(delta,n)}\\\\\\hline\n $\\pi a.\\delta$ & Indeterministic choice of argument $a$ followed by the execution of $\\delta$ & \\texttt{pi(a,delta)} \\\\\\hline\n\\end{tabular}\n\\end{small}\n}\n\\end{table}\n\nFrom the formal point of view, \\indigolog programs are terms. The\nexecution of \\congolog programs is expressed through a\n\\emph{transition semantic} based on single steps of execution. 
At\neach step a program executes an action and evolves to a new program\nwhich represents what remains to be executed of the original\nprogram. Formally, two predicates are introduced to specify such a\nsemantics:\n\\begin{itemize}\n \\item $Trans(\\delta',s',\\delta'',s'')$, given a program $\\delta'$\n and a situation $s'$, returns \\myi a new situation $s''$ resulting from executing\n a single step of\n $\\delta'$, and \\myii $\\delta''$ which is the remaining program to be executed.\n \\item $Final(\\delta',s')$ returns true when the program $\\delta'$\n can be considered successfully completed in situation $s'$.\n\\end{itemize}\n\nBy using $Trans$ and $Final$ we can define a predicate\n$Do(\\delta',s',s'')$ that represents successful complete executions\nof a program $\\delta'$ in a situation $s'$, where $s''$ is the\nsituation at the end of the execution of $\\delta'$. Formally:\n\\begin{displaymath}\nDo(\\delta',s',s'')\\Leftrightarrow\\exists\\delta''.Trans^*(\\delta',s',\\delta'',s'')\n\\wedge Final(\\delta'',s'')\n\\end{displaymath}\n\\noindent where $Trans^*$ denotes the reflexive and\ntransitive closure of \\emph{Trans}.\n\nTo cope with the impossibility of backtracking actions executed in\nthe real world, \\indigolog\\ incorporates a new programming\nconstruct, namely the {\\em search operator}. Let $\\delta$ be any\n\\indigolog\\ program, which provides different alternative executable\nactions. When the interpreter encounters program $\\Sigma(\\delta)$,\nbefore choosing among alternative executable actions of $\\delta$ and\npossible picks of variable values, it performs reasoning in order to\ndecide for a step which still allows\nthe rest of $\\delta$ to terminate successfully.\nIf $\\delta$ is the entire program under consideration,\n$\\Sigma(\\delta)$ emulates complete off-line execution.\n\n\\section{Conclusion}\n\nMost existing PMSs are not completely appropriate for very\ndynamic and pervasive scenarios. 
Indeed, such scenarios are\nturbulent and subject to a higher frequency of unexpected\ncontingencies with respect to usual business settings that show a\nstatic and foreseeable behaviour. This paper describes\n\mobidis, an adaptive PMS that is able to adapt processes thus\nrecovering from exceptions. Adaptation is synthesized automatically\nwithout relying either on the intervention of domain experts or on\nthe existence of specific handlers planned in advance to cope with\nspecific exceptions. Space limitations have prevented us from including\nconcrete examples of adaptation: interested readers can refer\nto~\\cite{deLeoniPhD}.\n\nFuture work aims mostly at integrating \\mobidis with state-of-the-art\nplanners. Indeed, the current implementation relies on the \\indigolog\nplanner, which performs a blind search without using smarter\ntechniques recently proposed to reduce the search space by removing\na priori all the possibilities that surely lead to no solution. The most\nchallenging issue is to convert Action Theories and \\indigolog\\\nprograms in a way they can be given as input to planners (e.g.,\nconverting to PDDL~\\cite{F_L@JAIR07}).\n\n\n\\section{Process Formalisation in Situation Calculus}\n\\label{sec:BPMFormalization}\n\nNext we detail the general framework proposed above by using\nSituation Calculus and \\indigolog. We use some domain-independent\npredicates to denote the various objects of interest in the\nframework:\n\\begin{itemize}\n \\item $service(a)$: $a$ is a service\n \\item $task(x)$: $x$ is a task\n \\item $capability(b)$: $b$ is a capability\n \\item $provide(a,b)$: the service $a$ provides the capability $b$\n \\item $require(x,b)$: the task $x$ requires the capability $b$\n\\end{itemize}\nIn the light of these predicates, we have defined a shortcut to\nrefer to the capability of a certain service $a$ to perform a list\nof tasks, a.k.a.~worklist. 
Service $a$ can execute a certain\nworklist $wrkList$ iff $a$ provides all capabilities required by all\ntasks in the worklist:\n\\[\nCapable(a,wrklist) \\Leftrightarrow \\big( \\forall b,t. t \\in wrkList\n\\wedge require(t,b) \\Rightarrow provide(a,b) \\big)\n\\]\n\nEvery task execution is the sequence of four PMS actions: \\myi the\nassignment of the task to a service, resulting in the service being\nnot free anymore; \\myii the notification to the service to start\nexecuting the task. Then, the service carries out the task and,\nafter receiving the service notification of the task conclusion,\n\\myiii the PMS acknowledges the successful task termination.\nFinally, \\myiv the PMS releases the service, which becomes free\nagain. We formalise these four actions as follows:\n\\begin{itemize}\n \\item $Assign(a,x)$: task $x$ is assigned to a service $a$\n \\item $Start(a,x,p)$: service $a$ is allowed to\n start the execution of task $x$. The input provided is $p$.\n \\item $AckTaskCompletion(a,x)$: service $a$ successfully concluded the\n execution of $x$.\n \\item $Release(a,x)$: the service $a$ is released with respect\n to task $x$.\n\\end{itemize}\nIn addition, services can execute two actions:\n\\begin{itemize}\n \\item $readyToStartTask(a,x)$: service $a$ declares to be ready to\n start performing task $x$\n \\item $finishedTask(a,x,q)$: service $a$ declares to have completed\n executing task $x$ returning output $q$.\n\\end{itemize}\n\nThe terms $p$ and $q$ denote arbitrary sets of input\/output, which\ndepend on the specific task. Special constant $\\emptyset$ denotes\nempty input or output.\n\nThe interleaving of actions performed by the PMS and services is as\nfollows. After the assignment of a certain task $x$ by\n$Assign(a,x)$, when the service $a$ is ready to start executing, it\nexecutes action $readyToStartTask(a,x)$. 
At this stage, PMS executes\naction $Start(a,x,p)$, after which $a$ starts executing task $x$.\nWhen $a$ completes task $x$, it executes the action\n$finishedTask(a,x,q)$. Specifically, we envision that actions\n$finishedTask(\\cdot)$ are those in charge of changing properties of the\nworld as a result of executing tasks. When $x$ is completed, PMS is\nallowed at any moment to execute sequentially\n$AckTaskCompletion(a,x)$ and $Release(a,x)$. The program coding the\nprocess will be executed by only one actor, specifically the PMS.\nTherefore, actions $readyToStartTask(\\cdot)$ and\n$finishedTask(\\cdot)$ are considered as external and, hence, not\ncoded in the program itself.\n\nFor each specific domain, we have several fluents representing the\nproperties of situations. Some of them are modelled independently of\nthe domain whereas others, the majority, are defined according to\nthe domain. If they are independent of the domain, they can\nalways be formulated as defined in this chapter. Among the\ndomain-independent ones, we have fluent $free(a,s)$, which denotes\nthe fact that the service $a$ is free, i.e., no task has been\nassigned to it, in the situation $s$. The corresponding successor\nstate axiom is as follows:\n\\begin{equation}\\label{eq:freeAxiom}\n\\begin{array}{l}\nfree(a,do(t,s)) \\Leftrightarrow {}\\\\\n\\qquad\\big(\\forall x.t \\neq Assign(a,x) \\wedge free(a,s) \\big) \\vee {}\\\\\n\\qquad\\big( \\neg free(a,s) \\wedge \\exists x.t = Release(a,x) \\big)\n\\end{array}\n\\end{equation}\nThis says that a service $a$ is considered free in the current\nsituation if and only if $a$ was free in the previous situation and\nno tasks have been just assigned to it, or $a$ was not free and it\nhas been just released. There exists also the domain-independent\nfluent $enabled(x,a,s)$ which aims at representing whether service\n$a$ has notified to be ready to execute a certain task $x$ so as to\nenable it. 
The corresponding successor-state axiom:\n\\begin{equation}\\label{eq:enabledAxiom}\n\\begin{array}{l}\nenabled(x,a,do(t,s)) \\Leftrightarrow \\\\\\qquad\\big( enabled(x,a,s)\n\\wedge \\forall q. t \\neq finishedTask(a,x,q) \\big)\\vee\n\\\\ \\qquad\\big( \\neg enabled(x,a,s) \\wedge t=readyToStartTask(a,x) \\big)\n\\end{array}\n\\end{equation}\nThis says that $enabled(x,a,s)$ holds in the current situation if\nand only if it held in the previous one and no action\n$finishedTask(a,x,q)$ has been performed or it was false in the\nprevious situation and $readyToStartTask(a,x)$ has been executed.\nThis fluent aims at enforcing the constraints that the PMS can\nexecute $Start(a,x,p)$ only after $a$ performed $readyToStartTask(a,x)$ and it\ncan execute $AckTaskCompletion(a,x,q)$ only after\n$finishedTask(a,x,q)$. This can be represented by two pre-conditions on\nactions $Start(\\cdot)$ and $AckTaskCompletion(\\cdot)$:\n\\begin{equation}\\label{eq:possStartStop}\n\\begin{array}{l}\n\\forall p.Poss(Start(a,x,p),s) \\Leftrightarrow enabled(x,a,s) \\\\\n\\forall p.Poss(AckTaskCompletion(x,a),s) \\Leftrightarrow \\neg\nenabled(x,a,s)\n\\end{array}\n\\end{equation}\nprovided that $AckTaskCompletion(x,a)$ never comes before\n$Start(x,a,p)$.\n\nFurthermore, we introduce a domain-independent fluent\n$started(x,a,p,s)$ that holds if and only if an action\n$Start(a,x,p)$ has been executed but the dual\n$AckTaskCompletion(x,a)$ has not yet:\n\\begin{equation}\\label{eq:startedAxiom}\n\\begin{array}{l}\nstarted(a,x,p,do(t,s)) \\Leftrightarrow \\\\\\qquad\\big(\nstarted(a,x,p,s) \\wedge t \\neq AckTaskCompletion(a,x) \\big)\\vee\n\\\\\n\\qquad\\big( \\nexists p'.started(x,a,p',s) \\wedge t=Start(a,x,p)\n\\big)\n\\end{array}\n\\end{equation}\n\nIn addition, we make use, in every specific domain, of a predicate\n$available(a,s)$ which denotes whether a service $a$ is available in\nsituation $s$ for task assignment. 
However, $available$ is\ndomain-dependent and, hence, requires to be defined specifically for\nevery domain. Knowing whether a service is available is very\nimportant for the PMS when it has to perform assignments. Indeed, a\ntask $x$ is assigned to the best service $a$ which is available and\nprovides every capability required by $x$. The fact that a certain\nservice $a$ is free does not imply it can be assigned to tasks\n(e.g., in the example described above it has to be free as well as\nit has to be indirectly connected to the coordinator). The\ndefinition of $available(\\cdot)$ must enforce the following\ncondition:\n\\begin{equation}\\label{eq:availableAxiom}\n\\forall a~s. available(a,s) \\Rightarrow free(a,s)\n\\end{equation}\n\nWe do not give explicitly pre-conditions to task. We assume tasks\ncan always be executed. We assume that, given a task, if some\nconditions do not hold, then the outcomes of that tasks are not as\nexpected (in other terms, it fails).\n\n\n\n\\section{General Framework}\\label{sec:GeneralFramework}\n\nThe general framework which we shall introduce in this paper is\nbased on the \\textit{execution monitoring} scheme as described in\n\\cite{GiacomoRS98} for situation calculus agents. As we will later\ndescribe in more details, when using \\indigolog\\ for process\nmanagement, we take tasks to be predefined sequences of actions (see\nlater) and processes to be \\indigolog\\ programs.\nAfter each action, the PMS may need to align the internal world\nrepresentation (i.e., the virtual reality) with the external one\n(i.e., the physical reality).\n\n\\begin{figure*}[]\n\\centering{\n \\includegraphics[width=0.6\\columnwidth]{Figures\/Figure1.eps}\n} \\caption{Execution Monitoring.} \\label{fig:monitoring}\n\\end{figure*}\n\nBefore a process starts, PMS takes the initial context from the real\nenvironment and builds the corresponding initial situation $S_0$, by\nmeans of first-order logic formulas. 
It also builds the program\n$\\delta_{0}$ corresponding to the process to be carried on.\nThen, at each execution step, PMS, which has a complete knowledge of\nthe internal world (i.e., its virtual reality), assigns a task to a\nservice. The only ``assignable'' tasks are those whose preconditions\nare fulfilled. A service can collect data required needed to execute\nthe task assigned from PMS. When a service finishes executing a\ntask, it alerts PMS of that.\n\nThe execution of the PMS can be interrupted by the \\textit{monitor}\nmodule when a misalignment between the virtual and the physical\nrealities is discovered. In that case, the monitor \\textit{adapts}\nthe (current) program to deal with such discrepancy.\n\nIn Figure~\\ref{fig:monitoring}, the overall framework is depicted.\nAt each step, the PMS advances the process $\\delta$ in situation $s$\nby executing an action, resulting then in a new situation $s'$ with\nthe process $\\delta'$ remaining to be executed. Both $s'$ and\n$\\delta'$ are given as input to the monitor, which also collects\ndata from the environment through \\emph{sensors}.\\footnote{Here, we\nrefer as \\emph{sensors} not only proper sensors (e.g., the ones\ndeployed in sensor networks), but also any software or hardware\ncomponent enabling to retrieve contextual information. For instance,\nit may range from GIS clients to specific hardware that makes\navailable the communication distance of a device to its\nneighbors.~\\cite{dL_M_R@WETICE07}}\nIf a discrepancy between the virtual reality as represented by $s'$\nand the physical reality is sensed, then the monitor changes $s'$ to\n$s''$, by generating a sequence of actions that explains the changes\nperceived in the environment, thus re-aligning the virtual and\nphysical realities.\nNotice, however, that the process $\\delta'$ may \\textit{fail} to\nexecute successfully (i.e., assign all tasks as required) in the new\n(unexpected) situation $s''$. 
If so, the monitor adapts also the\n(current) process by performing suitable recovery changes and\ngenerating then a new process $\\delta''$. At this point, the PMS is\nresumed and the execution continues with program-process $\\delta''$\nin situation $s''$.\n\n\\section{Introduction}\n\nNowadays organisations are always trying to improve the performance\nof the processes they are part of. It does not matter whether such\norganisations are dealing with classical static business domains,\nsuch as loans, bank accounts or insurances, or with pervasive and\nhighly dynamic scenarios. The demands are always the same: seeking\nmore efficiency for their processes to reduce the time and the cost\nfor their execution.\n\nAccording to the definition given by the Workflow Management\nCoalition,\\footnote{\\url{http:\/\/wfmc.org}} a workflow is ``the\ncomputerised facilitation of automation of a business process, in\nwhole or part''. The Workflow Management Coalition defines a\nWorkflow Management System as ``a system that completely defines,\nmanages and executes workflows through the execution of software\nwhose order of execution is driven by a computer representation of\nthe workflow logic''. Workflow Management Systems (WfMSs) are also\nknown as Process Management Systems (PMSs), and we are going to use\nboth terms interchangeably throughout this paper. Accordingly,\nthis paper often uses the word ``process'' in place of the word\n``workflow'', although the original acceptation of the former is not\nintrinsically referring to its computerised automation.\n\n\nIn this paper\nwe turn our\nattention to highly dynamic and pervasive scenarios. Pervasive\nscenarios comprise, for instance, emergency management, health care\nor home automation (a.k.a. domotics). All of these scenarios are\ncharacterised as being very dynamic and turbulent and subject to a\nhigher frequency of unexpected contingencies with respect to\nclassical scenarios. 
Therefore, PMSs for pervasive scenarios should\nprovide a higher degree of operational flexibility\/adaptability.\n\nAccording to Andresen and Gronau~\\cite{A_G@InfoRes} adaptability can\nbe seen as an ability to change something to fit to occurring\nchanges. Adaptability is to be understood here as the ability of a\nPMS to adapt\/modify processes efficiently and fast to change\ncircumstances. Adaptation aims at reducing the gap of the\n\\emph{virtual reality}, the (idealized) model of reality that is\nused by the PMS to deliberate, from the \\emph{physical reality}, the\nreal world with the actual values of conditions and\noutcomes~\\cite{GiacomoRS98}. Exogenous events may make deviate the\nvirtual reality from the physical reality. The reduction of this gap\nrequires sufficient knowledge of both kinds of realities (virtual\nand physical). Such knowledge, harvested by the services performing\nthe process tasks, would allow the PMS to sense deviations and to\ndeal with their mitigation.\n\nIn pervasive settings, efficiency and effectiveness when carrying on\nprocesses are a strong requirement. For instance, in emergency\nmanagement saving minutes could result in saving injured people,\npreventing buildings from collapses, and so on. Or, pervasive\nhealth-care processes can cause people's permanent diseases when not\nexecuted by given deadlines. In order to improve effectiveness of\nprocess execution, adaptation ought to be as automatic as possible\nand to require minimum manual human intervention. 
Indeed, human\nintervention would cause delays, which might not be acceptable.\n\nIn theory there are three possibilities to deal with deviations:\n\\begin{enumerate}\n \\item Ignoring deviations -- this is, of course, not feasible in general,\n since the new situation might be such that the PMS is no more able to\n carry out the process instance.\n \\item Anticipating all possible discrepancies -- the idea is to\n include in the process schema the actions to cope with each of\n such failures. This can be seen as a\n \\texttt{try-catch} approach, used in some programming languages such as\n Java.\n The process is defined as if exogenous actions cannot occur, that\n is everything runs fine (the \\texttt{try} block). Then, for each\n possible exogenous event, a \\texttt{catch} block is designed in\n which the method is given to handle the corresponding exogenous\n event.\n For simple and mainly static\n processes, this is feasible and valuable; but, especially in mobile\n and highly dynamic scenarios, it is quite impossible to take\n into account all exception cases.\n \\item Devising a general recovery method able to handle any\n kind of exogenous events -- considering again the metaphor of\n try\/catch, there exists just one \\texttt{catch} block, able to\n handle any exogenous events, included the unexpected.\n The \\texttt{catch} block activates the general recovery method to modify\n the old process $P$ in a process $P'$ so that $P'$\n can terminate in the new environment\n and its goals are included in those of $P$.\n This approach relies on the execution monitor (i.e., the module intended for execution\n monitoring) that detects discrepancies leading the process instance not\n to be terminable. 
When they are sensed,\n the control flow moves to the \\texttt{catch} block.\n An important challenge here is to build the monitor which\n is able to identify which exogenous events are relevant, i.e.~that make impossible process to terminate, as well as to \\emph{automatically}\n synthesize $P'$ during the execution\n itself.\n\\end{enumerate}\n\n\\begin{table}\n \\centering{\n \\caption{Adaptability in the leading PMSs (as from~\\cite{deLeoniPhD}).}\n \\begin{small}\n\\begin{tabular}{|p{0.15\\columnwidth}|c|c|c|}\n\\hline\n\\textbf{Product} & \\textbf{Manual} & \\textbf{Pre-planned} & \\textbf{Unplanned} \\\\\n\\hline YAWL & & \\checkmark & \\\\\n\\hline COSA & \\checkmark & \\checkmark & \\\\\n\\hline Tibco & \\checkmark & \\checkmark & \\\\\n\\hline WebSphere & \\checkmark & \\checkmark & \\\\\n\\hline SAP & \\checkmark & \\checkmark & \\\\\n\\hline OPERA & \\checkmark & \\checkmark & \\\\\n\\hline ADEPT2 & \\checkmark & & \\\\\n\\hline ADOME & \\checkmark & & \\\\\n\\hline AgentWork & \\checkmark & & \\\\\n\\hline\n\\end{tabular}\n \\end{small}}\n \\label{tab:adaptPMS}\n \\end{table}\n\nTable~\\ref{tab:adaptPMS} shows the adaptability features of the most\nvaluable PMSs according to the state-of-art analysis described\nin~\\cite{deLeoniPhD}. Column \\textbf{Manual} refers to the\npossibility of a responsible person who manually changes the process\nschema to deal with exogenous events. Column \\textbf{Pre-planned}\nconcerns the feature of defining policies to specify the adaptation\nbehaviour to manage some exogenous events, whose possible occurrence\nis foreseeable a priori. 
The last column \\textbf{Unplanned} refers\nto the third approach in the classification above.\n\nThe third approach seems to be the most appropriate when dealing\nwith scenarios where \\myi the frequency of unexpected exogenous\nevents are relatively high and \\myii there are several exogenous\nevents that cannot be foreseen before their actual occurrence.\nUnfortunately, as the table shows, the world leading PMSs are unable\nto feature the third approach.\n\nThis paper describes \\mobidis, a PMS that features some sound and\ncomplete techniques according to the third approach described above.\nSuch techniques are meant to improve the degree of \\emph{automatic}\nadaptation to react to very frequent changes in the execution\nenvironment and fit processes accordingly. The techniques proposed\nhere are based on Situation Calculus~\\cite{ReiterBook} and automatic\nplanning, conceived to coordinate robots and intelligent agents. The\nconcrete implementation, namely \\mobidis, is based on the\n\\indigolog\\ interpreter developed at University of Toronto and RMIT\nUniversity, Melbourne.\n\nIn \\mobidis, every entity performing task is generally named\n``service''. A service may be a human actor\/process participant as\nwell as an automatic service that execute a certain job (e.g., a\nSOAP-based Web Service).\n\nLet us consider a scenario for emergency management where processes\nshow typical a complexity that is comparable to business settings.\nTherefore, the usage of PMS is valuable to coordinate the activities\nof emergency operators. In these scenarios, operators are typically\nequipped with low-profile devices, such as PDAs, which several\nservices are installed on. Such services may range from usual\nGUI-based applications to automatic ones. For instances, some\napplications can be installed to fill questionnaires or take\npictures. 
In addition, PDAs can be provided with some automatic\nservices that connect to the Civil Protection headquarters to\nretrieve information for the assessment of the affected area and\npossibly send back the data collected.\n\nPDAs communicate with each other by Mobile Ad-hoc Networks\n(\\textsc{manet}s\\xspace), which are Wi-Fi networks that do not rely on a fixed\ninfrastructure, such as Access Points. Devices can be the final\nrecipients of some packets sent by other devices as well as they can\nact as relays and forward packets towards the final destination.\n\nIn order to orchestrate the services installed on operator devices,\nsuch devices need to be continually connected to the PMS through a\nloose connection: devices and the PMS can communicate if there\nexists a path of nodes that connects them in the graph of the\ncommunication links.\n\nIn the virtual reality, devices are supposed to be continuously\nconnected (i.e., a path always exists between pairs of nodes). But\nin this physical reality continuous connections cannot be\nguaranteed: the environment is highly dynamic and the movement of\nnodes (that is, devices and related operators) within the affected\narea, while carrying out assigned tasks, can cause disconnections\nand make deviate the two reality. Disconnections results in the\nunavailability of nodes and, hence, the services provided. From the\ncollection of actual user requirements~\\cite{H_C_dL_M_M_B_S@HCI09},\nit results that typical teams are formed by a few nodes (less than\n10 units), and therefore frequently a simple task reassignment is\nnot feasible. Indeed, there may not be two ``similar'' services\navailable to perform a given task. Reordering task executions would\nnot solve the problem, either. 
There is no guarantee that eventually\nthose services that provide unique capabilities connect again to the\nPMS.\n\nSo, adaptation is needed: adaptability might consist in this case\nof recovering the disconnection of a node X, and that can be achieved\nby assigning a task ``Follow X'' to another node Y in order to\nmaintain the connection. When the connection has been restored, the\nprocess can progress again.\n\n\n\\section*{Acknowledgments} The author wishes to thank Giuseppe De Giacomo, Andrea\nMarrella, Massimo Mecella and Sebastian Sardina, who have\ncontributed to different aspects of the \\mobidis development.\n\\bibliographystyle{eptcs}\n\n\\section{Adaptation in \\mobidis}\n\n\\subsection{Monitoring Formalisation} \\label{sec:BPMAdaptiveness}\n\nNext we formalize how the monitor works. Intuitively, the monitor\ntakes the current program $\\delta'$ and the current situation $s'$\nfrom the PMS's virtual reality and, analyzing the physical reality\nby sensors, introduces fake actions in order to get a new situation\n$s''$ which aligns the virtual reality of the PMS with sensed\ninformation. Then, it analyzes whether $\\delta'$ can still be\nexecuted in $s''$, and if not, it adapts $\\delta'$ by generating a\nnew correctly executable program $\\delta''$. 
Specifically, the\nmonitor work can be abstractly defined as follows (we do not model\nhow the situation $s''$ is generated from the sensed information):\n\n\\begin{equation}\n\\begin{array}{l}\nMonitor(\\delta',s',s'',\\delta'') \\Leftrightarrow\n\\big(Relevant(\\delta', s', s'') \\wedge Recovery(\\delta', s', s'',\n\\delta'') \\big) \\vee {}\\\\ \\qquad \\big(\\neg Relevant( \\delta', s',\ns'') \\wedge \\delta'' = \\delta' \\big)\n\\end{array}\n\\label{equ:monitor}\n\\end{equation}\n\n\\noindent where: \\myi $Relevant(\\delta',s',s'')$ states whether the\nchange from the situation $s'$ into $s''$ is such that $\\delta'$\ncannot be correctly executed anymore; and \\myii $Recovery(\\delta',\ns', s'', \\delta'')$ is intended to hold whenever the program\n$\\delta'$, to be originally executed in situation $s'$, is\nadapted to $\\delta''$ in order to be executed in situation\n$s''$.\n\nFormally $Relevant$ is defined as follows:\n\n\\begin{displaymath}\nRelevant(\\delta',s',s'') \\Leftrightarrow \\neg\nSameConfig(\\delta',s',\\delta',s'')\n\\end{displaymath}\n\n\\noindent where $SameConfig(\\delta',s',\\delta'',s'')$ is true if\nexecuting $\\delta'$ in $s'$ is ``equivalent'' to executing\n$\\delta''$ in $s''$ (see later for further details).\n\nIn this general framework we do not give a definition for\n$SameConfig(\\delta',s',\\delta'',s'')$. However we consider any\ndefinition for $SameConfig$ to be correct if it denotes a\nbisimulation \\cite{MilnerBook}. 
Formally, for every $\\delta',\ns',\\delta'',s''$ holds:\n\n\\begin{enumerate}\n \\item $Final(\\delta',s') \\Leftrightarrow Final(\\delta'',s')$\n \\item\n $\\forall~a,\\delta'.Trans\\big(\\delta',s',\\overline{\\delta'},do(a,s')\\big) \\Rightarrow$\n \\\\$\\exists~\\overline{\\delta''}.Trans\\big(\\delta'',s'',\\overline{\\delta'},do(a,s'')\\big)\n \\wedge SameConfig\\big(\\overline{\\delta'},do(a,s),\\overline{\\delta''},do(a,s'')\\big)$\n \\item\n $\\forall~a,\\delta'.Trans\\big(\\delta'',s'',\\overline{\\delta'},do(a,s'')\\big) \\Rightarrow$\n \\\\$\\exists~\\overline{\\delta''}.Trans\\big(\\delta',s',\\overline{\\delta'},do(a,s')\\big)\n \\wedge SameConfig\\big(\\overline{\\delta''},do(a,s''),\\overline{\\delta'},do(a,s')\\big)$\n\\end{enumerate}\n\nIntuitively, a predicate $SameConfig(\\delta',s',\\delta'',s'')$ is\nsaid to be correct if $\\delta'$ and $\\delta''$ are terminable either\nboth or none of them. Furthermore, for each action $a$ performable\nby $\\delta'$ in the situation $s'$, $\\delta''$ in the situation\n$s''$ has to enable the performance of the same actions (and\nviceversa). Moreover, the resulting configurations\n$(\\overline{\\delta'},do(a,s'))$ and $(\\overline{\\delta''},do(a,s'))$\nmust still satisfy $SameConfig$.\n\nThe use of the bisimulation criteria to state when a predicate\n$SameConfig(\\cdots)$ is correct, derives from the notion of\nequivalence introduced in \\cite{HiddersDAHV05}. When comparing the\nexecution of two formally different business processes, the internal\nstates of the processes may be ignored, because what really matters\nis the process behavior that can be observed. This view reflects the\nway a PMS works: indeed what is of interest is the set of tasks that\nthe PMS offers to its environment, in response to the inputs that\nthe environment provides.\n\nNext we turn our attention to the procedure to adapt the process\nformalized by $Recovery(\\delta,s,s',\\delta')$. 
Formally is defined\nas follows:\n\n\n\\begin{equation}\n\\begin{array}{l}\nRecovery(\\delta',s',s'',\\delta'') \\Leftrightarrow {\n\\exists \\delta_a,\\delta_b.\\delta''=\\delta_a;\\delta_b \\wedge\nDeterministic(\\delta_a) \\wedge \\\\\\quad Do(\\delta_a,s'',s_b) \\wedge\nSameConfig(\\delta',s',\\delta_b,s_b)\n\\end{array}\n\\label{equ:recovery}\n\\end{equation}\n\n$Recovery$ determines a process $\\delta''$ consisting of a\n\\emph{deterministic} $\\delta_a$ (i.e., a program not using the\nconcurrency construct), and an arbitrary program $\\delta_b$. The aim\nof $\\delta_a$ is to lead from the situation $s''$ in which\nadaptation is needed to a new situation $s_b$ where\n$SameConfig(\\delta',s',\\delta_b,s_b)$ is true.\n\nThe nice feature of \\textsc{Recovery} is that it asks to search for\na linear program that achieves a certain formula, namely\n$SameState(s',s'')$. That is we have reduced the synthesis of a\nrecovery program to a classical Planning problem in AI\n\\cite{TraversoBook}. As a result we can adopt a well-developed\nliterature about planning for our aim. In particular, if the\nservices and input and output parameters are finite, then the\nrecovery can be reduced to \\emph{propositional} planning, which is\nknown to be decidable in general (for which very well performing\nsoftware tools exists).\n\nNotice that during the actual recovery phase $\\delta_a$ we disallow\nfor concurrency because we need full control on the execution of\neach service in order to get to a recovered state. Then the actual\nrecovered program $\\delta_b$ can again allow for concurrency.\n\nIn the previous sections we have provided a general description on\nhow adaptation can be defined and performed. Here we choose a\nspecific technique that is actually feasible in practice. 
Our main\nstep is to adopt a specific definition for $SameConfig$, here\ndenoted as \\textsc{SameConfig}, namely:\n\\begin{equation}\n\\textsc{SameConfig}(\\delta',s',\\delta'',s'') \\Leftrightarrow\nSameState(s',s'') \\wedge \\delta'=\\delta''\n\\label{equ:SameConfigConcrete}\n\\end{equation}\n\n\\indent In other words, \\textsc{SameConfig} states that $\\delta'$,\n$s'$ and $\\delta''$, $s''$ are the same configuration if \\myi all\nfluents have the same truth values in both $s'$ and $s''$\n($SameState$), and \\myii $\\delta''$ is actually\n$\\delta'$.\\footnote{Observe that $SameState$ can actually be defined\nas a first-order formula over the fluents, as the conjunction of\n$F(s') \\Leftrightarrow F(s'')$ for each fluent $F$.} In\npapers~\\cite{deLeoniPhD,DBLP:conf\/bpm\/LeoniMG07}, we have proved\nthat the above-defined \\textsc{SameConfig} is a correct\nbisimulation.\n\nUsing Equation~\\ref{equ:SameConfigConcrete} as the $SameConfig$\ndefinition feasible in practice, relevance turns out to be:\n\\begin{equation}\n\\begin{array}{l}\n\\textsc{Relevant}(\\delta',s',s'') \\Leftrightarrow \\neg\nSameState(s',s'')\n\\end{array}\n\\label{equ:RelevantConcrete}\n\\end{equation}\nIn the next section, we are going to show how the abstract planner\nspecification given here has been concretely used inside \\mobidis.\nSpecifically, the current version of \\mobidis uses the propositional\nplanner available in the \\indigolog platform developed by the University\nof Toronto and RMIT in Melbourne. In order to adapt, \\mobidis is\nbased on the concrete definitions of relevancy and $SameConfig$\ngiven by Equations~\\ref{equ:RelevantConcrete}\nand~\\ref{equ:SameConfigConcrete}.\n\n\\subsection{The Execution Monitoring and Adaptation}\n\\label{sec:mobidisMonitoring}\n\n\\begin{figure}[t!] 
\\centering\n\\begin{minipage}{0.7\\textwidth}\n\\begin{Verbatim}[fontsize=\\scriptsize]\nproc(monitor,[ndet(\n [?(neg(relevant))],\n [?(relevant),recovery]\n )]).\n\nproc(recovery, searchn([searchProgram],10).\n\nproc(searchProgram, [star(pi([Task,Id,Input,srvc],\n [?(and(Available(srvc),\n Capable(srvc,[workitem(Task,Id,Input)]))),\n manageExecution([workitem(Task,Id,Input)],srvc)])),\n ?(SameState)]).\n\\end{Verbatim}\n\\end{minipage}\n\\caption{The procedure for managing automatic adaptation with the\n\\indigolog interpreter.} \\label{fig:adaptIndiGolog}\n\\end{figure}\n\nAs already told, adaptation amounts to find a linear program (i.e.,\nwithout concurrency) that is meant to be ``appended'' before the\ncurrent \\indigolog program remaining to be executed. Such a linear\nprogram is meant to resolve the gap that was just sensed by\nrestoring the values of affected fluents to those before the\noccurrence of the deviation.\n\nFigure~\\ref{fig:adaptIndiGolog} shows how adaptability has been\nconcretely implemented in \\mobidis. The execution of the process\nbeing carried out by \\mobidis can be interrupted by the\n\\texttt{monitor} procedure when a misalignment between the virtual\nand the physical reality is discovered.\n\nThe \\texttt{monitor} procedure is the concrete coding of\nEquation~\\ref{equ:monitor} and relies on procedure\n\\texttt{relevant}. Procedure \\texttt{relevant} returns true if the\nexogenous event has created a gap between the physical and virtual\nreality that is in accord with Equation~\\ref{equ:RelevantConcrete}.\nFor this aim, \\mobidis keeps a ``copy'' of the expected value of\neach defined fluent so that when an exogenous action is sensed it\ncan check whether the action has altered the value of some fluent.\n\nIf the gap is relevant, procedure \\texttt{recovery} is invoked. It\namounts to find a linear program (i.e., without concurrency) to\nreduce the gap sensed as well as, if such a program is found, to\nexecute it. 
After executing such a linear program, the program coded\nby routine \\texttt{process} (and its possible sub-routines) can\nprogress again. This behaviour is equivalent to that expressed\nformally in Equation~\\ref{equ:recovery}, where the adapting linear\nprogram is ``appended before'' and, hence, executed before the\nremaining process.\n\nThe \\texttt{recovery} procedure looks for a sequence of actions that\nleads to a situation in which procedure \\texttt{SameState} returns\ntrue: $\\Sigma \\big((\\pi a.a)^*; SameState?\\big)$. Procedure\n\\texttt{SameState} tests whether executing $(\\pi a.a)^*$ has\nreally reduced the gap. The use of the \\indigolog\\ lookahead\noperator $\\Sigma$ guarantees the action sequence $(\\pi a.a)^*$ is\nchosen so as to make \\texttt{SameState} true. In fact, we do not\nlook for an arbitrary action sequence $(\\pi a.a)^*$ but we reduce the search\nspace, since we search for sequences of invocations of procedure\n\\texttt{manageExecution} with appropriate parameters.\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}}