diff --git "a/data_all_eng_slimpj/shuffled/split2/finalzzllre" "b/data_all_eng_slimpj/shuffled/split2/finalzzllre" new file mode 100644--- /dev/null +++ "b/data_all_eng_slimpj/shuffled/split2/finalzzllre" @@ -0,0 +1,5 @@ +{"text":"\\section{Special values of the Lommel functions $s_{\\protect\\mu ,\\protect\\nu\n}(z)$ and $S_{\\protect\\mu ,\\protect\\nu }(z)$}\n\n\\ \\ \\ \\ The Lommel functions $s_{\\mu ,\\nu }(z)$ and $S_{\\mu ,\\nu }(z)$ with\nunrestricted $\\mu ,\\nu $ satisfy the relations \\cite{Magnus1}\n\n\\begin{quote}\n\\begin{equation}\n\\frac{2\\nu }{z}S_{\\mu ,\\nu }(z)=(\\mu +\\nu -1)S_{\\mu -1,\\nu -1}(z)-(\\mu -\\nu\n-1)S_{\\mu -1,\\nu +1}(z), \\label{eq1}\n\\end{equation}\n\n\\begin{equation}\n\\lbrack (\\mu +1)^{2}-\\nu ^{2}]S_{\\mu ,\\nu }(z)+S_{\\mu +2,\\nu }(z)=z^{\\mu +1},\n\\label{eq2}\n\\end{equation\n\\begin{equation}\n\\frac{dS_{\\mu ,\\nu }(z)}{dz}+\\frac{\\nu }{z}S_{\\mu ,\\nu }(z)=(\\mu +\\nu\n-1)S_{\\mu -1,\\nu -1}(z), \\label{eq3}\n\\end{equation\nand the symmetry propert\n\\begin{equation*}\nS_{\\mu ,-\\nu }(z)=S_{\\mu ,\\nu }(z).\n\\end{equation*}\n\nIn the case where $\\mu $ is an integer $k$ and $\\nu =1\/2$, the recurrence\nrelation (2) viewed as a second-order difference equation is \n\\begin{equation}\n(2k+1)(2k+3)S_{k,1\/2}(z)+4S_{k+2,1\/2}(z)=4z^{\\,k+1}, \\label{eq4}\n\\end{equation\nand can be reduced to first-order equations in the cases of even or odd\nvalues of $k.$ \\ \n\n\\bigskip\n\\end{quote}\n\n\\subsection{Case $k=2m$}\n\n\\begin{quote}\nWith $k=2m$ equation (4) can be written \n\\begin{equation}\n(4m+1)(4m+3)\\,f_{m}(z)+4\\,f_{m+1}(z)=4z^{\\,2m+1}, \\label{eq5}\n\\end{equation}\n\nwhere $f_{m}(z)=S_{2m,1\/2}(z)$ or $s_{2m,1\/2}(z).$ It has the solutio\n\\begin{equation*}\nf_{m}(z)=(-1)^{m}{\\Gamma (2}m+1\/2{)}\\left[ \\frac{f_{0}(z)}{\\sqrt{\\pi }\n-z\\sum_{j=0}^{m-1}\\frac{(-z^{2})^{\\,j}}{\\Gamma (2j+5\/2)}\\right] ,\n\\end{equation*\n\\ the initial values of $\\ f_{0}(z)$ being either $S_{0,1\/2}(z)$ or 
\ns_{0,1\/2}(z).$ \\ Using those relations the solutions to (5) become\n\n\\begin{subequations}\n\\label{0}\n\\begin{align}\nS_{2m,1\/2}(z)& =(-1)^{m}\\Gamma (2m+1\/2)[\\tfrac{S_{0,1\/2}(z)}{\\sqrt{\\pi }\n-z\\sum_{j=0}^{m-1}\\tfrac{(-z^{2})^{\\,j}}{\\Gamma (2j+5\/2)}], \\label{eq6a} \\\\\ns_{2m,1\/2}(z)& =(-1)^{m}\\Gamma (2m+1\/2)[\\tfrac{s_{0,1\/2}(z)}{\\sqrt{\\pi }\n-z\\sum_{j=0}^{m-1}\\tfrac{(-z^{2})^{\\,j}}{\\Gamma (2j+5\/2)}], \\label{eq6b}\n\\end{align\nwhere $S_{0,1\/2}(z),$ and $s_{0,1\/2}(z)$ have been given by Magnus, \\textit\net al} \\cite{Magnus2} as \n\\end{subequations}\n\\begin{eqnarray*}\n\\frac{1}{\\sqrt{\\pi }}S_{0,1\/2}(z) &=&\\sqrt{\\frac{2}{z}}\\left\\{ \\cos (z)\n\\frac{1}{2}-S(\\chi )]-\\sin (z)[\\frac{1}{2}-C(\\chi )]\\right\\} , \\\\\n\\frac{1}{\\sqrt{\\pi }}s_{0,1\/2}(z) &=&\\sqrt{\\frac{2}{z}}\\left\\{ \\sin\n(z)C(\\chi )-\\cos (z)S(\\chi )\\right\\} ,\n\\end{eqnarray*\nwit\n\\begin{equation*}\n\\chi =\\sqrt{\\frac{2z}{\\pi }},\n\\end{equation*}\n\nand where $S$ and $C$ are the Fresnel sine and cosine integrals \\cite{NBS\n\\begin{eqnarray*}\nS(z) &=&\\int_{0}^{z}\\sin (\\frac{1}{2}\\pi t^{2})\\,dt, \\\\\nC(z) &=&\\int_{0}^{z}\\cos (\\frac{1}{2}\\pi t^{2})\\,dt.\n\\end{eqnarray*}\n\\end{quote}\n\n\\subsection{Case $k=2m+1$}\n\n\\begin{quote}\n\\ \\ \\ \\ In the case where $k=2m+1,$ the difference equation (2) become\n\\begin{equation}\n(4m+3)(4m+5)\\,f_{m}(z)+4\\,f_{m+1}(z)=4z^{\\,2m+2}, \\label{eq7}\n\\end{equation}\n\nwhere $f_{m}(z)=S_{2m+1,1\/2}(z)$ or $s_{2m+1,1\/2}(z).$ Here the solution is \n\\begin{equation*}\nf_{m}(z)=(-1)^{m}\\Gamma (2m+3\/2)\\left[ \\frac{2\\,f_{0}(z)}{\\sqrt{\\pi }\n-z^{2}\\sum_{j=0}^{m-1}\\frac{(-z^{2})^{\\,j}}{\\Gamma (2j+7\/2)}\\right] .\n\\end{equation*\nIn the special case $\\mu =-1$ and $\\nu =1\/2$ in (2) the functions\\thinspace\\ 
\n$f_{0}(z)\n\\begin{equation*}\nf_{0}(z)=S_{1,1\/2}(z)=1+\\frac{1}{4}S_{-1,1\/2}(z),\n\\end{equation*\no\n\\begin{equation*}\nf_{0}(z)=s_{1,1\/2}(z)=1+\\frac{1}{4}s_{-1,1\/2}(z).\n\\end{equation*\nWe hav\n\\begin{eqnarray*}\nS_{1,1\/2}(z) &=&1+\\sqrt{\\frac{\\pi }{2z}}\\{\\cos (z)[\\frac{1}{2}-C(\\chi\n)]+\\sin (z)[\\frac{1}{2}-S(\\chi )]\\}, \\\\\ns_{1,1\/2}(z) &=&1-\\sqrt{\\frac{\\pi }{2z}}\\{\\sin (z)S(\\chi )+\\cos (z)C(\\chi\n)\\},\n\\end{eqnarray*\nwhere values of $S_{-1,1\/2}(z),$ $s_{-1,1\/2}(z)$ have been given by Magnus, \n\\textit{et al }as\n\n\\begin{eqnarray*}\n\\frac{\\,1}{2\\sqrt{\\pi }}S_{-1,1\/2}(z) &=&\\sqrt{\\frac{2}{z}}\\left\\{ \\cos (z)\n\\frac{1}{2}-C(\\chi )]+\\sin (z)[\\frac{1}{2}-S(\\chi )]\\right\\} , \\\\\n\\frac{\\,1}{2\\sqrt{\\pi }}s_{-1,1\/2}(z) &=&-\\sqrt{\\frac{2}{z}}\\left\\{ \\sin\n(z)S(\\chi )+\\cos (z)C(\\chi )\\right\\} .\n\\end{eqnarray*}\n\\end{quote}\n\nUsing those relations, the solutions to (7) become after resumming \n\\begin{subequations}\n\\begin{eqnarray}\nS_{2m+1,1\/2}(z) &=&(-1)^{m}\\Gamma (2m+3\/2)[\\frac{S_{-1,1\/2}(z)}{2\\sqrt{\\pi }\n+\\sum_{j=0}^{m}\\tfrac{(-z^{2})^{\\,j}}{\\Gamma (2j+3\/2)}], \\label{eq8a} \\\\\ns_{2m+1,1\/2}(z) &=&(-1)^{m}\\Gamma (2m+3\/2)[\\frac{s_{-1,1\/2}(z)}{2\\sqrt{\\pi }\n+\\sum_{j=0}^{m}\\tfrac{(-z^{2})^{\\,j}}{\\Gamma (2j+3\/2)}]. \\label{eq8b}\n\\end{eqnarray}\n\n\\subsection{Integrals with values containing the Fresnel functions}\n\nThe integrals \n\\end{subequations}\n\\begin{eqnarray*}\n&&\\int_{0}^{1}z^{2k}\\cos (\\lambda z^{2})dz, \\\\\n&&\\int_{0}^{1}z^{2k}\\sin (\\lambda z^{2})dz,\n\\end{eqnarray*\nwhich contain even powers of the variable, can be expressed in terms of the\nLommel functions. 
\\ \n\nIn the first instance, Maple gives \n\\begin{eqnarray}\n\\int_{0}^{1}z^{2k}\\cos (\\lambda z^{2})dz &=&[1-\\frac{s_{k+1,1\/2(\\lambda )}}\n\\lambda ^{k}}]\\frac{\\cos (\\lambda )}{(2k+1)} \\label{eq9} \\\\\n&&+[(2k-1)s_{k,3\/2}+(2\/\\lambda )s_{k+1,1\/2}]\\frac{\\sin (\\lambda )}{2\\lambda\n^{k}(2k+1)}. \\notag\n\\end{eqnarray\nUsing (1) and (2) we get the simplified for\n\\begin{equation}\n\\int_{0}^{1}z^{2k}\\cos (\\lambda z^{2})dz=\\frac{1}{4\\lambda ^{k}}[(2k-1)\\cos\n(\\lambda )\\,s_{k-1,1\/2}(\\lambda )+2\\sin (\\lambda )\\,s_{k,1\/2}(\\lambda )].\n\\label{eq10}\n\\end{equation\nThe values of the integral in the case where $k=2m$ can be obtained from the\nLommel expressions above in (6b) and (8b) with $m$ replaced with $m-1$. \\ We\nge\n\\begin{eqnarray*}\n\\int_{0}^{1}z^{4m}\\cos (\\lambda z^{2})dz &=&\\frac{(-1)^{m}\\Gamma (2m+1\/2)}\n2\\lambda ^{2m}}\\{\\sqrt{\\frac{2}{\\lambda }}C(\\chi ) \\\\\n&&-\\cos (\\lambda )\\sum_{j=0}^{m-1}\\frac{(-\\lambda ^{2})^{\\,j}}{\\Gamma\n(2j+3\/2)} \\\\\n&&-\\lambda \\sin (\\lambda )\\sum_{j=0}^{m-1}\\frac{(-\\lambda ^{2})^{\\,j}}\n\\Gamma (2j+5\/2)}\\}.\n\\end{eqnarray*\nIn the case $k=2m+1$ we hav\n\\begin{eqnarray*}\n\\int_{0}^{1}z^{4m+2}\\cos (\\lambda z^{2})dz &=&\\frac{(-1)^{m+1}\\Gamma (2m+3\/2\n}{2\\lambda ^{2m+1}}\\{\\sqrt{\\frac{2}{\\lambda }}S(\\chi ) \\\\\n&&+\\lambda \\cos (\\lambda )\\sum_{j=0}^{m-1}\\frac{(-\\lambda ^{2})^{\\,j}}\n\\Gamma (2j+5\/2)} \\\\\n&&-\\sin (\\lambda )\\sum_{j=0}^{m}\\frac{(-\\lambda ^{2})^{\\,j}}{\\Gamma (2j+3\/2)\n\\}.\n\\end{eqnarray*\nWith these results we see that all cases of cosine integrals with even\npowers of the variable have been obtained in terms containing the Fresnel $S$\nfunction. 
\\ \n\nFor the corresponding sine integrals, integration by parts give\n\\begin{equation*}\n\\int_{0}^{1}z^{2k}\\sin (\\lambda z^{2})dz=\\frac{\\sin (\\lambda )}{2k+1}-\\frac\n2\\lambda }{2k+1}\\int_{0}^{1}z^{2(k+1)}\\cos (\\lambda z^{2})dz.\n\\end{equation*\nUsing (10) we hav\n\\begin{equation*}\n\\int_{0}^{1}z^{2k}\\sin (\\lambda z^{2})dz=\\frac{1}{4\\lambda ^{k}}[(2k-1)\\sin\n(\\lambda )s_{k-1,1\/2}(\\lambda )-2\\cos (\\lambda )s_{k,1\/2}(\\lambda )].\n\\end{equation*\nWith $k=2m$ we ge\n\\begin{equation*}\n\\int_{0}^{1}z^{4m}\\sin (\\lambda z^{2})dz=\\frac{1}{2\\lambda ^{2m}}[(4m-1)\\sin\n(\\lambda )s_{2m-1,1\/2}(\\lambda )-2\\cos (\\lambda )s_{2m,1\/2}(\\lambda )],\n\\end{equation*\nwhich then become\n\\begin{eqnarray*}\n\\int_{0}^{1}z^{4m}\\sin (\\lambda z^{2})dz &=&\\frac{(-1)^{m}\\Gamma (2m+1\/2)}\n2\\lambda ^{2m}}\\{\\sqrt{\\frac{2}{\\lambda }}S(\\chi ) \\\\\n&&-\\sin (\\lambda )\\sum_{j=0}^{m-1}\\frac{(-1)^{\\,j}\\lambda ^{2j}}{\\Gamma\n(2j+3\/2)} \\\\\n&&+\\lambda \\cos (\\lambda )\\sum_{j=0}^{m-1}\\frac{(-1)^{\\,j}\\lambda ^{2j}}\n\\Gamma (2j+5\/2)}\\}.\n\\end{eqnarray*\nFor $k=2m+1$ the sine integrals are given by Maple a\n\\begin{equation*}\n\\int_{0}^{1}z^{4m+2}\\sin (\\lambda z^{2})dz=\\frac{1}{4\\lambda ^{2m+1}}[\\sin\n(\\lambda )(4m+1)s_{2m,1\/2}(\\lambda )-2\\cos (\\lambda )s_{2m+1,1\/2}(\\lambda )].\n\\end{equation*\nUsing the values of Lommel functions given above we hav\n\\begin{eqnarray*}\n\\int_{0}^{1}z^{4m+2}\\sin (\\lambda z^{2})dz &=&\\frac{(-1)^{m}\\Gamma (2m+3\/2)}\n2\\lambda ^{2m+1}}\\{\\sqrt{\\frac{2}{\\lambda }}C(\\chi ) \\\\\n&&-\\lambda \\sin (\\lambda )\\sum_{j=0}^{m-1}\\frac{(-1)^{\\,j}\\lambda ^{2j}}\n\\Gamma (2j+5\/2)} \\\\\n&&-\\cos (\\lambda )\\sum_{j=0}^{m}\\frac{(-1)^{\\,j}\\lambda ^{2j}}{\\Gamma\n(2j+3\/2)}\\}.\n\\end{eqnarray*\nWith these results we see that all values of the sine integrals which\ncontain \\textit{even} powers of $z$ requires the presence of the Fresnel\nintegrals $C(\\chi )$.\n\n\\section{Integrals with 
values containing elementary functions}\n\nWe next consider integrals which contain \\textit{odd} powers of $z$ i.e\n\\begin{eqnarray*}\n&&\\int_{0}^{1}z^{2k+1}\\cos (\\lambda z^{2})dz, \\\\\n&&\\int_{0}^{1}z^{2k+1}\\sin (\\lambda z^{2})dz.\n\\end{eqnarray*\nIn the first instance Maple give\n\\begin{eqnarray*}\n\\int_{0}^{1}z^{2k+1}\\cos (\\lambda z^{2})dz &=&\\frac{1}{2(k+1)}\\{(1-\\frac\ns_{k+3\/2,1\/2}(\\lambda )}{\\lambda ^{k+1\/2}})\\cos (\\lambda ) \\\\\n&&+(\\,k\\,s_{k+1\/2,3\/2}(\\lambda ))+\\frac{s_{k+3\/2,1\/2}(\\lambda )}{\\lambda }\n\\frac{\\sin (\\lambda )}{\\lambda ^{k+1\/2}}\\},\n\\end{eqnarray*\nwhich reduces t\n\\begin{equation}\n\\int_{0}^{1}z^{2k+1}\\cos (\\lambda z^{2})dz=\\frac{1}{2\\lambda ^{k+1\/2}}[k\\cos\n(\\lambda )s_{k-1\/2,1\/2}(\\lambda )+\\sin (\\lambda )s_{k+1\/2,1\/2}(\\lambda )]\n\\label{eq11}\n\\end{equation\nusing (1) and (2). \\ Where $k=2m$ we hav\n\\begin{equation}\n\\int_{0}^{1}z^{4m+1}\\cos (\\lambda z^{2})dz=\\frac{1}{2\\lambda ^{2m+1\/2}\n[2m\\cos (\\lambda )s_{2m-1\/2,1\/2}(\\lambda )+\\sin (\\lambda\n)s_{2m+1\/2,1\/2}(\\lambda )]. \\label{eq12}\n\\end{equation\nWe see that this expression requires the Lommel functions \ns_{2m-1\/2,1\/2}(\\lambda )$ and $s_{2m+1\/2,1\/2}(\\lambda )$. \\ In the first\ncase we have from (2) and the initial condition $s_{3\/2,1\/2}(\\lambda )=\\sqrt\n\\lambda }\\left[ 1-\\sin (\\lambda )\/\\lambda \\right] $, the Lommel function \ns_{2m-1\/2,1\/2}(\\lambda )$ as \n\\begin{equation*}\ns_{2m-1\/2,1\/2}(\\lambda )=\\frac{(-1)^{m}(2m-1)!}{\\sqrt{\\lambda }}\\left[ \\sin\n(\\lambda )-\\sum_{j=0}^{m-1}\\frac{(-1)^{\\,j}\\lambda ^{2j+1}}{(2j+1)!}\\right] .\n\\end{equation*\n\\ The function $s_{2m+1,1\/2}(\\lambda )$ then can be obtained from the\ndifferential-difference equation in (3). \\ That is to sa\n\\begin{equation}\n\\frac{d\\,s_{2m+1\/2,1\/2}(\\lambda )}{d\\lambda }+\\frac{1}{2\\lambda \ns_{2m+1\/2,1\/2}(\\lambda )=2m\\,s_{2m-1\/2,1\/2}(\\lambda ). 
\\label{eq13}\n\\end{equation\nWe get with the initial condition $s_{2m+1\/2,1\/2}(0)=0,$ the solution to\n(13) a\n\\begin{equation}\ns_{2m+1\/2,1\/2}(\\lambda )=\\frac{2m}{\\sqrt{\\lambda }}\\int_{0}^{\\lambda }\\sqrt{\n}s_{2m-1\/2,1\/2}(z)\\,dz, \\label{eq14}\n\\end{equation\nwhich immediately gives \n\\begin{equation*}\ns_{2m+1\/2,1\/2}(\\lambda )=\\frac{(-1)^{m+1}(2m)!}{\\sqrt{\\lambda }}\\left[ \\cos\n(\\lambda )-\\sum_{j=0}^{m}\\frac{(-1)^{\\,j}\\lambda ^{2j}}{(2j)!}\\right] .\n\\end{equation*\nWe have obtained in these cases closed forms for the Lommel functions which\ncontain only elementary functions. As a result, we ge\n\\begin{eqnarray*}\n\\int_{0}^{1}z^{4m+1}\\cos (\\lambda z^{2})dz &=&\\frac{(-1)^{m}(2m)!}{2\\lambda\n^{2m+1}}\\{\\sin (\\lambda )\\sum_{j=0}^{m}\\frac{(-1)^{\\,j}\\lambda ^{2j}}{(2j)!}\n\\\\\n&&-\\lambda \\cos (\\lambda )\\sum_{j=0}^{m-1}\\frac{(-1)^{\\,j}\\lambda ^{2j}}\n(2j+1)!}\\}.\n\\end{eqnarray*\nThe cosine integral containing powers $4m+3$ is given b\n\\begin{equation*}\n\\int_{0}^{1}z^{4m+3}\\cos (\\lambda z^{2})dz=\\frac{1}{2\\lambda ^{2m+3\/2}\n[(2m+1)\\cos (\\lambda )\\,s_{2m+1\/2,1\/2}(\\lambda )+\\sin (\\lambda\n)s_{2m+3\/2,1\/2}(\\lambda )].\n\\end{equation*\nIn the latter expression the quantity $s_{2m+3\/2,1\/2}(\\lambda )$ can be\nobtained from (3) and we have \n\\begin{eqnarray*}\n\\int_{0}^{1}z^{4m+3}\\cos (\\lambda z^{2})dz &=&\\frac{(-1)^{m+1}(2m+1)!}\n2\\lambda ^{2m+2}}\\{1-\\cos (\\lambda )\\sum_{j=0}^{m}\\frac{(-1)^{\\,j}\\lambda\n^{2j}}{(2j)!} \\\\\n&&-\\lambda \\sin (\\lambda )\\sum_{j=0}^{m}\\frac{(-1)^{\\,j}\\lambda ^{2j}}\n(2j+1)!}\\}.\n\\end{eqnarray*\nAn expression for the sine integrals with odd powers i.e\n\\begin{equation*}\n\\int_{0}^{1}z^{2k+1}\\sin (\\lambda z^{2})dz=\\frac{1}{2\\lambda ^{k+1\/2}}[k\\sin\n(\\lambda )\\,\\,s_{k-1\/2,1\/2}(\\lambda )-\\cos (\\lambda )s_{k+1\/2,1\/2}(\\lambda\n)],\n\\end{equation*\nhas been obtained using integration by parts of the corresponding $\\cos\n(\\lambda z^{2})$ 
integral together with the latter's integrated form. \\ Then\nin the case where $\\ k=2m$ we have \n\\begin{eqnarray*}\n\\int_{0}^{1}z^{4m+1}\\sin (\\lambda z^{2})dz &=&\\frac{(-1)^{m}(2m)!}{2\\lambda\n^{2m+1}}\\{1-\\cos (\\lambda )\\sum_{j=0}^{m}\\frac{(-1)^{\\,j}\\lambda ^{2j}}{(2j)\n} \\\\\n&&-\\lambda \\sin (\\lambda )\\sum_{j=0}^{m-1}\\frac{(-1)^{\\,j}\\lambda ^{2j}}\n(2j+1)!}\\}.\n\\end{eqnarray*\nWhere $k=2m+1$ we hav\n\\begin{equation*}\n\\int_{0}^{1}z^{4m+3}\\sin (\\lambda z^{2})dz=\\frac{1}{2\\lambda ^{2m+3\/2}\n[(2m+1)\\sin (\\lambda )\\,\\,s_{2m+1\/2,1\/2}(\\lambda )-\\cos (\\lambda\n)s_{2m+3\/2,1\/2}(\\lambda ).\n\\end{equation*\nIn the latter expression the quantity $s_{2m+3\/2,1\/2}(\\lambda )$ can also be\nobtained from (3) and the integral being sought has the value\n\n\\begin{eqnarray*}\n\\int_{0}^{1}z^{4m+3}\\sin (\\lambda z^{2})dz &=&\\frac{(-1)^{m}(2m+1)!}\n2\\lambda ^{2m+2}}\\{-\\lambda \\cos (\\lambda )\\sum_{j=0}^{m}\\frac\n(-1)^{\\,j}\\lambda ^{2j}}{(2j+1)!} \\\\\n&&+\\sin (\\lambda )\\sum_{j=0}^{m}\\frac{(-1)^{\\,j}\\lambda ^{2j}}{(2j)!}\\}.\n\\end{eqnarray*\n\\ \n\n\\section{Asymptotic forms for the integrals containing $S(\\protect\\chi )$\nand $C(\\protect\\chi )$}\n\nIt is useful to provide values of the integrals evaluated above in section 2\nwhere the parameter $\\lambda $ is large. \\ Initially we consider the case of\nthe integrals with even powers of the integration variable $z$ i.e\n\\begin{eqnarray*}\n&&\\int_{0}^{1}z^{4m}\\cos (\\lambda z^{2})dz, \\\\\n&&\\int_{0}^{1}z^{4m}\\sin (\\lambda z^{2})dz, \\\\\n&&\\int_{0}^{1}z^{4m+2}\\cos (\\lambda z^{2})dz, \\\\\n&&\\int_{0}^{1}z^{4m+2}\\sin (\\lambda z^{2})dz,\n\\end{eqnarray*\nwhich we have seen contain the Fresnel integrals. 
The asymptotic forms for\nthe functions $S\\left( \\chi \\right) $ and $C(\\chi )$ can be obtained from\nthe expressions \\cite{NIST} \n\\begin{eqnarray*}\nS\\left( \\sqrt{\\frac{2\\lambda }{\\pi }}\\right) &=&\\frac{1}{2}-f\\,(\\sqrt{\\frac\n2\\lambda }{\\pi }})\\cos (\\lambda )-g(\\sqrt{\\frac{2\\lambda }{\\pi }})\\sin\n(\\lambda ), \\\\\nC\\left( \\sqrt{\\frac{2\\lambda }{\\pi }}\\right) &=&\\frac{1}{2}+f\\,(\\sqrt{\\frac\n2\\lambda }{\\pi }})\\sin (\\lambda )-g(\\sqrt{\\frac{2\\lambda }{\\pi }})\\cos\n(\\lambda ),\n\\end{eqnarray*\nwhere $f(z)$ and $g(z)$ are the Fresnel auxiliary functions. \\ Their\nasymptotic forms with (cut of\\thinspace f $N\\geq 1$) are given b\n\\begin{eqnarray*}\nf\\,(\\sqrt{\\frac{2\\lambda }{\\pi }}) &\\thicksim &\\frac{1}{\\sqrt{2}\\,\\pi\n\\lambda ^{1\/2}}\\sum_{j=0}^{N-1}\\frac{(-1)^{j}}{\\lambda ^{2j}}\\Gamma (2j+1\/2),\n\\\\\ng\\,(\\sqrt{\\frac{2\\lambda }{\\pi }}) &\\thicksim &\\frac{1}{\\sqrt{2}\\,\\pi\n\\lambda ^{3\/2}}\\sum_{j=0}^{N-1}\\frac{(-1)^{j}}{\\lambda ^{2j}}\\Gamma (2j+3\/2).\n\\end{eqnarray*\nWe get for the integrals in question, having used the relatio\n\\begin{equation*}\n\\frac{1}{\\Gamma (-z)}=-\\frac{\\Gamma (z+1)}{\\pi }\\sin (\\pi z),\n\\end{equation*\nin the sine and cosine sums, the expression\n\\begin{eqnarray*}\n\\int_{0}^{1}z^{4m}\\cos (\\lambda z^{2})dz &\\thicksim &\\frac{\\Gamma (2m+1\/2)}{\n}\\{\\frac{(-1)^{m}}{\\sqrt{2}\\lambda ^{2m+1\/2}} \\\\\n&&+\\frac{\\cos (\\lambda )}{\\pi }\\sum_{j=1}^{m+N}\\frac{(-1)^{\\,j}}{\\lambda\n^{2j}}\\Gamma (2j-2m-1\/2) \\\\\n&&+\\frac{\\sin (\\lambda )}{\\pi }\\sum_{j=1}^{m+N}\\frac{(-1)^{\\,j+1}}{\\lambda\n^{2j-1}}\\Gamma (2j-2m-3\/2)\\},\n\\end{eqnarray*\n\\begin{eqnarray*}\n\\int_{0}^{1}z^{4m}\\sin (\\lambda z^{2})dz &\\thicksim &\\frac{\\Gamma (2m+1\/2)}{\n}\\{\\frac{(-1)^{m}}{\\sqrt{2}\\lambda ^{2m+1\/2}} \\\\\n&&+\\frac{\\cos (\\lambda )}{\\pi }\\sum_{j=1}^{m+N}\\frac{(-1)^{\\,j}}{\\lambda\n^{2j-1}}\\Gamma (2j-2m-3\/2) \\\\\n&&+\\frac{\\sin (\\lambda )}{\\pi 
}\\sum_{j=1}^{m+N}\\frac{(-1)^{\\,j}}{\\lambda\n^{2j}}\\Gamma (2j-2m-1\/2)\\},\n\\end{eqnarray*\n\\begin{eqnarray*}\n\\int_{0}^{1}z^{4m+2}\\cos (\\lambda z^{2})dz &\\thicksim &\\frac{\\Gamma (2m+3\/2\n}{2}\\{\\frac{(-1)^{m+1}}{\\sqrt{2}\\lambda ^{2m+3\/2}} \\\\\n&&+\\frac{\\cos (\\lambda )}{\\pi }\\sum_{j=1}^{m+N}\\frac{(-1)^{\\,j+1}}{\\lambda\n^{2j}}\\Gamma (2j-2m-3\/2) \\\\\n&&+\\frac{\\sin (\\lambda )}{\\pi }\\sum_{j=1}^{m+N+1}\\frac{(-1)^{\\,j}}{\\lambda\n^{2j-1}}\\Gamma (2j-2m-5\/2)\\},\n\\end{eqnarray*\n\\begin{eqnarray*}\n\\int_{0}^{1}z^{4m+2}\\sin (\\lambda z^{2})dz &\\thicksim &\\frac{\\Gamma (2m+3\/2\n}{2}\\{\\frac{(-1)^{m}}{\\sqrt{2}\\lambda ^{2m+3\/2}} \\\\\n&&+\\frac{\\cos (\\lambda )}{\\pi }\\sum_{j=1}^{m+N}\\frac{(-1)^{\\,j+1}}{\\lambda\n^{2j+1}}\\Gamma (2j-2m-1\/2) \\\\\n&&+\\frac{\\sin (\\lambda )}{\\pi }\\sum_{j=1}^{m+N}\\frac{(-1)^{\\,j+1}}{\\lambda\n^{2j}}\\Gamma (2j-2m-3\/2)\\}.\n\\end{eqnarray*\nWe see that the terms in these asymptotic expressions contain the sine and\ncosine sums along with the additional and significant contributions from the\nFresnel integrals.\n\n\\subsection{Asymptotic values for Fresnel related integrals}\n\n\\bigskip\n\nIn extensions of the Schwinger-Englert semi-classical theory \\cite{englert}\\\nof atomic structure, asymptotic forms for the integral\n\\begin{eqnarray*}\n&&\\int_{0}^{1}\\frac{\\sin (\\lambda z^{2})}{(1+a\\,z^{2})}dz, \\\\\n&&\\int_{0}^{1}\\frac{\\cos (\\lambda z^{2})}{(1+a\\,z^{2})}dz,\n\\end{eqnarray*\narise with $\\lambda \\rightarrow \\infty $ and $00), \\\\\n&& \\\\\n\\frac{4n}{2^{\\Delta \\,\\epsilon (n)}}f_{n,2n+1} &=&-2e_{n,2n},\\hspace{0.25in\n(k=n), \\\\\n\\frac{4n}{2^{\\Delta \\,\\epsilon (n)}}f_{k,2n+1}+(4k-1-4n)\\,f_{k,2n}\n&=&-2e_{k,2n},\\hspace{0.25in}(k E_a$) to be:\n\\begin{align}\n \\sigma^x &= \\op{a}{b} + \\op{b}{a}\\\\\n \\sigma^y &= i\\op{a}{b} - i\\op{b}{a} \\\\\n \\sigma^z & = \\op{b}{b} - \\op{a}{a}\n\\end{align}\nIn the Bloch sphere representation, for each instant $t$, this 
Hamiltonian describes a rotation around the axis $\\mathbf{\\Omega}$ with angular velocity $\\Omega_{eff} = |\\bm{\\Omega}| = \\sqrt{\\Omega^2 + \\delta^2}$, as illustrated in Figure \\ref{fig:bloch_rotation}.\n\n\\begin{figure}[h]\n\\centering\n\\includegraphics[width=0.9\\columnwidth, trim=0 0.5cm 0 0.8cm, clip]{figures\/bloch_rotation.png}\n\\caption{Representation of the drive Hamiltonian's dynamics as a rotation in the Bloch sphere.}\n\\label{fig:bloch_rotation}\n\\end{figure}\n\n\n\n\\subsection{Rydberg states}\n\\label{sec:rydberg}\n\nIn neutral-atom devices, atoms are driven to Rydberg states as a way to make them interact over large distances. Depending on the electronic levels that are involved in the process, the atoms experience different types of interactions, translating into different Hamiltonians~\\cite{Browaeys20}.\\\\\n\nIn the so-called ``Ising'' configuration, which is obtained when the spin states are one of the ground states $\\ket{g}$ and a Rydberg state $\\ket{r}$~\\cite{schauss2015crystallization,labuhn2016tunable,Bernien17,leseleuc2018accurate} (the \\texttt{ground-rydberg} basis in Pulser, see Fig. \\ref{Ising_config}.), the interaction adds a term to the drive Hamiltonian describing the transition between those states: \n\n\\begin{equation}\n\\mathcal H^{gr}(t) = \\sum_i \\left(H^D_i(t) + \\sum_{j10}\n\\end{lstlisting}\n\nFrom the output of the simulation, we can plot the histogram of the sampled results, shown in Figure \\ref{histogram_results}. 
Notice the peak corresponding to the antiferromagnetic state.\\\\\n\n\\begin{figure}[h]\\label{fig:AFM_hist}\n\\centering\n\\includegraphics[width=\\linewidth]{figures\/AFM_hist}\n\\caption{Most frequent results from the sampling of the final state.}\n\\label{histogram_results}\n\\end{figure}\n\n\\begin{figure}[h]\n\\centering\n\\includegraphics[width=\\linewidth]{figures\/figure_AFM_pulser_paper_2.pdf}\n\\caption{(a) Simulation results for the $g^{(2)}$ correlation function, eq.\\eqref{eq:corr_fct}. (b) Sweep results for the $S_{\\text{N\u00e9el}}$ score function, eq.\\eqref{eq:neel}.}\n\\label{fig:AFM}\n\\end{figure}\n\nFrom \\lstinline{results}, one can access the state of the system throughout the simulation (as a list of \\texttt{qutip.Qobj}s) by calling \\lstinline{results.states}. This can be useful for post-processing the simulation results and computing observables. Alternatively, one can also call \\lstinline{results.expect()} to get the exact expectation value of a list of observables. For example, \\lstinline{results.expect( [qutip.basis(9, 0).proj()])} will return the probability of measuring the all-excited state $|r\\rangle^{\\otimes 9}$ for every state in \\lstinline{results}.\n\nWe show in Figure \\ref{fig:AFM}(a) the output of the simulation for the correlation function $g^{(2)}$, eq. \\eqref{eq:corr_fct}. Note that the correlation function is expected to decay exponentially (modulo finite-size effects), which is best observed at larger system sizes~\\cite{scholl2020programmable}.\n\nChanging the endpoint $\\delta_f$ of the pulse's path results in different qualities of the final state. In order to evaluate how well it represents an antiferromagnetic state, we will explore the $\\Omega = 0$ line on the phase diagram, Fig. \\ref{AFM:phase_diagram_and_seq}(a). Since the value of correlation function $g^{(2)}(k,l)$ (eq. 
\\eqref{eq:corr_fct}) can be positive or negative, we compensate with alternating signs to construct a single score for the state \\cite{lienhard2018observing}:\n\\begin{equation}\nS_{\\text{N\u00e9el}} = \\sum_{(k,l)\\neq (0,0)} (-1)^{|k| + |l|} g^{(2)}(k,l),\n\\label{eq:neel}\n\\end{equation}\nwhich should be largest when the state is closest to an antiferromagnetic product state. By sweeping over different values of detuning $\\delta_f$, we examine the region $ 0 < \\hbar \\delta_f\/U < 4$. In a few seconds, Pulser gives the results shown in Figure \\ref{fig:AFM} (b), showing where the antiferromagnetic phase is more strongly present.\n\n\n\\subsection{Variational algorithms with Pulser: solving a graph problem with neutral atoms}\n\n\nThe implementation of variational algorithms, such as the Quantum Approximation Optimization Algorithm\\,\\cite{Farhi14} (QAOA), is facilitated by the use of a \\textit{parametrized sequence} in Pulser. We illustrate this aspect here, by solving the \\emph{Maximum Independent Set} (MIS) problem on \\emph{Unit Disk} graphs, which can be naturally studied on neutral-atom devices \\cite{pichler2018quantum, henriet2020robustness, dalyac2020qualifying}. \n\n\n\\subsubsection{From a graph to an atomic register} \n\nAn \\emph{independent set} of a graph is a subset of vertices where any two elements of this subset are not connected by an edge. The MIS corresponds to the largest of such subsets. The so-called \\emph{Unit Disk} (UD) version of this problem corresponds to the instances where the graph under consideration lives in 2D and displays an edge between two nodes if they are within a unit length of each other.\\\\\n\nInterestingly, ensembles of interacting Rydberg atoms in 2D can be naturally represented by Unit Disk graph structures: the atoms are the nodes of the graphs and an edge in the graph corresponds to two atoms being within the blockade radius of each other. 
In this example, we will take $\\Omega$ fixed to a frequency of 1 $\\text{rad}\/\\mu$s, hence the blockade radius can be obtained by using \\lstinline{Chadoq2.rydberg_blockade_radius(1.)}. The following code block instantiates the atomic register, and displays the graph induced by their interactions with the \\lstinline{draw_graph=True} and \\lstinline{draw_half_radius=True} arguments passed to the \\lstinline{reg.draw()} method, as illustrated in Figure \\ref{fig:MIS_reg}.\n\n\n\\begin{lstlisting}[firstnumber=1]\n# Load basic packages and classes\nimport numpy as np\nimport pulser \nfrom pulser import Register, Pulse, Sequence\nfrom pulser.devices import Chadoq2\nfrom pulser.simulation import Simulation\n# Set Register\npos = np.array([[0., 0.], [-4, -7], [4,-7], [8,6], [-8,6]])\nreg = Register.from_coordinates(pos)\nRb = Chadoq2.rydberg_blockade_radius(1.)\n\nreg.draw(\n blockade_radius=Rb,\n draw_graph=True,\n draw_half_radius=True\n )\n\\end{lstlisting}\n\n\\begin{figure}[h]\n\\centering\n\\includegraphics[scale=0.6]{figures\/MIS_reg}\n\\caption{The register for the MIS graph problem. The shaded circles show the blockade half-radius, such that the intersecting regions indicate existing links.}\n\\label{fig:MIS_reg}\n\\end{figure}\n\nThe graph $G$ has two maximal independent sets: $(1,3,4)$ and $(2,3,4)$, respectively \\texttt{01011} and \\texttt{00111} in binary. One could try to prepare those states using an adiabatic approach such as the one illustrated in section \\ref{adiabatic_preparation}. Another approach is to use QAOA, as we illustrate below.\n\n\\subsubsection{Building the parametrized sequence}\n\nQAOA exploits both a quantum and a classical processor that exchange information within a feedback loop to solve an optimization problem. The role of the neutral-atom processor is to prepare and measure an $N$-atom parametrized wavefunction. 
The outcome of the measurement is then used by the classical processor in a standard classical optimization procedure, that updates the parameters for the next iteration.\\\\\n\nMore specifically, the preparation of the parametrized wavefunction is achieved through the successive application of two non-commutative Hamiltonians, with all atoms initially starting in the ground state $|00\\dots0\\rangle$ of the \\lstinline{ground-rydberg} basis. The first one is realized by taking $\\Omega = 1$ $\\text{rad}\/\\mu s$, and $\\delta = 0 $ $\\text{rad}\/\\mu s$ in Eq.(\\ref{eq:global_ising}). The second Hamiltonian is realized with $\\Omega = \\delta = 1$ $\\text{rad}\/\\mu s$. These two Hamiltonians are applied successively, for durations $t_j$ and $s_j$, respectively. The dimension of $\\bm t$ and $\\bm s$, i.e. the number of layers applied to the system, is referred to as the depth of the algorithm. The outcome of the measurement is then used as the objective function in a standard classical optimization procedure, that updates the parameters for the next iteration.\\\\\n\nWhen implementing a variational procedure, multiple sequences are created that differ only by the durations $t_j$ and $s_j$ of the pulses. This is conveniently handled in Pulser by turning a regular sequence into a \\textit{parametrized} sequence. A \\texttt{Sequence} is said to be parametrized once a \\textit{variable} is declared and used in a sequence-building call, from which point the sequence building process can continue as usual but the sequence is no longer built on the fly. Instead, all calls to a parametrized sequence are stored as a \\textit{blueprint} for generating a new \\texttt{Sequence}, which may depend on the value of the declared variables. Consequently, it is not possible to progressively monitor the creation process of a sequence that is parametrized (e.g. by drawing the state of a sequence as new pulses are added). 
The building of the sequence itself only happens when \\texttt{Sequence.build()} is called, at which point specific values for all the declared variables have to be specified as arguments.\\\\\n\nThe \\texttt{Variable} objects are obtained by calling \\texttt{Sequence.declare\\_variable()} and can be used throughout the \\texttt{Sequence} creation process (i) as parameters when creating new \\texttt{Waveform}s and \\texttt{Pulses}, or (ii) as arguments for standard sequence creation methods like \\texttt{add}, \\texttt{target}, \\texttt{align} and \\texttt{delay}. Moreover, they support basic arithmetic operations and, when they are of \\texttt{size}$>1$, iteration and item accessing. \\\\\n\nThe notion of parametrized sequence can become very handy with real world constraints such as those we find in cloud-based platforms. First, the bandwidth allocation of a program, variational or not, is greatly reduced thanks to this factorization. A user only needs to send the parametrized sequence that describes his program once, and then each new iteration only requires the associated set of parameters. Thanks to the high flexibility of Pulser, users can have a very fine-grained control over the waveforms that define their pulses. This results in needing larger objects to define these pulses, which makes such a factorization even more useful.\n\nIn addition, one area of improvement for cold atom platforms is the large calibration times for atom register configuration changes. This means that running randomly independent sequences on the same QPU can be very inefficient. The sequences of a given user's program on the other hand share some common parameters, including the register configuration. 
On the QPU side, we can treat the parametrized sequences as the basic block of the QPU scheduling to greatly reduce the latencies due to the calibration processes, make sure the different user programs are run efficiently and that all the sequences in a given program are run on the same QPU.\\\\\n\nThe example below shows how \\texttt{Sequence} can be used to create a parametrized QAOA sequence with two layers, with the variable duration of each layer stored in the \\lstinline{t_list} and \\lstinline{s_list} arrays. Notice that no value is given to the contents of \\lstinline{t_list} and \\lstinline{s_list}.\n \n\n\\begin{lstlisting}[firstnumber=17]\n# Parametrized sequence\nseq = Sequence(reg, Chadoq2)\nseq.declare_channel('ch0', 'rydberg_global')\n\nt_list = seq.declare_variable('t_list', size=2)\ns_list = seq.declare_variable('s_list', size=2)\n\nfor t, s in zip(t_list, s_list): \n pulse_1 = Pulse.ConstantPulse(1000*t, 1., 0., 0) \n pulse_2 = Pulse.ConstantPulse(1000*s, 1., 1., 0)\n \n seq.add(pulse_1, 'ch0')\n seq.add(pulse_2, 'ch0')\n\\end{lstlisting}\n\nThe sequence \\lstinline{seq} above will only be built once the user provides specific values for \\lstinline{t_list} and \\lstinline{s_list} while calling the \\texttt{Sequence.build()} method. It thus enables to build a variety of sequences that share the same structure:\n\n\\begin{lstlisting}[numbers=none]\n# Build sequences with specific values\nmy_seq_1 = seq.build(t_list=[2,4], s_list=[3,6])\nmy_seq_2 = seq.build(t_list=[1,3], s_list=[2,5])\n\\end{lstlisting}\n\n\\subsubsection{Classical optimization and QAOA results}\n\nThe parametrized sequence above can then be used in conjunction with a classical optimizer in order to determine a set of good values for \\lstinline{t_list} and \\lstinline{s_list}. When running the full QAOA procedure in closed loop, the optimizer is responsible for iteratively selecting the next set of parameters to be tested. 
The parametrized sequence can then be updated in Pulser and the new program sent externally to the quantum hardware. \n\nThe procedure can also be emulated locally: to try out the algorithm, we applied the non-gradient method Nelder-Mead for a few dozen function evaluations, initializing the parameters in a convenient point (the implementation can be found at Pulser's online documentation). \nThis is already sufficient in order to find some acceptable parameters $\\bm t$ and $\\bm s$. The performance of QAOA can be tested by sampling from the final state $|\\psi(t_f)\\rangle$ which returns both MISs of the graph with high probability. We show in Figure \\ref{fig:MIS_optim} the histogram of the recorded bitstrings. \n\n\n\\begin{figure}[h]\n\\centering\n\\includegraphics[width=0.9\\linewidth]{figures\/MIS_optim}\n\\caption{Histogram of most frequent bitstrings after the optimization loop. The MIS graphs (in red) are the ones that are observed most frequently.}\n\\label{fig:MIS_optim}\n\\end{figure}\n\n\n\\section{Conclusion}\n\nWith recent advances in the field, neutral-atom processors now provide unique opportunities for the exploration of uncharted territories in many-body quantum physics with hundreds of interacting quantum units\\,\\cite{ebadi2020quantum,scholl2020programmable,Bluvstein21}. By using distinct electronic transitions for encoding the quantum information, one can implement various Hamiltonians depending on the task at hand. Beyond analog computing, recent developments in qubit addressing have opened the door to high-fidelity quantum gates\\,\\cite{levine_high-fidelity_2018,Levine19}. In addition to the flexibility in terms of information encoding and processing, the geometry of the quantum register itself is also highly programmable\\,\\cite{barredo_synthetic_2018,Schymik20}. 
This holds great promise for the implementation of graph-related algorithms
Some work could also be done to extend the simulation capabilities in terms of the number of qubits that can be efficiently simulated, including the development of tensor-network-based methods and GPU acceleration. 
In that\npaper the authors find it expedient to deal with the residuals from\nfitting an ARMA model to the time series $X_t$ under consideration.\nThey are thereby concerned with the innovation terms of such a model.\nSuppose that $X_t$ and $a_t$ are related via an ARMA model in which\nthe $a_t$ play the role of the innovations. \\citeauthor{cha} remark\nthat if the time series $X_t$ is a fractionally integrated ARMA\n(``FARIMA'') time series (whence it satisfies the two conditions\nof interest (\\ref{eq:berman}) and (\\ref{eq:sum.cond})) then the\ninnovations $a_t$ also form a FARIMA series provided that the\nmodel is invertible. Hence the $a_t$ satisfy the two conditions\nof interest as well.\n\n\\citeauthor{cha} assert that more is true: if $X_t$ is \\emph{any}\nstationary time series satisfying (\\ref{eq:berman}) and\n(\\ref{eq:sum.cond}) and if $X_t$ and a series of innovations $a_t$\nare related by an invertible ARMA model, then the $a_t$ will also\nsatisfy these conditions. In this note we present the proof of\nthat claim.\n\nWe now remark that interest is focussed on the $a_t$ and these\nquantities are thought of as being the output of a filter, with\nthe $X_t$ being the input. However the phrasing of the claim,\nwith the $a_t$ being the innovations of an ARMA model, makes it\nappear as if the $a_t$ are the \\emph{input} to a filter, which is\nrather confusing. The required condition of invertibility\nof the ARMA model is also somewhat disconcerting. 
Finally,\nit turns out that a slightly stronger claim may be established.\nWe therefore re-phrase the assertion to be proven, in a stronger\nand less confusing form, and state the original claim as a corollary\nof the re-phrased assertion.\n\n\\section{The Main Result}\n\\label{sec:result}\n\nWe state the result to be proven as follows:\\\\\n\n\\noindent\n{\\bf Theorem:} Suppose that $X_t$ is a stationary time series with\nautocovariances $\\gamma_k$ satisfying conditions (\\ref{eq:berman})\nand (\\ref{eq:sum.cond}) and that the series $Y_t$ is the output of\na linear filter with input $X_t$ given as follows:\n\\[\nY_t = \\sum_{n=0}^{\\infty} \\psi_n X_{t-n}\n\\]\nSuppose that the $\\psi_n$ are summable (whence the $Y_t$ form a\nstationary time series). Furthermore suppose that the $\\psi_n$\nsatisfy the condition\n\\begin{equation}\n|\\gamma^W_k| \\leq C r^{|k|} \\mbox{~~for all~~} k\n\\label{eq:bound}\n\\end{equation}\nfor some constants $C$ and $r$, $0 < r < 1$, where\n\\[\n\\gamma^W_k = \\sum_{n = -\\infty}^{\\infty} \\psi_n \\psi_{n+k}\n\\]\nand where we set $\\psi_n = 0$ for $n < 0$ (to simplify the notation).\nThen the autocovariances $\\gamma^Y_k$ of the series $Y_t$ satisfy\n(\\ref{eq:berman}) and (\\ref{eq:sum.cond}) as well.\\\\\n\n\\noindent {\\bf Proof:}\\\\\n\\begin{quote}\n\nWe remark that the $\\gamma^W_k$ are in fact the autocovariances of\na time series $W_t$ defined by\n\\[\nW_t = \\sum_{n=0}^{\\infty} \\psi_n b_{t-n}\n\\]\nwhere $b_t$ is white noise with variance 1.\n\nObserve that\n\\begin{eqnarray*}\n\\gamma^Y_k & = & \\sum_{n=-\\infty}^{\\infty} \\sum_{m=-\\infty}^{\\infty}\n \\psi_n \\psi_m \\gamma_{m-n+k} \\\\\n & = & \\sum_{h=-\\infty}^{\\infty} \\sum_{n=-\\infty}^{\\infty}\n \\psi_n \\psi_{n+h} \\gamma_{k+h} \\\\\n & = & \\sum_{h=-\\infty}^{\\infty} \\gamma^W_h \\gamma_{k+h}\n\\end{eqnarray*}\n\nTo show that the $\\gamma^Y_k$ satisfy condition\n(\\ref{eq:berman}) we write\n\\begin{eqnarray*}\n\\gamma^Y_k\n & = & \\sum_{h=-\\infty}^{-k-1} 
\\gamma^W_h \\gamma_{k+h} +\n \\sum_{h=-k}^{-1} \\gamma^W_h \\gamma_{k+h} +\n \\sum_{h=0}^\\infty \\gamma^W_h \\gamma_{k+h} \\\\\n & = & \\sum_{j=1}^{\\infty} \\gamma^W_{k+j} \\gamma_j +\n \\sum_{j=0}^{k-1} \\gamma^W_{k-j} \\gamma_j +\n \\sum_{j=0}^{\\infty} \\gamma^W_j \\gamma_{k+j} \\\\\n & = & \\xi_1(k) + \\xi_2(k) + \\xi_3(k) \\mbox{~~(say).}\n\\end{eqnarray*}\n\nTo deal with $\\xi_1(k)$ we observe that\n\\[\n|\\xi_1(k)| \\ln k\n\\leq \\sum_{j=1}^{\\infty} |\\gamma^W_{k+j}| |\\gamma_j| \\; \\ln k\n\\leq C \\; \\gamma_0 \\; r^k \\ln k \\; \\frac{r}{1-r}\n\\]\nusing (\\ref{eq:bound}). This quantity $\\rightarrow 0$\nas $k \\rightarrow \\infty$ since $r < 1$.\n\nSimilarly\n\\[\n|\\xi_3(k)| \\ln k \\leq \n\\sum_{j=0}^{\\infty} |\\gamma^W_j| |\\gamma_{k+j}| \\ln (k+j) \\; \\leq\nC \\sum_{j=0}^{\\infty} r^j |\\gamma_{k+j}| \\ln (k+j)\n\\]\nTake $\\delta > 0$; for sufficiently large $k$, $|\\gamma_{k+j}| \\ln (k+j)\n\\leq \\delta$ for all $j \\geq 0$. Hence\n\\[\n|\\xi_3(k)| \\ln k \\leq \\frac{\\delta \\times C}{1-r}\n\\]\nfor sufficiently large $k$, and since $\\delta$ is arbitrary,\n$ |\\xi_3(k)| \\ln k \\rightarrow 0 $ as $k \\rightarrow \\infty$.\n\nTo deal with the middle term $\\xi_2(k)$ we note that\n\\begin{eqnarray*}\n|\\xi_2(k)| & \\leq & C \\sum_{j=0}^{k-1} |\\gamma_j| r^{k-j} \\\\\n & = & C \\left [ \\sum_{j=0}^{[k\/2]} |\\gamma_j| r^{k-j}\n + \\sum_{j=[k\/2]+1}^{k-1} |\\gamma_j| r^{k-j}\n \\right ] \\\\\n & \\leq & C \\sum_{j=0}^{[k\/2]} \\gamma_0 r^{k-j}\n + C \\sum_{j=[k\/2]+1}^{k-1} |\\gamma_j| r^{k-j} \\\\\n & \\leq & C \\gamma_0 \\frac{r^{k\/2}}{1-r}\n + C \\gamma_{j^*(k)} \\frac{r}{1-r}\n\\end{eqnarray*}\nwhere $j^*(k) = \\mbox{argmax} \\{|\\gamma_j| : [k\/2]+1 \\leq j \\leq k-1 \\}$.\n\nHence\n\\begin{eqnarray*}\n\\ln k \\times |\\xi_2(k)| & \\leq & C \\left [ \\gamma_0 \\ln k \\frac{r^{k\/2}}{1-r}\n + (\\ln k\/2 + \\ln 2) \\left ( \\gamma_{j^*(k)}\n \\frac{r}{1-r} \\right ) \\right ] \\\\\n & \\leq & C \\left [ \\gamma_0 \\ln k 
\\frac{r^{k\/2}}{1-r}\n + (\\ln j^*(k) + \\ln 2) \\left ( \\gamma_{j^*(k)}\n \\frac{r}{1-r} \\right ) \\right ] \\\\\n\\end{eqnarray*}\nwhich $\\rightarrow 0$ as $k \\rightarrow \\infty$.\n\nWe have thus established that the autocovariances $\\gamma^Y_k$\nsatisfy (\\ref{eq:berman}). We now proceed to show that condition\n(\\ref{eq:sum.cond}) is satisfied:\n\n\\begin{eqnarray*}\n\\sum_{k=1}^{\\infty} \\frac{|\\gamma^Y_k|}{k^{\\epsilon}}\n & = & \\sum_{k=1}^{\\infty} \\left | \\sum_{h=-\\infty}^{\\infty} \\gamma^W_h\n \\frac{\\gamma_{k+h}}{k^{\\epsilon}} \\right |\n \\leq \\sum_{h=-\\infty}^{\\infty} |\\gamma^W_h| \\sum_{k=1}^{\\infty}\n \\frac{|\\gamma_{k+h}|}{k^{\\epsilon}} \\\\\n & = & \\sum_{h=-\\infty}^{-1} |\\gamma^W_h| \\sum_{k=1}^{\\infty}\n \\frac{|\\gamma_{k+h}|}{k^{\\epsilon}} +\n \\sum_{h=0}^{\\infty} |\\gamma^W_h| \\sum_{k=1}^{\\infty}\n \\frac{|\\gamma_{k+h}|}{k^{\\epsilon}} \\\\\n & = & \\sum_{j=1}^{\\infty} |\\gamma^W_j| \\sum_{k=1}^{\\infty}\n \\frac{|\\gamma_{k-j}|}{k^{\\epsilon}} +\n \\sum_{h=0}^{\\infty} |\\gamma^W_h| \\sum_{k=1}^{\\infty}\n \\frac{|\\gamma_{k+h}|}{k^{\\epsilon}} \\\\\n & \\leq & \\sum_{j=1}^{\\infty} |\\gamma^W_j| \\sum_{k=1}^{\\infty}\n \\frac{|\\gamma_{k-j}|}{k^{\\epsilon}} +\n \\sum_{h=0}^{\\infty} |\\gamma^W_h| \\sum_{k=1}^{\\infty}\n \\frac{|\\gamma_{k+h}|}{(k+h)^{\\epsilon}}\n \\left (\\frac{k+h}{k}\\right)^{\\epsilon} \\\\\n & \\leq & \\sum_{j=1}^{\\infty} |\\gamma^W_j| \\sum_{k=1}^{\\infty}\n \\frac{|\\gamma_{k-j}|}{k^{\\epsilon}} +\n \\zeta \\sum_{h=0}^{\\infty} |\\gamma^W_h| (1+h)\n \\mbox{, where~~} \\zeta = \\sum_{k=1}^{\\infty}\n \\frac{|\\gamma_{k}|}{k^{\\epsilon}}\n\\end{eqnarray*}\n\\begin{eqnarray*}\n\\phantom{\\sum_{k=1}^{\\infty} \\frac{|\\gamma^Y_k|}{k^{\\epsilon}}}\n & = & \\sum_{j=1}^{\\infty} |\\gamma^W_j| \\left [ \\sum_{k=1}^{j}\n \\frac{|\\gamma_{k-j}|}{k^{\\epsilon}} + \\sum_{k=j+1}^{\\infty}\n \\frac{|\\gamma_{k-j}|}{k^{\\epsilon}} \\right ] +\n \\zeta \\sum_{h=0}^{\\infty} |\\gamma^W_h| (1+h) \\\\\n & 
\\leq & \\sum_{j=1}^{\\infty} C r^j \\left [ \\gamma_0 \\sum_{k=1}^{j}\n \\frac{1}{k^{\\epsilon}} +\n \\sum_{\\ell=1}^{\\infty} \\frac{|\\gamma_{\\ell}|}{\\ell^{\\epsilon}}\n \\left ( \\frac{\\ell}{\\ell+j} \\right)^\\epsilon\n \\right ] + \\zeta \\sum_{h=0}^{\\infty} C r^h \\; (1+h) \\\\\n & \\leq & C \\sum_{j=1}^{\\infty} r^j \\left [ j \\gamma_0 +\n \\sum_{\\ell=1}^{\\infty} \\frac{|\\gamma_{\\ell}|}{\\ell^{\\epsilon}}\n \\right ] + \\zeta C \\sum_{h=0}^{\\infty} r^h \\;(1+h) \\\\\n & \\leq & C \\sum_{j=1}^{\\infty} r^j [ j \\gamma_0 + \\zeta] +\n \\zeta C \\sum_{h=0}^{\\infty} r^h \\;(1+h) \\\\\n & = & C \\left [ \\gamma_0 \\sum_{j=1}^{\\infty} jr^j +\n \\zeta \\sum_{j=1}^{\\infty} r^j + \\zeta \\sum_{j=0}^{\\infty} r^j\n + \\zeta \\sum_{j=1}^{\\infty} j r^j \\right ] \\\\\n & = & C \\left [ (\\gamma_0 + \\zeta) \\sum_{j=1}^{\\infty} j r^j +\n \\zeta \\; \\frac{1+r}{1-r} \\right ] < \\infty\n\\end{eqnarray*}\nsince $\\sum_{j=1}^{\\infty} jr^j$ converges. (The radius of convergence\nof this power series is 1, and by assumption $0 < r < 1$.)\n\\hfill \\rule{2mm}{3.5mm}\\\\\n\\end{quote}\n\n\\section{The Original Claim}\n\\label{sec:orig.claim}\n\nWe state the original claim as:\\\\\n\n\\noindent\n{\\bf Corollary:} Suppose that $X_t$ satisfies conditions\n(\\ref{eq:berman}) and (\\ref{eq:sum.cond}) and that $X_t$ and $a_t$\nare related by the invertible ARMA model\n\\[\n\\phi(B)X_t = \\theta(B) a_t\n\\]\nwhere $\\phi(z)$ and $\\theta(z)$ are polynomials and $B$ is the\n``backshift'' operator. Then the $a_t$ satisfy conditions\n(\\ref{eq:berman}) and (\\ref{eq:sum.cond}) as well.\\\\\n\n\\noindent {\\bf Proof:}\\\\\n\\begin{quote}\nThe invertibility of the model tells us that $a_t$ can be\nexpressed as\n\\[\na_t = \\sum_{n=0}^{\\infty} \\psi_n X_{t-n}\n\\]\nwhere\n\\[\n\\frac{\\phi(z)}{\\theta(z)} = \\psi(x) = \\sum_{n=0}^{\\infty} \\psi_n z^n\n\\]\nwith the coefficients $\\psi_n$ being summable. 
Charginos (\chargino{\pm}), the supersymmetric partners of \Wpm\, \nand \Hpm, are pair produced via $s$-channel \n$\gamma\/\mathrm{Z}$ exchange. 
The production cross section can be\nreduced by an order of magnitude when the $t$-channel scalar neutrino\n(\\susy{\\nu}) exchange is important.\nNeutralinos, the supersymmetric partners of Z, \\gam, and neutral\nHiggs bosons, are pair produced \n$\\ee \\rightarrow \\neutralino{i}\\neutralino{j}~ (i,j=1, \\ldots ,4$;\nordered by their masses) via $s$-channel $\\mathrm{Z}$ exchange and their\n production cross section can be enhanced by $t$-channel exchange of a \nscalar electron (\\selectron{\\pm}). \n\nShort-lived supersymmetric particles are expected\nin R-parity conserving SUSY models. \nThe R-parity is a quantum number which \ndistinguishes ordinary particles from supersymmetric particles.\nIf R-parity is conserved, supersymmetric particles are \npair-produced and the lightest supersymmetric particle, \nthe lightest neutralino, \\neutralino{1}, is stable. The neutralino is weakly-interacting \nand escapes detection.\nIn this letter we assume R-parity conservation, which implies \nthat the decay chain of supersymmetric particles always contain,\nbesides standard particles, two invisible neutralinos\ncausing the missing energy signature.\n\nWhen the masses of the scalar leptons and the charged Higgs bosons (\\Hpm) are very large,\nthe \\chargino{\\pm} decays via \\ensuremath{\\mathrm{W^\\ast}}: \n$\\chargino{\\pm} \\rightarrow \\neutralino{1}\\ensuremath{\\mathrm{W^\\ast}}\n \\rightarrow \\neutralino{1}\\, f\\bar{f}^\\prime$.\nIf the \\slepton{\\pm} and \\susy{\\nu}\\, masses are comparable to \n$M_{\\W}$ the chargino also \ndecays via virtual scalar lepton or scalar neutrino \nand the leptonic branching fraction is enhanced.\nFinally for \\slepton{\\pm} and \\susy{\\nu}\\, lighter than the chargino, \nthe decay modes \n$\\chargino{\\pm} \\rightarrow \\slepton{\\pm} \\nu$ or\n$\\chargino{\\pm} \\rightarrow \\susy{\\nu} \\ell^\\pm$ become dominant. 
\nWhen the masses of the neutral SUSY Higgs bosons (\\ho, \\Ao) and of\nthe scalar leptons are very large, the heavier neutralinos \n($\\neutralino{j},\\ j \\ge 2$) decay via \\ensuremath{\\mathrm{Z^\\ast}}:\n$\\neutralino{j} \\rightarrow \\neutralino{k}\\ensuremath{\\mathrm{Z^\\ast}}\n \\rightarrow \\neutralino{k}\\, f\\bar{f}$ with $k < j$.\nFor a chargino lighter than neutralinos, the latter decay via\n$\\ensuremath{\\mathrm{W^\\ast}}$ such as $\\neutralino{j} \\rightarrow \\chargino{\\pm} f\\bar{f}^\\prime$.\nIf the scalar lepton masses are comparable to the Z mass,\nthe neutralino decays also via a virtual scalar lepton, enhancing the leptonic \nbranching fraction.\nFinally, for \\susy{\\nu}\\, and \\slepton{\\pm} lighter than neutralinos the\ntwo-body decays \n$\\neutralino{j} \\rightarrow \\slepton{\\pm} \\ell^\\mp$ or\n$\\neutralino{j} \\rightarrow \\susy{\\nu} \\nu$ (j $\\ge 2$)\nbecome dominant. \nThe radiative decays $\\neutralino{j} \\rightarrow \\neutralino{k} \\gamma $ \nare also possible via higher-order diagrams.\n\nPrevious results on chargino and neutralino searches have been \nreported by L3~\\cite{reflep1.5,susy_96,susy_97} \nand other LEP experiments~\\cite{charlep98}.\nIn this letter, new limits are presented on chargino \nand neutralino production cross sections.\nThese experimental results are interpreted in \nthe framework of the constrained MSSM. 
\nWithin these models lower limits on the masses of \nsupersymmetric particles are derived.\nFor these limits present experimental results are combined \nwith those obtained previously by L3 \nat the Z peak \\cite{oldsusyl3} and at energies up to\n$\\sqrt{s} = 183 \\gev{}$ \\cite{reflep1.5,susy_96,susy_97,nota99slep}.\n\n\\section{Data Sample and Simulation} \\label{dtsmcs}\n\nWe present the analysis of data collected by\nthe L3 detector \\cite{l3-detector} in 1998, \ncorresponding to an integrated luminosity of 176.3 pb$^{-1}$ at an\naverage centre-of-mass energy of 188.6 \\gev{}, denoted hereafter as \n$\\sqrt{s} = 189 \\gev{}$.\n\nStandard Model reactions are simulated with the following \nMonte Carlo generators:\n{\\tt PYTHIA}~\\cite{PYTHIA} for \n $\\ee \\rightarrow \\mathrm{q\\bar{q}}$,\n $\\ee \\rightarrow \\mathrm{Z}\\,\\ee$ and \n $\\ee \\rightarrow \\gamma\\!\/\\mathrm{Z}\\,\\gamma\\!\/\\mathrm{Z} $;\n{\\tt EXCALIBUR}~\\cite{EXCALIBUR} for\n $\\ee \\rightarrow \\mathrm{W^\\pm\\, e^\\mp \\nu}$;\n{\\tt KORALZ}~\\cite{KORALZ} for\n $\\ee \\rightarrow \\mu^+\\mu^-$ and\n $\\ee \\rightarrow \\tau^+\\tau^-$;\n{\\tt BHWIDE}~\\cite{BHWIDE} for \n $\\ee \\rightarrow \\ee$;\n{\\tt KORALW}~\\cite{KORALW} for\n $\\ee \\rightarrow \\mathrm{W^+ W^-}$;\ntwo-photon interaction processes have been simulated using \n{\\tt DIAG36}~\\cite{DIAG} ($\\ee \\rightarrow \\ee \\ell^+\\ell^-$) and\n{\\tt PHOJET}~\\cite{PHOJET} ($\\ee \\rightarrow \\ee\\, \\mathrm{hadrons}$), \nrequiring at least 3 \\gev{} for the invariant mass of the two-photon system.\nThe number of simulated events for each background process is \nequivalent to more than 100 times the statistics of the collected \ndata sample except for two-photon interactions for which it is more \nthan two times the data statistics.\n\nSignal events are generated with the Monte Carlo program \n{\\tt SUSYGEN}~\\cite{susygen2.2}, for masses of SUSY particles \n($M_{\\rm SUSY}$) ranging from 45 \\gev{} up to the kinematic limit 
and for\n$\\DELTAM$ values \n($\\ensuremath{\\Delta M} = M_{\\rm SUSY} - M_{\\neutralino{1}}$) between 3 \\gev{} and\n$M_{\\rm SUSY}-1 \\GeV{}$. \nThe explicit two-body decay branching ratios for charginos \n$\\chargino{\\pm} \\rightarrow \\susy{\\nu} \\ell^\\pm, \\slepton{\\pm} \\nu $ \nor $\\neutralino{2,3,4} \\rightarrow \\susy{\\nu} \\nu , \\slepton{} \\ell$\nhave been estimated with {\\tt SUSYGEN}.\n\nThe detector response is simulated using the {\\tt GEANT} \npackage~\\cite{geant}. It takes into account effects of energy loss,\nmultiple scattering and showering in the detector materials and\nin the beam pipe. Hadronic interactions are simulated with the\n{\\tt GHEISHA} program~\\cite{gheisha}. Time dependent inefficiencies\nof the different subdetectors are also taken into account\nin the simulation procedure.\n\n\n\\section{Analysis Procedure}\n\n\\subsection{Signal topologies and optimisation procedure}\n\\label{sec:optimization}\n\n\nBesides the main characteristic of missing transverse momentum,\nsupersymmetric particle signals can be further specified according\nto the number of leptons or the multiplicity of hadronic jets in the\nfinal state. 
Hence, chargino selections based on these five topologies are also effective\nin selecting $\neutralino{2}\neutralino{2}$ events.
the medium \ensuremath{\Delta M}\, range at $40-70 \gev{}$ and the high\n\ensuremath{\Delta M}\, range at $80-94 \gev{}$.
Beam-gas events are rejected by\nrequiring the visible energy in a cone of $30^\\circ$ \naround the beam\npipe ($E_{30^0}$) to be less than 90\\% of the total and the missing momentum\nvector to be at least $10^\\circ$ away from the beam pipe.\nTagged two-photon interactions are rejected by requiring the sum of\nthe energies measured in the lead-scintillator\nring calorimeter and in the luminosity monitors~\\cite{l3-detector}\nto be less than 10 \\GeV{}.\nThese two detectors cover the polar angle range \n$1.5^\\circ < \\theta <9^\\circ$ on both sides of the interaction \npoint.\n\n\n\\subsubsection{Leptonic final states} \\label{sec:selection_lep}\n\nFor the pure leptonic final states, dedicated selections have been\noptimised for the charginos, where the two leptons \nmay have a different flavour. Those selections are very \nsimilar to the scalar lepton selections which are described in \nReference \\cite{slep99ref}. At the end, a combination of all\nthe leptonic selections, providing the optimal sensitivity, is done for\nthe chargino and the neutralino leptonic decays.\n\n\\subsubsection{Lepton plus hadrons final states} \\label{sec:selection_lephad}\n\nWe select events with at least one isolated electron, muon or tau for which \nthe energy, not associated to the lepton, in a cone of $30^\\circ$ \nhalf-opening angle around its direction is less than 2 \\GeV{}. \nThe following quantities are defined:\nthe energy depositions\n($E^{\\perp}_{25}$ and $E_{25}$)\nwithin $\\pm25^\\circ$ around the missing \nenergy direction in the R--$\\phi$ plane \n or in space, respectively. 
\nWe apply cuts on the number of tracks in the hadronic system\n($N_{tk} - N_{lep}$) and the number of calorimetric clusters ($N_{cl}$).\nFurthermore, cuts are applied on the missing energy direction isolation\n($\\theta_{miss}$ and $E^{\\perp}_{25}$),\nthe total transverse momentum ($p_{\\perp}$),\nthe energy of the isolated lepton ($E_{lep}$), \nthe recoil mass ($M_{rec}$),\nas well as on the acoplanarity angle between\nthe jet and the lepton. A cut is applied on the visible energy ($E_{vis}$)\nand $E_{TTJL}$\nwhich is defined as the absolute\nvalue of the projection of the total momentum of the jet and the \nlepton onto the\ndirection perpendicular to the lepton-jet thrust computed in the R-$\\phi$ \nplane. \nA cut on the invariant mass of the hadronic system ($M_{had}$) \nremoves most of the WW background.\n\nThe cut values at $\\sqrt{s} = 189 \\gev{}$, are shown in Table~\\ref{tab2} for the \ndifferent \\ensuremath{\\Delta M}\\ ranges.\n\n\n\\subsubsection{Purely hadronic final states} \\label{sec:selection_hadrons}\n\nThe list of cuts used at $\\sqrt{s} = 189 \\gev{}$ is reported in Table~\\ref{tab3} for the different \n\\ensuremath{\\Delta M}\\ ranges. \nAgain, we apply cuts on $N_{cl}$, $N_{tk}$, $p_{\\perp}$, $E_{vis}$,\nacollinearity and acoplanarity as well as on the missing energy \npolar angle ($\\theta_{miss}$) and isolation ($E^{\\perp}_{25}$, $E_{25}$).\nThe absolute value of the total momentum of the event \nalong the beam line normalised to the visible energy ($p_\\parallel$),\nthe recoil mass ($M_{rec}$) and the visible mass ($M_{vis}$) are also used\nin the selections.\n\nIn all the selections, but the very low \\ensuremath{\\Delta M},\na cut on the width of the two jets \nis applied. We define $y_{\\perp}$ as the ratio between the scalar sum of the\nparticle momenta transverse to the jet direction and the jet energy. 
\nWe require $y_{\\perp}$ to be large in order to select four-jet-like events.\nIn the low \\ensuremath{\\Delta M}\\ range a cut on the ratio $E_{TTJ}$\/$p_\\perp$ \nis applied. \n$E_{TTJ}$ is equivalent to $E_{TTJL}$ using the momenta and the directions of\nthe two jets. \n\n\n\\section{Results}\n\nThe results at $\\sqrt{s} = 189 \\gev{}$, for \nthe eighteen chargino selections and the four neutralino\nselections are shown in Table \\ref{tab8}. The results for the very low and low \\ensuremath{\\Delta M}\\, selections are shown together.\nA good agreement between the expected background from Standard Model\nprocesses and the selected data is observed.\n\nThe eighteen chargino selections\nfind 147 candidates in the data when expecting 148 events\nfrom the Standard Model processes. \nIn the low and\nvery low \\ensuremath{\\Delta M}\\ regions\n72 events are selected, 11 events in the medium \\ensuremath{\\Delta M}\\, region and\n67 events in the high \\ensuremath{\\Delta M}\\, region. \nIn the four neutralino selections\n50 candidates are found \nwhereas 48.1 events are expected\nfrom the Standard Model processes, \nmost of those events are selected by the \nlow $\\ensuremath{\\Delta M}$ selections.\n\nEach selection is parametrised as a function of a single parameter, $\\xi$,\nin the following manner: \ngiven a lower edge, $X_{loose}^i$, and an upper \nedge, $X_{tight}^i$,\nfor the cut on the variable $i$, the parameter $\\xi$ \nis equal to 0 when this cut is at the lower edge\n(many background events satisfy the selection)\nand 100 when it is at the upper edge\n(no or few background events pass the selection). All cuts \n($i = 1, . . 
., N$) are related to the parameter $\\xi$ as follows:\n$$X_{cut}^i=\nX_{loose}^i +\n(X_{tight}^i- X_{loose}^i)\n\\times \\frac{\\xi}{100}.\n$$\nThe parameter $\\xi$ is scanned around the optimal value ($\\xi$=50)\nto check the agreement between data and Monte Carlo at different background \nrejection stages.\nAs illustrated in Figure~\\ref{fig:xi_chaneu} \nfor the lepton and hadrons final state in chargino decays and \nthe pure hadronic final state in the neutralino decays, the data and\nMonte Carlo simulations are \nin good agreement for all the \\ensuremath{\\Delta M}\\ selections.\nThe vertical arrows show the $\\xi$ value corresponding to\nthe optimised cuts.\n\nFor intermediate \\ensuremath{\\Delta M}\\, values different from those chosen for\noptimisation we choose the combination of selections \nproviding the highest sensitivity \\cite{susy_96}.\nIn this combination procedure, we take into account the overlap\namong the selections within the \ndata and Monte Carlo samples.\n\n\nTypical selection efficiencies, as well as the number of background events\nexpected for a chargino mass of 94 \\gev{} for the purely leptonic final state\n(LL) or\nfor the $\\ensuremath{\\mathrm{W^\\ast}}\\neutralino{1}$ decay mode, are displayed in\nTable \\ref{tab4}. 
In \nthe latter case, a maximum efficiency of 47\\% is reached for a background\ncontamination of 7.5 events for $\\ensuremath{\\Delta M} = 30 \\gev{}$.\nIn the low \\ensuremath{\\Delta M}\\ region the efficiency decreases due to\nthe large contamination of two-photon interactions and due to the\nlower trigger acceptance.\nFor large \\ensuremath{\\Delta M}\\ it decreases because of the WW background.\n\nThe selection efficiencies, as well as the number of background events\nexpected for a sum of neutralino masses \n$M_{\\neutralino{1}} + M_{\\neutralino{2}} = 188 \\gev{}$ \nfor the pure leptonic decays and for the $\\ensuremath{\\mathrm{Z^\\ast}}\\neutralino{1}$ \ndecay mode are displayed in Table~\\ref{tab5b}.\nCompared to the chargino selection, the efficiencies are\nlower due to the invisible decays of the \\ensuremath{\\mathrm{Z^\\ast}}.\n\nSystematic errors on the signal efficiencies are evaluated as in\nReference \\citen{reflep1.5}, and they are typically 5\\% relative, \ndominated by Monte Carlo statistics. These errors\nare taken into account following the procedure explained \nin Reference \\citen{cal_limit}.\n\n\n\\section{Model independent upper limits on production cross sections}\nNo excess of events is observed\nand we set upper limits on the \nchargino and neutralino production cross sections \nin the framework of the MSSM.\nExclusion limits at 95\\% C.L. 
are derived taking into account background\ncontributions.\n\nTo derive the upper limits on production cross sections \nand for interpretations in the MSSM we combine the \n$\\sqrt{s} = 189 \\gev{}$ data sample\nwith those collected by L3 at lower centre-of-mass energies \n\\cite{reflep1.5,susy_96,susy_97}.\n\nThe contours of upper limits on the production cross sections for the process\n$\\ee \\rightarrow \\chargino{\\pm}\\chargino{\\mp}$ are shown in \nFigure~\\ref{fig:xsection_chargino} assuming \n$\\chargino{\\pm} \\rightarrow \\ensuremath{\\mathrm{W^\\ast}}\\neutralino{1}$ for the chargino decay \nwith standard W branching fractions, \nand for purely leptonic W decays.\nIn most of the kinematically accessible region, cross sections larger than \n0.2~pb are excluded for both scenarios.\n\nSimilarly, cross section limits for associated neutralino production \n$\\ee \\rightarrow \\neutralino{1}\\neutralino{2}$ are derived as shown in\nFigure~\\ref{fig:xsection_neutralino}\nassuming $\\neutralino{2} \\rightarrow \\ensuremath{\\mathrm{Z^\\ast}}\\neutralino{1}$, \nwith standard Z branching fractions \nand for purely leptonic Z decays.\nIn most of the kinematically accessible region, cross sections larger than \n0.3~pb are excluded for both scenarios.\n\n\n\n\\section{Interpretation in the MSSM}\n\nIn the MSSM, with Grand Unification assumptions~\\cite{MSSM_GUT},\nthe masses and couplings of the SUSY\nparticles as well as their production cross sections,\nare entirely described~\\cite{mssm} once five parameters are fixed:\n$\\tan\\beta$, the ratio of the vacuum expectation values of the two Higgs \ndoublets, $M \\equiv M_2$, the gaugino mass parameter,\n$\\mu$, the higgsino mixing parameter,\n$m_0$, the common mass for scalar fermions at the GUT scale, and $A$, \nthe trilinear coupling in the Higgs sector.\nThe following MSSM parameter space is investigated: \n$$\n \\begin{array}{rclcrcl}\n 0.7 \\leq& \\tan\\beta & \\leq 60 , &&\n0 \\leq &M_2 &\\leq 2000 \\gev{} 
,\\\\\n -2000 \\gev{} \\leq& \\mu & \\leq 2000 \\gev{} ,&& \n0 \\leq& m_0 &\\leq 500 \\gev{} .\n \\end{array}\n$$\n\nTo derive the absolute limits on the masses \nof the lightest neutralino and of\nthe lightest chargino,\na scan in the MSSM parameter space is performed \nin steps of $0.2 \\gev{}$ for $M_2$,\n$1.0 \\gev{}$ for $\\mu$ and $0.5 \\gev{}$ for $m_0$. \n\nMass eigenstates of scalar quarks and leptons are in general a \nmixture of the weak eigenstates $\\susy{f}_R$ and $\\susy{f}_L$.\nThe mixing between these two states\nis proportional to the mass of the partner fermion.\nHence the mixing can be sizable only for particles of the\nthird generation. The mixing is governed by the parameters $A$, \n$\\mu$ and $\\tan\\beta$. Besides $\\mu$ and $\\tan\\beta$, also\na scan on $A$ is performed to check the validity of the following results.\n\nAll the limits on the cross sections previously shown, combined with\nthe results obtained at lower centre-of-mass energies and with the\nresults of scalar lepton searches obtained at $\\sqrt{s} = 189 \\gev{}$ \n\\cite{slep99ref}, can be \ntranslated into exclusion regions in the MSSM parameter space.\nTo derive limits in the MSSM, \nwe optimise the global selection for any different\npoint in the parameter space. This is obtained, choosing every time the\ncombination of selections providing the highest sensitivity, given \nthe production cross sections and the decay branching fractions which are \ncalculated with the generator {\\tt SUSYGEN}. When the mixing in the\nscalar tau sector is considered, masses and decay branching fractions\nare calculated with the generator {\\tt ISAJET} \\cite{isajet}.\n\n\n\\subsection{Limits on chargino and neutralino masses} \n\nIn the MSSM, while the cross sections and decay branching fractions\nof the\ncharginos and neutralinos depend on the masses of the scalar leptons, \ntheir masses depend only on \n$M_2$, $\\mu$ and $\\tan\\beta$. 
\nThe exclusions in the\nhigh $m_0$ range are derived from chargino and\nneutralino searches, while for low $m_0$ the searches for scalar\nleptons \\cite{slep99ref}, and for photons and\nmissing energy final states \\cite{papho97}, also contribute.\nWe also take into account all chargino and neutralino cascade decays:\n\\begin{itemize}\n\\item\n $\\chargino{\\pm} \\rightarrow \\neutralino{2}\\, \\ensuremath{\\mathrm{W^\\ast}}$: \n we observe a slight decrease of the efficiency relative to\n $\\chargino{\\pm} \\rightarrow \\neutralino{1}\\, \\ensuremath{\\mathrm{W^\\ast}}$\n depending on the masses of \\neutralino{2}, \\chargino{\\pm} and \n \\neutralino{1}.\n The lowest efficiency is then used for cascade decays.\n\\item\n $\\neutralino{3,4} \\rightarrow \\neutralino{2}\\, \\ensuremath{\\mathrm{Z^\\ast}}$:\n the efficiency is found to be larger than the efficiency obtained for the\n $\\neutralino{3,4} \\rightarrow \\neutralino{1}\\, \\ensuremath{\\mathrm{Z^\\ast}}$ channel, \n especially in the high \\ensuremath{\\Delta M}\\ region. \n The efficiencies obtained in the latter\n channel are used. \n\\item \n\n$\\neutralino{3,4} \\rightarrow \\susy{\\nu} \\nu$: when the $ \\susy{\\nu}$\nbecomes detectable through its cascade decays into $\\neutralino{2}$ or \n $\\chargino{\\pm}$. 
This is especially relevant in the mixed region\n($\\mu\\sim -M_2$) for\nthe low $\\tan\\beta$ values.\n\\end{itemize}\n\n\nDepending on the neutralino-chargino field content, \none distinguishes the following \ncases for the determination of lower limits on the neutralino and chargino \nmasses:\n\\begin{itemize}\n\\item \n Higgsino-like \\neutralino{2} and \\chargino{\\pm} ($M_2 \\gg |\\mu|$):\n in this case, the production cross sections do not depend on the scalar lepton\n masses, \\ensuremath{\\Delta M}\\ is low and decreases with increasing $M_2$.\nConsequently, the limits on the masses of the next-to-lightest neutralino\n and the lightest chargino decrease with $M_2$ as depicted in \n Figure~\\ref{fig:mass_m2}.\n For $\\tan\\beta = \\sqrt{2}$ and $M_2$ less than $500 \\gev{}$, \n$M_{\\neutralino{2}} \\leq 101 \\GeV{}$ and\n $M_{\\chargino{\\pm}} \\leq 93 \\GeV{}$\n are excluded.\n\\item\n Gaugino-like $\\chargino{\\pm}$ ($|\\mu| \\gg M_2$):\n the chargino cross section depends strongly on the scalar neutrino mass. \n For $50 \\gev{} \\leq M_{\\susy{\\nu}} \\leq 80 \\gev{}$ \nthe cross section is reduced by one order of magnitude compared to what is\n expected for $M_{\\susy{\\nu}} \\geq 500 \\gev{}$.\n When the two body decay \n $\\chargino{\\pm} \\rightarrow \\ell^\\pm \\, \\susy{\\nu}$ \n is dominant, the relevant \\ensuremath{\\Delta M}\\ becomes \n $\\ensuremath{\\Delta M} = M_{\\chargino{\\pm}} - M_{\\susy{\\nu}}$.\nIf the $\\susy{\\nu}$ is mass degenerate with the \\chargino{\\pm} the \nacceptance is substantially reduced. However, when this occurs scalar \nleptons are light and the experimental sensitivity is recovered with these\nchannels.\n\n\\end{itemize}\n\nThe mass limit of the lightest\nchargino is shown in Figure \\ref{fig:mass_cha} as a function of \n$\\tan\\beta$ for all the different chargino field contents. 
\nAt large $\\tan\\beta$ values,\nthe lower mass limit of the lightest chargino is obtained\nwhen the lightest chargino and the $\\susy{\\nu}$ are mass degenerate\n(gaugino region). At \nlow $\\tan\\beta$ values the lower mass limit on the lightest \nchargino is obtained when the lightest chargino and \nthe $\\neutralino{1}$ (LSP) are mass\ndegenerate (higgsino region). Finally, for $M_2<2$ \\TeV, \nfor $\\tan\\beta \\leq 60$ and for any $m_0$ values, \nthe lower mass limit of the lightest chargino is:\n$$ M_{\\chargino{\\pm}} \\geq 67.7 \\gev{} .$$\n\nThe scalar tau can be much lighter than\nthe scalar electron and muon. This mass splitting \noccurs in particular for large $\\tan\\beta$ and $A$ values.\nWhen this happens, chargino and next-to-lightest neutralino decays \nare affected. Therefore, detection efficiencies are estimated for chargino and\nnext-to-lightest neutralino decays with 100\\% branching ratio\ninto $\\stau{}_{1}\\nu$ and $\\stau{\\pm}_{1}\\tau^{\\mp}$, respectively.\nIn particular, when the \n$\\stau{}_{1}$ and the LSP are mass degenerate the efficiencies \ndecrease substantially.\nHowever, the experimental sensitivity can be partially recovered\ntaking into account also the process\n$\\epem\\ra \\susy{\\nu}\\susy{\\nu}$, where \nthe $\\susy{\\nu}$ is visible through its cascade decays.\nIn particular, the limit on the chargino mass \nholds for any value of the mixing if $\\tan\\beta < 20$.\nFor higher $\\tan\\beta$ values this limit can be decreased\nat most by 10 \\gev. \n\nIndirect limits on the mass of the lightest neutralino\nare also derived as a function of $m_0$ and as a function of tan$\\beta$.\nIn the\nlow $m_0$ region ($\\leq 65 \\gev{}$) the mass limit on the LSP\ncomes mainly from the scalar lepton searches.\nFor large $m_0$ values ($\\geq 200 \\gev{}$), only \nthe chargino and neutralino searches contribute. 
At \nlow $\\tan\\beta$ the processes\n$\\epem\\ \\rightarrow \\neutralino{2} \\neutralino{3,4}$\ncontribute significantly and they are taken into account.\nThe lower mass limit is found at $\\tan\\beta = 1$, $\\mu = -70 \\gev{}$\nand $m_0 = 500 \\gev{}$, as shown in Figure~\\ref{fig:neutralino_mass_a}.\nFor these values of the parameters, \nthe chargino mass is at the kinematic limit and \nthe mass difference between the chargino \nand the LSP is maximal.\n\nFor intermediate $m_0$ values ($65 \\gev{} \\leq m_0 \\leq 95 \\gev{}$)\nthe production cross section for charginos is minimal\nand the $\\susy{\\nu}$ is light enough to allow\nthe following decay modes:\n$\\neutralino{2,3,4} \\rightarrow \\susy{\\nu}\\nu$ and\n$\\chargino{\\pm} \\rightarrow \\susy{\\nu} \\ell^{\\pm}$. \nThis is the region where the exclusion is due to the interplay of many\ndifferent searches. The limit on the lightest neutralino as a function\nof $m_0$, and for two extreme values of $\\tan\\beta$, \nis shown in Figure~\\ref{fig:neutralino_mass_b}. \nFor low $\\tan\\beta$ values\n($\\le \\sqrt{2}$), the minimum is found for\n$\\mu \\sim -70 \\gev{}$ and large $m_0$ values. \nBetter limits are obtained \nfor intermediate $m_0$ values, where \nthe neutralino production cross sections are large and the \ntwo body decays of the $\\neutralino{3,4}$ into $\\susy{\\nu} \\nu $ \nare visible through the cascade decays of the\n$\\susy{\\nu}$. \nFor larger $\\tan\\beta$ values, the minimum is found in the gaugino \nregion ($-2000 \\gev{} < \\mu < -200 \\gev{}$) and for \n$70 \\gev{} \\leq m_0 \\leq 80 \\gev{}$. In this region of the parameter space, \nthe $\\susy{\\nu}$ and the chargino are \nmass degenerate, the heavier neutralinos \ndecay invisibly and the experimental sensitivity \nis entirely due to the scalar lepton searches.\n\nFinally in Figure~\\ref{fig:neutralino_any_a}, the mass limit on the\nlightest neutralino as a function of $\\tan\\beta$ for any $m_0$ value\nis shown. 
For $\\tan\\beta \\geq 0.7$, the \nlower mass limit of the lightest neutralino is \n$$\n M_{\\neutralino{1}} \\geq 32.5 \\gev{} .\n$$\n\nThe mass limit on the\nlightest neutralino is very little affected\nby the mixing in the scalar tau sector.\nThe limit holds for any value of the mixing if $\\tan\\beta < 20$\nand it can be reduced at most by 1.5 \\gev{} for higher $\\tan\\beta$ values.\nNevertheless, the absolute mass limit for the lightest neutralino\ndoes not change since the lowest value is still found at $\\tan\\beta =1$.\n\n\nThe mass limit on the lightest neutralino is also translated in\nan absolute limit on $M_2$. This is shown as a function \nof $\\tan\\beta$ for any $m_0$ and $\\mu$ as depicted \nin Figure ~\\ref{fig:neutralino_any_b}. \nValues of $M_2$ lower than $54.8 \\gev{}$ are now excluded at 95$\\%$ C.L.\n\n\n\\section*{Acknowledgments}\n\nWe express our gratitude to the CERN accelerator divisions for the\nexcellent performance of the LEP machine. We also acknowledge\nand appreciate the effort of the engineers, technicians and support staff \nwho have participated in the construction and maintenance of this experiment.\n\n\\newpage\n\n\n\\bibliographystyle{l3stylem}\n\\begin{mcbibliography}{10}\n\n\\bibitem{susy}\nY.A. Golfand and E.P. Likhtman, \\JETP {\\bf 13} (1971) 323; \\\\ D.V. Volkhov and\n V.P. Akulov, \\PL {\\bf B 46} (1973) 109; \\\\ J. Wess and B. Zumino, \\NP {\\bf B\n 70} (1974) 39;\\\\ P. Fayet and S. Ferrara, \\PRep {\\bf C 32} (1977) 249;\\\\ A.\n Salam and J. Strathdee, \\FortP {\\bf 26} (1978) 57\\relax\n\\relax\n\\bibitem{mssm}\nH. P. Nilles, \\PRep {\\bf 110} (1984) 1;\\\\ H. E. Haber and G. L. Kane, \\PRep\n {\\bf 117} (1985) 75;\\\\ R. Barbieri, \\NCim {\\bf 11} No. 4 (1988) 1\\relax\n\\relax\n\\bibitem{reflep1.5}\nL3 Collab., M. Acciarri \\etal, \\PL {\\bf B 377} (1996) 289\\relax\n\\relax\n\\bibitem{susy_96}\nL3 Collab., M. Acciarri \\etal, Eur. Phys. 
Journal {\\bf C 4} (1998) 207\\relax\n\\relax\n\\bibitem{susy_97}\nL3 Collab., M. Acciarri \\etal, contributed paper n. 493 to {\\it ICHEP98},\n Vancouver, July 1998\\relax\n\\relax\n\\bibitem{charlep98}\nALEPH Collab., R. Barate \\etal, CERN-EP-99-014 (1999);\\\\ DELPHI Collab., P.\n Abreu \\etal, \\PL {\\bf B 446} (1999) 75;\\\\ OPAL Collab., G. Abbiendi \\etal,\n Eur. Phys. Journal {\\bf C 8} (1999) 255\\relax\n\\relax\n\\bibitem{oldsusyl3}\nL3 Collab., O. Adriani \\etal, \\PRep {\\bf 236} (1993) 1; \\\\ L3 Collab., M.\n Acciarri \\etal, \\PL {\\bf B 350} (1995) 109\\relax\n\\relax\n\\bibitem{nota99slep}\nL3 Collab., M. Acciarri \\etal, \\PL {\\bf B 456} (1999) 283\\relax\n\\relax\n\\bibitem{l3-detector}\nL3 Collab., B. Adeva \\etal, Nucl. Instr. and Meth. {\\bf A 289} (1990) 35; \\\\ M.\n Chemarin \\etal, Nucl. Instr. and Meth. {\\bf A 349} (1994) 345; \\\\ M. Acciarri\n \\etal, Nucl. Instr. and Meth. {\\bf A 351} (1994) 300; \\\\ G. Basti \\etal,\n Nucl. Instr. and Meth. {\\bf A 374} (1996) 293; \\\\ I.C. Brock \\etal, Nucl.\n Instr. and Meth. {\\bf A 381} (1996) 236; \\\\ A. Adam \\etal, Nucl. Instr. and\n Meth. {\\bf A 383} (1996) 342\\relax\n\\relax\n\\bibitem{PYTHIA}\nT. Sj{\\\"o}strand, ``PYTHIA~5.7 and JETSET~7.4 Physics and Manual'', \\\\\n CERN--TH\/7112\/93 (1993), revised August 1995;\\\\ T. Sj{\\\"o}strand, \\CPC {\\bf\n 82} (1994) 74\\relax\n\\relax\n\\bibitem{EXCALIBUR}\n{\\tt EXCALIBUR} version 1.11 is used.\\\\ F.A.~Berends, R.~Kleiss and R.~Pittau,\n Nucl. Phys. {\\bf B 424} (1994) 308; Nucl. Phys. {\\bf B 426} (1994) 344; Nucl.\n Phys. (Proc. Suppl.) {\\bf B 37} (1994) 163; Phys. Lett. {\\bf B 335} (1994)\n 490; Comp. Phys. Comm. {\\bf 83} (1994) 141\\relax\n\\relax\n\\bibitem{KORALZ}\n{\\tt KORALZ} version 4.02 is used.\\\\ S. Jadach, B.F.L. Ward and Z. W\\c{a}s,\n \\CPC {\\bf 79} (1994) 503\\relax\n\\relax\n\\bibitem{BHWIDE}\n{\\tt BHWIDE} version 1.01 is used.\\\\ S. 
Jadach \\etal, \\PL {\\bf B 390} (1997)\n 298\\relax\n\\relax\n\\bibitem{KORALW}\n{\\tt KORALW} version 1.33 is used.\\\\ M.~Skrzypek \\etal, \\CPC {\\bf 94} (1996)\n 216;\\\\ M.~Skrzypek \\etal, \\PL {\\bf B 372} (1996) 289\\relax\n\\relax\n\\bibitem{DIAG}\nF.A.~Berends, P.H.~Daverfeldt and R. Kleiss,\n\\newblock Nucl. Phys. {\\bf B 253} (1985) 441\\relax\n\\relax\n\\bibitem{PHOJET}\n{\\tt PHOJET} version 1.10 is used. \\\\ R.~Engel, \\ZfP {\\bf C 66} (1995) 203; \\\\\n R.~Engel and J.~Ranft, \\PR {\\bf{D 54}} (1996) 4244\\relax\n\\relax\n\\bibitem{susygen2.2}\n{\\tt SUSYGEN} version 2.2 is used.\\\\ S. Katsanevas and P. Morawitz, \\CPC {\\bf\n 112} (1998) 227\\relax\n\\relax\n\\bibitem{geant}\nThe L3 detector simulation is based on GEANT Version 3.15.\\\\ See R. Brun \\etal,\n ``GEANT 3'', CERN DD\/EE\/84-1 (Revised), September 1987\\relax\n\\relax\n\\bibitem{gheisha}\nH. Fesefeldt, RWTH Aachen Preprint PITHA 85\/02 (1985)\\relax\n\\relax\n\\bibitem{durham}\nS. Catani \\etal, \\PL {\\bf B 269} (1991) 432;\\\\ S. Bethke \\etal, \\NP {\\bf B 370}\n (1992) 310\\relax\n\\relax\n\\bibitem{slep99ref}\nL3 Collab., M. Acciarri \\etal, {\\it Search for Scalar leptons in $\\ee$\n collisions at $\\sqrt{s}$=189 \\gev}, contributed paper n. 7-46 to {\\it\n EPS-HEP99}, Tampere, July 1999, and also submitted to Phys. Lett\\relax\n\\relax\n\\bibitem{cal_limit}\nR.D. Cousins and V.L. Highland, \\NIM {\\bf A 320} (1992) 331\\relax\n\\relax\n\\bibitem{MSSM_GUT}\nSee for instance:\\\\ L. Ibanez, Phys. Lett. {\\bf B 118} (1982) 73;\\\\ R.\n Barbieri, S. Farrara and C. Savoy, Phys. Lett. {\\bf B 119} (1982) 343\\relax\n\\relax\n\\bibitem{isajet}\n{\\tt ISAJET} version 7.44 is used. \\\\ H.~Baer \\etal, BNL-HET-98-39, (1998),\n hep-ph\/9810440\\relax\n\\relax\n\\bibitem{papho97}\nL3 Collab., M. 
Acciarri \\etal, \\PL {\\bf B 444} (1998) 503\\relax\n\\relax\n\\end{mcbibliography}\n\n \n\\newpage\n\\typeout{ } \n\\typeout{Using author list for paper 186 -?}\n\\typeout{$Modified: Fri Sep 10 08:43:14 1999 by clare $}\n\\typeout{!!!! This should only be used with document option a4p!!!!}\n\\typeout{ }\n\n\\newcount\\tutecount \\tutecount=0\n\\def\\tutenum#1{\\global\\advance\\tutecount by 1 \\xdef#1{\\the\\tutecount}}\n\\def\\tute#1{$^{#1}$}\n\\tutenum\\aachen \n\\tutenum\\nikhef \n\\tutenum\\mich \n\\tutenum\\lapp \n\\tutenum\\basel \n\\tutenum\\lsu \n\\tutenum\\beijing \n\\tutenum\\berlin \n\\tutenum\\bologna \n\\tutenum\\tata \n\\tutenum\\ne \n\\tutenum\\bucharest \n\\tutenum\\budapest \n\\tutenum\\mit \n\\tutenum\\debrecen \n\\tutenum\\florence \n\\tutenum\\cern \n\\tutenum\\wl \n\\tutenum\\geneva \n\\tutenum\\hefei \n\\tutenum\\seft \n\\tutenum\\lausanne \n\\tutenum\\lecce \n\\tutenum\\lyon \n\\tutenum\\madrid \n\\tutenum\\milan \n\\tutenum\\moscow \n\\tutenum\\naples \n\\tutenum\\cyprus \n\\tutenum\\nymegen \n\\tutenum\\caltech \n\\tutenum\\perugia \n\\tutenum\\cmu \n\\tutenum\\prince \n\\tutenum\\rome \n\\tutenum\\peters \n\\tutenum\\salerno \n\\tutenum\\ucsd \n\\tutenum\\santiago \n\\tutenum\\sofia \n\\tutenum\\korea \n\\tutenum\\alabama \n\\tutenum\\utrecht \n\\tutenum\\purdue \n\\tutenum\\psinst \n\\tutenum\\zeuthen \n\\tutenum\\eth \n\\tutenum\\hamburg \n\\tutenum\\taiwan \n\\tutenum\\tsinghua \n{\n\\parskip=0pt\n\\noindent\n{\\bf The L3 Collaboration:}\n\\ifx\\selectfont\\undefine\n \\baselineskip=10.8pt\n \\baselineskip\\baselinestretch\\baselineskip\n \\normalbaselineskip\\baselineskip\n \\ixpt\n\\els\n \\fontsize{9}{10.8pt}\\selectfont\n\\fi\n\\medskip\n\\tolerance=10000\n\\hbadness=5000\n\\raggedright\n\\hsize=162truemm\\hoffset=0mm\n\\def\\rlap,{\\rlap,}\n\\noindent\n\nM.Acciarri\\rlap,\\tute\\milan\\\nP.Achard\\rlap,\\tute\\geneva\\ \nO.Adriani\\rlap,\\tute{\\florence}\\ \nM.Aguilar-Benitez\\rlap,\\tute\\madrid\\ 
\nJ.Alcaraz\\rlap,\\tute\\madrid\\ \nG.Alemanni\\rlap,\\tute\\lausanne\\\nJ.Allaby\\rlap,\\tute\\cern\\\nA.Aloisio\\rlap,\\tute\\naples\\ \nM.G.Alviggi\\rlap,\\tute\\naples\\\nG.Ambrosi\\rlap,\\tute\\geneva\\\nH.Anderhub\\rlap,\\tute\\eth\\ \nV.P.Andreev\\rlap,\\tute{\\lsu,\\peters}\\\nT.Angelescu\\rlap,\\tute\\bucharest\\\nF.Anselmo\\rlap,\\tute\\bologna\\\nA.Arefiev\\rlap,\\tute\\moscow\\ \nT.Azemoon\\rlap,\\tute\\mich\\ \nT.Aziz\\rlap,\\tute{\\tata}\\ \nP.Bagnaia\\rlap,\\tute{\\rome}\\\nL.Baksay\\rlap,\\tute\\alabama\\\nA.Balandras\\rlap,\\tute\\lapp\\ \nR.C.Ball\\rlap,\\tute\\mich\\ \nS.Banerjee\\rlap,\\tute{\\tata}\\ \nSw.Banerjee\\rlap,\\tute\\tata\\ \nA.Barczyk\\rlap,\\tute{\\eth,\\psinst}\\ \nR.Barill\\`ere\\rlap,\\tute\\cern\\ \nL.Barone\\rlap,\\tute\\rome\\ \nP.Bartalini\\rlap,\\tute\\lausanne\\ \nM.Basile\\rlap,\\tute\\bologna\\\nR.Battiston\\rlap,\\tute\\perugia\\\nA.Bay\\rlap,\\tute\\lausanne\\ \nF.Becattini\\rlap,\\tute\\florence\\\nU.Becker\\rlap,\\tute{\\mit}\\\nF.Behner\\rlap,\\tute\\eth\\\nL.Bellucci\\rlap,\\tute\\florence\\ \nJ.Berdugo\\rlap,\\tute\\madrid\\ \nP.Berges\\rlap,\\tute\\mit\\ \nB.Bertucci\\rlap,\\tute\\perugia\\\nB.L.Betev\\rlap,\\tute{\\eth}\\\nS.Bhattacharya\\rlap,\\tute\\tata\\\nM.Biasini\\rlap,\\tute\\perugia\\\nA.Biland\\rlap,\\tute\\eth\\ \nJ.J.Blaising\\rlap,\\tute{\\lapp}\\ \nS.C.Blyth\\rlap,\\tute\\cmu\\ \nG.J.Bobbink\\rlap,\\tute{\\nikhef}\\ \nA.B\\\"ohm\\rlap,\\tute{\\aachen}\\\nL.Boldizsar\\rlap,\\tute\\budapest\\\nB.Borgia\\rlap,\\tute{\\rome}\\ \nD.Bourilkov\\rlap,\\tute\\eth\\\nM.Bourquin\\rlap,\\tute\\geneva\\\nS.Braccini\\rlap,\\tute\\geneva\\\nJ.G.Branson\\rlap,\\tute\\ucsd\\\nV.Brigljevic\\rlap,\\tute\\eth\\ \nF.Brochu\\rlap,\\tute\\lapp\\ \nA.Buffini\\rlap,\\tute\\florence\\\nA.Buijs\\rlap,\\tute\\utrecht\\\nJ.D.Burger\\rlap,\\tute\\mit\\\nW.J.Burger\\rlap,\\tute\\perugia\\\nJ.Busenitz\\rlap,\\tute\\alabama\\\nA.Button\\rlap,\\tute\\mich\\ \nX.D.Cai\\rlap,\\tute\\mit\\ 
\nM.Campanelli\\rlap,\\tute\\eth\\\nM.Capell\\rlap,\\tute\\mit\\\nG.Cara~Romeo\\rlap,\\tute\\bologna\\\nG.Carlino\\rlap,\\tute\\naples\\\nA.M.Cartacci\\rlap,\\tute\\florence\\ \nJ.Casaus\\rlap,\\tute\\madrid\\\nG.Castellini\\rlap,\\tute\\florence\\\nF.Cavallari\\rlap,\\tute\\rome\\\nN.Cavallo\\rlap,\\tute\\naples\\\nC.Cecchi\\rlap,\\tute\\geneva\\\nM.Cerrada\\rlap,\\tute\\madrid\\\nF.Cesaroni\\rlap,\\tute\\lecce\\ \nM.Chamizo\\rlap,\\tute\\geneva\\\nY.H.Chang\\rlap,\\tute\\taiwan\\ \nU.K.Chaturvedi\\rlap,\\tute\\wl\\ \nM.Chemarin\\rlap,\\tute\\lyon\\\nA.Chen\\rlap,\\tute\\taiwan\\ \nG.Chen\\rlap,\\tute{\\beijing}\\ \nG.M.Chen\\rlap,\\tute\\beijing\\ \nH.F.Chen\\rlap,\\tute\\hefei\\ \nH.S.Chen\\rlap,\\tute\\beijing\\\nX.Chereau\\rlap,\\tute\\lapp\\ \nG.Chiefari\\rlap,\\tute\\naples\\ \nL.Cifarelli\\rlap,\\tute\\salerno\\\nF.Cindolo\\rlap,\\tute\\bologna\\\nC.Civinini\\rlap,\\tute\\florence\\ \nI.Clare\\rlap,\\tute\\mit\\\nR.Clare\\rlap,\\tute\\mit\\ \nG.Coignet\\rlap,\\tute\\lapp\\ \nA.P.Colijn\\rlap,\\tute\\nikhef\\\nN.Colino\\rlap,\\tute\\madrid\\ \nS.Costantini\\rlap,\\tute\\berlin\\\nF.Cotorobai\\rlap,\\tute\\bucharest\\\nB.Cozzoni\\rlap,\\tute\\bologna\\ \nB.de~la~Cruz\\rlap,\\tute\\madrid\\\nA.Csilling\\rlap,\\tute\\budapest\\\nS.Cucciarelli\\rlap,\\tute\\perugia\\ \nT.S.Dai\\rlap,\\tute\\mit\\ \nJ.A.van~Dalen\\rlap,\\tute\\nymegen\\ \nR.D'Alessandro\\rlap,\\tute\\florence\\ \nR.de~Asmundis\\rlap,\\tute\\naples\\\nP.D\\'eglon\\rlap,\\tute\\geneva\\ \nA.Degr\\'e\\rlap,\\tute{\\lapp}\\ \nK.Deiters\\rlap,\\tute{\\psinst}\\ \nD.della~Volpe\\rlap,\\tute\\naples\\ \nP.Denes\\rlap,\\tute\\prince\\ \nF.DeNotaristefani\\rlap,\\tute\\rome\\\nA.De~Salvo\\rlap,\\tute\\eth\\ \nM.Diemoz\\rlap,\\tute\\rome\\ \nD.van~Dierendonck\\rlap,\\tute\\nikhef\\\nF.Di~Lodovico\\rlap,\\tute\\eth\\\nC.Dionisi\\rlap,\\tute{\\rome}\\ 
\nM.Dittmar\\rlap,\\tute\\eth\\\nA.Dominguez\\rlap,\\tute\\ucsd\\\nA.Doria\\rlap,\\tute\\naples\\\nM.T.Dova\\rlap,\\tute{\\wl,\\sharp}\\\nD.Duchesneau\\rlap,\\tute\\lapp\\ \nD.Dufournaud\\rlap,\\tute\\lapp\\ \nP.Duinker\\rlap,\\tute{\\nikhef}\\ \nI.Duran\\rlap,\\tute\\santiago\\\nH.El~Mamouni\\rlap,\\tute\\lyon\\\nA.Engler\\rlap,\\tute\\cmu\\ \nF.J.Eppling\\rlap,\\tute\\mit\\ \nF.C.Ern\\'e\\rlap,\\tute{\\nikhef}\\ \nP.Extermann\\rlap,\\tute\\geneva\\ \nM.Fabre\\rlap,\\tute\\psinst\\ \nR.Faccini\\rlap,\\tute\\rome\\\nM.A.Falagan\\rlap,\\tute\\madrid\\\nS.Falciano\\rlap,\\tute{\\rome,\\cern}\\\nA.Favara\\rlap,\\tute\\cern\\\nJ.Fay\\rlap,\\tute\\lyon\\ \nO.Fedin\\rlap,\\tute\\peters\\\nM.Felcini\\rlap,\\tute\\eth\\\nT.Ferguson\\rlap,\\tute\\cmu\\ \nF.Ferroni\\rlap,\\tute{\\rome}\\\nH.Fesefeldt\\rlap,\\tute\\aachen\\ \nE.Fiandrini\\rlap,\\tute\\perugia\\\nJ.H.Field\\rlap,\\tute\\geneva\\ \nF.Filthaut\\rlap,\\tute\\cern\\\nP.H.Fisher\\rlap,\\tute\\mit\\\nI.Fisk\\rlap,\\tute\\ucsd\\\nG.Forconi\\rlap,\\tute\\mit\\ \nL.Fredj\\rlap,\\tute\\geneva\\\nK.Freudenreich\\rlap,\\tute\\eth\\\nC.Furetta\\rlap,\\tute\\milan\\\nYu.Galaktionov\\rlap,\\tute{\\moscow,\\mit}\\\nS.N.Ganguli\\rlap,\\tute{\\tata}\\ \nP.Garcia-Abia\\rlap,\\tute\\basel\\\nM.Gataullin\\rlap,\\tute\\caltech\\\nS.S.Gau\\rlap,\\tute\\ne\\\nS.Gentile\\rlap,\\tute{\\rome,\\cern}\\\nN.Gheordanescu\\rlap,\\tute\\bucharest\\\nS.Giagu\\rlap,\\tute\\rome\\\nZ.F.Gong\\rlap,\\tute{\\hefei}\\\nG.Grenier\\rlap,\\tute\\lyon\\ \nO.Grimm\\rlap,\\tute\\eth\\ \nM.W.Gruenewald\\rlap,\\tute\\berlin\\ \nM.Guida\\rlap,\\tute\\salerno\\ \nR.van~Gulik\\rlap,\\tute\\nikhef\\\nV.K.Gupta\\rlap,\\tute\\prince\\ \nA.Gurtu\\rlap,\\tute{\\tata}\\\nL.J.Gutay\\rlap,\\tute\\purdue\\\nD.Haas\\rlap,\\tute\\basel\\\nA.Hasan\\rlap,\\tute\\cyprus\\ \nD.Hatzifotiadou\\rlap,\\tute\\bologna\\\nT.Hebbeker\\rlap,\\tute\\berlin\\\nA.Herv\\'e\\rlap,\\tute\\cern\\ 
\nP.Hidas\\rlap,\\tute\\budapest\\\nJ.Hirschfelder\\rlap,\\tute\\cmu\\\nH.Hofer\\rlap,\\tute\\eth\\ \nG.~Holzner\\rlap,\\tute\\eth\\ \nH.Hoorani\\rlap,\\tute\\cmu\\\nS.R.Hou\\rlap,\\tute\\taiwan\\\nI.Iashvili\\rlap,\\tute\\zeuthen\\\nB.N.Jin\\rlap,\\tute\\beijing\\ \nL.W.Jones\\rlap,\\tute\\mich\\\nP.de~Jong\\rlap,\\tute\\nikhef\\\nI.Josa-Mutuberr{\\'\\i}a\\rlap,\\tute\\madrid\\\nR.A.Khan\\rlap,\\tute\\wl\\ \nD.Kamrad\\rlap,\\tute\\zeuthen\\\nM.Kaur\\rlap,\\tute{\\wl,\\diamondsuit}\\\nM.N.Kienzle-Focacci\\rlap,\\tute\\geneva\\\nD.Kim\\rlap,\\tute\\rome\\\nD.H.Kim\\rlap,\\tute\\korea\\\nJ.K.Kim\\rlap,\\tute\\korea\\\nS.C.Kim\\rlap,\\tute\\korea\\\nJ.Kirkby\\rlap,\\tute\\cern\\\nD.Kiss\\rlap,\\tute\\budapest\\\nW.Kittel\\rlap,\\tute\\nymegen\\\nA.Klimentov\\rlap,\\tute{\\mit,\\moscow}\\ \nA.C.K{\\\"o}nig\\rlap,\\tute\\nymegen\\\nA.Kopp\\rlap,\\tute\\zeuthen\\\nI.Korolko\\rlap,\\tute\\moscow\\\nV.Koutsenko\\rlap,\\tute{\\mit,\\moscow}\\ \nM.Kr{\\\"a}ber\\rlap,\\tute\\eth\\ \nR.W.Kraemer\\rlap,\\tute\\cmu\\\nW.Krenz\\rlap,\\tute\\aachen\\ \nA.Kunin\\rlap,\\tute{\\mit,\\moscow}\\ \nP.Ladron~de~Guevara\\rlap,\\tute{\\madrid}\\\nI.Laktineh\\rlap,\\tute\\lyon\\\nG.Landi\\rlap,\\tute\\florence\\\nK.Lassila-Perini\\rlap,\\tute\\eth\\\nP.Laurikainen\\rlap,\\tute\\seft\\\nA.Lavorato\\rlap,\\tute\\salerno\\\nM.Lebeau\\rlap,\\tute\\cern\\\nA.Lebedev\\rlap,\\tute\\mit\\\nP.Lebrun\\rlap,\\tute\\lyon\\\nP.Lecomte\\rlap,\\tute\\eth\\ \nP.Lecoq\\rlap,\\tute\\cern\\ \nP.Le~Coultre\\rlap,\\tute\\eth\\ \nH.J.Lee\\rlap,\\tute\\berlin\\\nJ.M.Le~Goff\\rlap,\\tute\\cern\\\nR.Leiste\\rlap,\\tute\\zeuthen\\ \nE.Leonardi\\rlap,\\tute\\rome\\\nP.Levtchenko\\rlap,\\tute\\peters\\\nC.Li\\rlap,\\tute\\hefei\\\nC.H.Lin\\rlap,\\tute\\taiwan\\\nW.T.Lin\\rlap,\\tute\\taiwan\\\nF.L.Linde\\rlap,\\tute{\\nikhef}\\\nL.Lista\\rlap,\\tute\\naples\\\nZ.A.Liu\\rlap,\\tute\\beijing\\\nW.Lohmann\\rlap,\\tute\\zeuthen\\\nE.Longo\\rlap,\\tute\\rome\\ \nY.S.Lu\\rlap,\\tute\\beijing\\ 
\nK.L\\\"ubelsmeyer\\rlap,\\tute\\aachen\\\nC.Luci\\rlap,\\tute{\\cern,\\rome}\\ \nD.Luckey\\rlap,\\tute{\\mit}\\\nL.Lugnier\\rlap,\\tute\\lyon\\ \nL.Luminari\\rlap,\\tute\\rome\\\nW.Lustermann\\rlap,\\tute\\eth\\\nW.G.Ma\\rlap,\\tute\\hefei\\ \nM.Maity\\rlap,\\tute\\tata\\\nL.Malgeri\\rlap,\\tute\\cern\\\nA.Malinin\\rlap,\\tute{\\moscow,\\cern}\\ \nC.Ma\\~na\\rlap,\\tute\\madrid\\\nD.Mangeol\\rlap,\\tute\\nymegen\\\nP.Marchesini\\rlap,\\tute\\eth\\ \nG.Marian\\rlap,\\tute\\debrecen\\ \nJ.P.Martin\\rlap,\\tute\\lyon\\ \nF.Marzano\\rlap,\\tute\\rome\\ \nG.G.G.Massaro\\rlap,\\tute\\nikhef\\ \nK.Mazumdar\\rlap,\\tute\\tata\\\nR.R.McNeil\\rlap,\\tute{\\lsu}\\ \nS.Mele\\rlap,\\tute\\cern\\\nL.Merola\\rlap,\\tute\\naples\\ \nM.Meschini\\rlap,\\tute\\florence\\ \nW.J.Metzger\\rlap,\\tute\\nymegen\\\nM.von~der~Mey\\rlap,\\tute\\aachen\\\nA.Mihul\\rlap,\\tute\\bucharest\\\nH.Milcent\\rlap,\\tute\\cern\\\nG.Mirabelli\\rlap,\\tute\\rome\\ \nJ.Mnich\\rlap,\\tute\\cern\\\nG.B.Mohanty\\rlap,\\tute\\tata\\ \nP.Molnar\\rlap,\\tute\\berlin\\\nB.Monteleoni\\rlap,\\tute{\\florence,\\dag}\\ \nT.Moulik\\rlap,\\tute\\tata\\\nG.S.Muanza\\rlap,\\tute\\lyon\\\nF.Muheim\\rlap,\\tute\\geneva\\\nA.J.M.Muijs\\rlap,\\tute\\nikhef\\\nM.Musy\\rlap,\\tute\\rome\\ \nM.Napolitano\\rlap,\\tute\\naples\\\nF.Nessi-Tedaldi\\rlap,\\tute\\eth\\\nH.Newman\\rlap,\\tute\\caltech\\ \nT.Niessen\\rlap,\\tute\\aachen\\\nA.Nisati\\rlap,\\tute\\rome\\\nH.Nowak\\rlap,\\tute\\zeuthen\\ \nY.D.Oh\\rlap,\\tute\\korea\\\nG.Organtini\\rlap,\\tute\\rome\\\nR.Ostonen\\rlap,\\tute\\seft\\\nC.Palomares\\rlap,\\tute\\madrid\\\nD.Pandoulas\\rlap,\\tute\\aachen\\ \nS.Paoletti\\rlap,\\tute{\\rome,\\cern}\\\nP.Paolucci\\rlap,\\tute\\naples\\\nR.Paramatti\\rlap,\\tute\\rome\\ \nH.K.Park\\rlap,\\tute\\cmu\\\nI.H.Park\\rlap,\\tute\\korea\\\nG.Pascale\\rlap,\\tute\\rome\\\nG.Passaleva\\rlap,\\tute{\\cern}\\\nS.Patricelli\\rlap,\\tute\\naples\\ 
\nT.Paul\\rlap,\\tute\\ne\\\nM.Pauluzzi\\rlap,\\tute\\perugia\\\nC.Paus\\rlap,\\tute\\cern\\\nF.Pauss\\rlap,\\tute\\eth\\\nD.Peach\\rlap,\\tute\\cern\\\nM.Pedace\\rlap,\\tute\\rome\\\nS.Pensotti\\rlap,\\tute\\milan\\\nD.Perret-Gallix\\rlap,\\tute\\lapp\\ \nB.Petersen\\rlap,\\tute\\nymegen\\\nD.Piccolo\\rlap,\\tute\\naples\\ \nF.Pierella\\rlap,\\tute\\bologna\\ \nM.Pieri\\rlap,\\tute{\\florence}\\\nP.A.Pirou\\'e\\rlap,\\tute\\prince\\ \nE.Pistolesi\\rlap,\\tute\\milan\\\nV.Plyaskin\\rlap,\\tute\\moscow\\ \nM.Pohl\\rlap,\\tute\\eth\\ \nV.Pojidaev\\rlap,\\tute{\\moscow,\\florence}\\\nH.Postema\\rlap,\\tute\\mit\\\nJ.Pothier\\rlap,\\tute\\cern\\\nN.Produit\\rlap,\\tute\\geneva\\\nD.O.Prokofiev\\rlap,\\tute\\purdue\\ \nD.Prokofiev\\rlap,\\tute\\peters\\ \nJ.Quartieri\\rlap,\\tute\\salerno\\\nG.Rahal-Callot\\rlap,\\tute{\\eth,\\cern}\\\nM.A.Rahaman\\rlap,\\tute\\tata\\ \nP.Raics\\rlap,\\tute\\debrecen\\ \nN.Raja\\rlap,\\tute\\tata\\\nR.Ramelli\\rlap,\\tute\\eth\\ \nP.G.Rancoita\\rlap,\\tute\\milan\\\nG.Raven\\rlap,\\tute\\ucsd\\\nP.Razis\\rlap,\\tute\\cyprus\nD.Ren\\rlap,\\tute\\eth\\ \nM.Rescigno\\rlap,\\tute\\rome\\\nS.Reucroft\\rlap,\\tute\\ne\\\nT.van~Rhee\\rlap,\\tute\\utrecht\\\nS.Riemann\\rlap,\\tute\\zeuthen\\\nK.Riles\\rlap,\\tute\\mich\\\nA.Robohm\\rlap,\\tute\\eth\\\nJ.Rodin\\rlap,\\tute\\alabama\\\nB.P.Roe\\rlap,\\tute\\mich\\\nL.Romero\\rlap,\\tute\\madrid\\ \nA.Rosca\\rlap,\\tute\\berlin\\ \nS.Rosier-Lees\\rlap,\\tute\\lapp\\ \nJ.A.Rubio\\rlap,\\tute{\\cern}\\ \nD.Ruschmeier\\rlap,\\tute\\berlin\\\nH.Rykaczewski\\rlap,\\tute\\eth\\ \nS.Saremi\\rlap,\\tute\\lsu\\ \nS.Sarkar\\rlap,\\tute\\rome\\\nJ.Salicio\\rlap,\\tute{\\cern}\\ \nE.Sanchez\\rlap,\\tute\\cern\\\nM.P.Sanders\\rlap,\\tute\\nymegen\\\nM.E.Sarakinos\\rlap,\\tute\\seft\\\nC.Sch{\\\"a}fer\\rlap,\\tute\\aachen\\\nV.Schegelsky\\rlap,\\tute\\peters\\\nS.Schmidt-Kaerst\\rlap,\\tute\\aachen\\\nD.Schmitz\\rlap,\\tute\\aachen\\ 
\nH.Schopper\\rlap,\\tute\\hamburg\\\nD.J.Schotanus\\rlap,\\tute\\nymegen\\\nG.Schwering\\rlap,\\tute\\aachen\\ \nC.Sciacca\\rlap,\\tute\\naples\\\nD.Sciarrino\\rlap,\\tute\\geneva\\ \nA.Seganti\\rlap,\\tute\\bologna\\ \nL.Servoli\\rlap,\\tute\\perugia\\\nS.Shevchenko\\rlap,\\tute{\\caltech}\\\nN.Shivarov\\rlap,\\tute\\sofia\\\nV.Shoutko\\rlap,\\tute\\moscow\\ \nE.Shumilov\\rlap,\\tute\\moscow\\ \nA.Shvorob\\rlap,\\tute\\caltech\\\nT.Siedenburg\\rlap,\\tute\\aachen\\\nD.Son\\rlap,\\tute\\korea\\\nB.Smith\\rlap,\\tute\\cmu\\\nP.Spillantini\\rlap,\\tute\\florence\\ \nM.Steuer\\rlap,\\tute{\\mit}\\\nD.P.Stickland\\rlap,\\tute\\prince\\ \nA.Stone\\rlap,\\tute\\lsu\\ \nH.Stone\\rlap,\\tute{\\prince,\\dag}\\ \nB.Stoyanov\\rlap,\\tute\\sofia\\\nA.Straessner\\rlap,\\tute\\aachen\\\nK.Sudhakar\\rlap,\\tute{\\tata}\\\nG.Sultanov\\rlap,\\tute\\wl\\\nL.Z.Sun\\rlap,\\tute{\\hefei}\\\nH.Suter\\rlap,\\tute\\eth\\ \nJ.D.Swain\\rlap,\\tute\\wl\\\nZ.Szillasi\\rlap,\\tute{\\alabama,\\P}\\\nT.Sztaricskai\\rlap,\\tute{\\alabama,\\P}\\ \nX.W.Tang\\rlap,\\tute\\beijing\\\nL.Tauscher\\rlap,\\tute\\basel\\\nL.Taylor\\rlap,\\tute\\ne\\\nC.Timmermans\\rlap,\\tute\\nymegen\\\nSamuel~C.C.Ting\\rlap,\\tute\\mit\\ \nS.M.Ting\\rlap,\\tute\\mit\\ \nS.C.Tonwar\\rlap,\\tute\\tata\\ \nJ.T\\'oth\\rlap,\\tute{\\budapest}\\ \nC.Tully\\rlap,\\tute\\prince\\\nK.L.Tung\\rlap,\\tute\\beijing\nY.Uchida\\rlap,\\tute\\mit\\\nJ.Ulbricht\\rlap,\\tute\\eth\\ \nE.Valente\\rlap,\\tute\\rome\\ \nG.Vesztergombi\\rlap,\\tute\\budapest\\\nI.Vetlitsky\\rlap,\\tute\\moscow\\ \nD.Vicinanza\\rlap,\\tute\\salerno\\ \nG.Viertel\\rlap,\\tute\\eth\\ \nS.Villa\\rlap,\\tute\\ne\\\nM.Vivargent\\rlap,\\tute{\\lapp}\\ \nS.Vlachos\\rlap,\\tute\\basel\\\nI.Vodopianov\\rlap,\\tute\\peters\\ \nH.Vogel\\rlap,\\tute\\cmu\\\nH.Vogt\\rlap,\\tute\\zeuthen\\ \nI.Vorobiev\\rlap,\\tute{\\moscow}\\ \nA.A.Vorobyov\\rlap,\\tute\\peters\\ \nA.Vorvolakos\\rlap,\\tute\\cyprus\\\nM.Wadhwa\\rlap,\\tute\\basel\\\nW.Wallraff\\rlap,\\tute\\aachen\\ 
\nM.Wang\\rlap,\\tute\\mit\\\nX.L.Wang\\rlap,\\tute\\hefei\\ \nZ.M.Wang\\rlap,\\tute{\\hefei}\\\nA.Weber\\rlap,\\tute\\aachen\\\nM.Weber\\rlap,\\tute\\aachen\\\nP.Wienemann\\rlap,\\tute\\aachen\\\nH.Wilkens\\rlap,\\tute\\nymegen\\\nS.X.Wu\\rlap,\\tute\\mit\\\nS.Wynhoff\\rlap,\\tute\\aachen\\ \nL.Xia\\rlap,\\tute\\caltech\\ \nZ.Z.Xu\\rlap,\\tute\\hefei\\ \nB.Z.Yang\\rlap,\\tute\\hefei\\ \nC.G.Yang\\rlap,\\tute\\beijing\\ \nH.J.Yang\\rlap,\\tute\\beijing\\\nM.Yang\\rlap,\\tute\\beijing\\\nJ.B.Ye\\rlap,\\tute{\\hefei}\\\nS.C.Yeh\\rlap,\\tute\\tsinghua\\ \nAn.Zalite\\rlap,\\tute\\peters\\\nYu.Zalite\\rlap,\\tute\\peters\\\nZ.P.Zhang\\rlap,\\tute{\\hefei}\\ \nG.Y.Zhu\\rlap,\\tute\\beijing\\\nR.Y.Zhu\\rlap,\\tute\\caltech\\\nA.Zichichi\\rlap,\\tute{\\bologna,\\cern,\\wl}\\\nF.Ziegler\\rlap,\\tute\\zeuthen\\\nG.Zilizi\\rlap,\\tute{\\alabama,\\P}\\\nM.Z{\\\"o}ller\\rlap.\\tute\\aachen\n\\newpage\n\\begin{list}{A}{\\itemsep=0pt plus 0pt minus 0pt\\parsep=0pt plus 0pt minus 0pt\n \\topsep=0pt plus 0pt minus 0pt}\n\\item[\\aachen]\n I. Physikalisches Institut, RWTH, D-52056 Aachen, FRG$^{\\S}$\\\\\n III. 
Physikalisches Institut, RWTH, D-52056 Aachen, FRG$^{\\S}$\n\\item[\\nikhef] National Institute for High Energy Physics, NIKHEF, \n and University of Amsterdam, NL-1009 DB Amsterdam, The Netherlands\n\\item[\\mich] University of Michigan, Ann Arbor, MI 48109, USA\n\\item[\\lapp] Laboratoire d'Annecy-le-Vieux de Physique des Particules, \n LAPP,IN2P3-CNRS, BP 110, F-74941 Annecy-le-Vieux CEDEX, France\n\\item[\\basel] Institute of Physics, University of Basel, CH-4056 Basel,\n Switzerland\n\\item[\\lsu] Louisiana State University, Baton Rouge, LA 70803, USA\n\\item[\\beijing] Institute of High Energy Physics, IHEP, \n 100039 Beijing, China$^{\\triangle}$ \n\\item[\\berlin] Humboldt University, D-10099 Berlin, FRG$^{\\S}$\n\\item[\\bologna] University of Bologna and INFN-Sezione di Bologna, \n I-40126 Bologna, Italy\n\\item[\\tata] Tata Institute of Fundamental Research, Bombay 400 005, India\n\\item[\\ne] Northeastern University, Boston, MA 02115, USA\n\\item[\\bucharest] Institute of Atomic Physics and University of Bucharest,\n R-76900 Bucharest, Romania\n\\item[\\budapest] Central Research Institute for Physics of the \n Hungarian Academy of Sciences, H-1525 Budapest 114, Hungary$^{\\ddag}$\n\\item[\\mit] Massachusetts Institute of Technology, Cambridge, MA 02139, USA\n\\item[\\debrecen] Lajos Kossuth University-ATOMKI, H-4010 Debrecen, Hungary$^\\P$\n\\item[\\florence] INFN Sezione di Firenze and University of Florence, \n I-50125 Florence, Italy\n\\item[\\cern] European Laboratory for Particle Physics, CERN, \n CH-1211 Geneva 23, Switzerland\n\\item[\\wl] World Laboratory, FBLJA Project, CH-1211 Geneva 23, Switzerland\n\\item[\\geneva] University of Geneva, CH-1211 Geneva 4, Switzerland\n\\item[\\hefei] Chinese University of Science and Technology, USTC,\n Hefei, Anhui 230 029, China$^{\\triangle}$\n\\item[\\seft] SEFT, Research Institute for High Energy Physics, P.O. 
Box 9,\n SF-00014 Helsinki, Finland\n\\item[\\lausanne] University of Lausanne, CH-1015 Lausanne, Switzerland\n\\item[\\lecce] INFN-Sezione di Lecce and Universit\\'a Degli Studi di Lecce,\n I-73100 Lecce, Italy\n\\item[\\lyon] Institut de Physique Nucl\\'eaire de Lyon, \n IN2P3-CNRS,Universit\\'e Claude Bernard, \n F-69622 Villeurbanne, France\n\\item[\\madrid] Centro de Investigaciones Energ{\\'e}ticas, \n Medioambientales y Tecnolog{\\'\\i}cas, CIEMAT, E-28040 Madrid,\n Spain${\\flat}$ \n\\item[\\milan] INFN-Sezione di Milano, I-20133 Milan, Italy\n\\item[\\moscow] Institute of Theoretical and Experimental Physics, ITEP, \n Moscow, Russia\n\\item[\\naples] INFN-Sezione di Napoli and University of Naples, \n I-80125 Naples, Italy\n\\item[\\cyprus] Department of Natural Sciences, University of Cyprus,\n Nicosia, Cyprus\n\\item[\\nymegen] University of Nijmegen and NIKHEF, \n NL-6525 ED Nijmegen, The Netherlands\n\\item[\\caltech] California Institute of Technology, Pasadena, CA 91125, USA\n\\item[\\perugia] INFN-Sezione di Perugia and Universit\\'a Degli \n Studi di Perugia, I-06100 Perugia, Italy \n\\item[\\cmu] Carnegie Mellon University, Pittsburgh, PA 15213, USA\n\\item[\\prince] Princeton University, Princeton, NJ 08544, USA\n\\item[\\rome] INFN-Sezione di Roma and University of Rome, ``La Sapienza\",\n I-00185 Rome, Italy\n\\item[\\peters] Nuclear Physics Institute, St. Petersburg, Russia\n\\item[\\salerno] University and INFN, Salerno, I-84100 Salerno, Italy\n\\item[\\ucsd] University of California, San Diego, CA 92093, USA\n\\item[\\santiago] Dept. de Fisica de Particulas Elementales, Univ. 
de Santiago,\n E-15706 Santiago de Compostela, Spain\n\\item[\\sofia] Bulgarian Academy of Sciences, Central Lab.~of \n Mechatronics and Instrumentation, BU-1113 Sofia, Bulgaria\n\\item[\\korea] Center for High Energy Physics, Adv.~Inst.~of Sciences\n and Technology, 305-701 Taejon,~Republic~of~{Korea}\n\\item[\\alabama] University of Alabama, Tuscaloosa, AL 35486, USA\n\\item[\\utrecht] Utrecht University and NIKHEF, NL-3584 CB Utrecht, \n The Netherlands\n\\item[\\purdue] Purdue University, West Lafayette, IN 47907, USA\n\\item[\\psinst] Paul Scherrer Institut, PSI, CH-5232 Villigen, Switzerland\n\\item[\\zeuthen] DESY, D-15738 Zeuthen, \n FRG\n\\item[\\eth] Eidgen\\\"ossische Technische Hochschule, ETH Z\\\"urich,\n CH-8093 Z\\\"urich, Switzerland\n\\item[\\hamburg] University of Hamburg, D-22761 Hamburg, FRG\n\\item[\\taiwan] National Central University, Chung-Li, Taiwan, China\n\\item[\\tsinghua] Department of Physics, National Tsing Hua University,\n Taiwan, China\n\\item[\\S] Supported by the German Bundesministerium \n f\\\"ur Bildung, Wissenschaft, Forschung und Technologie\n\\item[\\ddag] Supported by the Hungarian OTKA fund under contract\nnumbers T019181, F023259 and T024011.\n\\item[\\P] Also supported by the Hungarian OTKA fund under contract\n numbers T22238 and T026178.\n\\item[$\\flat$] Supported also by the Comisi\\'on Interministerial de Ciencia y \n Tecnolog{\\'\\i}a.\n\\item[$\\sharp$] Also supported by CONICET and Universidad Nacional de La Plata,\n CC 67, 1900 La Plata, Argentina.\n\\item[$\\diamondsuit$] Also supported by Panjab University, Chandigarh-160014, \n India.\n\\item[$\\triangle$] Supported by the National Natural Science\n Foundation of China.\n\\item[\\dag] Deceased.\n\\end{list}\n}\n\\vfill\n\n\n\n\n\n\n\n\\newpage\n\n\n\n\n\\begin{table} [htbp]\n\\begin{center} \n\\begin{tabular}{|c|c|c|c|c|} \\hline \n\\multicolumn{5}{|c|}{Electron\/Muon plus hadrons selections } \\\\ \\hline\n & Very Low \\ensuremath{\\Delta M} & Low 
\\ensuremath{\\Delta M} & Medium \\ensuremath{\\Delta M} & Large \\ensuremath{\\Delta M} \\\\ \\hline\nNo. of isolated leptons $\\ge$ & 1 & 1 & 1 & 1 \\\\ \\hline\n$N_{tk} - N_{lep}$ $\\ge$ & 2 & 4 & 5 & 4 \\\\ \\hline\n$N_{cl}$ $\\ge$ & 6 & 10 & 10 & 10 \\\\ \\hline\n$\\sin(\\theta_{miss})$ $\\ge$ & 0.74 & 0.38 & 0.23 & 0.28 \\\\ \\hline\n $E^{\\perp}_{25}$ (\\gev{}) $\\le$ & 0.52 & -- & -- & 11.6 \\\\ \\hline\n $p_{\\perp}$ (\\gev{}) $\\ge$ & 3.24 & 5.62 & 8.65 & 9.84 \\\\ \\hline\n $E_{lep} $ (\\gev{}) $\\ge$ & 1.51 & 2.59 & 6.17 & 25.9 \\\\ \\hline\n $E_{lep} $ (\\gev{}) $\\le$ & 9.12 & 27.5 & 31.2 & 43.8 \\\\ \\hline\n $E_{TTJL} $ (\\gev{}) $\\ge$ & 1.27 & 0.95 & 1.44 & -- \\\\ \\hline\n $M_{had} $ (\\gev{}) $\\le$ & 5.0 & 28.2 & 39.1 & 89.0 \\\\ \\hline\n $M_{rec} $ (\\gev{}) $\\ge$ & 144 & 130 & 107 & 57.0 \\\\ \\hline\n $E_{vis} $ (\\gev{}) $\\ge$ & 4.02 & 8.90 & 31.5 & 65.3 \\\\ \\hline\n $E_{vis} $ (\\gev{}) $\\le$ & 11.0 & 59.1 & 93.6 & 118 \\\\ \\hline\n\\end{tabular} \n\\caption{Values of the cuts for the lepton plus hadrons selections; they\n are determined with the optimisation procedure described in \n Section~\\protect\\ref{sec:optimization}.\\label{tab2}}\n\\end{center}\n\\end{table}\n\\begin{table}[!]\n\\vspace{-0.1 cm}\n\\begin{center}\n\\begin{tabular}{|c|c|c|c|c|} \\hline \n\\multicolumn{5}{|c|}{Chargino Hadronic selections } \\\\ \\hline\n & Very Low \\ensuremath{\\Delta M} & Low \\ensuremath{\\Delta M} & Medium \\ensuremath{\\Delta M} & Large \\ensuremath{\\Delta M} \\\\ \\hline\n$N_{cl}$ $\\ge$ & 14 & 14 & 14 & 14 \\\\ \\hline\n$N_{tk}$ $\\ge$ & 5 & 5 & 5 & 5 \\\\ \\hline\n $p_{\\perp} $ (\\gev{}) $\\ge$ & 3.72 & 10.0 & 11.5 & 11.4 \\\\ \\hline\n $p_{\\perp}\/E_{vis}$ $\\ge$ & -- & 0.20 & 0.15 & 0.10 \\\\ \\hline\n$E_{vis}$ (\\gev{}) $\\le$ & 12.0 & 68.0 & 76.0 & 149 \\\\ \\hline\n Acollinearity (rad) $\\le$ & 2.00 & -- & -- & 3.02 \\\\ \\hline\n Acoplanarity (rad) $\\le$ & 2.18 & 2.89 & 2.92 & 3.11 \\\\ \\hline\n sin($\\theta_{miss}$) 
$\\ge$ & 0.56 & 0.46 & 0.20 & 0.61 \\\\ \\hline\n$E^{\\perp}_{25} $ (\\gev{}) $\\le$ & 0.21& 5.80 & 5.80 & 3.25 \\\\ \\hline\n $E_{25}$ (\\gev{}) $\\le$ & -- & -- & -- & 2.53 \\\\ \\hline\n $p_{\\parallel}\/E_{vis}$ $\\le$ & -- & 0.53 & 0.95 & 0.55 \\\\ \\hline\n $E_{lep}^{max}$ (\\gev{}) $\\le$ & 9.12 & 27.5 & 31.2 & 43.8 \\\\ \\hline\n $M_{vis}$ (\\gev{}) $\\ge$ & 2.85& 9.3 & 35.4 & -- \\\\ \\hline\n $M_{rec}$ (\\gev{}) $\\ge$ & -- & 124 & 67.2 & -- \\\\ \\hline\n $E_{vis}\/\\sqrt{s}$ $\\ge$ & -- & -- & -- & 0.60 \\\\ \\hline\n $E_{30^0}\/E_{vis}$ $\\le$ & -- & 0.22 & 0.40 & 0.65 \\\\ \\hline\n $E_{TTJ}\/p_\\perp$ $\\ge$ & 0.24& -- & 0.24 & -- \\\\ \\hline\n $y_{\\perp}$ $\\ge$ & -- & 0.28 & 0.28 & 0.40 \\\\ \\hline\n\\end{tabular} \n\\caption{Values of the cuts for the purely hadronic selections\n which are determined with the optimisation procedure\n described in Section~\\protect\\ref{sec:optimization}.\\label{tab3}}\n\n\\end{center}\n\\end{table}\n\n\\begin{table} [htbp]\n\\begin{center} \n\\begin{tabular}{|c|c|c|c|c|c|c|c|c|} \\hline \n & \\multicolumn{2}{|c|}{Low \\ensuremath{\\Delta M}}& \\multicolumn{2}{|c|}{Medium \\ensuremath{\\Delta M}}& \n\\multicolumn{2}{|c|}{High \\ensuremath{\\Delta M}}& \\multicolumn{2}{|c|}{Combined} \\\\ \\hline\n & $N_{data}$ & $N_{exp}$ & $N_{data}$ & $N_{exp}$ & $N_{data}$ & $N_{exp}$ \n& $N_{data}$ & $N_{exp}$ \\\\ \\hline\n\\chinop & 72 & 66.9 & 11 & 10.9 & 67 & 76.7 & 147 & 148. 
\\\\ \\hline\n\\chinonn & 43 & 39.3 & 6 & 7.78 & 3 & 2.45 & 50 & 48.1 \\\\ \\hline\n\\end{tabular} \n\\caption[cascade]{Results for charginos and neutralinos:\n $N_{data}$ is the number of observed events and\n $N_{exp}$ is the number of expected events from Standard \n Model processes for the total integrated\n luminosity collected at $\\sqrt{s} = 189 \\gev{}$.\n\\label{tab8}}\n\\end{center} \n\\end{table} \n\\begin{table} [htbp] \n\\begin{center} \n\\begin{tabular}{|c|r|r|r|r|} \n\\hline \n{ } &\n \\multicolumn{2}{|c|}{~~LL~~} &\n \\multicolumn{2}{|c|}{~~$\\neutralino{1}\\, \\ensuremath{\\mathrm{W^\\ast}}$~~} \\\\ \\hline\n{ \\ensuremath{\\Delta M}{} (\\gev{}) } & $\\epsilon $ (\\%) & $~N_{exp}$ &\n $\\epsilon $ (\\%) & $N_{exp}$ \\\\\n\\hline \n3 & 1.6 & 20.3 & 1.9 & 39.4 \\\\ \n5 & 6.5 & 20.3 & 16.8 & 76.8 \\\\\n10 & 19.2 & 27.7 & 8.5 & 2.9 \\\\\n20 & 25.0 & 7.3 & 38.2 & 10.4 \\\\\n30 & 30.0 & 7.3 & 46.6 & 7.5 \\\\ \n40 & 28.9 & 7.3 & 44.2 & 4.9 \\\\\n50 & 26.9 & 7.3 & 25.3 & 4.9 \\\\ \n60 & 21.6 & 7.3 & 16.6 & 4.9 \\\\\n75 & 34.5 & 34.6 & 20.8 & 55.5 \\\\\n90 & 31.8 & 34.6 & 7.8 & 55.5 \\\\\n\\hline\n\\end{tabular} \n\\caption[cascade]{Optimised chargino efficiencies ($\\epsilon$) \n for the purely leptonic (LL) \n and for the\n $\\neutralino{1}\\,\\ensuremath{\\mathrm{W^\\ast}}$ decay mode.\n $N_{exp}$ is the number of events expected from \n Standard Model processes.\n Results are given as a function of \n $\\ensuremath{\\Delta M}$\n for $M_{\\chargino{\\pm}} = 94 \\gev{}$ at $\\sqrt{s} = 189 \\gev{}$.\n\\label{tab4}}\n\\end{center}\n\\end{table}\n\n\\begin{table} [htbp]\n\\begin{center}\n\\begin{tabular}{|c|r|r|r|r|}\n\\hline \n{ } &\n \\multicolumn{2}{|c|}{~~LL~~} &\n \\multicolumn{2}{|c|}{~~$\\neutralino{1}\\, \\ensuremath{\\mathrm{Z^\\ast}}$~~} \\\\ \\hline\n{ \\ensuremath{\\Delta M}{} (\\gev{}) } & $\\epsilon $ (\\%) &~$N_{exp}$ &\n $\\epsilon $ (\\%) & $N_{exp}$ \n \\\\\n\n\\hline\n6 & 9.1 & 9.5 & 3.5 & 35.9 \\\\\n10 & 10.9 & 10.2 & 10.9 & 35.9 
\\\\\n20 & 27.3 & 4.2 & 9.2 & 3.4 \\\\\n40 & 30.8 & 2.8 & 25.9 & 3.4 \\\\\n60 & 34.1 & 19.2 & 35.2 & 7.7 \\\\\n80 & 35.5 & 19.2 & 35.2 & 7.7 \\\\\n100 & 29.3 & 21.4 & 20.7 & 7.7 \\\\\n140 & 17.7 & 25.5 & 9.4 & 2.4 \\\\\n180 & 10.5 & 25.5 & 8.7 & 2.4 \\\\\n\\hline \n\\end{tabular} \n\\caption[cascade]{Optimised neutralino efficiencies ($\\epsilon$)\n for the purely leptonic (LL)\nfinal states and for the $\\neutralino{1}\\, \\ensuremath{\\mathrm{Z^\\ast}}$ decay mode. \n Results are given as a function of $\\ensuremath{\\Delta M}$\n for $M_{\\neutralino{2}} + M_{\\neutralino{1}} = 188 \\gev{}$\nat $\\sqrt{s} = 189 \\gev{}$.\n \n\\label{tab5b}}\n\\end{center} \n\\end{table} \n\n\\pagebreak \n\n\\begin{figure}\n\\begin{tabular}{cc}\n\\psfig{file=xi_ljjl.eps,width=8.0cm} &\n\\psfig{file=xi_ljjm.eps,width=8.0cm} \\\\\n\\psfig{file=xi_nhhv.eps,width=8.0cm} &\n\\psfig{file=xi_nhhh.eps,width=8.0cm} \n\\end{tabular}\n \\caption{\nNumber of events selected in data (points),\n in Monte Carlo simulation of standard processes (solid line)\nand signal sensitivity (dashed line)\nas a function of selection cuts with increasing background rejection\npower. 
The vertical arrows show the $\\xi$ value corresponding to\nthe optimised cuts.\nThe distributions are shown for the \nchargino lepton-jets low \\ensuremath{\\Delta M}\\ a), the \nchargino lepton-jets medium \\ensuremath{\\Delta M}\\ b), the\nneutralino jet-jet very low \\ensuremath{\\Delta M}\\ c) and the \nneutralino jet-jet high \\ensuremath{\\Delta M}\\ d)\nselections, respectively\n }\n \\label{fig:xi_chaneu}\n\\end{figure}\n\n\\begin{figure}[hbtp]\n\\begin{center}\n \\mbox{\\epsfxsize=10.0cm \\epsffile{upper_char_w_pap.eps}} \\\\ \n \\mbox{\\epsfxsize=10.0cm \\epsffile{upper_char_ll_pap.eps}}\n \\caption{Upper limits on the $\\ee \\rightarrow \\chargino{+}\\chargino{-}$\n production cross section\nup to $\\sqrt{s} = 189 \\gev{}$ in \nthe $M_{\\neutralino{1}} - M_{\\chargino{\\pm}}$ plane.\n Exclusion limits are obtained assuming\n standard W branching\nratios in the chargino decay a) or purely leptonic W \n decays b), \n $\\chargino{\\pm} \\rightarrow \\neutralino{1} \\ell^\\pm \\nu $\n ($\\ell=$e, $\\mu$, $\\tau$).\n }\n \\label{fig:xsection_chargino}\n \\end{center}\n\\end{figure} \n\\begin{figure}[hbtp]\n \\begin{center}\n \\mbox{\\epsfxsize=10.0cm \\epsffile{pap_neu.eps}} \\\\\n \\mbox{\\epsfxsize=10.0cm \\epsffile{pap_neu_lep.eps}}\n \\caption{Upper limits on the $\\ee \\rightarrow \\neutralino{1}\\neutralino{2}$\n production cross section\nup to $\\sqrt{s} = 189 \\gev{}$ \n in the $M_{\\neutralino{1}} - M_{\\neutralino{2}}$ plane.\n Exclusion limits are obtained \nassuming standard Z branching\n ratios in the next-to-lightest neutralino decay \n$\\neutralino{2} \\rightarrow \\ensuremath{\\mathrm{Z^\\ast}} \\neutralino{1}$ a)\nor assuming purely leptonic Z \n decays b), \n $ \\neutralino{2}\\rightarrow \\neutralino{1} \\ell^+ \\ell^- $\n ($\\ell=$e, $\\mu$, $\\tau$).\n }\n \\label{fig:xsection_neutralino}\n \\end{center}\n\\end{figure} \n\n\n\\begin{figure}[hbtp]\n \\begin{center} \\vspace*{-1.5cm}\n \\hspace*{-1.5cm}\\mbox{\\epsfxsize=11.50cm 
\\epsffile{higgsino_189_neu2.eps}} \\\\\n \\hspace*{-1.5cm}\\mbox{\\epsfxsize=11.50cm \\epsffile{higgsino_189_char.eps}}\n\\vspace{-.5cm}\n \\caption {Lower mass limits as a function of $M_2$ for\n the next-to-lightest neutralino a)\nand the lightest chargino b).\n The limits are shown for $\\tan\\beta = \\sqrt{2}$ and for $\\mu>0$ and $\\mu<0$. \n } \n \\label{fig:mass_m2}\n \\end{center}\n\\end{figure} \n\n\n\n\n\\begin{figure}[hbtp]\n \\begin{center}\n\n \\hspace*{-1.5cm}\\mbox{\\epsfxsize=18cm \\epsffile{limchar_189_last.eps}}\n \\caption{\nLower limit on $M_{\\chargino{\\pm}}$ as a function of\n $\\tan\\beta$ and for any value of $m_0$.\nThe solid line (gaugino region) shows the lower limit obtained for\nlight scalar neutrinos (also small $M_2$ values), which corresponds to the \nabsolute lower limit for large $\\tan\\beta$ values. The \ndashed line (higgsino region) shows the lower limit obtained for very small $\\ensuremath{\\Delta M}$ values. This line corresponds to the absolute lower limit for \nsmall $\\tan\\beta$ values.\n \\label{fig:mass_cha}}\n \\end{center}\n\\end{figure} \n\n\n\n\n\\begin{figure}\n \\begin{center}\n\\vspace{-1.5cm}\n \\hspace*{-2.0cm}\\mbox{\\epsfxsize=11.5cm \\epsffile{limneu_m0500_189_last.eps}}\n\\vspace{-0.5cm}\n \\caption{ Lower limit on the lightest neutralino mass, $M_{\\neutralino{1}}$,\n as a function of $\\tan\\beta$ for \n $m_0=500 \\gev{}$, when combining all chargino and neutralino searches.}\n \\label{fig:neutralino_mass_a}\n\\vspace{-.5cm}\n \\hspace*{-1.5cm}\\mbox{\\epsfxsize=11.5cm \\epsffile{tan_beta_pap.eps}}\n\\vspace{0.0cm}\n \\caption{Lower limit on the lightest neutralino mass, $M_{\\neutralino{1}}$,\n as a function of\n $m_0$ for two values of $\\tan\\beta$. Scalar lepton searches\ncontribute in the low $m_0$ region. Chargino searches contribute mainly\nin the high $m_0$ region. 
For the low $\\tan\\beta$ values, the neutralino\nsearches give additional contribution in the intermediate $m_0$ region.\n }\n \\label{fig:neutralino_mass_b}\n \\end{center}\n\\end{figure} \n\n\\begin{figure}\n \\begin{center}\n\\vspace{-1.cm}\n \\hspace*{-1.5cm}\\mbox{\\epsfxsize=11.5cm \\epsffile{limneu_anym0_189_last.eps}}\n\\vspace{-.5cm}\n \\caption{Lower limit on $M_{\\neutralino{1}}$\n as a function of\n $\\tan\\beta$ and for any value of $m_0$, when combining\nthe chargino, neutralino and scalar lepton searches.\n }\n \\label{fig:neutralino_any_a}\n\\vspace{-.5cm}\n \\hspace*{-1.5cm}\\mbox{\\epsfxsize=11.5cm \\epsffile{limm2_anym0_189_last.eps}}\n \\caption{Lower limit on $M_2$\n as a function of\n $\\tan\\beta$ and for any value of $m_0$, when combining\nthe chargino neutralino and scalar lepton searches. }\n \\label{fig:neutralino_any_b}\n \\end{center}\n\\end{figure} \n\n\\end{document}\n\n\n\n\n\n\n\n\n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{INTRODUCTION}\n\nRobots that can operate in unconstrained environments and collaborate with humans must be capable of learning about new objects they may encounter. Pointing at an object with our hand is a natural way to communicate with the robot about a new object. In this paper, we consider the problem of teaching robots novel objects by pointing at the new object. Once we show the robot a new object, it can generate and store a feature vector corresponding to that object and then re-use it for one-shot localization of the object in new scenes (Fig.~\\ref{fig:hero}).\n\nNeural networks have been used in recent years to learn fully differentiable visuomotor policies that directly map pixels to actuator commands~\\cite{levine2016end,zhang2018deep,yu2018one,rahmatizadeh2018virtual,giusti2015machine}. The neural network architecture typically used for such policies can be decomposed into vision layers and control layers. 
The vision layers comprise of several convolutional and pooling layers followed by a spatial attention mechanism that attends to the objects of interest in the image. We propose modulating the spatial attention so as the network is able to attend to the object that the hand is pointing at (see Fig.~\\ref{fig:hero}) while ignoring the other distracting objects in the scene.\n\nIn this work, we assume that only the location of the object of interest is available for training and that the position and orientation of the pointing hand are unavailable. So, this is a weakly supervised learning problem where the neural network must figure out as part of the learning process that the pointing hand in the image is salient and then learn to attend to the object being pointed at. On the other hand, this assumption makes the process of data acquisition with a real robot easier by reducing the labeling effort\n\nUnlike other papers on object detection~\\cite{redmon2016you,ren2015faster}, we are primarily interested in teaching robots new objects. This means that we are interested in objects not seen by the neural network during training. We accomplish this using Siamese networks~\\cite{koch2015siamese,venkatesh2019one,bertinetto2016fully}, which are twin neural networks with shared weights. The idea is to use the neural network to obtain from the image a feature vector representing the object of interest rather than classifying the contents of the image as is usually done (Fig.~\\ref{fig:hero}). This vector can be subsequently used in new environments to find the novel object of interest.\n\n\\begin{figure}[!t]\n \\centering\n \\includegraphics[width=1.0\\linewidth]{figures\/hero.PNG}\n \\caption{One-shot localization of novel object selected by the pointing hand. The feature vector of the object (blue bottle cap) that the hand is pointing at is extracted using the proposed attention modulation mechanism. This is then used to localize the object in new scenes. 
Note that the pointing finger is at considerable distance from the object of interest.}\n \\label{fig:hero}\n\\end{figure}\n\nOur contributions are as follows:\n\\begin{itemize}\n \\item We propose a spatial attention modulation mechanism that endows the neural network with the ability to selectively attend to the object that is being pointed at while ignoring other distracting objects in the scene.\n \\item We show that the proposed method can be combined with Siamese networks to teach robots novel objects.\n\\end{itemize}\n\nThe proposed network architecture is trained on synthetic data constructed from a dataset of emojis. We demonstrate the proposed method on the Dobot Magician robot arm. We show that the robot learns new objects that we point at and can find them in new scenes.\n\nThe rest of this paper is organized as follows. In the next section, papers related to this work are discussed. In Section~3, the proposed model architecture is described. Section~4 details experimental results, and Section~5 concludes the paper.\n\n\n\\section{RELATED WORK}\n\\subsection{Hand Recognition}\nOne of the ways to design a system that can infer the object of interest in a similar scenario is to use a pipelined approach. For example, one could employ deep learning models trained to localize human hands in an image~\\cite{openpose, doosti2019hand}, extract the most relevant keypoints of the hand (say the joints of the index finger) and then fit a line that passes through these keypoints. An object recognition module could then be used to localize each object in the scene, project these points on the line and pick the object corresponding to the closest point to the the hand as the object of interest. However, training such a system requires a strong level of supervision such as the positions of all objects in the scene, and possibly even the keypoints of the hand if one would like to fine-tune the hand localization models. 
However, this approach will not be feasible in a weak supervision setting as outlined in this paper where only the location of the object of interest is given. Moreover, it has been shown across a wide range of problems that using an end-to-end approach leads to better performance as compared to using a pipelined approach for a given task~\\cite{gupta2016synthetic, end_to_end_speech, zhang2018deep}.\n\n\\subsection{Spatial Attention}\nThe architecture of typical end-to-end networks for visuomotor tasks can be broadly grouped into two sets of layers. The initial group of layers form the vision layers that help in localizing the relevant objects in the image. The remaining layers form what is known as the control layers which are responsible for coming up with the appropriate control actions required to perform the task at hand. A key component in such end-to-end networks is some form of a spatial attention mechanism that learns to attend to the relevant object of interest in the scene. The work presented in~\\cite{zhang2018deep} demonstrates the use of imitation learning for teaching a PR2 robot to perform simple tasks such as pick-and-place. The authors developed a virtual reality based system to teleoperate the robot and collect training data. The data was then used to train an end-to-end network that maps image pixels directly to robot joint velocities. The network consists of an initial set of convolution layers that generates a feature map. The feature map is passed through a spatial softargmax layer to output a feature vector. The resulting feature vector is then passed through a few fully connected layers to predict the joint velocities of the robot. 
The spatial softargmax layer serves as a simple spatial attention mechanism where the attention weight corresponding to each pixel of the feature map depends on the degree of activation.\n\n\\subsection{One Shot Learning}\nApart from inferring the object of interest in an image in the presence of other objects, another goal of this paper is to enable robots to recognize objects that they have never encountered before by training it on only a few examples involving the novel object. Broadly speaking, meta learning and Siamese networks are two approaches one can take to achieve this. We review both approaches below.\n\n\\subsubsection{Meta Learning}\nIn Meta learning, also known as learning to learn, a distribution of tasks are provided as training data. Typically only a few examples for each task are provided in the training data. The weights of the network after the training process completes serves as a good initialization for the network to learn to perform any new previously unseen task. Only a few training examples involving the novel task and a few gradient descent steps are required for the network to converge to an optimal set of weights~\\cite{maml}. Recently, meta learning and imitation learning has been combined to enable robots to perform novel tasks such as pick-and-place by training on just a single example \\cite{osiml, daml}.\n\n\\begin{figure*}[!t]\n \\centering\n \\includegraphics[width=0.99\\linewidth]{figures\/archext.PNG}\n \\caption{The proposed neural network architecture for one-shot localization of the object selected by the pointing hand. The convolutional layers in the ``Conv\" block are Conv3x3(16)-ELU-Conv3x3(32)-ELU-Conv3x3(64)-ELU-MaxPool2x2-Conv3x3(64)-ELU-Conv3x3(128)-ELU-MaxPool2x2-Conv3x3(256)-ELU-Conv3x3(512)-ELU-Conv3x3(1024)-ELU. All convolutions are ``valid\" convolutions that do not use padding so that the feature vector for the object is the same regardless of whether it is near the edge of the image or at the center. 
The receptive field of the ``Conv\" block is 34~px.}\n \\label{fig:arch}\n\\end{figure*}\n\n\\subsubsection{Siamese Networks}\nSiamese networks are used to address the similarity learning problem where it is desirable to infer if a pair of images (referred to exemplar and search images) are similar to each other or not. This is done by using twin convolution neural networks with shared weights that transform the images $x_1$ and $x_2$ into feature embeddings $\\phi(x_1)$ and $\\phi(x_2)$, respectively. The embedding pair is then combined using a transformation $g$ that can be used to make suitable predictions depending on the task at hand. For example, in the context of image classification~\\cite{koch2015siamese} the transformation $g$ is a distance metric that can be used to measure the similarity score between the object in the exemplar image and the search image. The system is trained on several examples of similar and dissimilar pairs of images. Once training is complete a database of images is built with one image corresponding to each object of interest. At test time the similarity of the search image is tested against each image in the database to determine the object class of the search image. Siamese networks have been used in face recognition systems as well~\\cite{deepface}. However, in both these papers the comparison is possible only if the exemplar and search images are of the same dimension. \n\nThe authors of~\\cite{bertinetto2016fully} used fully convolutional neural networks to enable comparison of images of dissimilar dimensions with Siamese networks. Their architecture was adapted successfully for object tracking in videos. Here the user provides the exemplar image by cropping out the object of interest from the first frame of the video which is then compared against each subsequent frame using the Siamese network. 
More recently, the authors in~\\cite{venkatesh2019one} combined fully convolutional Siamese networks with spatial attention to enable object localization for robot pick-and-place tasks. The paper explores specifying the object of interest by using visual cues instead of requiring the user to provide a cropped image of the object. Given a group of objects in a scene the user indicates to the robot the object of interest by shining a laser beam directly on it. Although the authors talk of localizing novel objects using a laser beam as a visual cue, the network designed by them should work for any other kind of visual cue (such as a stick or even a hand) so long as it is in very close proximity with the object of interest. However, a human merely has to point at an object from a distance to convey that it is of interest and an observer infers and localizes the object being referred to by looking in the direction of the pointing hand. We would like to design systems that communicate intent to robots much like how humans communicate with each other via visual cues or using natural language~\\cite{blocks, touchdown}. Learning to localize an object of interest from natural language instructions requires a different architecture design compared to the one presented in this paper. We will restrict our focus to learning to localize the object of interest by using a visual cue provided such as a hand pointing at the object from a distance.\n\n\n\n\n\\section{NETWORK ARCHITECTURE}\n\n\n\\subsection{Localizing the Object of Interest}\n\n\n\n\n\n\n\n\nThe proposed neural network for one-shot localization is shown in Fig.~\\ref{fig:arch}. Let the exemplar image (denoted as $x$) correspond to the image that contains the novel object that is being pointed at by the hand. Let the search image (denoted as $\\hat{x}$) be the image of the new scene in which the same object must be localized. 
The network outputs the locations of the object in the exemplar image and the search image which are denoted as $(p_x, p_y)$ and $(\\hat{p}_x, \\hat{p}_y)$, respectively. The mean squared error loss is used to train the network.\n\nThe localization of the object is performed in a similar fashion as described in \\cite{venkatesh2019one} except for the attention modulation block. The exemplar image is passed through the CNN to obtain a feature map $x_f$. This is then passed through a bottleneck convolutional layer (conv$1\\times1$) to obtain $x_o$. Let us ignore for now how the attention modulation map $x_m$ is generated. We will describe the generation of the attention map $x_m$ in Section~\\ref{sec:attn-mod}. A spatial attention map $x_{o^*}$ is generated by adding the attention modulation map $x_m$ to $x_o$ and the resulting sum is passed through a spatial soft-argmax layer whose output is the predicted location of the object of interest $(p_x, p_y)$ in the exemplar image $x$ (see Eqns. \\eqref{eqn:x_ostart}, \\eqref{eqn:softargmax_x} and \\eqref{eqn:softargmax_y}). The spatial attention map $x_{o^*}$ is used to obtain the feature vector $f$ corresponding to the object of interest from the feature map $x_f$ (see Eqn. \\eqref{eqn:feat_vec}). 
Note that $\\odot$ in Fig.~\\ref{fig:arch} corresponds to the element-wise multiplication operation (with the appropriate broadcasting done to account for the different number of channels present in $x_f$ and $x_{o^*}$).\n\n\n\\begin{equation}\n x_{o^*_{i, j}} = softmax_{i, j} \\big(x_{o_{i, j}} + x_{m_{i, j}}\\big)\n \\label{eqn:x_ostart}\n\\end{equation}\n\n\\begin{equation}\n p_x = \\sum_{i, j} x_{o^*_{i, j}} i\n \\label{eqn:softargmax_x}\n\\end{equation}\n\\begin{equation}\n p_y = \\sum_{i, j} x_{o^*_{i, j}} j\n \\label{eqn:softargmax_y}\n\\end{equation}\n\n\\begin{equation}\n f = \\sum_{i, j} x_{o^*_{i, j}} x_{f_{i, j}}\n \\label{eqn:feat_vec}\n\\end{equation}\n\nThe localization of the object in the search image $\\hat{x}$ is done by first passing $\\hat{x}$ through the CNN to obtain $\\hat{x}_f$. Then the feature vector $f$ is used like a matched filter (or equivalently as a conv1$\\times$1 layer with $f$ as the weights) to generate $\\hat{x}_{o^*}$. The location of the object $(\\hat{p}_x,\\hat{p}_y)$ in the search image $\\hat{x}$ is then determined by passing $\\hat{x}_{o^*}$ through a spatial soft-argmax layer (similar to the operations in Eqns.~\\eqref{eqn:softargmax_x} and \\eqref{eqn:softargmax_y}).\n\n\n\n\\subsection{Generating the Attention Modulation Map}\n\\label{sec:attn-mod}\n\n\\begin{figure}[!t]\n \\centering\n \\includegraphics[width=1.0\\linewidth]{figures\/attn_mod.PNG}\n \\caption{Beam \/ Cone like attention modulation maps for different positions and orientations of the pointing hand. The dark regions correspond to a value of -2.0 which suppresses peaks corresponding to objects in $x_o$ (Fig.~\\ref{fig:arch}), whereas the bright regions correspond to value 0.0 which allows values in that area in $x_o$ to pass through unchanged. 
The beam width is \\ang{30}, and the step size is \\ang{15}.}\n \\label{fig:attn_mod}\n\\end{figure}\n\nWhen there are multiple objects in the scene, we expect multiple bright spots each corresponding to an object in $x_o$ (Fig.~\\ref{fig:sample_non_siamese}). We would like to suppress the peaks in $x_o$ corresponding to objects that are not being pointed at. Had we known the location and orientation of the hand, we could have directly suppressed the irrelevant peaks. However, since we do not have labels corresponding to the pose of the pointing hand in the scene, the neural network must learn to attend to the hand and then use this to suppress the irrelevant peaks in $x_o$. To enable this, we use a ``soft\" or differentiable way to compute the position and orientation of the hand which is then employed to suppress irrelevant objects in $x_o$.\n\nThe feature map $x_f$ is passed through two independent bottleneck layers to produce maps $x_{hp}$ and $x_{ho}$ corresponding to the position and orientation of the pointing hand respectively in $x$. Similar to Eq.~\\eqref{eqn:feat_vec}, spatial attention is used to attend to the pointing hand and to obtain the orientation of the hand $x_{ho^*}$. The final position and orientation of the hand ($x_h$) is used to ``soft select\" a pre-defined attention modulation map $x_m$ (see Fig.\\ref{fig:attn_mod}). The set of pre-defined attention modulation maps include beams from all possible locations and orientations of the pointing hand as shown in Fig.~\\ref{fig:attn_mod}. Each modulation map is constructed by drawing a beam emanating from the position of the hand and in the direction the hand is pointing at. We use an orientation step size of $\\ang{15}$ with a beam width of $\\ang{30}$. Thus, there are 30$\\times$30$\\times$24 $=$ 21600 such maps. Note that no explicit loss function is used to learn $x_{hp}$ and $x_{ho}$. 
Rather, the network learns to predict appropriate values for $x_{hp}$ and $x_{ho}$ that result in the ``selection\" of an appropriate attention map, which is possible only by correctly recognizing the position and orientation of the hand. The modulation map thus obtained, $x_m$, is added to $x_o$ to highlight the object being pointed at while suppressing the irrelevant ones. Thus, the pixels in $x_o$ that lie inside the beam are passed as is whereas the pixels that lie outside the beam are suppressed. Note that the entire attention modulation scheme is differentiable and hence can be learned through back-propagation.\n\nThe proposed way of creating attention modulation maps is most suitable for top view images (Fig.~\\ref{fig:sample_non_siamese}). For perspective views where the depth of the object is more relevant, it may be necessary to generate maps in 3D by casting a cone of rays and using the perspective projection. We leave this for future work.\n\n\n\n\n\\section{EXPERIMENTAL RESULTS}\n\n\n\nTo evaluate the proposed neural network, we first train it on a synthetic dataset and compare it with alternative architectures. The trained network is deployed on a robot arm to demonstrate its real world performance.\n\n\\subsection{Localization Performance}\nA dataset of 5000 training images and 1000 test images is created by placing emojis (Fig.~\\ref{fig:emoji}) at non-overlapping positions against a backdrop as shown in Fig.~\\ref{fig:sample_non_siamese}. A hand emoji (Fig.~\\ref{fig:hand_emoji}) is placed at a random location pointing to an object. One or more distracting objects are placed at random locations not on the line segment between the pointing hand and the object. 
The label for each sample is the position of the object that the hand is pointing at.\n\nTo evaluate the proposed spatial attention modulation mechanism, we first consider only localization of the object in the image containing the pointing hand ($x$) while ignoring the other input ($\\hat{x}$) and the output of the Siamese network $(\\hat{p}_x, \\hat{p}_y)$. Table~\\ref{table:compare} compares the proposed approach with two baselines. The FC layers baseline refers to using fully connected layers\\footnote{The fully connected layers used are FC1024-ELU-FC256-ELU-FC2.} to predict $(p_x, p_y)$ from $x_f$. The Conv layers baseline uses convolutional layers\\footnote{Conv3x3(2048)-ELU-MaxPool2x2-Conv3x3(2048)-ELU-Conv3x3(2048)-ELU-MaxPool2x2-Conv3x3(2048)-ELU-Conv3x3(2048)-ELU-FC2.} to predict the position of the object. The networks are trained with mean squared error loss with weight decay 1e-8 using the Adam optimizer\\cite{kingma2014adam} with learning rate 1e-4. The evaluation metric is accuracy where we consider the prediction to be accurate if there is sufficient overlap between the ground truth and predicted bounding box. Specifically, the IOU (intersection-over-union) between the ground truth bounding box and the predicted bounding box has to be at least 0.5. All three networks achieve low training error (accuracy over 99\\%), but the test error varies, and the proposed approach generalizes the best. A sample output is shown in Fig.~\\ref{fig:sample_non_siamese} where we observe that the spatial attention modulation mechanism is working as one might expect.\n\nA second dataset containing images corresponding to a new environment ($\\hat{x}$) where the object highlighted by the pointing is present along with distracting objects is constructed as before (Fig.~\\ref{fig:sample_siamese_synth}). The proposed architecture in its entirety with the Siamese network to process $\\hat{x}$ and predict $(\\hat{p}_x, \\hat{p}_y)$ is trained on this dataset. 
The accuracy on this dataset drops only marginally to 95.31\\%. Table~\\ref{table:compare2} compares performance on this dataset and shows that attention modulation is essential to localize the desired object. The sample output in Fig.~\\ref{fig:sample_siamese_synth} shows that the desired object in $\\hat{x}$ is being attended to.\n\n\\begin{figure}[!t]\n \\centering\n \\includegraphics[width=0.99\\linewidth]{figures\/emoji.png}\n \\caption{A few sample objects used for training and evaluation. The set of emojis is divided into 2075 for training and 703 for testing.}\n \\label{fig:emoji}\n\\end{figure}\n\n\\begin{figure}[!t]\n \\centering\n \\includegraphics[width=0.9\\linewidth]{figures\/hand_emoji.png}\n \\caption{A few sample hand images used for training and evaluation. The set of hand emojis is divided into 47 for training and 8 for testing.}\n \\label{fig:hand_emoji}\n\\end{figure}\n\n\n\\begin{figure}[!t]\n \\centering\n \\includegraphics[width=0.99\\linewidth]{figures\/sample_nonsiamese2.png}\n \\caption{Sample prediction of the proposed architecture. The network has properly localized the pointing hand and chosen a suitable attention modulation map. 
The activation corresponding to the object that is not pointed at has been appropriately suppressed in $x_{o^*}$.}\n    \\label{fig:sample_non_siamese}\n\\end{figure}\n\n\\begin{table}[!t]\n\\caption{Comparison of the proposed approach with different baselines}\n\\label{table:compare}\n\\begin{center}\n\\begin{tabular}{ll}\n\\hline\nNeural Network Architecture & Accuracy\\\\\n\\hline\nFC layers & 11.72\\%\\\\\nConv layers & 41.41\\%\\\\\nProposed approach & 96.88\\%\\\\\n\\hline\n\\end{tabular}\n\\end{center}\n\\end{table}\n\n\\begin{table}[!t]\n\\caption{Comparison of localization performance of the Siamese network on novel objects with and without attention modulation}\n\\label{table:compare2}\n\\begin{center}\n\\begin{tabular}{ll}\n\\hline\nNeural Network Architecture & Accuracy\\\\\n\\hline\nWithout Attention Modulation\\cite{venkatesh2019one} & 12.5\\%\\\\\nProposed approach (with modulation) & 95.31\\%\\\\\n\\hline\n\\end{tabular}\n\\end{center}\n\\end{table}\n\n\\begin{figure}[!t]\n    \\centering\n    \\includegraphics[width=0.99\\linewidth]{figures\/sample_siamese_synth.png}\n    \\caption{A sample prediction from the proposed architecture on synthetic data.}\n    \\label{fig:sample_siamese_synth}\n\\end{figure}\n\n\\subsection{Evaluation on Robot Arm}\nWe demonstrate the proposed neural network using the Dobot Magician, a 3-DoF robot arm (Fig.~\\ref{fig:dobot}). The objects used for evaluation with the robot are shown in Fig.~\\ref{fig:real_objs}. To convert the localized object in pixel space to the robot co-ordinate space, a chessboard calibration pattern is used (Fig.~\\ref{fig:calib}), and OpenCV is used for calibration. Figure~\\ref{fig:sample_siamese} shows a sample prediction from the proposed neural network. We see that the pointing hand has been localized, and the network has learnt to predict an appropriate attention modulation map that selects the object being pointed at (blue bottle cap in Fig.~\\ref{fig:sample_siamese}). 
We also see that the activation corresponding to the distracting object in $x_{o^*}$ has been successfully suppressed. With the feature vector $f$ corresponding to the bottle cap extracted, the Siamese net successfully attends to the same bottle cap in a new scene ($\\hat{x}$). In this manner, 20 trials were performed. The proposed network localizes the desired object to within 1~cm in all the trials. A video of the robot in operation is available at \\url{https:\/\/youtu.be\/bJ5HKllhqLg}.\n\n\\begin{figure}[!t]\n \\centering\n \\includegraphics[width=0.85\\linewidth]{figures\/dobot_1.png}\n \\caption{A sample demonstration with the Dobot Magician robot arm.}\n \\label{fig:dobot}\n\\end{figure}\n\n\\begin{figure}[!t]\n \\centering\n \\includegraphics[width=0.85\\linewidth]{figures\/real_objs.jpg}\n \\caption{Objects used for evaluating the proposed approach on the Dobot arm.}\n \\label{fig:real_objs}\n\\end{figure}\n\n\\begin{figure}[!t]\n \\centering\n \\includegraphics[width=0.85\\linewidth]{figures\/calib2.png}\n \\caption{The chessboard calibration pattern used to convert pixels to robot co-ordinates.}\n \\label{fig:calib}\n\\end{figure}\n\n\\begin{figure}[!t]\n \\centering\n \\includegraphics[width=0.99\\linewidth]{figures\/sample_siamese.png}\n \\caption{A sample prediction from the proposed architecture.}\n \\label{fig:sample_siamese}\n\\end{figure}\n\\section{CONCLUSIONS}\n\nWe have proposed a spatial attention modulation method that endows a neural network with the ability to attend to a hand pointing at an object in an image and to focus on the object that is being pointed at. The proposed approach generalizes significantly better compared to architectures that use only fully connected or convolutional layers for localization. Furthermore, this approach can be combined with a Siamese network to localize objects that were not present in the training dataset. 
This network architecture can be used in building robots that can interact naturally with humans and learn about new objects over time.\n\n \n \n \n \n \n\n\n\n\n\n\n\n\n\\section*{ACKNOWLEDGMENT}\n\nThis project was supported by the Robert Bosch Center for Cyber-Physical Systems.\n\n\n\n\n\\bibliographystyle{IEEEtran}\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}}