diff --git "a/data_all_eng_slimpj/shuffled/split2/finalzzgxcd" "b/data_all_eng_slimpj/shuffled/split2/finalzzgxcd" new file mode 100644--- /dev/null +++ "b/data_all_eng_slimpj/shuffled/split2/finalzzgxcd" @@ -0,0 +1,5 @@ +{"text":"\\section{Introduction}\nConsider the ball $\\bar{B}(C,R)$ in the space where $C=(a,b,c)\\in \\mathbb{R}^3$, $R>0$ and $$\\bar{B}(C,R)=\\{(x,y,z)\\in \\mathbb{R}^3|(x-a)^2+(y-b)^2+(z-c)^2\\leq R^2\\}.$$\nAlso consider $\\sigma(C,R)$ as the boundary of $\\bar{B}(C,R)$, i.e. $$\\sigma(C,R)=\\{(x,y,z)\\in \\mathbb{R}^3|(x-a)^2+(y-b)^2+(z-c)^2= R^2\\}.$$\nThe following result has proved in \\cite{drag}, which is the Hermite-Hadamard inequality for convex functions defined on a ball $\\bar{B}(C,R)$.\n\\begin{theorem}\nLet $\\bar{B}(C,R)\\to \\mathbb{R}$ be a convex mapping on the ball $\\bar{B}(C,R)$. Then we have the inequality:\n\\begin{align}\\label{eq.08}\n&f(a,b,c)\\leq\\frac{3}{4\\pi R^3}\\int\\int\\int_{\\bar{B}(C,R)}f(x,y,z)dv\\leq\\\\\n&\\frac{1}{4\\pi R^2}\\int\\int_{\\sigma(C,R)}f(x,y,z)d\\sigma.\\notag\n\\end{align}\n\\end{theorem}\nMotivated by (\\ref{eq.08}), we obtain some trapezoid and mid-point type inequalities related to the Hermite-Hadamard inequality for the mappings defined on a ball $\\bar{B}(C,R)$ in\nthe space. In this paper we use the spherical coordinates to prove our results.\n\n\\section{Main Results}\n\n\nThe following is trapezoid type inequalities related to the (\\ref{eq.08}) for the mappings defined on $\\bar{B}(C,R)$.\n\\begin{theorem}\nSuppose that $\\bar{B}(C,R)\\subset I^\\circ$, where $I\\subset R^3$ and consider $f:\\bar{B}(C,R)\\to \\mathbb{R}$ which has continuous partial derivatives with respect to the variables $\\rho$, $\\varphi$ and $\\theta$ on $I^\\circ$ in spherical coordinates. 
If $|\\frac{\\partial f}{\\partial \\rho}|$ is convex on $\\bar{B}(C,R)$, then\n\\begin{align}\\label{eq.08'}\n&\\bigg|\\frac{1}{4\\pi R^2}\\int\\int_{\\sigma(C,R)}f(x,y,z)d\\sigma-\\frac{1}{\\frac{4}{3}\\pi R^3}\\int\\int\\int_{\\bar{B}(C,R)}f(x,y,z)dv\\bigg|\\leq\\\\\n&\\frac{1}{16\\pi R}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(x,y,z)d\\sigma\\notag.\n\\end{align}\nFurthermore above inequality is sharp.\n\\end{theorem}\n\n\\begin{proof}\nFirst notice that\n\\begin{align}\\label{eq.09}\n&\\int\\int\\int_{\\bar{B}(C,R)}f(x,y,z)dv=\\\\\n&\\int_0^{2\\pi}\\int_0^{\\pi}\\int_0^R f(a+\\rho cos\\theta sin\\varphi, b+\\rho sin\\theta cos\\varphi, c+\\rho cos\\varphi)\\rho^2 sin\\varphi d\\rho d\\varphi d\\theta.\\notag\n\\end{align}\n\nSecond notice that\n\\begin{align}\\label{eq.10}\n&\\int\\int_{\\sigma(C,R)}f(x,y,z)d\\sigma=\\\\\n&\\int_0^{2\\pi}\\int_0^{\\pi} f(a+R cos\\theta sin\\varphi, b+R sin\\theta cos\\varphi, c+R cos\\varphi)R^2 sin\\varphi d\\varphi d\\theta.\\notag\n\\end{align}\n\nNow for fixed $\\varphi\\in [0,\\pi]$ and $\\theta\\in [0,2\\pi]$, we have\n\\begin{align}\\label{eq.11}\n&\\int_0^R\\frac{\\partial f}{\\partial \\rho}(a+\\rho cos\\theta sin\\varphi, b+\\rho sin\\theta cos\\varphi, c+\\rho cos\\varphi)\\rho^3 sin\\varphi d\\rho=\\\\\n&R^3 f(a+R cos\\theta sin\\varphi, b+R sin\\theta cos\\varphi, c+R cos\\varphi)-\\notag\\\\\n&-3\\int_0^R f(a+\\rho cos\\theta sin\\varphi, b+\\rho sin\\theta cos\\varphi, c+\\rho cos\\varphi)\\rho^2 sin\\varphi d\\rho.\\notag\n\\end{align}\n\nSo integrating with respect to $\\varphi\\in [0,\\pi]$ and $\\theta\\in [0,2\\pi]$ in (\\ref{eq.11}) along with (\\ref{eq.09}), (\\ref{eq.10}) and the convexity of $\\big|\\frac{\\partial f}{\\partial \\rho}\\big|$ imply that\n\\begin{align}\\label{eq.12}\n&\\bigg|R\\int\\int_{\\sigma(C,R)}f(x,y,z)d\\sigma-3\\int\\int\\int_{\\bar{B}(C,R)}f(x,y,z)dv\\bigg|\\leq\\\\\n&\\int_0^{2\\pi}\\int_0^{\\pi}\\int_0^R \\big|\\frac{\\partial f}{\\partial 
\\rho}\\big|(a+\\rho cos\\theta sin\\varphi, b+\\rho sin\\theta cos\\varphi, c+\\rho cos\\varphi)\\rho^3 sin\\varphi d\\rho d\\varphi d\\theta\\leq\\notag\\\\\n&\\int_0^{2\\pi}\\int_0^{\\pi}\\int_0^R \\big|\\frac{\\partial f}{\\partial \\rho}\\big|\\Big((1-\\frac{\\rho}{R})(a,b,c)+\\frac{\\rho}{R}(a+Rcos\\theta sin\\varphi, b+R sin\\theta cos\\varphi, c+R cos\\varphi\\Big)\\times\\notag\\\\\n&\\rho^3 sin\\varphi d\\rho d\\varphi d\\theta\\leq\\notag\n\\end{align}\n\\begin{align}\n&\\int_0^{2\\pi}\\int_0^{\\pi}\\int_0^R \\rho^3 \\big(1-\\frac{\\rho}{R}\\big) \\big|\\frac{\\partial f}{\\partial \\rho}\\big|(a,b,c)sin\\varphi d\\rho d\\varphi d\\theta+\\notag\\\\\n&\\int_0^{2\\pi}\\int_0^{\\pi}\\int_0^R \\frac{\\rho^4}{R}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|\\big(a+Rcos\\theta sin\\varphi, b+R sin\\theta cos\\varphi, c+R cos\\varphi\\big)sin\\varphi d\\rho d\\varphi d\\theta=\\notag\\\\\n&\\frac{\\pi R^4}{5}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(a,b,c)+\\notag\\\\\n&\\frac{R^4}{5}\\int_0^{2\\pi}\\int_0^{\\pi}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|\\big(a+Rcos\\theta sin\\varphi, b+R sin\\theta cos\\varphi, c+R cos\\varphi\\big)sin\\varphi d\\varphi d\\theta=\\notag\\\\\n&\\frac{\\pi R^4}{5}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(a,b,c)+\\frac{R^2}{5}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(x,y,z)d\\sigma\\notag.\n\\end{align}\nSince $\\big|\\frac{\\partial f}{\\partial \\rho}\\big|$ is convex, then from (\\ref{eq.08}) and (\\ref{eq.12}) we obtain that\n\\begin{align}\\label{eq.13}\n&\\bigg|R\\int\\int_{\\sigma(C,R)}f(x,y,z)d\\sigma-3\\int\\int\\int_{\\bar{B}(C,R)}f(x,y,z)dv\\bigg|\\leq\\\\\n&\\frac{R^2}{20}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(x,y,z)d\\sigma+\\frac{R^2}{5}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(x,y,z)d\\sigma=\\notag\\\\\n&\\frac{R^2}{4}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial 
\\rho}\\big|(x,y,z)d\\sigma\\notag.\n\\end{align}\nDividing (\\ref{eq.13}) with \"$4\\pi R^3$\" we obtain the desired result (\\ref{eq.08'}). For the sharpness of (\\ref{eq.08'}) consider the function $f:\\bar{B}(C,R)\\to \\mathbb{R}$ defined as\n$$f(x,y,z)=R-\\sqrt{(x-a)^2+(y-b)^2+(z-c)^2}.$$\nBy the use of spherical coordinates we have $f(\\rho,\\varphi,\\theta)=R-\\rho$, for $\\rho\\in [0,R]$, $\\varphi\\in [0,\\pi]$ and $\\theta\\in [0,2\\pi]$. With some calculations we obtain that\n\\begin{align}\\label{eq.14}\n\\frac{1}{\\frac{4}{3}\\pi R^3}\\int\\int\\int_{\\bar{B}(C,R)}f(x,y,z)dv=\\frac{1}{\\frac{4}{3}\\pi R^3}\\int_0^{2\\pi}\\int_0^{\\pi}\\int_0^R(R-\\rho)\\rho^2 sin\\varphi d\\rho d\\varphi d\\theta=\\frac{R}{4}.\n\\end{align}\nAlso\n\\begin{align}\\label{eq.15}\n&\\int\\int_{\\sigma(C,R)}f(x,y,z)d\\sigma=\\\\\n&\\int_0^{2\\pi}\\int_0^{\\pi} f(a+R cos\\theta sin\\varphi, b+R sin\\theta cos\\varphi, c+R cos\\varphi)R^2 sin\\varphi d\\varphi d\\theta=\\notag\\\\\n&\\int_0^{2\\pi}\\int_0^{\\pi} (R-R) R^2 sin\\varphi d\\varphi d\\theta=0.\\notag\n\\end{align}\nOn the other hand since $\\big|\\frac{\\partial f}{\\partial \\rho}\\big|=1$, then\n\\begin{align\n&\\frac{1}{16\\pi R}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(x,y,z)d\\sigma=\\frac{4\\pi R^2}{16\\pi R}=\\frac{R}{4}\\notag,\n\\end{align}\nwhich along with (\\ref{eq.14}) and (\\ref{eq.15}) show that (\\ref{eq.08'}) is sharp.\n\\end{proof}\nThe following is trapezoid type inequalities related to the (\\ref{eq.08}) for the mappings defined on $\\bar{B}(C,R)$.\n\\begin{theorem}\nSuppose that $\\bar{B}(C,R)\\subset I^\\circ$, where $I\\subset R^3$ and consider $f:\\bar{B}(C,R)\\to \\mathbb{R}$ which has continuous partial derivatives with respect to the variables $\\rho$, $\\varphi$ and $\\theta$ on $I^\\circ$ in spherical coordinates. 
If $|\\frac{\\partial f}{\\partial \\rho}|$ is convex on $\\bar{B}(C,R)$, then\n\\begin{align\n&\\bigg|\\frac{1}{\\frac{4}{3}\\pi R^3}\\int\\int\\int_{\\bar{B}(C,R)}f(x,y,z)dv-f(a,b,c)\\bigg|\\leq\\frac{5}{16\\pi R}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(x,y,z)d\\sigma.\\notag\n\\end{align}\n\\end{theorem}\n\\begin{proof}\nFor fixed $\\varphi\\in [0,\\pi]$ and $\\theta\\in [0,2\\pi]$ we have\n\\begin{align}\\label{eq.17}\n&\\int_0^R\\frac{\\partial f}{\\partial \\rho}(a+\\rho cos\\theta sin\\varphi, b+\\rho sin\\theta cos\\varphi, c+\\rho cos\\varphi) sin\\varphi d\\rho=\\\\\n&f(a+R cos\\theta sin\\varphi, b+R sin\\theta cos\\varphi, c+R cos\\varphi)sin\\varphi-f(a, b, c) sin\\varphi.\\notag\n\\end{align}\nIntegration with respect to the variables $\\varphi\\in [0,\\pi]$ and $\\theta\\in [0,2\\pi]$ in (\\ref{eq.17}) implies that\n\\begin{align\n&\\int_0^{2\\pi}\\int_0^{\\pi}\\int_0^R \\frac{\\partial f}{\\partial \\rho}(a+\\rho cos\\theta sin\\varphi, b+\\rho sin\\theta cos\\varphi, c+\\rho cos\\varphi) sin\\varphi d\\rho d\\varphi d\\theta=\\notag\\\\\n&\\int_0^{2\\pi}\\int_0^{\\pi} f(a+Rcos\\theta sin\\varphi, b+R sin\\theta cos\\varphi, c+R cos\\varphi\\Big) sin\\varphi d\\varphi d\\theta-\\notag\\\\\n&\\int_0^{2\\pi}\\int_0^{\\pi}f(a, b, c) sin\\varphi d\\varphi d\\theta=\\frac{1}{R^2}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(x,y,z)d\\sigma-4\\pi f(a,b,c).\\notag\n\\end{align}\nSo from the convexity of $\\big|\\frac{\\partial f}{\\partial \\rho}\\big|$ we get\n\\begin{align}\\label{eq.19}\n&\\bigg|\\frac{1}{4\\pi R^2}\\int\\int_{\\sigma(C,R)}f(x,y,z)d\\sigma-f(a,b,c)\\bigg|\\leq\\\\\n&\\frac{1}{4\\pi}\\int_0^{2\\pi}\\int_0^{\\pi}\\int_0^R \\big|\\frac{\\partial f}{\\partial \\rho}\\big|(a+\\rho cos\\theta sin\\varphi, b+\\rho sin\\theta cos\\varphi, c+\\rho cos\\varphi) sin\\varphi d\\rho d\\varphi d\\theta\\leq\\notag\\\\\n&\\frac{1}{4\\pi}\\int_0^{2\\pi}\\int_0^{\\pi}\\int_0^R 
(1-\\frac{\\rho}{R})\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(a, b, c) sin\\varphi d\\rho d\\varphi d\\theta+\\notag\\\\\n&\\frac{1}{4\\pi}\\int_0^{2\\pi}\\int_0^{\\pi}\\int_0^R \\frac{\\rho}{R}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(a+R cos\\theta sin\\varphi, b+R sin\\theta cos\\varphi, c+R cos\\varphi) sin\\varphi d\\rho d\\varphi d\\theta=\\notag\\\\\n&\\frac{R}{2}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(a, b, c)+\\frac{1}{8\\pi R}\\int\\int_{\\sigma(C,R)}f(x,y,z)d\\sigma\\notag.\n\\end{align}\nIt follows from triangle inequality, (\\ref{eq.19}), (\\ref{eq.08}) and (\\ref{eq.08'}) that\n\\begin{align*\n&\\bigg|\\frac{1}{\\frac{4}{3}\\pi R^3}\\int\\int\\int_{\\bar{B}(C,R)}f(x,y,z)dv-f(a,b,c)\\bigg|\\leq\\\\\n&\\bigg|\\frac{1}{\\frac{4}{3}\\pi R^3}\\int\\int\\int_{\\bar{B}(C,R)}f(x,y,z)dv-\\frac{1}{4\\pi R^2}\\int\\int_{\\sigma(C,R)}f(x,y,z)d\\sigma \\bigg|+\\\\\n&\\bigg|\\frac{1}{4\\pi R^2}\\int\\int_{\\sigma(C,R)}f(x,y,z)d\\sigma-f(a,b,c)\\bigg|\\leq\\\\\n&\\frac{1}{16\\pi R}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(x,y,z)d\\sigma+\\frac{R}{2}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(a, b, c)+\\frac{1}{8\\pi R}\\int\\int_{\\sigma(C,R)}f(x,y,z)d\\sigma\\leq\\notag\\\\\n&\\frac{1}{16\\pi R}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(x,y,z)d\\sigma+\\frac{1}{8\\pi R}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(x,y,z)d\\sigma+\\notag\\\\\n&\\frac{1}{8\\pi R}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(x,y,z)d\\sigma=\\frac{5}{16\\pi R}\\int\\int_{\\sigma(C,R)}\\big|\\frac{\\partial f}{\\partial \\rho}\\big|(x,y,z)d\\sigma,\\notag\n\\end{align*}\nwhich implies the desired result.\n\\end{proof}\n\n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section*{Acknowledgement}\nB.R. and J.C. contributed equally to this work. 
This work was supported by KERI primary research program through the NST funded by the MSIT of the Republic of Korea (ROK): grant No. 19-12-N0101-22. It was also supported by the KETEP and the MOTIE of the ROK: \ngrant Nos. 20162000000910, 20172010000830, 20188550000290.\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{Introduction}\nPersistent homology~\\cite{EdelsHarer} is a widely applicable stable descriptor of metric spaces. It is usually computed by treating coboundary matrices, in effect computing the isomorphic persistent cohomology. The reason is that the structure of cohomology is more amenable to certain speedups and in most cases results in faster computation~\\cite{bauer2019ripser, de2011circular, de2011dualities}.\n\nWhile persistent homology contains information about the lifespans of homology elements interpreted as holes, the actual cycles representing these holes can be extracted from the reduced boundary matrix. When computations are carried out by reducing the coboundary matrix though, we can only obtain representative cocycles. These are favorable in some specific settings~\\cite{de2011circular}, but in general cycles are the much preferred method of visualization and expression of persistent homology elements. The difference between them is demonstrated in Figure \\ref{fig:cycles-cocycles}.\n\n\n\\textbf{Contributions}. In this paper, we present an algorithm that allows us to leverage the speed of\npersistent cohomology computation and to recover representative cycles. 
The essential part of the algorithm has two distinct phases:\n\\begin{enumerate}\n \\item Reduce the coboundary matrix and extract simplices contributing to the computation of representative cycles.\n \\item Reduce the boundary matrix restricted to the columns determined by the previous part.\n\\end{enumerate}\n\nIn effect we use the reduced coboundary matrix to determine the death simplices of persistence pairs and then ``ignore'' all other columns in the reduction of the boundary matrix.\nWe call this approach involuted homology computation.\nThe efficiency of our algorithm as compared to the standard approach depends on the disparity of the reduction times for persistent homology and cohomology.\nWhile the algorithm works for any filtration, we restrict our comparison to Rips complexes, as the speedup there is the most significant.\nA comparative analysis suggests our approach is the fastest way to obtain the representatives in almost all settings.\n\n\\textbf{Related work}. In this paper we describe how to compute homology representatives arising from the reduction process of persistent homology in any dimension. Further modifications and optimizations of these cycles have been treated in~\\cite{Tamal1, Tamal2, Chao, Optimal1, Optimal2}.\nIn a different setting a nominally similar but essentially different problem has been considered in~\\cite{Kozlov}. 
Amongst the existing software Eirene~\\cite{henselman2016matroid} seems to handle the representative computations efficiently and has thus been used for comparative purposes.\n\n\n\\begin{figure}\n \\includegraphics[width=.8\\textwidth]{cycle-cocycle-comparison}\n \\caption{Comparison of persistence cocycles and persistence cycles on a data set consisting of 20 points sampled randomly from a circle, 12 points sampled uniformly from a circle, and 40 points sampled randomly from an annulus.}\n \\label{fig:cycles-cocycles}\n\\end{figure}\n\n\\section{Theoretical background}\n\nIn this section we review theoretical background on persistent homology and cohomology. For further details on see~\\cite{EdelsHarer} and~\\cite{de2011dualities}.\n\n\\subsection{Persistent homology}\n\nThroughout the paper we fix a field of coefficients $\\mathbb{F}$ for all homology and cohomology groups. Let $K$ be a finite simplicial complex. A \\textbf{filtration} of $K$ is a nested collection of subcomplexes:\n$$\nK_1 \\leq K_2 \\leq \\ldots, K_m=K.\n$$\nA \\textbf{filtration function} associated to a filtration is a function $\\varphi$ assigning to each simplex $\\sigma \\in K$ the index $\\varphi(\\sigma)=\\argmin_i \\{\\sigma \\in K_i\\}$. Throughout the paper we assume that $\\varphi$ is injective, i.e., each $K_i$ is obtained from $K_{i-1}$ by an addition of a single simplex $\\sigma_i$.\n\nLet $p\\in\\{0,1,\\ldots\\}$. \\textbf{Persistent homology} in dimension $p$ is the collection of ranks of inclusion induced maps of a filtration on homology. In particular, it consists of \\textbf{Betti numbers}\n$$\n\\beta^p_{i,j} = \\rank H_p (K_i \\hookrightarrow K_{j}), \\quad \\forall 1 \\leq i \\leq j \\leq m.\n$$\nPersistent homology is typically visualized and described by a persistence diagram. 
For each $i$, the addition of and $n$-dimensional simplex $\\sigma_i$ to $K_{i-1}$ either:\n\\begin{enumerate}\n \\item creates an $n$-dimensional homology class, or\n \\item destroys an $(n-1)$-dimensional class.\n\\end{enumerate}\nSimplices of type (1) are called \\textbf{birth simplices}. A simplex $\\sigma_i$ is a birth simplex iff its boundary is a linear (with coefficients in $\\mathbb{F}$) combination of boundaries in $K_{i-1}$.\n\nSimplices of type (2) are called \\textbf{death simplices}. A simplex $\\sigma_i$ is a birth simplex iff its boundary is not a linear (with coefficients in $\\mathbb{F}$) combination of boundaries in $K_{i-1}$.\n\nEach death $n$-simplex $\\sigma_i$ is paired to a unique birth $(n-1)$-simplex $\\sigma_j$ with $j < i$ to form a \\textbf{persistence} (homology) \\textbf{pair}. The addition of $\\sigma_j$ creates a homology class destroyed by $\\sigma_i$. Birth simplices not contained in any persistence pair are called \\textbf{essential} simplices.\n\nA \\textbf{persistence diagram} is a collection of points $$\\{(i,j) \\mid (\\sigma_i, \\sigma_j) \\textit{ a persistence pair } \\} \\ \\cup\n$$\n$$\n\\cup \\ \\{(i, \\infty) \\mid \\sigma_i \\textit{ an essential simplex } \\}$$\n\n\n\n\\subsection{Reduction algorithm}\n\nThe original reduction algorithm of~\\cite{ELZ} returns persistence pairs and essential simplices. Let $\\partial$ be the full boundary matrix of $K$ with the columns and rows indexed by $\\{1,2,\\ldots, m\\}$. Index $i$ represents simplex $\\sigma_i$. 
Given a matrix $A$ whose rows and columns are indexed by $i\\in \\{1,2,\\ldots, m\\}$ define:\n\\begin{itemize}\n\\item $\\col_A(i)$ as the $i$th column of $A$ as a vector.\n\\item $\\row_A(i)$ as the $i$th row of $A$ as a vector.\n\\item $\\low_A(i)$ as the index of the lowest non-trivial entry in $\\col_A(i)$ or $0$ if the column is trivial.\n\\end{itemize}\n\nThe reduction algorithm is essentially a column reduction process.\n \\medskip\n\n\\begin{algorithm}[H]\n\\label{Alg1}\n \\KwResult{reduced boundary matrix}\n\\For{$i=1,2, \\ldots, m$}\n\t{\n\t\\For{$j=m-1,m-2, \\ldots, 1$}\n\t\t{\n\t\t\\If {$\\low_\\partial(i)=\\low_\\partial(j)\\neq 0$}\n\t\t\t{\n\t\t\t$\\lambda= \\partial(i,\\low_\\partial(i))\/ \\partial(j,\\low_\\partial(j))$ \\\\\n\t\t\t$\\col_\\partial(i) = \\col_\\partial(i) - \\lambda \\col_\\partial(j)$\n\t\t\t}\n\t\t}\n\t}\n\\KwRet{$\\partial$}\\\\\n\\medskip\n\n \\caption{Column reduction algorithm for persistent homology.}\n\\end{algorithm}\n\\medskip\n\nLet $\\partial'$ denote the reduced boundary matrix as reduced by Algorithm \\ref{Alg1}. If $\\col_{\\partial'}(i)$ is not trivial then $(\\sigma_{\\low_{\\partial'}(i)}, \\sigma_i)$ is a persistence pair. Simplices unpaired in this manner are essential simplices.\n\n\n\\subsection{Representatives}\n\nThe standard representative of a persistence pair $(\\sigma_{\\low_{\\partial'}(i)}, \\sigma_i)$ is the vector represented by $\\col_{\\partial'}(i)$. A representative is a chain whose homology class spans the homology that appeared at $\\low_{\\partial'}(i)$ and died at $i$.\n\nGiven an essential simplex $\\sigma_i$ Algorithm \\ref{Alg1} reduces $\\col_\\partial(i)$ to the trivial column, i.e., there exist $\\lambda_j\\in \\mathbb{F}$ such that $\\col_\\partial(i) - \\sum_{j=1}^{i-1} \\lambda_j \\col_\\partial(j)$. The standard representative corresponding to the essential simplex $\\sigma_i$ is $\\sigma_i - \\sum_{j=1}^{i-1} \\lambda_j \\sigma_j$. 
This representative is a chain whose homology class appears at $\\low_{\\partial'}(i)$ and never dies.\n\n\n\n\\subsection{Persistent cohomology}\n\nWhile the definition of homology $H_p(K)$ is based on chains ($\\mathbb{F}$-combinations of simplices), the definition of cohomology $H^p(K)$ (see~\\cite{Hatcher} for an introduction) is based on cochains ($\\mathbb{F}$-linear maps from the space of chains into $\\mathbb{F}$). It turns out that both invariants are isomorphic but that cohomology is contravariant in the sense that it reverses the direction of induced maps. The following result is a well known consequence of the universal coefficients theorem and contravariant functoriality of cohomology. In our setting it has first appeared in ~\\cite{de2011dualities}.\n\n\\begin{theorem}\n\\label{ThmMain}\nLet $K \\hookrightarrow L$ be an inclusion of simplicial complexes. Then for each dimension $p$ there exists a commutative diagram\n$$\n\\xymatrix\n{H_p(K) \\ar[d] \\ar[r] & H_p(L) \\ar[d] \\\\\nH^p(K) & H^p(L) \\ar[l]}\n$$\nwith the vertical maps being isomorphisms.\n \\end{theorem}\n\n\nPersistent cohomology is constructed from a filtration (we still assume the filtration function is injective) in the same way as persistent cohomology. We refrain from repeating the construction and instead point out the fundamental differences of the computational aspect:\n\\begin{enumerate}\n \\item The standard coboundary matrix is the transpose of the boundary matrix. This results in a lower-triangular matrix and would require us to use the mentioned column reduction in the opposite direction to compute cohomology. For this reason we rather define our coboundary matrix $d$ to be the \\textbf{anti-transpose} of $\\partial$ as in~\\cite{de2011dualities}. 
In particular, $d$ is obtained from $\\partial^T$ by reversing the order of simplices labeling columns and rows.\n \\item Persistent cohomology is computed using the mentioned column reduction on $d$.\n \\item $(\\sigma_i,\\sigma_j)$ is a persistence homology pair iff $(\\sigma_j,\\sigma_i)$ is a persistence cohomology pair. Essential simplices coincide in both cases.\n \\item A cohomology representative of a persistence cohomology pair is a cochain, i.e., a linear map from the space of chains into $\\mathbb{F}$.\n\\end{enumerate}\n\nFor each $p\\in \\{0,1,\\ldots\\}$ we define $\\partial_p$ and $d_p$ be the p-dimensional (co)boundary matrices.\nIn particular, the columns of $\\partial_p$ are labeled by $p$-simplices, the rows are labeled by $(p-1)$-simplices.\nMatrix $d_p$ is the anti-transpose of $\\partial_{p-1}$. These matrices form the block structure of $\\partial$ and $d$ respectively.\n\n\\subsection{Rips complexes}\n\nGiven a finite metric space $(X,d)$ and $r>0$ the \\textbf{Rips complex} is defined as $\\Rips(X,r)=\\{\\sigma \\subseteq X \\mid \\diam(\\sigma)\\leq r\\}$. The Rips filtration of $X$ is the collection of all Rips complexes of $X$ for all positive $r$. In order to obtain a filtration with an injective filtration function we order the simplices of a Rips filtration as $\\sigma_1, \\sigma_2, \\ldots$ so that:\n\\begin{itemize}\n \\item If $\\diam(\\sigma_i) < \\diam(\\sigma_j)$ then $i 0$ and $Tr(-I) < 0$ and therefore standard complexification does not contain generalizations of complex numbers. Only internal complexification can lead to non-unitary quantal algebras and $so(2,4)$ is the only possible orthogonal solution. To obtain relativity, recall that we are looking only at a subset space defined by the constraint: $J \\alpha f = 0$. Once $J$ is selected, quantions are defined into a subspace of $so(2,4)$, the centralizer space $O_J (2,4)$. 
The centralizer reduces itself to a complex Minkowski space of dimensionality $8$: $M_0 (\\mathbb{C}) = M_0 \\oplus i M_0$ and any element $f\\in O_J (2,4)$ is of the form:\n\\begin{equation}\nf = f_r + J \\beta f_i\n\\label{newNumberSystem}\n\\end{equation}\nwith $f_r$ and $f_i$ real. \nThe linear space $L^{(2,4)}$ on which the group $SO(2,4)$ acts, is a distinguished unique space, because only in this case one can define uniquely complex conjugation as a reflection that cannot be undone by continuous transformations. \n\t\n\\subsection{Algebraic properties of quantions}\nLet us explore same basic properties of the quantions. This section will follow closely the quantionic book of Emile Grgin \\cite{GrginBook1}. The first observation is that $J = \\sqrt{(-e)}$ is not unique. There are an infinity of solutions of dimensionality 3 which are transitively related by the $SO(1,3)$ group. The algebraic unit $e$ of quantion algebra $\\mathbb D$ is a contravariant complex four vector that defines the time direction in the local frame. 
\n\nIn terms of complex numbers, a quantion is a $2\\times 2$ matrix\n\\begin{math}\n\\left(\n\\begin{tabular}{cc}\nz & v\\\\\nu & w\\\\\n\\end{tabular}\n\\right)\n\\end{math}\nwith the following multiplication rule:\n\\begin{equation}\n\\left(\n\\begin{tabular}{cc}\na & c\\\\\nb & d\\\\\n\\end{tabular}\n\\right)\n*\n\\left(\n\\begin{tabular}{cc}\nz & v\\\\\nu & w\\\\\n\\end{tabular}\n\\right)\n=\n\\left(\n\\begin{tabular}{cc}\naz + cu & av + cw\\\\\nbz + du & bv + dw\\\\\n\\end{tabular}\n\\right)\n\\end{equation}\n\nUsing the Minkowski scalar product:\n\\begin{equation}\n(u, v) \\equiv \\eta_{\\mu \\nu} u^{\\mu}v^{\\nu}\n\\end{equation}\nwhere $\\eta_{\\mu \\nu} = diag (1, -1, -1, -1)$\nand renaming the unit $e$ as $\\Omega$, the product $\\beta$ is:\n\\begin{equation}\nu \\beta v = (\\Omega , u) v + (\\Omega , v) u - (u, v) - i * (\\Omega \\wedge u \\wedge v)\n\\end{equation}\nwhere $*$ is the Hodge duality mapping.\n\nIn general, one can decompose any arbitrary quantion in the following form:\n\\begin{equation}\nu = U \\Omega + \\overrightarrow{u}\n\\end{equation}\n\nIf we introduce $\\Pi$ as the 3-dimensional hyperplane orthogonal to $\\Omega$, and choosing a set $\\{\\overrightarrow{e_1}, \\overrightarrow{e_2}, \\overrightarrow{e_3}\\}$ of orthonormal vectors in $\\Pi$, then the multiplication table for $\\beta$ is:\n\\begin{equation}\n\\begin{tabular}{|c|c|c|c|c|c|}\n\\hline\n$~~\\beta~~$ && $~~\\Omega~~$ & $\\overrightarrow{e_1}$ & $\\overrightarrow{e_2}$ & $\\overrightarrow{e_3}$ \\\\\n\\hline\n\\hline\n$\\Omega$ && $\\Omega$ & $\\overrightarrow{e_1}$ & $\\overrightarrow{e_2}$ & $\\overrightarrow{e_3}$ \\\\\n\\hline\n$\\overrightarrow{e_1}$ && $\\overrightarrow{e_1}$ & $\\Omega$ & $i \\overrightarrow{e_3}$ & $-i \\overrightarrow{e_2}$ \\\\\n\\hline\n$\\overrightarrow{e_2}$ && $\\overrightarrow{e_2}$ & $-i \\overrightarrow{e_3}$ & $\\Omega$ & $i \\overrightarrow{e_1}$ \\\\\n\\hline\n$\\overrightarrow{e_3}$ && $\\overrightarrow{e_3}$ & $i 
\\overrightarrow{e_2}$ & $-i \\overrightarrow{e_1}$ & $\\Omega$ \\\\\n\\hline\n\\end{tabular}\n\\end{equation}\n\nThis multiplication table is identical with the Pauli matrices multiplication table with the following identification: $( \\Omega \\leftrightarrow \\sigma_0, \\overrightarrow{e_i} \\leftrightarrow \\sigma_i )$. Hence, in a fixed tetrad, the algebra of quantions can be represented by the algebra of $2 \\times 2$ complex matrices. This is because the Lorenz group is isomorphic with $SL(2, \\mathbb{C})$. Expressed in terms of Pauli matrices, a quantion can be written as:\n\\begin{equation}\nq = q_0 I + \\overrightarrow{q} . \\overrightarrow{\\sigma}\n\\end{equation}\nThis form was first studied by James Edmonds \\cite{EdmondsPaper} in 1972. \n\nQuaternionic multiplication table is:\n\\begin{equation}\n\\begin{tabular}{|c|c|c|c|c|c|}\n\\hline\n$~~.~~$ && $~~1~~$ & $\\overrightarrow{i}$ & $\\overrightarrow{j}$ & $\\overrightarrow{k}$ \\\\\n\\hline\n\\hline\n$1$ && $1$ & $\\overrightarrow{i}$ & $\\overrightarrow{j}$ & $\\overrightarrow{k}$ \\\\\n\\hline\n$\\overrightarrow{i}$ && $\\overrightarrow{i}$ & $-1$ & $\\overrightarrow{k}$ & $- \\overrightarrow{j}$ \\\\\n\\hline\n$\\overrightarrow{j}$ && $\\overrightarrow{j}$ & $- \\overrightarrow{k}$ & $-1$ & $ \\overrightarrow{i}$ \\\\\n\\hline\n$\\overrightarrow{k}$ && $\\overrightarrow{k}$ & $\\overrightarrow{j}$ & $- \\overrightarrow{i}$ & $-1$ \\\\\n\\hline\n\\end{tabular}\n\\end{equation}\n\nComparing quaternions to quantions, the transformation rule between the two algebras is:\n\\begin{equation}\n\\begin{array}{rcl}\n\\Omega &=&1\\\\\ni \\overrightarrow{e_1}&=& \\overrightarrow{i}\\\\\ni \\overrightarrow{e_2}&=& \\overrightarrow{j}\\\\\ni \\overrightarrow{e_3}&=& \\overrightarrow{k}\\\\\n\\end{array}\n\\end{equation}\n\nThe linear spaces of real quantions and real quaternions are different four-dimensional slices of the algebra of complex quaternions.\n\nGiven the tetrad $\\{ \\Omega, \\overrightarrow{e_1}, 
\\overrightarrow{e_2}, \\overrightarrow{e_3}\\}$, let us introduce the null tetrad $\\{ l, n, m, \\overline{m} \\}$ by the relations:\n\\begin{equation}\n\\begin{array}{rcl}\nl &=&\\frac{1}{2} (\\Omega + \\overrightarrow{e_3})\\\\\nn &=&\\frac{1}{2} (\\Omega - \\overrightarrow{e_3})\\\\\nm &=&\\frac{1}{2} (\\overrightarrow{e_1} + i \\overrightarrow{e_2})\\\\\n\\overline{m} &=&\\frac{1}{2} (\\overrightarrow{e_1} - i \\overrightarrow{e_2})\\\\\n\\end{array}\n\\end{equation}\n\nUp to the coefficients, those are also the Newman-Penrose null tetrads \\cite{NewmanPenrose}.\n\nThe multiplication table for $\\{l, n, m, \\overline{m} \\}$ is:\n\\begin{equation}\n\\begin{tabular}{|c|c|c|c|c|c|}\n\\hline\n$~~\\beta~~$ && $~~l~~$ & $~~\\overline{m}~~$ & $~~m~~$ & $~~n~~$ \\\\\n\\hline\n\\hline\n$l$ && $l$ & $0$ & $m$ & $0$ \\\\\n\\hline\n$\\overline{m}$ && $\\overline{m}$ & $0$ & $n$ & $0$ \\\\\n\\hline\n$m$ && $0$ & $l$ & $0$ & $m$ \\\\\n\\hline\n$n$ && $0$ & $\\overline{m}$ & $0$ & $n$ \\\\\n\\hline\n\\end{tabular}\n\\end{equation}\n\nThis multiplication table was first obtained in 1882 by Benjamin Pierce \\cite{Pierceref} and was named algebra $g_4$. \n\n\\subsection{Quantions: a mixed relativity and quantum mechanics object}\n\nIn quantum field theory an important theorem is the $CPT$ theorem. This theorem mixes quantum mechanics and relativity concepts. Complex conjugation and charge are properties of the quantum theory, and parity and time are relativity concepts. Since the quantionic algebra $\\mathbb{D}$ is the only possible mathematical structure that structurally unifies relativity with quantum mechanics, the $CPT$ theorem arises naturally from it via the group of discrete transformation for quantions. \n\nA real quantion is defined as \n\\begin{math}\np = \\left(\n\\begin{tabular}{cc}\n$r$ & $z^*$\\\\\n$z$ & $s$\\\\\n\\end{tabular}\n\\right)\n\\end{math} where $r, s \\in \\mathbb{R}$ and $z \\in \\mathbb{C}$. 
Expressing $r$, $s$, and $z$ in terms of four real variables: $p_0$, $p_1$, $p_2$, $p_3$:\n\\begin{equation}\n\\begin{array}{rcl}\nr &=&p_0 + p_3\\\\\ns &=& p_0 - p_3\\\\\nz &=& p_1 + i p_2\\\\\n\\end{array}\n\\end{equation}\none has:\n\\begin{equation}\n(p,p) = {p_0}^2 - {p_1}^2 - {p_2}^2 - {p_3}^2\n\\end{equation}\nand\n\\begin{equation}\n{\\left(\n\\begin{tabular}{cc}\n$r$ & $z^*$\\\\\n$z$ & $s$\\\\\n\\end{tabular}\n\\right)}^{-1} = \\frac{1}{(p,p)} \\left(\n\\begin{tabular}{cc}\n$s$ & $-z$\\\\\n$-z^*$ & $r$\\\\\n\\end{tabular}\n\\right)\n\\end{equation}\n\nQuantions are not a division algebra, and the real quantions that lack an inverse are the null rays in the Minkowski cone. Having an inverse is not a mandatory property in quantum mechanics. An easy way to see this is the fact that we do not divide by the wavefunctions directly. In the case of perturbation theory, Feynman diagrams, and propagators, one deforms the integration contour to avoid exactly the points where quantions do not have an inverse.\n \n\\section{Quantions: lifting a degeneracy of complex numbers}\n\nQuantionic algebra was originally discovered in 1882, but its properties remained unexplored for a very long time until the quantal algebra research program rediscovered them using a systematic approach. However, there is another road that leads to quantions, this time completely in the realm of mathematics. For a long time, there was a mathematical bias towards division algebras, and the reason for this was an old Hurwitz theorem that states that there are only four normed division algebras: real numbers $\\mathbb{R}$, complex numbers $\\mathbb{C}$, quaternions $\\mathbb{H}$, and octonions $\\mathbb{O}$ \\cite{HurwitzTheorem}. Probably the original appeal of the theorem stems from the restriction of the number of such algebras, as opposed to an infinite number of associative non-division algebras. 
However, as seen earlier, null space-time intervals do not have an inverse in quantionic algebra $\\mathbb{D}$, and imposing the unnecessary division property eliminates relativity from $\\mathbb{D}$, forcing us back at using complex numbers. \n\nBut the complex numbers themselves have an additional property that can be regarded as a ``defect\": they have a mathematical degeneracy of algebraic and geometrical concepts which if lifted will lead uniquely to the quantionic algebra. The algebraic norm of complex numbers is defined as:\n\\begin{equation}\nA(z) = z z^*\n\\end{equation}\n\nExpanding $A(z)$ in terms of the components $z = x+iy$, one has:\n\\begin{equation}\nA(z) = x^2 +y^2\n\\label\n{equationMetric1}\n\\end{equation}\nNow Eq.~\\ref{equationMetric1} can be understood as a metric ($M(z) = x^2 +y^2$) and this is a geometric concept. Since complex numbers were introduced for their property of algebraic closure, and since the metric is the trivial Euclidean metric in two dimensions, it takes a bit of effort to see $A(z)$ and $M(z)$ as really separate concepts. However, once the separation is made, straightforward algebraic analysis will lead uniquely to the quantionic algebra $\\mathbb{D}$ as the only algebra that is able to lift this degeneracy \\cite{GrginBook1} and has different algebraic and geometric norms. The two norms of quantions also have a remarkable physics interpretation. The algebraic property of quantions is related to standard quantum mechanics, and the geometric property is related to relativity. 
\n\nIn quantionic algebra one can introduce complex conjugation $(*)$ and metric dual $(\\sharp)$ as follows:\n\\begin{eqnarray}\nq^*= \\{a^*, c^*, b^*, d^* \\}\\\\\nq^{\\sharp} = \\{d, -b, -c, a\\}\n\\end{eqnarray}\t\nwhere $q = \\{a,b,c,d \\}$.\n\nThe quantionic algebraic norm $A(q)$ is defined using standard Hermitian conjugation:\n\\begin{equation}\nA(q) =q^* q = \\{ a^* a + b^* b, c^* a + d^* b, a^* c + b^* d, c^* c + d^* d \\}\n\\end{equation}\nand the quantionic metric norm $M(q)$ is the determinant of the quantionic matrix:\n\\begin{equation}\nM(q) = ad - bc\n\\end{equation}\nThe inverse of a quantion is:\n\\begin{equation}\nq^{-1} = \\frac{q^\\sharp}{M(q)}\n\\end{equation}\nSince $M(q)$ may be zero, quantions are not a division algebra.\n\nNot only $A(q) \\ne M(q)$ in general, but as functions they reduce an eight-dimensional quantion to a four, and a two dimensional object respectively.\n$M(q)$ is obviously a complex number and $A(q)$ is a real quantion because:\n\\begin{equation}\n{ (A(q) ) }^*= {(q^* q)}^* = q^* q^{**} = q^* q = A(q)\n\\end{equation}\n\n$M(q)$ maps quantions to complex numbers and non-relativistic quantum mechanics, while $A(q)$ maps quantions into Minkowski four vectors, thus extracting relativity. \n\nBy removing the algebraic-geometric degeneracy of complex numbers, quantions are the next number system in the sequence: natural numbers, real numbers, and complex numbers. Quantionic physics does not deform the Hilbert space; it only replaces complex numbers with a new number system. The unnecessary division property of complex numbers was the main hindrance in uncovering the relativity structure. Due to their uniqueness, quantions are nature's number system where a lot of physics will follow straight as mathematical theorems with no external ad-hoc justification. Another reason of calling quantions a number system is the existence of a hyperquantionic sequence. 
For real numbers, the Cayley-Dickson construction combines two real numbers into a complex number, four real numbers into a quaternion number, eight real numbers into an octonion number, and so forth using the powers of two. In the hyperquantionic sequence one starts with complex numbers and constructs groups of complex numbers using the powers of four. \n\n\\section{Born and Zovko interpretation of the wave function}\nStandard quantum mechanics based on complex numbers consists of several parts. First, we have the Hilbert space. Then, we need to postulate space and time as concepts outside Hilbert space. Finally, we need to add Born's interpretation of the wave function and the Schr\\\"{o}dinger equation. Generalizations of quantum mechanics were attempted to solve the unification problem. One approach is to uncover first the geometrical formulation of quantum mechanics \\cite{AshtekarSchilling}. Hilbert space is understood as a K\\\"{a}hler space endowed with a symplectic and a metric structure. The starting point is the Hermitian inner product decomposition into real and imaginary parts:\n\\begin{equation}\n<\\Phi, \\Psi> = \\frac{1}{2 \\hbar}G(\\Phi, \\Psi) + \\frac{i}{2 \\hbar}\\Omega (\\Phi, \\Psi)\n\\end{equation}\nwith $G(\\Phi, \\Psi) = \\Omega (\\Phi, J \\Psi)$, $J = G^{-1} \\Omega$, and $J^2 = -1$. The space of physical states is the projective Hilbert space $CP(n) = U(n+1)\/U(n) \\times U(1)$ and the Schr\\\"{o}dinger equation describes a Killing Hamiltonian flow along $CP(n)$.\n\nA complex number $z = x + i y$ can be represented as $z = x G + y \\Omega$ where \\begin{math}\nG = \\left(\n\\begin{tabular}{cc}\n1 & 0\\\\\n0 & 1\\\\\n\\end{tabular}\n\\right)\n\\end{math}\nand \\begin{math}\n\\Omega = \\left(\n\\begin{tabular}{cc}\n0 & 1\\\\\n-1 & 0\\\\\n\\end{tabular}\n\\right)\n\\end{math}. 
We can see that from Born's interpretation, complex numbers occur naturally in quantum mechanics but the interpretation of $G$ and $\\Omega$ has a completely different meaning when compared with the complex numbers introduced as a consequence of the composability principle. This geometric approach stems from the usual quantization procedure of replacing the Poisson brackets with commutators. What this does is to augment a symplectic structure with a metric structure resulting in a K\\\"{a}hler space. Born's interpretation of the function $\\rho = \\psi^* \\psi$ as a probability density implies a positive norm which in turn guarantees a division algebra. Since quantions are not a division algebra, if one is to find deformations of quantum mechanics to obtain (structural) unification with relativity, the starting point must be the replacement of Born's interpretation with something else. In 2002, Nikola Zovko proposed a generalization of Born's interpretation. In quantionic algebra, Zovko's interpretation uses a current probability density $j = q^{\\dagger} q$ with $j$ being a future oriented time-like Minkowski vector. Combining quantions with Zovko's interpretation leads to Dirac and Schr\\\"{o}dinger equations. Moreover, the Minkowski metric is fully contained within quantions and does not need to be postulated as an outside component.\n\nSo far we have discussed the main algebraic properties of quantions. As a $2 \\times 2$ matrix, quantions have only the symmetries of the Lorentz group. To have equations of motion, we need to introduce additional degrees of freedom and the new structure requires the Riemannian space. Only in the flat case, derivations generate the Abelian group of translations and therefore the Poincar\\'{e} group. 
The unique way to generalize quantions is using a sub-algebra of the $4 \\times 4$ complex matrices in the following block diagonal form \\cite{GrginBook2}:\n\\begin{equation}\nQ = \\left(\n\\begin{tabular}{cc}\nA & 0\\\\\n0 & A\\\\\n\\end{tabular}\n\\right)\n\\end{equation}\nwhere\n\\begin{equation}\nA = \\left(\n\\begin{tabular}{cc}\nz & v\\\\\nu & w\\\\\n\\end{tabular}\n\\right)\n\\end{equation}\nis a regular $2 \\times 2$ quantion. This representation appears naturally from the complex number degeneracy elimination problem.\n\nUp to a similarity transformation, $Q$ are unique generalizations of the $2 \\times 2$ quantions. The extension, called the left algebra of quantions, allows derivation, limited analyticity properties, quantion-spinor complementarity, and Dirac equation \\cite{GrginBook2}. The algebra of matrices $Q$ is a representation of the quantionic algebra in terms of matrices, acts on ket column vectors, and has the $SU(2) \\times U(1)$ electroweak symmetry. Associated with the left representation is a right representation which acts on bra row vectors and the left and right representations commute. The commutation property is equivalent to the associative property of quantions.\n\nThose advanced topics are outside the scope of this introductory paper and interested readers should consult the {\\it Structural Unification of Quantum Mechanics and Relativity} book by Emile Grgin\\cite{GrginBook2}.\n\n\\section{Discussions and open problems}\nThe author believes that structural unification of relativity and quantum mechanics is a major milestone in understanding nature because it holds the potential to support a new physics paradigm centered on the old question of physics axiomatization. 
Although quantionic physics is still in the early development stages with many critical questions not yet researched, quantionic physics may put the phenomenological postulates of the Standard Model on a solid axiomatic foundation and might one day become the backbone of an ultimate theory of everything challenging string theory's aspirations in this area. While Emile Grgin refrains from speculations about the future and prefers to follow the math wherever it may lead, in this section the author is free to use the glimpses and insights learned from this new research area to provide discussions, conjectures and speculations. As such, math rigor will be replaced mostly by heuristic and philosophical arguments. Existing results will be presented in a way that will support the new paradigm, and although this paradigm is inspired in part by quantionic research, it is independent from it, or from the original intention of the other cited results.\n\n One of the major successes of quantionic physics is the fact that structural unification is only possible for a four dimensional space time obeying the Minkowski metric. Without a complete unification theory, the proof of the space-time dimensionality is incomplete, but quantionic research is a big step forward. No other unification approaches (string theory included) can claim any credible success in this area. (Outside unification approaches, the four dimensionality is singled out as the only case where Yang-Mills theories are renormalizable. Also, from the geometrical point of view, one can construct uncountably many inequivalent differential structures and have an interplay between Hodge duality and two-forms \\cite{IngemarBengtsson}.) However, quantionic research is just beginning and there are many open problems. Structural unification does not offer yet answers for quantum gravity, or even for the complete Standard Model. 
\n\nNon-commutative geometry \\cite{Connes} provides the Lagrangian for the entire Standard Model, but the ``clothes for the SM beggar'' as Connes put it do not have a clear physics origin, or a provable uniqueness associated with it. (It is also possible, and probably a better explanation, that the lack of clear physics origin is only a reflection of the author's lack of understanding of non-commutative geometry.) In quantionic physics, the natural symmetry is $U_q (1) = U(1) \\times SU(2)$, and determining the origin of the strong force $SU(3)$ symmetry is an open problem currently under vigorous research. Increasing the available degrees of freedom by considering $U_q (2)$ can lead to $SU(3)$, but the question becomes why stop here and not consider for example $U_q (17)$, or any arbitrarily high number. What is the distinguishing property of $SU(3)$ from the quantionic perspective? Preliminary results appear to fully answer this question, but it is premature to present them here.\n\nAnother open problem is the elimination of split complex numbers. From the point of view of degeneracy, split complex numbers have a different degeneracy, that of identity operation and of complex conjugation. They form a non-division algebra, but the concept of charge is not well defined. In this case the $SO(2,4)$ group is replaced most likely by the $SO(3,3)$ group.\n\nIn terms of quantum gravity, there are links between the $SO(2,4)$ group and loop quantum gravity \\cite{Kerrick1},\\cite{Kerrick2} and between twistors and string theory \\cite{stringsTwistors}. The major problems of general relativity such as renormalizability, singularities, and global structure do not yet get much clarification from quantionic physics. Structural unification provides some backing for the impossibility of existence of closed time-like curves (CTCs), because in this case quantionic algebra is not possible. 
The very point of Grgin complexification and of the linear space $L^{(2,4)}$ was to find a space where reflections cannot be undone by continuous transformations, and on a CTC space one can undo the reflections. This corresponds to a particle being created at a point, going back in time and acquiring a phase shift, and then being reabsorbed at the creation point thus losing the phase information \\cite{Hawking1} and breaking unitarity \\cite{Boulware}. \n\nSecond quantization and spontaneous symmetry breaking are not yet researched in quantionic theory. As non-Abelian gauge theories, electroweak theory is renormalizable and the strong force is renormalizable only at high energy or small distances. Is renormalizability always satisfied in quantionic physics? Probably yes and the best way to prove it is to interpret quantionic physics as a Yang-Mills theory using $U_q (1)$. This should not be very hard since electroweak theory is a Yang-Mills quantum field theory already. Then one can use either the massless or the massive on-shell Yang-Mills renormalizability property \\cite{ALarin} or other more laborious methods. \n\nHowever, although not exceptionally hard, second quantization and obtaining (at least) the electroweak interaction are far from being a trivial task either. First, let us view quantions as a number system and compare their internal symmetry $U_q (1)$ with the $U(1)$ symmetry of the complex numbers. QED is a far more sophisticated theory than what one might expect from the complex number symmetry alone. If quantions are simply a number system, then one would not expect a lot of physics to follow from it. Existing and preliminary unpublished results show however that critical features of the Standard Model are naturally appearing in quantionic algebra and this looks to be more than just a mathematical coincidence. In electroweak and strong force, nature exhibits nonlinear self-interaction, and quantions are a linear algebra. 
The author's expectation is that composability principle (which may also include split-complex composability) and the Yang-Mills local symmetry principle would generate the full axiomatization of the Standard Model. The main thrust of Grgin's research is however in a slightly different direction. The current aim of quantionic research is to discover first all the ``inherent'' properties of quantionic algebra without using gauge symmetry or any other concepts outside quantions.\n\nFrom the principle of local symmetry, converting the global symmetries of quantions into local gauge symmetries resulting in a quantionic Yang-Mills theory will introduce nonlinearity. The following important questions will then arise: how does the $CPT$ quantionic property, the space-time dimensionality, and all other inherent quantionic properties survive the opposite local to global transition?\n \nIf we analyze isomorphisms between unitary and orthogonal groups, at low dimensionality, we have only four such cases:\n\\begin{equation}\n\\begin{tabular}{rcl}\nU(1) & $\\approx$ & SO(2)\\\\\nSU(2) & $\\approx$ & SO(3)\\\\\nSU(4) & $\\approx$ & SO(6)\\\\\nSU(2,2) & $\\approx$ & SO(2,4)\\\\\n\\end{tabular}\n\\end{equation}\nIf quantizing general relativity does not take the route of quantions, then one is restricted to using one or all of the top three isomorphisms above instead. The first isomorphism is too simplistic and the $SU(4) \\approx SO(6)$ does not yet appear to play any significant role in physics. The $SU(2) \\approx SO(3)$ isomorphism is used by loop quantum gravity, and from the renormalizability property, if true, it is at least conceivable that we may have a dual unification problem: a canvas space-time quantization, and a matter quantization using quantions. The strong force may then arise out of the necessity of making the two unification approaches compatible. 
Using any of the first three isomorphisms above (and in particular the $SU(2) \\approx SO(3)$ isomorphism) has a major disadvantage in terms of the time problem for canonical quantum gravity \\cite{timeProblem}, but $SU(2)$ is a core symmetry of quantionic physics and the link between loop quantum gravity and quantions could appear naturally \\cite{Kerrick1},\\cite{Kerrick2}. \n\nStandard Model has the $U(1) \\times SU(2) \\times SU(3)$ symmetry, and Geoffrey Dixon proposed using the algebra $\\mathbb C \\otimes \\mathbb H \\otimes \\mathbb O$\\cite{GDixon}. From quantionic algebra, we can see that using only norm division algebras is not enough to construct the correct axiomatization of the Standard Model. \n\nIn terms of normed division algebras, one has the following isomorphisms:\n\\begin{equation}\n\\begin{tabular}{rcl}\nsl(2, $\\mathbb R$) & $\\approx$ & so(2,1)\\\\\nsl(2, $\\mathbb C$) & $\\approx$ & so(3,1)\\\\\nsl(2, $\\mathbb H$) & $\\approx$ & so(5,1)\\\\\nsl(2, $\\mathbb O$) & $\\approx$ & so(9,1)\\\\\n\\end{tabular}\n\\end{equation}\nQuantions are related to the second isomorphism, while the last isomorphism is related to the 10-dimensional superstring theory \\cite{stringTheory}, and to supersymmetric gauge theories \\cite{TKugo}. 
Given the correct space-time dimensionality predicted by $ \\rm{sl}(2, \\mathbb{C}) $ under the structural relativity-quantum mechanics unification, the space-time dimensionality predicted by superstring theory, and the rigidity of the algebraic structures, then the likelihood of string theory to be the correct fundamental physics theory is now much lower.\n\nAs a speculation, in the dual unification approach, maybe the space-time quantization should be done using split-complex composability, the electroweak quantization should be done using quantionic physics, and the strong force is a mixed object of both split-complex and regular composability \\cite{Ulrych}, within the Pati-Salam $SU(4)\\times SU(4)$ grand unification theory \\cite{PatiSalam} (The AdS-CFT correspondence\\cite{Maldacena} hints towards the deep connection between gravity and strong force.). However, three serious arguments count against the dual composability approach: the von Neumann uniqueness property of quantum mechanics based on complex numbers \\cite{vonNewman}, the non-commutative geometry framework for the Standard Model using only complex numbers \\cite{Connes}, and the inherent Standard Model properties contained inside quantionic algebra which is also based on elliptical composability. Also it is not clear at this point if quantizing gravity is even possible in a split-complex quantum mechanics. However, the non-uniqueness of the split-complex quantum mechanics may not be that serious of a problem on curved space-time. Even in standard quantum mechanics uniqueness is not absolute since the Unruh effect shows that the number of particles is not globally definable. Standard composability leads to elliptical quantum mechanics, while split-complex composability leads to hyperbolic quantum mechanics. Due to its unbounded nature, hyperbolic quantum mechanics may explain the inflation period before the Big Bang and the current value of the cosmological constant $\\Lambda$. 
If this were true, then cosmology would run along the lines of Penrose's before the Big Bang ideas \\cite{PenrosePreBB}. Expanding on those ideas, the universe is always dominated by hyperbolic composability, experiences locally a parabolic fluctuation leading to inflation and Big Bang, followed by the normal cosmic evolution. Some of the fundamental constants may incorporate the remnants of the frozen original interaction between hyperbolic and elliptic composability$^{3}$\\footnotetext[3]{Are all of the Standard Model physical constants derivable from fundamental principles? Some of the constants have already been ``derived'' from some mathematical arguments, but without a complete theory it is easy to dismiss them as ``numerology''. Grgin makes two arguments in favor of deriving the constants. First, algebraic methods are more powerful than traditional gauge theory methods which have only the power of dimensional analysis. Second, if the constants have some value at some point in space-time why would they not have another value at another point unless there is a mathematical necessity for their value in the first place?}, and our universe may be only one of uncountable other universes, the majority of them being uninteresting due to the lack of stable atoms$^{4}$\\footnotetext[4]{Because this argument is similar to the anthropic principle, it should be used only as a speculative philosophical argument at this time. The author believes that the anthropic principle can be used, but only after the fact and not to make predictions.}. Later, after all black holes evaporate and all matter decays, the original traces of the elliptical composability are erased and the cycle can start anew. \n\nSpeculation aside, the major quantionic problem is then this: can the unification of gravity and relativity be worked out completely inside quantionic physics, or are we faced with a double unification problem? 
Either way, quantionic renormalizability, asymptotic freedom, quark charges, split-complex composability, AdS-CFT correspondence, non-commutative geometry, Higgs mechanism, and the $U_q (2)$ symmetries are the major puzzle pieces in constructing a coherent theory of nature. \n\nThe author conjectures that quantionic physics can be proved always renormalizable. It is unclear if we may be facing in fact a double unification problem with the strong force arising out of the necessity of making the two unification approaches compatible. It is important to find out if gravity can be also quantized using split-complex composability, because the answer can decide if split-complex quantum mechanics will ever play a physical role (like the positron solutions from Dirac's equation), or it is just a mathematical dead end.\n\n\\subsection{Axiomatization of physics}\nAfter the Galilean revolution, physics became an experimental science. Now, with quantionic advances in unifying quantum mechanics and relativity, here is the boldest speculation of all: what if nature enjoys uniqueness in the sense that four dimensional space time, general relativity, and quantum mechanics are mandatory consequences of a hypothetical theory of everything? What if all physics can be derived mathematically without the need for experiments in a post Galilean era$^{5}$\\footnotetext[5]{Those speculations are not new and are periodically rediscovered independently in slightly different forms by the people working in the foundational areas which by its very nature forces to consider them. Big caution has to be exercised however because if the speculation is false, its utopian appeal can have very real negative consequences in terms of pursuing other options as the history of the string theory shows \\cite{LeeSmolin}. This is not an argument against string theory per se, since it applies to the quantionic approach as well. It is a warning against abandoning the Galilean era too soon.}? 
Since G\\\"{o}del's famous incompleteness theorem\\cite{GodelTh}, we know that mathematics is infinite. But how about physics? Is physics axiomatizable? This is not a new question. It was first proposed in 1900 by David Hilbert as problem six of his famous twenty-three problems that should define the next century of mathematics \\cite{Hilbert1}. If problem six is solvable, uniqueness results are critical. So far, the author is aware of the following uniqueness results of various strengths: quantions, time, orthogonal groups, and Hilbert space. \n\nWhen considering this problem, one should consider axioms that are not mere technical postulates, like for example the definition of Hilbert space, but principles that will separate the Platonic world of abstract mathematics from the real physical world. One such postulate is the composability principle discussed above. \n\t\nDimensional analysis of Lie groups is a very powerful tool to prove uniqueness, and two important results were obtained in this way. First, in general relativity, if we demand that one needs to support local mathematical structures of infinite complexity (in other words a general ontology), then one necessarily obtains the orthogonal groups \\cite{Rau1}. For ontology to be possible, orthogonal groups are required. Second, if quantum mechanics is defined as a framework of reasoning when hypotheses form a continuum and the maximum evidence accessible through experiment is not allowed to exceed a finite upper bound, then by dimensional analysis one obtains unitary groups and the Hilbert space \\cite{Rau2}. \n\nOrthogonal groups correspond to real numbers, and if nature were to be described by real numbers only, then EPR's definition of reality would hold: ``If, without in any way disturbing a system, we can predict with certainty $(\\dots)$ the value of a physical quantity, then there exists an element of physical reality corresponding to this physical quantity.\" \\cite{EPRPaper}. 
Creating a universe using only orthogonal and symplectic groups might not be logically possible. If we add general relativity (a subset of the ways orthogonal and symplectic groups can be coherently combined), then there are three supporting arguments for this conclusion. Recall that in general relativity any object falling inside of a black hole will reach the end of a geodesic line in a finite time, meaning that it will be erased out of existence\/ontology. While solving the information paradox is extremely hard in quantum gravity, information is guaranteed to be lost for sure in the absence of quantum mechanics. Another argument would be the existence of singularities in general relativity, but the Big Bang may have very well started from a singularity, and therefore this is a weak argument. The third argument is the existence of CTCs in general relativity which can easily lead to paradoxes \\cite{RamaSen}. (There are counter arguments along the line of the principle of minimal action which try to eliminate the initial conditions leading to paradoxes \\cite{Carlini1}, but those arguments demand that free will is only an illusion, and more importantly, have unintended unphysical consequences \\cite{Florin2}). Taking the three arguments together, unitary groups (and global hyperbolicity, because unitary groups do not solve the CTC problem) may be a necessity if nature is to be self consistent. This argument for the necessity of unitary groups is incomplete because we need to prove first the necessity of general relativity. General relativity follows from the equivalence principle, and uses mass as a fundamental concept. As a concept, mass lacks a clear mathematical and physical origin and may force us into a circular argument (the spontaneous symmetry breaking origin of mass requires unitary groups).\n\nLet us continue the discussion by proposing two other principles: the deformability principle and the universal truth property principle. 
\n\nOne principle that should never be used is the anthropic principle because this principle can hide all unsolved problems in a scientific dishonest way. If we strip away the need for the existence of our universe as is, we are left with requiring just its existence (ontology must be present, but we do not specify how), and then this is a scientifically valid principle (falsifiable). To avoid confusion and to keep it consistent with the original paper where it was first mentioned \\cite{Rau1}, this principle should be called the ``deformability'' principle. In the original paper context, deformability meant that the local physical structure was allowed to vary freely which corresponded to the requirement that arbitrary matter distributions should be allowed. Expressed in terms free of general relativity concepts, this principle demands the support of local mathematical structures of infinite complexity which in turn imply the existence of orthogonal groups of arbitrary signature $SO(p,q)$. The existence of time, or the transition from $SO(p,q)$ to $SO(1, n-1)$ requires yet another principle: the universal truth property.\n\nIn general in mathematics, the truth value of a statement depends on the context. For example, the statement that two parallel lines never meet is true in Euclidean geometry, and false on Riemannian geometry. The mathematical meaning of truth is coded by the Tarski theorem \\cite{Tarski1} which roughly states that inside an axiomatic system, one cannot define the truth value of its own predicates. Thus, in mathematics, truth means that something is derived from axioms, while in the physical world truth is usually defined as something corresponding to reality and has a ubiquitous non-trivial (but easily overlooked) universal property. In physics, events occurring on the four dimensional event manifold are true for all observers and across all contexts. 
This is a remarkable property that can be shown to lead to the necessity of time as the only way to avoid self-referencing paradoxes via the Liar's paradox \\cite{Florin1}.\n\nThe original inspiration for this result was G\\\"{o}del's incompleteness of arithmetic theorem, but this theorem was not used directly due to the continuous nature of the event manifold. The incompleteness theorem shows that mathematics is infinite in the sense that, at least in some cases, one can always find a new statement (or in the mathematical terminology a predicate) $p$, which cannot be proved or disproved within the existing axiomatic system. If the predicate is then added as a new axiom, the process can be repeated again in the extended axiomatic system. Since the new axiom can be added as either ($p$) or (not $p$), the process generates two new incompatible axiomatic systems. This process shows that the outside space and time Platonic world of mathematics is not only infinite, but also filled with contradictory axiomatic systems that cannot be organized into a coherent system. \n\nIn the physical world however (which shares at least the same complexity as the mathematical world since we can discover the mathematical axioms), the universal truth property (or equivalently global consistency) leads to a constraint which manifests itself as global hyperbolicity, or time.\n\nG\\\"{o}del's proof can be translated almost one to one into the time proof theorem with only one twist at the end because event manifolds are ``decidable\" on the account of the universal truth property. Event manifolds use real numbers which have a complete axiomatization and the difficult part was solving the conceptual problem of finding a mapping between the two domains: formalized arithmetic and continuous event manifolds. 
The conceptual mapping is as follows: predicate $\\leftrightarrow$ event, proof $\\leftrightarrow$ affine transformation, g\\\"{o}delization of proofs (assigning numbers to proofs) $\\leftrightarrow$ parameterization of initial conditions, diagonal argument $\\leftrightarrow$ self-interaction. Once the conceptual problem is solved (which in hindsight looks obvious and trivial, but was particularly hard to discover in the first place), straightforward technical details follow easily. In the end of the proof, unlike G\\\"{o}del, we are forced to conclude that we do have inconsistency instead of incompleteness for non-globally hyperbolic event manifolds. Only globally hyperbolic event manifolds save the physical world from contradictions.\n\nThe proof can also be expressed in terms of time travel and CTC spaces. Here the Liar's paradox is typically presented as the grandfather paradox when one goes back in time to kill his own grandfather and thus is preventing his own birth. Proponents of time travel use quantum (and sometimes classical) mechanics as a justification for Novikov's principle \\cite{Novikov} which forbids all self-referencing paradoxes. The justification is that there are no self-referencing paradoxes in classical or quantum mechanics (see also the no clone theorem of quantum mechanics \\cite{Zizzi1}) due to the symplectic structure contained in both of them. (In phase space for example, closed self-consistent evolution loops do exist.) However, analyzing its consequences, this principle leads to infinities \\cite{Florin2}. G\\\"{o}del's incompleteness proof relies on multiplication to perform the `` g\\\"{o}delization of proofs'', while the necessity of time proof relies on the ability to have initial conditions leading to paradoxes (or the ability to extend the local parameterization of the initial conditions into globally inconsistent self-interaction). 
To the extent that this is not possible due to the symplectic structure of either classical or quantum mechanics, the infinities mentioned above play a critical role in completing the proof$^{6}$\\footnotetext[6]{The proof of the necessity of time did not originate from an attempt to prove the impossibility of time travel, but it naturally led there. On this (easier) track, all that remained to be proven was the rejection of Novikov's principle. Since this principle is sometimes understood as a tautology, the way to reject it was not to prove it wrong directly, but to analyze its consequences \\cite{Florin2}.}. The impossibility of CTC spaces does not provide a mechanism for which the creation of wormholes or CTC spaces is impossible. This remains an important open problem to be solved under a complete unified theory.\n\nThe last (conceptual) problems in a hypothetical theory of everything are the problem of free will and the total information of the universe. In a totally deterministic universe, free will is just an illusion while in a completely chaotic universe, there is no controllability and hence free will does not exist as well. Chaitin's algorithmic information theory shows that ``if one has ten pounds of axioms and a twenty-pound theorem, then that theorem cannot be derived from those axioms'' as Chaitin puts it \\cite{Chaitin}. So why do we have free will and how come we can discover the infinite world of mathematics if physics is truly axiomatizable using only a handful of axioms? \n\nThe composability principle may provide the answer to both of those questions. Here are the extremely high level heuristic arguments. 
\n\nFirst, free will is equivalent with the ability to set the orientation of physics detectors \\cite{Conway1} (for example the spin orientation of an electron does not have a definite value before measurement) which corresponds to the ability to split a composed system (or generators and observables for a single particle) into sub-systems in an arbitrary fashion. \n\nSecond, in terms of information, quantum mechanics is equivalent with having a continuous set of possibilities and only a finite set of answers. The total information of a system able to be decomposed in an infinite number of ways is infinite and this is why our infinitely complex universe can exist and mathematicians can continue to discover new mathematical axioms. As a speculation, in conjunction to this, hyperbolic quantum mechanics and Penrose's before the Big Bang ideas may explain the low entropy at the time of the Big Bang and the arrow of time.\n\n\\subsection{Uniqueness of structural unification}\nStructural unification of non-relativistic quantum mechanics and relativity requires changes on either quantum mechanics or relativity. Quantions take the algebraic route of changing quantum mechanics by removing the unnecessary division property. On the geometric route, adding division to relativity is impossible because it would either contradict the experimental evidence (a Galilean era argument), or will violate the universal truth property (a post-Galilean era argument). Therefore the geometric route to structural unification is not allowed. Only within the boundaries of the more ``complicated'' Hilbert space (relative to the Lorenz metric signature), the non-relativistic quantum mechanics can be changed (see Eq.~\\ref{newNumberSystem} and its similarity with complex numbers). Another route on the geometric track is the conformal compactification of the Minkowski space (using the $SO(2,4)$ group). 
In this case, a null ray is mapped to a point, and a point on the Minkowski space becomes a Riemann sphere transforming geometry into complex numbers. As seen from quantionic research, the $SO(2,4)$ group expands the quantionic centralizer, and while the twistor space possesses shared relativity and quantum mechanics characteristics, this unification goes outside the regular Hilbert space and special relativity. In the original derivation of quantions \\cite{foundationPaper2} a mistake was uncovered \\cite{BertramCommunication} and as a result the strength of the uniqueness result was weakened. The only open problem at this point is to seek a stronger proof of uniqueness of structural unification which would include large dimensionality and this is currently under active research. \n\n\\subsection{Discussion summary}\nQuantionic research is the latest attempt in constructing a unified physics theory. The $SU(3)$ symmetry research effort in the quantionic program is the most active area right now and holds the promise of unexpected new insights. In particular, it may lead to the correct grand unification theory (GUT), if nature does indeed have one. Second quantization, renormalizability, nonlinear self-interaction, spontaneous symmetry breaking, the links with non-commuting geometry, canonical quantum gravity, and string theory are not yet researched in this new approach. If composability is to be taken seriously$^{7}$\\footnotetext[7]{Elliptic quantum mechanics and parabolic classical mechanics are real phenomena. Is hyperbolic quantum mechanics a real phenomenon as well?} and physics is axiomatizable, then either hyperbolic quantum mechanics is a real physical phenomenon which may explain the positive cosmological constant, or some other fundamental principle is yet to be discovered. 
\n\nIn the end, following the mathematics will lead us into the right direction, but at this point, the author offers the following conjectures: quantionic physics will be proved always renormalizable$^{8}$\\footnotetext[8]{Because gravity is not renormalizable, this may force us to consider a dual unification problem.}. The second (not so novel) conjecture is that physics is axiomatizable and Hilbert's sixth problem is completely solvable. Three new physics principles which clearly separate the real physical world from the Platonic world of mathematics were identified so far: composability from quantionic research, deformability principle from Lie group dimensional analysis, and the universal truth property from the impossibility of closed timelike curves. In addition to the Yang-Mills local gauge symmetry principle, they may completely explain the Standard Model and may possibly lead to GUT and quantum gravity$^{9}$\\footnotetext[9]{It is unclear if Hamilton's principle is really needed since Dirac's equation can be obtained directly from quantions. In a field theory context we may be forced to start from a Lagrangian.}.\n\nLacking the critical mass evidence to make it a conjecture but presented as a speculation backed by indirect supporting results, split-complex hyperbolic quantum mechanics may get to play an actual physical role in the gravity unification problem and in the strong force. Two core results are needed to settle this question: research the hyperbolic equivalent of quantions, and the feasibility of using split-complex numbers into a modified diffeomorphism invariant quantum gravity theory.\n\n\\section{Acknowledgments} \nI would like to thank Emile Grgin for countless enlightening, stimulating, and enjoyable discussions. 
I would also like to thank Hrvoje Nikolic for introducing me to the quantionic research results and for suggesting to write this paper in the first place.\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{Introduction}\nImage restoration refers to a class of ill-posed inverse problems recovering unknown images from their degraded observations (e.g., noisy, blurred or down-sampled). It is well known image prior (a.k.a. regularization) plays an important role in the development of solution algorithms to ill-posed image restoration problems. Depending on the availability of training data, one can obtain image prior by either model-based or learning-based approaches. In model-based approaches, image prior is obtained by mathematical construction of a penalty functional (e.g., total-variation or sparse coding) and its parameters have to be \\emph{intrinsically} estimated from the observation data; in learning-based approaches, image prior is leveraged \\emph{externally} from training data - e.g., a deep convolutional neural network is trained to learn the mapping from the space of degraded images to that of restored ones. We will briefly review the key advances within each paradigm in the past decade, which serves as the motivation for developing a hybrid (internal+external) prior in this work.\n\nIn model-based approaches, sparse coding and its variations are likely to be the most studied in the literature \\cite{yu2010image,marquina2008image,yang2010image,yu2012solving,dong2011image,dong2011centralized,dong2013nonlocally,timofte2014a+,timofte2016seven,dong2016image,osendorfer2014image,wang2015deep,kim2016accurate,kim2016deeply,egiazarian2015single}.\nThe basic idea behind sparse coding is that natural images admit sparse representations in a transformed space. 
Early works in sparse coding have focused on the characterization of localized structures or transient events in natural images; to obtain basis functions with good localization properties in both spatial and frequency domains, one can either construct them through mathematical design (e.g., wavelet \\cite{mallat1999wavelet}) or learn them from training data (e.g., dictionary learning \\cite{mairal2009online}). Later on the importance of exploiting nonlocal similarity in natural images (e.g., self-repeating patterns in textured regions) was recognized in a flurry of so-called simultaneous sparse coding works including BM3D \\cite{dabov2007image} and LSSC \\cite{mairal2009non} as well as nonlocal sparsity based image restoration \\cite{dong2011image,dong2011centralized,dong2013nonlocally}. Most recently, nonlocal sparsity has been connected with the powerful Gaussian scalar mixture (GSM) model \\cite{portilla2003image} leading to the state-of-the-art performance in image restoration \\cite{dong2015image}.\n\nIn learning-based approaches, deep neural network (DNN) techniques have attracted increasingly more attention and shown significant improvements in various low-level vision applications including superresolution (SR) and restoration \\cite{dong2016image,kim2016accurate,kim2016deeply,wang2015deep,dong2016accelerating,shi2016real}. In \\cite{cui2014deep}, stacked collaborative auto-encoders are used to gradually recover a high-resolution (HR) image layer by layer; in \\cite{osendorfer2014image}, a SR method using predictive convolutional sparse coding and deconvolution network was developed. Multiple convolutional neural network \\cite{dong2016image,kim2016accurate,kim2016deeply} have been proposed to directly learn the nonlinear mapping between low-resolution (LR) and high-resolution (HR) images; and multi-stage trainable nonlinear reaction diffusion network has also been proposed for image restoration \\cite{chen2016trainable}. 
Moreover, most recent studies have shown that deeper neural network can lead to even better SR performance \\cite{kim2016accurate,kim2016deeply}. However, it should be noted that the DNN approach \\cite{dong2016image,kim2016accurate,kim2016deeply} still performs poorly on some particular sample images (e.g., if certain texture information is absent in the training data). Such mismatch between training and testing data is a fundamental limitation of all learning-based approaches.\n\nOne possible remedy for overcoming the above limitation is to explore somewhere between - i.e., a \\emph{hybrid} approach combining the best of both worlds. Since training data and degraded image respectively contain supplementary (external and internal) prior information, it is natural to combine them for image restoration. The key challenge is how to pursue such a hybrid approach in a principled manner. Inspired by the previous work connecting DNN with sparse coding (e.g., \\cite{gregor2010learning} and \\cite{wang2015deep}), we propose a Structured Analysis Sparse Coding (SASC) framework to jointly exploit the prior in both external and internal sources. Specifically, an external structured sparse prior is learned from training data via a deep convolutional neural network (in a similar way to previous learning-based approaches); meantime another internal structured sparse prior is estimated from the degraded image (similar to previous model-based approaches). Two structured sparse priors will be combined to produce a hybrid prior incorporating the knowledge from both domains. To manage the computational complexity, we have developed a novel framework of implementing hybrid structured sparse coding processes by deep convolutional neural networks. 
Experimental results have shown that the proposed hybrid image restoration method performs comparably with and often better than the current state-of-the-art techniques.\n\n\\section{Related Work}\n\n\\subsection{Sparse models for image restoration}\n\nGenerally speaking, sparse models can be classified into \\emph{synthesis} models and \\emph{analysis} models \\cite{nam2013cosparse}. Synthesis sparse models assume that image patches can be represented as linear combinations of a few atoms from a dictionary. Let $\\bm{y}=\\textbf{H}\\bm{x}+\\bm{n}$ denote the degraded image, where $\\textbf{H}\\in\\mathbb{R}^{N\\times M}$ is the observation matrix (e.g. blurring and down-sampling) and $\\bm{n}\\in\\mathbb{R}^N$ is the additive Gaussian noise. Then synthesis sparse model based image restoration can be formulated as Eq. (\\ref{Syn_sparse})\n\\begin{small}\n\\begin{equation}\n(\\bm{x}, \\bm{\\alpha}_i)=\\argmin_{\\bm{x}, \\bm{\\alpha}_i} ||\\bm{y}-\\textbf{H}\\bm{x}||_2^2 + \\eta\\sum_i \\{||\\textbf{R}_i\\bm{x}-\\textbf{D}\\bm{\\alpha}_i||_2^2 + \\lambda||\\bm{\\alpha}_i||_1 \\}, \\label{Syn_sparse}\n\\end{equation}\n\\end{small}\nwhere $\\textbf{R}_i$ denotes the matrix extracting patches of size $\\sqrt{n}\\times \\sqrt{n}$ at position $i$ and $\\textbf{D}\\in\\mathbb{R}^{n\\times K}$ is the dictionary. The above optimization problem can be solved by alternately optimizing $\\bm{\\alpha}_i$ and $\\bm{x}$. The $\\ell_1$ norm minimization problem in Eq. (\\ref{Syn_sparse}) requires many iterations and is typically computationally expensive.\n\nAlternatively, analysis sparse model (ASC) \\cite{nam2013cosparse} assumes that image patches are sparse in a transform domain- i.e., for a given dictionary $\\textbf{W}\\in\\mathbb{R}^{K\\times n}$ of analysis, $||\\textbf{W}\\bm{x}_i||_0\\ll K$ is sparse. 
With the ASC model, the unknown image can be recovered by solving\n\\begin{align}(\\bm{x},\\bm{\\alpha}_i) = \\argmin_{\\bm{x}, \\bm{\\alpha}_i} &||\\bm{y}-\\textbf{H}\\bm{x}||_2^2 + \\nonumber\\\\\n&\\eta\\sum_i \\{||\\textbf{W}(\\textbf{R}_i\\bm{x})-\\bm{\\alpha}_i||_2^2 +\\lambda||\\bm{\\alpha}_i||_1 \\}. \\label{Ana_sparse}\n\\end{align}\n\nNote that if image patches are extracted with maximum overlapping along both horizontal and vertical directions, the transformation of each patches can be implemented by the convolution with the set of filters $\\bm{w}_k, k=1,2, \\cdots, K$ with $\\bm{x}$- i.e.,\n\\begin{align}(\\bm{x},\\bm{z}_k) = \\argmin_{\\bm{x}, \\bm{z}_k} &||\\bm{y}-\\textbf{H}\\bm{x}||_2^2 + \\nonumber\\\\\n&\\eta\\sum_{k=1}^K \\{||\\bm{w}_k*\\bm{x}-\\bm{z}_k||_2^2 +\\lambda|| \\bm{z}_k||_1 \\}. \\label{Ana_sparse2}\n\\end{align}\n$z_k$ represents sparse feature map corresponding to filter $w_k$. Compared with the synthesis sparse model, sparse codes or feature maps in Eq. (\\ref{Ana_sparse}) and (\\ref{Ana_sparse2}) can be solved in a closed-form solution, leading to significant reduction in computational complexity.\n\n\\subsection{Connecting sparsity with neural networks}\n\nRecent studies have shown that sparse coding problem can be approximately solved by a neural network \\cite{gregor2010learning}. In \\cite{gregor2010learning}, a feed-forward neural network, which mimics the process of sparse coding, is proposed to approximate the sparse codes $\\bm{\\alpha}_i$ with respect to a given synthesis dictionary $\\textbf{D}$. By joint learning all model parameters from training dataset, good approximation of the underlying sparse codes can be obtained. In \\cite{wang2015deep}, the connection between sparse coding and neural networks has been further extended for the application of image SR. 
Sparse coding (SC) based neural network is designed to emulate sparse coding based SR process - i.e., sparse codes of LR patches are first approximated by a neural network and then used to reconstruct HR patches with a HR synthesis dictionary. By jointly training all model parameters, SC-based neural network can achieve much better results than conventional SC-based methods. The fruitful connection between sparse coding and neural networks also inspires us to combine them in a more principled manner in this paper.\n\n\\section{Structured analysis sparse coding (SASC) for image restoration}\n\nThe analysis SC model of Eq. (\\ref{Ana_sparse}) and (\\ref{Ana_sparse2}) has the advantage of computational efficiency when compared to the synthesis SC model. However, $\\ell_1$-norm based SC model ignores the correlation among sparse coefficients, leading to unsatisfactory results. Similar to previous works of nonlocal sparsity \\cite{dong2011centralized,dong2013nonlocally}, a structured ASC model for image restoration can be formulated as\n\\begin{align}(\\bm{x},\\bm{z}_k) = \\argmin_{\\bm{x}, \\bm{z}_k} &||\\bm{y}-\\textbf{H}\\bm{x}||_2^2 + \\nonumber\\\\\n&\\eta\\sum_{k=1}^K \\{||\\bm{w}_k*\\bm{x}-\\bm{z}_k||_2^2 + \\lambda||\\bm{z}_k-\\bm{\\mu}_k||_1 \\}, \\label{Stru_ASC}\n\\end{align}\n\nwhere $\\bm{\\mu}_k$ denotes the new nonlocal prior of the feature map (note that when $\\bm{\\mu}_k=\\bm{0}$ the structured ASC model reduces to the conventional ASC model in Eq. (\\ref{Ana_sparse2})). The introduction of $\\bm{\\mu}_k$ to sparse prior has the potential of leading to significant improvement of the estimation of sparse feature map $\\bm{z}_k$, which bridges the two competing approaches (model-based vs. learning-based).\n\nThe objective function of Eq. (\\ref{Stru_ASC}) can be solved by alternatively optimizing $\\bm{x}$ and $\\bm{z}_k$. 
With fixed feature maps $\\bm{z}_k$, the restored image $\\bm{x}$ can be updated by computing\n\\begin{equation}\n\\bm{x} = (\\textbf{H}^{\\top}\\textbf{H}+\\eta\\sum_k\\textbf{W}_k^{\\top}\\textbf{W}_k)^{-1}(\\textbf{H}^{\\top}\\bm{y} + \\eta\\sum_k\\textbf{W}_k^{\\top}\\bm{z}_k), \\label{Cal_x}\n\\end{equation}\nwhere $\\textbf{W}_k\\bm{x} = \\bm{w}_k*\\bm{x}$ denotes the 2D convolution with filter $\\bm{w}_k$. Since the matrix to be inverted in Eq. (\\ref{Cal_x}) is very large, it is impossible to compute Eq. (\\ref{Cal_x}) directly. Instead, it can be computed by the iterative conjugate gradient (CG) algorithm, which requires many iterations. Here, instead of computing an exact solution of the $\\bm{x}$-subproblem, we propose to update $\\bm{x}$ with a single step of gradient descent of the objective function for an inexact solution, as\n\\begin{equation}\n\\begin{split}\n\\bm{x}^{(t+1)} &= \\bm{x}^{(t)} - \\delta[\\textbf{H}^{\\top}(\\textbf{H}\\bm{x}^{(t)} - \\bm{y}) + \\eta\\sum_k\\textbf{W}_k^{\\top}(\\textbf{W}_k\\bm{x}^{(t)}-\\bm{z}_k)] \\\\\n&=\\textbf{A}\\bm{x}^{(t)} + \\delta\\textbf{H}^{\\top}\\bm{y} + \\delta\\eta\\sum_k\\textbf{W}_k^{\\top}\\bm{z}_k, \\label{Update_x}\n\\end{split}\n\\end{equation}\nwhere $\\textbf{A}=\\textbf{I}-\\delta\\textbf{H}^{\\top}\\textbf{H}-\\delta\\eta\\sum_k\\textbf{W}_k^{\\top}\\textbf{W}_k$, $\\delta$ is the predefined step size, and $\\bm{x}^{(t)}$ denotes the estimate of the whole image $\\bm{x}$ at the $t$-th iteration. As will be shown later, the update of $\\bm{x}^{(t)}$ can be efficiently implemented by convolutional operations. With fixed $\\bm{x}$, the feature maps can be updated via\n\\begin{equation}\n\\bm{z}_k = \\mathcal{S}_{\\lambda\/2}(\\bm{w}_k*\\bm{x} - \\bm{\\mu}_k) + \\bm{\\mu}_k, \\label{Cal_features}\n\\end{equation}\nwhere $\\mathcal{S}_{\\lambda}(\\cdot)$ denotes the soft-thresholding operator with a threshold of $\\lambda$. 
Now the question boils down to how to accurately estimate $\\bm{\\mu}_k$. In the following subsections, we propose to learn the structured sparse prior from both training data (external) and the degraded image (internal).\n\n\\subsection{Prior learning from training dataset}\n\nFor a given observation image $\\bm{y}$, we aim to learn the feature maps $\\bm{z}_k$ of a desired restored image $\\bm{x}$ with respect to filters $\\bm{w}_k$. Without loss of generality, the learning function can be defined as follows\n\\begin{equation}\n\\hat{\\textbf{Z}} = G(\\bm{y}; \\mathbf{\\Theta}),\n\\end{equation}\nwhere $\\hat{\\textbf{Z}}=[\\hat{\\bm{z}}_1,\\hat{\\bm{z}}_2,\\cdots,\\hat{\\bm{z}}_K]$ and $G(\\cdot)$ denotes the learning function parameterized by $\\mathbf{\\Theta}$. Considering the strong representational abilities of convolutional neural networks (CNN), we choose to learn $\\bm{z}_k$ on a deep CNN (DCNN). We have found that directly learning a set of feature maps $\\bm{z}_k$ with respect to $\\bm{w}_k$ is unstable; instead, we propose to first learn the desired restored image $\\hat{\\bm{x}}$ and then compute the feature maps via $\\hat{\\bm{z}}_k=\\bm{w}_k*\\hat{\\bm{x}}$. Generally speaking, any existing DCNN can be used for an initial estimate of $\\bm{x}$. The architecture of DCNN (as shown in Fig. \\ref{fig:cnn}) is similar to that of \\cite{dong2016image}. However, different from \\cite{dong2016image}, convolution filters of smaller size and more convolution layers are used for better estimation performance. The CNN contains $12$ convolution layers, each of which uses 64 filters sized by $3\\times 3\\times 64$. The last layer uses a single filter of size $3\\times 3$ for reconstruction. A shortcut or skip connection (not shown in the figure) exists from input to output implementing the concept of deep residual learning (similar to \\cite{kim2016deeply}). 
The objective function of DCNN training can be formulated as\n\\begin{equation}\n\\mathbf{\\Theta} = \\argmin_{\\mathbf{\\Theta}}\\sum_i ||CNN(\\bm{y}_i;\\mathbf{\\Theta}) - \\bm{x}_i||_2^2\n\\end{equation}\nwhere $\\bm{y}_i$ and $\\bm{x}_i$ denotes the observed and target training image pairs and $CNN(\\bm{y}_i;\\mathbf{\\Theta})$ denotes the output of CNN with parameters $\\mathbf{\\Theta}$. All network parameters are optimized through the back-propagation algorithm. After the estimation of $\\bm{x}$, the set of feature maps can be estimated by convoluting it with a set of analysis filters $\\bm{w}_k$- i.e., $\\hat{\\bm{z}_k} = \\bm{w}_k*\\hat{\\bm{x}}, k=1,2,\\cdots,K$.\n\n\\begin{figure}\n\\centering\n \\includegraphics[width=0.8\\linewidth]{.\/Figures\/CNN.png}\n\\caption{The structure of the CNN for prior learning. The CNN contains 11 convolution layer with ReLu nonlinear activation function. For each convolution layer, 64 filters of size $3\\times 3$ are used. The degraded image is fed into the network to get an initial estimate of the original image.}\n\\label{fig:cnn}\n\\end{figure}\n\n\\subsection{Prior learning by exploiting nonlocal self-similarity}\n\nIn addition to externally learning the prior feature maps via CNN, we can also obtain the estimates of $\\bm{z}_k$ from an \\emph{internal} estimate of the target image. Let $\\hat{\\bm{x}}_i$ denote the patch of size $\\sqrt{n}\\times\\sqrt{n}$ extracted at position $i$ from an initial estimate $\\hat{\\bm{x}}$; then sparse codes of $\\hat{\\bm{x}}_i$ can be computed as $\\hat{\\bm{z}}_{i,k} = \\bm{w}_k^{\\top}\\hat{\\bm{x}}_i$. Considering that the natural images contain rich self-repetitive patterns, a better estimate of $\\bm{z}_{i,k}$ can be obtained by a weighted average of the sparse codes over similar patches. 
Let $\\hat{\\bm{x}}_{i_l}, l=1,2,\\cdots,L$ denote the set of similar patches that are among the first $L$ closest matches and $G_i=\\{i_1,i_2,\\cdots,i_L\\}$ denote the collection of the positions corresponding to those similar patches. A nonlocal estimate of $\\hat{\\bm{z}}_{i,k}$ can be calculated as\n\\begin{equation}\n\\tilde{\\bm{z}}_{i,k} = \\sum_{l=1}^L w_{i_l}\\bm{w}_k^{\\top}\\hat{\\bm{x}}_{i_l} = \\bm{w}_k^{\\top}\\tilde{\\bm{x}}_{i}, \\label{NL_feature}\n\\end{equation}\nwhere $w_{i_l}=\\frac{1}{c}\\exp(-||\\hat{\\bm{x}}_{i_l}-\\hat{\\bm{x}}_{i}||\/h)$, $c$ is the normalization constant, $h$ is the predefined constant, and $\\tilde{\\bm{x}}_i=\\sum_{l=1}^Lw_{i_l}\\hat{\\bm{x}}_{i_l}$. From Eq. (\\ref{NL_feature}), we can see that a nonlocal estimate of the sparse codes can be obtained by first computing the nonlocal estimate of the target image followed by a 2D convolution with the filters $\\bm{w}_k$.\n\nBy combining the estimate obtained by the CNN and the nonlocal estimation, an improved hybrid prior of the feature maps can be obtained by\n\\begin{equation}\n\\bm{\\mu}_k = \\delta \\hat{\\bm{z}}_{k} + (1-\\delta)\\tilde{\\bm{z}}_k, \\label{Cal_mu}\n\\end{equation}\nwhere $0<\\delta<1$ is a preselected constant. The overall structured analysis sparse coding (SASC) with prior learning for image restoration is summarized in \\textbf{Algorithm 1}. We note that \\textbf{Algorithm 1} usually requires dozens of iterations to converge to a satisfactory result. Hence, the computational cost of the proposed SASC model is high; meanwhile, the analysis filters $\\bm{w}_k$ used in \\textbf{Algorithm 1} are kept fixed.\nA more computationally efficient implementation is to approximate the proposed SASC model by a deep neural network. 
Through end-to-end training, we can jointly optimize the parameters $\\eta$, $\\lambda$ and the analysis filters $\\bm{w}_k$ as will be elaborated next.\n\n\\setenumerate[1]{itemsep=0pt,partopsep=0pt,parsep=\\parskip,topsep=5pt}\n\\setitemize[1]{itemsep=0pt,partopsep=0pt,parsep=\\parskip,topsep=5pt}\n\\setdescription{itemsep=0pt,partopsep=0pt,parsep=\\parskip,topsep=5pt}\n\\begin{algorithm}[t]\n\\textbf{Initialization}:\\\n\\begin{itemize}\n\\item[(a)] Set parameters $\\eta$ and $\\lambda$;\n\\item[(b)] Compute the initial estimate $\\hat{\\bm{x}}^{(0)}$ by the CNN;\n\\item[(c)] Group a set of similar patches $G_i$ for each patch $\\hat{\\bm{x}}_{i}$ using $\\hat{\\bm{x}}^{(0)}$;\n\\item[(d)] Compute the prior feature maps $\\bm{\\mu}_k$ using Eq. (\\ref{Cal_mu});\n\\end{itemize}\n\\textbf{Outer loop}: Iteration over $t=1,2,\\cdots,T$\n\\begin{itemize}\n\\item[(a)] Compute the feature maps $\\bm{z}_k^{(t)}, k=1,\\cdots,K$ using Eq. (\\ref{Cal_features});\n\\item[(b)] Update the HR image $\\hat{\\bm{x}}^{(t)}$ via Eq. (\\ref{Update_x});\n\\item[(c)] Update $\\bm{\\mu}_k$ via Eq. (\\ref{Cal_mu}) based on $\\hat{\\bm{x}}^{(t)}$;\n\\end{itemize}\n\\textbf{Output}: $\\bm{x}^{(t)}$.\n\\caption{Image SR with structured ASC}\n\\label{alg:Alg1}\n\\end{algorithm}\n\n\\section{Network implementation of SASC for image restoration}\n\n\\begin{figure*}\n\\centering\n \\includegraphics[width=1\\linewidth]{.\/Figures\/net_s.png}\n\\caption{The structure of the proposed SASC network for image restoration. The whole architecture consists of a CNN sub-network and a SASC sub-network. The degraded image or intermediate result is combined with the CNN estimates and fed into multiple SASC recurrent stages to get the final reconstructed image.}\n\\label{Fig:NN_structure}\n\\end{figure*}\n\nThe main architecture for network implementation of SASC is shown in Fig. (\\ref{Fig:NN_structure}), which mimics the iterative steps of \\textbf{Algorithm 1}.\nAs shown in Fig. 
(\\ref{Fig:NN_structure}), the degraded observation image $\\bm{y}$ goes through the CNN for an initial estimate of the target image, which will then be used for grouping similar patches and computing prior feature maps. Let $G_i$ denote the set of similar patch positions for each exemplar patch $\\hat{\\bm{x}}_i$ (for computational simplicity, $G_i$ will not be updated during the iterative processing).\n\nThe initial estimate obtained via CNN and the set of similar patch positions $G_i$ are then fed into the SASC network that contains $k$ recurrent stages to reconstruct the target image. The SASC network exactly mimics the process of alternatively updating of the feature maps $\\bm{z}_k$ and the HR image $\\bm{x}$ as shown in Eq. (\\ref{Cal_features}) and (\\ref{Update_x}). The degraded image $\\bm{y}$ (after bicubic interpolation if down-sampling is involved) first goes through a convolution layer for sparse feature maps $\\bm{z}_k$, which will then be predicted by the learned prior feature maps $\\bm{\\mu}_k$. The residuals of the predicted feature maps, denoted by $\\bm{r}_k$, will go through a nonlinear soft-thresholding layer. Similar to \\cite{wang2015deep}, we can write the soft-thresholding operator as\n\\begin{equation}\ns_{\\tau_i}(r_i) = \\text{Sign}(r_i)r_i(|r_i|\/\\tau_i-1)_+,\n\\end{equation}\nwhere $\\tau_i$ denotes a tunable threshold. Note that the soft-thresholding layer can be implemented as two linear layers and a unit-threshold layer. After soft-thresholding layer, the learned prior feature maps $\\bm{\\mu}_k$ are added back to the output of soft-thresholding layer. The updated feature maps $\\bm{z}_k$ then go through a reconstruction layer with a set of 2D convolution filters- i.e., $\\sum_k\\textbf{W}_k^{\\top}\\bm{z}_k$. The final output of the reconstruction layer is further added with the preprocessed degraded image- i.e., denoted as $\\textbf{H}^{\\top}\\bm{y}$. 
Finally, the weighted intermediate result of reconstructed HR image is fed into a linear layer parameterized by matrix $\\textbf{A}$. Note that $\\textbf{A}$ corresponds to the matrix - i.e.,\n\\begin{equation}\n\\textbf{A}=\\textbf{I}-\\delta\\textbf{H}^{\\top}\\textbf{H}-\\delta\\eta\\sum_k\\textbf{W}_k^{\\top}\\textbf{W}_k.\n\\end{equation}\nNote that $\\sum_k(\\textbf{W}_k^{\\top}\\textbf{W}_k\\bm{x})$ can be efficiently computed by first convoluting $\\bm{x}$ with 2D filters and adding up the resulting feature maps - i.e., $\\sum_k(\\bm{w}_k^{\\top}*(\\bm{w}_k*\\bm{x}))$. For typical degradation matrices $\\textbf{H}$, $\\bar{\\textbf{H}}=\\textbf{H}^{\\top}\\textbf{H}$ can also be efficiently computed by convolutional operations. For image denoising, $\\bar{\\textbf{H}}=\\textbf{I}$. For image deblurring, the matrix-vector multiplication $\\textbf{H}^{\\top}(\\textbf{H}\\bm{x})$ can be simply implemented by two convolutional operations. For image super-resolution, we consider two typical downsampling operators, i.e., the Gaussian downsampling and the bicubic downsampling. For Gaussian downsampling, \\textbf{H}=\\textbf{D}\\textbf{B}, where $\\textbf{D}$ and $\\textbf{B}$ denote the downsampling and Gaussian blur matrices, respectively. In this case, $\\bm{y}=\\textbf{H}\\bm{x}$ can be efficiently computed by first convoluting $\\bm{x}$ with the corresponding Gaussian filter followed by subsampling, whereas $\\textbf{H}^{\\top}\\bm{y}$ can also be efficiently computed by first upsampling $\\bm{y}$ with zero-padding followed by convolution with the transposed Gaussian filter. For bicubic downsampling, we simply use the bicubic interpolator function with scaling factor $s$ and $1\/s$ ($s=2,3,4$) to implement $\\textbf{H}^{\\top}\\bm{y}$ and $\\textbf{H}\\bm{x}$, respectively. Note that all convolutional filters and the scale variables involved in the linear layer $\\textbf{A}$ can be discriminately learned through end-to-end training. 
After going through the linear layer $\\textbf{A}$, we obtain the reconstructed image; for better performance, such SASC sub-network can be repeated $K$ times.\n\nIn summary, there are in total $5$ trainable layers in each stage of our proposed network: two convolution layers $\\textbf{W}$, one reconstruction layer parameterized with $\\sum_k\\textbf{W}_k^{\\top}\\bm{z}_k$, one nonlinear soft-thresholding layer, and one linear layer $\\textbf{A}$. Parameters at different stages are not the same; but the $i$-th stage of different networks shares the same weights $W^{(i)}$. Mean square error is used as the cost function to train the network, and the overall objective function is given by\n\\begin{equation}\n\\mathbf{\\Theta} = \\argmin_{\\mathbf{\\Theta}}\\sum_i ||SASC(\\bm{y}_i;\\mathbf{\\Theta}) - \\bm{x}_i||_2^2\n\\end{equation}\nwhere $\\mathbf{\\Theta}$ denotes the set of parameters and $SASC(\\bm{y}_i;\\mathbf{\\Theta})$ denotes the reconstructed image by the network with parameters $\\mathbf{\\Theta}$. To train the network, the ADAM optimizer \\cite{} with setting $\\beta_1=0.9$ and $\\beta_2=0.999$ and $\\epsilon=10^{-8}$ is used.\nNote that to facilitate training, we separately train the CNN network and the SASC network. Some examples of the learned convolution filters are shown in Fig. (\\ref{Fig:filters}).\n\\begin{figure}\n\\centering\n \\includegraphics[width=1\\linewidth]{.\/Figures\/filters.png}\n\\caption{Visualization of some of the learned analysis filters in the first SASC stage. It can be inferred from this figure that the filters have different responses to the edge features of different directions and frequencies.}\n\\label{Fig:filters}\n\\end{figure}\n\n\\section{Experimental results}\n\nTo verify the performance of the proposed method, several image restoration experiments have been conducted, including denoising, deblurring and super-resolution. In all experiments, we empirically set $K=5$ stages for the proposed SASC network. 
To gain deeper insight into the proposed SASC network, we have implemented several variants of the proposed SASC network. The first variant is the analysis sparse coding (ASC) network without CNN and self-similarity prior learning. The second variant of the proposed method is the SASC network with self-similarity prior, which estimates $\\bm{\\mu}_k$ from the intermediately recovered HR image (without using the CNN sub-network), which is denoted as the SASC-SS method. We also present the image restoration results of the CNN sub-network, which consists of 12 convolutional layers with ReLU nonlinearity and $3\\times 3\\times 64$ kernels. The proposed SASC network with CNN and self-similarity prior learning is denoted as the SASC-CNN-SS method. To train the networks, we have adopted three training sets: the train400 dataset used in \\cite{Zhang:TIP17} for image denoising\/deblurring, the 91 training images used in \\cite{yang2010image} and the BSD200 dataset for image super-resolution.\n\n\\subsection{Image denoising}\n\nIn our experiment, we have extracted patches of size $40\\times 40$ from the train400 dataset \\cite{Zhang:TIP17} and used augmentation with flips and rotations to generate $6000\\times 128$ patches as the training data. The commonly used $12$ images used in \\cite{BM3D} (as shown in Fig. \\ref{fig:set12}) were used as the test set. The BSD68 dataset was also used as a benchmark dataset. The average PSNR and SSIM results of the variants of the proposed SASC methods on the two sets are shown in Table \\ref{Variants-den}. From Table \\ref{Variants-den}, one can see that by incorporating the nonlocal self-similarity prior, the SASC-SS method outperforms the ASC method; by integrating both CNN (external) and nonlocal self-similarity (internal) priors, the proposed SASC-CNN-SS method further improves the denoising performance. \\emph{Similar observations have also been made for image deblurring and super-resolution}. 
Due to the limited page space, here we only show the comparison studies of the variants of the proposed method for image denoising.\n\nWe have also compared the proposed method with several popular denoising methods including model-based denoising methods (BM3D\\cite{BM3D}, EPLL\\cite{Zoran:ICCV11}, and WNNM \\cite{WNNM}) and two deep learning based methods (TNRD \\cite{TNRD} and DnCNN-S\\cite{Zhang:TIP17}). Table \\ref{den-set12} shows the PSNR results of the competing methods on 12 test images. It can be seen that the proposed method performs much better than other competing methods. Specifically, the proposed method outperforms the current state-of-the-art DnCNN-S \\cite{Zhang:TIP17} by up to $0.56dB$ on average. Parts of the denoised images by different methods are shown in Figs. \\ref{den-Barbara}-\\ref{den-test044}. It can be seen that the proposed method produces more visually pleasing results, as can be clearly observed in the regions of self-repeating patterns (edges and textures).\n\n\n\\begin{figure*}[!tbh]\n\\renewcommand{\\arraystretch}{0.4}\n\\centering\n\\subfigure[]{\n \\includegraphics[width=0.07\\textwidth]{.\/Test_Image\/Set12\/01.png}\n }\\hspace{-0.6em}\n\\subfigure[]{\n \\includegraphics[width=0.07\\textwidth]{.\/Test_Image\/Set12\/02.png}\n }\\hspace{-0.6em}\n\\subfigure[]{\n \\includegraphics[width=0.07\\textwidth]{.\/Test_Image\/Set12\/03.png}\n }\\hspace{-0.6em}\n\\subfigure[]{\n \\includegraphics[width=0.07\\textwidth]{.\/Test_Image\/Set12\/04.png}\n }\\hspace{-0.6em}\n\\subfigure[]{\n \\includegraphics[width=0.07\\textwidth]{.\/Test_Image\/Set12\/05.png}\n }\\hspace{-0.6em}\n\\subfigure[]{\n \\includegraphics[width=0.07\\textwidth]{.\/Test_Image\/Set12\/06.png}\n }\\hspace{-0.6em}\n\\subfigure[]{\n \\includegraphics[width=0.07\\textwidth]{.\/Test_Image\/Set12\/07.png}\n }\\hspace{-0.6em}\n\\subfigure[]{\n \\includegraphics[width=0.07\\textwidth]{.\/Test_Image\/Set12\/08.png}\n }\\hspace{-0.6em}\n\\subfigure[]{\n 
\\includegraphics[width=0.07\\textwidth]{.\/Test_Image\/Set12\/09.png}\n }\\hspace{-0.6em}\n\\subfigure[]{\n \\includegraphics[width=0.07\\textwidth]{.\/Test_Image\/Set12\/10.png}\n }\\hspace{-0.6em}\n\\subfigure[]{\n \\includegraphics[width=0.07\\textwidth]{.\/Test_Image\/Set12\/11.png}\n }\\hspace{-0.6em}\n\\subfigure[]{\n \\includegraphics[width=0.07\\textwidth]{.\/Test_Image\/Set12\/12.png}\n }\n \\\\ \\caption{The test images used for image denoising\/deblurring. From left to right: \\textit{C.Man}, \\textit{House}, \\textit{Peppers}, \\textit{Starfish}, \\textit{Monarch}, \\textit{Airplane}, \\textit{Parrot}, \\textit{Lena}, \\textit{Barbara}, \\textit{Boat}, \\textit{Man}, and \\textit{Couple}. }\n\\label{fig:set12}\n\\end{figure*}\n\n\n\\begin{table*}[tbh]\n\\centering\n\\caption{Average PSNR and SSIM results of the variants of the proposed denoising method}\n\\label{Variants-den}\n\\begin{tabular}{!{\\vrule width1.2pt}c!{\\vrule width1.2pt}c!{\\vrule width1.2pt}c\n|c|c|c|c|c|c|c|c|c!{\\vrule width1.2pt}}\n\\Xhline{1.2pt}\n\n\\multirow{2}{*}{} & \\multicolumn{3}{c|}{Set12} & \\multicolumn{3}{c|}{BSD68} \\\\ \\cline{2-7}\n & $\\sigma=15$ & $\\sigma=25$ & $\\sigma=50$ & $\\sigma=15$ & $\\sigma=25$ & $\\sigma=50$ \\\\ \\hline\nASC & \\begin{tabular}[c]{@{}c@{}}32.60\\\\0.8928\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}30.30\\\\0.8470\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}27.01\\\\0.7400\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}31.65\\\\0.8825\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}29.11\\\\0.8097\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}26.01\\\\0.6704\\end{tabular} \\\\ \\hline\n\nSASC-SS & \\begin{tabular}[c]{@{}c@{}}32.98\\\\0.9016\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}30.57\\\\0.8601\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}27.35\\\\0.7669\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}31.88\\\\0.8888\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}29.36\\\\0.8243\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}26.34\\\\0.7006\\end{tabular} 
\\\\ \\hline\n\nCNN-Prior & \\begin{tabular}[c]{@{}c@{}}32.85\\\\0.8897\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}30.38\\\\0.8394\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}27.24\\\\0.7611\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}31.75\\\\0.8839\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}29.17\\\\0.8115\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}26.23\\\\0.6924\\end{tabular} \\\\ \\hline\n\nSASC-CNN-SS & \\begin{tabular}[c]{@{}c@{}}33.32\\\\0.9039\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}30.99\\\\0.8673\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}27.69\\\\0.7915\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}32.03\\\\0.8870\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}29.63\\\\0.8289\\end{tabular} & \\begin{tabular}[c]{@{}c@{}}26.66\\\\0.7254\\end{tabular} \\\\ \\hline\n\n\\end{tabular}\n\\end{table*}\n\n\n\\begin{table*}[tbh]\n\\centering\n\\caption{Results of proposed denoising method in Set12}\n\\setlength{\\tabcolsep}{4.5pt}\n\\begin{tabular}{!{\\vrule width1pt}l!{\\vrule width1pt}l\n!{\\vrule width1pt}l!{\\vrule width1pt}l!{\\vrule width1pt}l\n!{\\vrule width1pt}l!{\\vrule width1pt}l!{\\vrule width1pt}l\n!{\\vrule width1pt}l!{\\vrule width1pt}l!{\\vrule width1pt}l\n!{\\vrule width1pt}l!{\\vrule width1pt}l!{\\vrule width1pt}l!{\\vrule width1pt}}\\Xhline{1.2pt}\n\n\\multirow{1}{*}{IMAGE} & \\multirow{1}{*}{C.Man} \t& \\multirow{1}{*}{House} \t\n& \\multirow{1}{*}{Peppers} \t\t& \\multirow{1}{*}{Starfish} \t& \\multirow{1}{*}{Monar} \t\t& \\multirow{1}{*}{Airpl} \t& \\multirow{1}{*}{Parrot} \t\t & \\multirow{1}{*}{Lena} \t\t& \\multirow{1}{*}{Barbara} \t\t& \\multirow{1}{*}{Boat} \t\t& \\multirow{1}{*}{Man} \t\t\t& \\multirow{1}{*}{Couple} \t& \\multirow{1}{*}{\\textbf{Avg}} \\\\ \\Xhline{1pt}\n\n\\multirowthead{1}{Noise Lv} & \\multicolumn{13}{c!{\\vrule width1pt}} {$\\sigma=15$}\n\\\\ \\Xhline{1pt}\n\\multirowthead{1}{\\cite{BM3D}} & \\multicolumn{1}{l|}{31.92} & \\multicolumn{1}{l|}{34.94} & \\multicolumn{1}{l|}{32.70} & \\multicolumn{1}{l|}{31.15} & 
\\multicolumn{1}{l|}{31.86} & \\multicolumn{1}{l|}{31.08} & \\multicolumn{1}{l|}{31.38} & \\multicolumn{1}{l|}{34.27} & \\multicolumn{1}{l|}{33.11} & \\multicolumn{1}{l|}{32.14} & \\multicolumn{1}{l|}{31.93} & \\multicolumn{1}{l!{\\vrule width1pt}}{32.11} & \\multicolumn{1}{l!{\\vrule width1pt}}{32.38} \\\\ \\hline\n\\multirowthead{1}{\\cite{WNNM}} & \\multicolumn{1}{l|}{32.18} & \\multicolumn{1}{l|}{35.15} & \\multicolumn{1}{l|}{32.97} & \\multicolumn{1}{l|}{31.83} & \\multicolumn{1}{l|}{32.72} & \\multicolumn{1}{l|}{31.40} & \\multicolumn{1}{l|}{31.61} & \\multicolumn{1}{l|}{34.38} & \\multicolumn{1}{l|}{33.61} & \\multicolumn{1}{l|}{32.28} & \\multicolumn{1}{l|}{32.12} & \\multicolumn{1}{l!{\\vrule width1pt}}{32.18} & \\multicolumn{1}{l!{\\vrule width1pt}}{32.70} \\\\ \\hline\n\\multirowthead{1}{\\cite{Zoran:ICCV11}} & \\multicolumn{1}{l|}{31.82} & \\multicolumn{1}{l|}{34.14} & \\multicolumn{1}{l|}{32.58} & \\multicolumn{1}{l|}{31.08} & \\multicolumn{1}{l|}{32.03} & \\multicolumn{1}{l|}{31.16} & \\multicolumn{1}{l|}{31.40} & \\multicolumn{1}{l|}{33.87} & \\multicolumn{1}{l|}{31.34} & \\multicolumn{1}{l|}{31.91} & \\multicolumn{1}{l|}{31.97} & \\multicolumn{1}{l!{\\vrule width1pt}}{31.90} & \\multicolumn{1}{l!{\\vrule width1pt}}{32.10} \\\\ \\hline\n\\multirowthead{1}{\\cite{TNRD}} & \\multicolumn{1}{l|}{32.19} & \\multicolumn{1}{l|}{34.55} & \\multicolumn{1}{l|}{33.03} & \\multicolumn{1}{l|}{31.76} & \\multicolumn{1}{l|}{32.57} & \\multicolumn{1}{l|}{31.47} & \\multicolumn{1}{l|}{31.63} & \\multicolumn{1}{l|}{34.25} & \\multicolumn{1}{l|}{32.14} & \\multicolumn{1}{l|}{32.15} & \\multicolumn{1}{l|}{32.24} & \\multicolumn{1}{l!{\\vrule width1pt}}{32.11} & \\multicolumn{1}{l!{\\vrule width1pt}}{32.51} \\\\ \\hline\n\\multirowthead{1}{\\cite{Zhang:TIP17}} & \\multicolumn{1}{l|}{\\textbf{32.62}} & \\multicolumn{1}{l|}{35.00} & \\multicolumn{1}{l|}{33.29} & \\multicolumn{1}{l|}{32.23} & \\multicolumn{1}{l|}{33.10} & \\multicolumn{1}{l|}{31.70} & 
\\multicolumn{1}{l|}{31.84} & \\multicolumn{1}{l|}{34.63} & \\multicolumn{1}{l|}{32.65} & \\multicolumn{1}{l|}{32.42} & \\multicolumn{1}{l|}{32.47} & \\multicolumn{1}{l!{\\vrule width1pt}}{32.47} & \\multicolumn{1}{l!{\\vrule width1pt}}{32.87} \\\\ \\hline\n\n\\multirowthead{1}{\\textbf{Ours}} & \\multicolumn{1}{l|}{32.16} & \\multicolumn{1}{l|}{\\textbf{35.51}} & \\multicolumn{1}{l|}{\\textbf{33.87}} & \\multicolumn{1}{l|}{\\textbf{32.67}} & \\multicolumn{1}{l|}{\\textbf{33.30}} & \\multicolumn{1}{l|}{\\textbf{31.98}} & \\multicolumn{1}{l|}{\\textbf{32.21}} & \\multicolumn{1}{l|}{\\textbf{35.19}} & \\multicolumn{1}{l|}{\\textbf{33.92}} & \\multicolumn{1}{l|}{\\textbf{32.99}} & \\multicolumn{1}{l|}{\\textbf{32.93}} & \\multicolumn{1}{l!{\\vrule width1pt}}{\\textbf{33.08}} & \\multicolumn{1}{l!{\\vrule width1pt}}{\\textbf{33.31}} \\\\ \\Xhline{1pt}\n\n\\multirowthead{1}{Noise Lv} & \\multicolumn{13}{c!{\\vrule width1pt}}{$\\sigma=25$} \\\\ \\Xhline{1pt}\n\\multirowthead{1}{\\cite{BM3D}} & \\multicolumn{1}{l|}{29.45} & \\multicolumn{1}{l|}{32.86} & \\multicolumn{1}{l|}{30.16} & \\multicolumn{1}{l|}{28.56} & \\multicolumn{1}{l|}{29.25} & \\multicolumn{1}{l|}{28.43} & \\multicolumn{1}{l|}{28.93} & \\multicolumn{1}{l|}{32.08} & \\multicolumn{1}{l|}{30.72} & \\multicolumn{1}{l|}{29.91} & \\multicolumn{1}{l|}{29.62} & \\multicolumn{1}{l!{\\vrule width1pt}}{29.72} & \\multicolumn{1}{l!{\\vrule width1pt}}{29.98} \\\\ \\hline\n\\multirowthead{1}{\\cite{WNNM}} & \\multicolumn{1}{l|}{29.64} & \\multicolumn{1}{l|}{33.23} & \\multicolumn{1}{l|}{30.40} & \\multicolumn{1}{l|}{29.03} & \\multicolumn{1}{l|}{29.85} & \\multicolumn{1}{l|}{28.69} & \\multicolumn{1}{l|}{29.12} & \\multicolumn{1}{l|}{32.24} & \\multicolumn{1}{l|}{31.24} & \\multicolumn{1}{l|}{30.03} & \\multicolumn{1}{l|}{29.77} & \\multicolumn{1}{l!{\\vrule width1pt}}{29.82} & \\multicolumn{1}{l!{\\vrule width1pt}}{30.26} \\\\ \\hline\n\\multirowthead{1}{\\cite{Zoran:ICCV11}} & \\multicolumn{1}{l|}{29.24} & 
\\multicolumn{1}{l|}{32.04} & \\multicolumn{1}{l|}{30.07} & \\multicolumn{1}{l|}{28.43} & \\multicolumn{1}{l|}{29.30} & \\multicolumn{1}{l|}{28.56} & \\multicolumn{1}{l|}{28.91} & \\multicolumn{1}{l|}{31.62} & \\multicolumn{1}{l|}{28.55} & \\multicolumn{1}{l|}{29.69} & \\multicolumn{1}{l|}{29.63} & \\multicolumn{1}{l!{\\vrule width1pt}}{29.48} & \\multicolumn{1}{l!{\\vrule width1pt}}{29.63} \\\\ \\hline\n\\multirowthead{1}{\\cite{TNRD}} & \\multicolumn{1}{l|}{29.71} & \\multicolumn{1}{l|}{32.54} & \\multicolumn{1}{l|}{30.55} & \\multicolumn{1}{l|}{29.02} & \\multicolumn{1}{l|}{29.86} & \\multicolumn{1}{l|}{28.89} & \\multicolumn{1}{l|}{29.18} & \\multicolumn{1}{l|}{32.00} & \\multicolumn{1}{l|}{29.41} & \\multicolumn{1}{l|}{29.92} & \\multicolumn{1}{l|}{29.88} & \\multicolumn{1}{l!{\\vrule width1pt}}{29.71} & \\multicolumn{1}{l!{\\vrule width1pt}}{30.06} \\\\ \\hline\n\\multirowthead{1}{\\cite{Zhang:TIP17}} & \\multicolumn{1}{l|}{\\textbf{30.19}} & \\multicolumn{1}{l|}{33.09} & \\multicolumn{1}{l|}{30.85} & \\multicolumn{1}{l|}{29.40} & \\multicolumn{1}{l|}{30.23} & \\multicolumn{1}{l|}{29.13} & \\multicolumn{1}{l|}{29.42} & \\multicolumn{1}{l|}{32.45} & \\multicolumn{1}{l|}{30.01} & \\multicolumn{1}{l|}{30.22} & \\multicolumn{1}{l|}{30.11} & \\multicolumn{1}{l!{\\vrule width1pt}}{30.12} & \\multicolumn{1}{l!{\\vrule width1pt}}{30.43} \\\\ \\hline\n\n\n\\multirowthead{1}{\\textbf{Ours}} & \\multicolumn{1}{l|}{29.82} & \\multicolumn{1}{l|}{\\textbf{33.82}} & \\multicolumn{1}{l|}{\\textbf{31.47}} & \\multicolumn{1}{l|}{\\textbf{30.10}} & \\multicolumn{1}{l|}{\\textbf{30.67}} & \\multicolumn{1}{l|}{\\textbf{29.50}} & \\multicolumn{1}{l|}{\\textbf{29.87}} & \\multicolumn{1}{l|}{\\textbf{33.09}} & \\multicolumn{1}{l|}{\\textbf{31.32}} & \\multicolumn{1}{l|}{\\textbf{30.86}} & \\multicolumn{1}{l|}{\\textbf{30.64}} & \\multicolumn{1}{l!{\\vrule width1pt}}{\\textbf{30.77}} & \\multicolumn{1}{l!{\\vrule width1pt}}{\\textbf{30.99}} \\\\ 
\\Xhline{1pt}\n\n\\multirowthead{1}{Noise Lv} & \\multicolumn{13}{c!{\\vrule width1pt}}{$\\sigma=50$} \\\\ \\Xhline{1pt}\n\\multirowthead{1}{\\cite{BM3D}} & \\multicolumn{1}{l|}{26.13} & \\multicolumn{1}{l|}{29.69} & \\multicolumn{1}{l|}{26.68} & \\multicolumn{1}{l|}{25.04} & \\multicolumn{1}{l|}{25.82} & \\multicolumn{1}{l|}{25.10} & \\multicolumn{1}{l|}{25.90} & \\multicolumn{1}{l|}{29.05} & \\multicolumn{1}{l|}{27.23} & \\multicolumn{1}{l|}{26.78} & \\multicolumn{1}{l|}{26.81} & \\multicolumn{1}{l!{\\vrule width1pt}}{26.46} & \\multicolumn{1}{l!{\\vrule width1pt}}{26.73} \\\\ \\hline\n\\multirowthead{1}{\\cite{WNNM}} & \\multicolumn{1}{l|}{26.42} & \\multicolumn{1}{l|}{30.33} & \\multicolumn{1}{l|}{26.91} & \\multicolumn{1}{l|}{25.43} & \\multicolumn{1}{l|}{26.32} & \\multicolumn{1}{l|}{25.42} & \\multicolumn{1}{l|}{26.09} & \\multicolumn{1}{l|}{29.25} & \\multicolumn{1}{l|}{\\textbf{27.79}} & \\multicolumn{1}{l|}{26.97} & \\multicolumn{1}{l|}{26.94} & \\multicolumn{1}{l!{\\vrule width1pt}}{26.64} & \\multicolumn{1}{l!{\\vrule width1pt}}{27.04} \\\\ \\hline\n\\multirowthead{1}{\\cite{Zoran:ICCV11}} & \\multicolumn{1}{l|}{26.02} & \\multicolumn{1}{l|}{28.76} & \\multicolumn{1}{l|}{26.63} & \\multicolumn{1}{l|}{25.04} & \\multicolumn{1}{l|}{25.78} & \\multicolumn{1}{l|}{25.24} & \\multicolumn{1}{l|}{25.84} & \\multicolumn{1}{l|}{28.43} & \\multicolumn{1}{l|}{24.82} & \\multicolumn{1}{l|}{26.65} & \\multicolumn{1}{l|}{26.72} & \\multicolumn{1}{l!{\\vrule width1pt}}{26.24} & \\multicolumn{1}{l!{\\vrule width1pt}}{26.35} \\\\ \\hline\n\\multirowthead{1}{\\cite{TNRD}} & \\multicolumn{1}{l|}{26.62} & \\multicolumn{1}{l|}{29.48} & \\multicolumn{1}{l|}{27.10} & \\multicolumn{1}{l|}{25.42} & \\multicolumn{1}{l|}{26.31} & \\multicolumn{1}{l|}{25.59} & \\multicolumn{1}{l|}{26.16} & \\multicolumn{1}{l|}{28.93} & \\multicolumn{1}{l|}{25.70} & \\multicolumn{1}{l|}{26.94} & \\multicolumn{1}{l|}{26.98} & \\multicolumn{1}{l!{\\vrule width1pt}}{26.50} & \\multicolumn{1}{l!{\\vrule 
width1pt}}{26.81} \\\\ \\hline\n\\multirowthead{1}{\\cite{Zhang:TIP17}} & \\multicolumn{1}{l|}{\\textbf{27.00}} & \\multicolumn{1}{l|}{30.02} & \\multicolumn{1}{l|}{27.29} & \\multicolumn{1}{l|}{25.70} & \\multicolumn{1}{l|}{26.77} & \\multicolumn{1}{l|}{25.87} & \\multicolumn{1}{l|}{26.48} & \\multicolumn{1}{l|}{29.37} & \\multicolumn{1}{l|}{26.23} & \\multicolumn{1}{l|}{27.19} & \\multicolumn{1}{l|}{27.24} & \\multicolumn{1}{l!{\\vrule width1pt}}{26.90} & \\multicolumn{1}{l!{\\vrule width1pt}}{27.17} \\\\ \\hline\n\n\n\\multirowthead{1}{\\textbf{Ours}} & \\multicolumn{1}{l|}{26.90} & \\multicolumn{1}{l|}{\\textbf{30.50}} & \\multicolumn{1}{l|}{\\textbf{27.89}} & \\multicolumn{1}{l|}{\\textbf{26.46}} & \\multicolumn{1}{l|}{\\textbf{27.37}} & \\multicolumn{1}{l|}{\\textbf{26.35}} & \\multicolumn{1}{l|}{\\textbf{26.96}} & \\multicolumn{1}{l|}{\\textbf{29.87}} & \\multicolumn{1}{l|}{27.17} & \\multicolumn{1}{l|}{\\textbf{27.74}} & \\multicolumn{1}{l|}{\\textbf{27.67} } & \\multicolumn{1}{l!{\\vrule width1pt}}{\\textbf{27.41}} & \\multicolumn{1}{l!{\\vrule width1pt}}{\\textbf{27.69}} \\\\ \\Xhline{1pt}\n\\end{tabular}\n\\label{den-set12}\n\\end{table*}\n\n\n\\begin{figure*}[!tbh]\n\\renewcommand{\\arraystretch}{0.4}\n\\centering\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/sf\/ori_both.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/sf\/bm3d_zoomed.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/sf\/wnnm_zoomed.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/sf\/tnrd_zoomed.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/sf\/dncnn_zoomed.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/sf\/ours_zoomed.png}\n }\\hspace{-0.8em}\n\n \\caption{Denoising results of noise level of 50. 
(a) Parts of the original images of ``starfish'' in Set12; (b) BM3D\\cite{BM3D}(PSNR=25.04dB); (c) WNNM\\cite{WNNM}(PSNR=25.43dB); (d) TNRD\\cite{TNRD}(PSNR=25.42dB); (e) DnCNN\\cite{Zhang:TIP17}(PSNR=25.70dB); (f) \\textbf{Proposed method (PSNR=26.46dB)}}\n\\label{den-Barbara}\n\\end{figure*}\n\n\\begin{figure*}[!tbh]\n\\renewcommand{\\arraystretch}{0.4}\n\\centering\n\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/mo\/ori_both.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/mo\/bm3d_zoomed.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/mo\/wnnm_zoomed.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/mo\/tnrd_zoomed.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/mo\/dncnn_zoomed.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/mo\/ours_zoomed.png}\n }\\hspace{-0.8em}\n\n \\caption{Denoising results at the noise level of 50. 
(a) Parts of the original images of ``monarch'' in Set12; (b) BM3D\\cite{BM3D}(PSNR=25.82dB); (c) WNNM\\cite{WNNM}(PSNR=26.32dB); (d) TNRD\\cite{TNRD}(PSNR=26.31dB);(e) DnCNN\\cite{Zhang:TIP17}(PSNR=26.77dB); (f) \\bf{Proposed method(PSNR=27.37dB)}}\n\\end{figure*}\n\n\n\\begin{figure*}[!tbh]\n\\renewcommand{\\arraystretch}{0.4}\n\\centering\n\n\\subfigure[]{\n \\includegraphics[width=0.18\\textwidth]{.\/Figures\/denoise_44_50\/ori_both.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.18\\textwidth]{.\/Figures\/denoise_44_50\/bm3d.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.18\\textwidth]{.\/Figures\/denoise_44_50\/tnrd.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.18\\textwidth]{.\/Figures\/denoise_44_50\/dncnn.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.18\\textwidth]{.\/Figures\/denoise_44_50\/ours.png}\n }\\hspace{-0.8em}\n\n \\caption{Denoising results of noise level of 50. (a) Parts of the original images of ``test044'' in BSD68; (b) BM3D\\cite{BM3D}(PSNR=23.65dB); (c) TNRD\\cite{TNRD}(PSNR=24.05dB);(d) DnCNN\\cite{Zhang:TIP17}(PSNR=24.35dB); (e)\\textbf{Proposed method(PSNR=24.89dB)}}\n\\label{den-test044}\n\\end{figure*}\n\n\n\\begin{table*}[tbh]\n\\centering\n\\caption{Results of different deblurring methods}\n\\begin{tabular}{!{\\vrule width1pt}c!{\\vrule width1pt}c!{\\vrule width1pt}c|c|c|c|c|c|c|c|c|c|c!{\\vrule width1pt}}\n\\Xhline{1pt}\nMethods & $\\sigma$ & Butt. & Pepp. & Parr. & star. & Barb. 
& Boats & C.Man & House & Leaves & Lena & Avg \\\\ \\Xhline{1pt}\n\n\\multicolumn{13}{!{\\vrule width1pt}c!{\\vrule width1pt}}{Kenel 1 (19*19)} \\\\ \\Xhline{1pt}\n\\cite{Zoran:ICCV11} & \\multirow{3}{*}{2.6} & 26.23 & 27.40 & 33.78 & 29.79 & 29.78 & 30.15 & 30.24 & 31.73 & 25.84 & 31.37 & 29.63 \\\\ \\cline{1-1} \\cline{3-13}\n\\cite{Zhang:CVPR17} & & 32.23 & 32.00 & 34.48 & 32.26 & 32.38 & 33.05 & 31.50 & 34.89 & 33.29 & 33.54 & 32.96 \\\\ \\cline{1-1} \\cline{3-13}\n\n\n\\textbf{Ours} & & \\textbf{32.58} & \\textbf{32.36} & \\textbf{34.63} & \\textbf{32.54} & \\textbf{32.52} & \\textbf{33.27} & \\textbf{31.83} & \\textbf{35.03} & \\textbf{33.30} & \\textbf{33.66} & \\textbf{ 33.17 } \\\\ \\Xhline{1pt}\n\\cite{Zoran:ICCV11} & \\multirow{3}{*}{7.7} & 24.27 & 26.15 & 30.01 & 26.81 & 26.95 & 27.72 & 27.37 & 29.89 & 23.81 & 28.69 & 27.17 \\\\ \\cline{1-1} \\cline{3-13}\n\\cite{Zhang:CVPR17} & & 28.51 & 28.88 & \\textbf{31.07 } & 27.86 & \\textbf{28.18} & \\textbf{29.13} & 28.11 & \\textbf{32.03} & \\textbf{28.42} & \\textbf{29.52} & 29.17 \\\\ \\cline{1-1} \\cline{3-13}\n\n\n\\textbf{Ours} & & \\textbf{28.53} & \\textbf{28.88} & 31.06 & \\textbf{27.93} & 28.17 & 29.11 & \\textbf{28.14} & 31.94 & 28.40 & 29.49 & \\textbf{29.17} \\\\ \\Xhline{1pt}\n\\multicolumn{13}{!{\\vrule width1pt}c!{\\vrule width1pt}}{Kenel 2 (17*17)} \\\\ \\Xhline{1pt}\n\\cite{Zoran:ICCV11} & \\multirow{3}{*}{2.6} & 26.48 & 27.37 & 33.88 & 29.56 & 28.29 & 29.61 & 29.66 & 32.97 & 25.69 & 30.67 & 29.42 \\\\ \\cline{1-1} \\cline{3-13}\n\\cite{Zhang:CVPR17} & & 31.97 & 31.89 & 34.46 & 32.18 & 32.00 & 33.06 & 31.29 & 34.82 & 32.96 & 33.35 & 32.80 \\\\ \\cline{1-1} \\cline{3-13}\n\n\n\\textbf{Ours} & & \\textbf{32.22} & \\textbf{32.16} & \\textbf{34.57} & \\textbf{32.36} & \\textbf{32.06} & \\textbf{33.17} & \\textbf{31.52} & \\textbf{34.99} & \\textbf{32.96} & \\textbf{33.41} & \\textbf{32.94} \\\\ \\Xhline{1pt}\n\\cite{Zoran:ICCV11} & \\multirow{3}{*}{7.7} & 23.85 & 26.04 & 29.99 & 26.78 & 25.47 & 
27.46 & 26.58 & 30.49 & 23.42 & 28.20 & 26.83 \\\\ \\cline{1-1} \\cline{3-13}\n\\cite{Zhang:CVPR17} & & \\textbf{28.21} & \\textbf{28.71} & \\textbf{30.68} & 27.67 & 27.37 & 28.95 & 27.70 & \\textbf{31.95} & 27.92 & \\textbf{29.27} & 28.84 \\\\ \\cline{1-1} \\cline{3-13}\n\n\n\\textbf{Ours} & & 28.20 & 28.71 & 30.64 & \\textbf{27.73} & \\textbf{27.47} & \\textbf{28.97} & \\textbf{27.71} & 31.86 & \\textbf{27.94} & 29.21 & \\textbf{28.84}\\\\ \\Xhline{1pt}\n\\end{tabular}\n\\label{deblur-set10}\n\\end{table*}\n\n\n\\begin{figure*}[!tbh]\n\\renewcommand{\\arraystretch}{0.4}\n\\centering\n\\subfigure[]{\n \\includegraphics[width=0.22\\textwidth]{.\/Figures\/deblur_house\/ori_both.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.22\\textwidth]{.\/Figures\/deblur_house\/epll_zoomed.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.22\\textwidth]{.\/Figures\/deblur_house\/dncnn_zoomed.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.22\\textwidth]{.\/Figures\/deblur_house\/ours_zoomed.png}\n }\\hspace{-0.8em}\n\n \\caption{Deblurring results at the noise level of 2.55, kernel 1. (a) Parts of the original images of ``house'' in Set10; (b) EPLL\\cite{Zoran:ICCV11}(PSNR=32.13dB); (c) IR-CNN\\cite{Zhang:CVPR17}(PSNR=34.87dB); (d)\\textbf{Proposed method(PSNR=35.53dB)}}\n\\label{fig:deblur1}\n\\end{figure*}\n\n\\begin{figure*}[!tbh]\n\\renewcommand{\\arraystretch}{0.4}\n\\centering\n\\subfigure[]{\n \\includegraphics[width=0.22\\textwidth]{.\/Figures\/deblur_barbara\/ori.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.22\\textwidth]{.\/Figures\/deblur_barbara\/epll.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.22\\textwidth]{.\/Figures\/deblur_barbara\/dncnn.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.22\\textwidth]{.\/Figures\/deblur_barbara\/ours.png}\n }\\hspace{-0.8em}\n\n \\caption{Deblurring results at the noise level of 7.65, kernel 2. 
(a) Parts of the original images of ``barbara'' in Set10; (b) EPLL\\cite{Zoran:ICCV11}(PSNR=25.70dB); (c) IR-CNN\\cite{Zhang:CVPR17}(PSNR=27.38dB); (d) \\textbf{Proposed method (PSNR=27.73dB)}}\n\\label{fig:deblur2}\n\\end{figure*}\n\n\n\\subsection{Image super-resolution}\n\nWith augmentation, $600,000$ pairs of LR\/HR image patches were extracted from the pair of LR\/HR training images. The LR patch is of size $40\\times 40$ and the HR patch is of size $40s\\times 40s$; we have trained a separate network for each scaling factor $s$ ($s=2,3,4$). The commonly used datasets, including Set5, Set14, the BSD100, and the Urban100 dataset \\cite{kim2016accurate} containing 100 high-quality images were used in our experiments.\nWe have compared the proposed method against several leading deep learning based image SR methods including SRCNN \\cite{SRCNN}, VDSR \\cite{kim2016accurate} and DRCN\\cite{DRCN}, and denoising-based SR methods (i.e. TNRD\\cite{TNRD}). For fair comparisons, the results of all benchmark methods are either directly cited from their papers or generated by the codes released by the authors. The PSNR results of these competing methods for the bicubic case are shown in Tables \\ref{SR-set5}-\\ref{SR-others}, from which one can see that the proposed method outperforms other competing methods. Portions of reconstructed HR images by different methods are shown in Figs. \\ref{fig:SR1} and \\ref{fig:SR2}. 
It can be seen that the proposed method can more faithfully restore fine text details, while other methods including VDSR \\cite{kim2016accurate} fail to deliver the same.\n\n\\begin{table*}[tbh]\n\\centering\n\\caption{Results of proposed super-resolution method in Set5}\n\\setlength{\\tabcolsep}{3.5pt}\n\\begin{tabular}{!{\\vrule width1pt}c|c|c|c|c|c|c!{\\vrule width1pt}}\n\\Xhline{1pt}\nImages & Scale & TNRD\\cite{TNRD} & SRCNN\\cite{SRCNN} & VDSR\\cite{VDSR} & DRCN\\cite{DRCN} & Ours \\\\ \\Xhline{1pt}\nBaby & \\multirow{6}{*}{2} & 38.53 & 38.54 & 38.75 & 38.80 &\\textbf{38.83} \\\\ \\cline{1-1} \\cline{3-7}\nBird & & 41.31 & 40.91 & 42.42 & 42.68 & \\textbf{42.70} \\\\ \\cline{1-1} \\cline{3-7}\nButterfly & & 33.17 & 32.75 & 34.49 & 34.56 & \\textbf{34.72} \\\\ \\cline{1-1} \\cline{3-7}\nHead & & 35.75 & 35.72 & 35.93 & 35.95 & \\textbf{35.96} \\\\ \\cline{1-1} \\cline{3-7}\nWoman & & 35.50 & 35.37 & 36.05 & 36.15 & \\textbf{36.33} \\\\ \\Xcline{1-1}{1.0pt}\n\\Xcline{3-7} {1pt}\n\\textbf{Average} & & 36.85 & 36.66 & 37.53 & 37.63 & \\textbf{37.71} \\\\ \\Xhline{1pt}\nBaby & \\multirow{6}{*}{3} & 35.28 & 35.25 & 35.38 & 35.50 & \\textbf{35.56} \\\\ \\cline{1-1} \\cline{3-7}\nBird & & 36.09 & 35.48 & 36.66 & 37.05 & \\textbf{37.20} \\\\ \\cline{1-1} \\cline{3-7}\nButterfly & & 28.92 & 27.95 & 29.96 & 30.03 & \\textbf{30.23} \\\\ \\cline{1-1} \\cline{3-7}\nHead & & 33.75 & 33.71 & 33.96 & 34.00 & \\textbf{34.01} \\\\ \\cline{1-1}\\cline{3-7}\nWoman & & 31.79 & 31.37 & 32.36 & 32.53 & \\textbf{32.63} \\\\ \\Xcline{1-1}{1.0pt}\n\\Xcline{3-7} {1pt}\n\\textbf{Average} & & 33.17 & 32.75 & 33.66 & 33.82 & \\textbf{33.93} \\\\ \\Xhline{1pt}\nBaby & \\multirow{6}{*}{4} & 31.30 & 33.13 & 33.41 & 33.51 & \\textbf{33.61} \\\\ \\cline{1-1} \\cline{3-7}\nBird & & 32.99 & 32.52 & 33.54 & 33.78 & \\textbf{33.93} \\\\ \\cline{1-1} \\cline{3-7}\nButterfly & & 26.22 & 25.46 & 27.28 & 27.47 & \\textbf{27.56} \\\\ \\cline{1-1} \\cline{3-7}\nHead & & 32.51 & 32.44 & 32.70 & 32.82 & 
\\textbf{32.82} \\\\ \\cline{1-1} \\cline{3-7}\nWoman & & 29.20 & 28.89 & 29.81 & 30.09 & \\textbf{30.20} \\\\ \\Xcline{1-1}{1pt}\n\\Xcline{3-7} {1.0pt}\n\\textbf{Average} & & 30.85 & 30.48 & 31.35 & 31.53 & \\textbf{31.62} \\\\ \\Xhline{1pt}\n\n\\end{tabular}\n\\label{SR-set5}\n\\end{table*}\n\n\n\\begin{table*}[tbh]\n\\centering\n\\caption{Results of proposed super-resolution method in Set14, BSD100 and Urban100}\n\\begin{tabular}{!{\\vrule width1pt}c!{\\vrule width1pt}c|c|c|c|c|c|c|c|c|c|c!{\\vrule width1pt}}\n\\Xhline{1pt}\n\\multirow{2}{*}{Dataset} & \\multirow{2}{*}{Scale} & \\multicolumn{2}{c|}{TNRD\\cite{TNRD}}& \\multicolumn{2}{c|}{SRCNN\\cite{SRCNN}} & \\multicolumn{2}{c|}{VDSR\\cite{VDSR}} & \\multicolumn{2}{c|}{DRCN\\cite{DRCN}} & \\multicolumn{2}{c!{\\vrule width1pt}}{Ours} \\\\ \\cline{3-12}\n & & PSNR & SSIM & PSNR & SSIM & PSNR & SSIM & PSNR & SSIM & PSNR & SSIM \\\\ \\Xhline{1pt}\n\n\\multirow{3}{*}{Set14} & 2 & 32.54 & 0.907 & 32.42 & 0.906 & 33.03 & 0.912 & 33.04 & 0.912 & \\textbf{33.20} & \\textbf{0.914}\\\\ \\cline{2-2} \\cline{2-12}\n & 3 & 29.46 & 0.823 & 29.28 & 0.821 & 29.77 & 0.831 & 29.76 & 0.831 & \\textbf{29.96} & \\textbf{0.835 } \\\\ \\cline{2-2} \\cline{2-12}\n & 4 & 27.68 & 0.756 & 27.49 & 0.750 & 28.01 & 0.767 & 28.02 & 0.767 & \\textbf{ 28.15 } & \\textbf{0.770} \\\\ \\cline{2-12} \\Xhline{1pt}\n\\multirow{3}{*}{BSD100} & 2 & 31.40 & 0.888 & 31.36 & 0.888 & 31.90 & 0.896 & 31.85 & 0.894 & \\textbf{ 31.94 } & \\textbf{0.896}\\\\ \\cline{2-2} \\cline{2-12}\n & 3 & 28.50 & 0.788 & 28.41 & 0.786 & 28.80 & 0.796 & 28.80 & 0.795 & \\textbf{28.88} & \\textbf{0.799} \\\\ \\cline{2-2} \\cline{2-12}\n & 4 & 27.00 & 0.714 & 26.90 & 0.710 & 27.23 & 0.723 & 27.08 & 0.709 & \\textbf{ 27.33 } & \\textbf{0.726} \\\\ \\cline{2-12} \\Xhline{1pt}\n\\multirow{3}{*}{Urban100} & 2 & 29.70 & 0.899 & 29.50 & 0.895 & 30.76 & 0.914 & 30.75 & 0.913 & \\textbf{ 30.97 } & \\textbf{0.915}\\\\ \\cline{2-2} \\cline{3-12}\n & 3 & 26.44 & 0.807 & 26.24 & 0.799 & 
27.15 & 0.828 & 27.08 & 0.824 & \\textbf{27.33} & \\textbf{0.831}\\\\ \\cline{2-2} \\cline{3-12}\n & 4 & 24.62 & 0.729 & 24.52 & 0.722 & 25.14 & 0.751 & 24.94 & 0.735 & \\textbf{ 25.33 } & \\textbf{0.756} \\\\ \\cline{2-12} \\Xhline{1pt}\n\\end{tabular}\n\\label{SR-others}\n\\end{table*}\n\n\n\\section{Conclusion}\n\nIn this paper, we propose a structured analysis sparse coding (SASC) based network for image restoration and show that the structured sparse prior learned from both large-scale training dataset and the input degraded image can significantly improve the sparsity-based performance. Furthermore, we propose a network implementation of the SASC for image restoration for efficiency and better performance. Experimental results show that the proposed method performs comparably to and often even better than the current state-of-the-art restoration methods.\n\n\\begin{figure*}[!tbh]\n\\renewcommand{\\arraystretch}{0.4}\n\\centering\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/SR_ppt3_8_3\/bicubic.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/SR_ppt3_8_3\/ncsr.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/SR_ppt3_8_3\/srcnn.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/SR_ppt3_8_3\/vdsr.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/SR_ppt3_8_3\/drcn.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/SR_ppt3_8_3\/proposed.png}\n }\\hspace{-0.8em}\n \\\\\n \\caption{SR results of scaling factor of 3. 
(a) Parts of the original images of ``ppt3'' in Set14; (b) NCSR\\cite{dong2013nonlocally}(PSNR=25.66dB); (c) SRCNN\\cite{SRCNN}(PSNR=27.04dB); (d) VDSR\\cite{VDSR}(PSNR=27.86dB);(e) DRCN\\cite{DRCN}(PSNR=27.73dB); (f) \\bf{Proposed method(PSNR=28.16dB)}}\n\\label{fig:SR1}\n\\end{figure*}\n\n\\begin{figure*}[!tbh]\n\\renewcommand{\\arraystretch}{0.4}\n\\centering\n \\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/SR_U5_4\/1.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/SR_U5_4\/2.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/SR_U5_4\/3.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/SR_U5_4\/4.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/SR_U5_4\/5.png}\n }\\hspace{-0.8em}\n\\subfigure[]{\n \\includegraphics[width=0.28\\textwidth]{.\/Figures\/SR_U5_4\/6.png}\n }\\hspace{-0.8em}\n \\\\\n \\caption{SR results of scaling factor of 4. (a) Parts of the original images of ``img005'' in Urban100 dataset; (b) NCSR\\cite{dong2013nonlocally}(PSNR=26.44dB); (c) SRCNN\\cite{SRCNN}(PSNR=25.50dB); (d) VDSR\\cite{VDSR}(PSNR=26.70dB);(e) DRCN\\cite{DRCN}(PSNR=26.82dB); (f)\\bf{ Proposed method(PSNR=27.01dB)}}\n\n\\label{fig:SR2}\n\\end{figure*}\n\n\n\\ifCLASSOPTIONcaptionsoff\n \\newpage\n\\fi\n\n\\bibliographystyle{IEEEtran}\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}}