jackkuo committed on
Commit e0b4fd5 · verified · 1 Parent(s): 8eb9358

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +50 -0
  2. 09E0T4oBgHgl3EQfuQEC/content/tmp_files/2301.02601v1.pdf.txt +804 -0
  3. 09E0T4oBgHgl3EQfuQEC/content/tmp_files/load_file.txt +0 -0
  4. 09E1T4oBgHgl3EQf5AUq/content/2301.03506v1.pdf +3 -0
  5. 09E1T4oBgHgl3EQf5AUq/vector_store/index.faiss +3 -0
  6. 09E1T4oBgHgl3EQf5AUq/vector_store/index.pkl +3 -0
  7. 19FST4oBgHgl3EQfXDjl/content/tmp_files/2301.13783v1.pdf.txt +2753 -0
  8. 19FST4oBgHgl3EQfXDjl/content/tmp_files/load_file.txt +0 -0
  9. 39FQT4oBgHgl3EQfHTWW/content/2301.13248v1.pdf +3 -0
  10. 4dAzT4oBgHgl3EQfffz3/content/tmp_files/2301.01455v1.pdf.txt +440 -0
  11. 4dAzT4oBgHgl3EQfffz3/content/tmp_files/load_file.txt +179 -0
  12. 59FAT4oBgHgl3EQfnB39/content/2301.08627v1.pdf +3 -0
  13. 59FAT4oBgHgl3EQfnB39/vector_store/index.faiss +3 -0
  14. 59FAT4oBgHgl3EQfnB39/vector_store/index.pkl +3 -0
  15. 5tE3T4oBgHgl3EQfpQqt/content/tmp_files/2301.04641v1.pdf.txt +2422 -0
  16. 5tE3T4oBgHgl3EQfpQqt/content/tmp_files/load_file.txt +0 -0
  17. 6tAyT4oBgHgl3EQfcvc9/content/tmp_files/2301.00288v1.pdf.txt +3087 -0
  18. 6tAyT4oBgHgl3EQfcvc9/content/tmp_files/load_file.txt +0 -0
  19. 8NAzT4oBgHgl3EQfgfyv/content/2301.01470v1.pdf +3 -0
  20. 8NAzT4oBgHgl3EQfgfyv/vector_store/index.faiss +3 -0
  21. 9NAyT4oBgHgl3EQf3Plu/vector_store/index.faiss +3 -0
  22. 9NAyT4oBgHgl3EQf3Plu/vector_store/index.pkl +3 -0
  23. A9AzT4oBgHgl3EQfTPz-/vector_store/index.faiss +3 -0
  24. A9AzT4oBgHgl3EQfTPz-/vector_store/index.pkl +3 -0
  25. A9AzT4oBgHgl3EQfhv18/vector_store/index.faiss +3 -0
  26. CtE0T4oBgHgl3EQfgQFr/content/2301.02415v1.pdf +3 -0
  27. CtE0T4oBgHgl3EQfgQFr/vector_store/index.pkl +3 -0
  28. E9FJT4oBgHgl3EQfCizA/content/tmp_files/2301.11430v1.pdf.txt +1472 -0
  29. E9FJT4oBgHgl3EQfCizA/content/tmp_files/load_file.txt +0 -0
  30. ENE0T4oBgHgl3EQfgwE0/vector_store/index.pkl +3 -0
  31. ENE2T4oBgHgl3EQfSgdx/content/tmp_files/2301.03793v1.pdf.txt +715 -0
  32. ENE2T4oBgHgl3EQfSgdx/content/tmp_files/load_file.txt +396 -0
  33. FNE1T4oBgHgl3EQfWwTK/content/2301.03119v1.pdf +3 -0
  34. FNE1T4oBgHgl3EQfWwTK/vector_store/index.pkl +3 -0
  35. FtAyT4oBgHgl3EQfSvdV/content/2301.00091v1.pdf +3 -0
  36. FtAyT4oBgHgl3EQfSvdV/vector_store/index.pkl +3 -0
  37. GNAzT4oBgHgl3EQfxP7V/vector_store/index.faiss +3 -0
  38. HNAzT4oBgHgl3EQfHfvA/content/tmp_files/2301.01047v1.pdf.txt +660 -0
  39. HNAzT4oBgHgl3EQfHfvA/content/tmp_files/load_file.txt +0 -0
  40. HdE1T4oBgHgl3EQfFgNH/content/2301.02902v1.pdf +3 -0
  41. HdE1T4oBgHgl3EQfFgNH/vector_store/index.faiss +3 -0
  42. I9E2T4oBgHgl3EQfpAhk/vector_store/index.faiss +3 -0
  43. ItA0T4oBgHgl3EQfCP98/content/2301.01987v1.pdf +3 -0
  44. ItA0T4oBgHgl3EQfCP98/vector_store/index.faiss +3 -0
  45. ItA0T4oBgHgl3EQfCP98/vector_store/index.pkl +3 -0
  46. JdAzT4oBgHgl3EQfyP4s/content/tmp_files/2301.01749v1.pdf.txt +0 -0
  47. JdAzT4oBgHgl3EQfyP4s/content/tmp_files/load_file.txt +0 -0
  48. LdFOT4oBgHgl3EQf0DSF/content/tmp_files/2301.12934v1.pdf.txt +1228 -0
  49. LdFOT4oBgHgl3EQf0DSF/content/tmp_files/load_file.txt +0 -0
  50. MdFRT4oBgHgl3EQf2zi9/vector_store/index.faiss +3 -0
.gitattributes CHANGED
@@ -2256,3 +2256,53 @@ AdAzT4oBgHgl3EQfF_tg/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -tex
  QNE2T4oBgHgl3EQfrwi3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
  StAzT4oBgHgl3EQf0f7W/content/2301.01786v1.pdf filter=lfs diff=lfs merge=lfs -text
  L9FQT4oBgHgl3EQfUzaG/content/2301.13298v1.pdf filter=lfs diff=lfs merge=lfs -text
+ MdFRT4oBgHgl3EQf2zi9/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ mdE4T4oBgHgl3EQfuA0N/content/2301.05228v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 09E1T4oBgHgl3EQf5AUq/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 8NAzT4oBgHgl3EQfgfyv/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 09E1T4oBgHgl3EQf5AUq/content/2301.03506v1.pdf filter=lfs diff=lfs merge=lfs -text
+ mdE4T4oBgHgl3EQfuA0N/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ StAzT4oBgHgl3EQf0f7W/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ rdAzT4oBgHgl3EQfO_vG/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 39FQT4oBgHgl3EQfHTWW/content/2301.13248v1.pdf filter=lfs diff=lfs merge=lfs -text
+ ZtAzT4oBgHgl3EQf2f4s/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ S9FLT4oBgHgl3EQfPy8r/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ S9FLT4oBgHgl3EQfPy8r/content/2301.12029v1.pdf filter=lfs diff=lfs merge=lfs -text
+ i9FIT4oBgHgl3EQfpys8/content/2301.11324v1.pdf filter=lfs diff=lfs merge=lfs -text
+ I9E2T4oBgHgl3EQfpAhk/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ HdE1T4oBgHgl3EQfFgNH/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 59FAT4oBgHgl3EQfnB39/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ZtAzT4oBgHgl3EQf2f4s/content/2301.01814v1.pdf filter=lfs diff=lfs merge=lfs -text
+ A9AzT4oBgHgl3EQfhv18/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ GNAzT4oBgHgl3EQfxP7V/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ MtE0T4oBgHgl3EQf0QIf/content/2301.02682v1.pdf filter=lfs diff=lfs merge=lfs -text
+ CtE0T4oBgHgl3EQfgQFr/content/2301.02415v1.pdf filter=lfs diff=lfs merge=lfs -text
+ ItA0T4oBgHgl3EQfCP98/content/2301.01987v1.pdf filter=lfs diff=lfs merge=lfs -text
+ FNE1T4oBgHgl3EQfWwTK/content/2301.03119v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 8NAzT4oBgHgl3EQfgfyv/content/2301.01470v1.pdf filter=lfs diff=lfs merge=lfs -text
+ k9A0T4oBgHgl3EQfI__f/content/2301.02085v1.pdf filter=lfs diff=lfs merge=lfs -text
+ tNE3T4oBgHgl3EQf9guV/content/2301.04817v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 59FAT4oBgHgl3EQfnB39/content/2301.08627v1.pdf filter=lfs diff=lfs merge=lfs -text
+ FtAyT4oBgHgl3EQfSvdV/content/2301.00091v1.pdf filter=lfs diff=lfs merge=lfs -text
+ HdE1T4oBgHgl3EQfFgNH/content/2301.02902v1.pdf filter=lfs diff=lfs merge=lfs -text
+ A9AzT4oBgHgl3EQfTPz-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ htFMT4oBgHgl3EQf4DEU/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ vNAyT4oBgHgl3EQfN_bA/content/2301.00001v1.pdf filter=lfs diff=lfs merge=lfs -text
+ XdE1T4oBgHgl3EQfbwSy/content/2301.03177v1.pdf filter=lfs diff=lfs merge=lfs -text
+ kdE1T4oBgHgl3EQfggQS/content/2301.03229v1.pdf filter=lfs diff=lfs merge=lfs -text
+ htFMT4oBgHgl3EQf4DEU/content/2301.12450v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 9NAyT4oBgHgl3EQf3Plu/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ XdE1T4oBgHgl3EQfbwSy/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ztAyT4oBgHgl3EQf0_ll/content/2301.00727v1.pdf filter=lfs diff=lfs merge=lfs -text
+ i9FIT4oBgHgl3EQfpys8/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ MtE0T4oBgHgl3EQf0QIf/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ P9FRT4oBgHgl3EQf6Tj7/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ cdE2T4oBgHgl3EQfwgij/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ NdE0T4oBgHgl3EQf0gIE/content/2301.02685v1.pdf filter=lfs diff=lfs merge=lfs -text
+ PdAzT4oBgHgl3EQfIvsJ/content/2301.01065v1.pdf filter=lfs diff=lfs merge=lfs -text
+ vNAyT4oBgHgl3EQfN_bA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ NdE0T4oBgHgl3EQf0gIE/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ btFJT4oBgHgl3EQf9S1J/content/2301.11687v1.pdf filter=lfs diff=lfs merge=lfs -text
+ ItA0T4oBgHgl3EQfCP98/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ cdE2T4oBgHgl3EQfwgij/content/2301.04102v1.pdf filter=lfs diff=lfs merge=lfs -text
+ PNE4T4oBgHgl3EQfkQ1N/content/2301.05149v1.pdf filter=lfs diff=lfs merge=lfs -text
09E0T4oBgHgl3EQfuQEC/content/tmp_files/2301.02601v1.pdf.txt ADDED
@@ -0,0 +1,804 @@
SEQUENT: Towards Traceable Quantum Machine Learning using Sequential Quantum Enhanced Training*

Philipp Altmann¹, Leo Sünkel¹, Jonas Stein¹, Tobias Müller², Christoph Roch¹ and Claudia Linnhoff-Popien¹
¹LMU Munich
²SAP SE, Walldorf, Germany
philipp.altmann@ifi.lmu.de

*Accepted for publication at ICAART 2023.

Keywords: Quantum Machine Learning, Transfer Learning, Supervised Learning, Hybrid Quantum Computing.

Abstract: Applying new computing paradigms like quantum computing to the field of machine learning has recently gained attention. However, as high-dimensional real-world applications are not yet feasible to be solved using purely quantum hardware, hybrid methods using both classical and quantum machine learning paradigms have been proposed. For instance, transfer learning methods have been shown to be successfully applicable to hybrid image classification tasks. Nevertheless, beneficial circuit architectures still need to be explored. Therefore, tracing the impact of the chosen circuit architecture and parameterization is crucial for the development of beneficially applicable hybrid methods. However, current methods include processes where both parts are trained concurrently, therefore not allowing for a strict separability of classical and quantum impact. Thus, those architectures might produce models that yield a superior prediction accuracy whilst employing the least possible quantum impact. To tackle this issue, we propose Sequential Quantum Enhanced Training (SEQUENT), an improved architecture and training process for the traceable application of quantum computing methods to hybrid machine learning. Furthermore, we provide formal evidence for the disadvantage of current methods and preliminary experimental results as a proof-of-concept for the applicability of SEQUENT.
1 INTRODUCTION

With classical computation evolving towards performance saturation, new computing paradigms like quantum computing arise, promising superior performance in complex problem domains. However, current architectures merely reach numbers of 100 quantum bits (qubits), prone to noise, and classical computers run out of resources simulating similarly sized systems (Preskill, 2018). Thus, most real-world applications are not yet feasible solely relying on quantum compute. Especially in the field of machine learning, where parameter spaces sized upwards of 50 million are required for tasks like image classification, the resources of current quantum hardware or simulators are not yet sufficient for pure quantum approaches (He et al., 2016). Therefore, hybrid approaches have been proposed, where the power of both classical and quantum computation is united for improved results (Bergholm et al., 2018). By this, it is possible to leverage the advantages of quantum computing for tasks with parameter spaces that cannot be computed solely by quantum computers due to hardware and simulation limitations. Within those hybrid algorithms the quantum part is, analogous to the classical deep neural networks (DNNs), represented by so-called variational quantum circuits (VQCs), which are parameterized and can be trained in a supervised manner using labeled data (Cerezo et al., 2021). For hybrid machine learning, we will from here on refer to VQCs as quantum parts and to DNNs as classical parts.

To solve large-scale real-world tasks, like image classification, the concept of transfer learning has been applied for training such hybrid models (Girshick et al., 2014; Pan and Yang, 2010). Given a complex model, with high-dimensional input and parameter spaces, the term transfer learning classically refers to the two-step procedure of first pre-training using a large but generic dataset and secondly fine-tuning using a smaller but more specific dataset (Torrey and Shavlik, 2010). Usually, a subset of the model's weights is frozen for the fine-tuning to compensate for insufficient amounts of fine-tuning data.

Applied to hybrid quantum machine learning (QML), the pre-trained model is used as a feature extractor and the dense classifier is replaced by a hybrid model referred to as a dressed quantum circuit (DQC), including classical pre- and post-processing layers and the central VQC (Mari et al., 2020). This architecture results in concurrent updates to both classical and quantum weights. Even though this produces updates towards overall optimal classification results, it does not allow for tracing the advantageousness of the quantum part of the architecture. Thus, besides providing competitive classification results, such hybrid approaches do not allow for a valid judgment whether the chosen quantum circuit benefits the classification. The only arguable result is that it does not harm the overall performance, or that the introduced inaccuracies may be compensated by the classical layers in the end. However, as we currently are still only exploring VQCs, this verdict, i.e., traceability of the impact of both the quantum and the classical part, is crucial to infer the architecture quality from common metrics. Overall, with current approaches we find a mismatch between the goal of exploring viable architectures and the process applied.

We therefore propose the application of Sequential Quantum Enhanced Training (SEQUENT), an adapted architecture and training procedure for hybrid quantum transfer learning, where the effects of both classical and quantum parts are separably assessable. We see our contributions as follows:

• We provide formal evidence that current quantum transfer learning architectures might result in an optimal network configuration (perfect classification / regression results) with the least possible quantum impact, i.e., a solution equivalent to a purely classical one.
• We propose SEQUENT, a two-step procedure of classical pre-training and quantum fine-tuning, using an adapted architecture to reduce the number of classically extracted features to the number of features manageable by the VQC producing the final classification.
• We show competitive results with a traceable impact of the chosen VQC on the overall performance using preliminary benchmark datasets.
2 BACKGROUND

To delimit SEQUENT, the following section provides a brief general introduction to the related fields of quantum computation, quantum machine learning, deep learning and transfer learning.

2.1 Quantum Computing

Quantum computation works fundamentally differently than classical computation, since QC uses qubits instead of classical bits. Where a classical bit can be in the state 0 or 1, the corresponding state of a qubit is described in Dirac notation as |0⟩ and |1⟩. However, more importantly, qubits can be in a superposition, i.e., a linear combination of both:

|ψ⟩ = α|0⟩ + β|1⟩    (1)

To alter this state, a set of reversible unitary operations like rotations can be applied sequentially to individual target qubits or in conjunction with a control qubit. Upon measurement, the superposition collapses and the qubit takes on either the state |0⟩ or |1⟩ according to a probability. Note that α and β in (1) are complex numbers where |α|² and |β|² give the probability of measuring the qubit in state |0⟩ or |1⟩ respectively, and that |α|² + |β|² = 1, i.e., the probabilities sum up to 1 (Nielsen and Chuang, 2010).

Quantum algorithms like Grover (Grover, 1996) or Shor (Shor, 1994) provide a theoretical speedup compared to classical algorithms. Moreover, in 2019 quantum supremacy was claimed (Arute et al., 2019), and the race to find more algorithms providing a quantum advantage is currently underway. However, the current state of quantum computing is often referred to as the noisy intermediate-scale quantum (NISQ) era (Preskill, 2018), a period in which relatively small and noisy quantum computers are available but still no error correction to mitigate the noise, limiting execution to small quantum circuits. Furthermore, current quantum computers are not yet capable of executing algorithms that provide any quantum advantage in a practically useful setting.

Thus, much research has recently been put into the investigation of hybrid classical-quantum algorithms, that is, algorithms that consist of quantum and classical parts, each responsible for a distinct task. In this regard, quantum machine learning has been gaining in popularity.

Quantum machine learning algorithms have been proposed in several varieties over the last years (Farhi et al., 2014; Dong et al., 2008; Biamonte et al., 2017). Besides quantum kernel methods (Schuld and Killoran, 2019), variational quantum algorithms (VQAs) seem to be the most relevant in the current NISQ era for various reasons (Cerezo et al., 2021).

VQAs generally are comprised of multiple components, but the central part is the structure of the applied circuit or Ansatz. Furthermore, a VQA Ansatz is intrinsically parameterized in order to use it as a predictive model by optimizing the parameterization towards a given objective, i.e., to minimize a given loss. Overall, given a set of data and targets, a parameterized circuit and an objective, an approximation of the generator underlying the data can be learned. Applying methods like gradient descent, this model can be trained to predict the label of unseen data (Cerezo et al., 2021; Mitarai et al., 2018). For the field of QML, various circuit architectures have been proposed (Biamonte et al., 2017; Khairy et al., 2020; Schuld et al., 2020).

For the remainder of this paper, we consider the following simple φ-parameterized variational quantum circuit (VQC) for η qubits:

VQC_φ(z) = measure_σ ∘ entangle_φδ ∘ ··· ∘ entangle_φ1 ∘ embed_η(z)    (2)

with the depth δ and the output dimension σ, given the input z = (z₁, ..., z_η), where embed_η loads the data points z into η balanced qubits in superposition via z-rotations, entangle_φ applies controlled-not gates to entangle neighboring qubits followed by φ-parameterized z-rotations, and measure_σ applies the Pauli-Z operator and measures the first σ qubits (Schuld and Killoran, 2019; Mitarai et al., 2018).

This architecture has also been shown to be directly applicable to classification tasks, using the measurement expectation value as a one-hot encoded prediction of the target (Schuld et al., 2020). Overall, VQAs have been shown to be applicable to a wide variety of classification tasks (Abohashima et al., 2020) and were successfully utilized by Mari et al. (2020), using the simple architecture defined in (2). Thus, to provide a proof-of-concept for SEQUENT, we will focus on said architecture for classification tasks and leave the optimization of embeddings (LaRose and Coyle, 2020) and architectures (Khairy et al., 2020) to future research.
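As an illustration only (not code from the paper), a circuit of the kind described by Equation (2) could be sketched with PennyLane's simulator as follows; the function names, the qubit/depth/output sizes and the exact gate choices are assumptions made for the example.

```python
import pennylane as qml
import numpy as np

ETA, DELTA, SIGMA = 6, 10, 2                 # qubits, entangling layers, measured outputs (assumed)
dev = qml.device("default.qubit", wires=ETA)

@qml.qnode(dev)
def vqc(z, phi):
    # embed_eta: balanced superposition on every qubit, data loaded via z-rotations
    for i in range(ETA):
        qml.Hadamard(wires=i)
        qml.RZ(z[i], wires=i)
    # entangle_phi_1 ... entangle_phi_delta: CNOT ring followed by parameterized z-rotations
    for layer in range(DELTA):
        for i in range(ETA):
            qml.CNOT(wires=[i, (i + 1) % ETA])
        for i in range(ETA):
            qml.RZ(phi[layer, i], wires=i)
    # measure_sigma: Pauli-Z expectation values of the first sigma qubits
    return [qml.expval(qml.PauliZ(i)) for i in range(SIGMA)]

phi = 0.01 * np.random.randn(DELTA, ETA)     # trainable quantum weights
print(vqc(np.random.rand(ETA), phi))
```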
2.2 Deep Learning

Deep neural networks (DNNs) refer to parameterized networks consisting of a set of fully-connected layers. A layer comprises a set of distinct neurons, where each neuron takes a vector of inputs x = (x₁, x₂, ..., x_n), which is multiplied with the corresponding weight vector w_j = (w_{j1}, w_{j2}, ..., w_{jn}). A bias b_j is added before the result is passed into an activation function ϕ. Therefore, the output of neuron z at position j takes the following form (Bishop and Nasrabadi, 2006):

z_j = ϕ(∑_{i=1}^{n} w_{ji} x_i + b_j)    (3)

Given a target function f(x): X ↦ y, we can define the approximation

f̂_θ(x): X ↦ ŷ = L_{h_d→o} ∘ ··· ∘ L_{n→h_1}    (4)

as a composition of multiple layers L with multiple neurons z parameterized by θ, d − 1 h-dimensional hidden layers, and the respective input and target dimensions n and o. Using the prediction error J = (y − f̂_θ(x))², f̂_θ can be optimized by propagating the error backwards through the network using the gradient ∇_θ J (Bishop and Nasrabadi, 2006).

Those feed-forward models have been shown to be capable of approximating arbitrary functions, given a sufficient amount of data and either a sufficient depth (i.e., number of hidden layers) or width (i.e., size of hidden state) (Leshno et al., 1993).

Deep neural networks for image classification tasks are comprised of two parts: a feature extractor containing a composite of convolutional layers to extract a υ-sized vector of features, FE: X ↦ υ, and a composite of fully connected layers to classify the extracted feature vector, FC: υ ↦ ŷ. Thus, the overall model is defined as f̂: X ↦ ŷ = FC_θ ∘ FE_θ(x). Those models have been successfully applied to a wide variety of real-world classification tasks (He et al., 2016; Krizhevsky et al., 2012). However, to find a parameterization that optimally separates the given dataset, a large amount of training data is required.

Transfer Learning aims to solve the problem of insufficient training data by transferring already learned knowledge (weights, biases) from a task T_s of a source domain D_s to a related target task T_t of a target domain D_t. More specifically, a domain D = {X, P(x)} comprises a feature space X and the probability distribution P(x), where x = (x₁, x₂, ..., x_n) ∈ X. The corresponding task T is given by T = {y, f(x)} with label space y and target function f(x) (Zhuang et al., 2021). A deep transfer learning task is defined by ⟨D_s, T_s, D_t, T_t, f̂_t(·)⟩, where f̂_t(·) is defined according to Equation 4 (Tan et al., 2018).

Generally, transfer learning is a two-stage process. Initially, a source model is trained according to a specific task T_s in the source domain D_s. Consequently, transfer learning aims to enhance the performance of the target predictive function f̂_t(·) for the target learning task T_t in target domain D_t by transferring latent knowledge from T_s in D_s, where D_s ≠ D_t and/or T_s ≠ T_t. Usually, the size of D_s >> D_t (Tan et al., 2018). The knowledge transfer and learning step is commonly achieved via feature extraction and/or fine-tuning.

The feature extraction process freezes the source model and adds a new classifier to the output of the pre-trained model. Thereby, the feature maps learned from T_s in D_s can be repurposed and the newly added classifier is trained according to the target task T_t (Donahue et al., 2014). The fine-tuning process additionally unfreezes top layers from the source model and jointly trains the unfrozen feature representations from the source model with the added classifier. By this, the time and space complexity for the target task T_t can be reduced by transferring and/or fine-tuning the already learned features of a pre-trained source model to a target model (Girshick et al., 2014).
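The feature-extraction variant described above can be sketched generically in PyTorch as follows; this is an illustrative example of the technique, not the authors' code, and it assumes torchvision's pre-trained ResNet-18 weights are available and a hypothetical two-class target task.

```python
import torch
import torch.nn as nn
from torchvision import models

num_classes = 2                                       # assumed target task size
model = models.resnet18(weights="IMAGENET1K_V1")      # pre-trained source model (downloads weights)

# Freeze the convolutional feature extractor, i.e. the transferred source knowledge.
for p in model.parameters():
    p.requires_grad = False

# Replace the final fully connected layer with a new classifier for the target task.
model.fc = nn.Linear(model.fc.in_features, num_classes)

# Only the parameters of the new head are optimized.
optimizer = torch.optim.Adam(model.fc.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()

x = torch.randn(8, 3, 224, 224)                       # dummy batch standing in for target-domain images
y = torch.randint(0, num_classes, (8,))
loss = criterion(model(x), y)
loss.backward()
optimizer.step()
```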
3 RELATED WORK

In the context of machine learning, VQAs are often applied to the problem of classification (Schuld et al., 2020; Mitarai et al., 2018; Havlíček et al., 2019; Schuld and Killoran, 2019), although other application areas exist. Different techniques, e.g. embedding (Lloyd et al., 2020; LaRose and Coyle, 2020), or problems, e.g. barren plateaus (McClean et al., 2018), have been widely discussed in the QML literature. However, we focus on hybrid quantum transfer learning (Mari et al., 2020) in this paper.

Classical transfer learning is widely applied in present-day machine learning algorithms (Torrey and Shavlik, 2010; Pan and Yang, 2010; Pratt, 1992) and can be extended with concepts of the emerging quantum computing technology (Zen et al., 2020). Mari et al. (2020) propose various hybrid transfer learning architectures, ranging from classical to quantum (CQ), quantum to classical (QC) and quantum to quantum (QQ). The authors focus on the former CQ architecture, which comprises the previously explained DQC. In the current era of intermediate-scale quantum technology the DQC transfer learning approach is the most widely investigated and applied one, as it allows, to some extent, optimally pre-processing high-dimensional data and afterwards loading the most relevant features into a quantum computer. Gokhale et al. (2020) used this architecture to classify and detect image splicing forgeries, while Acar and Yilmaz (2021) applied it to detect COVID-19 from CT images. Also, Mari et al. (2020) assess their approach exemplarily on image classification tasks. Although the results are quite promising, it is not clear from the evaluation whether the dressed quantum circuit is advantageous over a fully classical approach.
4 DQC QUANTUM IMPACT

We argue that for certain problem instances DQCs may yield accurate results while not making active use of any quantum effects in the VQC. This possibility exists especially for easy-to-solve problem instances, when the purely classical layers alone are sufficient to yield accurate results and the quantum layer represents the identity. This can be seen by realizing that the classical pre-processing layer acts as a hidden layer with a non-polynomial activation function, hence being capable of approximating arbitrary continuous functions depending on the number of hidden units, by the universal approximation theorem (Leshno et al., 1993). The overall DQC architecture is portrayed in Figure 1.

The central VQC is defined according to section 2.1 as introduced above. Both pre- and post-processing layers are implemented by fully connected layers of neurons with a non-linear activation function according to subsection 2.2. Formally, the DQC for η qubits can thus be depicted as:

DQC = L_{η→σ} ∘ VQC_φ ∘ L_{n→η}    (5)

where L_{n→η} and L_{η→σ} are the fully connected classical dressing layers according to Equation 3, mapping from the input size n to the number of qubits η and from the number of qubits η to the target size σ respectively, and VQC_φ is the actual variational quantum circuit according to Equation 2 with η qubits and σ = η measured outputs.

Now let us consider a parameterization φ where VQC_φ(z) = id(z) = z resembles the identity function. Consequently, (5) collapses into the following purely classical, 2-layer feed-forward network with hidden dimension η:

DQC = L_{η→σ} ∘ id ∘ L_{n→η} = L_{η→σ} ∘ L_{n→η}    (6)

By the universal function approximation theorem, this allows the DQC to approximate any polynomial function f: ℝⁿ → ℝᵒ of degree 1 arbitrarily well, even if the VQC is not affecting the prediction at all.

[Figure 1: Dressed Quantum Circuit Architecture — a classical pre-processing layer, the central variational quantum circuit, and a classical post-processing layer.]

Consequently, one has to be careful in the selection of suitable problem instances, as they must not be too easy, in order to ensure that the VQC is even needed to yield the desired results. This becomes especially difficult as current quantum hardware is quite limited, typically restricting the choice to fairly easy problem instances. On top of this, no necessity to use a post-processing layer seems apparent, as it has been shown in various publications (Schuld et al., 2020; Schuld and Killoran, 2019) that variational quantum classifiers, i.e., VQCs, can successfully complete classification tasks without any post-processing. Overall, whilst conveying a proof-of-concept that the combination of classical neural networks and variational quantum circuits in the dressed quantum circuit hybrid architecture is able to produce competitive results, this architecture is neither able to convey the advantageousness of the chosen quantum circuit nor to exclude the possibility of the classical part merely compensating for quantum instabilities.
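To make the collapse from Equation (5) to Equation (6) concrete, the following NumPy sketch (our own illustration, with assumed layer sizes) builds a DQC-shaped model whose quantum block acts as the identity and checks that it is indistinguishable from an ordinary two-layer network; any accuracy such a model reaches is attributable to the classical dressing layers alone.

```python
import numpy as np

rng = np.random.default_rng(0)
n, eta, sigma = 8, 6, 2                                 # input size, qubits, outputs (assumed)

W1, b1 = rng.normal(size=(eta, n)), np.zeros(eta)       # pre-processing layer  L_{n -> eta}
W2, b2 = rng.normal(size=(sigma, eta)), np.zeros(sigma) # post-processing layer L_{eta -> sigma}

def vqc_identity(z):
    # A parameterization phi for which the circuit simply returns its inputs.
    return z

def dqc(x):
    z = np.tanh(W1 @ x + b1)     # classical pre-processing with non-linear activation
    z = vqc_identity(z)          # "quantum" layer contributes nothing
    return W2 @ z + b2           # classical post-processing

def two_layer_mlp(x):
    return W2 @ np.tanh(W1 @ x + b1) + b2

x = rng.normal(size=n)
print(np.allclose(dqc(x), two_layer_mlp(x)))   # True: Eq. (5) collapses to Eq. (6)
```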
5 SEQUENT

To improve the traceability of quantum impact in hybrid architectures, we propose Sequential Quantum Enhanced Training. SEQUENT improves upon the dressed quantum circuit architecture by introducing two adaptations to it. First, we omit the classical post-processing layer and use the variational quantum circuit output directly as the classification result. Therefore, we reduce the measured outputs σ from the number of qubits η (cf. Figure 1) to the dimension of the target ŷ (cf. Figure 2).

The direct use of VQCs as classifiers has been frequently proposed and shown to be equally applicable as classical counterparts (Schuld et al., 2020). By this, the overall quality of the chosen circuit and parameterization is directly assessable by the classification result, thus the final accuracy. Moreover, a parameter setting with universal approximation capabilities (cf. Equation 6) and the least (identity-like) quantum contribution is mathematically precluded by the removal of the hidden state (compare Equation 5).

Concurrently omitting the pre-processing or compression layer, however, would increase the minimum number of required qubits to the number of output features of the problem domain or, when applied to image classification, of the chosen feature extractor (e.g. 512 for ResNet-18). However, both current quantum hardware and simulators do not allow for arbitrarily sized circuits, maxing out at around 100 qubits.

[Figure 2: SEQUENT Architecture: Sequential Quantum Enhanced Training, comprised of a classical compression layer (CCL) parameterized by θ and a variational quantum circuit (VQC) parameterized by φ, with separate phases for classical (blue) and quantum (green) training, for variable sets of input data X, prediction targets ŷ, and VQCs with η qubits and δ entangling layers.]

We therefore secondly propose to maintain the classical compression layer to provide a mapping/compression X ↦ η and, in order to fully classically pre-train the compression layer, to add a surrogate classical classification layer η ↦ ŷ. Replacing this surrogate classical classification layer with the chosen variational quantum circuit to be assessed and freezing the pre-trained weights of the classical compression layer then allows for a second, purely quantum training phase, yielding the following sequential training procedure depicted in Figure 3:

1. Pre-train SEQUENT, f̂: X ↦ η ↦ ŷ, composed of the classical compression layer CCL_θ and a surrogate classical classification layer, by optimizing the classical weights θ.
2. Freeze the classical weights θ, replace the surrogate classical classification layer by the variational quantum classification circuit VQC_φ (cf. Equation 2) and optimize the quantum weights φ.

This two-step procedure can be seen as an application of transfer learning in its own right, transferring from classical to quantum weights in a hybrid architecture. Overall, the SEQUENT architecture displayed in Figure 2 can be formalized as:

SEQUENT_{θ,φ}: X ↦ η ↦ ŷ = VQC_φ(z) ∘ CCL_θ(x)    (7)
CCL_θ(x): X ↦ η = L_{n→η}    (cf. Equation 3)
VQC_φ(z): η ↦ ŷ    (cf. Equation 2)

[Figure 3: SEQUENT Training Process, consisting of a classical (blue) pre-training phase (1) and a quantum (green) fine-tuning phase (2).]

To be used for the classification of high-dimensional data, like images, the input x needs to be replaced by the intermediate output z of an image recognition model (cf. subsection 2.2). Combining both two-step transfer learning procedures yields the following three-step procedure:

1. Classically pre-train a full classification model (e.g. ResNet (He et al., 2016)), f̂: X ↦ υ ↦ ŷ = FC_θ(z) ∘ FE_θ(x), on a large generic dataset (compare subsection 2.2).
2. Freeze the convolutional feature extraction layers FE and fine-tune the fully-connected layers, consisting of a compression layer υ ↦ η and a surrogate classification layer η ↦ ŷ.
3. Freeze the classical weights and replace the surrogate classification layer with the VQC to train the quantum weights φ of the hybrid model: f̂_{θ,φ}: X ↦ υ ↦ η ↦ ŷ = VQC_φ(z) ∘ CCL_θ(x) ∘ FE

For a classification task with n classes, at least η ≥ n qubits are required. Whilst we use the simple Ansatz introduced in Equation 2 with η = 6 qubits and a circuit depth of δ = 10 to validate our approach in the following, any VQC architecture yielding a direct classification result would be conceivable.
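A minimal sketch of the two-phase procedure in PyTorch, assuming the VQC is wrapped as a differentiable module via PennyLane's TorchLayer; the layer sizes, templates, optimizer settings and placeholder data are illustrative assumptions and not taken from the paper.

```python
import pennylane as qml
import torch
import torch.nn as nn

N_FEATURES, ETA, DELTA, N_CLASSES = 8, 6, 10, 2        # assumed sizes
dev = qml.device("default.qubit", wires=ETA)

@qml.qnode(dev, interface="torch")
def circuit(inputs, weights):
    qml.AngleEmbedding(inputs, wires=range(ETA))           # embed the compressed features
    qml.BasicEntanglerLayers(weights, wires=range(ETA))    # delta entangling layers
    return [qml.expval(qml.PauliZ(i)) for i in range(N_CLASSES)]

compression = nn.Linear(N_FEATURES, ETA)                   # classical compression layer (theta)
surrogate = nn.Linear(ETA, N_CLASSES)                      # surrogate classical classifier
vqc_head = qml.qnn.TorchLayer(circuit, weight_shapes={"weights": (DELTA, ETA)})

x = torch.randn(32, N_FEATURES)                            # placeholder data
y = torch.randint(0, N_CLASSES, (32,))
loss_fn = nn.CrossEntropyLoss()

# Phase 1: classical pre-training of compression layer + surrogate classifier.
classical = nn.Sequential(compression, nn.Tanh(), surrogate)
opt = torch.optim.Adam(classical.parameters(), lr=1e-2)
for _ in range(20):
    opt.zero_grad()
    loss_fn(classical(x), y).backward()
    opt.step()

# Phase 2: freeze theta, swap the surrogate for the VQC, train only the quantum weights phi.
for p in compression.parameters():
    p.requires_grad = False
sequent = nn.Sequential(compression, nn.Tanh(), vqc_head)
opt = torch.optim.Adam(vqc_head.parameters(), lr=1e-2)
for _ in range(20):
    opt.zero_grad()
    loss_fn(sequent(x), y).backward()
    opt.step()
```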
6 EVALUATION

We evaluate SEQUENT by comparing its performance to its predecessor, the DQC, and a purely classical feed-forward neural network. All models were trained on 2000 data points of the moons and spirals (Lang and Witbrock, 1988) benchmark datasets for two and four epochs of sequential, hybrid and classical training respectively. To guarantee comparability, we set the size of the hidden state of the classical model to h = η = 6. The code for all experiments is available here¹. The classification results are visualized in Figure 4.

Looking at the results for the moons dataset, all compared models are able to depict the shape underlying the data. Note that even the considerably simpler classical model is perfectly able to separate the given classes. Hence, these experimental results support the concerns about the impact of the VQC on the overall DQC performance (cf. section 4). With a final test accuracy of 95%, the DQC performs even worse than the purely classical model, which reaches 96%. Looking at the SEQUENT results, however, these concerns are eliminated, as the performance and final accuracy of 97%, besides outperforming both compared models, can certainly be attributed to the VQC due to the applied training process and the used architecture. Similar results show for the second benchmark dataset of intertwined spirals on the right side of Figure 4. The overall best accuracy of 86%, however, suggests that further adjustments to the VQC could be beneficial. This result also depicts the application of SEQUENT we imagine for benchmarking and optimizing VQC architectures.

¹https://github.com/philippaltmann/SEQUENT

[Figure 4: Classification results of SEQUENT, DQC and a classical feed-forward neural network for the moons (left) and spirals (right) benchmark datasets. Final test accuracies: SEQUENT 0.97 / 0.86, DQC 0.95 / 0.81, Classical 0.96 / 0.79.]
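For readers who want to reproduce a comparable setup, the moons benchmark can be generated with scikit-learn as sketched below; the spirals dataset would need a custom generator, and the noise level and split are assumptions rather than the paper's exact settings.

```python
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split

# 2000 two-dimensional points forming two interleaving half-circles (binary classification).
X, y = make_moons(n_samples=2000, noise=0.1, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(X_train.shape, y_train.shape)   # (1600, 2) (1600,)
```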
7 CONCLUSIONS

We proposed Sequential Quantum Enhanced Training (SEQUENT), a two-step transfer learning procedure applied to training hybrid QML algorithms, combined with an adapted hybrid architecture to allow for tracing both the classical and the quantum impact on the overall performance. Furthermore, we showed the need for said adaptations by formally pointing out weaknesses of the DQC, the current state-of-the-art approach in this regard. Finally, we showed that SEQUENT yields competitive results for two representative benchmark datasets compared to DQCs and classical neural networks. Thus, we provided a proof-of-concept for both the proposed reduced architecture and the adapted transfer learning training procedure.

However, whilst SEQUENT theoretically is applicable to any kind of VQC, we only considered the simple architecture with fixed angle embeddings and δ entangling layers as proposed by Mari et al. (2020). Furthermore, we only supplied preliminary experimental implications and did not yet test any high-dimensional real-world applications. Overall, we do not expect superior results that outperform state-of-the-art approaches in the first place, as viable circuit architectures for quantum machine learning are still an active and fast-moving field of research.

Thus, both the real-world applicability and the development of circuit architectures that indeed offer a benefit over classical ones should receive further research attention. To empower real-world applications, the use of hybrid quantum methods should also be kept in mind when pre-training large classification models like ResNet. Also, applying more advanced techniques to train the pre-processing or compression layer to take full advantage of the chosen quantum circuit should be examined. For example, auto-encoder architectures might be applicable to train a more generalized mapping from the classical input space to the quantum space. Overall, we believe that, by applying the proposed concepts and building upon SEQUENT, both valuable hybrid applications and beneficial quantum circuit architectures can be found.
ACKNOWLEDGEMENTS

This work is part of the Munich Quantum Valley, which is supported by the Bavarian state government with funds from the Hightech Agenda Bayern Plus, and was partially funded by the German BMWK project PlanQK (01MK20005I).
REFERENCES

Abohashima, Z., Elhosen, M., Houssein, E. H., and Mohamed, W. M. (2020). Classification with quantum machine learning: A survey. arXiv preprint arXiv:2006.12270.
Acar, E. and Yilmaz, I. (2021). COVID-19 detection on IBM quantum computer with classical-quantum transfer learning. Turkish Journal of Electrical Engineering and Computer Sciences, 29(1):46–61.
Arute, F., Arya, K., Babbush, R., Bacon, D., Bardin, J. C., Barends, R., Biswas, R., Boixo, S., Brandao, F. G., Buell, D. A., et al. (2019). Quantum supremacy using a programmable superconducting processor. Nature, 574(7779):505–510.
Bergholm, V., Izaac, J., Schuld, M., Gogolin, C., Alam, M. S., Ahmed, S., Arrazola, J. M., Blank, C., Delgado, A., Jahangiri, S., et al. (2018). PennyLane: Automatic differentiation of hybrid quantum-classical computations. arXiv preprint arXiv:1811.04968.
Biamonte, J., Wittek, P., Pancotti, N., Rebentrost, P., Wiebe, N., and Lloyd, S. (2017). Quantum machine learning. Nature, 549(7671):195–202.
Bishop, C. M. and Nasrabadi, N. M. (2006). Pattern Recognition and Machine Learning, volume 4. Springer.
Cerezo, M., Arrasmith, A., Babbush, R., Benjamin, S. C., Endo, S., Fujii, K., McClean, J. R., Mitarai, K., Yuan, X., Cincio, L., et al. (2021). Variational quantum algorithms. Nature Reviews Physics, 3(9):625–644.
Donahue, J., Jia, Y., Vinyals, O., Hoffman, J., Zhang, N., Tzeng, E., and Darrell, T. (2014). DeCAF: A deep convolutional activation feature for generic visual recognition. In International Conference on Machine Learning, pages 647–655. PMLR.
Dong, D., Chen, C., Li, H., and Tarn, T.-J. (2008). Quantum reinforcement learning. IEEE Transactions on Systems, Man, and Cybernetics, Part B (Cybernetics), 38(5):1207–1220.
Farhi, E., Goldstone, J., and Gutmann, S. (2014). A quantum approximate optimization algorithm. arXiv preprint arXiv:1411.4028.
Girshick, R. B., Donahue, J., Darrell, T., and Malik, J. (2014). Rich feature hierarchies for accurate object detection and semantic segmentation. In 2014 IEEE Conference on Computer Vision and Pattern Recognition, pages 580–587.
Gokhale, A., Pande, M. B., and Pramod, D. (2020). Implementation of a quantum transfer learning approach to image splicing detection. International Journal of Quantum Information, 18(05):2050024.
Grover, L. K. (1996). A fast quantum mechanical algorithm for database search. In Proceedings of the Twenty-Eighth Annual ACM Symposium on Theory of Computing, pages 212–219.
Havlíček, V., Córcoles, A. D., Temme, K., Harrow, A. W., Kandala, A., Chow, J. M., and Gambetta, J. M. (2019). Supervised learning with quantum-enhanced feature spaces. Nature, 567(7747):209–212.
He, K., Zhang, X., Ren, S., and Sun, J. (2016). Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 770–778.
Khairy, S., Shaydulin, R., Cincio, L., Alexeev, Y., and Balaprakash, P. (2020). Learning to optimize variational quantum circuits to solve combinatorial problems. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 2367–2375.
Krizhevsky, A., Sutskever, I., and Hinton, G. E. (2012). ImageNet classification with deep convolutional neural networks. In Pereira, F., Burges, C., Bottou, L., and Weinberger, K., editors, Advances in Neural Information Processing Systems, volume 25. Curran Associates, Inc.
Lang, K. and Witbrock, M. (1988). Learning to tell two spirals apart. In Proceedings of the 1988 Connectionist Models Summer School.
LaRose, R. and Coyle, B. (2020). Robust data encodings for quantum classifiers. Physical Review A, 102(3):032420.
Leshno, M., Lin, V. Y., Pinkus, A., and Schocken, S. (1993). Multilayer feedforward networks with a nonpolynomial activation function can approximate any function. Neural Networks, 6(6):861–867.
Lloyd, S., Schuld, M., Ijaz, A., Izaac, J., and Killoran, N. (2020). Quantum embeddings for machine learning. arXiv preprint arXiv:2001.03622.
Mari, A., Bromley, T. R., Izaac, J., Schuld, M., and Killoran, N. (2020). Transfer learning in hybrid classical-quantum neural networks. Quantum, 4:340.
McClean, J. R., Boixo, S., Smelyanskiy, V. N., Babbush, R., and Neven, H. (2018). Barren plateaus in quantum neural network training landscapes. Nature Communications, 9(1):1–6.
Mitarai, K., Negoro, M., Kitagawa, M., and Fujii, K. (2018). Quantum circuit learning. Physical Review A, 98(3):032309.
Nielsen, M. A. and Chuang, I. (2010). Quantum Computation and Quantum Information.
Pan, S. J. and Yang, Q. (2010). A survey on transfer learning. IEEE Transactions on Knowledge and Data Engineering, 22(10):1345–1359.
Pratt, L. Y. (1992). Discriminability-based transfer between neural networks. In Proceedings of the 5th International Conference on Neural Information Processing Systems, NIPS'92, pages 204–211, San Francisco, CA, USA. Morgan Kaufmann Publishers Inc.
Preskill, J. (2018). Quantum computing in the NISQ era and beyond. Quantum, 2:79.
Schuld, M., Bocharov, A., Svore, K. M., and Wiebe, N. (2020). Circuit-centric quantum classifiers. Physical Review A, 101:032308.
Schuld, M. and Killoran, N. (2019). Quantum machine learning in feature Hilbert spaces. Physical Review Letters, 122:040504.
Shor, P. W. (1994). Algorithms for quantum computation: discrete logarithms and factoring. In Proceedings 35th Annual Symposium on Foundations of Computer Science, pages 124–134. IEEE.
Tan, C., Sun, F., Kong, T., Zhang, W., Yang, C., and Liu, C. (2018). A survey on deep transfer learning. In International Conference on Artificial Neural Networks, pages 270–279. Springer.
Torrey, L. and Shavlik, J. (2010). Transfer learning. In Handbook of Research on Machine Learning Applications and Trends: Algorithms, Methods, and Techniques, pages 242–264. IGI Global.
Zen, R., My, L., Tan, R., Hébert, F., Gattobigio, M., Miniatura, C., Poletti, D., and Bressan, S. (2020). Transfer learning for scalability of neural-network quantum states. Physical Review E, 101:053301.
Zhuang, F., Qi, Z., Duan, K., Xi, D., Zhu, Y., Zhu, H., Xiong, H., and He, Q. (2021). A comprehensive survey on transfer learning. Proceedings of the IEEE, 109(1):43–76.
09E0T4oBgHgl3EQfuQEC/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
09E1T4oBgHgl3EQf5AUq/content/2301.03506v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a802027d53fdb96c6c6e81724cb48cce8e75561425fe61e3ba4be65dfad965c7
+ size 643402
09E1T4oBgHgl3EQf5AUq/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27a7df0a9d26b6f3204f4b387baf43eab4e9b63113c4922e6d2bb3b2573e8347
+ size 3014701
09E1T4oBgHgl3EQf5AUq/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a6abf254adc67e7741ce57701e36c7576b5f321d275fc83eeefc733749c3aa50
+ size 111432
19FST4oBgHgl3EQfXDjl/content/tmp_files/2301.13783v1.pdf.txt ADDED
@@ -0,0 +1,2753 @@
An analytical approach to Bayesian evidence computation

Juan García-Bellido
Departamento de Física Teórica C-XI, Universidad Autónoma de Madrid, Cantoblanco, 28049 Madrid, Spain

April 14th, 2005

Abstract

The Bayesian evidence is a key tool in model selection, allowing a comparison of models with different numbers of parameters. Its use in analysis of cosmological models has been limited by difficulties in calculating it, with current numerical algorithms requiring supercomputers. In this paper we give exact formulae for the Bayesian evidence in the case of Gaussian likelihoods with arbitrary correlations and top-hat priors, and approximate formulae for the case of likelihood distributions with leading non-Gaussianities (skewness and kurtosis). We apply these formulae to cosmological models with and without isocurvature components, and compare with results we previously obtained using numerical thermodynamic integration. We find that the results are of lower precision than the thermodynamic integration, while still being good enough to be useful.
16
1 Introduction

Model selection refers to the statistical problem of deciding which model description of observational data is the best [1, 2]. It differs from parameter estimation, where the choice of a single model (i.e. choice of parameters to be varied) has already been made and the aim is to find their best-fitting values and ranges. While there have been widespread applications of parameter estimation techniques, usually likelihood fitting, to cosmological data, there has so far been quite limited application of model selection statistics [3, 4, 5]. This is unfortunate, as model selection techniques are necessary to robustly distinguish between models with different numbers of parameters, and many of the most interesting issues in cosmology concern the desirability or otherwise of incorporating additional parameters to describe new physical effects.

Within the context of Bayesian inference, model selection should be carried out using the Bayesian evidence [1, 2], which measures the probability of the model in light of the observational data (i.e. the average likelihood over the prior distribution). The Bayesian evidence associates a single number with each model, and the models can then be ranked in order of the evidence, with the ratios of those values interpreted as the relative probability of the models. This process sets up a desirable tension between model simplicity and ability to fit the data.

Use of the Bayesian evidence has so far been limited by difficulties in calculating it. The standard technique is thermodynamic integration [6, 7], which varies the temperature in a Monte Carlo Markov Chain (MCMC) approach in order that the distribution is sampled in a way covering both posterior and prior distributions. However, in recent work [5] we showed that in order to obtain sufficiently-accurate results in a cosmological context, around 10^7 likelihood evaluations are required per model. Such analyses are CPU-limited by the time needed to generate the predicted spectra to compare with the data, and this requirement pushes the problem into the supercomputer class (for comparison, parameter estimation runs typically employ 10^5 to 10^6 likelihood evaluations).

In this paper, we propose and exploit a new analytic method to compute the evidence based on an expansion of the likelihood distribution function. The method pre-supposes that the covariance of the posterior distribution has been obtained, for instance via an MCMC parameter estimation run, and in its present form requires that the prior distributions of the parameters are uniform top-hat priors.^1 While the method will not be applicable for general likelihood distributions, we include the leading non-Gaussianities (skewness and kurtosis) in approximating the likelihood shape, with the expectation of obtaining good results whenever the likelihood distribution is sufficiently simple. Cosmological examples commonly exhibit likelihood distributions with only a single significant peak.

We apply the method both to toy model examples and to genuine cosmological situations. In particular, we calculate the evidences for adiabatic and isocurvature models, which we previously computed using thermodynamic integration in Ref. [5]. We find that the discrepancies between the methods are typically no worse than 1 in ln(Evidence), meaning that the analytic method is somewhat less accurate than would be ideal, but is accurate enough to give a useful indication of model preference.

^1 An extension to Gaussian priors should be feasible, but not one to arbitrary priors.
2 The Bayesian evidence

The posterior probability distribution P(θ, M|D) for the parameters θ of the model M, given the data D, is related to the likelihood function L(D|θ, M) within a given set of prior distribution functions π(θ, M) for the parameters of the model, by Bayes' theorem:

    P(\theta, M|D) = \frac{L(D|\theta, M)\,\pi(\theta, M)}{E(D|M)} ,                               (1)

where E is the Bayesian evidence, i.e. the average likelihood over the priors,

    E(D|M) = \int d\theta \, L(D|\theta, M)\, \pi(\theta, M) ,                                     (2)

where θ is a vector with n components characterising the n independent parameters. The prior distribution function π contains all the information about the parameters before observing the data, i.e. our theoretical prejudices, our physical understanding of the model, and input from previous experiments.

In the case of a large number of parameters (n ≫ 1), the evidence integral cannot be performed straightforwardly and must be obtained either numerically or via an analytic approximation. Amongst numerical methods the most popular is thermodynamic integration [6, 7] but this can be computationally extremely intensive [5]. The simplest analytical approximation is the Laplace approximation, valid when the distribution can be approximated by a multivariate Gaussian. This may hold when the quantity and quality of the data is optimal, but is likely to be valid only in limited cosmological circumstances.

The Bayesian evidence is of interest because it allows a comparison of models amongst an exclusive and exhaustive set {M_i}, i = 1 ... N. We can compute the posterior probability for each hypothesis given the data D using Bayes' theorem:

    P(M_i|D) \propto E(D|M_i)\, \pi(M_i) ,                                                         (3)

where E(D|M_i) is the evidence of the data under the model M_i, and π(M_i) is the prior probability of the i-th model before we see the data. The ratio of the evidences for two competing models is called the Bayes factor [8]

    B_{ij} = \frac{E(D|M_i)}{E(D|M_j)} ,                                                           (4)

and this is also equal to the ratio of the posterior model probabilities if we assume that we do not favour any model a priori, so that π(M_1) = π(M_2) = ... = π(M_N) = 1/N.

The Bayes factor Eq. (4) provides a mathematical representation of Occam's razor, because more complex models tend to be less predictive, lowering their average likelihood in comparison to simpler, more predictive models. More complex models can only be favoured if they are able to provide a significantly improved fit to the data. In simple cases where models give vastly different maximum likelihoods there is no need to employ model selection techniques, but they are essential for properly discussing cases where the improvement of fit is marginal. This latter situation is more or less inevitable whenever the possibility of requiring an additional parameter arises from new data, unless the new data is of vastly greater power than that preceding it; cosmological examples include the inclusion of spectral tilt, dark energy density variation, or the case explored later in this paper of trace isocurvature perturbations.

In this paper we will obtain an analytical formula which approximates the Bayesian evidence by considering the higher-order cumulants of the distribution in a systematic way. The advantage is that with these analytical formulae one can compute the evidence for a given model with an arbitrary number of parameters, given the hierarchy of cumulants of the distribution, assumed previously computed for the likelihood distribution function within the parameter estimation programme.

The evidence needs to be calculated to sufficient precision for robust conclusions to be drawn. The standard interpretational scale, due to Jeffreys [1] and summarized in Ref. [5], strengthens its verdict roughly each time the difference in ln(Evidence) increases by one. The evidence therefore needs to be computed more accurately than this, with an uncertainty of 0.1 in ln(Evidence) easily sufficient, and a factor two worse than that acceptable. This accuracy requirement ensures that the relative model probabilities are little changed by the uncertainty.
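As a minimal illustration of how Eqs. (3) and (4) are used in practice, the following Python sketch turns a set of ln-evidences into Bayes factors and posterior model probabilities under equal model priors. The numerical values are placeholders for illustration only, not results from this paper.

    import numpy as np

    def model_probabilities(ln_evidences):
        """Bayes factors relative to the best model (Eq. 4) and posterior
        model probabilities under equal priors pi(M_i) = 1/N (Eq. 3)."""
        ln_E = np.asarray(ln_evidences, dtype=float)
        ln_B = ln_E - ln_E.max()                    # ln B with respect to the best model
        probs = np.exp(ln_B) / np.exp(ln_B).sum()   # normalized P(M_i|D)
        return ln_B, probs

    # placeholder ln-evidences for three hypothetical models
    ln_B, probs = model_probabilities([-854.1, -853.4, -855.1])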
The first thing we need is to characterize the distribution function for the model with n parameters. Let f(x) be this function, and let us assume that it is properly normalized,

    \int_{-\infty}^{\infty} d^n x \, f(x) = 1 .                                                    (5)

Then, the p-point correlation function is given by

    \langle x_{i_1} \cdots x_{i_p} \rangle = \int_{-\infty}^{\infty} d^n x \; x_{i_1} \cdots x_{i_p} \, f(x) .      (6)

From this distribution function one can always construct the generating functional, φ(u), as the Fourier transform

    \phi(u) = \int_{-\infty}^{\infty} d^n x \, e^{i u \cdot x} f(x) .                              (7)

This function can be expanded as

    \phi(u) = \exp\Big[ \sum_{p=1}^{\infty} \frac{i^p}{p!} A_{i_1 \ldots i_p} u_{i_1} \cdots u_{i_p} \Big] ,        (8)

where A_{i_1...i_p} are totally symmetric rank-p tensors. For instance, if we restrict ourselves to order 4, we can write

    \phi(u) = \exp\Big[ i \mu_i u_i - \frac{1}{2!} C_{ij} u_i u_j - \frac{i}{3!} B_{ijk} u_i u_j u_k + \frac{1}{4!} D_{ijkl} u_i u_j u_k u_l + \cdots + \frac{i^n}{n!} A_{i_1 \ldots i_n} u_{i_1} \cdots u_{i_n} \Big] ,   (9)

where μ_i is the mean value of variable x_i; C_{ij} is the covariance matrix; B_{ijk} is the trilinear matrix associated with the third cumulant or skewness; D_{ijkl} is the rank-4 tensor associated with the fourth cumulant or kurtosis, and A_{i_1...i_n} is the rank-n tensor associated with the n-th cumulant. Their expressions in terms of n-point correlation functions can be obtained from Eq. (7), by realising that

    \langle x_{i_1} \cdots x_{i_n} \rangle = (-i)^n \left. \frac{\partial^n \phi(u)}{\partial u_{i_1} \cdots \partial u_{i_n}} \right|_{u=0} .      (10)

For instance, the first-order term gives

    \langle x_i \rangle = (-i) \left. \frac{\partial \phi(u)}{\partial u_i} \right|_{u=0} = \mu_i .                (11)

The second-order correlation function gives

    \langle x_i x_j \rangle = (-i)^2 \left. \frac{\partial^2 \phi(u)}{\partial u_i \partial u_j} \right|_{u=0} = C_{ij} + \mu_i \mu_j ,            (12)

such that the covariance matrix is obtained, as usual, from C_{ij} = ⟨x_i x_j⟩ − ⟨x_i⟩⟨x_j⟩.

The third-order correlation function gives

    \langle x_i x_j x_k \rangle = (-i)^3 \left. \frac{\partial^3 \phi(u)}{\partial u_i \partial u_j \partial u_k} \right|_{u=0} = B_{ijk} + \mu_i C_{jk} + \mu_j C_{ki} + \mu_k C_{ij} + \mu_i \mu_j \mu_k ,        (13)

such that the skewness matrix is obtained from

    B_{ijk} = \langle x_i x_j x_k \rangle - \langle x_i \rangle \langle x_j x_k \rangle - \langle x_j \rangle \langle x_k x_i \rangle - \langle x_k \rangle \langle x_i x_j \rangle + 2 \langle x_i \rangle \langle x_j \rangle \langle x_k \rangle .          (14)

The fourth-order correlation function gives

    \langle x_i x_j x_k x_l \rangle = (-i)^4 \left. \frac{\partial^4 \phi(u)}{\partial u_i \partial u_j \partial u_k \partial u_l} \right|_{u=0}
      = D_{ijkl} + C_{ij} C_{kl} + C_{ik} C_{jl} + C_{il} C_{jk}
      + B_{ijk} \mu_l + B_{ijl} \mu_k + B_{jkl} \mu_i + B_{ikl} \mu_j
      + C_{ij} \mu_k \mu_l + C_{ik} \mu_j \mu_l + C_{il} \mu_j \mu_k
      + C_{jk} \mu_i \mu_l + C_{jl} \mu_i \mu_k + C_{kl} \mu_i \mu_j
      + \mu_i \mu_j \mu_k \mu_l ,                                                                  (15)

such that the kurtosis matrix is obtained from

    D_{ijkl} = \langle x_i x_j x_k x_l \rangle - \langle x_i x_j \rangle \langle x_k x_l \rangle - \langle x_i x_k \rangle \langle x_j x_l \rangle - \langle x_i x_l \rangle \langle x_j x_k \rangle
      - \langle x_i x_j x_k \rangle \langle x_l \rangle - \langle x_i x_j x_l \rangle \langle x_k \rangle - \langle x_i x_k x_l \rangle \langle x_j \rangle - \langle x_j x_k x_l \rangle \langle x_i \rangle
      + 2 \langle x_i x_j \rangle \langle x_k \rangle \langle x_l \rangle + 2 \langle x_i x_k \rangle \langle x_j \rangle \langle x_l \rangle + 2 \langle x_i x_l \rangle \langle x_j \rangle \langle x_k \rangle + 2 \langle x_j x_k \rangle \langle x_i \rangle \langle x_l \rangle
      + 2 \langle x_j x_l \rangle \langle x_i \rangle \langle x_k \rangle + 2 \langle x_k x_l \rangle \langle x_i \rangle \langle x_j \rangle - 6 \langle x_i \rangle \langle x_j \rangle \langle x_k \rangle \langle x_l \rangle ,   (16)

and so on, for the higher order cumulants.
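In practice these cumulants have to be estimated from the chain produced by the parameter estimation run. The following Python sketch estimates the mean, C_ij, B_ijk and D_ijkl of Eqs. (12), (14) and (16) from an array of samples; it is offered only as an illustration of the definitions, under the assumption that a chain of posterior samples is available, and the synthetic Gaussian chain at the end is merely a sanity check.

    import numpy as np

    def sample_cumulants(samples):
        """Estimate mu_i, C_ij (Eq. 12), B_ijk (Eq. 14) and D_ijkl (Eq. 16)
        from an (N, n) array of posterior samples, e.g. an MCMC chain."""
        x = np.asarray(samples, dtype=float)
        N = x.shape[0]
        mu = x.mean(axis=0)
        d = x - mu                                    # displaced coordinates x_i - mu_i
        C = d.T @ d / N                               # <d_i d_j>
        B = np.einsum('ai,aj,ak->ijk', d, d, d) / N   # third central moment = B_ijk
        M4 = np.einsum('ai,aj,ak,al->ijkl', d, d, d, d) / N
        D = (M4 - np.einsum('ij,kl->ijkl', C, C)      # connected fourth cumulant
                - np.einsum('ik,jl->ijkl', C, C)
                - np.einsum('il,jk->ijkl', C, C))
        return mu, C, B, D

    # sanity check on a synthetic Gaussian chain: B and D should be close to zero
    rng = np.random.default_rng(0)
    mu, C, B, D = sample_cumulants(rng.normal(size=(50000, 3)))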
3 The Gaussian approximation

Let us first evaluate the evidence for a multivariate Gaussian distribution, that is, one in which all the cumulants are zero except the covariance matrix C_{ij} and the means μ_i. In this case, the generating functional and the distribution are given by

    \phi(u) = \exp\Big[ i \mu_i u_i - \frac{1}{2} C_{ij} u_i u_j \Big] ,                           (17)

    f(x) = \frac{1}{(2\pi)^n} \int_{-\infty}^{\infty} d^n u \, e^{-i u \cdot x} \phi(u)            (18)
         = \frac{1}{(2\pi)^{n/2} \sqrt{\det C}} \exp\Big[ -\frac{1}{2} C^{-1}_{ij} (x_i - \mu_i)(x_j - \mu_j) \Big] ,   (19)

which satisfies

    \langle x_i \rangle = \mu_i , \qquad \langle x_i x_j \rangle = C_{ij} + \mu_i \mu_j , \qquad \langle x_i x_j x_k \rangle = \mu_{(i} C_{jk)} + \mu_i \mu_j \mu_k , \quad \ldots      (20)

where the subindices in parenthesis, (ijk), indicate a cyclic sum. Notice that all the n-point correlation functions can be written in terms of the first two moments of the distribution, and all the higher-order cumulants vanish.
3.1 Centred priors

For initial calculations, we assume a top-hat prior and make the unrealistic assumption, to be lifted later, that it is centered at the mean value:

    \pi(x, a) \equiv \begin{cases} (2a)^{-1} & -a < x - \mu < a , \\ 0 & \text{otherwise} . \end{cases}      (21)

Since the Fourier transform of a top-hat function is

    \int_{-\infty}^{\infty} dx \, e^{i u x} \pi(x, a) = \frac{\sin a u}{a u} \, e^{i \mu u} ,

we can write the evidence either way

    E(a_1, \ldots, a_n) = \int_{-\infty}^{\infty} d^n x \, f(x) \prod_{i=1}^{n} \pi(x_i, a_i) = \prod_{i=1}^{n} (2 a_i)^{-1} \int_{-a_1}^{a_1} dx_1 \cdots \int_{-a_n}^{a_n} dx_n \, f(\tilde{x})      (22)
                        = \frac{1}{(2\pi)^n} \int_{-\infty}^{\infty} d^n u \, \phi(u) \prod_{i=1}^{n} \frac{\sin a_i u_i}{a_i u_i} .      (23)

In Eq. (22) we integrate over the displaced coordinate, x̃_i ≡ x_i − μ_i, such that ⟨x̃_i⟩ = 0 and ⟨x̃_i x̃_j⟩ = C_{ij}. From now on, we ignore the tildes, and assume we have moved to those coordinates. Note that the choice of prior is not crucial. We could have chosen a Gaussian prior, and the result would not be very different, except that the window functions, sin z/z, would then be Gaussians. Let us now perform the integration Eq. (22) in the case of 1, 2 and then n variables.

1 variable. Suppose the covariance is just C = σ². The evidence is then

    E(a) = \frac{1}{2a\,\sigma\sqrt{2\pi}} \int_{-a}^{a} dx \, e^{-\frac{x^2}{2\sigma^2}} = \frac{1}{2\pi} \int_{-\infty}^{\infty} du \, \frac{\sin a u}{a u} \, e^{-\frac{1}{2}\sigma^2 u^2} = \frac{1}{2a} \, \mathrm{Erf}\Big[ \frac{a}{\sigma\sqrt{2}} \Big] ,      (24)

where Erf[x] is the error function, which asymptotes very quickly to one for x ≥ 2, or a ≥ 3σ. Therefore, the evidence of a model with centred top-hat prior of width 2a is well approximated by (2a)^{-1}. The wider is the theoretical prior, the smaller is the evidence, as expected.
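As a quick check of Eq. (24), the following Python sketch evaluates the analytic expression and compares it against a brute-force average of the normalized Gaussian over the prior; the values of a and sigma are arbitrary illustrative choices.

    import math
    from scipy import integrate

    def evidence_1d_centred(a, sigma):
        """Analytic evidence of Eq. (24): centred top-hat prior of half-width a
        applied to a 1-d Gaussian likelihood of dispersion sigma."""
        return math.erf(a / (sigma * math.sqrt(2.0))) / (2.0 * a)

    def evidence_1d_numerical(a, sigma):
        """Brute-force check: average of the normalized Gaussian over the prior."""
        like = lambda x: math.exp(-0.5 * (x / sigma) ** 2) / (sigma * math.sqrt(2.0 * math.pi))
        val, _ = integrate.quad(like, -a, a)
        return val / (2.0 * a)

    a, sigma = 2.0, 0.5
    print(evidence_1d_centred(a, sigma), evidence_1d_numerical(a, sigma))
    # both approach 1/(2a) = 0.25 once a is a few times sigma, as noted in the text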
2 variables. Suppose we have two correlated variables, x_1 and x_2, with covariance matrix

    C = \begin{pmatrix} C_{11} & C_{12} \\ C_{12} & C_{22} \end{pmatrix} = \begin{pmatrix} \sigma_1^2 & \rho \sigma_1 \sigma_2 \\ \rho \sigma_1 \sigma_2 & \sigma_2^2 \end{pmatrix} ,      (25)

where the cross-correlation ρ is defined by

    \rho = \frac{\langle x_1 x_2 \rangle}{\sqrt{\langle x_1^2 \rangle \langle x_2^2 \rangle}} = \frac{\langle x_1 x_2 \rangle}{\sigma_1 \sigma_2} ,

with σ_1 and σ_2 the corresponding quadratic dispersions. In this case, the normalized 2-dimensional distribution function is

    f(x) = \frac{1}{2\pi \sigma_1 \sigma_2 \sqrt{1-\rho^2}} \exp\Big[ -\frac{1}{1-\rho^2} \Big( \frac{x_1^2}{2\sigma_1^2} - \frac{\rho x_1 x_2}{\sigma_1 \sigma_2} + \frac{x_2^2}{2\sigma_2^2} \Big) \Big] ,      (26)

which has the property that integrating ("marginalizing") over one of the two variables leaves a properly-normalized Gaussian distribution for the remaining variable,

    \int_{-\infty}^{\infty} dx_2 \, f(x) = \frac{1}{\sigma_1 \sqrt{2\pi}} \, e^{-\frac{x_1^2}{2\sigma_1^2}} .      (27)

Let us now evaluate the evidence Eq. (22) by integrating first over the prior in x_2,

    \frac{1}{2a_2} \int_{-a_2}^{a_2} dx_2 \, f(x) = \frac{e^{-\frac{x_1^2}{2\sigma_1^2}}}{\sigma_1 \sqrt{2\pi}} \cdot \frac{1}{4 a_2} \Big[ \mathrm{Erf}\Big( \frac{a_2 \sigma_1 + \rho \sigma_2 x_1}{\sigma_1 \sigma_2 \sqrt{2(1-\rho^2)}} \Big) + \mathrm{Erf}\Big( \frac{a_2 \sigma_1 - \rho \sigma_2 x_1}{\sigma_1 \sigma_2 \sqrt{2(1-\rho^2)}} \Big) \Big] .      (28)

The first term is the result we would have obtained if we had been marginalizing over x_2; the second is a sum of error functions that still depend on x_1, and modulates the marginalization. We can use the series expansion of the error function to second order,

    \frac{1}{2} \big( \mathrm{Erf}[a + x] + \mathrm{Erf}[a - x] \big) = \mathrm{Erf}[a] - \frac{2 a x^2}{\sqrt{\pi}} e^{-a^2} + O(x^4) ,

to write Eq. (28) to order x_1^2 as

    \frac{1}{2a_2} \int_{-a_2}^{a_2} dx_2 \, f(x) = \frac{e^{-\frac{x_1^2}{2\sigma_1^2}}}{\sigma_1 \sqrt{2\pi}} \Big[ \frac{1}{2a_2} \mathrm{Erf}\Big( \frac{a_2}{\sigma_2 \sqrt{2(1-\rho^2)}} \Big) - \frac{\rho^2 x_1^2 \, e^{-\frac{a_2^2}{2\sigma_2^2(1-\rho^2)}}}{2 \sigma_1^2 \sigma_2 (1-\rho^2) \sqrt{2\pi(1-\rho^2)}} \Big] .      (29)

Integrating now over the x_1 prior, we finally obtain the evidence

    E(a_1, a_2) = \frac{1}{4 a_1 a_2} \int_{-a_1}^{a_1} dx_1 \int_{-a_2}^{a_2} dx_2 \, f(x)
                = \frac{1}{4 a_1 a_2} \mathrm{Erf}\Big( \frac{a_2}{\sigma_2 \sqrt{2(1-\rho^2)}} \Big) \mathrm{Erf}\Big( \frac{a_1}{\sigma_1 \sqrt{2}} \Big)
                  - \frac{\rho^2 \, e^{-\frac{a_2^2}{2\sigma_2^2(1-\rho^2)}}}{2 \sigma_1 \sigma_2 (1-\rho^2) \sqrt{2\pi(1-\rho^2)}} \, \frac{\mathrm{Erf}\big( \frac{a_1}{\sigma_1 \sqrt{2}} \big)}{2 a_1}
                  + \frac{\rho^2 \, e^{-\frac{a_2^2}{2\sigma_2^2(1-\rho^2)} - \frac{a_1^2}{2\sigma_1^2}}}{4\pi \sigma_1^2 \sigma_2 \sqrt{1-\rho^2}} .      (30)

Note that in the limit of no cross-correlations, ρ → 0, the integral factorizes and we can write an exact expression for the evidence,

    E(a_1, a_2) = \frac{1}{4 a_1 a_2} \frac{1}{2\pi \sigma_1 \sigma_2} \int_{-a_1}^{a_1} dx_1 \int_{-a_2}^{a_2} dx_2 \, e^{-\frac{x_1^2}{2\sigma_1^2} - \frac{x_2^2}{2\sigma_2^2}}      (31)
                = \frac{1}{4\pi^2} \int_{-\infty}^{\infty} du_1 \int_{-\infty}^{\infty} du_2 \, \frac{\sin a_1 u_1}{a_1 u_1} \frac{\sin a_2 u_2}{a_2 u_2} \, e^{-\frac{1}{2}\sigma_1^2 u_1^2 - \frac{1}{2}\sigma_2^2 u_2^2}      (32)
                = \frac{1}{4 a_1 a_2} \mathrm{Erf}\Big( \frac{a_1}{\sigma_1 \sqrt{2}} \Big) \mathrm{Erf}\Big( \frac{a_2}{\sigma_2 \sqrt{2}} \Big) .      (33)

It happens, however, that even in the presence of cross-correlations, if the prior is wide (a_i ≥ 2σ_i), then the terms proportional to exponentials are negligible and the evidence becomes, to very good approximation,

    E(a_1, a_2) = \frac{1}{4 a_1 a_2} \mathrm{Erf}\Big( \frac{a_2}{\sigma_2 \sqrt{2(1-\rho^2)}} \Big) \mathrm{Erf}\Big( \frac{a_1}{\sigma_1 \sqrt{2}} \Big) .      (34)

Moreover, in that case, the error functions are very approximately given by 1.
n variables. Suppose we have n correlated variables, x = (x_1, ..., x_n), with covariance matrix

    C_n = \begin{pmatrix} C_{11} & C_{12} & \ldots & C_{1n} \\ C_{12} & C_{22} & \ldots & C_{2n} \\ \vdots & \vdots & \ddots & \vdots \\ C_{1n} & C_{2n} & \ldots & C_{nn} \end{pmatrix} .      (35)

In that case, the probability distribution function can be expressed as

    f(x) = \frac{1}{(2\pi)^{n/2} \sqrt{\det C_n}} \exp\Big[ -\frac{1}{2} x^T C_n^{-1} x \Big] ,      (36)

which has the property that marginalizing over the last variable, x_n, we obtain a correlated probability distribution function for the n−1 variables, x = (x_1, ..., x_{n−1}),

    f(x) = \frac{1}{(2\pi)^{(n-1)/2} \sqrt{\det C_{n-1}}} \exp\Big[ -\frac{1}{2} x^T C_{n-1}^{-1} x \Big] ,      (37)

where the C_{n−1} covariance matrix is given by Eq. (35) without the last column and the last row.

We will now evaluate the evidence Eq. (22) for this multivariate Gaussian, starting with the integration over the last variable, x_n,

    \frac{1}{2a_n} \int_{-a_n}^{a_n} dx_n \, f(x) = \frac{\exp\big[ -\frac{1}{2} x^T C_{n-1}^{-1} x \big]}{(2\pi)^{(n-1)/2} \sqrt{\det C_{n-1}}} \times \Big[ \frac{1}{2a_n} \mathrm{Erf}\Big( \frac{a_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) + O\Big( e^{-\frac{a_n^2 \det C_{n-1}}{2 \det C_n}} \Big) \Big] .      (38)

Integrating now over the next variable, x_{n−1}, we find

    \frac{1}{4 a_n a_{n-1}} \int_{-a_n}^{a_n} dx_n \int_{-a_{n-1}}^{a_{n-1}} dx_{n-1} \, f(x) = \frac{\exp\big[ -\frac{1}{2} x^T C_{n-2}^{-1} x \big]}{(2\pi)^{(n-2)/2} \sqrt{\det C_{n-2}}}
      \times \Big[ \frac{1}{4 a_n a_{n-1}} \mathrm{Erf}\Big( \frac{a_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) \mathrm{Erf}\Big( \frac{a_{n-1}}{\sqrt{2}} \sqrt{\frac{\det C_{n-2}}{\det C_{n-1}}} \Big) + O\Big( e^{-\frac{a_n^2 \det C_{n-1}}{2 \det C_n}} \Big) \Big] .      (39)

Continuing the integration over the priors, we end up with the evidence for the n-dimensional distribution,

    E(a_1, \ldots, a_n) = \frac{1}{\prod_{p=1}^{n} 2 a_p} \int_{-a_1}^{a_1} \cdots \int_{-a_n}^{a_n} d^n x \, f(x)
                        = \prod_{p=1}^{n} \frac{1}{2 a_p} \mathrm{Erf}\Big( \frac{a_p}{\sqrt{2}} \sqrt{\frac{\det C_{p-1}}{\det C_p}} \Big) + O\Big( \exp\Big[ -\sum_{p=1}^{n} \frac{a_p^2 \det C_{p-1}}{2 \det C_p} \Big] \Big) ,      (40)

where the covariance matrices C_p are constructed as above, by eliminating the n−p last rows and columns, until we end up with C_0 ≡ 1. Note that the approximation is very good whenever \sum_{p=1}^{n} (a_p^2 \det C_{p-1})/(2 \det C_p) ≫ 1, which is often the case. Note also that we recover the previous result Eq. (34) for the particular case n = 2.

In the limit that the cross-correlation between the n variables vanishes, the evidence (40) reduces to the exact result

    E(a_1, \ldots, a_n) = \prod_{p=1}^{n} \frac{1}{2 a_p} \mathrm{Erf}\Big( \frac{a_p}{\sigma_p \sqrt{2}} \Big) .      (41)

Note that the evidence Eq. (40) reflects correctly the limit in which we eliminate the need for a new variable x_n, by making its prior vanish,

    \lim_{a_n \to 0} E(a_1, \ldots, a_n) = E(a_1, \ldots, a_{n-1}) \, \frac{1}{\sqrt{2\pi}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} ,      (42)

and thus we recover in that limit a properly-normalized distribution, f(x_1, ..., x_n) → f(x_1, ..., x_{n−1}), while the inspection of the likelihood function alone would not have been able to give a reasonable answer.

On the other hand, in the case that our theoretical prejudice cannot assign a concrete prior to a given variable, we see that the evidence decreases as 1/2a as a increases. Therefore, the Bayesian evidence seems to be a very good discriminator between theoretical priors, and penalizes including too many parameters, a la Occam's razor.
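The leading term of Eq. (40) is straightforward to evaluate once the covariance matrix is known. The following Python sketch implements it using the nested sub-determinants det C_p (with C_0 = 1); the covariance matrix and prior half-widths used at the end are arbitrary illustrative numbers.

    import math
    import numpy as np

    def evidence_gaussian_centred(C, a):
        """Leading term of Eq. (40): evidence of a zero-mean correlated Gaussian
        under centred top-hat priors of half-widths a_p, built from the ratios
        det C_{p-1}/det C_p of the nested covariance sub-matrices."""
        C = np.asarray(C, dtype=float)
        a = np.asarray(a, dtype=float)
        n = len(a)
        dets = [1.0] + [np.linalg.det(C[:p, :p]) for p in range(1, n + 1)]
        E = 1.0
        for p in range(1, n + 1):
            ratio = dets[p - 1] / dets[p]
            E *= math.erf(a[p - 1] * math.sqrt(ratio / 2.0)) / (2.0 * a[p - 1])
        return E

    # two correlated variables; for n = 2 this reproduces the structure of Eq. (34)
    C = np.array([[0.25, 0.1], [0.1, 0.36]])
    print(math.log(evidence_gaussian_centred(C, [2.0, 2.0])))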
3.2 Uncentered priors

It is unlikely that the priors will actually be centred on the mean of the distribution, as the priors are not supposed to know what the data will tell us. We therefore need to generalize the above for uncentred priors. We continue to assume that the priors are top hats.

We also continue to assume for the moment that the probability distribution is well approximated by a Gaussian with mean value μ. We will then use displaced variables x̃_i = x_i − μ_i, and write the Gaussian distribution function as in Eq. (36). The normalized top-hat prior is now uncentered with respect to the mean value,

    \pi(\tilde{x}; a, b) \equiv \begin{cases} (a + b)^{-1} & -a < \tilde{x} < b , \\ 0 & \text{otherwise} . \end{cases}      (43)

For a single variable, the result is exact,

    E(a; b) = \int_{-\infty}^{\infty} dx \, f(x) \, \pi(x; a, b) = \frac{1}{2a + 2b} \Big[ \mathrm{Erf}\Big( \frac{a}{\sigma\sqrt{2}} \Big) + \mathrm{Erf}\Big( \frac{b}{\sigma\sqrt{2}} \Big) \Big] ,      (44)

where we are integrating over the displaced variable x̃, from now on renamed as x. Note that we recover the result Eq. (24) for the centered prior case in the limit b → a.

For two variables, with distribution function Eq. (26), the uncentered Bayesian evidence is

    E(a_1, a_2; b_1, b_2) = \frac{1}{(a_1 + b_1)(a_2 + b_2)} \int_{-a_1}^{b_1} dx_1 \int_{-a_2}^{b_2} dx_2 \, f(x_1, x_2)      (45)
      = \frac{1}{(2a_1 + 2b_1)(2a_2 + 2b_2)} \Big\{ \Big[ \mathrm{Erf}\Big( \frac{a_1}{\sigma_1\sqrt{2}} \Big) + \mathrm{Erf}\Big( \frac{b_1}{\sigma_1\sqrt{2}} \Big) \Big]
        \times \Big[ \mathrm{Erf}\Big( \frac{a_2}{\sigma_2\sqrt{2(1-\rho^2)}} \Big) + \mathrm{Erf}\Big( \frac{b_2}{\sigma_2\sqrt{2(1-\rho^2)}} \Big) \Big]
        - \frac{\rho}{\sqrt{1-\rho^2}} \Big( e^{-\frac{a_1^2}{2\sigma_1^2}} - e^{-\frac{b_1^2}{2\sigma_1^2}} \Big) \Big( e^{-\frac{a_2^2}{2\sigma_2^2(1-\rho^2)}} + e^{-\frac{b_2^2}{2\sigma_2^2(1-\rho^2)}} \Big) \Big\} .      (46)

The evidence for the multiple-variable case Eq. (36) is

    E(a, b) = \int_{-\infty}^{\infty} d^n x \, f(x) \prod_{i=1}^{n} \pi(x_i; a_i, b_i) = \prod_{i=1}^{n} (a_i + b_i)^{-1} \int_{-a_1}^{b_1} d\tilde{x}_1 \cdots \int_{-a_n}^{b_n} d\tilde{x}_n \, f(\tilde{x}) .      (47)

Let us now evaluate it for the multivariate Gaussian Eq. (36), starting with the integration over the last variable, x_n,

    \frac{1}{a_n + b_n} \int_{-a_n}^{b_n} dx_n \, f(x) = \frac{\exp\big[ -\frac{1}{2} x^T C_{n-1}^{-1} x \big]}{(2\pi)^{(n-1)/2} \sqrt{\det C_{n-1}}} \, \frac{1}{2a_n + 2b_n}
      \times \Big[ \mathrm{Erf}\Big( \frac{a_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) + \mathrm{Erf}\Big( \frac{b_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) + O\Big( e^{-\frac{a_n^2 \det C_{n-1}}{2 \det C_n}} + e^{-\frac{b_n^2 \det C_{n-1}}{2 \det C_n}} \Big) \Big] .      (48)

Integrating now over the next variable, x_{n−1}, we find

    \frac{1}{(a_n + b_n)(a_{n-1} + b_{n-1})} \int_{-a_n}^{b_n} dx_n \int_{-a_{n-1}}^{b_{n-1}} dx_{n-1} \, f(x) = \frac{\exp\big[ -\frac{1}{2} x^T C_{n-2}^{-1} x \big]}{(2\pi)^{(n-2)/2} \sqrt{\det C_{n-2}}} \, \frac{1}{(2a_n + 2b_n)(2a_{n-1} + 2b_{n-1})}
      \times \Big[ \mathrm{Erf}\Big( \frac{a_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) + \mathrm{Erf}\Big( \frac{b_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) \Big]
      \times \Big[ \mathrm{Erf}\Big( \frac{a_{n-1}}{\sqrt{2}} \sqrt{\frac{\det C_{n-2}}{\det C_{n-1}}} \Big) + \mathrm{Erf}\Big( \frac{b_{n-1}}{\sqrt{2}} \sqrt{\frac{\det C_{n-2}}{\det C_{n-1}}} \Big) \Big]
      + O\Big( \Big[ e^{-\frac{a_n^2 \det C_{n-1}}{2 \det C_n}} + e^{-\frac{b_n^2 \det C_{n-1}}{2 \det C_n}} \Big] \Big[ e^{-\frac{a_{n-1}^2 \det C_{n-2}}{2 \det C_{n-1}}} + e^{-\frac{b_{n-1}^2 \det C_{n-2}}{2 \det C_{n-1}}} \Big] \Big) .      (49)–(51)

Continuing the integration over the priors, we end up with the evidence for the n-dimensional distribution,

    E(a, b) = \frac{1}{\prod_{p=1}^{n} (a_p + b_p)} \int_{-a_1}^{b_1} \cdots \int_{-a_n}^{b_n} d^n x \, f(x)
            = \prod_{p=1}^{n} \frac{1}{2a_p + 2b_p} \Big[ \mathrm{Erf}\Big( \frac{a_p}{\sqrt{2}} \sqrt{\frac{\det C_{p-1}}{\det C_p}} \Big) + \mathrm{Erf}\Big( \frac{b_p}{\sqrt{2}} \sqrt{\frac{\det C_{p-1}}{\det C_p}} \Big) \Big]
              + O\Big( \prod_{p=1}^{n} \Big[ \exp\Big( -\frac{a_p^2 \det C_{p-1}}{2 \det C_p} \Big) + \exp\Big( -\frac{b_p^2 \det C_{p-1}}{2 \det C_p} \Big) \Big] \Big) ,      (52)

where the covariance matrices C_p are constructed as above, by eliminating the n−p last rows and columns, until C_0 ≡ 1. Note that the approximation is very good whenever the exponents are large, \sum_{p=1}^{n} (a_p^2 \det C_{p-1})/(2 \det C_p) ≫ 1, which is often the case. Note also that we recover the expression of the evidence for the centered priors Eq. (40) in the limit b → a.

Let us now evaluate the evidence for a distribution normalized to the maximum of the likelihood distribution,

    f(x) = L_{\max} \exp\Big[ -\frac{1}{2} x^T C_n^{-1} x \Big] .      (53)

In this case, the evidence is given by Eq. (52), multiplied by a factor L_max × (2π)^{n/2}√(det C_n) from the normalization. We can then evaluate the logarithm of the evidence, ignoring the exponentially-small corrections, as

    \ln E = \ln L_{\max} + \frac{n}{2} \ln(2\pi) + \frac{1}{2} \ln \det C_n - \sum_{p=1}^{n} \ln(2a_p + 2b_p)
            + \sum_{p=1}^{n} \ln\Big[ \mathrm{Erf}\Big( \frac{a_p}{\sqrt{2}} \sqrt{\frac{\det C_{p-1}}{\det C_p}} \Big) + \mathrm{Erf}\Big( \frac{b_p}{\sqrt{2}} \sqrt{\frac{\det C_{p-1}}{\det C_p}} \Big) \Big] .      (54)

Uncorrelated case. Suppose we have a multivariate Gaussian distribution without correlations between variables, i.e. C_{ij} = σ_i² δ_{ij} is a diagonal matrix; then the evidence reads exactly,

    E(a, b) = \frac{1}{\prod_{p=1}^{n} (a_p + b_p)} \int_{-a_1}^{b_1} \cdots \int_{-a_n}^{b_n} d^n x \, f(x) = \prod_{p=1}^{n} \frac{1}{2(a_p + b_p)} \Big[ \mathrm{Erf}\Big( \frac{a_p}{\sigma_p\sqrt{2}} \Big) + \mathrm{Erf}\Big( \frac{b_p}{\sigma_p\sqrt{2}} \Big) \Big] ,      (55)

where σ_p are the dispersions of each variable x̃_p, and thus the logarithm of the evidence becomes

    \ln E = \ln L_{\max} + \frac{n}{2} \ln(2\pi) + \sum_{p=1}^{n} \ln \sigma_p - \sum_{p=1}^{n} \ln(2a_p + 2b_p) + \sum_{p=1}^{n} \ln\Big[ \mathrm{Erf}\Big( \frac{a_p}{\sigma_p\sqrt{2}} \Big) + \mathrm{Erf}\Big( \frac{b_p}{\sigma_p\sqrt{2}} \Big) \Big] .      (56)

Laplace approximation. The Laplacian approximation to the evidence assumes the distribution is a correlated Gaussian, and that the priors are large enough so that the whole distribution fits easily inside them, in which case the error functions are approximately unity and do not contribute to the evidence; from Eq. (54) we now have

    \ln E = \ln L_{\max} + \frac{n}{2} \ln(2\pi) + \frac{1}{2} \ln \det C_n - \sum_{p=1}^{n} \ln \Delta\theta_p ,      (57)

where Δθ_p = a_p + b_p is the parameter interval associated to the prior. In the next section we will compare the different approximations.
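The following Python sketch evaluates the Gaussian log-evidence of Eq. (54) and the Laplace approximation of Eq. (57) for a given covariance matrix, peak likelihood and top-hat prior limits; it assumes numpy is available and that the exponentially small corrections of Eq. (52) can be neglected.

    import math
    import numpy as np

    def ln_evidence_gaussian(ln_Lmax, C, a, b):
        """Eq. (54): ln-evidence for a Gaussian likelihood of covariance C,
        normalized to L_max at its peak, with top-hat priors extending a_p below
        and b_p above the mean of each parameter."""
        C = np.asarray(C, dtype=float)
        a, b = np.asarray(a, float), np.asarray(b, float)
        n = len(a)
        dets = [1.0] + [np.linalg.det(C[:p, :p]) for p in range(1, n + 1)]
        lnE = ln_Lmax + 0.5 * n * math.log(2.0 * math.pi) + 0.5 * math.log(dets[n])
        for p in range(1, n + 1):
            r = math.sqrt(dets[p - 1] / dets[p] / 2.0)
            lnE += math.log(math.erf(a[p - 1] * r) + math.erf(b[p - 1] * r))
            lnE -= math.log(2.0 * (a[p - 1] + b[p - 1]))
        return lnE

    def ln_evidence_laplace(ln_Lmax, C, a, b):
        """Eq. (57): the Laplace approximation, valid when the priors comfortably
        contain the whole posterior so that every error function is close to 1."""
        C = np.asarray(C, dtype=float)
        n = C.shape[0]
        widths = np.asarray(a, float) + np.asarray(b, float)
        return (ln_Lmax + 0.5 * n * math.log(2.0 * math.pi)
                + 0.5 * math.log(np.linalg.det(C)) - np.log(widths).sum())

As a usage note, comparing the two functions on the same inputs shows directly how much the prior-edge (error-function) terms of Eq. (54) matter for a given problem.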
4 Non-Gaussian corrections

The advantage of this method is that one can perform a systematic computation of the evidence of a given model with its own priors, given an arbitrary set of moments of the distribution. Here we will consider the first two beyond the covariance matrix, i.e. the skewness and the kurtosis terms, see Eq. (9).

4.1 Skewness

Let us start with the first correction to the Gaussian approximation, the trilinear term B_{ijk}. For this, we write the generating functional (9) as

    \phi(u) = \exp\Big[ i \mu_i u_i - \frac{1}{2!} C_{ij} u_i u_j - \frac{i}{3!} B_{ijk} u_i u_j u_k \Big] .      (58)

By performing a change of variable, u_i = y_i − i C^{-1}_{ik}(x_k − μ_k), we can evaluate the Fourier transform integral and obtain the properly-normalized probability distribution function

    f(x) = \frac{1}{(2\pi)^{n/2} \sqrt{\det C_n}} \exp\Big[ -\frac{1}{2} x^T C_n^{-1} x \Big]
           \times \Big[ 1 - \frac{1}{2} B_{ijk} C^{-1}_{ij} C^{-1}_{kl} x_l + \frac{1}{6} B_{ijk} C^{-1}_{il} C^{-1}_{jm} C^{-1}_{kn} x_l x_m x_n \Big] ,      (59)

where x_k are the displaced coordinates (x_k − μ_k). This skewed distribution function satisfies

    \langle x_i \rangle = 0 , \qquad \langle x_i x_j \rangle = C_{ij} , \qquad \langle x_i x_j x_k \rangle = B_{ijk} , \qquad \langle x_i x_j x_k x_l \rangle = 0 , \quad \ldots      (60)

as can be confirmed by direct evaluation. Let us now compute the evidence Eq. (22) for this skewed model. Since the extra terms in the parenthesis of Eq. (59) are both odd functions of x, when integrating over an even range like that of the centered top-hat prior Eq. (21), their contribution to the evidence vanishes, and thus the final evidence for the skewed model does not differ from that of the Gaussian model Eq. (40). If the prior is off-centered with respect to the mean, e.g. like in Eq. (43), then the contribution of the odd terms to the evidence would not vanish. Let us evaluate their contribution.

For a single variable (n = 1), the correctly-normalized likelihood function can be written as

    f(x) = \frac{e^{-x^2/2\sigma^2}}{\sigma\sqrt{2\pi}} \Big[ 1 - \frac{B x}{2\sigma^4} + \frac{B x^3}{6\sigma^6} \Big] ,

satisfying ⟨x⟩ = 0, ⟨x²⟩ = σ², ⟨x³⟩ = B, and the Bayesian integral can be computed exactly as

    E(a, b) = \frac{1}{2a + 2b} \Big[ \mathrm{Erf}\Big( \frac{a}{\sigma\sqrt{2}} \Big) + \mathrm{Erf}\Big( \frac{b}{\sigma\sqrt{2}} \Big) \Big]
              - \frac{B \sigma^{-3}}{6\sqrt{2\pi}} \Big[ \Big( 1 - \frac{a^2}{\sigma^2} \Big) e^{-\frac{a^2}{2\sigma^2}} - \Big( 1 - \frac{b^2}{\sigma^2} \Big) e^{-\frac{b^2}{2\sigma^2}} \Big] \frac{1}{a + b} .      (61)

Note that for even (centered) priors, with b = a, the evidence reduces to Eq. (24).

For an arbitrary number of variables, the computation is more complicated. Let us start with the n-th variable and, in order to compute the integral, let us define the auxiliary function

    g(\lambda) = \int_{-a_n}^{b_n} dx_n \, x_n \, \frac{\exp\big[ -\frac{\lambda}{2} x^T C_n^{-1} x \big]}{(2\pi)^{n/2} \sqrt{\det C_n}}
               = \frac{\exp\big[ -\frac{1}{2} x^T C_{n-1}^{-1} x \big]}{(2\pi)^{(n-1)/2} \sqrt{\det C_{n-1}}}
                 \times \frac{1}{\lambda} \sqrt{\frac{\det C_n}{2\pi \det C_{n-1}}} \Big[ \exp\Big( -\frac{\lambda a_n^2}{2} \frac{\det C_{n-1}}{\det C_n} \Big) - \exp\Big( -\frac{\lambda b_n^2}{2} \frac{\det C_{n-1}}{\det C_n} \Big) \Big] ,      (62)

such that, using Erf′[x] = (2/√π) e^{-x²},

    -2 g'(\lambda = 1) = \int_{-a_n}^{b_n} dx_n \, x_n \, (x^T C_n^{-1} x) \, \frac{\exp\big[ -\frac{1}{2} x^T C_n^{-1} x \big]}{(2\pi)^{n/2} \sqrt{\det C_n}}
      = \frac{\exp\big[ -\frac{1}{2} x^T C_{n-1}^{-1} x \big]}{(2\pi)^{(n-1)/2} \sqrt{\det C_{n-1}}} \times \frac{1}{\sqrt{2\pi}} \sqrt{\frac{\det C_n}{\det C_{n-1}}}
        \Big[ \Big( 2 + a_n^2 \frac{\det C_{n-1}}{\det C_n} \Big) e^{-\frac{a_n^2}{2} \frac{\det C_{n-1}}{\det C_n}} - \Big( 2 + b_n^2 \frac{\det C_{n-1}}{\det C_n} \Big) e^{-\frac{b_n^2}{2} \frac{\det C_{n-1}}{\det C_n}} \Big] .      (63)

Therefore, with the use of Eq. (63), the integral of the skewness-corrected distribution function Eq. (59) over the x_n uncentered prior becomes

    \int_{-a_n}^{b_n} dx_n \, f(x) = \frac{\exp\big[ -\frac{1}{2} x^T C_{n-1}^{-1} x \big]}{(2\pi)^{(n-1)/2} \sqrt{\det C_{n-1}}}
      \Big\{ \frac{1}{2} \Big[ \mathrm{Erf}\Big( \frac{a_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) + \mathrm{Erf}\Big( \frac{b_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) \Big]
      - \frac{1}{6} B_{ijn} C^{-1}_{ij} \, \frac{1}{\sqrt{2\pi}} \sqrt{\frac{\det C_{n-1}}{\det C_n}}
        \Big[ \Big( 1 - a_n^2 \frac{\det C_{n-1}}{\det C_n} \Big) e^{-\frac{a_n^2 \det C_{n-1}}{2 \det C_n}} - \Big( 1 - b_n^2 \frac{\det C_{n-1}}{\det C_n} \Big) e^{-\frac{b_n^2 \det C_{n-1}}{2 \det C_n}} \Big] \Big\} .      (64)

Let us define two new functions,

    E_i(a_i, b_i) = \frac{1}{2} \Big[ \mathrm{Erf}\Big( \frac{a_i}{\sqrt{2}} \sqrt{\frac{\det C_{i-1}}{\det C_i}} \Big) + \mathrm{Erf}\Big( \frac{b_i}{\sqrt{2}} \sqrt{\frac{\det C_{i-1}}{\det C_i}} \Big) \Big] ,      (65)

    F_i(a_i, b_i) = \frac{1}{6\sqrt{2\pi}} \sqrt{\frac{\det C_{i-1}}{\det C_i}} \Big[ \Big( 1 - a_i^2 \frac{\det C_{i-1}}{\det C_i} \Big) e^{-\frac{a_i^2 \det C_{i-1}}{2 \det C_i}} - \Big( 1 - b_i^2 \frac{\det C_{i-1}}{\det C_i} \Big) e^{-\frac{b_i^2 \det C_{i-1}}{2 \det C_i}} \Big] .

Integrating iteratively over x_{n−1}, ..., x_1, we end up with the Bayesian evidence for the third-order-corrected probability distribution function f(x),

    E(a, b) = \prod_{p=1}^{n} \frac{E_p(a_p, b_p)}{a_p + b_p} \Big[ 1 - \sum_{k=1}^{n} B_{ijk} C^{-1}_{ij} \, \frac{F_k(a_k, b_k)}{E_k(a_k, b_k)} \Big] .      (66)

Unless B_{ijk} C^{-1}_{ij} is very large, the correction to the error function is exponentially suppressed, and we do not expect significant departures from the Gaussian case Eq. (40). Note also that if the prior is symmetric, it is easy to see that the skewness part of the integral vanishes, F_k(a_k, b_k) → 0, as can be checked explicitly by taking b_k → a_k.
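The single-variable result Eq. (61) can be checked numerically. The Python sketch below evaluates the analytic formula and compares it against direct quadrature of the skewed likelihood given in the text; the values of a, b, sigma and B at the end are arbitrary illustrative choices, and B must be small enough for the expansion to make sense.

    import math
    from scipy import integrate

    def evidence_1d_skewed(a, b, sigma, B):
        """Eq. (61): evidence of the skewness-corrected 1-d distribution under an
        uncentred top-hat prior running from -a to +b about the mean."""
        S = 0.5 * (math.erf(a / (sigma * math.sqrt(2))) + math.erf(b / (sigma * math.sqrt(2))))
        T = ((1 - a**2 / sigma**2) * math.exp(-a**2 / (2 * sigma**2))
             - (1 - b**2 / sigma**2) * math.exp(-b**2 / (2 * sigma**2)))
        return (S - B * T / (6 * sigma**3 * math.sqrt(2 * math.pi))) / (a + b)

    def evidence_1d_skewed_numerical(a, b, sigma, B):
        """Direct integration of the skewed likelihood of the text over the prior."""
        f = lambda x: (math.exp(-x**2 / (2 * sigma**2)) / (sigma * math.sqrt(2 * math.pi))
                       * (1 - B * x / (2 * sigma**4) + B * x**3 / (6 * sigma**6)))
        val, _ = integrate.quad(f, -a, b)
        return val / (a + b)

    print(evidence_1d_skewed(1.0, 2.0, 0.5, 0.02),
          evidence_1d_skewed_numerical(1.0, 2.0, 0.5, 0.02))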
4.2 Kurtosis

The next correction beyond skewness is the fourth order moment or kurtosis, given by the D_{ijkl} term in Eq. (9). Let us ignore for the moment the third order skewness and write

    \phi(u) = \exp\Big[ i \mu_i u_i - \frac{1}{2!} C_{ij} u_i u_j + \frac{1}{4!} D_{ijkl} u_i u_j u_k u_l \Big] .      (67)

By performing the same change of variables, u_i = y_i − i C^{-1}_{ik}(x_k − μ_k), we can now compute the Fourier transform and obtain the properly-normalized probability distribution function

    f(x) = \frac{1}{(2\pi)^{n/2} \sqrt{\det C_n}} \exp\Big[ -\frac{1}{2} x^T C_n^{-1} x \Big]
           \times \Big[ 1 + \frac{1}{8} D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} - \frac{1}{4} D_{ijkl} C^{-1}_{ij} C^{-1}_{km} C^{-1}_{ln} x_m x_n + \frac{1}{24} D_{ijkl} C^{-1}_{im} C^{-1}_{jn} C^{-1}_{kp} C^{-1}_{lq} x_m x_n x_p x_q \Big] .      (68)

Performing the integrals, it is easy to see that this distribution satisfies

    \langle x_i x_j \rangle = C_{ij} , \qquad \langle x_i x_j x_k x_l \rangle = D_{ijkl} + C_{ij} C_{kl} + C_{ik} C_{jl} + C_{il} C_{jk} , \quad \ldots      (69)

Note that in order for the new likelihood distribution (68) to be positive definite, it is required that D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} < 4, and if we impose that there is only one maximum at the center, then it must satisfy D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} < 2. These conditions impose bounds on the maximum possible deviation of the evidence from that of a Gaussian.

Let us now compute the evidence Eq. (22) for this kurtosis model. The extra terms in the parenthesis of Eq. (68) are both even functions of x, and we cannot ignore them, even for centered priors.

For a single variable (n = 1), the correctly-normalized likelihood function can be written as

    f(x) = \frac{e^{-\frac{x^2}{2\sigma^2}}}{\sigma\sqrt{2\pi}} \Big[ 1 + \frac{D}{8\sigma^4} - \frac{D x^2}{4\sigma^6} + \frac{D x^4}{24\sigma^8} \Big] ,

satisfying ⟨x⟩ = 0, ⟨x²⟩ = σ², ⟨x³⟩ = 0, ⟨x⁴⟩ = D + 3σ⁴, etc. The Bayesian integral can be computed exactly as

    E(a, b) = \frac{1}{2a + 2b} \Big[ \mathrm{Erf}\Big( \frac{a}{\sigma\sqrt{2}} \Big) + \mathrm{Erf}\Big( \frac{b}{\sigma\sqrt{2}} \Big) \Big]
              + \frac{D \sigma^{-4}}{8\sqrt{2\pi}} \Big[ \frac{a}{\sigma} \Big( 1 - \frac{a^2}{3\sigma^2} \Big) e^{-\frac{a^2}{2\sigma^2}} + \frac{b}{\sigma} \Big( 1 - \frac{b^2}{3\sigma^2} \Big) e^{-\frac{b^2}{2\sigma^2}} \Big] \frac{1}{a + b} .      (70)

For an arbitrary number of variables, the computation is again much more complicated. Let us start with the n-th variable and, in order to compute the first integral, let us define a new auxiliary function

    h(\lambda) = \int_{-a_n}^{b_n} dx_n \, \frac{\exp\big[ -\frac{\lambda}{2} x^T C_n^{-1} x \big]}{(2\pi)^{n/2} \sqrt{\det C_n}}
               = \frac{\exp\big[ -\frac{1}{2} x^T C_{n-1}^{-1} x \big]}{(2\pi)^{(n-1)/2} \sqrt{\det C_{n-1}}}
                 \times \frac{1}{2\sqrt{\lambda}} \Big[ \mathrm{Erf}\Big( a_n \sqrt{\frac{\lambda}{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) + \mathrm{Erf}\Big( b_n \sqrt{\frac{\lambda}{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) \Big] ,      (71)

such that,

    -2 h'(\lambda = 1) = \int_{-a_n}^{b_n} dx_n \, (x^T C_n^{-1} x) \, \frac{\exp\big[ -\frac{1}{2} x^T C_n^{-1} x \big]}{(2\pi)^{n/2} \sqrt{\det C_n}}
      = \frac{\exp\big[ -\frac{1}{2} x^T C_{n-1}^{-1} x \big]}{(2\pi)^{(n-1)/2} \sqrt{\det C_{n-1}}}
        \times \Big\{ \frac{1}{2} \Big[ \mathrm{Erf}\Big( \frac{a_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) + \mathrm{Erf}\Big( \frac{b_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) \Big]
        - \frac{1}{\sqrt{2\pi}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big[ a_n e^{-\frac{a_n^2 \det C_{n-1}}{2 \det C_n}} + b_n e^{-\frac{b_n^2 \det C_{n-1}}{2 \det C_n}} \Big] \Big\} ,      (72)

    4 h''(\lambda = 1) = \int_{-a_n}^{b_n} dx_n \, (x^T C_n^{-1} x)^2 \, \frac{\exp\big[ -\frac{1}{2} x^T C_n^{-1} x \big]}{(2\pi)^{n/2} \sqrt{\det C_n}}
      = \frac{\exp\big[ -\frac{1}{2} x^T C_{n-1}^{-1} x \big]}{(2\pi)^{(n-1)/2} \sqrt{\det C_{n-1}}}
        \times \Big\{ \frac{3}{2} \Big[ \mathrm{Erf}\Big( \frac{a_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) + \mathrm{Erf}\Big( \frac{b_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) \Big]
        - \frac{3}{\sqrt{2\pi}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big[ a_n e^{-\frac{a_n^2 \det C_{n-1}}{2 \det C_n}} + b_n e^{-\frac{b_n^2 \det C_{n-1}}{2 \det C_n}} \Big]
        - \frac{1}{\sqrt{2\pi}} \Big( \frac{\det C_{n-1}}{\det C_n} \Big)^{3/2} \Big[ a_n^3 e^{-\frac{a_n^2 \det C_{n-1}}{2 \det C_n}} + b_n^3 e^{-\frac{b_n^2 \det C_{n-1}}{2 \det C_n}} \Big] \Big\} .      (73)

Therefore, with the use of Eqs. (72) and (73), the integral of the kurtosis-corrected distribution function (68) over the x_n prior becomes

    \int_{-a_n}^{b_n} dx_n \, f(x) = \frac{\exp\big[ -\frac{1}{2} x^T C_{n-1}^{-1} x \big]}{(2\pi)^{(n-1)/2} \sqrt{\det C_{n-1}}}
      \Big\{ \frac{1}{2} \Big[ \mathrm{Erf}\Big( \frac{a_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) + \mathrm{Erf}\Big( \frac{b_n}{\sqrt{2}} \sqrt{\frac{\det C_{n-1}}{\det C_n}} \Big) \Big]
      + \frac{1}{8} D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} \, \frac{1}{\sqrt{2\pi}} \sqrt{\frac{\det C_{n-1}}{\det C_n}}
        \Big[ a_n \Big( 1 - \frac{a_n^2}{3} \frac{\det C_{n-1}}{\det C_n} \Big) e^{-\frac{a_n^2 \det C_{n-1}}{2 \det C_n}} + b_n \Big( 1 - \frac{b_n^2}{3} \frac{\det C_{n-1}}{\det C_n} \Big) e^{-\frac{b_n^2 \det C_{n-1}}{2 \det C_n}} \Big] \Big\} .      (74)

We can now define a new function

    G_i(a_i, b_i) = \frac{1}{8\sqrt{2\pi}} \sqrt{\frac{\det C_{i-1}}{\det C_i}} \Big[ a_i \Big( 1 - \frac{a_i^2}{3} \frac{\det C_{i-1}}{\det C_i} \Big) e^{-\frac{a_i^2 \det C_{i-1}}{2 \det C_i}} + b_i \Big( 1 - \frac{b_i^2}{3} \frac{\det C_{i-1}}{\det C_i} \Big) e^{-\frac{b_i^2 \det C_{i-1}}{2 \det C_i}} \Big] .      (75)

Integrating iteratively over x_{n−1}, ..., x_1, we end up with the Bayesian evidence for the fourth-order-corrected probability distribution function f(x),

    E(a, b) = \prod_{p=1}^{n} \frac{E_p(a_p, b_p)}{a_p + b_p} \Big[ 1 + D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} \sum_{m=1}^{n} \frac{G_m(a_m, b_m)}{E_m(a_m, b_m)} \Big] ,      (76)

so, unless D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} is very large, the correction to the error function is exponentially suppressed, and we do not expect significant departures from the Gaussian case, Eq. (40).

In order to compare models it is customary to compute the logarithm of the evidence. Let us assume that we are given a likelihood distribution function normalized by the maximum likelihood, and with corrections up to fourth order,

    f(x) = L_{\max} \exp\Big[ -\frac{1}{2} x^T C_n^{-1} x \Big] \Big( 1 + \frac{1}{8} D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} \Big)^{-1}
           \Big[ 1 - \frac{1}{2} B_{ijk} C^{-1}_{ij} C^{-1}_{kl} x_l + \frac{1}{6} B_{ijk} C^{-1}_{il} C^{-1}_{jm} C^{-1}_{kn} x_l x_m x_n
           + \frac{1}{8} D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} - \frac{1}{4} D_{ijkl} C^{-1}_{ij} C^{-1}_{km} C^{-1}_{ln} x_m x_n + \frac{1}{24} D_{ijkl} C^{-1}_{im} C^{-1}_{jn} C^{-1}_{kp} C^{-1}_{lq} x_m x_n x_p x_q \Big] .      (77)

Note that it is normalized so that the maximum corresponds to the mean-centered distribution, i.e. x = 0. In this case, the evidence of the normalized distribution is given by

    E(a, b) = L_{\max} (2\pi)^{n/2} \sqrt{\det C_n} \Big( 1 + \frac{1}{8} D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} \Big)^{-1}      (78)
              \times \prod_{p=1}^{n} \frac{E_p(a_p, b_p)}{a_p + b_p} \Big[ 1 - \sum_{k=1}^{n} B_{ijk} C^{-1}_{ij} \frac{F_k(a_k, b_k)}{E_k(a_k, b_k)} + D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} \sum_{m=1}^{n} \frac{G_m(a_m, b_m)}{E_m(a_m, b_m)} \Big] .

We can then evaluate the logarithm of the evidence by

    \ln E = \ln L_{\max} + \frac{n}{2} \ln(2\pi) + \frac{1}{2} \ln \det C_n - \ln\Big( 1 + \frac{1}{8} D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} \Big) - \sum_{p=1}^{n} \ln(2a_p + 2b_p)
            + \sum_{p=1}^{n} \ln\Big[ \mathrm{Erf}\Big( \frac{a_p}{\sqrt{2}} \sqrt{\frac{\det C_{p-1}}{\det C_p}} \Big) + \mathrm{Erf}\Big( \frac{b_p}{\sqrt{2}} \sqrt{\frac{\det C_{p-1}}{\det C_p}} \Big) \Big]      (79)
            + \ln\Big[ 1 - \sum_{k=1}^{n} B_{ijk} C^{-1}_{ij} \frac{F_k(a_k, b_k)}{E_k(a_k, b_k)} + D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} \sum_{m=1}^{n} \frac{G_m(a_m, b_m)}{E_m(a_m, b_m)} \Big] .

Note that the condition D_{ijkl} C^{-1}_{ij} C^{-1}_{kl} < 2 constrains the maximum amount that the kurtosis corrections can contribute to the evidence.

Uncorrelated case. In the case where the likelihood distribution has no correlations among the different variables, the exact expression for the Bayesian evidence is

    \ln E = \ln L_{\max} + \frac{n}{2} \ln(2\pi) + \sum_{p=1}^{n} \ln \sigma_p - \sum_{p=1}^{n} \ln(2a_p + 2b_p) + \sum_{p=1}^{n} \ln\Big[ \mathrm{Erf}\Big( \frac{a_p}{\sigma_p\sqrt{2}} \Big) + \mathrm{Erf}\Big( \frac{b_p}{\sigma_p\sqrt{2}} \Big) \Big]      (80)
            - \ln\Big( 1 + \frac{1}{8} D_{iijj} \sigma_i^{-2} \sigma_j^{-2} \Big) + \ln\Big[ 1 - \sum_{k=1}^{n} B_{iik} \sigma_i^{-2} \frac{F_k(a_k, b_k)}{E_k(a_k, b_k)} + D_{iijj} \sigma_i^{-2} \sigma_j^{-2} \sum_{m=1}^{n} \frac{G_m(a_m, b_m)}{E_m(a_m, b_m)} \Big] ,

where σ_p are the corresponding dispersions of variables x_p, and the functions E_i, F_i and G_i are the corresponding limiting functions of Eqs. (65) and (75) for uncorrelated matrices.
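The uncorrelated formula Eq. (80) is simple enough to code directly. The Python sketch below is a minimal implementation under the assumption of a diagonal covariance, using the diagonal limits of E_p, F_p and G_p (in which det C_{p-1}/det C_p reduces to 1/sigma_p^2); the cumulant tensors B and D are expected in the conventions of Eqs. (14) and (16).

    import math
    import numpy as np

    def ln_evidence_nongaussian_diag(ln_Lmax, sigma, a, b, B=None, D=None):
        """Eq. (80): ln-evidence for an uncorrelated likelihood C_ij = sigma_i^2 delta_ij,
        with the leading skewness (B_ijk) and kurtosis (D_ijkl) corrections."""
        sigma = np.asarray(sigma, float)
        a = np.asarray(a, float)
        b = np.asarray(b, float)
        n = len(sigma)
        s2pi = math.sqrt(2.0 * math.pi)
        ea = np.exp(-a**2 / (2 * sigma**2))
        eb = np.exp(-b**2 / (2 * sigma**2))
        E = 0.5 * (np.array([math.erf(v) for v in a / (sigma * math.sqrt(2))])
                   + np.array([math.erf(v) for v in b / (sigma * math.sqrt(2))]))
        F = ((1 - a**2 / sigma**2) * ea - (1 - b**2 / sigma**2) * eb) / (6 * s2pi * sigma)
        G = (a * (1 - a**2 / (3 * sigma**2)) * ea
             + b * (1 - b**2 / (3 * sigma**2)) * eb) / (8 * s2pi * sigma)

        lnE = (ln_Lmax + 0.5 * n * math.log(2 * math.pi) + np.log(sigma).sum()
               - np.log(2 * (a + b)).sum() + np.log(2 * E).sum())
        correction = 1.0
        if B is not None:                          # skewness term of Eq. (80)
            correction -= np.einsum('iik,i,k->', B, 1 / sigma**2, F / E)
        if D is not None:                          # kurtosis terms of Eq. (80)
            dcon = np.einsum('iijj,i,j->', D, 1 / sigma**2, 1 / sigma**2)
            lnE -= math.log(1 + dcon / 8.0)
            correction += dcon * np.sum(G / E)
        return lnE + math.log(correction)

With B = D = None the function reduces to the Gaussian expression Eq. (56), which provides a convenient consistency check.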
+ sponding limiting functions of Eqs. (65) and (75) for uncorrelated matrices.
2411
+ 5
2412
+ Model comparison
2413
+ Finally we turn to specific applications of the formalism discussed above. Initially we will carry out some
2414
+ toy model tests of its performance, and then examine real cosmological applications for which we previously
2415
+ obtained results by thermodynamic integration [5].
2416
+ 14
2417
+
2418
+ Figure 1: This figure shows the calculated evidence as a function of the number of likelihood evaluations.
2419
+ Note that the horizontal axis is logarithmic. The solid line corresponds to the thermodynamic integration.
2420
+ The dotted line and dot-dashed lines are the analytical methods with and without non-Gaussian corrections
2421
+ applied. The horizontal dashed line is the number obtained by the direct integration. The upper two panels
2422
+ correspond to Lg, while the lower two to Lng. The left-hand side panels correspond to wide flat priors of
2423
+ (−7, 10) on both parameters, while the right-hand side to the narrow priors of (−2, 3) on both parameters.
2424
+ See text for discussion.
2425
+ 5.1
2426
+ A baby-toy model comparison
2427
+ We begin with a very simple two-dimensional toy model. The purpose of this section is to illustrate the
2428
+ ineffectiveness of the thermodynamic integration and to give an indication of the performance of the method
2429
+ we propose here. In addition, the two-dimensional model is simple enough to allow a brute-force direct
2430
+ numerical integration of evidence allowing us to check the accuracy at the same time. We use the following
2431
+ two forms of likelihood:
2432
+ Lg(x, y)
2433
+ =
2434
+ exp
2435
+
2436
+ −2x2 − 2(y − 1)2 − xy
2437
+ 2
2438
+
2439
+ (81)
2440
+ Lng(x, y)
2441
+ =
2442
+ exp
2443
+
2444
+ −2x2 − 2(y − 1)2 − xy
2445
+ 2
2446
+
2447
+ + exp
2448
+
2449
+ −2x2 − 2y2 − 3xy
2450
+ 2
2451
+
2452
+ (82)
2453
+ The subscripts g and ng indicate the Gaussian and non-Gaussian cases respectively.
2454
+ Firstly, we calculate the evidence by the analytical method using Eqs. (56) and (80) and covariance
2455
+ 15
2456
+
2457
+ matrices inferred from sampling the likelihood using the vanilla Metropolis–Hastings algorithm with fixed
2458
+ proposal widths. Chains ranging from few to several million samples were used. We also calculate evidence
2459
+ using thermodynamic algorithm explained in Ref. [5]. Again, we vary algorithm parameters to get evidence
2460
+ values of varying accuracy. The resulting evidence as a function of number of likelihood evaluations is plotted
2461
+ in the Figure 1, together with the correct value inferred by direct numerical integration. The number of
2462
+ likelihood evaluations is crucial as this is the time-limiting step in the cosmological parameter estimation
2463
+ and model comparison exercises. The results are what could have been anticipated. We note that the size
2464
+ of the prior does not seem to be of crucial importance. This is comforting, given that the analytical method
2465
+ requires the knowledge of the true covariance information, while we can only supply a covariance matrix
2466
+ estimated from the prior-truncated likelihood. We also note that the thermodynamic integration converges
2467
+ to the correct value in all cases. However, it does so after very many likelihood evaluations; typically about
2468
+ a million or so even for a two-dimensional problem. The analytical method becomes limited by systematics
2469
+ already by the ten-thousand samples.
2470
+ For Gaussian case, there is no systematic by construction, while
2471
+ the non-gaussian case suffers a systematic of about 0.1 in ln E. The non-Gaussian correction reduces the
2472
+ error by about a half and thus correctly estimates the uncertainty associated with the purely Gaussian
2473
+ approximation. In the case of wide priors, the only non-Gaussian correction of an appreciable size is the
2474
+ ln(1 + DijklC−1
2475
+ ij C−1
2476
+ kl /8).
2477
+ 5.2
2478
+ A toy model comparison
2479
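The baby-toy comparison for the Gaussian likelihood L_g can be reproduced schematically along the following lines. This Python sketch assumes the wide flat prior of (−7, 10) on both parameters as in Figure 1; the chain length, proposal width and random seed are arbitrary choices, and ln L_max is approximated by the log-likelihood at the sample mean rather than at the exact peak.

    import math
    import numpy as np
    from scipy import integrate

    ln_like = lambda x, y: -2 * x**2 - 2 * (y - 1)**2 - x * y / 2   # Eq. (81)

    # direct numerical evidence over a flat prior box (lo, hi) on both parameters
    lo, hi = -7.0, 10.0
    norm = 1.0 / (hi - lo)**2
    E_direct, _ = integrate.dblquad(lambda y, x: math.exp(ln_like(x, y)) * norm,
                                    lo, hi, lo, hi)

    # short random-walk Metropolis chain to estimate the mean and covariance
    rng = np.random.default_rng(1)
    chain, p = [], np.array([0.0, 0.0])
    lnp = ln_like(*p)
    for _ in range(20000):
        q = p + 0.5 * rng.normal(size=2)
        lnq = ln_like(*q)
        if rng.uniform() < math.exp(min(0.0, lnq - lnp)):
            p, lnp = q, lnq
        chain.append(p)
    chain = np.array(chain)
    mu, C = chain.mean(axis=0), np.cov(chain.T)

    # analytic Gaussian evidence, Eq. (54), with the sampled covariance
    aa, bb = mu - lo, hi - mu          # distances from the mean to the prior edges
    dets = [1.0, C[0, 0], np.linalg.det(C)]
    lnE = ln_like(*mu) + math.log(2 * math.pi) + 0.5 * math.log(dets[2])
    for k in range(2):
        r = math.sqrt(dets[k] / dets[k + 1] / 2.0)
        lnE += (math.log(math.erf(aa[k] * r) + math.erf(bb[k] * r))
                - math.log(2 * (aa[k] + bb[k])))

    print(math.log(E_direct), lnE)    # the two estimates should agree closely

Because L_g is exactly Gaussian, any residual difference here comes only from the finite chain length, which is the behaviour seen in the upper panels of Figure 1.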
+ We now proceed by calculating the Bayesian evidence for simple toy models with 5 and 6 parameters, shown
2480
+ in Table I. The purpose is to compare results with those obtained from thermodynamic integration again,
2481
+ but this time using a model that bears more resemblance to a typical problem one encounters in cosmology.
2482
+ Parameter
2483
+ Mean
2484
+ Prior Range
2485
+ Model
2486
+ x1
2487
+ 0.022
2488
+ [0.0001, 0.044]
2489
+ toy5,toy6
2490
+ x2
2491
+ 0.12
2492
+ [0.001, 0.3]
2493
+ toy5,toy6
2494
+ x3
2495
+ 1.04
2496
+ [0.8, 1.4]
2497
+ toy5,toy6
2498
+ x4
2499
+ 0.1
2500
+ [0.01, 0.3]
2501
+ toy5,toy6
2502
+ x5
2503
+ 3.1
2504
+ [2.6, 3.6]
2505
+ toy5,toy6
2506
+ x6
2507
+ 0.98
2508
+ [0.5, 1.5]
2509
+ toy6
2510
+ Table 1:
2511
+ The parameters used in the analytical evaluation of the toy model evidences, with 5 and 6
2512
+ parameters respectively. The maximum likelihod of the toy models is taken (arbitrarily) to be Lmax = 1.
2513
+ Beginning with the five-parameter model, we assume first that it has an uncorrelated multivariate Gaus-
2514
+ sian likelihood distribution. In this case the aim is to test the thermodynamic integration method, which
2515
+ gives ln Enum
2516
+ toy5 = −8.65 ± 0.03, while the exact expression gives ln Eana
2517
+ toy5 = −8.66. Therefore, we conclude
2518
+ that the thermodynamic integration method is rather good in obtaining the correct evidence of the model.
2519
+ The Laplace approximation Eq. (57) also fares well for uncorrelated distributions, ln ELap
2520
+ toy5 = −8.67.
2521
+ We now consider a likelihood function with a correlated covariance matrix Cij, with the same mean
2522
+ values and dispersions as the previous case, but with significant correlations. The analytic formula needed,
2523
+ Eq. (54), is no longer exact,2 and gives ln Eana
2524
+ toy5c = −7.32. For comparison thermodynamic integration gives
2525
+ ln Enum
2526
+ toy5c = −7.28 ± 0.06, again in perfect agreement within errors. In this case the Laplace approximation
2527
+ fails significantly, ln ELap
2528
+ toy5c = −6.89, the reason being that the correlations chosen bring the posterior into
2529
+ significant contact with the edges of the priors.
2530
+ Let us now return to the uncorrelated case and include a new parameter, x6, as in Table I, and evaluate the
2531
+ different evidences that appear because of this new parameter, in order to see the sensitivity to systematic
2532
+ errors in the evaluation of the Bayesian evidence and their effects on model comparison. The numerical
2533
+ 2One could rotate the parameter basis to remove the correlations, but then the priors wouldn’t be top-hats.
2534
+ 16
2535
+
2536
+ result is ln Enum
2537
+ toy6 = −10.75 ± 0.03, while the exact analytical expression gives ln Eana
2538
+ toy6 = −10.74, in perfect
2539
+ agreement, within errors. The Laplace approximation Eq. (57) again fares well for uncorrelated distributions,
2540
+ ln ELap
2541
+ toy6 = −10.74.
2542
+ When the likelihood function has large correlations, and the priors are not too large, the naive Laplace
2543
+ approximation, Eq. (57), fares less well than the analytical approximation, Eq. (54).
2544
+ 5.3
2545
+ A real model comparison
2546
+ In this subsection we will make use of the results obtained in Ref. [5], where we evaluated the evidence for
2547
+ 5- and 6-parameter adiabatic models, and for three 10-parameter mixed adiabatic plus isocurvature models.
2548
+ The prior ranges used are given in Table II. The latter models give a marginally better fit to the data but
2549
+ require more parameters, which is exactly the situation where model selection techniques are needed to draw
2550
+ robust conclusions. In Ref. [5] we used thermodynamic integration to compute the evidence and showed that
2551
+ the isocurvature models ware less favoured than the adiabatic ones, but only at a mild significance level.3
2552
+ Beginning with the simplest adiabtic model, which uses the Harrison–Zel’dovich spectrum, we have
2553
+ used the analytical formulae above, Eq. (54), together with the covariance matrix provided by the cosmoMC
2554
+ programme [10], and obtained ln Eana
2555
+ ad
2556
+ = −854.07, while the thermodynamical integration gave ln Enum
2557
+ ad
2558
+ =
2559
+ −854.1±0.1 [5]. The agreement is excellent; this is because the distribution function for the adiabatic model
2560
+ is rather well approximated by a Gaussian, and the priors are rather large, so the formula Eq. (54) is very
2561
+ close to that obtained in the Laplace approximation, ln ELap
2562
+ ad
2563
+ = −854.08.
2564
+ Parameter
2565
+ Mean
2566
+ Prior Range
2567
+ Model
2568
+ ωb
2569
+ 0.022
2570
+ [0.018, 0.032]
2571
+ AD-HZ,AD-ns,ISO
2572
+ ωdm
2573
+ 0.12
2574
+ [0.04, 0.16]
2575
+ AD-HZ,AD-ns,ISO
2576
+ θ
2577
+ 1.04
2578
+ [0.98, 1.10]
2579
+ AD-HZ,AD-ns,ISO
2580
+ τ
2581
+ 0.17
2582
+ [0, 0.5]
2583
+ AD-HZ,AD-ns,ISO
2584
+ ln[1010Rrad]
2585
+ 3.1
2586
+ [2.6, 4.2]
2587
+ AD-HZ,AD-ns,ISO
2588
+ ns
2589
+ 1.0
2590
+ [0.8, 1.2]
2591
+ AD-ns,ISO
2592
+ niso
2593
+ 1.5
2594
+ [0, 3]
2595
+ ISO
2596
+ δcor
2597
+ 1.5
2598
+ [−0.14, 0.4]
2599
+ ISO
2600
+ √α
2601
+ 0
2602
+ [−1, 1]
2603
+ ISO
2604
+ β
2605
+ 0
2606
+ [−1, 1]
2607
+ ISO
2608
+ Table 2:
2609
+ The parameters used in the models; see Ref. [5] for nomenclature and other details. For the
2610
+ AD-HZ model ns was fixed to 1 and niso, δcor, α and β were fixed to 0. In the AD-ns model, ns also varies.
2611
+ Every isocurvature model holds the same priors for the whole set of parameters.
2612
+ However the analytic method fares less well for the adiabatic model with varying ns, with both the
2613
+ analytic and Laplace methods giving ln EAD−ns = −853.4, while the numerical method gives the smaller
2614
+ value −854.1, a discrepancy of nearly unity.
2615
+ Turning now to the isocurvature cases, we found an extremely good result for the CDI model, obtaining from
+ Eq. (54) the value ln E^ana_cdi = −855.08, while the thermodynamical integration gives ln E^num_cdi = −855.1 ± 0.1.
2620
+ This is surprising, given the relatively large non-Gaussianities for at least three variables: niso, β and δcor,
2621
+ whose priors are not centered with respect to the mean.
2622
+ However, the NID case shows much poorer
+ agreement, with a discrepancy of 0.6. That suggests that the closeness of the CDI comparison is to some
2624
+ extent a statistical fluke, with the underlying method less accurate.
2625
+ A summary of the different models can be found in Table 3.
2626
+ 3Recently Trotta [9] used a different technique to analyze a restricted class of isocurvature model featuring just one extra
2627
+ parameter, and found it highly disfavoured. The different conclusion is primarily due to the very different prior he chose on
2628
+ the isocurvature amplitude, such that almost all the models under the prior are dominated by isocurvature modes and in poor
2629
+ agreement with the data.
2630
2631
+
2632
+ Model    ln Lmax    ln Enum          ln Eana    ln ELap
+ toy5     0          −8.65 ± 0.03     −8.66      −8.67
+ toy5c    0          −7.28 ± 0.06     −7.32      −6.89
+ toy6     0          −10.75 ± 0.03    −10.74     −10.74
+ toy6c    0          −9.73 ± 0.06     −9.71      −9.63
+ AD       −840.78    −854.1 ± 0.1     −854.1     −854.1
+ AD-ns    −838.50    −854.1 ± 0.1     −853.4     −853.4
+ CDI      −838.05    −855.1 ± 0.2     −855.1     −854.5
+ NID      −836.60    −855.1 ± 0.2     −854.5     −854.5
+ NIV      −842.53    −855.1 ± 0.3     −854.9     −854.9
2682
+ Table 3:
2683
+ The different models, both toy and real, with their maximum likelihoods and evidences.
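+ To read Table 3 in terms of relative model probabilities, only differences in ln E matter: assuming equal prior
+ probabilities for the models, exp(Δ ln E) is the posterior odds ratio. A sketch of that arithmetic, using the
+ thermodynamic-integration column above (which carries the quoted errors of 0.1–0.3 in ln E), is:
+ import math
+
+ # ln(evidence) values from the numerical column of Table 3
+ ln_E = {"AD": -854.1, "AD-ns": -854.1, "CDI": -855.1, "NID": -855.1, "NIV": -855.1}
+
+ for model, lnE in ln_E.items():
+     delta = lnE - ln_E["AD"]   # difference with respect to the adiabatic HZ model
+     print(f"{model}: Delta ln E = {delta:+.1f}, odds vs AD = {math.exp(delta):.2f}")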
2684
+ 5.4 Savage–Dickey method
2686
+ Another numerical method for evidence calculation is the Savage–Dickey method, first described in Ref. [11]
2687
+ and recently used in Ref. [9]. This technique allows one to calculate the evidence ratio of two models from a
2688
+ simple and quick analysis of the Markov chains used for parameter estimation, provided that the models are
2689
+ nested; i.e., that one of them is included in the parameter space of the other. For instance, the AD model
2690
+ is nested within the AD-ns model, and the AD and AD-ns models are both nested within the CDI, NID
2691
+ and NIV ones. In the context of Markov chains, the Savage–Dickey method is essentially a measure of how
2692
+ much time the sampler spends in the nested model, weighted by the respective volumes of the two models.
2693
+ When the outer model has extra parameters, this method relies on approximating the nested model as a
2694
+ model with negligibly narrow priors in the directions of the extra parameters. We note, however, that when many
+ extra parameters are present, this method must fail for reasons similar to those for which grid-based parameter
+ estimation approaches fail for models with many parameters. The MCMC parameter estimation simply
2697
+ does not have high enough dynamic range to probe the two models given the large prior volume ratio.
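+ For a single extra parameter the Savage–Dickey estimate reduces to the ratio of the marginal posterior density to
+ the prior density at the nested value. A minimal sketch of that estimate from a chain of samples is given below;
+ the histogram-based density estimate, the mock samples and the top-hat prior width are assumptions of this
+ illustration rather than the procedure actually used in the text.
+ import numpy as np
+
+ def savage_dickey_lnB(samples, nested_value, prior_lo, prior_hi, bins=50):
+     # ln(E_nested / E_extended) for one extra parameter with a top-hat prior,
+     # estimated as posterior density / prior density at the nested value.
+     hist, edges = np.histogram(samples, bins=bins,
+                                range=(prior_lo, prior_hi), density=True)
+     idx = int(np.clip(np.searchsorted(edges, nested_value) - 1, 0, bins - 1))
+     prior_density = 1.0 / (prior_hi - prior_lo)
+     return np.log(hist[idx] / prior_density)
+
+ # Example: mock n_s chain peaked near 0.96; nested (HZ) value n_s = 1, prior [0.8, 1.2]
+ rng = np.random.default_rng(0)
+ ns_samples = rng.normal(0.96, 0.02, size=20000)
+ print(savage_dickey_lnB(ns_samples, 1.0, 0.8, 1.2))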
2698
+ The AD and AD-ns models differ by one parameter. Using the same AD-ns samples as for the analytic
+ method (i.e., the samples from which we extracted the covariance matrix), we obtained ln(EAD/EAD-ns) =
+ 0.03. The result from the precise thermodynamical integration, ln(EAD/EAD-ns) = 0 ± 0.1, is in excellent
2701
+ agreement. The AD-ns and CDI (or NID, NIV) models differ by four parameters. With most simple choices
2702
+ of parametrization (including in particular the isocurvature and cross-correlation tilts), the AD-ns is not a
2703
+ point, but a hypersurface within the parameter space of the isocurvature models (i.e., α = 0 and the other three
+ parameters act as dummy, unconstrained parameters which do not affect the evidence). In these cases, the
2705
+ evidence ratios given by the Savage–Dickey method do not converge as the priors of the extra parameters
2706
+ are tightened up around the nested model, although they match thermodynamically-determined values to
2707
+ within a unit of ln E.
2708
+ 6 Discussion and Conclusions
2710
+ We have developed an analytical formalism for computing the Bayesian evidence in the case of an arbitrary
2711
+ likelihood distribution with a hierarchy of non-Gaussian corrections, and with arbitrary top-hat priors,
2712
+ centered or uncentered. This analysis can be of great help for the problem of model comparison in the
2713
+ present context of cosmology, where observational data is still unable to rule out most extensions of the
2714
+ standard model based on the ΛCDM inflationary paradigm.
2715
+ As an application of the exact and approximate formulae obtained for the Bayesian evidence of a model
2716
+ with approximately Gaussian likelihood distributions, we have compared the value predicted analytically
2717
+ with that computed with a time-consuming algorithm based on the thermodynamical integration approach.
2718
+ The values obtained analytically agree surprisingly well with those obtained numerically. While one can
2719
+ estimate the magnitude of the higher order corrections for the analytical formulae, it is very difficult to
2720
2721
+
2722
+ estimate the systematic effects of the numerical approach. Thus, with this analytical method we can test
2723
+ for systematics in the thermodynamical integration approach. So far, the values obtained agree, so it seems
2724
+ that the numerical approach is a good tool for estimating the evidence. However, it takes considerable effort
2725
+ and machine time to do the correct evaluation, and therefore, we propose the use of the analytical estimate,
2726
+ whose corrections are well under control, in the sense that one can compute the next order corrections and
2727
+ show that they are small.
2728
+ Note added: Many years after my work was finished, a book appeared [12] which thoroughly discussed
2729
+ Bayesian Methods in Cosmology.
2730
+ References
2731
+ [1] H. Jeffreys, Theory of Probability, 3rd ed, Oxford University Press (1961).
2732
+ [2] D. J. C. MacKay, Information theory, inference and learning algorithms, Cambridge University Press
2733
+ (2003).
2734
+ [3] A. Jaffe, Astrophys. J. 471, 24 (1996); P. S. Drell, T. J. Loredo, and I. Wasserman, Astrophys. J. 530,
2735
+ 593 (2000); M. V. John and J. V. Narlikar, Phys. Rev. D 65, 043506 (2002); M. P. Hobson, S. L. Bridle,
2736
+ and O. Lahav, Mon. Not. Roy. Astr. Soc. 335, 377 (2002); A. Slosar et al., Mon. Not. Roy. Astr. Soc.
2737
+ 341, L29 (2003); T. D. Saini, J. Weller, and S. L. Bridle, Mon. Not. Roy. Astr. Soc. 348, 603 (2004);
2738
+ A. Niarchou, A. H. Jaffe, and L. Pogosian, Phys. Rev. D 69, 063515 (2004); P. Marshall, N. Rajguru,
2739
+ and A. Slosar, Phys. Rev. D 73, 067302 (2006).
2740
+ [4] A. R. Liddle, Mon. Not. Roy. Astr. Soc. 351, L49 (2004).
2741
+ [5] M. Beltran, J. García-Bellido, J. Lesgourgues, A. R. Liddle and A. Slosar, Phys. Rev. D 71, 063532
2742
+ (2005).
2743
+ [6] J. J. K. Ó Ruanaidh and W. J. Fitzgerald, Numerical Bayesian Methods Applied to Signal Processing,
2744
+ Springer–Verlag, New York (1996).
2745
+ [7] M. P. Hobson and C. McLachlan, Mon. Not. Roy. Astr. Soc. 338, 765 (2003).
2746
+ [8] R. E. Kass and A. E. Raftery, Journ. Amer. Stat. Assoc. 90, 773 (1995).
2747
+ [9] R. Trotta, Mon. Not. Roy. Astr. Soc. 378, 72 (2007).
2748
+ [10] A. Lewis and S. Bridle, Phys. Rev. D66, 103511 (2002).
2749
+ [11] J. M. Dickey, Ann. Math. Stat 42, 204 (1971).
2750
+ [12] M. P. Hobson, A. H. Jaffe, A. R. Liddle, P. Mukherjee & D. Parkinson, Bayesian Methods in Cosmology,
2751
+ Cambridge University Press (2010).
2752
2753
+
19FST4oBgHgl3EQfXDjl/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
39FQT4oBgHgl3EQfHTWW/content/2301.13248v1.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:840399aba2314dcf819a1604819d5f1428df4bde1e543f892f6ea7bf56d988f3
3
+ size 1649301
4dAzT4oBgHgl3EQfffz3/content/tmp_files/2301.01455v1.pdf.txt ADDED
@@ -0,0 +1,440 @@
1
+ Engineering sub-Poisson light in a simple mirror and beam
2
+ splitter system
3
+ Sun-Hyun Youn∗
4
+ Department of Physics, Chonnam National University, Gwangju 500-757, Korea
5
+ Abstract
6
+ Vacuum fluctuation, an intrinsic property of the electric field, can be measured via homo-
+ dyne detection. Moreover, electric field intensity fluctuations are also related to vacuum fluctuations.
+ Squeezed vacuum and sub-Poisson light can be obtained by controlling the vacuum fluctuation us-
+ ing novel nonlinear interactions. Building on the 1994 proposal to obtain a squeezed vacuum by inserting
+ a mirror on the unused part of the beam splitter, we present a mode-matching method for the
+ vacuum and light fields. Light intensity fluctuations can also be reduced by inserting a mirror on
12
+ the unused part of the beam splitter. To obtain sub-Poisson light as a function of the distance
13
+ between the mirror and detector, a detector with a thinner active layer than the wavelength is
14
+ required.
15
+ PACS numbers: 03.67.-a,03.70.+k, 03.65.Yz
16
+ Keywords: Quantum optics, Squeezed State, Vacuum fluctuation, Sub-Poisson, Beam splitter and Mirror
17
+ ∗ E-mail: [email protected], fax: +82-62-530-3369
18
19
+ arXiv:2301.01455v1 [quant-ph] 4 Jan 2023
20
+
21
+ I. INTRODUCTION
23
+ When a single photon is in a particular mode, according to the particle nature of light,
24
+ photons will be sequentially found in that mode. The probability of finding a photon is
25
+ proportional to the absolute square of the wave function related to the electromagnetic
26
+ wave. Vacuum fluctuations are related to the spatial characteristics of the electromagnetic
27
+ wave. The spontaneous decay caused by the vacuum can be suppressed in cavities [1]. The-
28
+ oretical and experimental studies have been conducted on methods to change the vacuum
29
+ fluctuations near mirrors[2–4].
30
+ In this study, in contrast to previous studies on the vacuum noise characteristics of
31
+ light using a homodyne detector, we calculate the intensity fluctuations when photons are
32
+ directly measured using photon counter. The obtained results are similar to those obtained
33
+ in previous studies, but herein we predict the results considering mode matching in the
34
+ experiment.
35
+ In section II, the fluctuation of light that can be measured using a detector is calculated
36
+ with a mirror placed on one side of the beam splitter. In section III, an experimental device
37
+ is proposed for perfect mode matching, and in the last section, the practical limits of the
38
+ vacuum fluctuation near the mirror are discussed.
39
+ II. VACUUM FLUCTUATION NEAR A MIRROR.
41
+ An electric field can be written as
42
+ \hat{E}_L = \hat{E}_{cl} + \hat{E}_Q,   (1)
+ where
+ \hat{E}_{cl} = i\sqrt{\frac{\hbar\omega}{2\epsilon_0 V}}\,\big(\alpha e^{i(\omega t - k_0 z)} - \alpha^{*} e^{-i(\omega t - k_0 z)}\big)\,\vec{x},
+ \hat{E}_Q = i\sum_k \sqrt{\frac{\hbar\omega_k}{2\epsilon_0 V}}\,\big(\hat{b}_k e^{-i(\omega_k t - kz)} - \hat{b}^{\dagger}_k e^{i(\omega_k t - kz)}\big)\,\vec{x}.   (2)
57
+ Here, k0 and ω are the wave number and angular frequency of the laser, respectively, ℏ and
58
+ ϵ0 have usual meanings, and V is the normalization volume[5]. Considering the laser mode
59
60
+
61
+ FIG. 1: Vacuum mode relations in the beam splitter with a mirror. BS: Beam splitter, M: mirror
62
+ in Fig. 1, the modes a_1^{out} and a_2^{out} can be written as
+ a_1^{out} = \sqrt{T}\,b + \sqrt{R}\,c, \qquad a_2^{out} = -\sqrt{R}\,b + \sqrt{T}\,c,   (3)
+ where the modes c and c_{out} can be written as
+ c = \sqrt{T_m}\,d - \sqrt{R_m}\,c_{out}, \qquad c_{out} = \sqrt{R}\,a_1 + \sqrt{T}\,a_2.
93
+ Then the electric field in fluctuating vacuum modes at a1 is
94
+ \hat{E}^{(+)}_{vac,1} = \sum_k i\sqrt{\frac{\hbar\omega_k}{4\epsilon_0 V}}\,\Big\{\sqrt{T}\,\hat{b}^{\dagger}_k e^{i(\omega_k t - kZ_1)} + \mu\,\hat{a}^{\dagger}_{1,k} e^{i(\omega_k t + kz_1)} - R\sqrt{R_m}\,\hat{a}^{\dagger}_{1,k} e^{i(\omega_k t - kz_1)}
+ \qquad\quad - \sqrt{RT}\sqrt{R_m}\,\hat{a}^{\dagger}_{2,k} e^{i(\omega_k t - kz_1)} + \sqrt{RT_m}\,\hat{d}^{\dagger}_k e^{i(\omega_k t - kZ_M)}\Big\}   (4)
118
+ where Rm(Tm) is the reflectance(transmittance) of the mirror and R(T) is the reflectance
119
+ (transmittance) of the beam splitter, z1(Z1) is the distance from the mirror (laser) to the
120
+ detector. ZM is related to the vacuum source behind the mirror and it can be any number.
121
+ We add the factor 1/√2 for the normalization of the vacuum fluctuation. The vacuum mode
+ (ˆa†_1 e^{i(ωt−kz_1)}) at the detector is the reflected vacuum mode (ˆa†_1 e^{i(ωt+kz_1)}) at the mirror. If two
129
+ yield the standing wave mode[2, 3]. If µ = 0, the fluctuation value from Eq. 7 becomes |α|2T
130
+ 2 ,
131
+ 3
132
+
133
+ b
134
+ BS
135
+ α2
136
+ C
137
+ d
138
+ ino
139
+
140
+ M
141
+ ino
142
+ a1
143
+ ait is the square of the constant dc current T|α|2
144
+ 2 . In other words, if we directly measure the
145
+ fluctuation of the laser intensity, the fluctuation is dependent on the distance (z1) between
146
+ the mirror and the detector.
147
+ Even in photo counting experiments, the photon number
148
+ fluctuation is related to the vacuum fluctuation; therefore, the photon number fluctuation
+ also depends on the distance z1.
150
+ If we use the photodetection theory [6] with the instantaneous response of the photodetector
151
+ [7],
152
+ \hat{I}_1 = \{\sqrt{T}\,\hat{E}^{(+)}_{cl} + \hat{E}^{(+)}_{vac,1}\} \times \{\sqrt{T}\,\hat{E}^{(-)}_{cl} + \hat{E}^{(-)}_{vac,1}\},   (5)
164
+ where we normalize the photocurrent. If the electric field of the local oscillator is considerably
165
+ greater than the vacuum field, the terms containing α have physical significance. When the
166
+ constant dc current T|α|²/2 is neglected, Eq. 5 yields
+ \hat{I}^{o}_1(z_1, Z_1) = \frac{|\alpha|}{\sqrt{2}}\Big[\sqrt{T} e^{i\varphi}\{(\mu e^{-ik(Z_1+z_1)} - e^{-ik(Z_1-z_1)} R\sqrt{R_m})\hat{a}_1 - e^{-ik(Z_1-z_1)}\hat{a}_2\}
+ \quad + \sqrt{T} e^{-i\varphi}\{(\mu e^{ik(Z_1+z_1)} - e^{ik(Z_1-z_1)} R\sqrt{R_m})\hat{a}^{\dagger}_1 - e^{ik(Z_1-z_1)}\hat{a}^{\dagger}_2\}
+ \quad + e^{i\varphi} T\hat{b} + e^{-i\varphi} T\hat{b}^{\dagger} + e^{i\varphi} e^{ik(Z_M-Z_1)}\sqrt{TRT_m}\,\hat{d} + e^{-i\varphi} e^{-ik(Z_M-Z_1)}\sqrt{TRT_m}\,\hat{d}^{\dagger}\Big],   (6)
188
+ We then evaluate the square of the photocurrent to determine the fluctuation. After
189
+ squaring Eq. 6, we find the photocurrent fluctuation as follows:
190
+ \langle(\hat{I}^{o}_1)^2\rangle = \frac{|\alpha|^2 T}{2}\,\{1 + \mu^2 - 2\mu R\sqrt{R_m}\cos(2kz_1)\}   (7)
197
+ If µ = 0, the fluctuation value from Eq. 7 becomes |α|²T/2, which is the square of the con-
+ stant dc current √T |α|/√2. In other words, if we directly measure the laser intensity fluctuation,
204
+ the fluctuation is dependent on the distance (z1) between the mirror and detector. Even
205
+ in the photo counting experiment, the photon number fluctuation is related to the vacuum
206
+ fluctuation; therefore, the photon number fluctuation is also dependent on the distance z1.
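+ As a numerical illustration of Eq. 7, the Python sketch below evaluates the fluctuation normalized to the level
+ |α|²T/2 as the mirror–detector distance z1 is scanned over one wavelength; the wavelength and the values of µ,
+ R and Rm are assumptions chosen only so that the sub-Poissonian dip below 1 is visible.
+ import numpy as np
+
+ def normalized_fluctuation(z1, wavelength, mu, R, Rm):
+     # Eq. (7) divided by |alpha|^2 T / 2
+     k = 2.0 * np.pi / wavelength
+     return 1.0 + mu**2 - 2.0 * mu * R * np.sqrt(Rm) * np.cos(2.0 * k * z1)
+
+ lam = 780e-9                            # assumed laser wavelength
+ z1 = np.linspace(0.0, lam, 200)         # scan one wavelength of mirror-detector distance
+ f = normalized_fluctuation(z1, lam, mu=1.0, R=0.9, Rm=1.0)
+ print(f.min(), f.max())                 # values below 1 correspond to sub-Poisson statistics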
207
+ If we consider practical limits such as finite linewidth and finite absorption length, Eq. 7
208
+ will change as follows[2, 8].
209
+ \langle(\hat{I}^{o}_1)^2\rangle_P = \frac{|\alpha|^2 T}{2}\,\Big\{1 + \mu^2 - 2\mu R\sqrt{R_m}\, e^{-z_1^2\Delta k^2} \times \frac{\kappa\,[\cos(2k_0 z_1 + \varphi_0) - e^{-\kappa D}\cos(2k_0(z_1+D) + \varphi_0)]}{\sqrt{4k_0^2 + \kappa^2}}\Big\},   (8)
223
+
224
+ where ∆k is the line width of the local oscillator beam with Gaussian line width distribution
225
+ functions. κ is the absorption coefficient, D is the detector active length, and φ0 = arctan 2k
226
+ κ .
227
+ We assumed that the probability that a photon is converted into an electron hole pair at
228
+ distance η from the surface of the detector’s active region is κe−κη[9].
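+ The practical impact of the linewidth and detector terms in Eq. 8 can be gauged with the short sketch below,
+ which evaluates the z1-dependent factor multiplying 2µR√Rm for two assumed absorption depths 1/κ, one well
+ below and one above the wavelength; all numerical values are assumptions for illustration only.
+ import numpy as np
+
+ def modulation_factor(z1, k0, dk, kappa, D):
+     # z1-dependent factor multiplying 2*mu*R*sqrt(Rm) in Eq. (8)
+     phi0 = np.arctan(2.0 * k0 / kappa)
+     envelope = np.exp(-(z1 * dk) ** 2)
+     osc = kappa * (np.cos(2.0 * k0 * z1 + phi0)
+                    - np.exp(-kappa * D) * np.cos(2.0 * k0 * (z1 + D) + phi0))
+     return envelope * osc / np.sqrt(4.0 * k0**2 + kappa**2)
+
+ lam = 780e-9
+ k0 = 2.0 * np.pi / lam
+ dk = 2.0 * np.pi * 1e6 / 3e8            # ~1 MHz linewidth as a wavenumber spread
+ for inv_kappa in (50e-9, 1e-6):         # assumed absorption depths: below / above the wavelength
+     kappa = 1.0 / inv_kappa
+     z1 = np.linspace(0.0, lam, 400)
+     m = modulation_factor(z1, k0, dk, kappa, D=5.0 * inv_kappa)
+     print(inv_kappa, np.abs(m).max())   # the modulation survives only when absorption occurs well within a wavelength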
229
+ The two coefficients √Rm and µ depend on the mode matching condition. Even when
230
+ we use a totally reflecting mirror, if the mode from the mirror is not perfectly matched with the
+ mode from the laser, the effective reflectance √Rm cannot be 1. Furthermore, the mode
+ a1 propagating toward the mirror is reflected by the mirror and then returns to the detector. At the detector,
233
+ if two counter-propagating modes are not exactly matched, the coefficient µ cannot be 1.
234
+ To evaluate this mode matching condition, we assume that the amplitude envelope of the
235
+ electromagnetic wave in the transverse plane is given by a Gaussian function.
236
+ Considering the Gaussian modes [10]
237
+ E(\rho, z) = E_0\,\frac{w_0}{w(z)}\,\exp\Big[-\frac{\rho^2}{w(z)^2}\Big]\,\exp\Big[-ikz - ik\frac{\rho^2}{2R(z)} + i\zeta(z)\Big],   (9)
+ where w0 is the radius of the beam waist and
+ w(z) = w_0\sqrt{1 + (z/z_0)^2}, \qquad R(z) = z\big(1 + (z_0/z)^2\big), \qquad \zeta(z) = \tan^{-1}(z/z_0),   (10)
+ and z0 is defined as follows:
+ z_0 = \frac{\pi}{\lambda}\, w_0^2.   (11)
261
+ First, we assume that the laser and vacuum modes have the same beam waist w0 at the
262
+ detector. Then the laser and vacuum modes are perfectly matched; thus, √Rm = 1. On
263
+ the other hand, the vacuum Ev(0) starting from the detector propagates to the mirror and
264
+ reflects at the mirror. The returned vacuum Ev(2z1) is not the same Ev(0). The coefficient
265
+ µ can be calculated as follows:
+ \mu = \frac{|\langle E_v(0)\,E_v(2z_1)^{*}\rangle|}{\sqrt{\langle E_v(0)^2\rangle\,\langle E_v(2z_1)^2\rangle}} = \frac{\big(1 + 4z_1^2/z_0^2\big)^{1/4}}{\big(1 + 5z_1^2/z_0^2 + 4z_1^4/z_0^4\big)^{1/4}}   (12)
287
288
+
289
+ FIG. 2:
290
+ Mode matching value µ as a function of w0 and z1.
291
+ In Fig. 2, µ is plotted as a function of z1 and w0, where z1 is the distance between the
292
+ mirror and detector We assume that the detector and mirror are large enough that all the
293
+ waves are detected and reflected. If the distance between the mirror and detector and the
294
+ size of the beam waist are small enough, the coefficient µ remains near 1.
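+ Eq. 12 is easy to evaluate directly; the sketch below does so for an assumed wavelength and beam waist (the
+ numbers are illustrative, not those used for Fig. 2).
+ import numpy as np
+
+ def mode_overlap_mu(z1, w0, wavelength):
+     # Overlap of the outgoing and mirror-reflected vacuum modes, Eq. (12)
+     z0 = np.pi * w0**2 / wavelength     # Rayleigh range, Eq. (11)
+     r = (z1 / z0) ** 2
+     return (1.0 + 4.0 * r) ** 0.25 / (1.0 + 5.0 * r + 4.0 * r**2) ** 0.25
+
+ lam = 780e-9
+ w0 = 100 * lam                          # assumed beam waist at the detector
+ for z1 in (1e-3, 1e-2, 1e-1):           # mirror-detector distances in metres
+     print(z1, round(mode_overlap_mu(z1, w0, lam), 4))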
295
+ If we consider the case where the vacuum field has its waist at the mirror, the coefficient µ
296
+ automatically becomes 1 due to the symmetry, but the vacuum field Ev(z1) at the detector
297
+ does not match the laser field EL(0). We assumed that the laser field has a beam waist w0 at
298
+ the detector, and the vacuum field has a beam waist wm at the mirror. Then the effective
299
+ reflectance √Rm becomes
300
+
301
+ \sqrt{R_m} = \frac{|\langle E_v(z_1)\,E_L(0)^{*}\rangle|}{\sqrt{\langle E_v(z_1)^2\rangle\,\langle E_L(0)^2\rangle}} = \frac{\sqrt{2}\,\sqrt{w_m/w_0}\,\big(1 + z_1^2/z_m^2\big)^{1/4}}{\Big(\big\{(1 + w_m^2/w_0^2)^2 + z_1^2/z_0^2\big\}\big\{1 + z_1^2/z_m^2\big\}\Big)^{1/4}},   (13)
+ where z_m = \frac{\pi}{\lambda}\, w_m^2.
330
+ In Fig. 3, √Rm is plotted as a function of z1 and wm, where z1 is the distance between
331
+ the mirror and detector. We set w0 to 100λ. Additionally, we assume that the detector
332
+ and mirror are large enough that all the waves are detected and reflected. The coefficient
333
+ √Rm can be 1 only when the distance between the mirror and detector is small and the size
334
+ of the beam waist is sufficiently small.
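+ The same kind of direct check can be made for Eq. 13; the sketch below uses w0 = 100λ, as in Fig. 3, with an
+ assumed wavelength and a few assumed mirror-side waists wm.
+ import numpy as np
+
+ def effective_reflectance(z1, wm, w0, wavelength):
+     # Overlap of the mirror-waisted vacuum mode with the laser mode at the detector, Eq. (13)
+     z0 = np.pi * w0**2 / wavelength
+     zm = np.pi * wm**2 / wavelength
+     num = np.sqrt(2.0) * np.sqrt(wm / w0) * (1.0 + (z1 / zm) ** 2) ** 0.25
+     den = (((1.0 + (wm / w0) ** 2) ** 2 + (z1 / z0) ** 2)
+            * (1.0 + (z1 / zm) ** 2)) ** 0.25
+     return num / den
+
+ lam = 780e-9
+ w0 = 100 * lam
+ for wm in (0.5 * w0, w0, 2.0 * w0):     # assumed vacuum-mode waists at the mirror
+     print(round(wm / w0, 2), round(effective_reflectance(1e-3, wm, w0, lam), 4))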
335
+ [3-D surface plot residue (µ axis, logarithmic w/λ and z/λ axes) omitted]
+ FIG. 3: Mode matching value √Rm as a function of w0 and z1, with w0 equal to 100λ
350
+ The mode matching condition is crucial for detecting the modulation effect of the vacuum
351
+ fluctuation near the mirror, as denoted by Eq. 8. With the usual setup, we cannot satisfy
+ the conditions µ = 1 and √Rm = 1. In the next section, we suggest a novel experimental
+ setup that satisfies both mode-matching conditions.
354
+ III. SET UP FOR MODE MATCHING
356
+ For a laser that has a Gaussian transverse mode, we have to establish a vacuum mode that
357
+ also has a Gaussian transverse mode. Fig. 4 displays the setup for perfect mode matching
358
+ between the laser light mode and a vacuum mode.
359
+ The laser used in the experiment passes through lens L1 and is divided into two by
360
+ the beam splitter (BS1). The laser is a Gaussian beam and it proceeds according to the
361
+ Gaussian approximation. The light passing through BS1 and traveling to mirror M2 reaches
362
+ the partial mirror B and yields a beam waist on the L3 side surface of B. Similarly, the
363
+ light reflecting from the mirror M1 passes through the partial reflector A and yields a beam
364
+ waist on the L2 side surface of A.
365
+ The light passing through A and B passes through the L2 and L3 of the same focal length,
366
+ respectively, and yields another beam waist on the detector surface. The transmittance of
367
+ light passing through A from M1 is almost 0, and the reflectance of light stemming from the
368
+ L2 side is almost 1. In this way, if the mode is perfectly matched using the light passing
369
+ through B and A, an experimental setup can be established wherein one side of the beam
370
+ splitter BS2 is a mirror (A).
371
+ [3-D surface plot residue (√Rm axis, logarithmic w/λ and z/λ axes) omitted]
+ Using this method, the degree of mode matching can be increased compared to that
386
+ when the experiment is performed by simply placing a plane mirror on one side of the beam
387
+ splitter. Additionally, the experimental constraints caused by the mode matching can be
388
+ overcome. The experimental setup in Fig. 4 enables the measurement of how the vacuum
389
+ fluctuations of the light passing through the beam splitter change when a mirror is placed
390
+ on one side of the beam splitter.
391
+ FIG. 4: Mode matching setup
392
+ IV. CONCLUSION AND DISCUSSION.
394
+ The quantum nature of photons is highly dependent on their vacuum fluctuations. Vac-
395
+ uum fluctuations can be directly measured via homodyne detection. The fluctuation of one
396
+ quadrature of the vacuum can be less than that of the usual vacuum, e.g., squeezed vacuum.
397
+ Light intensity fluctuations are also dependent on vacuum fluctuations. Sub-Poisson light
398
+ can be generated by controlling the vacuum fluctuations based on the nonlinear interaction
399
+ of light and matter. In this study, we proposed the modulation of vacuum fluctuations by
400
+ inserting a mirror on the unused part of the beam splitter in a homodyne measuring system.
401
+ Furthermore, we calculated the effect of the line width of the laser and the thickness of the
402
+ detector layer. The line width can be practically reduced to modulate vacuum fluctuations,
403
+ but the decrease of the thickness of the detector to modulate vacuum fluctuations is chal-
404
+ lenging. We calculated the effect of mode matching between the vacuum and light fields and
405
+ 8
406
+
407
+ BS1
408
+ M1
409
+ A
410
+ L1
411
+ L2
412
+ DD1
413
+ M2
414
+ BS2
415
+ B
416
+ L3
417
+ D2showed that the degree of mode matching obtained by adding a simple mirror in the unused
418
+ beam splitter may not be sufficient to modulate the vacuum fluctuations. We present the
419
+ perfect mode matching method for the vacuum and light fields. Then, the light intensity
420
+ fluctuations can be reduced by inserting a beam splitter and a mirror. We still require a
421
+ detector with an active layer thinner than the wavelength to obtain sub-Poisson light as a
422
+ function of the distance between the mirror and detector. We expect that our simple method
423
+ of reducing vacuum fluctuations will play a great role in quantum information science.
424
+ [1] W. Jhe, A Anderson, E. A. Hinds, D. Meschede, L. Moi, and S. Haroche, Phys. Rev. Lett.
425
+ 58, 666 (1987)
426
+ [2] S. H. Youn, J. H. Lee, J. S. Chang, Opt. and Quant. Elec. 27, 355 (1995)
427
+ [3] S. H. Youn, J. H. Lee, J. S. Chang, International Workshop on Squeezed States and Uncer-
428
+ tainty Relations, N95-13921 (1994)
429
+ [4] S. A. Wadood, J. T. Schultz, A. N. Vamivakas, and C. R. Stroud Jr, J. of Mod. Opt. 66, 1116
430
+ (2019)
431
+ [5] A. Yariv, Quantum Electronics, 3rd ed., John Wiley & Sons, Inc. (1989)
432
+ [6] P. D. Drummond, Phys. Rev. A 35, 4253 (1987).
433
+ [7] B. Yurke, Phys. Rev. A 32, 311 (1985)
434
+ [8] A. E. Siegman, Laser (Oxford University Press, Oxford, 1986 )
435
+ [9] S. M. Sze,
436
+ Semiconductor Devices Physics and Technology (AT&T Bell Lab. Murray Hill,
437
+ New Jersey, 1985)
438
+ [10] B. E. A. Saleh, M. C. Teich, Fundamentals of Photonics ( Wiley, Nw York, 1991)
439
+ 9
440
+
4dAzT4oBgHgl3EQfffz3/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,179 @@
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf,len=178
2
+ page_content='Engineering sub-Poisson light in a simple mirror and beam splitter system Sun-Hyun Youn∗ Department of Physics, Chonnam National University, Gwangju 500-757, Korea Abstract Vacuum fluctuation, which is the intrinsic nature of an electric field can be measured via homo- dyne detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
3
+ page_content=' Moreover, electric field intensity fluctuation are also related to vacuum fluctuations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
4
+ page_content=' Squeezed vacuum and sub-Poisson light can be obtained by controlling the vacuum fluctuation us- ing noble nonlinear interaction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
5
+ page_content=' Based on the squeezed vacuum by inserting a mirror on the unused part of the beam splitter was proposed in 1994, we present the mode matching method for the vacuum and light fields.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
6
+ page_content=' Light intensity fluctuations also can be reduced by inserting a mirror on the unused part of the beam splitter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
7
+ page_content=' To obtain sub-Poisson light as a function of the distance between the mirror and detector, a detector with a thinner active layer than the wavelength is required.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
8
+ page_content=' PACS numbers: 03.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
9
+ page_content='67.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
10
+ page_content='-a,03.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
11
+ page_content='70.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
12
+ page_content='+k, 03.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
13
+ page_content='65.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
14
+ page_content='Yz Keywords: Quantum optics, Squeezed State, Vacuum fluctuation, Sub-Poisson, Beam splitter and Mirror ∗ E-mail: sunyoun@jnu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
15
+ page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
16
+ page_content='kr, fax: +82-62-530-3369 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
17
+ page_content='01455v1 [quant-ph] 4 Jan 2023 I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
18
+ page_content=' INTRODUCTION When a single photon is in a particular mode, according to the particle nature of light, photons will be sequentially found in that mode.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
19
+ page_content=' The probability of finding a photon is proportional to the absolute square of the wave function related to the electromagnetic wave.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
20
+ page_content=' Vacuum fluctuations are related to the spatial characteristics of the electromagnetic wave.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
21
+ page_content=' The spontaneous decay caused by the vacuum can be suppressed in cavities [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
22
+ page_content=' The- oretical and experimental studies have beem conducted on methods to change the vacuum fluctuations near mirrors[2–4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
23
+ page_content=' In this study, in contrast to previous studies on the vacuum noise characteristics of light using a homodyne detector, we calculate the intensity fluctuations when photons are directly measured using photon counter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
24
+ page_content=' The obtained results are similar to those obtained in previous studies, but herein we predict the results considering mode matching in the experiment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
25
+ page_content=' In section II, the fluctuation of light that can be measured using a detector is calculated with a mirror placed on one side of the beam splitter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
26
+ page_content=' In section III, an experimental device is proposed for perfect mode matching, and in the last section, the practical limits of the vacuum fluctuation near the mirror are discussed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
27
+ page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
28
+ page_content=' VACUUM FLUCTUATION NEAR A MIRROR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
29
+ page_content=' An electric field can be written as ˆEL = ˆEcl + ˆEQ, (1) where ˆEcl = i � ℏω 2ϵ0V (αei(ωt−k0z) − α∗ei(ωt−k0z))⃗x, ˆEQ = i � k � ℏωk 2ϵ0V (ˆbke−i(ωkt−kz) − ˆb† kei(ωkt−kz))⃗x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
30
+ page_content=' (2) Here, k0 and ω are the wave number and angular frequency of the laser, respectively, ℏ and ϵ0 have usual meanings, and V is the normalization volume[5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
31
+ page_content=' Considering the laser mode 2 FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
32
+ page_content=' 1: Vacuum mode relations in the beam splitter with a mirror.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
33
+ page_content=' BS: Beam splitter, M: mirror in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
34
+ page_content=' 1, the modes aout 1 and aout 2 can be written as aout 1 = √ Tb + √ Rc, aout 2 = − √ Rb + √ Tc, (3) where the modes c and cout can be written as c = � Tmd − � Rmcout, cout = √ Ra1 + √ Ta2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
35
+ page_content=' Then the electric field in fluctuating vacuum modes at a1 is ˆE(+) vac,1 = � k i � ℏωk 4ϵ0V { √ Tˆb† kei(ωkt−kZ1) + µˆa† 1,kei(ωkt+kz1) −R � Rmˆa† 1,kei(ωkt−kz1) − √ RT � Rmˆa† 2,kei(ωkt−kz1) + � RTm ˆd† kei(ωkt−kZM)} (4) where Rm(Tm) is the reflectance(transmittance) of the mirror and R(T) is the reflectance (transmittance) of the beam splitter, z1(Z1) is the distance from the mirror (laser) to the detector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
36
+ page_content=' ZM is related to the vacuum source behind the mirror and it can be any number.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
37
+ page_content=' We add the factor 1 √ 2 for the normalization of the vacuum fluctuation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
38
+ page_content=' The vacuum mode (ˆa† 1ei(ωt−kz1)) at the detector is the reflected vacuum mode (ˆa† 1ei(ωt+kz1)) at the mirror.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
39
+ page_content=' If two modes are perfectly matched the µ in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
40
+ page_content=' 4 is 1 and the two counterpropagating modes yield the standing wave mode[2, 3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
41
+ page_content=' If µ = 0, the fluctuation value from Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
42
+ page_content=' 7 becomes |α|2T 2 , 3 b BS α2 C d ino ↑ M ino a1 ait is the square of the constant dc current T|α|2 2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
43
+ page_content=' In other words, if we directly measure the fluctuation of the laser intensity, the fluctuation is dependent on the distance (z1) between the mirror and the detector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
44
+ page_content=' Even in photo counting experiments, the photon number fluctuation is related to the vacuum fluctuation, therefor, the photon number fluctuation is also depend on the distance z1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
45
+ page_content=' If we used the photodetetion theory [6] with instantaneous response of the photodetector [7], ˆI1 = { √ T ˆE(+) cl + ˆE(+) vac,1} × { √ T ˆE(−) cl + ˆE(−) vac,1}, (5) where we normalize the photocurrent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
46
+ page_content=' If the electric field of the local oscillator is considerably greater than the vacuum field, the terms containig α have physical significance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
47
+ page_content=' When the constant dc current T|α|2 2 is neglected, Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
48
+ page_content=' 5 yields ˆIo 1(z1, Z1) = |α| √ 2[ √ Teiφ{(µe−ik(Z1+z1) − e−ik(Z1−z1)R � Rm)ˆa1 − e−ik(Z1−z1)ˆa2} + √ Te−iφ{(µeik(Z1+z1) − eik(Z1−z1)R � Rm)ˆa† 1 − e−k(Z1−z1)ˆa† 2} + eiφTˆb + e−iφTˆb† + eiφeik(ZM−Z1)� TRTm ˆd + e−iφe−ik(ZM−z1)� TRTm ˆd†], (6) We then evaluate the square of the photocurrent to determine the fluctuation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
49
+ page_content=' After squaring Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
50
+ page_content=' 6, we find the photocurrent fluctuation as follows: ⟨(ˆIo 1)2⟩ = |α|2T 2 {1 + µ2 − 2µR � Rm cos(2kz1)} (7) If µ = 0, the fluctuation value from Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
51
+ page_content=' 7 becomes |α|2T 2 , which is the square of the con- stant dc current √ T|α| √ 2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
52
+ page_content=' In other words, if we directly measure the laser intensity fluctuation, the fluctuation is dependent on the distance (z1) between the mirror and detector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
53
+ page_content=' Even in the photo counting experiment, the photon number fluctuation is related to the vacuum fluctuation;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
54
+ page_content=' therefore, the photon number fluctuation is also dependent on the distance z1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
55
+ page_content=' If we consider practical limits such as finite linewidth and finite absorption length, Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
56
+ page_content=' 7 will change as follows[2, 8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
57
+ page_content=' ⟨(ˆIo 1)2⟩P = |α|2T 2 {1 + µ2 − 2µR � Rme−z2 1∆k2 × κ[cos(2k0z1 + φ0) − e−κD cos(2k0(z1 + D) + φ0)] � 4k2 0 + κ2 }, (8) 4 where ∆k is the line width of the local oscillator beam with Gaussian line width distribution functions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
58
+ page_content=' κ is the absorption coefficient, D is the detector active length, and φ0 = arctan 2k κ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
59
+ page_content=' We assumed that the probability that a photon is converted into an electron hole pair at distance η from the surface of the detector’s active region is κe−κη[9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
60
+ page_content=' The two coefficients √Rm and µ depend on the mode matching condition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
61
+ page_content=' Even when we used the total mirror, if the mode from the mirror is not perfectly matched with the mode from the laser, the effective reflectance √Rm can not be 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
62
+ page_content=' Furthermore, the mode a1 to the mirror is reflected by the mirror and then meets at the detector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
63
+ page_content=' At the detector, if two counter-propagating modes are not exactly matched, the coefficient µ cannot be 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
64
+ page_content=' To evaluate this mode matching condition, we assume that the amplitude envelope of the electromagnetic wave in the transverse plane is given by a Gaussian function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
65
+ page_content=' Considering the Gaussian modes [10] E(ρ, z) = E0 w0 w(z) exp[− ρ2 w(z)2] exp[−ikz − ik ρ2 2R(z) + iζ(z)] (9) , where w0 is the radius of the beam waist and w(z) = w0 � 1 + ( z z0 )2 R(z) = z(1 + (z0 z )2) ζ(z) = tan−1 z z0 (10) and z0 is defined as follows: z0 = π λw2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
66
+ page_content=' (11) First, we assume that the laser and vacuu modes have the same beam waist w0 at the detector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
67
+ page_content=' Then the laser and vacuum modes are perfectly matched;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
68
+ page_content=' thus, √Rm = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
69
+ page_content=' On the other hand, the vacuum Ev(0) starting from the detector propagates to the mirror and reflects at the mirror.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
70
+ page_content=' The returned vacuum Ev(2z1) is not the same Ev(0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
71
+ page_content=' The coefficient µ can be calculated as follow: µ = | < Ev(0)Ev(2z1)∗ > | � < Ev(0)2 >< Ev(2z1)2 > = (1 + 4z2 1 z2 0 ) 1 4 (1 + 5z2 1 z2 0 + 4 z4 1 z4 0 ) 1 4 (12) 5 FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
72
+ page_content=' 2: Mode matching value µ as a function of w0 and z1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
73
+ page_content=' In Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
74
+ page_content=' 2, µ is plotted as a function of z1 and w0, where z1 is the distance between the mirror and detector We assume that the detector and mirror are large enough that all the waves are detected and reflected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
75
+ page_content=' If the distance between the mirror and detector and the size of the beam waist are small enough, the coefficient µ remains near 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
76
+ page_content=' If we consider the case where the vacuum field has waist at the mirror, the coefficient µ automatically becomes 1 due to the symmetry, but the vacuum field Ev(z1) at the detector does not matche the laser field EL(0).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
77
+ page_content=' We assumed that the laser field has beam waist w0 at the detector, and the vacuum field has a beam waist wm at the mirror.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
78
+ page_content=' Then the effective reflectance √Rm becomes � Rm = | < Ev(z1)EL(0)∗ > | � < Ev(z1)2 >< EL(0)2 > = √ 2 � wm w0 (1 + z2 1 z2m) 1 4 ({(1 + w2m w2 0 )2 + z2 1 z2 0 }{1 + z2 1 z2m}) 1 4 , (13) where zm = π λw2 m.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
79
+ page_content=' In Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
80
+ page_content=' 3, √Rm is plotted as a function of z1 and wm, where z1 is the distance between the mirror and detector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
81
+ page_content=' We set w0 to 100λ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
82
+ page_content=' Additionally, we also assume that the detector and mirror are large enough that all the waves are detected and reflected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
83
+ page_content=' The coefficient √Rm can be 1 only when the distance between the mirror and detector is small and the size of the beam waist is sufficiently small.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
84
+ page_content=' 6 μ 1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
85
+ page_content='5 3 4 5 log( 0m 3 入 log() 7 2FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
86
+ page_content=' 3: Mode matching value √Rm as a function of w0 and z1, with w0 equal to 100λ The mode matching condition is crucial for detecting the modulation effect of the vacuum fluctuation near the mirror, as denoted by Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
87
+ page_content=' 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
88
+ page_content=' With the usual setup, we can not satisfy the conditions µ = 1 and √Rm = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
89
+ page_content=' In the next section, we suggest a noble experimental setup that satisfies two mode-matching conditions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
90
+ page_content=' III.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
91
+ page_content=' SET UP FOR MODE MATCHING For a laser that has a Gaussian transverse mode, we have to establish a vacuum mode that also has a Gaussian transverse mode.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
92
+ page_content=' Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
93
+ page_content=' 4 displays the setup for perfect mode matching between the laser light mode and a vacuum mode.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
94
+ page_content=' The laser used in the experiment passes through lens L1 and is divided into two by the beam splitter (BS1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
95
+ page_content=' The laser is a Gaussian beam and it proceeds according to the Gaussian approximation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
96
+ page_content=' The light passing through BS1 and traveling to mirror M2 reaches the partial mirror B and yields a beam waist on the L3 side surface of B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
97
+ page_content=' Similarly, the light reflecting from the mirror M1 passes through the partial reflector A and yields a beam waist on the L2 side surface of A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
98
+ page_content=' The light passing through A and B passes through the L2 and L3 of the same focal length, respectively, and yields another beam waist on the detector surface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
99
+ page_content=' The transmittance of light passing through A from M1 is almost 0, and the reflectance of light stemming from the L2 side is almost 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
100
+ page_content=' In this way, if the mode is perfectly matched using the light passing through B and A, an experimental setup can be established wherein one side of the beam splitter BS2 is a mirror (A).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
101
+ page_content=' 7 /Rm 1 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
102
+ page_content='5 3 4 5 3 1og wm log() 入 7 2Using this method, the degree of mode matching can be increased compared to that when the experiment is performed by simply placing a plane mirror on one side of the beam splitter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
103
+ page_content=' Additionally the experimental constraints caused by the mode matching can be overcome.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
104
+ page_content=' The experimental setup in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
105
+ page_content=' 4 enables the measurement of how the vacuum fluctuations of the light passing through the beam splitter change when a mirror is placed on one side of the beam splitter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
106
+ page_content=' FIG.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
107
+ page_content=' 4: Mode matching setup IV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
108
+ page_content=' CONCLUSION AND DISCUSSION.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
109
+ page_content=' The quantum nature of photons is highly dependent on their vacuum fluctuations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
110
+ page_content=' Vac- uum fluctuations can be directly measured via homodyne detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
111
+ page_content=' The fluctuation of one quadrature of the vacuum can be less than that of the usual vacuum, e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
112
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
113
+ page_content=', squeezed vacuum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
114
+ page_content=' Light intensity fluctuations are also dependent on vacuum fluctuations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
115
+ page_content=' Sub-Poisson light can be generated by controlling the vacuum fluctuations based on the nonlinear interaction of light and matter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
116
+ page_content=' In this study, we proposed the modulation of vacuum fluctuations by inserting a mirror on the unused part of the beam splitter in a homodyne measuring system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
117
+ page_content=' Furthermore, we calculated the effect of the line width of the laser and the thickness of the detector layer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
118
+ page_content=' The line width can be practically reduced to modulate vacuum fluctuations, but the decrease of the thickness of the detector to modulate vacuum fluctuations is chal- lenging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
119
+ page_content=' We calculated the effect of mode matching between the vacuum and light fields and 8 BS1 M1 A L1 L2 DD1 M2 BS2 B L3 D2showed that the degree of mode matching obtained by adding a simple mirror in the unused beam splitter may not be sufficient to modulate the vacuum fluctuations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
120
+ page_content=' We present the perfect mode matching method for the vacuum and light fields.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
121
+ page_content=' Then, the light intensity fluctuations can be reduced by inserting a beam splitter and a mirror.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
122
+ page_content=' We still require a detector with an active layer thinner than the wavelength to obtain a sub-Poisson light as a function of the distance between the mirror and detector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
123
+ page_content=' We expect that our simple method of reducing vacuum fluctuations will play a great role in quantum information science.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
124
+ page_content=' [1] W. Jhe, A. Anderson, E. A. Hinds, D. Meschede, L. Moi, and S. Haroche, Phys. Rev. Lett. 58, 666 (1987)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
+ page_content=' [2] S. H. Youn, J. H. Lee, J. S. Chang, Opt. and Quant. Elec. 27, 355 (1995)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
+ page_content=' [3] S. H. Youn, J. H. Lee, J. S. Chang, International Workshop on Squeezed States and Uncertainty Relations, N95-13921 (1994)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
+ page_content=' [4] S. A. Wadood, J. T. Schultz, A. N. Vamivakas, and C. R. Stroud Jr, J. of Mod. Opt. 66, 1116 (2019)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
+ page_content=' [5] A. Yariv, Quantum Electronics, 3rd ed., John Wiley & Sons, Inc. (1989)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
+ page_content=' [6] P. D. Drummond, Phys. Rev. A 35, 4253 (1987)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
+ page_content=' [7] B. Yurke, Phys. Rev. A 32, 311 (1985)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
+ page_content=' [8] A. E. Siegman, Lasers (Oxford University Press, Oxford, 1986)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
+ page_content=' [9] S. M. Sze, Semiconductor Devices: Physics and Technology (AT&T Bell Lab., Murray Hill, New Jersey, 1985)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
+ page_content=' [10] B. E. A. Saleh, M. C. Teich, Fundamentals of Photonics (Wiley, New York, 1991)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/4dAzT4oBgHgl3EQfffz3/content/2301.01455v1.pdf'}
59FAT4oBgHgl3EQfnB39/content/2301.08627v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeff6a7369ac0c1d85eef2be35c6202c6b95330852fdafd658be2ee8729d9faa
3
+ size 160979
59FAT4oBgHgl3EQfnB39/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dafd1b99039cf71614020e3030ea8a3b2d078e881ef8cdb3264cda8ac4587cfa
3
+ size 1900589
59FAT4oBgHgl3EQfnB39/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9d06f8379544721a7eafd258ce88b9192681f761775b9297c2914de0fc4b9c0
3
+ size 90732
5tE3T4oBgHgl3EQfpQqt/content/tmp_files/2301.04641v1.pdf.txt ADDED
@@ -0,0 +1,2422 @@
 
 
 
 
1
+ 1
2
+ Plug-in Channel Estimation with Dithered
3
+ Quantized Signals in Spatially Non-Stationary
4
+ Massive MIMO Systems
5
+ Tianyu Yang1, Johannes Maly2,3, Sjoerd Dirksen4,
6
+ and Giuseppe Caire1
7
+ Abstract
8
+ As the array dimension of massive MIMO systems increases to unprecedented levels, two problems
9
+ occur. First, the spatial stationarity assumption along the antenna elements is no longer valid. Second,
10
+ the large array size results in an unacceptably high power consumption if high-resolution analog-to-
11
+ digital converters are used. To address these two challenges, we consider a Bussgang linear minimum
12
+ mean square error (BLMMSE)-based channel estimator for large scale massive MIMO systems with
13
+ one-bit quantizers and a spatially non-stationary channel. Whereas other works usually assume that the
14
+ channel covariance is known at the base station, we consider a plug-in BLMMSE estimator that uses an
15
+ estimate of the channel covariance and rigorously analyze the distortion produced by using an estimated,
16
+ rather than the true, covariance. To cope with the spatial non-stationarity, we introduce dithering into the
17
+ quantized signals and provide a theoretical error analysis. In addition, we propose an angular domain
18
+ fitting procedure which is based on solving an instance of non-negative least squares. For the multi-user
19
+ data transmission phase, we further propose a BLMMSE-based receiver to handle one-bit quantized data
20
+ signals. Our numerical results show that the performance of the proposed BLMMSE channel estimator
21
+ is very close to the oracle-aided scheme with ideal knowledge of the channel covariance matrix. The
22
+ BLMMSE receiver outperforms the conventional maximum-ratio-combining and zero-forcing receivers
23
+ in terms of the resulting ergodic sum rate.
24
+ 1Communications and Information Theory Group (CommIT), Technische Universität Berlin, 10587 Berlin, Germany (e-mail:
+ {tianyu.yang, caire}@tu-berlin.de).
+ 2Ludwig-Maximilians-Universität Munich, 80333 Munich, Germany (e-mail: [email protected]).
27
+ 3Munich Center for Machine Learning (MCML).
28
+ 4Utrecht University, 3584 CD Utrecht, Netherlands (e-mail: [email protected]).
29
+ This work has been submitted to the IEEE for possible publication. Copyright may be transferred without notice, after which
30
+ this version may no longer be accessible.
31
+ arXiv:2301.04641v1 [cs.IT] 11 Jan 2023
32
+
33
+ 2
34
+ Index Terms
35
+ Extra-Large Scale Massive MIMO, Spatially Non-Stationary, One-Bit Quantization, Dithering, Bussgang Linear MMSE (BLMMSE).
37
+ I. INTRODUCTION
38
+ Massive multiple-input-multiple-output (MIMO) has been vastly researched and considered as
39
+ an essential technology in 5G wireless communication systems within sub-6 GHz bands [1–3].
40
+ Benefiting from the large number (tens to hundreds) of antennas at the base station (BS) array,
41
+ dozens of users can be served in the same time-frequency slots. This results in higher spectrum
42
+ and energy efficiency due to the spatial multiplexing and high array gain [1, 4]. Theory shows that
43
+ by increasing the array dimension, i.e., the number of antenna elements, it is possible to achieve
44
+ higher data rates and to mitigate the impacts of inter-cell interference and thermal noise [5].
45
+ Nevertheless, as the array dimension increases, two new challenges occur. First, some inherent
46
+ properties of the channel environment change compared to small-scale MIMO, so that the basic
47
+ assumptions of massive MIMO design are no longer valid for large arrays. Specifically, most
48
+ existing massive MIMO works are based on the assumption of a spatially stationary channel,
49
+ where all antenna elements observe the same far-field propagation from the channel scatters [6–9].
50
+ However, in the very large antenna array regime, spatial non-stationarity has been experimentally
51
+ observed [10]. Two main reasons for this non-stationarity are further discussed in [11–13]. First,
52
+ with large arrays, the distance between the BS array and some scattering clusters may be smaller
53
+ than the Rayleigh distance. As a consequence, user signals impinging onto the BS array cannot
54
+ be assumed as far-field propagation and have a spherical wavefront instead of a plane wavefront.
55
+ Second, due to the physical large size of the array, some of the scattering clusters may only be
56
+ visible to a part of the array. Furthermore, a new deployment of large arrays that are usually
57
+ integrated into large structures, e.g., along the walls of buildings [14], was considered as an
58
+ extension of massive MIMO and referred to as extra-large scale massive MIMO (XL-MIMO) in
59
+ [15]. It is also pointed out in [15] that due to the large dimension (tens of meters) of XL-MIMO,
60
+ spatially non-wide sense stationary (non-WSS) characteristics appear along the array.
61
+ Aside from the spatially non-WSS property of large antenna arrays, a second major concern is
62
+ the hardware cost and power consumption of high-resolution analog-to-digital converters (ADCs).
63
+ Commercial high-resolution ADCs (12 to 16 bits) are expensive and their power consumption
64
+
65
+ 3
66
+ grows exponentially in terms of the number of quantization bits [16]. This problem is even more
67
+ severe for wideband systems, where the power consumption of high-resolution ADCs increases
68
+ linearly with the signal bandwidth due to required higher sampling rates [17]. To alleviate the
69
+ issue of high power consumption, low-resolution ADCs (e.g., 1-3 bits) are utilized for massive
70
+ MIMO systems [18–20] and it is shown in [18, 19] that the capacity loss due to the coarse
71
+ quantization is approximately equal to only π/2 at low signal-to-noise ratios (SNRs). In massive
72
+ MIMO systems the SNR per antenna element may be relatively low, while still achieving an
73
+ overall large spectral efficiency over data stream due to the large number of antennas per user
74
+ (data stream), such that both spatial multiplexing gain and array gain are achieved.
75
+ A. Contributions
76
+ Accurate estimation of channel state information (CSI) at the BS is a key factor to achieve
77
+ the potential benefits of massive MIMO systems. Taking the spatially non-WSS property into
78
+ account, an adaptive grouping sparse Bayesian learning scheme was proposed for uplink channel
79
+ estimation in [21]. A model-driven deep learning-based channel reconstruction scheme was
80
+ proposed in [22]. On the other hand, many recent works have investigated channel estimators
81
+ with one-bit quantized signals in massive MIMO systems, see e.g., [23, 24] and references
82
+ therein. Very recently, in [25] a covariance recovery scheme for one-bit sampled non-stationary
83
+ signals with time-varying sampling thresholds was proposed, where a modified arcsine law was
84
+ further generalized to fit the non-stationary case. However, the study in [25] is not within the
85
+ massive MIMO regime. To the best of our knowledge, no work has addressed the problem of
86
+ channel estimation with both low-resolution quantization and spatially non-WSS channels in
87
+ massive MIMO systems. To fill this gap, in this paper we adopt the Bussgang linear minimum
88
+ mean square estimation (BLMMSE) method that was initially proposed in [23], and propose
89
+ a BLMMSE-based “plug-in” channel estimator for one-bit Massive MIMO systems with the
90
+ spatially non-WSS property.1 Our main contributions are summarized as follows.
91
+ • We adopt a BLMMSE channel estimator to deal with the one-bit quantized signal. However, in contrast to [23] that assumes exact knowledge of the channel covariance at the BS, we propose a "plug-in" version that instead uses an estimate of the channel covariance. Our first contribution is a theoretical analysis of the distortion caused by the use of this estimate: we estimate the mean squared distance between the BLMMSE estimate based on an estimate of the channel covariance versus the BLMMSE estimate based on the true channel covariance (see Lemma 1).
+ 1By "plug-in" we mean that the BLMMSE requires the knowledge of the channel covariance, which is typically assumed to be known. However, in our setting, also the channel covariance needs to be estimated from low-resolution quantized samples. The plug-in estimator consists of using the estimated covariance "as if" it were the true one, in the BLMMSE.
103
+ • Our second contribution is a method to estimate the channel covariance matrix based on
104
+ one-bit samples. We introduce dithering into the one-bit ADCs to cope with the non-Toeplitz
105
+ structure of the channel covariance matrix (resulting from the spatially non-WSS channel).
106
+ We propose a covariance estimator based on dithered quantized samples and derive bounds
107
+ on the estimation error in terms of the maximum norm and the Frobenius norm (Theorem 1).
108
+ By combining this result with the aforementioned bound on the MSE achieved by the
109
+ BLMMSE with given estimated covariance, we derive a bound on the expected MSE of the
110
+ channel estimator in terms of the number of samples used to estimate the channel covariance
111
+ (Theorem 2).
112
+ • We empirically further enhance the proposed channel covariance estimator by exploiting
113
+ the angle domain of the spatially non-WSS channel. Using dictionary functions in the angle
114
+ domain, we formulate the channel covariance estimation as a non-negative least-squares
115
+ problem (NNLS), which can be efficiently solved by a standard numerical NNLS solver,
116
+ e.g., [26], even for very large problem dimensions.
117
+ • We design a linear receiver for the uplink (UL) data transmission phase, based on an estimate
118
+ of the channel matrix obtained in the training phase, to achieve better rate detection and thus
119
+ improve the ergodic sum rate when serving multiple users. Contrary to the conventional maximum-
120
+ ratio-combining (MRC) and zero-forcing (ZF) receivers that do not take the quantization into
121
+ account, the proposed receiver considers the Bussgang decomposition of one-bit quantized
122
+ data signals and uses BLMMSE-based estimation with knowledge of only the estimated
123
+ channel covariance matrix.
124
+ • Our numerical results show that the proposed BLMMSE channel estimator, which uses the
125
+ proposed channel covariance estimator based on dithered quantized samples, is superior
126
+ to benchmark methods and achieves a performance very close to the performance of an
127
+ oracle-aided scheme using the true channel covariance matrix. The proposed BLMMSE-
128
+ based receiver also significantly outperforms MRC and ZF receivers as expected due to its
129
+
130
+ 5
131
+ specific consideration of quantized signals.
132
+ B. Organization
133
+ The rest of this paper is organized as follows. In Section II, we introduce the channel model
134
+ with the spatially non-stationary property. Section III is devoted to the analysis of the BLMMSE
135
+ channel estimator and our results on channel covariance estimation from one-bit quantized
136
+ samples. In Section IV, we propose the BLMMSE receiver for the data transmission phase
137
+ to obtain a higher sum rate. The numerical results are then provided in Section V. Finally, in
138
+ Section VI we conclude our work and provide a discussion of possible future research directions.
139
+ C. Notation
140
+ For any N ∈ N we write [N] = {1, 2, . . . , N}. We use lower-case, bold lower-case, and
141
+ bold upper-case letters to denote scalars, column vectors, and matrices, respectively. The trace,
142
+ transpose and Hermitian transpose are respectively denoted by tr(·), (·)T and (·)H. E[·] returns the
143
+ mathematical expectation. diag(A) gives a diagonal matrix with diagonal of A, while diag(a)
144
+ denotes the diagonal matrix with diagonal equal to a. We denote the M × M identity matrix by
145
+ IM. The i-th element of a vector a is denoted by [a]i, while the i-th row and column of a matrix
146
+ A are respectively denoted by [A]i,· and [A]·,i. An all-zero matrix is denoted by 0. ∥a∥2 denotes
147
+ the Euclidean norm of a vector a. ∥A∥F, ∥A∥, and ∥A∥∞ denote the Frobenius, operator, and
148
+ maximum norms of a matrix A. We use ⟨A, B⟩F := tr(AHB) to denote the Frobenius inner
149
+ product. We furthermore use a ≲ b to abbreviate a ≤ Cb, for some absolute constant C > 0.
150
+ II. SYSTEM MODEL WITH SPATIALLY NON-STATIONARY CHANNEL
151
+ Consider a BS equipped with M antennas in a uniform linear array (ULA). We assume that
152
+ the channel scattering clusters consist of common and local clusters, where common clusters
153
+ are visible to all antennas while local clusters are only visible to a sub-array. An illustration of
154
+ the considered scattering geometry is shown in Fig. 1. The channel vector resulting from the
155
+ contribution of the common clusters at the n-th time slot is given by
156
+ h^c_n = ∑_{i=1}^{L_c} ρ^c_i(n) a(θ^c_i),    (1)
+ where L_c denotes the total number of multipaths in the common clusters, ρ^c_i(n) ∼ CN(0, γ^c_i) is the i-th complex channel gain of the common clusters with its power γ^c_i, θ^c_i is the i-th angle of arrival (AoA), and where a(θ) ∈ C^{M×1} is the steering vector, whose m-th entry is [a(θ)]_m = e^{jπ(m−1) sin(θ)} by assuming that the antenna spacing is equal to half of the carrier wavelength, for all m ∈ M, where M = [M] is the antenna index set of all M antennas.
+ Fig. 1: Illustration of the studied large-scale Massive MIMO system in ULA with spatially non-WSS channel, where local clusters are only visible to a part of antenna elements while the common clusters are visible to the whole array.
187
+ Assume that there are L local clusters and that each local cluster is visible to a consecutive sub-
188
+ array. The i-th local cluster is thus visible to M^l_i antennas with index set M^l_i, where M^l_i = |M^l_i|.
193
+ The channel vector resulting from the contribution of the paths in the i-th local cluster at the
194
+ n-th time slot is given by
195
+ h^l_{n,i} = ∑_{j=1}^{L^l_i} ρ^l_{ij}(n) S_i a(θ^l_{ij}),    (2)
+ where L^l_i denotes the total number of multipaths in the i-th local cluster, ρ^l_{ij}(n) ∼ CN(0, γ^l_{ij}) is the j-th complex channel gain of the i-th local cluster with its power γ^l_{ij}, θ^l_{ij} is the AoA, and
212
+ where Si ∈ CM×M is the diagonal selection matrix indicating the visible sub-array of the i-th
213
+ local cluster, whose diagonal is defined as
214
+ [S_i]_{m,m} = 1 if m ∈ M^l_i, and 0 if m ∈ M \ M^l_i.    (3)
227
+ We further assume that the channel gains of different paths in all common and local clusters at
228
+ each time slot n, {ρ^c_i(n)}_{i=1}^{L_c} and {ρ^l_{ij}(n)}_{j=1}^{L^l_i}, ∀i ∈ [L], are uncorrelated2. Note that we implicitly assume that the channel geometry and visibility of all clusters do not change over the channel geometry coherent time T_c, which is a much longer time period than the channel coherent time (see [30] and references therein). Concretely, the Angular Power Spectrum (APS) {γ^c_i}_{i=1}^{L_c} and {γ^l_{ij}}_{j=1}^{L^l_i}, ∀i ∈ [L], along with the AoAs {θ^c_i}_{i=1}^{L_c} and {θ^l_{ij}}_{j=1}^{L^l_i}, ∀i ∈ [L], as well as the selection matrices {S_i}_{i=1}^{L} are constant over T_c. Under these assumptions, the total channel vector at the n-th time slot h_n and the corresponding total channel covariance matrix C_h are given by
261
+ h_n = h^c_n + ∑_{i=1}^{L} h^l_{n,i},    (4)
+ C_h = E[h_n h_n^H] = C_{h^c} + ∑_{i=1}^{L} C_{h^l_i}    (5)
+     = ∑_{i=1}^{L_c} γ^c_i a(θ^c_i) a(θ^c_i)^H + ∑_{i=1}^{L} ∑_{j=1}^{L^l_i} γ^l_{ij} S_i a(θ^l_{ij}) a(θ^l_{ij})^H S_i^H    (6)
+     = A^c diag(γ^c) (A^c)^H + ∑_{i=1}^{L} S_i A^l_i diag(γ^l_i) (A^l_i)^H S_i^H,    (7)
+ where A^c := [a(θ^c_1), . . . , a(θ^c_{L_c})], γ^c := [γ^c_1, . . . , γ^c_{L_c}]^T and A^l_i := [a(θ^l_{i,1}), . . . , a(θ^l_{i,L^l_i})], γ^l_i := [γ^l_{i,1}, . . . , γ^l_{i,L^l_i}]^T, ∀i ∈ [L].
+ The total channel power gain of all common clusters and all local clusters are given by
+ P^c = ∑_{i=1}^{L_c} γ^c_i  and  P^l = ∑_{i=1}^{L} P^l_i = ∑_{i=1}^{L} ∑_{j=1}^{L^l_i} γ^l_{ij},    (8)
346
+ where we assume that P c and P l are normalized such that max(diag(Ch)) = 1. To help
347
+ readability, Table I summarizes the model notation.
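+ To make the covariance structure in (5)–(7) concrete, the following minimal numpy sketch (ours, not part of the paper) builds C_h for a toy geometry; the powers, angles, and visibility sets below are illustrative assumptions only, not the paper's simulation setup.
+ ```python
+ import numpy as np
+ 
+ def steering(theta_deg, M):
+     # ULA steering vector with half-wavelength spacing: [a(theta)]_m = exp(j*pi*(m-1)*sin(theta))
+     return np.exp(1j * np.pi * np.arange(M) * np.sin(np.deg2rad(theta_deg)))
+ 
+ def channel_covariance(M, common, local):
+     # common: list of (power, angle); local: list of (visible antenna indices, list of (power, angle))
+     C = np.zeros((M, M), dtype=complex)
+     for g, th in common:                          # common clusters, visible to the whole array
+         a = steering(th, M)
+         C += g * np.outer(a, a.conj())
+     for idx, paths in local:                      # local clusters, masked by the selection matrix S_i
+         S = np.zeros((M, M))
+         S[idx, idx] = 1.0
+         for g, th in paths:
+             a = S @ steering(th, M)
+             C += g * np.outer(a, a.conj())
+     return C
+ 
+ # toy example (illustrative numbers only)
+ M = 32
+ common = [(0.15, -20.0), (0.15, 35.0)]
+ local = [(np.arange(0, M // 4), [(0.35, -40.0)]),
+          (np.arange(3 * M // 4, M), [(0.35, 10.0)])]
+ C_h = channel_covariance(M, common, local)
+ print(np.allclose(C_h, C_h.conj().T))             # the resulting covariance is Hermitian
+ ```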
348
+ III. CHANNEL ESTIMATION WITH ONE-BIT SAMPLES
349
+ For a generic user under a normalized pilot, the BS receives at the n-th time slot the signal
350
+ yn = hn + nn,
351
+ (9)
352
+ where hn ∼ CN(0, Ch) is the M × 1 channel vector and nn ∼ CN(0, N0IM) is additive white
353
+ Gaussian noise (AWGN) with noise power N0. The SNR is thus defined by 1/N0 due to the
354
+ 2Note that this is a standard assumption specified in, e.g., the channel models of 3GPP standard TR 38.901 [27] and TR
355
+ 25.996 [28]. This assumption is also implicitly included in the documentation of the well-known channel simulator QuaDRiGa
356
+ [29].
357
+
358
+ L               Number of local clusters
+ L_c, L^l_i      Number of multipaths of common and local clusters
+ ρ^c_i, ρ^l_{ij}  Complex channel gain of common and local clusters
+ γ^c_i, γ^l_{ij}  APS of common and local clusters
+ S_i             Diagonal selection matrices of local clusters
+ θ^c_i, θ^l_{ij}  AoAs of common and local clusters
+ A^c, A^l_i      Matrices of steering vectors of common and local clusters
+ TABLE I: Summary of the used notations
382
+ assumption that max(diag(Ch)) = 1. After one-bit ADC the quantized signal becomes
383
+ rn = Q(yn),
384
+ (10)
385
+ where Q(·) is a suitable one-bit quantizer that is applied separately to the real and imaginary
386
+ part. One popular instance for such a quantizer is the complex-sign operator [31]
387
+ r^nd_n = csign(y_n) = (1/√2) ( sign(Re(y_n)) + j sign(Im(y_n)) ),    (11)
+ which quantizes the entries of Re(y_n) and Im(y_n) independently, i.e., the sign-function sign: R → {−1, 1},
+ sign(x) = 1 if x ≥ 0, and −1 if x < 0,    (12)
409
+ acts componentwise (memoryless scalar quantization). We use the superscript “nd” (“non-dithered”)
410
+ in (11) since the sign-function is applied directly to the samples without dithering. Note that
411
+ this type of one-bit quantization loses any scaling information.
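+ As a quick illustration of (11)–(12), here is a minimal numpy sketch of the complex-sign quantizer; the 1/√2 scaling and the convention sign(0) = 1 follow the definitions above, while the test vector is an arbitrary assumption.
+ ```python
+ import numpy as np
+ 
+ def csign(y):
+     # complex-sign quantizer of (11): one-bit on real and imaginary parts, scaled by 1/sqrt(2)
+     sgn = lambda x: np.where(x >= 0, 1.0, -1.0)   # sign convention of (12)
+     return (sgn(y.real) + 1j * sgn(y.imag)) / np.sqrt(2)
+ 
+ y = np.array([0.3 - 2.0j, -1.1 + 0.0j])
+ print(csign(y))          # [ 0.707-0.707j, -0.707+0.707j ]
+ print(np.abs(csign(y)))  # every entry has unit modulus: scaling information is lost
+ ```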
412
+ A. Bussgang LMMSE channel estimator
413
+ We consider channel estimation for a generic time slot. Thus, we ignore the time slot index
414
+ n for simplicity. In order to estimate the channel vector h from a quantized sample r, we first
415
+ transfer the nonlinear quantizer operation to a statistically equivalent linear formulation via the
416
+ well-known Bussgang decomposition [32], which yields
417
+ r = Q(y) = Ay + q    (13)
+   = Ah + An + q    (14)
+   = Ah + ñ,    (15)
+ where the linear operator A is called the Bussgang gain, q is a mean-zero random vector that is uncorrelated with y, and ñ := An + q is the total noise. To enforce q to be uncorrelated with y, the Bussgang gain A is chosen to minimize the power of the equivalent quantization noise [33] such that
+ A = E[r y^H] (E[y y^H])^{−1} = C_{ry} C_y^{−1},    (16)
+ where C_{ry} = E[r y^H] denotes the covariance between the quantized signal r and the received signal y. The so-called BLMMSE estimator [23] of the channel vector h given the quantized signal r is then expressed as
+ ĥ_BLM = C_{hr} C_r^{−1} r.    (17)
446
+ Note that this is not the optimal MMSE estimator since q is not Gaussian noise. We however know that the vector q is uncorrelated with the vector y and one can prove that q is also uncorrelated with the channel vector h, see [23, App. A] for the proof of E[h q^H] = 0. Thus, h is uncorrelated with the total noise ñ and consequently we obtain from (15) that
+ C_{hr} = C_h A^H.    (18)
452
+ Similarly as in [23], A and Cr can be easily computed as follows. For the one-bit quantizer in
453
+ (11) and Gaussian inputs, Cry is given as [34], [35, Ch.12]
454
+ C_{ry} = √(2/π) diag(C_y)^{−1/2} C_y    (19)
+ and combining (19) and (16) we obtain
+ A = √(2/π) diag(C_y)^{−1/2}.    (20)
+ Furthermore, C_r can be obtained using the map P_arcsine(·) by the arcsine law [34, 36] as
+ C_r = P_arcsine(C_y) = (2/π) [ arcsin( diag(C_y)^{−1/2} Re(C_y) diag(C_y)^{−1/2} ) + j arcsin( diag(C_y)^{−1/2} Im(C_y) diag(C_y)^{−1/2} ) ].    (21)
486
+ With the BLMMSE estimator in hand, (17) has a closed form that only depends on C_y = C_h + N_0 I_M. Whereas the noise power N_0 is normally assumed to be known at the BS3, the channel covariance matrix C_h still needs to be estimated from samples to finally apply the BLMMSE estimator (17). Considering a plug-in estimator Ĉ_y of C_y, we define the estimated channel vector as
+ ĥ = Ĉ_{hr} Ĉ_r^{−1} r,    (22)
+ where Ĉ_{hr} and Ĉ_r are the estimators of C_{hr} and C_r obtained by replacing C_y by its estimator Ĉ_y, i.e.,
+ Ĉ_{hr} = Ĉ_h Â^H,  Ĉ_h = Ĉ_y − N_0 I_M,  Â = √(2/π) diag(Ĉ_y)^{−1/2},  Ĉ_r = P_arcsine(Ĉ_y).    (23)
+ 3This can be achieved via, e.g., a low-rate control channel.
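+ As a minimal illustration of the plug-in steps (20)–(23), the following numpy sketch (ours, with hypothetical variable names) forms the Bussgang gain, the arcsine-law covariance, and the channel estimate (22) from a given covariance estimate and noise power; it is a sketch, not the authors' implementation.
+ ```python
+ import numpy as np
+ 
+ def arcsine_law(Cy):
+     # P_arcsine(.) of (21): normalize by the diagonal, take entrywise arcsin, rescale by 2/pi
+     d = 1.0 / np.sqrt(np.real(np.diag(Cy)))
+     D = np.outer(d, d)
+     return (2 / np.pi) * (np.arcsin(D * Cy.real) + 1j * np.arcsin(D * Cy.imag))
+ 
+ def blmmse_plugin(r, Cy_hat, N0):
+     # plug-in BLMMSE estimate (22)-(23) from the one-bit observation r
+     M = Cy_hat.shape[0]
+     Ch_hat = Cy_hat - N0 * np.eye(M)
+     A_hat = np.sqrt(2 / np.pi) * np.diag(1.0 / np.sqrt(np.real(np.diag(Cy_hat))))
+     Cr_hat = arcsine_law(Cy_hat)
+     Chr_hat = Ch_hat @ A_hat.conj().T
+     return Chr_hat @ np.linalg.solve(Cr_hat, r)
+ ```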
507
+ The following lemma controls the estimation error in (17) if an estimator �Cy of Cy is used.
508
+ Lemma 1: There are absolute constants c1, c2, C > 0 such that the following holds. Let
509
+ θ ∈ (0, 1) be fixed. Assume that
510
+ | [ diag(C_y)^{−1/2} C_y diag(C_y)^{−1/2} ]_{i,j} | ≤ 1 − θ,  for all i ≠ j    (24)
+ and
+ min_{i∈[M]} |[C_y]_{i,i}| ≥ θ,  λ_min(C_r) ≥ θ,    (25)
+ where λ_min(·) gives the minimal eigenvalue of the matrix. Consider ε_F > 0, ε_∞ > 0 such that
+ ‖Ĉ_y − C_y‖_F < ε_F,  ‖Ĉ_y − C_y‖_∞ < ε_∞
+ and assume that
+ ε_∞ ≤ c_1 min{ ε_F/‖C_y‖_F, θ^3/‖C_y‖_∞, θ, 1 }  and  ε_F ≤ c_2 min{ θ^4, θ^6 ‖C_h‖_F / ( max{1, ‖C_h‖} ‖C_y‖ ) }.    (26)
+ Then,
+ E[ ‖ĥ − ĥ_BLM‖_2^2 ] ≤ C θ^{−6} max{1, ‖C_h‖} ‖C_h‖_F ε_F,    (27)
554
+ where the expectation is taken with respect to r.
555
+ Proof: See Appendix A.
556
+ Remark 1: Let us briefly comment on the assumptions in Lemma 1. As the construction
557
+ of the BLMMSE involves the inverses of diag(Cy) and Cr, it is to be expected that in the
558
+ situation that these matrices are near-singular, a small error in the estimation of the covariance can lead to a large difference between ĥ and ĥ_BLM. This expected behaviour is quantified in Lemma 1 using the parameter θ. The lower bound on λ_min(C_r) is an implicit condition on C_y. To give a more explicit condition, let us write offdiag(C_r) for the off-diagonal part. Using that ‖arcsin(B)‖ ≤ (π/2)‖B‖ if ‖B‖_∞ ≤ 1 (see [37, Supplementary material]), we can make the
564
+ potentially crude estimate
565
+ λmin(Cr) ≥ λmin(diag(Cr)) − ∥ offdiag(Cr)∥ ≥ 1 − ∥ offdiag(Cy)∥,
566
+ (28)
567
+
568
+ 11
569
+ so that it is sufficient if
570
+ ∥ offdiag(Cy)∥ ≤ 1 − θ.
571
+ (29)
572
+ Note that the latter condition also implies (24). Finally, let us comment on the condition linking
573
+ ε∞ and εF in (26). In the application that follows, we will see that the ℓ∞-error achieved by the
574
+ estimator �Cy is a factor M smaller than the achieved Frobenius norm error. As a consequence,
575
+ the relation between ε∞ and εF will be satisfied.
576
+
577
+ B. Channel covariance estimation from quantized samples
578
+ In this part, we present an approach to estimate the covariance matrix Cy from a finite number
579
+ of samples so that we can use the estimate Ĉ_y to apply the plug-in BLMMSE channel estimator in (22). Assume that the BS collects N unquantized i.i.d. samples {y_n}_{n=1}^N for covariance estimation and applies coarse quantization in the ADCs. In the case of a spatially WSS channel, the diagonal of C_h is constant and the (non-dithered) one-bit samples r^nd_n defined in (11) can be used. Defining the sample covariance of the quantized samples
+ Ĉ^nd_r = (1/N) ∑_{n=1}^{N} r^nd_n (r^nd_n)^H,    (30)
+ the true covariance matrix C_y can then be estimated via the arcsin-law [31, 36, 37]
+ Ĉ^nd_y = sin( (π/2) Re(Ĉ^nd_r) ) + j sin( (π/2) Im(Ĉ^nd_r) ).    (31)
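+ A small numpy sketch of (30)–(31), assuming the non-dithered samples come from the csign quantizer sketched earlier; as the text notes, the recovered matrix necessarily has a constant (unit) diagonal, which is why this estimator suits only the spatially WSS case.
+ ```python
+ import numpy as np
+ 
+ def cov_nondithered(R_nd):
+     # R_nd: M x N matrix whose columns are csign-quantized samples r^nd_n
+     N = R_nd.shape[1]
+     Cr = (R_nd @ R_nd.conj().T) / N                                   # sample covariance (30)
+     # arcsin-law inversion (31); the diagonal is forced to 1, so only a
+     # normalized (constant-diagonal) estimate of C_y is recovered
+     return np.sin(np.pi / 2 * Cr.real) + 1j * np.sin(np.pi / 2 * Cr.imag)
+ ```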
617
+ Due to the spatially non-WSS property in our model, however, it is seen from the formulation in
618
+ (6) that the channel covariance may have a non-constant diagonal and non-Toeplitz structure. In
619
+ such a scenario, the estimator in (31) will perform poorly since it enforces a constant diagonal.
620
+ To overcome this limitation of the quantizer csign, we will introduce random dithering [38–
621
+ 40]. The beneficial effect of dithering in memoryless one-bit quantization was recently rigorously
622
+ analyzed in the context of one-bit compressed sensing, see, e.g., [41–46]. We will adapt a
623
+ covariance estimator from [37] that uses two-bit dithered quantized samples. Specifically, we
624
+ assume that the real and imaginary parts of each entry are quantized independently with two
625
+ independent dithers, so that we are given the (dithered) four-bit samples
626
+
627
+ ( Re(r^d_n), Im(r^d_n), Re(r̃^d_n), Im(r̃^d_n) ) := ( sign(Re(y_n) + τ^Re_n), sign(Im(y_n) + τ^Im_n), sign(Re(y_n) + τ̃^Re_n), sign(Im(y_n) + τ̃^Im_n) ),    (32)
657
+ where the real dithering vectors τ^Re_n, τ^Im_n, τ̃^Re_n, τ̃^Im_n ∈ R^M, for n ∈ [N], are independent and uniformly distributed in [−λ, λ]^M and λ > 0 is a tuning parameter. An example of an implementation of such a dithered quantization design is illustrated in Fig. 2. The real part and imaginary part of the received signal after the radio frequency (RF) circuits are sampled and stored separately in two sample-and-hold (S/H) circuits. Then, a switch is used to extract in turn the signals from the two S/H circuits and forward them to the one-bit ADC. Meanwhile, a dithering signal generated by the dithering generator (DG) is added in the one-bit ADC so that the analog signal is quantized with dithering. For instance, if the switches connect the 'a' points, the DG generates the random dithering signals τ^Re and τ^Im. Conversely, if the 'b' points are connected, τ̃^Re and τ̃^Im are generated by the DG. The quantized signals of all antenna chains are then processed in the digital signal processor (DSP). Note that we use S/H circuits to avoid using two one-bit ADCs for each real or imaginary part signal. Also, this circuit can be directly used for non-dithered one-bit quantization by fixing the connection of the switches and turning off the DG.
+ Fig. 2: Illustration of the implementation of the dithered one-bit quantizer in the m-th antenna chain.
689
+ Given N dithered quantized samples from (32), we can estimate Cy via
690
+ Ĉ^d_y = (1/2) Ĉ^d + (1/2) (Ĉ^d)^H,    (33)
+ where
+ Ĉ^d = (λ^2/N) ∑_{n=1}^{N} r^d_n (r̃^d_n)^H    (34)
+ is an asymmetric version of the sample covariance matrix scaled with λ^2. We can now quantify the approximation of C_y by Ĉ^d_y for all random vectors y ∈ C^M with S-subgaussian coordinates.
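+ The following numpy sketch generates the dithered one-bit samples of (32) and forms the estimator (33)–(34); the toy covariance, sample size, and the particular λ used in the usage lines are illustrative assumptions, not the paper's settings.
+ ```python
+ import numpy as np
+ 
+ rng = np.random.default_rng(0)
+ 
+ def dithered_cov_estimate(Y, lam):
+     # Y: M x N matrix of unquantized received samples y_n; lam: dithering range parameter
+     M, N = Y.shape
+     sgn = lambda x: np.where(x >= 0, 1.0, -1.0)
+     tau = lambda: rng.uniform(-lam, lam, size=(M, N))      # independent uniform dithers
+     Rd  = sgn(Y.real + tau()) + 1j * sgn(Y.imag + tau())   # r^d_n  of (32)
+     Rtd = sgn(Y.real + tau()) + 1j * sgn(Y.imag + tau())   # r~^d_n of (32)
+     Cd = (lam ** 2 / N) * (Rd @ Rtd.conj().T)              # asymmetric estimate (34)
+     return 0.5 * (Cd + Cd.conj().T)                        # symmetrized estimator (33)
+ 
+ # toy usage: random PSD C_y, complex Gaussian samples, lam ~ sqrt(log(N) * max diag(C_y))
+ M, N = 8, 2000
+ G = (rng.standard_normal((M, M)) + 1j * rng.standard_normal((M, M))) / np.sqrt(2)
+ Cy = G @ G.conj().T / M + 0.1 * np.eye(M)
+ L = np.linalg.cholesky(Cy)
+ Y = L @ (rng.standard_normal((M, N)) + 1j * rng.standard_normal((M, N))) / np.sqrt(2)
+ lam = np.sqrt(np.log(N) * np.real(np.diag(Cy)).max())
+ print(np.linalg.norm(dithered_cov_estimate(Y, lam) - Cy) / np.linalg.norm(Cy))
+ ```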
714
+ Definition 1: We say that a random vector y ∈ C^M with covariance matrix C_y has S-subgaussian coordinates if, for all p ≥ 2 and j ∈ [M],
+ max{ ( E[ |[Re(y)]_j|^p ] )^{1/p}, ( E[ |[Im(y)]_j|^p ] )^{1/p} } ≤ S √p ‖C_y‖_∞^{1/2}.    (35)
733
+ Note that if y ∈ C^M is complex Gaussian with mean zero, then both Re(y) and Im(y) are mean-zero real Gaussian vectors with covariance matrix (1/2) Re(C_y). Hence, y has S-subgaussian coordinates for some absolute constant S. The following estimates, which complement operator norm error bounds derived in [37], are tailored to be used in Lemma 1.
738
+ Theorem 1: Let y ∈ C^M be a mean-zero random vector with covariance matrix E[y y^H] = C_y and S-subgaussian coordinates. Let y_1, ..., y_N i.i.d. ∼ y. Then there exists a constant c > 0 which only depends on S such that if λ^2 ≳ log(N) ‖C_y‖_∞, the covariance estimator Ĉ^d_y fulfills, for any t ≥ 0, with probability at least 1 − 8e^{−cNt},
+ ‖Ĉ^d_y − C_y‖_∞ ≲ λ^2 √( (log(M) + t) / N )    (36)
+ and
+ ‖Ĉ^d_y − C_y‖_F ≲ λ^2 √( M^2 (log(M) + t) / N ).    (37)
767
+ Proof: See Appendix B.
768
+ By combining Theorem 1 with Lemma 1, we can derive a bound on the expected estimation
769
+ error of the channel vector in terms of the number of samples N used to estimate Cy.
770
+ Theorem 2: There exist constants c1, . . . , c4 > 0 depending only on S such that the following
771
+ holds. Let y ∈ CM be a zero-mean random vector with covariance matrix Cy and S-subgaussian
772
+ coordinates. Let y_1, ..., y_N i.i.d. ∼ y. Suppose that C_y, C_r, and θ ∈ (0, 1) satisfy (24) and (25). Further suppose that λ^2 ≥ c_1 log(N) ‖C_y‖_∞ and
+ N ≥ c_2 λ^4 M^2 ( θ^{−6} + θ^{−12} ‖C_h‖_F^{−2} max{1, ‖C_h‖^2} ‖C_y‖^2 ) max{1, ‖C_y‖_∞^2} ( log(M) + t ).    (38)
+ Then, for any t ≥ 0, with probability at least 1 − 8e^{−c_3 N t},
+ E[ ‖ĥ − ĥ_BLM‖_2^2 ] ≤ c_4 λ^2 θ^{−6} M max{1, ‖C_y‖_∞} max{1, ‖C_h‖} ‖C_h‖_F √( (log(M) + t) / N ).    (39)
798
+ Proof: See Appendix C.
799
+ Remark 2: Theorem 2 implies that the parameter λ of the uniform distribution of the dithering
800
+ vectors must be carefully tuned. Developing a corresponding method is thus desirable. We defer
801
+ this open point to future work.
802
+
803
+
804
+ 14
805
+ C. APS-based channel covariance estimation
806
+ Let us now revisit the problem of estimating the channel covariance based on an estimator
807
+ Ĉ_y of C_y. Previously we used the basic estimator for the channel covariance given by
+ Ĉ_h = Ĉ_y − N_0 I_M,    (40)
814
+ which may not necessarily be a positive semi-definite matrix. To heuristically improve the
815
+ performance of this basic estimator, we further exploit the angle domain and apply a commonly
816
+ considered APS-based covariance fitting to estimate the APS and subsequently enhance the
817
+ estimate of the channel covariance, see e.g., [9, 47–49]. Specifically, assuming that the visible
818
+ antennas of all scattering clusters are known at the BS, i.e., the BS has the exact knowledge of
819
+ the selection matrices {S_i}_{i=1}^{L}. Using G Dirac delta functions that are equally spaced in the angle domain with AoAs {θ_i}_{i=1}^{G} as dictionary, the channel covariance matrix can be approximated as
823
+ Ĉ_h(γ̃) = Ã diag(γ̃_{L+1}) Ã^H + ∑_{i=1}^{L} S_i Ã diag(γ̃_i) Ã^H S_i^H,    (41)
+ where Ã := [a(θ_1), . . . , a(θ_G)] and we define γ̃ ∈ R_+^{G̃} := [γ̃_1^T, . . . , γ̃_{L+1}^T]^T as the non-negative coefficients to be estimated, where G̃ = (L + 1)G. Then, we estimate the coefficients by fitting the parametric channel covariance Ĉ_h(γ̃) to the basic estimator Ĉ_h in terms of the Frobenius norm. We denote the estimated coefficients by
+ γ̃* = arg min_{γ̃ ∈ R_+^{G̃}} ‖Ĉ_h(γ̃) − Ĉ_h‖_F^2.    (42)
+ By defining b := vec(Ĉ_h) and B := [B^l_1, . . . , B^l_L, B^c], where
+ B^c := [ vec(a(θ_1)a(θ_1)^H), . . . , vec(a(θ_G)a(θ_G)^H) ],    (43)
+ B^l_i := [ vec(S_i a(θ_1)a(θ_1)^H S_i^H), . . . , vec(S_i a(θ_G)a(θ_G)^H S_i^H) ],  ∀i ∈ [L],    (44)
+ we see that γ̃* can be obtained by solving the nonnegative least squares (NNLS) problem
+ min_{γ̃ ∈ R_+^{G̃}} ‖B γ̃ − b‖_2^2.    (45)
+ This problem can be efficiently solved using a variety of convex optimization techniques (see, e.g., [50, 51]). In our simulations, we use the novel MATLAB NNLS solver [26], which is much faster and more stable than the built-in MATLAB function lsqnonneg, especially under the large problem dimensions considered here. With the estimated APS γ̃* in hand, we obtain Ĉ*_h := Ĉ_h(γ̃*) as the final estimator of the channel covariance.
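+ To illustrate the fitting step (43)–(45), the following sketch builds the dictionary and solves the NNLS with scipy.optimize.nnls instead of the MATLAB solver [26]; the grid size, the real-valued stacking of the complex dictionary, and all variable names are our own assumptions, and a dedicated large-scale solver would be preferable at the dimensions used in the paper.
+ ```python
+ import numpy as np
+ from scipy.optimize import nnls
+ 
+ def steering(theta_deg, M):
+     return np.exp(1j * np.pi * np.arange(M) * np.sin(np.deg2rad(theta_deg)))
+ 
+ def aps_nnls_fit(Ch_basic, selections, G=60):
+     # Ch_basic: basic covariance estimate (40); selections: list of diagonal selection matrices S_i
+     M = Ch_basic.shape[0]
+     grid = np.linspace(-90, 90, G)                       # uniform angular grid (dictionary AoAs)
+     atoms = [np.outer(steering(t, M), steering(t, M).conj()) for t in grid]
+     cols = []
+     for S in selections:                                 # local-cluster dictionaries B^l_i of (44)
+         cols += [(S @ A @ S.conj().T).ravel() for A in atoms]
+     cols += [A.ravel() for A in atoms]                   # common-cluster dictionary B^c of (43)
+     B = np.stack(cols, axis=1)
+     b = Ch_basic.ravel()
+     B_ri = np.vstack([B.real, B.imag])                   # stack real/imag so the solver sees a real LS problem
+     b_ri = np.concatenate([b.real, b.imag])
+     gamma, _ = nnls(B_ri, b_ri)                          # solve (45): min ||B*gamma - b||^2 over gamma >= 0
+     Ch_fit = (B @ gamma).reshape(M, M)                   # refined estimate C_h(gamma*) as in (41)
+     return 0.5 * (Ch_fit + Ch_fit.conj().T), gamma
+ ```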
883
+
884
+ 15
885
+ IV. DATA TRANSMISSION RATE
886
+ In the UL data transmission phase, we assume K users simultaneously transmit their data.
887
+ The received signal at the BS before quantization is given by
888
+ yD = Hs + nD,
889
+ (46)
890
+ where H ∈ CM×K is the channel matrix of K users, s ∼ CN(0, IK) is the vector of the data
891
+ signals of K users and nD ∼ CN(0, N0IM) is additive white Gaussian noise. Note that we use
892
+ the subscript ‘D’ to indicate the signal during data transmission. After the one-bit non-dithered
893
+ quantization, the quantized signal is given by
894
+ rD = Q(yD) = ADHs + ADnD + qD,
895
+ (47)
896
+ where A_D = √(2/π) ( diag(H H^H + N_0 I_M) )^{−1/2} is the Bussgang gain calculated similarly as in (16) and
901
+ (20). By treating interference as noise, we apply a linear receiver WH to separate the quantized
902
+ signal into K streams as
903
+ �s = WHrD = WHADHs + WH(ADnD + qD).
904
+ (48)
905
+ The data signal of the k-th user is then decoded by the k-th element of ŝ as
+ ŝ_k = w_k^H A_D h_k s_k + w_k^H ∑_{i≠k}^{K} A_D h_i s_i + w_k^H ( A_D n_D + q_D ),    (49)
915
+ where wk and hk are the k-th columns of the W and H, respectively. The covariance matrix of
916
+ the statistically equivalent quantizer noise qD is given by
917
+ C_{q_D} = C_{r_D} − A_D C_{y_D} A_D^H,    (50)
920
+ where CyD = HHH+N0IM and the covariance matrix CrD of rD can be obtained via the arcsine
921
+ law as CrD = Parcsine(CyD). Note that the quantizer noise qD is non-Gaussian. Considering the
922
+ worst case by treating qD as Gaussian distributed with the same covariance matrix CqD, we can
923
+ obtain a lower bound of the optimistic ergodic sum rate4 of K users [53], given as
924
+ R_sum = ∑_{k=1}^{K} E[ log_2( 1 + |w_k^H A_D h_k|^2 / ( ∑_{i≠k}^{K} |w_k^H A_D h_i|^2 + N_0 ‖w_k^H A_D‖_2^2 + w_k^H C_{q_D} w_k ) ) ].    (51)
944
+ Now, we consider the design of the linear receiver W. Using the proposed plug-in channel
945
+ estimator, the channel matrix H is estimated as �H. Then, the conventional MRC and ZF receivers
946
+ 4Note that the “optimistic ergodic sum rate” is an upper bound assuming Gaussian signaling since it assumes that the useful
947
+ signal coefficient and the interference variance are perfectly known [52].
948
+
949
+ 16
950
+ are given as
951
+ W_MRC^H = Ĥ^H,    (52)
+ W_ZF^H = ( Ĥ^H Ĥ )^{−1} Ĥ^H.    (53)
960
+ Note that the conventional MRC and ZF receivers might not perform so well due to the quantized
961
+ signal. Therefore, we propose a BLMMSE receiver that takes directly into account the quantized
962
+ signal by considering its Bussgang decomposition, which is expected to yield better performance.
963
+ Specifically, the BLMMSE receiver is given as
964
+ W_BLM^H = C_{s r_D} C_{r_D}^{−1},    (54)
+ where
+ C_{s r_D} = E[ s r_D^H ] = H^H A_D^H,  C_{r_D} = E[ r_D r_D^H ] = P_arcsine(C_{y_D}).    (55)
983
+ In practice, using the estimated channel matrix Ĥ, the BLMMSE receiver W_BLM^H is given as
+ W_BLM^H = Ĥ^H Â_D^H ( P_arcsine(Ĉ_{y_D}) )^{−1},    (56)
+ where Â_D = √(2/π) ( diag(Ĥ Ĥ^H + N_0 I_M) )^{−1/2} and Ĉ_{y_D} = Ĥ Ĥ^H + N_0 I_M.
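+ A compact numpy sketch of the combiners (52)–(56) and the per-realization rate term inside (51); it treats the channel estimate H_hat as given, redefines the arcsine-law helper for self-containedness, and is only an illustration of the formulas (the expectation in (51) would still be averaged over channel realizations).
+ ```python
+ import numpy as np
+ 
+ def arcsine_law(C):
+     d = 1.0 / np.sqrt(np.real(np.diag(C)))
+     D = np.outer(d, d)
+     return (2 / np.pi) * (np.arcsin(D * C.real) + 1j * np.arcsin(D * C.imag))
+ 
+ def receivers(H_hat, N0):
+     # MRC (52), ZF (53), and plug-in BLMMSE (56) combiners, each returned as W^H (K x M)
+     M = H_hat.shape[0]
+     W_mrc = H_hat.conj().T
+     W_zf = np.linalg.solve(H_hat.conj().T @ H_hat, H_hat.conj().T)
+     Cy_hat = H_hat @ H_hat.conj().T + N0 * np.eye(M)
+     A_hat = np.sqrt(2 / np.pi) * np.diag(1.0 / np.sqrt(np.real(np.diag(Cy_hat))))
+     W_blm = H_hat.conj().T @ A_hat.conj().T @ np.linalg.inv(arcsine_law(Cy_hat))
+     return W_mrc, W_zf, W_blm
+ 
+ def sum_rate(WH, H, N0):
+     # lower bound (51) for one channel realization, with the true H and Bussgang gain A_D
+     M, K = H.shape
+     Cy = H @ H.conj().T + N0 * np.eye(M)
+     A = np.sqrt(2 / np.pi) * np.diag(1.0 / np.sqrt(np.real(np.diag(Cy))))
+     Cq = arcsine_law(Cy) - A @ Cy @ A.conj().T            # quantizer-noise covariance (50)
+     G = WH @ A @ H                                        # effective gains w_k^H A_D h_i
+     rate = 0.0
+     for k in range(K):
+         sig = np.abs(G[k, k]) ** 2
+         interf = np.sum(np.abs(G[k, :]) ** 2) - sig
+         noise = N0 * np.linalg.norm(WH[k, :] @ A) ** 2 + np.real(WH[k, :] @ Cq @ WH[k, :].conj())
+         rate += np.log2(1 + sig / (interf + noise))
+     return rate
+ ```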
1000
+ V. SIMULATION RESULTS
1001
+ In our simulation, we take M = 256 antennas at the BS in ULA. The channel consists of Lc =
1002
+ 3 multipaths in common clusters and L = 2 local clusters, where each local cluster is composed
1003
+ of three multipaths, i.e., L^l_i = 3, i = 1, 2. The first local cluster is visible to the first quarter of antennas and the second local cluster is visible to the last quarter of antennas, i.e., M^l_1 = {1, 2, . . . , M/4}, M^l_2 = {3M/4 + 1, 3M/4 + 2, . . . , M}. The AoAs of the common clusters and the first and second local clusters are uniformly and randomly generated from [−60, 60], [−60, 0] and [0, 60] degrees, respectively, i.e., θ^c_i ∼ U(−60, 60), ∀i ∈ [L_c], θ^l_{1,j} ∼ U(−60, 0), ∀j ∈ [L^l_1], θ^l_{2,j} ∼ U(0, 60), ∀j ∈ [L^l_2]. The APS of all multipaths are randomly generated with the constraints P^c = 0.3, P^l_1 = 0.7, and P^l_2 = 0.5. Note that this setting satisfies the assumption of max(diag(C_h)) =
1023
+ 1. The SNR is set to 10dB (equivalently, N0 = 0.1). We consider three different estimators �Cy
1024
+ for Cy: the estimator (31), based on non-dithered one-bit quantized samples, the estimator (33),
1025
+ based on dithered one-bit quantized samples, and finally, as a reference, the sample covariance
1026
+ matrix of the unquantized samples. In the first two cases with quantized samples, we use the
1027
+ estimator to produce the APS-based estimator Ĉ*_h of the channel covariance C_h, as is detailed
1029
+ in Section III-C. All results presented below are averaged over 10 random channel geometry
1030
+ realizations, each with 20 groups of N i.i.d. random channel realizations.
1031
+
1032
+ 17
1033
+ A. Channel covariance estimation
1034
+ Given an estimator Ĉ*_h of the channel covariance matrix, we first evaluate it in terms of the normalized Frobenius norm error, which is given by
+ E_NF = E[ ‖C_h − Ĉ*_h‖_F^2 / ‖C_h‖_F^2 ].    (57)
1050
+ The numerical results under different values of λ are shown in Fig. 3a. It is seen that the choice
1051
+ of λ significantly influences the results of dithered quantization. A larger number of samples N
1052
+ can result in a more robust covariance estimation in terms of tuning λ. Moreover, the results
1053
+ under various numbers of samples N are shown in Fig. 3b, where 3 different choices of λ for
1054
+ dithered quantization are included. It is observed that the Frobenius norm errors of the dithered
1055
+ estimators are much smaller than the ones of the non-dithered estimators over a large range of
1056
+ number of samples N. It is also seen that by finding a proper λ, the estimation performance
1057
+ can be much improved. Furthermore, in the regime of a small number of samples (e.g., when
1058
+ N < 100 in our case), the results for the estimator based on dithered quantized samples are even
1059
+ better than the sample covariance of the unquantized samples. This shows that our algorithm
1060
+ has a benefit in practical cases with limited number of samples.
1061
+ B. Channel vector estimation via BLMMSE
1062
+ Next, we numerically evaluate the BLMMSE-based channel vector estimator in terms of the
1063
+ normalized MSE, which is given by
1064
+ E_NMSE = E[ ‖h − ĥ‖_2^2 ] / tr(C_h).    (58)
1074
+ Given an estimated channel covariance, we calculate the NMSE with 100 i.i.d. random channel
1075
+ realizations. The averaged results under various λ and various N are depicted in Fig. 4a and
1076
+ Fig. 4b, respectively, where the lower bound is given by the BLMMSE estimator obtained using
1077
+ the true channel covariance. It is observed again that the choice of λ significantly influences the
1078
+ estimation performance of the dithered case. By applying a proper λ (e.g., λ = 1 in Fig. 4b)
1079
+ the channel estimate can be considerably improved compared to the non-dithered case for a
1080
+ large range of number of samples. Moreover, it is seen from Figs. 3a and 4a that the trend of
1081
+ tuning λ in BLMMSE based channel estimation is different from the trend in channel covariance
1082
+ estimation. In the range of 1 ≤ λ ≤ 2, the results of BLMMSE-based channel estimation are no
1083
+
1084
+ 18
1085
+ longer as robust as the results in covariance estimation even under large N. The optimal choices
1086
+ of λ for the two estimation problems are also different, e.g., λ⋆ ≈ 1.5 in Fig. 3a and λ⋆ ≈ 1.2
1087
+ in Fig. 4a under N = 500.
1088
+ C. Ergodic sum rate evaluation
+ Finally, we evaluate the proposed scheme in terms of the ergodic sum rate given in (51). We
+ test with K = 4 users and assume that the channel geometry of all users follows the setting
+ described at the beginning of this section. For each channel estimate we test the MRC, ZF, and
+ BLMMSE receivers given in (52), (53), and (56), respectively. As in the previous part, besides
+ the non-dithered and dithered schemes we also provide results based on the true channel
+ covariance matrix. However, unlike for the channel MSE criterion used in the previous part, the
+ use of the true covariance is not guaranteed to yield a better sum rate, as a channel estimator
+ with smaller MSE does not necessarily yield a higher sum rate. Furthermore, we provide results
+ based on the true channel vectors as upper bounds.
+ We first present the resulting sum rates under various λ with N = 50 samples for covariance
+ estimation and BLMMSE channel estimation in Fig. 5a. It is first observed that the BLMMSE
+ receiver performs much better than the ZF and MRC receivers. This is expected since the
+ BLMMSE receiver takes the quantization into account, whereas the conventional ZF and MRC
+ receivers do not. Next, we observe that the results based on the true covariance do not always
+ provide the largest sum rate, as previously explained. Specifically, among the results with ZF
+ receivers (the dashed lines), the non-dithered one is the best. Since the ZF and MRC receivers are
+ designed for non-quantized received signals and perform much worse than the BLMMSE
+ receiver, we now focus on the results of the BLMMSE receiver, which is also individually
+ depicted in Fig. 5b together with one more case of N = 500 samples. It is noticed again that a larger
+ number of samples N makes the results with dithering not only better in terms of the sum rate
+ but also more robust against λ. From Fig. 5b it is seen that the highest sum rate obtained by
+ the proposed scheme with dithering (under N = 500 and λ ≈ 0.6) is very close to the result
+ based on the true covariance matrix. This shows the advantage of the proposed scheme for both
+ channel estimation and multi-user receivers under one-bit quantization.
+ Finally, we focus on the influence of the number of samples N. The sum rates under various
+ N of MRC, ZF, and BLMMSE are depicted in Fig. 6a, and those of only the BLMMSE receiver with
+ three different choices of λ for the dithered case are depicted in Fig. 6b. In Fig. 6a we see a similar
+ behavior as before: the BLMMSE receiver produces better sum rates than the ZF and MRC receivers
+ over a large range of N. It is seen from Fig. 6b that under the best λ⋆ ≈ 0.6 the proposed scheme with
+ dithering produces sum rates comparable to the results based on the true channel covariance when
+ N ≥ 100. It is additionally observed from Fig. 6b that as the number of samples N increases,
+ the difference between the results obtained with different λ decreases. This indicates again
+ that under larger N the results with dithering are more robust to variations in λ.
+ [Fig. 3: Normalized Frobenius-norm error E_NF of the channel covariance estimate under various λ in (a)
+ and various numbers of i.i.d. samples N in (b); the curves compare the unquantized sample covariance with
+ the non-dithered and dithered one-bit estimators.]
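+ For reference, the conventional combiners compared above can be sketched generically as follows. This is
+ not a reproduction of the receiver expressions (52), (53), and (56), which are defined earlier in the paper;
+ it only illustrates standard MRC and ZF combining from an estimated channel matrix Ĥ (M antennas by K
+ users), assumed available from the channel estimation step. The BLMMSE receiver, which in addition
+ accounts for the one-bit quantization, is omitted here.
+ import numpy as np
+ 
+ def mrc_combiner(H_hat):
+     # maximum-ratio combining: match each user's estimated channel
+     return H_hat.conj().T              # K x M
+ 
+ def zf_combiner(H_hat):
+     # zero-forcing: pseudo-inverse of the estimated channel matrix
+     return np.linalg.pinv(H_hat)       # K x M
+ 
+ # Applying W = mrc_combiner(H_hat) or W = zf_combiner(H_hat) to a received vector r via W @ r
+ # yields the per-user symbol estimates used in the sum rate evaluation.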
+ VI. CONCLUSION AND DISCUSSION
+ In this work, we proposed a plug-in channel estimator for massive MIMO systems with
+ spatially non-stationary channels and one-bit quantizers. We analyzed the quantized signal via
+ the Bussgang decomposition and characterized the distortion produced by using an estimated, rather
+ than the true, channel covariance in the construction of the BLMMSE estimator of the channel.
+ To obtain an estimate of the covariance of the spatially non-stationary channel, we introduced
+ a channel covariance estimator based on dithered quantized samples and theoretically analyzed
+ its performance. We further enhanced this estimator using an APS-based NNLS solution. Our
+ numerical results showed large performance gains of the proposed scheme with dithering in
+ terms of both channel vector and covariance estimation. Finally, we proposed a BLMMSE-
+ based receiver tailored to one-bit quantized data signals for the multi-user data transmission
+ phase and showed in numerical experiments that it outperforms the conventional MRC and ZF
+ receivers in terms of the resulting ergodic sum rate.
+ [Fig. 4: Normalized MSE E_NMSE of the channel vectors via BLMMSE under various λ in (a) and various
+ numbers of i.i.d. samples N in (b); the BLMMSE estimator based on the true covariance serves as a lower bound.]
+ [Fig. 5: Ergodic sum rate of K = 4 users under various λ via MRC, ZF and BLMMSE receivers in (a) and
+ an enlarged view of the results via the BLMMSE receiver in (b).]
+ [Fig. 6: Ergodic sum rate of K = 4 users under various numbers of i.i.d. samples N via MRC, ZF and
+ BLMMSE receivers in (a) and an enlarged view of the results via the BLMMSE receiver in (b).]
+ There are two important aspects of our work that can be improved. First, we observed in
+ the numerical experiments that the dithering hyperparameter λ influences the channel estimation
+ significantly. Even though this influence was observed to diminish as the sample size increases,
+ it is still of significant interest to develop a data-driven method to optimally tune λ. Second, in
+ the proposed APS-based channel covariance estimation scheme, the visibility of local clusters is
+ assumed to be known at the BS. In practice, however, the visibility of local clusters is usually
+ not easy to estimate. It is therefore desirable to develop a scheme without the assumption of
+ known visibility of local clusters. We will investigate these two questions in future work.
+ APPENDIX A
+ PROOF OF LEMMA 1
+ Recall that
+ ĥ_BLM = C_hr C_r^{-1} r = C_h A^H C_r^{-1} r = (C_y − N_0 I) A^H C_r^{-1} r    (59)
+ and
+ ĥ = (Ĉ_y − N_0 I) Â^H Ĉ_r^{-1} r,    (60)
+ where Â and Ĉ_r are defined like A and C_r with C_y replaced by Ĉ_y. Let us abbreviate
+ ĥ_BLM = M r   and   ĥ = M̂ r.    (61)
+ Consider α, β, γ > 0 such that min_{i∈[M]} |[C_y]_{i,i}| ≥ α, λ_min(C_r) ≥ γ, and
+ |[diag(C_y)^{-1/2} C_y diag(C_y)^{-1/2}]_{i,j}| ≤ 1 − β,   for all i ≠ j.    (62)
+ In particular, θ ≤ min{α, β, γ}. We start by writing
+ E[ ||ĥ − ĥ_BLM||_2^2 ] = E[ tr( ĥ_BLM (ĥ_BLM)^H − ĥ_BLM ĥ^H − ĥ (ĥ_BLM)^H + ĥ ĥ^H ) ]    (63)
+ = tr( M C_r (M − M̂)^H + M̂ C_r (M̂ − M)^H )    (64)
+ = ⟨ M C_r − M̂ C_r, M − M̂ ⟩_F    (65)
+ = 2 ⟨ M C_r, M − M̂ ⟩_F − ⟨ (M + M̂) C_r, M − M̂ ⟩_F    (66)
+ ≤ 2 ||M C_r||_F ||M − M̂||_F + ||C_r|| ||M − M̂||_F^2.    (67)
+ Observe that ||A|| ≤ 1/√α by assumption, such that
+ ||M C_r||_F = ||C_h A^H||_F ≤ ||C_h||_F ||A^H|| ≤ (1/√α) ||C_h||_F.    (68)
+ Moreover, using that ||arcsin(B)|| ≤ (π/2) ||B|| if ||B||_∞ ≤ 1 (see [37, Supplementary Material, Eq. (4)]), we find
+ ||C_r|| ≤ ||diag(C_y)^{-1/2} Re(C_y) diag(C_y)^{-1/2}|| + ||diag(C_y)^{-1/2} Im(C_y) diag(C_y)^{-1/2}||    (69)
+ ≤ 2 ||diag(C_y)^{-1/2}|| ||C_y|| ||diag(C_y)^{-1/2}|| ≤ (2/α) ||C_y||.    (70)
+ We conclude that
+ E[ ||ĥ − ĥ_BLM||_2^2 ] ≲ α^{-1/2} ||C_h||_F ||M − M̂||_F + α^{-1} ||C_y|| ||M − M̂||_F^2.    (71)
+ We will now show that
+ ||M − M̂||_F ≤ κ ≤ α^{1/2} ||C_h||_F / ||C_y||,    (72)
+ so that we obtain (κ/√α) ||C_h||_F as a final estimate.
+ We start by estimating
+ ||M − M̂||_F = || (C_y − N_0 I) A^H C_r^{-1} − (Ĉ_y − N_0 I) Â^H Ĉ_r^{-1} ||_F    (73)
+ ≤ ||C_y − Ĉ_y||_F ||A|| ||C_r^{-1}|| + ||Ĉ_y − N_0 I||_F ||A − Â|| ||C_r^{-1}||    (74)
+ + ||Ĉ_y − N_0 I|| ||Â|| ||C_r^{-1} − Ĉ_r^{-1}||_F.    (75)
+ The first term is clearly bounded by γ^{-1} α^{-1/2} ε_F. To estimate the second term, we note that
+ ||Ĉ_y − N_0 I||_F ≤ ||C_h||_F + ||Ĉ_y − C_y||_F ≤ ||C_h||_F + ε_F   and   ||Ĉ_y − N_0 I|| ≤ ||C_h|| + ε_F.    (76)
+ Furthermore, we use that
+ Z_1^{-1} − Z_2^{-1} = Z_1^{-1} (Z_2 − Z_1) Z_2^{-1}    (77)
+ for any invertible Z_1, Z_2 of the same dimensions. This yields
+ ||A − Â|| = (2/π) || diag(Ĉ_y)^{-1/2} ( diag(C_y)^{1/2} − diag(Ĉ_y)^{1/2} ) diag(C_y)^{-1/2} ||    (78)
+ ≤ (2/π) ||diag(Ĉ_y)^{-1/2}|| || diag(C_y)^{1/2} − diag(Ĉ_y)^{1/2} || ||diag(C_y)^{-1/2}||.    (79)
+ By assumption, we have
+ ||diag(C_y)^{-1/2}|| = ( min_i [C_y]_{i,i} )^{-1/2} ≤ α^{-1/2}.    (80)
+ Moreover, since ||Ĉ_y − C_y||_∞ ≤ α/2 by (26), we find
+ min_i [Ĉ_y]_{i,i} ≥ min_i [C_y]_{i,i} − ||Ĉ_y − C_y||_∞ ≥ α/2    (81)
+ and so
+ ||diag(Ĉ_y)^{-1/2}|| ≤ (2/α)^{1/2}.    (82)
+ Note that this also implies that ||Â|| ≲ 1/√α. Using that |√x − √y| ≤ |x − y|/√c if x ≥ c > 0, y ≥ 0, we find
+ || diag(C_y)^{1/2} − diag(Ĉ_y)^{1/2} || ≤ α^{-1/2} ||C_y − Ĉ_y||_∞ = α^{-1/2} ε_∞,    (83)
+ and hence
+ ||A − Â|| ≤ (4/π) α^{-3/2} ε_∞.    (84)
+ Let us finally estimate the last term on the right hand side of (73). Write c_ij = [C_y]_{i,j}, ĉ_ij = [Ĉ_y]_{i,j} and observe that
+ | ĉ_ij/√(ĉ_ii ĉ_jj) − c_ij/√(c_ii c_jj) |
+ ≤ | (ĉ_ij − c_ij)/√(ĉ_ii ĉ_jj) | + |c_ij| (1/√ĉ_ii) | 1/√ĉ_jj − 1/√c_jj | + |c_ij| (1/√ĉ_jj) | 1/√ĉ_ii − 1/√c_ii |    (85)
+ ≲ (1/α) ||Ĉ_y − C_y||_∞ + ||C_y||_∞ (1/α^2) ||Ĉ_y − C_y||_∞    (86)
+ ≲ ||C_y||_∞ (1/α^2) ||Ĉ_y − C_y||_∞ ≤ β/2,    (87)
+ as
+ ε_∞ ≲ β α^2 / ||C_y||_∞.    (88)
+ By (62), this implies that
+ |[diag(Ĉ_y)^{-1/2} Ĉ_y diag(Ĉ_y)^{-1/2}]_{i,j}| ≤ 1 − β/2,   for all i ≠ j.    (89)
+ Clearly,
+ | arcsin(x) − arcsin(y) | ≤ L_β |x − y|    (90)
+ for all x, y ∈ (−1 + β/2, 1 − β/2), where
+ L_β = sup_{0 ≤ z < 1 − β/2} (1 − z^2)^{-1/2} = ( 1 − (1 − β/2)^2 )^{-1/2} ≤ (2/β)^{1/2}.    (91)
+ Together with (62) and (89) this yields
+ ||C_r − Ĉ_r||_F ≲ β^{-1/2} || diag(Ĉ_y)^{-1/2} Re(Ĉ_y) diag(Ĉ_y)^{-1/2} − diag(C_y)^{-1/2} Re(C_y) diag(C_y)^{-1/2} ||_F
+ + β^{-1/2} || diag(Ĉ_y)^{-1/2} Im(Ĉ_y) diag(Ĉ_y)^{-1/2} − diag(C_y)^{-1/2} Im(C_y) diag(C_y)^{-1/2} ||_F.    (92)
+ Now observe that
+ || diag(Ĉ_y)^{-1/2} Re(Ĉ_y) diag(Ĉ_y)^{-1/2} − diag(C_y)^{-1/2} Re(C_y) diag(C_y)^{-1/2} ||_F
+ ≤ || diag(Ĉ_y)^{-1/2} − diag(C_y)^{-1/2} || ||Ĉ_y||_F ||diag(Ĉ_y)^{-1/2}||
+ + ||diag(C_y)^{-1/2}|| ||Ĉ_y − C_y||_F ||diag(Ĉ_y)^{-1/2}||
+ + ||diag(C_y)^{-1/2}|| ||C_y||_F || diag(Ĉ_y)^{-1/2} − diag(C_y)^{-1/2} ||    (93)
+ ≲ α^{-2} ||C_y||_F ε_∞ + (α^{-2} ε_∞ + α^{-1}) ε_F    (94)
+ and analogously,
+ || diag(Ĉ_y)^{-1/2} Im(Ĉ_y) diag(Ĉ_y)^{-1/2} − diag(C_y)^{-1/2} Im(C_y) diag(C_y)^{-1/2} ||_F ≲ α^{-2} ||C_y||_F ε_∞ + (α^{-2} ε_∞ + α^{-1}) ε_F.    (95)
+ Hence,
+ ||C_r − Ĉ_r||_F ≲ β^{-1/2} α^{-2} ||C_y||_F ε_∞ + β^{-1/2} (α^{-2} ε_∞ + α^{-1}) ε_F.    (96)
+ By our assumptions on ε_∞ and ε_F, the right hand side is bounded by γ/2, and hence the assumption ||C_r^{-1}|| ≤ γ^{-1} implies that ||Ĉ_r^{-1}|| ≤ 2γ^{-1}. Using now again (77) we finally arrive at
+ ||C_r^{-1} − Ĉ_r^{-1}||_F ≲ β^{-1/2} γ^{-2} ( α^{-2} ||C_y||_F ε_∞ + (α^{-2} ε_∞ + α^{-1}) ε_F ).    (97)
+ Combining all our estimates in (73), we find
+ ||M − M̂||_F ≲ γ^{-1} α^{-1/2} ε_F + γ^{-1} (||C_h||_F + ε_F) α^{-3/2} ε_∞
+ + (||C_h|| + ε_F) α^{-1/2} β^{-1/2} γ^{-2} ( α^{-2} ||C_y||_F ε_∞ + (α^{-2} ε_∞ + α^{-1}) ε_F ).    (98)
+ Since
+ ε_∞ ≤ min{ ε_F/||C_y||_F , 1 },    (99)
+ we can estimate the right hand side by
+ κ := c α^{-5/2} β^{-1/2} γ^{-2} max{1, ||C_h||} ε_F,    (100)
+ for an absolute constant c > 0. Clearly,
+ κ ≤ α^{1/2} ||C_h||_F / ||C_y||    (101)
+ by our assumption on ε_F, which completes the proof.
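+ To make the objects in (59)-(61) concrete, the following sketch builds the plug-in estimate
+ ĥ = (Ĉ_y − N_0 I) Â^H Ĉ_r^{-1} r from an estimated covariance Ĉ_y. The closed forms used below for Â
+ (the Bussgang gain sqrt(2/π) diag(Ĉ_y)^{-1/2}) and for Ĉ_r (the complex arcsine law) are the standard
+ one-bit expressions and are assumed here, since their definitions appear earlier in the paper and not in
+ this excerpt; the function name and the noise level N0 are illustrative.
+ import numpy as np
+ 
+ def plugin_blmmse(Cy_hat, r, N0):
+     # assumed Bussgang gain: A_hat = sqrt(2/pi) * diag(Cy_hat)^(-1/2)
+     d = np.real(np.diag(Cy_hat))
+     D_inv_sqrt = np.diag(1.0 / np.sqrt(d))
+     A_hat = np.sqrt(2.0 / np.pi) * D_inv_sqrt
+     # assumed complex arcsine law for the covariance of the one-bit output
+     K = D_inv_sqrt @ Cy_hat @ D_inv_sqrt
+     Cr_hat = (2.0 / np.pi) * (np.arcsin(np.clip(K.real, -1, 1))
+                               + 1j * np.arcsin(np.clip(K.imag, -1, 1)))
+     # plug-in BLMMSE channel estimate, cf. (60)
+     return (Cy_hat - N0 * np.eye(Cy_hat.shape[0])) @ A_hat.conj().T @ np.linalg.solve(Cr_hat, r)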
+ APPENDIX B
+ PROOF OF THEOREM 1
+ In the proof of Theorem 1 we will use the following lemmas. The first one bounds the bias
+ of (34) in terms of λ.
+ Lemma 2: Let S > 0. There exist constants c_1, c_2 > 0 depending only on S such that the following
+ holds. Let y ∈ C^M be a mean-zero random vector with covariance matrix E[y y^H] = C_y
+ and S-subgaussian coordinates. Let λ > 0 and let r^Re = sign(Re(y) + τ^Re), r^Im = sign(Im(y) + τ^Im),
+ r̃^Re = sign(Re(y) + τ̃^Re), and r̃^Im = sign(Im(y) + τ̃^Im), where τ^Re, τ^Im, τ̃^Re, τ̃^Im are independent
+ and uniformly distributed in [−λ, λ]^M and independent of y. Abbreviate r = r^Re + j r^Im
+ and r̃ = r̃^Re + j r̃^Im. Then,
+ || λ^2 E[r r̃^H] − C_y ||_∞ ≤ c_1 (λ^2 + ||C_y||_∞) e^{−c_2 λ^2 / ||C_y||_∞}.    (102)
+ Proof: The proof of this lemma is a straightforward extension of [37, Lemma 17] to the
+ complex domain. We include it for the convenience of the reader. First note that
+ || λ^2 E[r r̃^H] − C_y ||_∞
+ = || λ^2 E[(r^Re + j r^Im)(r̃^Re + j r̃^Im)^H] − E[(Re(y) + j Im(y))(Re(y) + j Im(y))^H] ||_∞
+ ≤ || λ^2 E[r^Re (r̃^Re)^T] − E[Re(y) Re(y)^T] ||_∞ + || λ^2 E[r^Re (r̃^Im)^T] − E[Re(y) Im(y)^T] ||_∞
+ + || λ^2 E[r^Im (r̃^Re)^T] − E[Im(y) Re(y)^T] ||_∞ + || λ^2 E[r^Im (r̃^Im)^T] − E[Im(y) Im(y)^T] ||_∞.    (103)
+ Since y has S-subgaussian coordinates, we get from (35) that ||[Re(y)]_i||_{ψ2}, ||[Im(y)]_i||_{ψ2} ≤
+ S ||C_y||_∞^{1/2}, for any i ∈ [M], where ||·||_{ψ2} denotes the subgaussian norm. Applying [37, Lemma
+ 17] for U = [Re(y)]_i and V = [Re(y)]_j yields
+ | λ^2 E[ sign([Re(y)]_i + [τ^Re]_i) · sign([Re(y)]_j + [τ̃^Re]_j) ] − E[ [Re(y)]_i [Re(y)]_j ] |
+ ≲ (λ^2 + S^2 ||C_y||_∞) e^{−c λ^2 / (S^2 ||C_y||_∞)}.    (104)
+ Since this holds for any choice of i, j ∈ [M], the first term on the right-hand side of (103)
+ satisfies the claimed bound. The three other terms can be treated in the same way, such that our
+ claim follows.
+ The second lemma is a simple concentration inequality that applies to dithered samples of real
+ distributions.
+ Lemma 3: There exist absolute constants c_1, c_2 > 0 such that the following holds. Let y, ỹ ∈
+ R^M be random vectors. Let y_1, ..., y_N i.i.d. ∼ y, let ỹ_1, ..., ỹ_N i.i.d. ∼ ỹ, and let τ_1, ..., τ_N, τ̃_1, ..., τ̃_N
+ be independent and uniformly distributed in [−λ, λ], for λ > 0. Define r_k = sign(y_k + τ_k) and
+ r̃_k = sign(ỹ_k + τ̃_k). If N ≥ c_1 log(M), then
+ Pr( || (λ^2/N) Σ_{k=1}^N ( r_k r̃_k^T − E[r_k r̃_k^T] ) ||_∞ ≥ sqrt( λ^4 ( c_1 log(M)/N + t ) ) ) ≤ 2 e^{−c_2 N t}.    (105)
+ In particular, the claim holds if y = ỹ and y_i = ỹ_i, for all i ∈ [N].
+ Proof: Write R^k_{i,j} = [r_k]_i [r̃_k]_j for i, j ∈ [M]. Since |R^k_{i,j} − E[R^k_{i,j}]| ≤ 2 for all i, j, k, the
+ bound is trivial for t ≥ 4. Moreover, by Bernstein's inequality for bounded random variables
+ (see, e.g., [54, Theorem 2.8.4]), we find for any u ≤ 8λ^2
+ Pr( (1/N) | Σ_{k=1}^N λ^2 ( R^k_{i,j} − E[R^k_{i,j}] ) | ≥ u ) ≤ 2 e^{−c min{ N^2 u^2 / σ^2_{i,j} , N u / (2λ^2) }}    (106)
+ ≤ 2 e^{−c N min{ u^2/λ^4 , u/λ^2 }} ≤ 2 e^{−c_2 N u^2 / λ^4},    (107)
+ as
+ σ^2_{i,j} := Σ_{k=1}^N λ^4 E[ ( R^k_{i,j} − E[R^k_{i,j}] )^2 ] = Σ_{k=1}^N λ^4 ( E[(R^k_{i,j})^2] − (E[R^k_{i,j}])^2 ) ≤ λ^4 N.    (108)
+ Hence, for any given t < 4 we can set u = sqrt( λ^4 ( c_1 log(M)/N + t ) ) and note that u ≤ 8λ^2 as
+ N ≥ c_1 log(M). By applying the union bound over all M^2 entries we obtain the result.
+ Proof of Theorem 1: By the triangle inequality,
+ || Ĉ^d_y − C_y ||_∞ ≤ || Ĉ^d_y − E[Ĉ^d_y] ||_∞ + || E[Ĉ^d_y] − C_y ||_∞.    (109)
+ Write r = r^Re + j r^Im and r̃ = r̃^Re + j r̃^Im, where r^Re = sign(Re(y) + τ^Re), r^Im = sign(Im(y) + τ^Im),
+ r̃^Re = sign(Re(y) + τ̃^Re), and r̃^Im = sign(Im(y) + τ̃^Im). By Lemma 2,
+ || E[Ĉ^d_y] − C_y ||_∞ = || λ^2 E[r r̃^H] − C_y ||_∞ ≲ (λ^2 + ||C_y||_∞) e^{−c_2 λ^2 / ||C_y||_∞} ≲ λ^2 / N,    (110)
+ where we have used that λ^2 ≳ log(N) ||C_y||_∞. To estimate the first term in (109), observe that
+ || Ĉ^d_y − E[Ĉ^d_y] ||_∞    (111)
+ = || (λ^2/N) Σ_{k=1}^N (r^Re_k + j r^Im_k)(r̃^Re_k + j r̃^Im_k)^H − E[ (λ^2/N) Σ_{k=1}^N (r^Re_k + j r^Im_k)(r̃^Re_k + j r̃^Im_k)^H ] ||_∞
+ ≤ || (λ^2/N) Σ_{k=1}^N r^Re_k (r̃^Re_k)^T − E[ (λ^2/N) Σ_{k=1}^N r^Re_k (r̃^Re_k)^T ] ||_∞
+ + || (λ^2/N) Σ_{k=1}^N r^Re_k (r̃^Im_k)^T − E[ (λ^2/N) Σ_{k=1}^N r^Re_k (r̃^Im_k)^T ] ||_∞
+ + || (λ^2/N) Σ_{k=1}^N r^Im_k (r̃^Re_k)^T − E[ (λ^2/N) Σ_{k=1}^N r^Im_k (r̃^Re_k)^T ] ||_∞
+ + || (λ^2/N) Σ_{k=1}^N r^Im_k (r̃^Im_k)^T − E[ (λ^2/N) Σ_{k=1}^N r^Im_k (r̃^Im_k)^T ] ||_∞.    (112)
+ Using Lemma 3 for each of the four terms and applying a union bound, we get
+ Pr( || Ĉ^d_y − E[Ĉ^d_y] ||_∞ ≳ λ^2 sqrt( (log(M) + t)/N ) ) ≤ 8 e^{−c N t},    (113)
+ and thus the first statement of Theorem 1. The second statement follows trivially using
+ || Ĉ^d_y − C_y ||_F ≤ M || Ĉ^d_y − C_y ||_∞.    (114)
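+ The bias bound (102) and the concentration step above can be checked numerically. The sketch below
+ estimates the deviation || (λ²/N) Σ_k r_k r̃_k^T − C_y ||_∞ by Monte Carlo for a few dither ranges λ, using a
+ real-valued Gaussian toy model as in Lemma 3; the model and the grid of λ values are assumptions for
+ illustration only.
+ import numpy as np
+ 
+ rng = np.random.default_rng(1)
+ M, N = 8, 200000
+ A = rng.standard_normal((M, M)) / np.sqrt(M)
+ Cy = A @ A.T + 0.1 * np.eye(M)          # real toy covariance
+ L = np.linalg.cholesky(Cy)
+ Y = L @ rng.standard_normal((M, N))
+ for lam in [0.5, 1.0, 2.0, 4.0]:
+     R = np.sign(Y + rng.uniform(-lam, lam, Y.shape))
+     R_t = np.sign(Y + rng.uniform(-lam, lam, Y.shape))
+     dev = np.max(np.abs((lam ** 2 / N) * R @ R_t.T - Cy))
+     # very small lam leaves a visible bias; moderate lam keeps the estimate close to Cy
+     print(lam, dev)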
+ APPENDIX C
+ PROOF OF THEOREM 2
+ By Theorem 1, we can apply Lemma 1 with
+ ε_∞ ∼ λ^2 sqrt( (log(M) + t)/N ),   ε_F ∼ M max{1, ||C_y||_∞} λ^2 sqrt( (log(M) + t)/N ).
+ Note that we do not pick the “minimal setting” for ε_F suggested by Theorem 1: the additional
+ factor max{1, ||C_y||_∞} ensures that ε_∞ ≲ ε_F/||C_y||_F holds (as required in (26)). It remains to
+ note that all other conditions on ε_∞ and ε_F in Lemma 1 hold under the stated assumption on N. This
+ completes the proof.
+ REFERENCES
+ [1] L. Lu, G. Y. Li, A. L. Swindlehurst, A. Ashikhmin, and R. Zhang, “An overview of massive MIMO: Benefits and challenges,” IEEE Journal of Selected Topics in Signal Processing, vol. 8, no. 5, pp. 742–758, 2014.
+ [2] E. Bjornson, L. Van der Perre, S. Buzzi, and E. G. Larsson, “Massive MIMO in sub-6 GHz and mmWave: Physical, practical, and use-case differences,” IEEE Wireless Communications, vol. 26, no. 2, 2019.
+ [3] Y. Li, Y. Luo, G. Yang et al., “12-port 5G massive MIMO antenna array in sub-6GHz mobile handset for LTE bands 42/43/46 applications,” IEEE Access, vol. 6, pp. 344–354, 2017.
+ [4] H. Q. Ngo, E. G. Larsson, and T. L. Marzetta, “Energy and spectral efficiency of very large multiuser MIMO systems,” IEEE Transactions on Communications, vol. 61, no. 4, pp. 1436–1449, 2013.
+ [5] T. L. Marzetta, “Noncooperative cellular wireless with unlimited numbers of base station antennas,” IEEE Transactions on Wireless Communications, vol. 9, no. 11, pp. 3590–3600, 2010.
+ [6] S. Haghighatshoar and G. Caire, “Massive MIMO channel subspace estimation from low-dimensional projections,” IEEE Transactions on Signal Processing, vol. 65, no. 2, pp. 303–318, 2016.
+ [7] ——, “Massive MIMO pilot decontamination and channel interpolation via wideband sparse channel estimation,” IEEE Transactions on Wireless Communications, vol. 16, no. 12, pp. 8316–8332, 2017.
+ [8] ——, “Low-complexity massive MIMO subspace estimation and tracking from low-dimensional projections,” IEEE Transactions on Signal Processing, vol. 66, no. 7, pp. 1832–1844, 2018.
+ [9] M. B. Khalilsarai, T. Yang, S. Haghighatshoar, and G. Caire, “Structured channel covariance estimation from limited samples in massive MIMO,” in IEEE International Conference on Communications (ICC), 2020.
+ [10] S. Payami and F. Tufvesson, “Channel measurements and analysis for very large array systems at 2.6 GHz,” in 2012 6th European Conference on Antennas and Propagation (EUCAP). IEEE, 2012, pp. 433–437.
+ [11] X. Gao, F. Tufvesson, and O. Edfors, “Massive MIMO channels—measurements and models,” in 2013 Asilomar Conference on Signals, Systems and Computers. IEEE, 2013, pp. 280–284.
+ [12] X. Gao, O. Edfors, F. Rusek, and F. Tufvesson, “Massive MIMO performance evaluation based on measured propagation data,” IEEE Transactions on Wireless Communications, vol. 14, no. 7, pp. 3899–3911, 2015.
+ [13] S. Wu, C.-X. Wang, M. M. Alwakeel, Y. He et al., “A non-stationary 3-D wideband twin-cluster model for 5G massive MIMO channels,” IEEE Journal on Selected Areas in Communications, vol. 32, no. 6, 2014.
+ [14] J. Medbo, K. Börner, K. Haneda, V. Hovinen, T. Imai, J. Järvelainen, T. Jämsä, A. Karttunen, K. Kusume, J. Kyröläinen et al., “Channel modelling for the fifth generation mobile communications,” in The 8th European Conference on Antennas and Propagation (EuCAP 2014). IEEE, 2014, pp. 219–223.
+ [15] E. De Carvalho, A. Ali, A. Amiri, M. Angjelichinoski, and R. W. Heath, “Non-stationarities in extra-large-scale massive MIMO,” IEEE Wireless Communications, vol. 27, no. 4, pp. 74–80, 2020.
+ [16] R. H. Walden, “Analog-to-digital converter survey and analysis,” IEEE Journal on Selected Areas in Communications, vol. 17, no. 4, pp. 539–550, 1999.
+ [17] B. Murmann, “The race for the extra decibel: A brief review of current ADC performance trajectories,” IEEE Solid-State Circuits Magazine, vol. 7, no. 3, pp. 58–66, 2015.
+ [18] A. Mezghani and J. A. Nossek, “Analysis of Rayleigh-fading channels with 1-bit quantized output,” in 2008 IEEE International Symposium on Information Theory. IEEE, 2008, pp. 260–264.
+ [19] J. A. Nossek and M. T. Ivrlač, “Capacity and coding for quantized MIMO systems,” in Proceedings of the 2006 International Conference on Wireless Communications and Mobile Computing, 2006, pp. 1387–1392.
+ [20] J. Singh, O. Dabeer, and U. Madhow, “On the limits of communication with low-precision analog-to-digital conversion at the receiver,” IEEE Transactions on Communications, vol. 57, no. 12, pp. 3629–3639, 2009.
+ [21] X. Cheng, K. Xu, J. Sun, and S. Li, “Adaptive grouping sparse Bayesian learning for channel estimation in non-stationary uplink massive MIMO systems,” IEEE Transactions on Wireless Communications, vol. 18, no. 8, pp. 4184–4198, 2019.
+ [22] Y. Han, M. Li, S. Jin, C.-K. Wen, and X. Ma, “Deep learning-based FDD non-stationary massive MIMO downlink channel reconstruction,” IEEE Journal on Selected Areas in Communications, vol. 38, no. 9, 2020.
+ [23] Y. Li, C. Tao, G. Seco-Granados, A. Mezghani, A. L. Swindlehurst, and L. Liu, “Channel estimation and performance analysis of one-bit massive MIMO systems,” IEEE Transactions on Signal Processing, vol. 65, no. 15, pp. 4075–4089, 2017.
+ [24] Q. Wan, J. Fang, H. Duan, Z. Chen, and H. Li, “Generalized Bussgang LMMSE channel estimation for one-bit massive MIMO systems,” IEEE Transactions on Wireless Communications, vol. 19, no. 6, 2020.
+ [25] A. Eamaz, F. Yeganegi, and M. Soltanalian, “Covariance recovery for one-bit sampled non-stationary signals with time-varying sampling thresholds,” IEEE Transactions on Signal Processing, vol. 70, 2022.
+ [26] B. Whiten. (2013) NNLS - non negative least squares. [Online]. Available: https://www.mathworks.com/matlabcentral/fileexchange/38003-nnls-non-negative-least-squares
+ [27] 3GPP, “Study on channel model for frequencies from 0.5 to 100 GHz (release 15),” 3rd Generation Partnership Project (3GPP), Tech. Rep. TR 38.901 V15.0.0, 2018.
+ [28] ——, “Spatial channel model for multiple input multiple output (MIMO) simulations (release 16),” 3rd Generation Partnership Project (3GPP), Tech. Rep. TR 25.996 V16.0.0, 2020.
+ [29] S. Jaeckel, L. Raschkowski, K. Börner, and L. Thiele, “QuaDRiGa: A 3-D multi-cell channel model with time evolution for enabling virtual field trials,” IEEE Transactions on Antennas and Propagation, vol. 62, no. 6, pp. 3242–3256, 2014.
+ [30] V. Va, J. Choi, and R. W. Heath, “The impact of beamwidth on temporal channel variation in vehicular channels and its implications,” IEEE Transactions on Vehicular Technology, vol. 66, no. 6, 2016.
+ [31] O. Bar-Shalom and A. J. Weiss, “DOA estimation using one-bit quantized measurements,” IEEE Transactions on Aerospace and Electronic Systems, vol. 38, no. 3, pp. 868–884, 2002.
+ [32] J. J. Bussgang, “Crosscorrelation functions of amplitude-distorted Gaussian signals,” 1952.
+ [33] O. T. Demir and E. Bjornson, “The Bussgang decomposition of nonlinear systems: Basic theory and MIMO extensions [lecture notes],” IEEE Signal Processing Magazine, vol. 38, no. 1, pp. 131–136, 2020.
+ [34] A. Mezghani and J. A. Nossek, “Capacity lower bound of MIMO channels with output quantization and correlated noise,” in Proc. IEEE Int. Symp. Inf. Theory, 2012, pp. 1–5.
+ [35] A. Papoulis and S. U. Pillai, Probability, Random Variables, and Stochastic Processes. Tata McGraw-Hill Education, 2002.
+ [36] G. Jacovitti and A. Neri, “Estimation of the autocorrelation function of complex Gaussian stationary processes by amplitude clipped signals,” IEEE Transactions on Information Theory, vol. 40, no. 1, pp. 239–245, 1994.
+ [37] S. Dirksen, J. Maly, and H. Rauhut, “Covariance estimation under one-bit quantization,” Annals of Statistics, to appear. ArXiv:2104.01280, 2021.
+ [38] R. M. Gray and D. L. Neuhoff, “Quantization,” IEEE Transactions on Information Theory, vol. 44, no. 6, pp. 2325–2383, 1998.
+ [39] R. M. Gray and T. G. Stockham, “Dithered quantizers,” IEEE Transactions on Information Theory, vol. 39, no. 3, pp. 805–812, 1993.
+ [40] L. Roberts, “Picture coding using pseudo-random noise,” IRE Transactions on Information Theory, vol. 8, no. 2, pp. 145–154, 1962.
+ [41] R. G. Baraniuk, S. Foucart, D. Needell, Y. Plan, and M. Wootters, “Exponential decay of reconstruction error from binary measurements of sparse signals,” IEEE Transactions on Information Theory, vol. 63, no. 6, pp. 3368–3385, 2017.
+ [42] S. Dirksen, “Quantized compressed sensing: A survey,” in Compressed Sensing and Its Applications. Springer, 2019, pp. 67–95.
+ [43] S. Dirksen and S. Mendelson, “Robust one-bit compressed sensing with partial circulant matrices,” Annals of Applied Probability, to appear. ArXiv:1812.06719, 2018.
+ [44] ——, “Non-Gaussian hyperplane tessellations and robust one-bit compressed sensing,” Journal of the European Mathematical Society, vol. 23, no. 9, pp. 2913–2947, 2021.
+ [45] H. C. Jung, J. Maly, L. Palzer, and A. Stollenwerk, “Quantized compressed sensing by rectified linear units,” IEEE Transactions on Information Theory, vol. 67, no. 6, pp. 4125–4149, 2021.
+ [46] K. Knudson, R. Saab, and R. Ward, “One-bit compressive sensing with norm estimation,” IEEE Transactions on Information Theory, vol. 62, no. 5, pp. 2748–2758, 2016.
+ [47] M. B. Khalilsarai, S. Haghighatshoar, X. Yi, and G. Caire, “FDD massive MIMO via UL/DL channel covariance extrapolation and active channel sparsification,” IEEE Transactions on Wireless Communications, vol. 18, no. 1, pp. 121–135, 2018.
+ [48] M. B. Khalilsarai, T. Yang, S. Haghighatshoar, X. Yi, and G. Caire, “Dual-polarized FDD massive MIMO: A comprehensive framework,” IEEE Transactions on Wireless Communications, vol. 21, no. 2, 2021.
+ [49] T. Yang, M. B. Khalilsarai, S. Haghighatshoar, and G. Caire, “Structured channel covariance estimation from limited samples for large antenna arrays,” arXiv preprint arXiv:2110.03324, 2021.
+ [50] D. Chen and R. J. Plemmons, “Nonnegativity constraints in numerical analysis,” in The Birth of Numerical Analysis. World Scientific, 2010, pp. 109–139.
+ [51] C. L. Lawson and R. Hanson, Solving Least Squares Problems. Prentice-Hall, Englewood Cliffs, NJ, 1974.
+ [52] G. Caire, “On the ergodic rate lower bounds with applications to massive MIMO,” IEEE Transactions on Wireless Communications, vol. 17, no. 5, pp. 3258–3268, 2018.
+ [53] S. N. Diggavi and T. M. Cover, “The worst additive noise under a covariance constraint,” IEEE Transactions on Information Theory, vol. 47, no. 7, pp. 3072–3081, 2001.
+ [54] R. Vershynin, High-Dimensional Probability: An Introduction with Applications in Data Science. Cambridge University Press, 2018, vol. 47.
2422
+
5tE3T4oBgHgl3EQfpQqt/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
6tAyT4oBgHgl3EQfcvc9/content/tmp_files/2301.00288v1.pdf.txt ADDED
@@ -0,0 +1,3087 @@
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.00288v1 [math.AP] 31 Dec 2022
2
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR
3
+ NON-MONOTONIC SHEAR FLOWS
4
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
5
+ Abstract. We give a proof of linear inviscid damping and vorticity depletion for non-monotonic
6
+ shear flows with one critical point in a bounded periodic channel. In particular, we obtain
7
+ quantitative depletion rates for the vorticity function without any symmetry assumptions.
8
+ Dedicated to Carlos Kenig, on the occasion of his 70th birthday.
9
+ Key Words: Inviscid damping, vorticity depletion, non-monotonic shear flows.
10
+ Mathematics Subject Classification: 35B40, 35Q31, 35P25
11
+ Contents
+ 1.  Introduction                                                              1
+ 2.  Spectral property and representation formula                              5
+ 3.  Bounds on the Green's function and modified Green's function              7
+ 4.  The limiting absorption principle                                        13
+ 5.  Bounds on ψ^ι_{k,ǫ}: the non-degenerate case                             18
+ 6.  Bounds on ψ^ι_{k,ǫ}: the degenerate case                                 20
+ 7.  Proof of Theorem 1.2                                                     27
+ References                                                                   30
37
+ 1. Introduction
38
+ The study of stability problems in mathematical analysis of fluid dynamics has a long and
39
+ distinguished history, dating back to the work of Kelvin [18], Orr [25] and Rayleigh [26] among
40
+ many others, and continuing to the present day. Hydrodynamical stability problems can be
41
+ considered in both two and three dimensions. In this paper we work with two dimensional
42
+ inviscid flows.
43
+ For the Euler equations, there are significant recent progresses on the asymptotic stability
44
+ of monotonic shear flows and vortices, assuming spectral stability, see for example [9, 30, 34,
45
+ 35, 14, 16, 3, 17, 22, 28] for linear results.
46
+ The main mechanism of stabilization is the so
47
+ called “inviscid damping”, which refers to the transfer of energy of vorticity to higher and
48
+ higher frequencies leading to decay of the stream and velocity functions, as t → ∞. Extending
49
+ the linearized stability analysis for inviscid fluid equations to the full nonlinear setting is a
50
+ challenging problem, and the only available results are on spectrally stable monotonic shear
51
+ The first author was supported in part by NSF grant DMS-2007008. The second author is partially supported
52
+ by a UC Davis startup grant. The third author was supported in part by NSF grant DMS-1945179.
53
+ 1
54
+
55
+ 2
56
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
57
+ flows [2, 23, 10, 12], and on point vortices [11]. We refer also to the recent review article [13]
58
+ for a more in-depth discussion of recent developments of both linear and nonlinear inviscid
59
+ damping.
60
+ Many physically important shear flows are not monotonic, such as Poiseuille flow and Kol-
61
+ mogorov flows. For such flows on the linear inviscid level, there is an additional significant
62
+ physical phenomenon called “vorticity depletion” which refers to the asymptotic vanishing of
63
+ vorticity as t → ∞ near the critical point where the derivative of the shear flow is zero, first
64
+ predicted in Bouchet and Morita [5], and proved rigorously in Wei-Zhang-Zhao [31]. A similar
65
+ phenomenon was proved in Bedrossian-Coti Zelati-Vicol [3] for the case of vortices. See also
66
+ [17] by the first and third author for a refined description of the dynamics in Gevrey spaces as
67
+ a step towards proving nonlinear vortex symmetrization.
68
+ In [31] by Wei-Zhang-Zhao, sharp linear inviscid damping estimates and quantitative deple-
69
+ tion estimates were obtained for an important class of “symmetric shear flows” in a periodic
70
+ channel (see also [32] by Wei-Zhang-Zhao for a similar result for Kolmogorov flow). When
71
+ no symmetry is assumed, only qualitative bounds are available.
72
+ Heuristically the general
73
+ case should be similar to the symmetric one, since the main vorticity depletion mechanism
74
+ is completely local and asymptotically all shear flows approach symmetric ones at the (non-
75
+ degenerate) critical points. However there are significant difficulties in using the approach of
76
+ [31] to extend the quantitative depletion bounds of [31] to the general case, as the argument
77
+ in [31] relies heavily on decomposition of functions into odd and even parts, which are specific
78
+ to symmetric shear flows.
79
+ In this paper we prove linear inviscid damping estimates and quantitative vorticity depletion
80
+ estimates for a class of stable non-monotonic shear flows with one non-degenerate critical
81
+ point. The main new features of our results are that we do not need symmetry condition on
82
+ the background shear flow, and that our formulation on quantitative depletion for vorticity
83
+ function seem to be new even for general symmetric shear flows (see however Wei-Zhang-Zhao
84
+ [32] which contains a sharp depletion rate at the critical points for Kolmogorov flow), see
85
+ Theorem 1.2 below for the precise statements.
86
+ We begin with the description of our main
87
+ equations and theorem.
88
+ 1.1. Main equations. Consider the two dimensional Euler equation linearized around a shear
89
+ flow (b(y), 0), in the periodic channel (x, y, t) ∈ T × [0, 1] × [0, ∞):
90
+ ∂tω + b(y)∂xω − b′′(y)uy = 0,
91
+ div u = 0
92
+ and
93
+ ω = −∂yux + ∂xuy,
94
+ (1.1)
95
+ with the natural non-penetration boundary condition uy|y=0,1 = 0.
96
+ For the linearized flow,
97
+
98
+ T×[0, 1]
99
+ ux(x, y, t) dxdy and
100
+
101
+ T×[0, 1]
102
+ ω(x, y, t) dxdy are conserved quan-
103
+ tities. In this paper, we will assume that
104
+
105
+ T×[0,1]
106
+ ux
107
+ 0(x, y) dxdy =
108
+
109
+ T×[0,1]
110
+ ω0 dxdy = 0.
111
+ These assumptions can be dropped by adjusting b(y) with a linear shear flow C0y + C1. Then
112
+ one can see from the divergence free condition on u that there exists a stream function ψ(t, x, y)
113
+ with ψ(t, x, 0) = ψ(t, x, 1) ≡ 0, such that
114
+ ux = −∂yψ, uy = ∂xψ.
115
+ (1.2)
116
+
117
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS
118
+ 3
119
+ The stream function ψ can be solved through
120
+ ∆ψ = ω,
121
+ ψ|y=0,1 = 0.
122
+ (1.3)
123
+ We summarize our equations as follows
124
+
125
+
126
+
127
+ ∂tω + b(y)∂xω − b′′(y)∂xψ = 0,
128
+ ∆ψ(t, x, y) = ω(t, x, y),
129
+ ψ(t, x, 0) = ψ(t, x, 1) = 0,
130
+ (ux, uy) = (−∂yψ, ∂xψ),
131
+ (1.4)
132
+ for t ≥ 0, (x, y) ∈ T × [0, 1].
133
+ Our goal is to understand the long time behavior of ω(t) as t → ∞, with Sobolev regular
134
+ initial vorticity ω0.
135
+ 1.2. The main results. We describe more precisely the main assumptions and our main
136
+ conclusion. The main conditions we shall assume on the shear flow b(y) ∈ C4([0, 1]) are as
137
+ follows.
138
+ Assumption 1.1. We assume that the background flow b(y) ∈ C4([0, 1]) satisfies the following
139
+ conditions.
140
+ (1)
141
+ S := {y ∈ [0, 1] : b′(y) = 0} = {y∗} ⊂ (0, 1).
142
+ (1.5)
143
+ In addition, b′′(y∗) ̸= 0.
144
+ (2) For k ∈ Z\{0}, the linearized operator Lk : L2(0, 1) → L2(0, 1) defined as
145
+ Lkg(y) := b(y)g(y) + b′′(y)
146
+ � 1
147
+ 0
148
+ Gk(y, z)g(z) dz
149
+ (1.6)
150
+ has no discrete eigenvalues nor generalized embedded eigenvalues. In the above Gk is
151
+ the Green’s function for k2 −
152
+ d2
153
+ dy2 on the interval (0, 1) with zero Dirichlet boundary
154
+ condition.
155
+ We refer to section 2 below for the definition and more discussion about generalized embed-
156
+ ded eigenvalues.
157
+ Our main result is the following theorem.
158
+ Theorem 1.2. Assume that ω(t, ·) ∈ C([0, ∞), H4(T×[0, 1])) with the associated stream func-
159
+ tion ψ(t, ·) is the unique solution to (1.4), with initial data ω0 ∈ H4(T × [0, 1]) satisfying for
160
+ all y ∈ [0, 1],
161
+
162
+ T
163
+ ω0(x, y) dx = 0.
164
+ (1.7)
165
+ Then we have the following bounds.
166
+ (i) Inviscid damping estimates:
167
+ ∥ψ(t, ·)∥L2(T×[0,1]) ≲
168
+ 1
169
+ ⟨t⟩2 ∥ω0∥H4(T×[0,1]),
170
+ (1.8)
171
+ ∥ux(t, ·)∥L2(T×[0,1]) ≲ 1
172
+ ⟨t⟩∥ω0∥H4(T×[0,1]),
173
+ ∥uy(t, ·)∥L2(T×[0,1]) ≲
174
+ 1
175
+ ⟨t⟩2 ∥ω0∥H4(T×[0,1]).
176
+ (1.9)
177
+ (ii) Vorticity depletion estimates: there exists a decomposition
178
+ ω(t, x, y) := ωloc(t, x, y) + ωnloc(t, x, y),
179
+ (1.10)
180
+
181
+ 4
182
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
183
+ where for (x, y, t) ∈ T × [0, 1] × [0, ∞),
184
+ |ωloc(t, x, y)| ≲ |y − y∗|7/4∥ω0∥H4(T×[0,1]),
185
+ |ωnloc(t, x, y)| ≲
186
+ 1
187
+ ⟨t⟩7/8 ∥ω0∥H4(T×[0,1]).
188
+ (1.11)
189
+ 1.3. Remarks and main ideas of proof. We have the following remarks on Theorem 1.2.
190
+ Firstly, in the above theorem we have not tracked the minimal regularity required for the
191
+ bounds (1.8), (1.9) and (1.11) to hold, and a more careful argument can probably significantly
192
+ reduce the number of derivatives needed on the initial data ω0. Secondly, we note also that
193
+ the argument here can be applied to non-monotonic shear flows with multiple non-degenerate
194
+ points, although the presentation will be more complicated.
195
+ Thirdly, a more sophisticated
196
+ analysis may yield a sharper rate of vorticity depletion with rate
197
+ |ωloc(t, x, y)| ≲ |y − y∗|2−,
198
+ |ωnloc(t, x, y)| ≲ ⟨t⟩−1+.
199
+ It is not clear to us though if one can reach the optimal rates of |y − y∗|2 and ⟨t⟩−1.
200
+ We briefly explain the main ideas of the proof.
201
+ By a standard spectral representation formula, see (2.7), it suffices to study the spectral
202
+ density functions and the associated Rayleigh equation (2.8). There are two main cases to
203
+ consider. When the spectral parameter λ is not close to the critical value b(y∗), the situation
204
+ is similar to monotonic shear flows and can be treated as in [14]. The main new case is when
205
+ the spectral parameter λ is close to the critical value b(y∗). In this case, the Rayleigh equation
206
+ (2.8) is very singular, and the potential term
207
+ b′′(y)
208
+ b(y)−λ+iǫ has a quadratic singularity roughly of
209
+ the form
210
+ 2
211
+ (y−y∗)2+(λ−b(y∗))+iǫ for y close to y∗.
212
+ The key observation here, as in [17], is that the potential term
213
+ b′′(y)
214
+ b(y)−λ+iǫ is critically singular
215
+ and has real part with a favorable sign for 1 ≫ |y − y∗| ≫ |λ − b(y∗)|1/2, which needs to be
216
+ incorporated as part of the main term. We therefore define a modified Green’s function for
217
+ the main term, see (3.12)-(3.13), which has strong vanishing conditions near y = y∗, leading
218
+ ultimately to vorticity depletion. After extracting the main terms in the Rayleigh equation
219
+ (2.8), the rest of the terms can be treated as compact perturbations, and can be bounded using
220
+ a limiting absorption principle, see Lemma 4.4, thanks to the spectral assumption 1.1.
221
+ The limiting absorption principle provides preliminary bounds on the spectral density func-
222
+ tions ψι
223
+ k,ǫ(y, λ) with ι ∈ {±}. To obtain the desired quantitative decay rates, we take up to
224
+ two derivatives in λ of the spectral density functions, and again use the limiting absorption
225
+ principle to estimate the resulting derivatives, after extracting the main singular terms. The
226
+ procedure is more or less straightforward but the calculations are quite lengthy. We refer to
227
+ [14] also for similar calculations in a simpler setting. Lastly, we note that there are important
228
+ cancellations between ψ+
229
+ k,ǫ(y, λ) and ψ−
230
+ k,ǫ(y, λ) in the limit ǫ → 0+, which is the reason why we
231
+ need two versions of the limiting absorption principle, see Lemma 4.4, with different weighted
232
+ spaces.
233
+ 1.4. Notations. We summarize here some notations that are specific for this paper for the
234
+ reader’s conveniences.
235
+ For positive numbers α, β, we set α ∧ β := min{α, β}.
236
+ We denote
237
+ for d > 0, Σd := {b(y) :
238
+ y ∈ [y∗ − d, y∗ + d]}, Sd := [y∗ − d, y∗ + d].
239
+ We also denote
240
+ Σ := {b(y) : y ∈ [0, 1]} and I := [0, 1]. For k ∈ Z\{0}, we define for f ∈ H1(I) the norm
241
+ ∥f∥H1
242
+ k(I) := ∥f∥L2(I) + |k|−1∥f ′∥L2(I).
243
+
244
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS
245
+ 5
246
+ 2. Spectral property and representation formula
247
+ Taking Fourier transform in x in the equation (1.4) for ω, we obtain that
248
+ ∂tωk + ikb(y)ωk − ikb′′(y)ψk = 0,
249
+ (2.1)
250
+ for k ∈ Z, t ≥ 0, y ∈ [0, 1]. In the above, ωk and ψk are the k-th Fourier coefficients of ω, ψ in
251
+ x respectively. For each k ∈ Z\{0}, recall from (1.6) that for any g ∈ L2(0, 1),
252
+ Lkg(y) = b(y)g(y) + b′′(y)
253
+ � 1
254
+ 0
255
+ Gk(y, z)g(z)dz,
256
+ (2.2)
257
+ where Gk is the Green’s function for the operator k2− d2
258
+ dy2 on (0, 1) with zero Dirichlet boundary
259
+ condition. Then (2.1) can be reformulated abstractly as
260
+ ∂tωk + ikLkωk = 0.
261
+ (2.3)
262
+ In contrast to the spectral property of the linearized operator around monotonic shear flows,
263
+ the spectral property of Lk is less understood, especially on the generation of discrete eigen-
264
+ values and embedded eigenvalues. From general spectral theory, we know that the spectrum
265
+ of Lk consists of the continuous spectrum
266
+ Σ :=
267
+
268
+ b(y) : y ∈ [0, 1]
269
+
270
+ ,
271
+ (2.4)
272
+ together with some discrete eigenvalues with nonzero imaginary part which can only accumulate
273
+ at the set of continuous spectrum Σ. Unlike the case of monotonic shear flows where the discrete
274
+ eigenvalues can accumulate only at inflection points of the background shear flow, there appears
275
+ no simple characterization of the possible accumulation points for non-monotonic shear flows.
276
+ Recall that λ ∈ Σ is called an embedded eigenvalue if there exists a nontrivial g ∈ L2(0, 1),
277
+ such that
278
+ Lkg = λg.
279
+ (2.5)
280
+ For non-monotonic shear flows, this definition is too restrictive, as accumulation points of
281
+ discrete eigenvalues may no longer be embedded eigenvalues. To capture the discrete eigen-
282
+ values, we recall the following definition of “generalized embedded eigenvalues”, which can be
283
+ found already in [31], adapted to our setting.
284
+ Definition 2.1. We call λ ∈ Σ a generalized embedded eigenvalue, if one of the following
285
+ conditions is satisfied.
286
+ • λ is an embedded eigenvalue.
287
+ • λ ̸= b(y∗) and there exists a nontrivial ψ ∈ H1
288
+ 0(0, 1) : (0, 1) → C such that in the sense
289
+ of distributions on (0, 1),
290
+ (k2 − ∂2
291
+ y)ψ(y) + P.V.b′′(y)ψ(y)
292
+ b(y) − λ + iπ
293
+
294
+ z∈[0,1], b(z)=λ
295
+ b′′(z)ψ(z)
296
+ |b′(z)|
297
+ δ(y − z) = 0.
298
+ (2.6)
299
+ We remark that our assumption that the critical point y∗ of b(y) being non-degenerate
300
+ implies that the sum in (2.6) is finite, and that the spectral assumption 1.1 is satisfied if b′′ > 0
301
+ on [0, 1].
302
+
303
+ 6
304
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
305
+ Proposition 2.2. Suppose that k ∈ Z\{0} and ωk
306
+ 0 ∈ L2([0, 1]). Then the stream function
307
+ ψk(t, y) for k ∈ Z\{0}, y ∈ [0, 1], t ≥ 0 has the representation
308
+ ψk(t, y) = − 1
309
+ 2πi lim
310
+ ǫ→0+
311
+
312
+ Σ
313
+ e−ikλt �
314
+ ψ−
315
+ k,ǫ(y, λ) − ψ+
316
+ k,ǫ(y, λ)
317
+
318
+ dλ,
319
+ (2.7)
320
+ where ψι
321
+ k,ǫ(y, λ) for ι ∈ {+, −}, y ∈ [0, 1], λ ∈ Σ, k ∈ Z\{0}, and sufficiently small ǫ ∈
322
+ [−1/4, 1/4]\{0}, are the solutions to
323
+ −k2ψι
324
+ k,ǫ(y, λ) + d2
325
+ dy2 ψι
326
+ k,ǫ(y, λ) −
327
+ b′′(y)
328
+ b(y) − λ + iιǫψι
329
+ k,ǫ(y, λ) =
330
+ −ωk
331
+ 0(y)
332
+ b(y) − λ + iιǫ,
333
+ (2.8)
334
+ with zero Dirichlet boundary condition.
335
+ Proof. By standard theory of spectral projection, from (2.3), we obtain that for y ∈ [0, 1],
336
+ ωk(t, y) =
337
+ 1
338
+ 2πi lim
339
+ ǫ→0+
340
+
341
+ Σ
342
+ eiλt ��
343
+ (λ + kLk − iǫ)−1 − (λ + kLk + iǫ)−1�
344
+ ωk
345
+ 0
346
+
347
+ (y) dλ.
348
+ (2.9)
349
+ We then obtain for y ∈ [0, 1],
350
+ ψk(t, y) = − 1
351
+ 2πi lim
352
+ ǫ→0+
353
+
354
+ Σ
355
+ e−ikλt
356
+ � 1
357
+ 0
358
+ Gk(y, z)
359
+ ×
360
+ ��
361
+ (−λ + Lk − iǫ)−1 − (−λ + Lk + iǫ)−1�
362
+ ωk
363
+ 0
364
+
365
+ (z) dzdλ
366
+ = − 1
367
+ 2πi lim
368
+ ǫ→0+
369
+
370
+ Σ
371
+ e−ikλt �
372
+ ψ−
373
+ k,ǫ(y, λ) − ψ+
374
+ k,ǫ(y, λ)
375
+
376
+ dλ.
377
+ (2.10)
378
+ In the above, for y ∈ [0, 1] and λ ∈ Σ,
379
+ ψ+
380
+ k,ǫ(y, λ) :=
381
+ � 1
382
+ 0
383
+ Gk(y, z)
384
+
385
+ (−λ + Lk + iǫ)−1ωk
386
+ 0
387
+
388
+ (z) dz,
389
+ ψ−
390
+ k,ǫ(y, λ) :=
391
+ � 1
392
+ 0
393
+ Gk(y, z)
394
+
395
+ (−λ + Lk − iǫ)−1ωk
396
+ 0
397
+
398
+ (z) dz.
399
+ (2.11)
400
+ Therefore for ι ∈ {+, −}, y ∈ [0, 1], λ ∈ Σ,
401
+
402
+ k2 − d2
403
+ dy2
404
+
405
+ ψι
406
+ k,ǫ(y, y0) = (−λ + Lk + iιǫ)−1ωk
407
+ 0(y),
408
+ (2.12)
409
+ which implies
410
+ ωk
411
+ 0(y) =(−λ + Lk + iιǫ)
412
+
413
+ k2 − d2
414
+ dy2
415
+
416
+ ψι
417
+ k,ǫ(y, λ)
418
+ =(b(y) − λ + iιǫ)
419
+
420
+ k2 − d2
421
+ dy2
422
+
423
+ ψι
424
+ k,ǫ(y, λ) + b′′(y)ψι
425
+ k,ǫ(y, λ).
426
+ (2.13)
427
+ It follows from (2.13) that ψ+
428
+ k,ǫ(y, λ), ψ−
429
+ k,ǫ(y, λ) satisfy (2.8). The proposition is now proved.
430
+
431
+ Remark 2.3. The existence of ψι
432
+ k,ǫ for sufficiently small ǫ ̸= 0 follows from our spectral
433
+ assumptions, which imply the solvability of (2.8) for sufficiently small ǫ ̸= 0, see also (4.9).
434
+
435
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS
436
+ 7
437
+ 3. Bounds on the Green’s function and modified Green’s function
438
+ 3.1. Elementary properties of the standard Green’s function. For integers k ∈ Z\{0},
439
+ recall that the Green’s function Gk(y, z) solves
440
+ − d2
441
+ dy2 Gk(y, z) + k2Gk(y, z) = δ(y − z),
442
+ (3.1)
443
+ with Dirichlet boundary conditions Gk(0, z) = Gk(1, z) = 0, z ∈ (0, 1). Gk has the explicit
444
+ formula
445
+ Gk(y, z) =
446
+ 1
447
+ k sinh k
448
+
449
+ sinh(k(1 − z)) sinh(ky)
450
+ if y ≤ z,
451
+ sinh(kz) sinh(k(1 − y))
452
+ if y ≥ z,
453
+ (3.2)
454
+ and the symmetry
455
+ Gk(y, z) = Gk(z, y),
456
+ for k ∈ Z\{0}, y, z ∈ [0, 1].
457
+ (3.3)
458
+ We note the following bounds for Gk
459
+ sup
460
+ y∈[0,1],|A|≤10
461
+
462
+ |k|2��Gk(y, z)(log |z − A|)m��
463
+ L1(z∈[0,1]) + |k|
464
+ ��∂y,zGk(y, z)(log |z − A|)m��
465
+ L1(z∈[0,1])
466
+
467
+ +
468
+ sup
469
+ y∈[0,1],α∈{0,1}
470
+
471
+ |k|3/2−α ��∂α
472
+ y,zGk(y, z)
473
+ ��
474
+ L2(z∈[0,1])
475
+
476
+ ≲ | log ⟨k⟩|m,
477
+ for m ∈ {0, 1, 2, 3}.
478
+ (3.4)
479
+ Define
480
+ Fk(y, z) =
481
+ 1
482
+ sinh k
483
+
484
+ −k cosh (k(1 − z)) cosh (ky),
485
+ 0 ≤ y ≤ z ≤ 1;
486
+ −k cosh (kz) cosh (k(1 − y)),
487
+ 1 ≥ y > z ≥ 0.
488
+ (3.5)
489
+ We note that
490
+ ∂y∂zGk(y, z) = ∂z∂yGk(y, z) = δ(y − z) + Fk(y, z),
491
+ for y, z ∈ [0, 1].
492
+ (3.6)
493
+ By direct computation, we see Fk satisfies the bounds
494
+ sup
495
+ y∈[0,1],|A|≤10
496
+ ���Fk(y, z)(log |z − A|)m��
497
+ L1(z∈[0,1]) + |k|−1��∂y,zFk(y, z)(log |z − A|)m��
498
+ L1(z∈[0,1])
499
+
500
+ +
501
+ sup
502
+ y∈[0,1],α∈{0,1}
503
+
504
+ |k|−1/2−α ��∂α
505
+ y,zFk(y, z)
506
+ ��
507
+ L2(z∈[0,1])
508
+
509
+ ≲ | log ⟨k⟩|m,
510
+ for m ∈ {0, 1, 2, 3}.
511
+ (3.7)
512
+ The bounds (3.4) and (3.7) can be proved by explicit calculations and are useful in the proof
513
+ of Lemma 4.1 below.
514
+ 3.2. Bounds on the modified Green’s function. It follows from Assumption 1.1 that there
515
+ exists a δ0 ∈ (0, 1/8) such that
516
+ inf{|y∗|, |y∗ − 1|} > 10δ0
517
+ and
518
+ sup
519
+ y∈(y∗−4δ0,y∗+4δ0)
520
+ |b′′′(y)|δ0 < |b′′(y∗)|/10.
521
+ (3.8)
522
+ Define the set
523
+ Σδ0 := {b(y) : y ∈ [y∗ − δ0, y∗ + δ0]},
524
+ (3.9)
525
+
526
+ 8
527
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
528
+ and fix a standard smooth cutoff function ϕ ∈ C∞
529
+ c (−2, 2) satisfying ϕ ≡ 1 on [−3/2, 3/2]. For
530
+ simplicity of notations, we denote
531
+ I := (0, 1).
532
+ (3.10)
533
+ To simplify notations we define also for d ∈ (0, 1/10),
534
+ Sd := [y∗ − d, y∗ + d].
535
+ (3.11)
536
+ For applications below, we also need to study the “modified Green’s function” Gk(y, z; λ+iǫ)
537
+ for y, z ∈ [0, 1], λ ∈ Σδ0 and ǫ ∈ [−1/8, 1/8]\{0}, which satisfies for y, z ∈ (0, 1),
538
+ (k2−∂2
539
+ y)Gk(y, z; λ+iǫ)+
540
+ b′′(y)
541
+ b(y) − λ + iǫ
542
+
543
+ ϕ
544
+ �y − y∗
545
+ δ0
546
+
547
+ −ϕ
548
+ �y − y∗
549
+ δ(λ)
550
+ ��
551
+ Gk(y, z; λ+iǫ) = δ(y−z), (3.12)
552
+ with the boundary condition
553
+ Gk(y, z; λ + iǫ)|y∈{0,1} = 0.
554
+ (3.13)
555
+ In the above, we have used the notation that
556
+ δ(λ) := 8
557
+
558
+ |λ − b(y∗)|/b′′(y∗).
559
+ (3.14)
560
+ Define the weight ̺(y; λ + iǫ) for y, z ∈ [0, 1], λ ∈ Σδ0 and ǫ ∈ [−1/8, 1/8]\{0} as
561
+ ̺(y; λ + iǫ) :=|λ − b(y∗)|1/2 + |ǫ|1/2 + |y − y∗|.
562
+ (3.15)
563
+ The crucial bounds we need for the modified Green’s function Gk(y, z; λ + iǫ) is the following.
564
+ Lemma 3.1. Let Gk(y, z; λ + iǫ) for y, z ∈ [0, 1], λ ∈ Σδ0 and ǫ ∈ [−1/8, 1/8]\{0} be defined as
565
+ in (3.12). Then we have the identity for y, z ∈ [0, 1],
566
+ Gk(y, z; λ + iǫ) = Gk(z, y; λ + iǫ),
567
+ (3.16)
568
+ and the following statements hold.
569
+ (i) We have the bounds
570
+ sup
571
+ y∈[0,1], |y−z|≤min{̺(z;λ+iǫ),1/|k|}
572
+ |Gk(y, z; λ + iǫ)| ≲ min{̺(z; λ + iǫ), 1/|k|},
573
+ sup
574
+ y∈[0,1], |y−z|≤min{̺(z;λ+iǫ),1/|k|}
575
+ |∂yGk(y, z; λ + iǫ)| ≲ 1;
576
+ (3.17)
577
+ (ii) For y1, y2 ∈ [0, 1] with y2 ∈ [min{y1, z}, max{y1, z}] and ̺(y2; λ + iǫ) ≳ 1/|k|, we have
578
+ the bounds with α ∈ {0, 1}
579
+ |∂α
580
+ y Gk(y1, z; λ + iǫ)|
581
+
582
+
583
+ |k| + ̺−1(y1; λ + iǫ)
584
+ �α
585
+ e−|k||y1−y2|
586
+
587
+ |k|
588
+
589
+ [y2−1/|k|,y2+1/|k|]∩I
590
+ |Gk(y, z; λ + iǫ)|2 dy
591
+ �1/2
592
+ .
593
+ (3.18)
594
+ (iii) For y1, y2 ∈ [0, 1] with y2 ∈ [min{y1, z}, max{y1, z}] and ̺(y2; λ + iǫ) ≪ 1/|k|, we have
595
+ the bounds with α ∈ {0, 1}
596
+ |∂α
597
+ y Gk(y1, z; λ + iǫ)| ≲
598
+
599
+ |k| + ̺−1(y1; λ + iǫ)
600
+ ���
601
+ min
602
+ �̺2(y1; λ + iǫ)
603
+ ̺2(y2; λ + iǫ), ̺(y2; λ + iǫ)
604
+ ̺(y1; λ + iǫ)
605
+
606
+ M,
607
+ (3.19)
608
+ where
609
+ M :=
610
+
611
+ 1
612
+ ̺(y2; λ + iǫ)
613
+
614
+ [y2−̺(y2;λ+iǫ),y2+̺(y2;λ+iǫ)]∩I
615
+ |Gk(y, z; λ + iǫ)|2 dy
616
+ �1/2
617
+ .
618
+ (3.20)
619
+
620
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS
621
+ 9
622
+ Proof. The proof is based on energy estimates and “entanglement inequalities”, as in [15]. See
623
+ also the earlier work [33] where this type of inequality was used. We divide the proof into
624
+ several steps.
625
+ Step 1:
626
+ the proof of (3.17).
627
+ We first establish the bounds (3.17).
628
+ For simplicity of
629
+ notation, we suppress the dependence on z, λ + iǫ and set for y ∈ [0, 1],
630
+ h(y) := Gk(y, z; λ + iǫ),
631
+ V (y) :=
632
+ b′′(y)
633
+ b(y) − λ + iǫ
634
+
635
+ ϕ
636
+ �y − y∗
637
+ δ0
638
+
639
+ − ϕ
640
+ �y − y∗
641
+ δ
642
+ ��
643
+ .
644
+ (3.21)
645
+ Multiplying h to (3.12) and integrating over [0, 1], we obtain that
646
+ � 1
647
+ 0
648
+ |∂yh(y)|2 + |k|2|h(y)|2 dy +
649
+ � 1
650
+ 0
651
+ b′′(y)
652
+ b(y) − λ + iǫ
653
+
654
+ ϕ
655
+ �y − y∗
656
+ δ0
657
+
658
+ − ϕ
659
+ �y − y∗
660
+ δ
661
+ ��
662
+ |h(y)|2 dy = h(z).
663
+ (3.22)
664
+ Note that for y ∈ [0, 1], ℜV (y) ≥ 0, and in addition, for y ∈ Sδ0 and
665
+ |y − y∗| > C0
666
+
667
+ |λ − b(y∗)|1/2 + |ǫ|1/2�
668
+ with sufficiently large C0 ≫ 1,
669
+ 1 + ℜV (y) ≳
670
+ 1
671
+ ̺2(y; λ + iǫ).
672
+ (3.23)
673
+ It follows from (3.22) that
674
+ � 1
675
+ 0
676
+ |∂yh(y)|2 + |k|2|h(y)|2 dy +
677
+
678
+ y∈Sδ0, |y−y∗|>C0(δ+|ǫ|1/2)
679
+ 1
680
+
681
+ ̺(y; λ + iǫ)
682
+ �2 |h(y)|2 dy
683
+ ≲ |h(z)|.
684
+ (3.24)
685
+ Using the Sobolev type inequality
686
+ ∥h∥L∞(J) ≲ ∥h∥L2(J∗)|J|−1/2 + ∥∂yh∥L2(J)|J|1/2,
687
+ (3.25)
688
+ for any interval J, J∗ with J∗ ⊆ J and |J∗| ≳ |J|, and choosing the interval J ⊂ I as an interval
689
+ containing z with length of the size C1 min{1/|k|, ̺(z; λ + iǫ)}, we obtain from (3.24) that
690
+ � 1
691
+ 0
692
+ |∂yh(y)|2 + |k|2|h(y)|2 dy +
693
+
694
+ y∈Sδ0, |y−y∗|>C0(δ+|ǫ|1/2)
695
+ 1
696
+
697
+ ̺(y; λ + iǫ)
698
+ �2 |h(y)|2 dy
699
+ ≲ min{1/|k|, ̺(z; λ + iǫ)}.
700
+ (3.26)
701
+ The desired bound (3.17) follows from (3.26), (3.25), and equation (3.12).
702
+ Step 2: the proof of (3.18). Denote
703
+ M1 :=
704
+
705
+ |k|
706
+
707
+ [y2−1/|k|,y2+1/|k|]∩I
708
+ |Gk(y, z; λ + iǫ)|2 dy
709
+ �1/2
710
+ .
711
+ (3.27)
712
+ For the sake of concreteness, we assume that y1 > z (so y2 ∈ [z, y1]). We shall also assume
713
+ that y1 − y2 ≫ 1/|k| as the other case is analogous but easier. For ϕ ∈ C1
714
+ p([y2, 1]), the space of
715
+ piecewise C1 functions, with ϕ(y2) = 0, we multiply ϕ2h to equation (3.12) and integrate over
716
+
717
+ 10
718
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
719
+ [y2, 1] to obtain that
720
+ � 1
721
+ y2
722
+ |∂yh(y)|2ϕ2(y) + 2∂yh(y)h(y)ϕ(y)∂yϕ(y) + |k|2ϕ2(y)|h(y)|2 + V (y)|h(y)|2ϕ2(y) dy = 0.
723
+ (3.28)
724
+ Taking the real part of (3.28) and using Cauchy-Schwarz inequality, we get that
725
+ � 1
726
+ y2
727
+
728
+ |∂yϕ(y)|2 − |k|2|ϕ(y)|2�
729
+ |h(y)|2 dy ≥ 0.
730
+ (3.29)
731
+ We now choose ϕ more specifically as follows. We require that
732
+ ϕ(y2) = 0, ϕ′′(y) = 0 for y ∈ [y2, y2 + 1/|k|], ϕ(y2 + 1/|k|) = 1,
733
+ ϕ′(y) = |k|ϕ(y) for y ∈ [y2 + 1/|k|, y1 − 1/|k|], ϕ′(y) = 0 for y ∈ [y1 − 1/|k|, 1].
734
+ (3.30)
735
+ It follows from (3.29)-(3.30) that
736
+ � 1
737
+ y1−1/|k|
738
+ |k|2ϕ2(y)|h(y)|2 dy ≲ |k|M2
739
+ 1 ,
740
+ ϕ(y) ≈ e|k||y1−y2| for y ∈ [y1 − 1/|k|, y1 + 1/|k|] ∩ I.
741
+ (3.31)
742
+ The desired bounds (3.18) follow from (3.31) and equation (3.12).
743
+ Step 3: the the proof of (3.19). For the sake of concreteness, we assume that y1 > z (and
744
+ so y2 ∈ [z, y1]). We shall also assume that y1 − y2 ≫ ̺(y2; λ + iǫ) and that y2 > y∗ + δ + |ǫ|1/2
745
+ as the other cases are analogous.
746
+ For ϕ ∈ C1
747
+ p([y2, 1]) with ϕ(y2) = 0, we multiply ϕ2h to equation (3.12) and integrate over
748
+ [y2, 1] to obtain that
749
+ � 1
750
+ y2
751
+ |∂yh(y)|2ϕ2(y) + 2∂yh(y)h(y)ϕ(y)∂yϕ(y) + |k|2ϕ2(y)|h(y)|2 + V (y)|h(y)|2ϕ2(y) dy = 0.
752
+ (3.32)
753
+ Write for y ∈ [y2, 1]
754
+ h(y) = (y − y∗)1/2h∗(y).
755
+ (3.33)
756
+ Simple calculations show that
757
+ � 1
758
+ y2
759
+ (y − y∗)|∂yh∗(y)|2ϕ2(y) + 2(y − y∗)∂yϕ(y)ϕ(y)∂yh∗(y)h∗(y) +
760
+ 1
761
+ 4(y − y∗)|h∗(y)|2ϕ2(y)
762
+ + |k|2|h(y)|2ϕ2(y) + (y − y∗)V (y)ϕ2(y)|h∗(y)|2 dy = 0.
763
+ (3.34)
764
+ Therefore
765
+ � 1
766
+ y2
767
+
768
+ 1
769
+ 4(y − y∗) + (y − y∗)ℜVy∗(y)
770
+
771
+ ϕ2(y)|h∗(y)|2 dy ≤
772
+ � 1
773
+ y2
774
+ (y − y∗)(∂yϕ)2(y)|h∗(y)|2 dy, (3.35)
775
+ which implies that
776
+ � 1
777
+ y2
778
+ 1
779
+ y − y∗
780
+ ��
781
+ (y − y∗)∂yϕ
782
+ �2(y) −
783
+
784
+ 1/4 + (y − y∗)2ℜV (y)
785
+
786
+ ϕ2(y)
787
+
788
+ |h∗(y)|2 dy ≥ 0.
789
+ (3.36)
790
+
791
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS 11
792
+ We notice the pointwise bounds for y ∈ [y2, 1],
793
+ 1/4 + (y − y∗)2ℜV (y) ≥ max
794
+
795
+ 0, 9/4 − C2
796
+ ̺2(y2; λ + iǫ)
797
+ (y − y∗)2
798
+ − C2|y − y∗|
799
+
800
+ .
801
+ (3.37)
802
+ Now we choose ϕ ∈ C1
803
+ p([y2, 1]) more precisely as follows. We require that
804
+ ϕ(y2) = 0, ϕ′′(y) = 0 for y ∈ [y2, y2 + ̺(y2; λ + iǫ)], ϕ(y2 + ̺(y2; λ + iǫ)) = 1,
805
+ (y − y∗)ϕ′(y) =
806
+
807
+ 1/4 + (y − y∗)2ℜV (y)
808
+ �1/2ϕ(y)
809
+ for y ∈ [y2 + ̺(y2; λ + iǫ), y1 − ̺(y1; λ + iǫ)], and ϕ′(y) = 0 for y ∈ [y1 − ̺(y1; λ + iǫ), 1].
810
+ (3.38)
811
+ It follows from (3.36)-(3.38) that
812
+ � y1
813
+ y1���̺(y1;λ+iǫ)
814
+ 1
815
+ ̺(y1; λ + iǫ)ϕ2(y)|h∗(y)|2 dy ≲ M2/̺(y2; λ + iǫ),
816
+ ϕ(y) ≈
817
+ (y1 − y∗)3/2
818
+ ̺3/2(y2; λ + iǫ) for y ∈ [y1 − ̺(y1; λ + iǫ), y1].
819
+ (3.39)
820
+ The desired bounds (3.19) follow from the change of variable (3.33), the bound (3.36), (3.39)
821
+ and equation (3.12).
822
+
823
+ As a corollary of Lemma 3.1, we have the following additional bounds on the modified
824
+ Green’s function.
825
+ Lemma 3.2. Let Gk(y, z; λ + iǫ) for y, z ∈ [0, 1], λ ∈ Σδ0, k ∈ Z\{0} and ǫ ∈ [−1/8, 1/8]\{0}
826
+ be defined as in (3.12). Recall the definition (3.14) for δ = δ(λ) > 0. Define
827
+ h := 10(δ + |ǫ|1/2),
828
+ (3.40)
829
+ and also for y, z ∈ [0, 1],
830
+ Hk(y, z; λ + iǫ) :=
831
+
832
+ ∂z + ϕ
833
+ �y − y∗
834
+ h
835
+
836
+ ∂y
837
+
838
+ Gk(y, z; λ + iǫ).
839
+ (3.41)
840
+ Then the following statements hold for z ∈ S4δ.
841
+ (i) We have the bounds
842
+ sup
843
+ y∈[0,1], |y−z|≤min{̺(z;λ+iǫ),1/|k|}
844
+ |Hk(y, z; λ + iǫ)| ≲ 1,
845
+ sup
846
+ y∈[0,1], |y−z|≤min{̺(z;λ+iǫ),1/|k|}
847
+ |∂yHk(y, z; λ + iǫ)| ≲ 1/ min{̺(z; λ + iǫ), 1/|k|};
848
+ (3.42)
849
+ (ii) For y1, y2 ∈ [0, 1] with y2 ∈ [min{y1, z}, max{y1, z}] and ̺(y2; λ + iǫ) ≳ 1/|k|, we have
850
+ the bounds with α ∈ {0, 1}
851
+
852
+ min{̺(y1; λ + iǫ), 1/|k|}
853
+ �α|∂α
854
+ y Hk(y1, z; λ + iǫ)|
855
+
856
+ e−|k||y1−y2|
857
+ min{̺(z; λ + iǫ), 1/|k|}
858
+
859
+ |k|
860
+
861
+ [y2−1/|k|,y2+1/|k|]∩I
862
+ |Gk(y, z; λ + iǫ)|2 dy
863
+ �1/2
864
+ .
865
+ (3.43)
866
+
867
+ 12
868
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
869
+ (iii) For y1, y2 ∈ [0, 1] with y2 ∈ [min{y1, z}, max{y1, z}] and ̺(y2; λ + iǫ) ≪ 1/|k|, we have
870
+ the bounds with α ∈ {0, 1}
871
+
872
+ min{̺(y1; λ + iǫ), 1/|k|}
873
+ �α|∂α
874
+ y Hk(y1, z; λ + iǫ)|
875
+
876
+ 1
877
+ min{̺(z; λ + iǫ), 1/|k|} min
878
+ �̺2(y1; λ + iǫ)
879
+ ̺2(y2; λ + iǫ), ̺(y2; λ + iǫ)
880
+ ̺(y1; λ + iǫ)
881
+
882
+ M,
883
+ (3.44)
884
+ where
885
+ M :=
886
+
887
+ 1
888
+ ̺(y2; λ + iǫ)
889
+
890
+ [y2−̺(y2;λ+iǫ),y2+̺(y2;λ+iǫ)]∩I
891
+ |Gk(y, z; λ + iǫ)|2 dy
892
+ �1/2
893
+ .
894
+ (3.45)
895
+ Proof. Denote with a slight abuse of notation for y ∈ [0, 1],
896
+ ϕ†(y) := ϕ
897
+ �y − y∗
898
+ h
899
+
900
+ ,
901
+ V (y) :=
902
+ b′′(y)
903
+ b(y) − λ + iǫ
904
+
905
+ ϕ
906
+ �y − y∗
907
+ δ0
908
+
909
+ − ϕ
910
+ �y − y∗
911
+ δ(λ)
912
+ ��
913
+ .
914
+ (3.46)
915
+ Then Hk,j(y, z; λ + iǫ) satisfies for y ∈ [0, 1], z ∈ S4δ,
916
+ (k2 − ∂2
917
+ y)Hk(y, z; λ + iǫ) + V (y)Hk(y, z; λ + iǫ)
918
+ = −∂2
919
+ yϕ†(y)∂yGk(y, z; λ + iǫ) − ∂yV (y)ϕ†(y)Gk(y, z; λ + iǫ) − 2∂yϕ†(y)∂2
920
+ yGk(y, z; λ + iǫ).
921
+ (3.47)
922
+ The desired bounds then follow from equation (3.47), Lemma 3.1 and standard elliptic regu-
923
+ larity theory.
924
+
925
+ The bounds in Lemma 3.1 and Lemma 3.2 are quite sharp, since we can exploit the decay
926
+ coming from both k2 and
927
+ b′′(y)
928
+ b(y)−λ+iǫ
929
+
930
+ ϕ
931
+ � y−y∗
932
+ δ0
933
+
934
+ − ϕ
935
+ � y−y∗
936
+ δ(λ)
937
+ ��
938
+ . It is however somewhat complicated
939
+ to formulate a concrete bound that is easy to use. Instead, the following simple bounds are
940
+ more often used.
941
+ Corollary 3.3. Let Gk(y, z; λ + iǫ) for y, z ∈ [0, 1], λ ∈ Σδ0 and ǫ ∈ [−1/8, 1/8]\{0} be defined
942
+ as in (3.12). Then we have the following bounds.
943
+ (i) For y, z ∈ [0, 1], we have the bounds with α ∈ {0, 1}
944
+
945
+ |k| + ̺−1(y; λ + iǫ)
946
+ �−α
947
+ |∂α
948
+ y Gk(y, z; λ + iǫ)|
949
+
950
+ 1
951
+ |k| + ̺−1(z; λ + iǫ) min
952
+
953
+ e−|k||y−z|, ̺2(y; λ + iǫ)
954
+ ̺2(z; λ + iǫ), ̺(z; λ + iǫ)
955
+ ̺(y; λ + iǫ)
956
+
957
+ .
958
+ (3.48)
959
+ (iii) For y ∈ [0, 1], z ∈ S4δ, we have the bounds with α ∈ {0, 1, 2}
960
+
961
+ |k| + ̺−1(y; λ + iǫ)
962
+ �−α
963
+ |∂α
964
+ y Hk(y, z; λ + iǫ)| ≲ min
965
+
966
+ e−|k||y−z|, ̺2(y; λ + iǫ)
967
+ ̺2(z; λ + iǫ), ̺(z; λ + iǫ)
968
+ ̺(y; λ + iǫ)
969
+
970
+ .
971
+ (3.49)
972
+ Proof. The desired bounds (3.48)-(3.49) follow directly from Lemma 3.1 and Lemma 3.2, by
973
+ choosing, if necessary, another point y′ between y and z such that ̺(y′; λ + iǫ) ≈ 1/|k|, and
974
+ applying (3.48)-(3.49) on intervals [min{z, y′}, max{z, y′}] and [min{y′, y}, max{y′, y}] succes-
975
+ sively.
976
+
977
+
978
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS 13
979
+ 4. The limiting absorption principle
980
+ In this section we study the solvability of the main Rayleigh equations (2.8). It turns out
981
+ that the situation is very different for the spectral range λ ∈ Σ\Σδ0/2 (the non-degenerate case)
982
+ and λ ∈ Σδ0 (the degenerate case). We first consider the non-degenerate case.
983
+ 4.1. The non-degenerate case. Fix ǫ ∈ [−1/4, 1/4]\{0}, λ ∈ Σ\Σδ0/2, k ∈ Z\{0}. Define for
984
+ each g ∈ L2(0, 1) the operator
985
+ Tk,λ,ǫg(y) :=
986
+ � 1
987
+ 0
988
+ Gk(y, z)
989
+ b′′(z)g(z)
990
+ b(z) − λ + iǫdz.
991
+ (4.1)
992
+ For applications below, we fix a smooth cutoff function Φ ∈ C∞
993
+ 0 (y∗ − δ0/3, y∗ + δ0/3) with
994
+ Φ ≡ 1 on [y∗ − δ0/4, y∗ + δ0/4]. To obtain the optimal dependence on the frequency variable
995
+ k, we define
996
+ ∥g∥H1
997
+ k(I) := ∥g∥L2(I) + |k|−1∥g′∥L2(I).
998
+ (4.2)
999
+ Lemma 4.1. For ǫ ∈ [−1/4, 1/4]\{0}, λ ∈ Σ\Σδ0/2, k ∈ Z\{0}, the operator Tk,λ,ǫ satisfies the
1000
+ bound
1001
+ ∥Tk,λ,ǫg∥H1
1002
+ k(I) ≲ |k|−1/3∥g∥H1
1003
+ k(I),
1004
+ for all g ∈ H1
1005
+ k(I).
1006
+ (4.3)
1007
+ In addition, we have the more precise regularity structure
1008
+ �����∂yTk,λ,ǫg(y) + b′′(y)(1 − Φ(y))g(y)
1009
+ b′(y)
1010
+ log (b(y) − λ + iǫ)
1011
+ ����
1012
+ W 1,1(R)
1013
+ ≲ ⟨k⟩4/3∥g∥H1
1014
+ k(I).
1015
+ (4.4)
1016
+ Proof. We can decompose for y ∈ [0, 1],
1017
+ Tk,λ,ǫg(y) := T 1
1018
+ k,λ,ǫg(y) + T 2
1019
+ k,λ,ǫg(y),
1020
+ (4.5)
1021
+ where
1022
+ T 1
1023
+ k,λ,ǫg(y) :=
1024
+ � 1
1025
+ 0
1026
+ Gk(y, z)Φ(z)b′′(z)g(z)
1027
+ b(z) − λ − iǫ dz,
1028
+ T 2
1029
+ k,λ,ǫg(y) :=
1030
+ � 1
1031
+ 0
1032
+ Gk(y, z)(1 − Φ(z))b′′(z)g(z)
1033
+ b(z) − λ + iǫ
1034
+ dz.
1035
+ (4.6)
1036
+ It follows from the definition of Φ that T 1
1037
+ k,λ,ǫg(y) satisfies the bound
1038
+ ∥T 1
1039
+ k,λ,ǫg(y)∥H1
1040
+ k(I) ≲ |k|−1/3∥g∥H1
1041
+ k(I),
1042
+ ∥∂yT 1
1043
+ k,λ,ǫg(y)∥W 1,1(I) ≲ ⟨k⟩4/3∥g∥H1
1044
+ k(I).
1045
+ (4.7)
1046
+ To bound T 2
1047
+ k,λ,ǫg(y), we follow the approach in [14]. Using integration by parts, we obtain that
1048
+ T 2
1049
+ k,λ,ǫg(y) =
1050
+ � 1
1051
+ 0
1052
+ Gk(y, z)(1 − Φ(z))b′′(z)g(z)
1053
+ b′(z)
1054
+ ∂z log(b(z) − λ + iǫ) dz
1055
+ = −
1056
+ � 1
1057
+ 0
1058
+ ∂zGk(y, z)(1 − Φ(z))b′′(z)g(z)
1059
+ b′(z)
1060
+ log(b(z) − λ + iǫ) dz
1061
+
1062
+ � 1
1063
+ 0
1064
+ Gk(y, z)∂z
1065
+ �(1 − Φ(z))b′′(z)g(z)
1066
+ b′(z)
1067
+
1068
+ log(b(z) − λ + iǫ) dz.
1069
+ (4.8)
1070
+ The desired bounds follow from the bound (3.4), the formula (3.6) and (3.7).
1071
+
1072
+ We now prove the limiting absorption principle, using the assumption that there is no discrete
1073
+ or generalized embedded eigenvalues.
1074
+
1075
+ 14
1076
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
1077
+ Lemma 4.2. There exist ǫ0, κ > 0, such that the following statement holds.
1078
+ For all λ ∈
1079
+ Σ\Σδ0/2, k ∈ Z\{0}, 0 < |ǫ| < ǫ0 and any g ∈ H1
1080
+ k(I), we have the bound
1081
+ ∥g + Tk,λ,ǫg∥H1
1082
+ k(I) ≥ κ∥g∥H1
1083
+ k(I).
1084
+ (4.9)
1085
+ Proof. We prove (4.9) by contradiction. Assume that there exist for j ≥ 1, a sequence of num-
1086
+ bers kj ∈ Z\{0}, λj ∈ Σ\Σδ0/2, ǫj ∈ R\{0} → 0 and functions gj ∈ H1
1087
+ kj(I) with ∥gj∥H1
1088
+ kj (I) = 1,
1089
+ satisfying kj → k∗ ∈ (Z\{0}) ∪ {±∞}, λj → λ∗ ∈ Σ\Σδ0 as j → ∞, such that
1090
+ ��gj + Tkj,λj,ǫjgj
1091
+ ��
1092
+ H1
1093
+ kj (I) → 0,
1094
+ as j → ∞.
1095
+ (4.10)
1096
+ The bounds (4.3) and (4.10) imply that |kj| ≲ 1. Thus k∗ ∈ Z\{0}. Using ∥gj∥H1
1097
+ kj (I) = 1, the
1098
+ bounds (4.4) and the compact embedding W 1,1(I) → L2(I), we conclude that by passing to a
1099
+ subsequence, Tkj,λj,ǫjgj converges in H1(I). In view of (4.10) we can assume that gj → g in
1100
+ H1(I), where ∥g∥H1
1101
+ k∗ = 1.
1102
+ Using formula (4.1), we obtain from (4.10) that for y ∈ I,
1103
+ g(y) + lim
1104
+ j→∞
1105
+ � 1
1106
+ 0
1107
+ Gk∗(y, z)
1108
+ b′′(z)g(z)
1109
+ b(z) − λ + iǫj
1110
+ dz = 0.
1111
+ (4.11)
1112
+ Applying k2
1113
+ ∗ − d2
1114
+ dy2 to (4.11), we get that for y ∈ I,
1115
+ k2
1116
+ ∗g(y) − g′′(y) + lim
1117
+ j→∞
1118
+ (b(y) − λ∗)b′′(y)g(y)
1119
+ (b(y) − λ∗)2 + ǫ2
1120
+ j
1121
+ + iπ
1122
+
1123
+ z∈[0,1],b(z)=λ
1124
+ b′′(z)g(z)
1125
+ |b′(z)|
1126
+ δ(y − z) = 0,
1127
+ (4.12)
1128
+ in the sense of distributions for y ∈ (0, 1), which contradicts our spectral assumption that λ∗
1129
+ is not a generalized embedded eigenvalue for Lk. The lemma is then proved.
1130
+
1131
+ 4.2. The degenerate case λ ∈ Σδ0. Recall the definition (3.14) for δ = δ(λ). For λ ∈ Σδ0, k ∈
1132
+ Z\{0}, y ∈ I and ǫ ∈ [−1/8, 1/8]\{0}, we denote
1133
+ dk(λ, ǫ) :=
1134
+
1135
+ |λ − b(y∗)|1/2 + |ǫ|1/2�
1136
+ ∧ 1
1137
+ |k|,
1138
+ ̺k(y; λ + iǫ) := ̺(y; λ + iǫ) ∧ 1
1139
+ |k|.
1140
+ (4.13)
1141
+ Define the weight and the associated weighted Sobolev spaces XN,̺k and XL,̺k as
1142
+ ∥g∥XN,̺k (I) :=
1143
+
1144
+ α∈{0,1}
1145
+ (δ + |ǫ|1/2)−1/2���
1146
+
1147
+ dk(λ, ǫ)
1148
+ �(−7/4+α)∂α
1149
+ y g
1150
+ ���
1151
+ L2(S3(δ+|ǫ|1/2))
1152
+ +
1153
+
1154
+ α∈{0,1}
1155
+ ∥̺−7/4+α
1156
+ k
1157
+ (·; λ + iǫ)∂α
1158
+ y g∥L∞(I\S3(δ+|ǫ|1/2)),
1159
+ (4.14)
1160
+ and
1161
+ ∥g∥XL,̺k (I) :=
1162
+
1163
+ α∈{0,1}
1164
+ (δ + |ǫ|1/2)−1/2��dα
1165
+ k(λ, ǫ)∂α
1166
+ y g
1167
+ ��
1168
+ L2(S3(δ+|ǫ|1/2))
1169
+ +
1170
+
1171
+ α∈{0,1}
1172
+ ��dk(λ, ǫ)−1̺α+1
1173
+ k
1174
+ (·; λ + iǫ)∂α
1175
+ y g
1176
+ ��
1177
+ L∞(I\S3(δ+|ǫ|1/2)),
1178
+ (4.15)
1179
+
1180
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS 15
1181
+ Fix ǫ ∈ [−1/4, 1/4]\{0}, λ ∈ Σδ0, k ∈ Z\{0}. Recall the definition (3.14) for δ = δ(λ) > 0.
1182
+ Define for each g ∈ L2(0, 1) the operator
1183
+ T ∗
1184
+ k (λ + iǫ)g(y) :=
1185
+ � 1
1186
+ 0
1187
+ Gk(y, z; λ + iǫ)
1188
+
1189
+ 1 − ϕ
1190
+ �y − y∗
1191
+ δ0
1192
+
1193
+ + ϕ
1194
+ �y − y∗
1195
+ δ
1196
+ ��
1197
+ b′′(z)g(z)
1198
+ b(z) − λ + iǫdz.
1199
+ (4.16)
1200
+ Then we have the following bounds for T ∗
1201
+ k (λ + iǫ).
1202
+ Lemma 4.3. For ǫ ∈ [−1/4, 1/4]\{0}, λ ∈ Σδ0, k ∈ Z\{0}, the operator T ∗
1203
+ k (λ + iǫ) satisfies the
1204
+ bound for X ∈ {XN,̺k(I), XL,̺k(I)}
1205
+ ∥T ∗
1206
+ k (λ + iǫ)g∥X ≲ (1 + |k|(|λ − b(y∗)|1/2 + |ǫ|1/2))−1/4∥g∥X,
1207
+ for all g ∈ H1
1208
+ k(I).
1209
+ (4.17)
1210
+ Proof. We provide the detailed proof only for the case X = XN,̺k(I) as the other case is
1211
+ analogous. Since k, λ, ǫ are fixed, for simplicity of notations, we suppress the dependence on
1212
+ k, λ, ǫ to write T ∗ as T ∗
1213
+ k (λ + iǫ), and decompose for y ∈ I,
1214
+ T ∗g(y) := T ∗
1215
+ 1 g(y) + T ∗
1216
+ 2 g(y),
1217
+ (4.18)
1218
+ where
1219
+ T ∗
1220
+ 1 g(y) :=
1221
+ � 1
1222
+ 0
1223
+ Gk(y, z; λ + iǫ)
1224
+
1225
+ 1 − ϕ
1226
+ �z − y∗
1227
+ δ0
1228
+ ��
1229
+ b′′(z)g(z)
1230
+ b(z) − λ + iǫdz,
1231
+ T ∗
1232
+ 2 g(y) :=
1233
+ � 1
1234
+ 0
1235
+ Gk(y, z; λ + iǫ)ϕ
1236
+ �z − y∗
1237
+ δ
1238
+
1239
+ b′′(z)g(z)
1240
+ b(z) − λ + iǫdz.
1241
+ (4.19)
1242
+ It follows from the bounds on modified Green’s function Gk(y, z; λ + iǫ), see Lemma 3.1, that
1243
+ ��T ∗
1244
+ 1 g
1245
+ ��
1246
+ XN,̺k(I) ≲ |k|−1/2��g
1247
+ ��
1248
+ XN,̺k (I).
1249
+ (4.20)
1250
+ To prove (4.17), it suffices to prove
1251
+ ∥T ∗
1252
+ 2 g∥XN,̺k (I) ≲
1253
+
1254
+ 1 + |k|(δ + |ǫ|1/2)
1255
+ �−1/4∥g∥XN,̺k (I).
1256
+ (4.21)
1257
+ We assume momentarily that |ǫ| ≲ |λ − b(y∗)| and explain how to remove this assumption
1258
+ at the end of the proof. We decompose further for y ∈ I,
1259
+ T ∗
1260
+ 2 g(y) =
1261
+ � 1
1262
+ 0
1263
+ Gk(y, z; λ + iǫ)ϕ
1264
+ �z − y∗
1265
+ δ′
1266
+
1267
+ ϕ
1268
+ �z − y∗
1269
+ δ
1270
+
1271
+ b′′(z)g(z)
1272
+ b(z) − λ + iǫdz
1273
+ +
1274
+ � 1
1275
+ 0
1276
+ Gk(y, z; λ + iǫ)
1277
+
1278
+ 1 − ϕ
1279
+ �z − y∗
1280
+ δ′
1281
+ ��
1282
+ ϕ
1283
+ �z − y∗
1284
+ δ
1285
+
1286
+ b′′(z)g(z)
1287
+ b(z) − λ + iǫdz
1288
+ := T ∗
1289
+ 2,Rg(y) + T ∗
1290
+ 2,Sg(y),
1291
+ (4.22)
1292
+ where we have chosen δ′ = δ/C3 with a large constant C3 so that |b(y) − λ| ≈ |λ − b(y∗)| for
1293
+ |y − y∗| < δ′.
1294
+ It suffices to prove for ⋄ ∈ {R, S}
1295
+ ∥T ∗
1296
+ 2,⋄g∥XN,̺k (I) ≲
1297
+
1298
+ 1 + |k|(|λ − b(y∗)|1/2 + |ǫ|1/2)
1299
+ �−1/4∥g∥XN,̺k (I).
1300
+ (4.23)
1301
+ Step 1. We first prove (4.23) with ⋄ = R.
1302
+ Case I: 1/|k| > |λ − b(y∗)|1/2 + |ǫ|1/2. In this case for |z − y∗| ≲ δ and |y − y∗| ≲ 1 we have
1303
+ the bound
1304
+ ��Gk(y, z; λ + iǫ)
1305
+ �� ≲
1306
+ δ2 + |ǫ|
1307
+ |y − y∗| + δ + |ǫ|1/2 ,
1308
+ ��∂yGk(y, z; λ + iǫ)
1309
+ �� ≲
1310
+ δ2 + |ǫ|
1311
+ (|y − y∗| + δ + |ǫ|1/2)2 . (4.24)
1312
+
1313
+ 16
1314
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
1315
+ It follows from the bound (4.24) that
1316
+ ∥T ∗
1317
+ 2,Rg∥XN,̺k (I) ≲
1318
+
1319
+ 1 + |k|(|λ − b(y∗)|1/2 + |ǫ|1/2)
1320
+ �−1/4∥g∥XN,̺k (I)
1321
+ (4.25)
1322
+ Case II: 1/|k| ≪ |λ − b(y∗)|1/2 + |ǫ|1/2. In this case, we have for |z − y∗| ≲ δ and |y − y∗| ≲ 1
1323
+ that
1324
+ ��Gk(y, z; λ + iǫ)
1325
+ �� + |k|−1��∂yGk(y, z; λ + iǫ)
1326
+ �� ≲ |k|−1e−|k||y−z|.
1327
+ (4.26)
1328
+ The desired bound
1329
+ ∥T ∗
1330
+ 2,Rg∥XN,̺k (I) ≲
1331
+
1332
+ 1 + |k|(|λ − b(y∗)|1/2 + |ǫ|1/2)
1333
+ �−1/4∥g∥XN,̺k (I)
1334
+ (4.27)
1335
+ follows from (4.26).
1336
+ Step 2. We now turn to the proof of (4.23) with ⋄ = S and still consider two cases.
1337
+ Case I: 1/|k| > |λ − b(y∗)|1/2 + |ǫ|1/2. Denoting for y ∈ I,
1338
+ ϕ∗�y − y∗
1339
+ δ
1340
+
1341
+ :=
1342
+
1343
+ 1 − ϕ
1344
+ �z − y∗
1345
+ δ′
1346
+ ��
1347
+ ϕ
1348
+ �z − y∗
1349
+ δ
1350
+
1351
+ ,
1352
+ (4.28)
1353
+ we can rewrite
1354
+ T ∗
1355
+ 2,Sg(y) =
1356
+ � 1
1357
+ 0
1358
+ Gk(y, z; λ + iǫ)ϕ∗�z − y∗
1359
+ δ
1360
+ �b′′(z)g(z)
1361
+ b′(z)
1362
+ ∂z log b(z) − λ + iǫ
1363
+ δ2
1364
+ = −
1365
+ � 1
1366
+ 0
1367
+ ∂z
1368
+
1369
+ Gk(y, z; λ + iǫ)ϕ∗�z − y∗
1370
+ δ
1371
+ �b′′(z)g(z)
1372
+ b′(z)
1373
+
1374
+ log b(z) − λ + iǫ
1375
+ δ2
1376
+ dz.
1377
+ (4.29)
1378
+ As a consequence of (4.29), we also have
1379
+ ∂y
1380
+
1381
+ T ∗
1382
+ 2,Sg(y)
1383
+
1384
+ = ∂y
1385
+ � 1
1386
+ 0
1387
+ Gk(y, z; λ + iǫ)ϕ∗�z − y∗
1388
+ δ
1389
+ �b′′(z)g(z)
1390
+ b′(z)
1391
+ ∂z log b(z) − λ + iǫ
1392
+ δ2
1393
+ dz
1394
+ = −
1395
+ � 1
1396
+ 0
1397
+
1398
+ ∂y(∂z + ∂y)Gk(y, z; λ, ǫ)ϕ∗�z − y∗
1399
+ δ
1400
+ �b′′(z)g(z)
1401
+ b′(z)
1402
+
1403
+ log b(z) − λ + iǫ
1404
+ δ2
1405
+ dz
1406
+ +
1407
+ � 1
1408
+ 0
1409
+
1410
+ ∂2
1411
+ yGk(y, z; λ + iǫ)ϕ∗�z − y∗
1412
+ δ
1413
+ �b′′(z)g(z)
1414
+ b′(z)
1415
+
1416
+ log b(z) − λ + iǫ
1417
+ δ2
1418
+ dz
1419
+
1420
+ � 1
1421
+ 0
1422
+ ∂yGk(y, z; λ + iǫ)∂z
1423
+
1424
+ ϕ∗�z − y∗
1425
+ δ
1426
+ �b′′(z)g(z)
1427
+ b′(z)
1428
+
1429
+ log b(z) − λ + iǫ
1430
+ δ2
1431
+ dz.
1432
+ (4.30)
1433
+ Note that on the support of ϕ∗(z−y∗
1434
+ δ
1435
+ ), we have
1436
+ |b′(z)| ≈ δ,
1437
+ ̺(z; λ + iǫ) ≈ δ.
1438
+ (4.31)
1439
+ The desired bound (4.23) for ⋄ = S follows from (4.29)-(4.30), Lemma 3.1 and 3.2, and we
1440
+ have, in addition,
1441
+ (δ + |ǫ|1/2)−1/2
1442
+ ����∂y
1443
+
1444
+ ∂yT ∗
1445
+ 2,Sg(y) + ϕ∗�y − y∗
1446
+ δ
1447
+ �b′′(y)g(y)
1448
+ b′(y)
1449
+ log b(y) − λ + iǫ
1450
+ δ2
1451
+ �����
1452
+ L2(S3(δ+|ǫ|1/2),j)
1453
+ ≲ δ−1/4�
1454
+ 1 + |k|(|λ − b(y∗)|1/2 + |ǫ|1/2)
1455
+ �−1/4
1456
+ ∥g∥XN,̺k (I).
1457
+ (4.32)
1458
+
1459
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS 17
1460
+ Case II: 1/|k| ≪ |λ − b(y∗)|1/2 + |ǫ|1/2. This case is analogous to Case I, using Lemma 3.1
1461
+ and Lemma 3.2.
1462
+ Finally we turn to the assumption that |ǫ|1/2 ≲ δ.
1463
+ Suppose |ǫ|1/2 ≫ δ, then the factor
1464
+ 1
1465
+ b(z)−λ+iǫ is not truly singular, and the desired bounds (4.21) follow directly from the bounds
1466
+ on the modified Green’s function Gk(y, z; λ + iǫ) from Lemma 3.1 and Lemma 3.2. Indeed, we
1467
+ have the stronger bound
1468
+ ∥T ∗
1469
+ 2 g∥XN,̺k (I) ≲
1470
+ δ
1471
+
1472
+ |ǫ|
1473
+ ∥g∥XN,̺k (I),
1474
+ (4.33)
1475
+ which will be useful below.
1476
+
1477
+ The following limiting absorption principle plays an essential role in establishing the vorticity
1478
+ depletion phenomenon.
1479
+ Lemma 4.4. There exist positive numbers ǫ0, κ such that the following statement holds.
1480
+ For ǫ ∈ [−ǫ0, ǫ0]\{0}, λ ∈ Σδ0, k ∈ Z\{0}, and X ∈ {XN,̺k(I), XL,̺k(I)},
1481
+ ∥(I + T ∗
1482
+ k (λ + iǫ))g∥XN,̺k (I) ≥ κ∥g∥XN,̺k (I),
1483
+ for all g ∈ H1
1484
+ k(I).
1485
+ (4.34)
1486
+ Proof. We only consider the case X = XN,̺k(I) as the other case is analogous. We prove (4.34)
1487
+ by a contradiction argument. Assume (4.34) does not hold for any ǫ0 > 0. Then there exist
1488
+ for ℓ ∈ Z ∩ [1, ∞),
1489
+ λℓ → λ∗ ∈ Σδ0, ǫℓ ̸= 0 with ǫℓ → 0, kℓ → k∗ ∈ (Z\{0}) ∪ {±∞},
1490
+ (4.35)
1491
+ and functions gℓ satisfying
1492
+ ∥gℓ∥XN,��kℓ (I) = 1
1493
+ (4.36)
1494
+ such that
1495
+ ��(I + T ∗
1496
+ kℓ(λℓ + iǫℓ))gℓ
1497
+ ��
1498
+ XN,̺kℓ (I) → 0.
1499
+ (4.37)
1500
+ We can assume that λ∗ = b(y∗), otherwise the proof follows from the argument in the non-
1501
+ degenerate case. We consider several cases.
1502
+ Case I: lim supℓ→∞ ∥gℓ∥H1(I\Sδ0) > 0. By the bound (4.20), we can assume that k∗ ∈ Z\{0}.
1503
+ By the bounds (4.36) and (4.37), we can assume (passing to a subsequence if necessary) that
1504
+ gℓ → g, in H1
1505
+ loc(I\{y∗}) as ℓ → ∞,
1506
+ g(0) = g(1) = 0.
1507
+ (4.38)
1508
+ Then it follows from (4.36) and (4.37) that g satisfies
1509
+ |g(y)| ≲ |y − y∗|7/4,
1510
+ (4.39)
1511
+ and for y ∈ (0, 1),
1512
+ (k2
1513
+ ∗ − ∂2
1514
+ y)g(y) +
1515
+ b′′(y)
1516
+ b(y) − b(y∗)g(y) = 0,
1517
+ (4.40)
1518
+ which imply that b(y∗) is an embedded eigenvalue for Lk, a contradiction to the spectral
1519
+ assumption.
1520
+ Case II: lim supℓ→∞ ∥gℓ∥H1(I\Sδ0) = 0. By the bound (4.17) we can assume that |kℓ|(δℓ +
1521
+ |ǫℓ|1/2) ≲ 1. In this case, using (4.37), we obtain that (passing to a subsequence if necessary)
1522
+ ��(|λℓ − b(y∗)| + |ǫ|)−9/8gℓ
1523
+ ��
1524
+ L2([y∗−δℓ−|ǫℓ|1/2, y∗+δℓ+|ǫℓ|1/2])
1525
+ +
1526
+ ��(|λℓ − b(y∗)| + |ǫ|)−5/8∂ygℓ
1527
+ ��
1528
+ L2([y∗−δℓ−|ǫℓ|1/2, y∗+δℓ+|ǫℓ|1/2]) ≥ σ > 0,
1529
+ (4.41)
1530
+
1531
+ 18
1532
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
1533
+ where we recall from (3.14) that
1534
+ δℓ ≈ |λℓ − b(y∗)|1/2.
1535
+ (4.42)
1536
+ We divide into several subcases.
1537
+ Subcase II.1: |ǫℓ|1/2 ≈ δℓ for a subsequence.
1538
+ Define the change of variables for ℓ ≥ 1, y ∈ I,
1539
+ y − y∗ = δℓY,
1540
+ gℓ(y) := (|λℓ − b(y∗)| + |ǫℓ|)−7/8Hℓ(Y ).
1541
+ (4.43)
1542
+ It follows from (4.32) that we can extract a nontrivial limit H ∈ H1(R) of Hℓ satisfying for
1543
+ Y ∈ R,
1544
+ (β2 − ∂2
1545
+ Y )H(Y ) +
1546
+ b′′(y∗)
1547
+ b′′(y∗)Y 2/2 + γ + iαH(Y ) = 0,
1548
+ (4.44)
1549
+ where β ∈ R, α, γ ∈ R\{0}. This is impossible since the shear flow (b′′(y∗)Y 2/2, 0), Y ∈ R is
1550
+ spectrally stable.
1551
+ Subcase II.2: |ǫℓ|1/2 = o(δℓ) for a subsequence.
1552
+ Passing to a subsequence and using rescaling
1553
+ as in (4.43) we can extract a nontrivial limit H ∈ H1(R), such that
1554
+ (β2 − ∂2
1555
+ Y )H(Y ) + lim
1556
+ ǫ→0
1557
+ b′′(y∗)
1558
+ b′′(y∗)Y 2/2 + γ + iǫH(Y ) = 0.
1559
+ (4.45)
1560
+ This is again impossible since the shear flow (b′′(y∗)Y 2/2, 0), Y ∈ R is spectrally stable.
1561
+ Subcase II.3: δℓ = o(|ǫℓ|1/2) for a subsequence.
1562
+ This case is not possible thanks to the
1563
+ bound (4.33). The lemma is now proved.
1564
+
1565
+ 5. Bounds on ψι
1566
+ k,ǫ: the non-degenerate case
1567
+ In this section we obtain bounds on ψι
1568
+ k,ǫ(y, λ) in the non-degenerate case, i.e. when λ ∈
1569
+ Σ\Σδ0/2. Since the arguments are analogous to those in [14], we will be brief in the proofs, and
1570
+ provide only comments on the main ideas involved.
1571
+ We begin with the following preliminary bounds.
1572
+ Lemma 5.1. For λ ∈ Σ\Σδ0/2, k ∈ Z\{0}, ι ∈ {±} and 0 < ǫ < ǫ0, we have the bounds
1573
+ ∥ψι
1574
+ k,ǫ(·, λ)∥H1
1575
+ k(I) ≲ |k|−1/2∥ω0k∥H1
1576
+ k(I).
1577
+ (5.1)
1578
+ Proof. The desired bounds (5.1) follow directly from the Rayleigh equation (2.8) and Lemma
1579
+ 4.2, once we use the Green’s function Gk to invert k2 − ∂2
1580
+ y and formulate (2.8) as an integral
1581
+ equation.
1582
+
1583
+ To obtain control on ∂λψι
1584
+ k,ǫ(·, λ) for λ ∈ Σ\Σδ0/2, we take derivative in (2.8), and obtain
1585
+ that
1586
+ (k2 − ∂2
1587
+ y)∂λψι
1588
+ k,ǫ(y, λ) +
1589
+ b′′(y)∂λψι
1590
+ k,ǫ(y, λ)
1591
+ b(y) − λ + iιǫ
1592
+ =
1593
+ ωk
1594
+ 0(y)
1595
+ (b(y) − λ + iιǫ)2 −
1596
+ b′′(y)ψι
1597
+ k,ǫ(z, λ)
1598
+ (b(y) − λ + iιǫ)2 ,
1599
+ (5.2)
1600
+
1601
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS 19
1602
+ for y ∈ I with zero boundary value at y ∈ {0, 1}. Reformulating (5.2) as an integral equation,
1603
+ we obtain that
1604
+ ∂λψι
1605
+ k,ǫ(y, λ) +
1606
+ � 1
1607
+ 0
1608
+ Gk(y, z)
1609
+ b′′(z)∂λψι
1610
+ k,ǫ(z, λ)
1611
+ b(z) − λ + iιǫ
1612
+ dz
1613
+ =
1614
+ � 1
1615
+ 0
1616
+ Gk(y, z)
1617
+ ωk
1618
+ 0(z)
1619
+ (b(z) − λ + iιǫ)2 dz −
1620
+ � 1
1621
+ 0
1622
+ Gk(y, z)
1623
+ b′′(z)ψι
1624
+ k,ǫ(z, λ)
1625
+ (b(z) − λ + iιǫ)2 dz.
1626
+ (5.3)
1627
+ Recall the definition of the smooth cutoff function Φ below (4.1). We have the following bounds
1628
+ for ∂λψι
1629
+ k,ǫ(y, λ) when λ ∈ Σ\Σδ0.
1630
+ Lemma 5.2. For λ ∈ Σ\Σδ0/2, k ∈ Z\{0}, ι ∈ {±} and 0 < ǫ < ǫ0, ∂λψι
1631
+ k,ǫ(y, λ) satisfies the
1632
+ following decomposition
1633
+ ∂λψι
1634
+ k,ǫ(y, λ) =
1635
+ �b′(y0)ωk
1636
+ 0(y)
1637
+ |b′(y)|2
1638
+
1639
+ b′′(y)ψι
1640
+ k,ǫ(y, λ)
1641
+ |b′(y)|2
1642
+
1643
+ (1 − Φ(y)) log (b(y) − λ + iιǫ)
1644
+ +
1645
+
1646
+ σ=0,1
1647
+ ωk
1648
+ 0(σ)Ψι
1649
+ k,σ,ǫ(y, λ) log (b(σ) − λ + iιǫ) + Rι
1650
+ σ,k,y0,ǫ(y).
1651
+ (5.4)
1652
+ In the above for σ ∈ {0, 1}, ι ∈ {±}, 0 < ǫ < ǫ0, and λ ∈ Σ\Σδ0/2,
1653
+ ��Rι
1654
+ σ,k,y0,ǫ
1655
+ ��
1656
+ H1
1657
+ k(I) ≲ |k|1/2∥ω0k∥H2
1658
+ k(I),
1659
+ ��Ψι
1660
+ k,σ,ǫ(·, λ)
1661
+ ��
1662
+ H1
1663
+ k(I) ≲ |k|−1/2.
1664
+ (5.5)
1665
+ Proof. The basic idea is to expand the right hand side of (5.3) using integration by parts, and
1666
+ apply Lemma 4.2 after removing the most singular parts. Indeed, denoting schematically,
1667
+ U :=
1668
+ � 1
1669
+ 0
1670
+ Gk(y, z)
1671
+ ωk
1672
+ 0(z)
1673
+ (b(z) − λ + iιǫ)2 dz −
1674
+ � 1
1675
+ 0
1676
+ Gk(y, z)
1677
+ b′′(z)ψι
1678
+ k,ιǫ(z, λ)
1679
+ (b(z) − λ + iιǫ)2 dz,
1680
+ (5.6)
1681
+ we note that ∂λψι
1682
+ k,ǫ(y, λ) − U satisfies the equation (recalling (4.1) for the definition of Tk,λ,ιǫ),
1683
+ (I + Tk,λ,ιǫ)
1684
+
1685
+ ∂λψι
1686
+ k,ǫ(y, λ)
1687
+
1688
+ = −Tk,λ,ιǫU.
1689
+ (5.7)
1690
+ The term Tk,λ,ιǫU ∈ H1
1691
+ k(I) (noting however that for the boundary terms we need to track the
1692
+ singular coefficient log (b(σ) − λ + iιǫ), σ ∈ {0, 1}), and we can apply Lemma 4.2 to (5.7) in
1693
+ order to obtain the desired conclusions. We refer to [14] for the detailed proof.
1694
+
1695
+ To obtain bounds on ∂2
1696
+ λψι
1697
+ k,ǫ(y, λ) for λ ∈ Σ\Σδ0/2, we take two derivatives in (2.8) and obtain
1698
+ that
1699
+ (k2 − ∂2
1700
+ y)∂2
1701
+ λψι
1702
+ k,ǫ(y, λ) +
1703
+ b′′(y)∂2
1704
+ λψι
1705
+ k,ǫ(y, λ)
1706
+ b(y) − λ + iιǫ
1707
+ = 2
1708
+ ωk
1709
+ 0(y)
1710
+ (b(y) − λ + iιǫ)3 − 2
1711
+ b′′(y)ψι
1712
+ k,ǫ(z, λ)
1713
+ (b(y) − λ + iιǫ)3 +
1714
+ b′′(y)∂λψι
1715
+ k,ǫ(z, λ)
1716
+ (b(y) − λ + iιǫ)2 ,
1717
+ (5.8)
1718
+
1719
+ 20
1720
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
1721
+ for y ∈ I with zero boundary value at y ∈ {0, 1}. We can reformulate (5.8) in the integral form
1722
+ for y ∈ I, as
1723
+ ∂2
1724
+ λψι
1725
+ k,ǫ(y, λ) +
1726
+ � 1
1727
+ 0
1728
+ Gk(y, z)
1729
+ b′′(z)∂2
1730
+ λψι
1731
+ k,ǫ(z, λ)
1732
+ b(z) − λ + iιǫ
1733
+ dz
1734
+ =
1735
+ � 1
1736
+ 0
1737
+ Gk(y, z)
1738
+
1739
+ 2
1740
+ ωk
1741
+ 0(z)
1742
+ (b(z) − λ + iιǫ)3 − 2
1743
+ b′′(z)ψι
1744
+ k,ǫ(z, λ)
1745
+ (b(z) − λ + iιǫ)3 +
1746
+ b′′(z)∂λψι
1747
+ k,ǫ(z, λ)
1748
+ (b(z) − λ + iιǫ)2
1749
+
1750
+ dz.
1751
+ (5.9)
1752
+ We have the following bounds on ∂2
1753
+ λψι
1754
+ k,ǫ(y, λ) for λ ∈ Σ\Σδ0/2.
1755
+ Lemma 5.3. For k ∈ Z\{0}, ι ∈ {±} and 0 < ǫ < ǫ0, we have the following bound
1756
+ ����∂2
1757
+ λψι
1758
+ k,ǫ(y, λ) −
1759
+ ωk
1760
+ 0(1)Φ1ι
1761
+ k,ǫ(y, λ)
1762
+ b(1) − λ + iιǫ −
1763
+ ωk
1764
+ 0(0)Φ0ι
1765
+ k,ǫ(y, λ)
1766
+ b(0) − λ + iιǫ −
1767
+ b′′(y)ψι
1768
+ k,ǫ(y, λ) − ωk
1769
+ 0(y)
1770
+ |b′(y)|2(b(y) − λ + iιǫ)
1771
+ ����
1772
+ L2(y∈I,λ∈Σ\Σδ0/2)
1773
+ ≲ |k|3/2∥ω0k∥H3
1774
+ k(I)
1775
+ (5.10)
1776
+ In the above the functions Φσι
1777
+ k,ǫ, σ ∈ {0, 1} satisfy the equation for y ∈ I
1778
+ (I + Tk,λ,ιǫ)Φ1ι
1779
+ k,ǫ =
1780
+ sinh (ky)
1781
+ |b′(1)|2 sinh k,
1782
+ (I + Tk,λ,ιǫ)Φ0ι
1783
+ k,ǫ = sinh (k(1 − y))
1784
+ |b′(0)|2 sinh k .
1785
+ (5.11)
1786
+ Proof. The main idea of the proof is to expand the right side of (5.9) and apply Lemma 4.2
1787
+ after removing the most singular terms. Indeed, denoting schematically,
1788
+ U∗ :=
1789
+ � 1
1790
+ 0
1791
+ Gk(y, z)
1792
+
1793
+ 2
1794
+ ωk
1795
+ 0(z)
1796
+ (b(z) − λ + iιǫ)3 − 2
1797
+ b′′(z)ψι
1798
+ k,ιǫ(z, λ)
1799
+ (b(z) − λ + iιǫ)3 +
1800
+ b′′(z)∂λψι
1801
+ k,ιǫ(z, λ)
1802
+ (b(z) − λ + iιǫ)2
1803
+
1804
+ dz,
1805
+ (5.12)
1806
+ we have
1807
+ (I + Tk,λ,ιǫ)
1808
+
1809
+ ∂2
1810
+ λψι
1811
+ k,ǫ(y, λ) − U∗ + Tk,λ,ιǫU∗�
1812
+ =
1813
+
1814
+ Tk,λ,ιǫ
1815
+ �2U∗.
1816
+ (5.13)
1817
+ We note that ∂2
1818
+ λψι
1819
+ k,ǫ(y, λ) − U∗ + Tk,λ,ιǫU∗ ∈ H1
1820
+ k(I) (however we again need to track the
1821
+ singularities in λ in the boundary terms, involving log(b(σ) − λ + iιǫ) and 1/(b(σ) − λ + iιǫ)
1822
+ for σ ∈ {0, 1}), and we can apply Lemma (4.2) in order to obtain the desired conclusions. We
1823
+ refer to [14] for the detailed proof.
1824
+
1825
+ 6. Bounds on ψι
1826
+ k,ǫ: the degenerate case
1827
+ In this section we use the limiting absorption principle to study the Rayleigh equation (2.8)
1828
+ for λ ∈ Σδ0. More precisely, write for k ∈ Z\{0}, ι ∈ {±}, λ ∈ Σδ0, 0 < ǫ < ǫ0, (recall the
1829
+ definition of ǫ0 from Lemma 4.4)
1830
+ ψι
1831
+ k,ǫ(y, λ) = φι
1832
+ k,ǫ(y, λ) + Ψ(y)
1833
+ 1
1834
+ b′′(y)ω0k(y),
1835
+ (6.1)
1836
+ where Ψ ∈ C∞
1837
+ c (S3δ0) and Ψ ≡ 1 on S2δ0. Recall that Sd = [y∗ −d, y∗ +d] for d > 0 from (3.11).
1838
+ Then φι
1839
+ k,ǫ(y, λ) satisfies for y ∈ I,
1840
+ (k2 − ∂2
1841
+ y)φι
1842
+ k,ǫ(y, λ) +
1843
+ b′′(y)
1844
+ b(y) − λ + iιǫφι
1845
+ k,ǫ(y, λ) = gι
1846
+ k,ǫ(y, λ),
1847
+ (6.2)
1848
+
1849
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS 21
1850
+ where for k ∈ Z\{0}, ι ∈ {±}, λ ∈ Σδ0, 0 < ǫ < ǫ0
1851
+
1852
+ k,ǫ(y, λ) :=
1853
+ 1 − Ψ(y)
1854
+ b(y) − λ + iιǫω0k(y) − (k2 − ∂2
1855
+ y)
1856
+ � Ψ(y)
1857
+ b′′(y)ω0k(y)
1858
+
1859
+ .
1860
+ (6.3)
1861
+ Our main results are bounds for the functions φι
1862
+ k,ǫ(y, λ). We begin with the following pre-
1863
+ liminary bounds.
1864
+ Lemma 6.1. Assume that k ∈ Z\{0}, λ ∈ Σδ0 and let φι
1865
+ k,ǫ(y, λ) with ι ∈ {±}, ǫ ∈ (0, ǫ0) be as
1866
+ defined in (6.1)-(6.2). Recall from (3.14) and (4.13) that
1867
+ δ := δ(λ) = 8
1868
+
1869
+ |λ − b(y∗)|/|b′′(y∗)|,
1870
+ dk = dk(λ, ǫ) :=
1871
+
1872
+ |λ − b(y∗)|1/2 + |ǫ|1/2�
1873
+ ∧ 1
1874
+ |k|.
1875
+ (6.4)
1876
+ We have the bounds for k ∈ Z\{0}, ǫ ∈ (0, ǫ0), ι ∈ {±}, λ ∈ Σδ0,
1877
+
1878
+ α∈{0,1}
1879
+ ��d−7/4+α
1880
+ k
1881
+ ∂α
1882
+ y φι
1883
+ k,ǫ(y, λ)
1884
+ ��
1885
+ L2�
1886
+ [y∗−3(δ+|ǫ|1/2),y∗+3(δ+|ǫ|1/2)]
1887
+ �(δ + |ǫ|1/2)−1/2
1888
+ +
1889
+
1890
+ α∈{0,1}
1891
+ ��(|y − y∗| ∧ dk)−7/4+α∂α
1892
+ y φι
1893
+ k,ǫ(y, λ)
1894
+ ��
1895
+ L∞�
1896
+ [0,1]\[y∗−3(δ+|ǫ|1/2),y∗+3(δ+|ǫ|1/2)]
1897
+
1898
+ ≲ |k|5/2��ω0k
1899
+ ��
1900
+ H3
1901
+ k(I).
1902
+ (6.5)
1903
+ Define for y ∈ [0, 1], k ∈ Z\{0}, λ ∈ Σδ0\{b(y∗)},
1904
+ ψk(y, λ) := lim
1905
+ ǫ→0+
1906
+
1907
+ ψ+
1908
+ k,ǫ(y, λ) − ψ−
1909
+ k,ǫ(y, λ)
1910
+
1911
+ = lim
1912
+ ǫ→0+
1913
+
1914
+ φ+
1915
+ k,ǫ(y, λ) − φ−
1916
+ k,ǫ(y, λ)
1917
+
1918
+ .
1919
+ (6.6)
1920
+ Then we have the bounds for λ ∈ Σδ0\{b(y∗)},
1921
+
1922
+ α∈{0,1}
1923
+ ��(δ ∧ |k|−1)−7/4+α∂α
1924
+ y ψk(y, λ)
1925
+ ��
1926
+ L2([y∗−3δ,y∗+3δ])δ−1/2
1927
+ +
1928
+
1929
+ ��{0,1}
1930
+ ��(δ ∧ |k|−1)−11/4(|y − y∗| ∧ 1
1931
+ |k|)1+α∂α
1932
+ y ψk(y, λ)
1933
+ ��
1934
+ L∞([0,1]\[y∗−3δ,y∗+3δ]))
1935
+ ≲ |k|5/2��ω0k
1936
+ ��
1937
+ H3
1938
+ k(I).
1939
+ (6.7)
1940
+ Proof. It follows from (6.3) and our assumptions on the initial data ω0k that we have the bound
1941
+ for k ∈ Z\{0}, ι ∈ {±}, 0 < ǫ < ǫ0 and λ ∈ Σδ0,
1942
+ ��gι
1943
+ k,ǫ(y, λ)
1944
+ ��
1945
+ C2
1946
+ k(I) ≲ |k|1/2∥ω0k∥H3
1947
+ k(I).
1948
+ (6.8)
1949
+ We can reformulate equation (6.2) in the integral form as (recall the definition of T ∗(λ + iǫ)
1950
+ from (4.16))
1951
+ φι
1952
+ k,ǫ(y, λ) + T ∗
1953
+ k (λ + iιǫ)φι
1954
+ k,ǫ(y, λ) =
1955
+ � 1
1956
+ 0
1957
+ Gk(y, z; λ + iιǫ)gι
1958
+ k,ǫ(z, λ)dz,
1959
+ (6.9)
1960
+ for y ∈ I. By Lemma 4.4, we obtain the bound
1961
+ ��φι
1962
+ k,ǫ(·, λ)
1963
+ ��
1964
+ XN,̺k(I) ≲
1965
+ ���
1966
+ � 1
1967
+ 0
1968
+ Gk(y, z; λ + iιǫ)gι
1969
+ k,ǫ(z, λ)dz
1970
+ ���
1971
+ XN,̺k
1972
+ ≲ |k|5/2∥ω0k∥H3
1973
+ k(I),
1974
+ (6.10)
1975
+ which, by the definition of the space XN,̺k, see (4.14), implies the desired bounds (6.5).
1976
+
1977
+ 22
1978
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
1979
+ For applications below on isolating the singularity at λ = b(y), we fix ϕδ(y) ∈ C∞
1980
+ c (S2δ) as
1981
+ ϕδ(y) := ϕ(y
1982
+ δ )
1983
+
1984
+ 1 − ϕ( y
1985
+ δ′ )
1986
+
1987
+ ,
1988
+ (6.11)
1989
+ for y ∈ I, with δ′ := δ/M and an M ≫ 1 sufficiently large such that |b(y) − λ| ≈ |λ − b(y∗)| for
1990
+ |y − y∗| < δ/M.
1991
+ To prove (6.7), we note from (6.2) that φ+
1992
+ k,ǫ(y, λ) − φ−
1993
+ k,ǫ(y, λ) satisfies the equation for y ∈ I.
1994
+ (k2 − ∂2
1995
+ y)
1996
+
1997
+ φ+
1998
+ k,ǫ(y, λ) − φ−
1999
+ k,ǫ(y, λ)
2000
+
2001
+ +
2002
+ b′′(y)
2003
+ b(y) − λ + iǫ
2004
+
2005
+ φ+
2006
+ k,ǫ(y, λ) − φ−
2007
+ k,ǫ(y, λ)
2008
+
2009
+ = g+
2010
+ k,ǫ(y, λ) − g−
2011
+ k,ǫ(y, λ) −
2012
+
2013
+ b′′(y)
2014
+ b(y) − λ + iǫ −
2015
+ b′′(y)
2016
+ b(y) − λ − iǫ
2017
+
2018
+ φ−
2019
+ k,ǫ(y, λ).
2020
+ (6.12)
2021
+ Denote for λ ∈ Σδ0\{b(y∗)}, ǫ ∈ (0, ǫ0) and y ∈ I the function hk,ǫ(y, λ) as the solution to
2022
+ (k2 − ∂2
2023
+ y)hk,ǫ(y, λ) +
2024
+ b′′(y)
2025
+ b(y) − λ + iǫhk,ǫ(y, λ) = ϕδ(y)
2026
+
2027
+ b′′(y)
2028
+ b(y) − λ − iǫ −
2029
+ b′′(y)
2030
+ b(y) − λ + iǫ
2031
+
2032
+ φ−
2033
+ k,ǫ(y, λ),
2034
+ (6.13)
2035
+ with zero Dirichlet boundary condition. Then it is clear that for λ ∈ Σδ0\{b(y∗)}, y ∈ I,
2036
+ ψk(y, λ) = lim
2037
+ ǫ→0+ hk,ǫ(y, λ).
2038
+ (6.14)
2039
+ We can reformulate (6.13) as the following integral equation for λ ∈ Σδ0\{b(y∗)}, y ∈ I,
2040
+ hk,ǫ(y, λ) + T ∗
2041
+ k (λ + iǫ)hk,ǫ(y, λ)
2042
+ = −
2043
+ � 1
2044
+ 0
2045
+ Gk(y, z; λ + iǫ)ϕδ(z)
2046
+
2047
+ b′′(z)
2048
+ b(z) − λ + iǫ −
2049
+ b′′(z)
2050
+ b(z) − λ − iǫ
2051
+
2052
+ φ−
2053
+ k,ǫ(z, λ) dz.
2054
+ (6.15)
2055
+ It follows from the bound (6.5) that for |ǫ| ≲ (δ ∧ 1
2056
+ |k|)4,
2057
+ ����
2058
+ � 1
2059
+ 0
2060
+ Gk(y, z; λ + iǫ)ϕδ(z)
2061
+
2062
+ b′′(z)
2063
+ b(z) − λ + iǫ −
2064
+ b′′(z)
2065
+ b(z) − λ − iǫ
2066
+
2067
+ φ−
2068
+ k,ǫ(z, λ) dz
2069
+ ����
2070
+ XL,̺k
2071
+ ≲ (δ ∧ 1
2072
+ |k|)7/4.
2073
+ (6.16)
2074
+ The desired bound (6.7) then follows from Lemma 4.4 with X = XL,̺k.
2075
+
2076
+ To obtain higher order regularity bounds (in λ) of φι
2077
+ k,ǫ(·, λ), we take the derivative ∂λ in
2078
+ (6.2). It follows that ∂λφι
2079
+ k,ǫ(y, λ) satisfies for y ∈ I,
2080
+
2081
+ k2 − ∂2
2082
+ y +
2083
+ b′′(y)
2084
+ b(y) − λ + iιǫ
2085
+
2086
+ ∂λφι
2087
+ k,ǫ(y, λ) = −
2088
+ b′′(y)
2089
+ (b(y) − λ + iιǫ)2 φι
2090
+ k,ǫ(y, λ) + ∂λgι
2091
+ k,ǫ(y, λ),
2092
+ (6.17)
2093
+ with zero Dirichlet boundary condition.
2094
+ Recall the definition of ϕδ from (6.11). We have the following bounds on ∂λφι
2095
+ k,ǫ(y, λ).
2096
+ Lemma 6.2. Assume that k ∈ Z\{0}, λ ∈ Σδ0\{b(y∗)}.
2097
+ Let ψι
2098
+ k,ǫ(y, λ) and φι
2099
+ k,ǫ(y, λ) with
2100
+ ι ∈ {±}, 0 < ǫ < min{|λ − b(y∗)|, ǫ0} be as defined in (2.8) and (6.1) respectively. Recall from
2101
+ (3.14) that
2102
+ δ := δ(λ) = 8
2103
+
2104
+ |λ − b(y∗)|/b′′(y∗).
2105
+ (6.18)
2106
+
2107
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS 23
2108
+ Denote for y ∈ [0, 1], ι ∈ {±}, λ ∈ Σδ0\{b(y∗)}, 0 < ǫ < min{|λ − b(y∗)|, ǫ0},
2109
+ Λι
2110
+ 1,ǫ(y, λ) := φι
2111
+ k,ǫ(y, λ)ϕδ(y) b′′(y)
2112
+ (b′(y))2 log b(y) − λ + iιǫ
2113
+ δ2
2114
+ ,
2115
+ Λ1(y, λ) := ψk(y, λ)ϕδ(y) b′′(y)
2116
+ (b′(y))2 log b(y) − λ
2117
+ δ2
2118
+ .
2119
+ (6.19)
2120
+ We have the bounds for 0 < ǫ < min{|λ − b(y∗)|, ǫ0}, ι ∈ {±}, and λ ∈ Σδ0 that
2121
+
2122
+ α∈{0,1}
2123
+ ���(δ ∧ |k|−1)1/4+α∂α
2124
+ y
2125
+
2126
+ ∂λφι
2127
+ k,ǫ(y, λ) − Λι
2128
+ 1,ǫ(y, λ)
2129
+ ����
2130
+ L2([y∗−3δ,y∗+3δ])δ−1/2
2131
+ +
2132
+
2133
+ α∈{0,1}
2134
+ ���(δ ∧ |k|−1)2(|y − y∗| ∧ 1
2135
+ |k|)−7/4+α∂α
2136
+ y ∂λφι
2137
+ k,ǫ(y, λ)
2138
+ ���
2139
+ L∞([0,1]\[y∗−3δ,y∗+3δ]))
2140
+ ≲ |k|5/2��ω0k
2141
+ ��
2142
+ H3
2143
+ k(I).
2144
+ (6.20)
2145
+ In addition, we have the bounds for λ ∈ Σδ0\{b(y∗)} and k ∈ Z\{0},
2146
+
2147
+ α∈{0,1}
2148
+ ��(δ ∧ |k|−1)1/4+α∂α
2149
+ y
2150
+
2151
+ ∂λψk(y, λ) − Λ1(y, λ)
2152
+ ����
2153
+ L2([y∗−3δ,y∗+3δ])δ−1/2
2154
+ +
2155
+
2156
+ α∈{0,1}
2157
+ ��(δ ∧ |k|−1)−3/4(|y − y∗| ∧ 1
2158
+ |k|)1+α∂α
2159
+ y ∂λψk(y, λ)
2160
+ ��
2161
+ L∞([0,1]\[y∗−3δ,y∗+3δ]))
2162
+ ≲ |k|5/2��ω0k
2163
+ ��
2164
+ H3
2165
+ k(I).
2166
+ (6.21)
2167
+ Proof. Define for k ∈ Z\{0}, ι ∈ {±}, λ ∈ Σδ0\{b(y∗)}, 0 < ǫ < min{|λ − b(y∗)|, ǫ0}, y ∈ I,
2168
+ ∂λφι
2169
+ k,ǫ(y, λ) := φι
2170
+ k,ǫ(y, λ; 1) +
2171
+ � 1
2172
+ 0
2173
+ Gk(y, z; λ + iιǫ)
2174
+
2175
+ −b′′(z)
2176
+ (b(z) − λ + iιǫ)2 φι
2177
+ k,ǫ(z, λ) + ∂λgι
2178
+ k,ǫ(z, λ)
2179
+
2180
+ dz.
2181
+ (6.22)
2182
+ It follows from (6.17) that φι
2183
+ k,ǫ(y, λ; 1) satisfies for y ∈ I,
2184
+ φι
2185
+ k,ǫ(y, λ; 1) + T ∗
2186
+ k (λ + iιǫ)φι
2187
+ k,ǫ(y, λ; 1)
2188
+ = −T ∗
2189
+ k (λ + iιǫ)
2190
+ � 1
2191
+ 0
2192
+ Gk(y, z; λ + iιǫ)
2193
+
2194
+
2195
+ b′′(z)
2196
+ (b(z) − λ + iιǫ)2 φι
2197
+ k,ǫ(z, λ) + ∂λgι
2198
+ k,ǫ(z, λ)
2199
+
2200
+ dz.
2201
+ (6.23)
2202
+ Denote for k ∈ Z\{0}, ι ∈ {±}, λ ∈ Σδ0\{b(y∗)}, 0 < ǫ < min{|λ − b(y∗)|, ǫ0}, z ∈ I,
2203
+
2204
+ k,ǫ(z, λ; 1) :=
2205
+ b′′(z)
2206
+ (b(z) − λ + iιǫ)2 ϕδ(z)φι
2207
+ k,ǫ(z, λ),
2208
+
2209
+ k,ǫ(z, λ; 2) :=
2210
+ b′′(z)
2211
+ (b(z) − λ + iιǫ)2 (1 − ϕδ(z))φι
2212
+ k,ǫ(z, λ),
2213
+
2214
+ k,ǫ(z, λ; 3) := ∂λgι
2215
+ k,ǫ(z, λ).
2216
+ (6.24)
2217
+ It follows from the bound (6.5) and Lemma 3.1 that for j ∈ {2, 3}
2218
+ ��T ∗
2219
+ k (λ + iιǫ)
2220
+ � 1
2221
+ 0
2222
+ Gk(y, z; λ + iιǫ)hι
2223
+ k,ǫ(z, λ; j) dz
2224
+ ��
2225
+ XN,̺k ≲ (δ ∧ |k|−1)−2|k|5/2∥ω0k∥H3
2226
+ k(I).
2227
+ (6.25)
2228
+
2229
+ 24
2230
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
2231
+ Using integration by parts argument similar to (4.29)-(4.30), we have also
2232
+ ����T ∗
2233
+ k (λ + iιǫ)
2234
+ � 1
2235
+ 0
2236
+ Gk(y, z; λ + iιǫ)hι
2237
+ k,ǫ(z, λ; 1) dz
2238
+ ����
2239
+ XN,̺k
2240
+ ≲ (δ ∧ |k|−1)−2|k|5/2��ω0k
2241
+ ��
2242
+ H3
2243
+ k(I).
2244
+ (6.26)
2245
+ It follows from (6.25)-(6.26) and Lemma 4.4 that for λ\{b(y∗)},
2246
+ ��φι
2247
+ k,ǫ(y, λ; 1)
2248
+ ��
2249
+ XN,̺k ≲ (δ ∧ |k|−1)−2|k|5/2��ω0k
2250
+ ��
2251
+ H3
2252
+ k(I).
2253
+ (6.27)
2254
+ The desired bound (6.20) follows, as a consequence of (6.27) and (6.22).
2255
+ Using (6.17), we get that for y ∈ I,
2256
+
2257
+ k2 − ∂2
2258
+ y +
2259
+ b′′(y)
2260
+ b(y) − λ + iǫ
2261
+ ��
2262
+ ∂λφ+
2263
+ k,ǫ(y, λ) − ∂λφ−
2264
+ k,ǫ(y, λ)
2265
+
2266
+ = −
2267
+
2268
+ b′′(y)
2269
+ (b(y) − λ + iǫ)2 φ+
2270
+ k,ǫ(y, λ) −
2271
+ b′′(y)
2272
+ (b(y) − λ − iǫ)2 φ−
2273
+ k,ǫ(y, λ)
2274
+
2275
+ +
2276
+
2277
+ ∂λg+
2278
+ k,ǫ(y, λ) − ∂λg−
2279
+ k,ǫ(y, λ)
2280
+
2281
+
2282
+
2283
+ b′′(y)
2284
+ b(y) − λ + iǫ −
2285
+ b′′(y)
2286
+ b(y) − λ − iǫ
2287
+
2288
+ ∂λφ−
2289
+ k,ǫ(y, λ),
2290
+ (6.28)
2291
+ with zero Dirichlet boundary condition.
2292
+ Denoting for λ ∈ Σδ0\{b(y∗)} and y ∈ I, Dφk,ǫ(y, λ) as the solution to
2293
+
2294
+ k2 − ∂2
2295
+ y +
2296
+ b′′(y)
2297
+ b(y) − λ + iιǫ
2298
+
2299
+ Dφk,ǫ(y, λ)
2300
+ = −ϕδ(y)
2301
+
2302
+ b′′(y)
2303
+ (b(y) − λ + iǫ)2 φ+
2304
+ k,ǫ(y, λ) −
2305
+ b′′(y)
2306
+ (b(y) − λ − iǫ)2 φ−
2307
+ k,ǫ(y, λ)
2308
+
2309
+ − ϕδ(y)
2310
+
2311
+ b′′(y)
2312
+ b(y) − λ + iιǫ −
2313
+ b′′(y)
2314
+ b(y) − λ − iιǫ
2315
+
2316
+ ∂λφ−
2317
+ k,ǫ(y, λ),
2318
+ (6.29)
2319
+ for y ∈ I with zero Dirichlet boundary condition.
2320
+ We notice the identity that for y ∈ I, λ ∈ Σδ0\{b(y∗)},
2321
+ ∂λψk(y, λ) = lim
2322
+ ǫ→0+ Dφk,ǫ(y, λ).
2323
+ (6.30)
2324
+ We can reformulate (6.29) as the integral equation for y ∈ I,
2325
+ Dφk,ǫ(y, λ) + T ∗
2326
+ k (λ + iǫ)Dφk,ǫ(y, λ)
2327
+ = −
2328
+ � 1
2329
+ 0
2330
+ Gk(y, z; λ + iǫ)ϕδ(z)
2331
+
2332
+ b′′(z)
2333
+ (b(z) − λ + iǫ)2 φ+
2334
+ k,ǫ(z, λ) −
2335
+ b′′(z)
2336
+ (b(z) − λ − iǫ)2 φ−
2337
+ k,ǫ(z, λ)
2338
+
2339
+ dz
2340
+
2341
+ � 1
2342
+ 0
2343
+ Gk(y, z; λ + iǫ)ϕδ(z)
2344
+
2345
+ b′′(z)
2346
+ b(z) − λ + iǫ −
2347
+ b′′(z)
2348
+ b(z) − λ − iιǫ
2349
+
2350
+ ∂λφ−
2351
+ k,ǫ(z, λ) dz
2352
+ := Rk,ǫ(y, λ).
2353
+ (6.31)
2354
+ We can write for y ∈ I, λ ∈ Σδ0\{b(y∗)}, 0 < ǫ < min{|λ − b(y∗)|, ǫ0},
2355
+ Dφk,ǫ(y, λ) := Rk,ǫ(y, λ) + Dφk,ǫ(y, λ; 1).
2356
+ (6.32)
2357
+
2358
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS 25
2359
+ Then Dφk,ǫ(y, λ; 1) satisfies for y ∈ I, λ ∈ Σδ0\{b(y∗)}, 0 < ǫ < min{|λ − b(y∗)|, ǫ0}, the
2360
+ equation
2361
+ Dφk,ǫ(y, λ; 1) + T ∗
2362
+ k (λ + iǫ)Dφk,ǫ(y, λ; 1) = −T ∗
2363
+ k (λ + iǫ)Rk,ǫ(y, λ).
2364
+ (6.33)
2365
+ The desired bounds (6.37) follow from (6.31)-(6.33), and Lemma 3.2 with X = XL,̺k.
2366
+
2367
+ Lastly we turn to the highest order derivative ∂2
2368
+ λψι
2369
+ k,ǫ(y, λ) that we need to control. To study
2370
+ ∂2
2371
+ λψι
2372
+ k,ǫ(y, λ), we take the derivative ∂λ in (6.17) and obtain that
2373
+
2374
+ k2 − ∂2
2375
+ y +
2376
+ b′′(y)
2377
+ b(y) − λ + iιǫ
2378
+
2379
+ ∂2
2380
+ λφι
2381
+ k,ǫ(·, λ) = −
2382
+ 2b′′(y)
2383
+ (b(y) − λ + iιǫ)2 ∂λφι
2384
+ k,ǫ(·, λ)
2385
+
2386
+ 2b′′(y)
2387
+ (b(y) − λ + iιǫ)3 φι
2388
+ k,ǫ(y, λ) + ∂2
2389
+ λgι
2390
+ k,ǫ(y, λ).
2391
+ (6.34)
2392
+ Lemma 6.3. Assume that k ∈ Z\{0}, λ ∈ Λδ0\{b(y∗)} and let φι
2393
+ k,ǫ(y, λ) with ι ∈ {±}, 0 < ǫ <
2394
+ min{|λ − b(y∗)|, ǫ0} be as defined in (6.2). Recall that
2395
+ δ := δ(λ) = 8
2396
+
2397
+ |λ − b(y∗)|/b′′(y∗).
2398
+ (6.35)
2399
+ Denoting for y ∈ [0, 1], λ ∈ Λδ0\{b(y∗)},
2400
+ Λ2(y, λ) := − ψk(y, λ)ϕδ(y) b′′(y)
2401
+ (b′(y))2 lim
2402
+ ǫ→0+
2403
+ 1
2404
+ b(y) − λ + iǫ
2405
+ − ϕδ(y) b′′(y)
2406
+ (b′(y))2 lim
2407
+ ǫ→0+
2408
+
2409
+ 1
2410
+ b(y) − λ + iǫ −
2411
+ 1
2412
+ b(y) − λ − iǫ
2413
+
2414
+ φ−
2415
+ k,ǫ(y, λ),
2416
+ (6.36)
2417
+ then we have the bounds for λ ∈ Λδ0\{b(y∗)},
2418
+
2419
+ α∈{0,1}
2420
+ ���(δ ∧ |k|−1)9/4�
2421
+ ∂2
2422
+ λψk(y, λ) − Λ2(y, λ)
2423
+ ����
2424
+ L2([y∗−3δ,y∗+3δ])δ−1/2
2425
+ +
2426
+
2427
+ α∈{0,1}
2428
+ ���(δ ∧ |k|−1)5/4(|y − y∗| ∧ 1
2429
+ |k|)∂2
2430
+ λψk(y, λ)
2431
+ ���
2432
+ L∞([0,1]\[y∗−3δ,y∗+3δ])) ≲ |k|5/2��ω0k
2433
+ ��
2434
+ H3
2435
+ k(I).
2436
+ (6.37)
2437
+ Proof. Denote for k ∈ Z\{0}, λ ∈ Λδ0\{b(y∗)}, ι ∈ {±}, 0 < ǫ < min{|λ − b(y∗)|, ǫ0} and y ∈ I,
2438
+
2439
+ k,ǫ(z, λ; 4) := −
2440
+ 2b′′(z)
2441
+ (b(z) − λ − iιǫ)2 ϕδ(z)∂λφι
2442
+ k,ǫ(z, λ),
2443
+
2444
+ k,ǫ(z, λ; 5) = −
2445
+ 2b′′(z)
2446
+ (b(z) − λ − iιǫ)3 ϕδ(z)φι
2447
+ k,ǫ(z, λ)
2448
+
2449
+ k,ǫ(z, λ; 6) := −
2450
+ b′′(z)
2451
+ (b(z) − λ − iιǫ)2 (1 − ϕδ(z))∂λφι
2452
+ k,ǫ(z, λ),
2453
+
2454
+ k,ǫ(z, λ; 7) = −
2455
+ 2b′′(z)
2456
+ (b(z) − λ − iιǫ)3 (1 − ϕδ(z))φι
2457
+ k,ǫ(z, λ),
2458
+
2459
+ k,ǫ(z, λ; 8) := ∂2
2460
+ λgι
2461
+ k,ǫ(z, λ).
2462
+ (6.38)
2463
+
2464
+ 26
2465
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
2466
+ Define for k ∈ Z\{0}, λ ∈ Λδ0\{b(y∗)}, ι ∈ {±}, 0 < ǫ < min{|λ − b(y∗)|, ǫ0} and z ∈ I,
2467
+ ∂2
2468
+ λφι
2469
+ k,ǫ(y, λ) := φι
2470
+ k,ǫ(y, λ; 2) +
2471
+ 8
2472
+
2473
+ j=4
2474
+ � 1
2475
+ 0
2476
+ Gk(y, z; λ + iιǫ)hι
2477
+ k,ǫ(z, λ; j) dz
2478
+
2479
+ 8
2480
+
2481
+ j=4
2482
+ T ∗
2483
+ k (λ + iιǫ)
2484
+ � 1
2485
+ 0
2486
+ Gk(y, z; λ + iιǫ)hι
2487
+ k,ǫ(z, λ; j) dz
2488
+ (6.39)
2489
+ It follows from (6.34) that φι
2490
+ k,ǫ(y, λ; 2) satisfies for y ∈ I,
2491
+ φι
2492
+ k,ǫ(y, λ; 2) + T ∗
2493
+ k (λ + iιǫ)φι
2494
+ k,ǫ(y, λ; 2) =
2495
+ 8
2496
+
2497
+ j=4
2498
+
2499
+ T ∗
2500
+ k (λ + iιǫ)
2501
+ �2 � 1
2502
+ 0
2503
+ Gk(y, z; λ + iιǫ)hι
2504
+ k,ǫ(z, λ; j) dz.
2505
+ (6.40)
2506
+ It follows from Lemma 6.2 and Lemma 3.1 that for j ∈ {6, 7, 8}
2507
+ ���
2508
+
2509
+ T ∗
2510
+ k (λ + iǫ)
2511
+ �2
2512
+ � 1
2513
+ 0
2514
+ Gk(y, z; λ + iιǫ)hι
2515
+ k,ǫ(z, λ; j) dz
2516
+ ���
2517
+ XN,̺k
2518
+ ≲ (δ ∧ |k|−1)−4|k|5/2∥ω0k∥H3
2519
+ k(I). (6.41)
2520
+ Using integration by parts argument similar to (4.29)-(4.30), we have also for j ∈ {4, 5},
2521
+ ����
2522
+
2523
+ T ∗
2524
+ k (λ + iǫ)
2525
+ �2 � 1
2526
+ 0
2527
+ Gk(y, z; λ + iιǫ)hι
2528
+ k,ǫ(z, λ; j) dz
2529
+ ����
2530
+ XN,̺k
2531
+ ≲ (δ ∧ |k|−1)−4|k|5/2��ω0k
2532
+ ��
2533
+ H3
2534
+ k(I).
2535
+ (6.42)
2536
+ It follows from (6.38)-(6.42) and Lemma 4.4 that for λ ∈ Λδ0\{b(y∗)}, ι ∈ {±}, 0 < ǫ <
2537
+ min{|λ − b(y∗)|, ǫ0},
2538
+ ��φι
2539
+ k,ǫ(y, λ; 2)
2540
+ ��
2541
+ XN,̺k ≲ (δ ∧ |k|−1)−4|k|5/2��ω0k
2542
+ ��
2543
+ H1
2544
+ k(I).
2545
+ (6.43)
2546
+ Using (6.34), we get that for y ∈ I,
2547
+
2548
+ k2 − ∂2
2549
+ y +
2550
+ b′′(y)
2551
+ b(y) − λ + iǫ
2552
+ ��
2553
+ ∂2
2554
+ λφ+
2555
+ k,ǫ(y, λ) − ∂2
2556
+ λφ−
2557
+ k,ǫ(y, λ)
2558
+
2559
+ =
2560
+ 8
2561
+
2562
+ j=4
2563
+
2564
+ h+
2565
+ k,ǫ(y, λ; j) − h−
2566
+ k,ǫ(y, λ; j)
2567
+
2568
+ .
2569
+ (6.44)
2570
+ Denoting D2φk,ǫ(y, λ), h ∈ I, λ ∈ Λδ0\{b(y∗)}, as the solution to
2571
+
2572
+ k2 − ∂2
2573
+ y +
2574
+ b′′(y)
2575
+ b(y) − λ + iιǫ
2576
+
2577
+ D2φk,ǫ(y, λ) =
2578
+ 5
2579
+
2580
+ j=4
2581
+
2582
+ h+
2583
+ k,ǫ(y, λ; j) − h−
2584
+ k,ǫ(y, λ; j)
2585
+
2586
+ ,
2587
+ (6.45)
2588
+ for y ∈ I with zero Dirichlet boundary condition.
2589
+ We note the identity that for y ∈ I, λ ∈ Σδ0\{b(y∗)},
2590
+ ∂2
2591
+ λψk(y, λ) = lim
2592
+ ǫ→0+ D2φk,ǫ(y, λ).
2593
+ (6.46)
2594
+
2595
+ LINEAR INVISCID DAMPING AND VORTICITY DEPLETION FOR NON-MONOTONIC SHEAR FLOWS 27
2596
+ We can reformulate (6.45) as the integral equation for y ∈ I
2597
+ D2φk,ǫ(y, λ) + T ∗
2598
+ k (λ + iǫ)D2φk,ǫ(y, λ)
2599
+ =
2600
+ � 1
2601
+ 0
2602
+ Gk(y, z; λ + iǫ)ϕδ(z)
2603
+ 5
2604
+
2605
+ j=4
2606
+
2607
+ h+
2608
+ k,ǫ(z, λ; j) − h−
2609
+ k,ǫ(z, λ; j)
2610
+
2611
+ dz := R∗
2612
+ k,ǫ(y, λ).
2613
+ (6.47)
2614
+ We can write for λ ∈ Σδ0\{b(y∗)}, 0 < ǫ < min{|λ − b(y∗)|, ǫ0}, y ∈ I,
2615
+ D2φk,ǫ(y, λ) := D2φk,ǫ(y, λ; 2) + R∗
2616
+ k,ǫ(y, λ) − T ∗
2617
+ k (λ + iǫ)R∗
2618
+ k,ǫ(y, λ).
2619
+ (6.48)
2620
+ Then D2φk,ǫ(y, λ; 2) satisfies for y ∈ I, λ ∈ Σδ0\{b(y∗)},
2621
+ D2φk,ǫ(y, λ; 2) + T ∗
2622
+ k (λ + iǫ)D2φk,ǫ(y, λ; 2) =
2623
+
2624
+ T ∗
2625
+ k (λ + iǫ)
2626
+ �2R∗
2627
+ k,ǫ(y, λ).
2628
+ (6.49)
2629
+ The desired bounds (6.37) follow from (6.47)-(6.49), and Lemma 3.2 with X = XL,̺k, using
2630
+ also the bound
2631
+ ���
2632
+ T ∗
2633
+ k (λ + iǫ)
2634
+ �2R∗
2635
+ k,ǫ(·, λ)
2636
+ ��
2637
+ XL,̺k ≲
2638
+
2639
+ δ ∧ 1
2640
+ |k|
2641
+ �−4
2642
+ |k|5/2∥ω0k∥H3
2643
+ k(I).
2644
+ (6.50)
2645
+
2646
+ 7. Proof of Theorem 1.2
2647
+ In this section, we prove Theorem 1.2. We can assume that t ≥ 1. We first give the proof of
2648
+ (1.8)-(1.9). Using the representation formula (2.7), we have
2649
+ ψk(t, y) =
2650
+ 1
2651
+ 2πi lim
2652
+ ǫ→0+
2653
+
2654
+ Σ
2655
+ e−ikλt�
2656
+ ψ+
2657
+ k,ǫ(y, λ) − ψ−
2658
+ k,ǫ(y, λ)
2659
+
2660
+
2661
+ = −
2662
+ 1
2663
+ 2πik2t2 lim
2664
+ ǫ→0+
2665
+
2666
+ Σ
2667
+ e−ikλt�
2668
+ ∂2
2669
+ λψ+
2670
+ k,ǫ(y, λ) − ∂2
2671
+ λψ−
2672
+ k,ǫ(y, λ)
2673
+
2674
+ dλ.
2675
+ (7.1)
2676
+ Fix Φ∗ ∈ C∞
2677
+ 0 (Σδ0) with Φ∗ ≡ 1 on Σ2δ0/3. We can decompose for t ≥ 1, y ∈ [0, 1],
2678
+ ψk(t, y) := ψ1
2679
+ k(t, y) + ψ2
2680
+ k(t, y),
2681
+ (7.2)
2682
+ where
2683
+ ψ1
2684
+ k(t, y) := −
2685
+ 1
2686
+ 2πik2t2 lim
2687
+ ǫ→0+
2688
+
2689
+ Σ
2690
+ e−ikλt(1 − Φ∗(λ))
2691
+
2692
+ ∂2
2693
+ λψι
2694
+ k,ǫ(y, λ) − ∂2
2695
+ λψ−
2696
+ k,ǫ(y, λ)
2697
+
2698
+ dλ,
2699
+ ψ2
2700
+ k(t, y) := −
2701
+ 1
2702
+ 2πik2t2 lim
2703
+ ǫ→0+
2704
+
2705
+ Σ
2706
+ e−ikλtΦ∗(λ)
2707
+
2708
+ ∂2
2709
+ λψι
2710
+ k,ǫ(y, λ) − ∂2
2711
+ λψ−
2712
+ k,ǫ(y, λ)
2713
+
2714
+ dλ.
2715
+ (7.3)
2716
+ For (1.8), it suffices to prove that for σ ∈ {1, 2}, k ∈ Z\{0} and t ≥ 1,
2717
+ ��ψσ
2718
+ k(t, ·)
2719
+ ��
2720
+ L2([0,1]) ≲ |k|3
2721
+ t2 ∥ω0k∥H3
2722
+ k([0,1]).
2723
+ (7.4)
2724
+ The case σ = 1 in (7.4) corresponding to the non-degenerate case is analogous to the case of
2725
+ monotonic shear flows, see [14], and follow from Lemma 5.1-Lemma 5.3. We focus on the main
2726
+ new case σ = 2 in (7.4). Denote for k ∈ Z\{0},
2727
+ Mk := |k|5/2∥ω0k∥H3
2728
+ k([0,1]).
2729
+ (7.5)
2730
+ Our main tools are Lemmas 6.1, Lemma 6.2 and Lemma 6.3, which imply the following bounds
2731
+ for y ∈ [0, 1], λ ∈ Σδ0.
2732
+
2733
+ 28
2734
+ ALEXANDRU D. IONESCU, SAMEER IYER, AND HAO JIA
• If |λ − b(y∗)|^{1/2} < |y − y∗|/20, then
  |ψ_k(y, λ)| ≲ [min(|λ − b(y∗)|^{1/2}, |k|⁻¹)]^{11/4} (|y − y∗|⁻¹ + |k|) M_k,
  |∂²_λψ_k(y, λ)| ≲ [min(|λ − b(y∗)|^{1/2}, |k|⁻¹)]^{−5/4} (|y − y∗|⁻¹ + |k|) M_k;   (7.6)

• If |y − y∗|/20 < |λ − b(y∗)|^{1/2} < 20|y − y∗|, then
  |ψ_k(y, λ)| ≲ [min(|λ − b(y∗)|^{1/2}, |k|⁻¹)]^{5/4} |λ − b(y∗)|^{1/4} M_k,
  |ψ_k(y, λ) − ψ_k(y, b(y))| ≲ |λ − b(y)|^{1/2} |λ − b(y∗)|^{3/8} M_k,
  ‖∂²_λψ_k(·, λ) − Λ₂(·, λ)‖_{L²(|y−y∗|≈|λ−b(y∗)|^{1/2})} ≲ (|λ − b(y∗)|^{−1/2} + |k|)^{9/4} |λ − b(y∗)|^{1/4} M_k;   (7.7)

• If |λ − b(y∗)|^{1/2} > 20|y − y∗|, then
  |ψ_k(y, λ)| ≲ |λ − b(y∗)|^{1/4} [min(|λ − b(y∗)|^{1/2}, |k|⁻¹)]^{5/4} M_k,
  ‖∂²_λψ_k(·, λ) − Λ₂(·, λ)‖_{L²(|y−y∗|<|λ−b(y∗)|^{1/2}/20)} ≲ (|λ − b(y∗)|^{−1/2} + |k|)^{9/4} |λ − b(y∗)|^{1/4} M_k.   (7.8)
It follows from (7.6)-(7.8) that for y ∈ [0, 1], t ≥ 1,
| ∫_R e^{−ikλt} Φ∗(λ) Λ₂(y, λ) dλ | ≲ |y − y∗|^{−1/4} max(1, |k|^{1/2}|y − y∗|^{1/2}) M_k,   (7.9)
(note that the right-hand side of (7.9) is square integrable in y on [0, 1]), and, by considering the cases |λ − b(y∗)| ≪ |y − y∗|², |λ − b(y∗)| ≈ |y − y∗|² and |λ − b(y∗)| ≫ |y − y∗|², also that for y ∈ [0, 1], t ≥ 1,
‖ ∫_R e^{−ikλt} Φ∗(λ) [∂²_λψ_k(y, λ) − Λ₂(y, λ)] dλ ‖_{L²([0,1])} ≲ |k|^{9/4} M_k.   (7.10)
The desired bound (7.4) for σ = 2 follows from (7.9)-(7.10).
The proof of (1.9) is similar to the proof of (1.8), using Lemma 6.1 and Lemma 6.2.
We now turn to the proof of the depletion bounds (1.11). Assume that k ∈ Z\{0}. Applying −k² + ∂²_y to ψ_k(t, y) in (2.7), and using (2.8), we get that for y ∈ [0, 1], t ≥ 1,
ω_k(t, y) = ω*_k(t, y) + ω**_k(t, y),   (7.11)
where
ω*_k(t, y) := (1/2πi) lim_{ǫ→0+} ∫_Σ e^{−ikλt} (1 − Φ∗(λ)) [ (b″(y)ψ⁺_{k,ǫ}(y, λ) − ω_{0k}(y))/(b(y) − λ + iǫ) − (b″(y)ψ⁻_{k,ǫ}(y, λ) − ω_{0k}(y))/(b(y) − λ − iǫ) ] dλ,
ω**_k(t, y) := (1/2πi) lim_{ǫ→0+} ∫_Σ e^{−ikλt} Φ∗(λ) [ (b″(y)ψ⁺_{k,ǫ}(y, λ) − ω_{0k}(y))/(b(y) − λ + iǫ) − (b″(y)ψ⁻_{k,ǫ}(y, λ) − ω_{0k}(y))/(b(y) − λ − iǫ) ] dλ.   (7.12)
We have the bound, for t ≥ 1,
‖ω*_k(t, ·)‖_{L∞([0,1])} ≲ |k|² M_k.   (7.13)
For |y − y∗| < δ0/10, t ≥ 1, since (b(y) − λ + iιǫ) with ι ∈ {±} is not singular in this case, we have in addition, by integration by parts, that
|ω*_k(t, y)| ≲ |k|² t⁻¹ M_k.   (7.14)
We now turn to ω**_k(t, y). Using (6.1), we can write for y ∈ [0, 1], t ≥ 1,
2πi ω**_k(t, y) = lim_{ǫ→0+} ∫_R e^{−ikλt} Φ∗(λ) [ (φ⁺_{k,ǫ}(y, λ) − (1 − Ψ(y))ω_{0k}(y))/(b(y) − λ + iǫ) − (φ⁻_{k,ǫ}(y, λ) − (1 − Ψ(y))ω_{0k}(y))/(b(y) − λ − iǫ) ] dλ
= lim_{ǫ→0+} ∫_R e^{−ikλt} Φ∗(λ) [ φ⁺_{k,ǫ}(y, λ)/(b(y) − λ + iǫ) − φ⁻_{k,ǫ}(y, λ)/(b(y) − λ − iǫ) ] dλ + W_k(t, y),   (7.15)
where W_k(t, y) satisfies the bound, for t ≥ 1,
‖W_k(t, ·)‖_{L∞([0,1])} ≲ t⁻¹ M_k,   (7.16)
which follows from a simple integration by parts argument. We decompose, for y ∈ [0, 1]\{y∗},
(ω**_k(t, y) − W_k(t, y))/(2πi)
= (1/2πi) lim_{ǫ→0+} ∫_R e^{−ikλt} Φ∗(λ) [ ψ_k(y, λ)/(b(y) − λ + iǫ) ] dλ
+ (1/2πi) lim_{ǫ→0+} ∫_R e^{−ikλt} Φ∗(λ) φ⁻_{k,ǫ}(y, λ) [ 1/(b(y) − λ + iǫ) − 1/(b(y) − λ − iǫ) ] dλ.   (7.17)
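The second term in (7.17) is where the spectral measure concentrates on the critical line. As background (the classical Sokhotski-Plemelj formula, recalled here for convenience and not quoted from the paper):

```latex
% Sokhotski-Plemelj: in the sense of distributions on \mathbb{R},
\lim_{\epsilon \to 0^+} \Big( \frac{1}{x + i\epsilon} - \frac{1}{x - i\epsilon} \Big)
  \;=\; -2\pi i\, \delta_0(x),
% so, formally, the second integral in (7.17) localizes \lambda at b(y),
% which is why it is controlled pointwise by the bounds (7.6)-(7.8) in (7.18) below.
```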
It follows from (7.6)-(7.8) that
| (1/2πi) lim_{ǫ→0+} ∫_R e^{−ikλt} Φ∗(λ) φ⁻_{k,ǫ}(y, λ) [ 1/(b(y) − λ + iǫ) − 1/(b(y) − λ − iǫ) ] dλ | ≲ |y − y∗|^{7/4} M_k.   (7.18)
For γ ∈ (1, ∞) to be fixed below, by considering the three ranges (I) |λ − b(y∗)| ≲ |y − y∗|², (II) |λ − b(y∗)| ≥ γ|y − y∗|², and (III) |y − y∗|² ≪ |λ − b(y∗)| < γ|y − y∗|², and using Lemma 6.2 and Lemma 6.3, we get that
| (1/2πi) lim_{ǫ→0+} ∫_R e^{−ikλt} Φ∗(λ) [ ψ_k(y, λ)/(b(y) − λ + iǫ) ] dλ |
≲ [ |y − y∗|^{7/4} (1 + |k|^{1/2}|y − y∗|^{1/2}) + (1/(|k|t)) (|k|^{1/2} + γ^{−1/8}|y − y∗|^{−1/4}) + γ^{7/8}|y − y∗|^{7/4} ] M_k.   (7.19)
In the above, we used integration by parts to get decay in t in range (II). Optimizing in γ, we get that for t ≥ 1,
(i) if t|y − y∗|² ≲ 1,
| (1/2πi) lim_{ǫ→0+} ∫_R e^{−ikλt} Φ∗(λ) [ ψ_k(y, λ)/(b(y) − λ + iǫ) ] dλ | ≲ [ t⁻¹ + |k|^{1/2}|y − y∗|^{7/4} + t^{−7/8} ] M_k;   (7.20)
(ii) if t|y − y∗|² ≫ 1,
| (1/2πi) lim_{ǫ→0+} ∫_R e^{−ikλt} Φ∗(λ) [ ψ_k(y, λ)/(b(y) − λ + iǫ) ] dλ | ≲ [ |y − y∗|^{7/4} (1 + |k|^{1/2}|y − y∗|^{1/2}) + 1/(|k|^{1/2}t^{7/8}) + |y − y∗|^{7/4} ] M_k.   (7.21)
The desired bounds follow from (7.16), (7.18) and (7.20)-(7.21). Theorem 1.2 is now proved.
References

[1] V. Arnold and B. Khesin, Topological Methods in Hydrodynamics, Springer-Verlag, New York, 1998.
[2] J. Bedrossian and N. Masmoudi, Inviscid damping and the asymptotic stability of planar shear flows in the 2D Euler equations, Publ. Math. Inst. Hautes Études Sci. 122 (2015), 195-300.
[3] J. Bedrossian, M. Coti Zelati, and V. Vicol, Vortex axisymmetrization, inviscid damping, and vorticity depletion in the linearized 2D Euler equations, Annals of PDE 5, no. 4 (2019).
[4] J. Bedrossian, Nonlinear echoes and Landau damping with insufficient regularity, arXiv:1605.06841, 2016.
[5] F. Bouchet and H. Morita, Large time behavior and asymptotic stability of the 2D Euler and linearized Euler equations, Physica D 239 (2010), 948-966.
[6] K. Case, Stability of inviscid plane Couette flow, Phys. Fluids 3 (1960), 143-148.
[7] Y. Deng and N. Masmoudi, Long time instability of the Couette flow in low Gevrey spaces, preprint (2018), arXiv:1803.01246.
[8] L. Faddeev, On the theory of the stability of plane-parallel flows of an ideal fluid, Zapiski Nauchnykh Seminarov Leningradskogo Otdeleniya Matematicheskogo Instituta im. V. A. Steklova Akademii Nauk SSSR, Vol. 21 (1971), 164-172.
[9] E. Grenier, T. Nguyen, F. Rousset, and A. Soffer, Linear inviscid damping and enhanced viscous dissipation of shear flows by using the conjugate operator method, arXiv:1804.08291.
[10] A. Ionescu and H. Jia, Inviscid damping near the Couette flow in a channel, Comm. Math. Phys. 374 (2020), no. 3, 2015-2096.
[11] A. Ionescu and H. Jia, Axi-symmetrization near point vortex solutions for the 2D Euler equation, Comm. Pure Appl. Math. 75 (2022), no. 4, 818-891.
[12] A. Ionescu and H. Jia, Nonlinear inviscid damping near monotonic shear flows, Acta Math. (to appear), see also arXiv:2001.03087.
[13] A. Ionescu and H. Jia, On the nonlinear stability of shear flows and vortices, Proceedings of the ICM 2022, to appear.
[14] H. Jia, Linear inviscid damping near monotone shear flows, SIAM J. Math. Anal. 52 (2020), no. 1, 623-652.
[15] H. Jia, Uniform linear inviscid damping and enhanced dissipation near monotonic shear flows in high Reynolds number regime (I): the whole space case, preprint 2022, arXiv:2207.10987.
[16] H. Jia, Linear inviscid damping in Gevrey spaces, Arch. Ration. Mech. Anal. 235 (2020), no. 2, 1327-1355.
[17] A. Ionescu and H. Jia, Linear vortex symmetrization: the spectral density function, Arch. Ration. Mech. Anal. 246 (2022), no. 1, 61-137.
[18] L. Kelvin, Stability of fluid motion: rectilinear motion of viscous fluid between two plates, Phil. Mag. 24 (1887), 155.
[19] G. Kirchhoff, Vorlesungen über mathematische Physik, Teubner, Leipzig, 1876.
[20] Z. Lin and C. Zeng, Inviscid dynamical structures near Couette flow, Arch. Ration. Mech. Anal. 200 (2011), 1075-1097.
[21] Z. Lin, Instability of some ideal plane flows, SIAM J. Math. Anal. 35, no. 2, 318-356.
[22] X. Liu and C. Zeng, Capillary gravity water waves linearized at monotone shear flows: eigenvalues and inviscid damping, preprint 2021, arXiv:2110.12604.
[23] N. Masmoudi and W. Zhao, Nonlinear inviscid damping for a class of monotone shear flows in finite channel, preprint (2020), arXiv:2001.08564.
[24] C. Mouhot and C. Villani, On Landau damping, Acta Math. 207 (2011), 29-201.
[25] W. Orr, The stability or instability of steady motions of a perfect liquid and of a viscous liquid, Part I: a perfect liquid, Proc. R. Ir. Acad., A Math. Phys. Sci. 27 (1907), 9-68.
[26] L. Rayleigh, On the stability or instability of certain fluid motions, Proc. Lond. Math. Soc. S1-11 (1880), 57.
[27] S. Rosencrans and D. Sattinger, On the spectrum of an operator occurring in the theory of hydrodynamics stability, J. Math. Phys. 45 (1966), 289-300.
[28] S. Stepin, Nonself-adjoint Friedrichs model in hydrodynamic stability, Functional Analysis and Its Applications 29 (1995), no. 2; translated from Funktsional'nyi Analiz i Ego Prilozheniya 29 (1995), no. 2, 22-35.
[29] T. Yamanaka, A new higher order chain rule and Gevrey class, Ann. Global Anal. Geom. 7 (1989), 179-203.
[30] D. Wei, Z. Zhang, and W. Zhao, Linear inviscid damping for a class of monotone shear flow in Sobolev spaces, Comm. Pure Appl. Math. 71 (2018), 617-687.
[31] D. Wei, Z. Zhang, and W. Zhao, Linear inviscid damping and vorticity depletion for shear flows, Annals of PDE 5, no. 3 (2019), see also arXiv:1704.00428.
[32] D. Wei, Z. Zhang, and W. Zhao, Linear inviscid damping and enhanced dissipation for the Kolmogorov flow, Advances in Mathematics 362 (2020), 106963.
[33] D. Wei, Diffusion and mixing in fluid flow via the resolvent estimate, Science China Mathematics 64 (2021), 507-518.
[34] C. Zillinger, Linear inviscid damping for monotone shear flows, Trans. Amer. Math. Soc. 369 (2017), 8799-8855.
[35] C. Zillinger, Linear inviscid damping for monotone shear flows in a finite periodic channel, boundary effects, blow-up and critical Sobolev regularity, Arch. Ration. Mech. Anal. 221 (2016), 1449-1509.
[36] M. Coti Zelati and C. Zillinger, On degenerate circular and shear flows: the point vortex and power law circular flows, Communications in Partial Differential Equations 44 (2019), no. 2, 110-155.

Princeton University
Email address: [email protected]

University of California, Davis
Email address: [email protected]

University of Minnesota
Email address: [email protected]
6tAyT4oBgHgl3EQfcvc9/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
8NAzT4oBgHgl3EQfgfyv/content/2301.01470v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8dde9e09bd0f7d8ff4ef8feeb7fb766e7048dda4c7789a44d12bf25054de2fda
3
+ size 5602782
8NAzT4oBgHgl3EQfgfyv/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7ed2ccc80b79fec4eac30a6bebeb7e64e88cae01e51fbf931cdfe03f93d5d15
3
+ size 1769517
9NAyT4oBgHgl3EQf3Plu/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df4eec81f3ec095ac8e7ff53b9ca150c2ede8f512682ed39eaecd6d4c2da276c
3
+ size 5439533
9NAyT4oBgHgl3EQf3Plu/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e43679f7390b23f887b16755487fc8d2f090f7e4b0c136182cd6aab7255463e
3
+ size 227851
A9AzT4oBgHgl3EQfTPz-/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f5cc4b4aab472e6e31e14be8386019c0ac0aa0458cb91e564543f70d2a23074
3
+ size 2752557
A9AzT4oBgHgl3EQfTPz-/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73489eeb0d65636865da7a9bee0bbc67856866f747be6e294611f0bff2612e1e
3
+ size 103778
A9AzT4oBgHgl3EQfhv18/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6d87ac3a0af276d4b252cfa6c3c678960625978da738ef1d103c6eb950081c4
3
+ size 4784173
CtE0T4oBgHgl3EQfgQFr/content/2301.02415v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ededbae1ba036e05993e7fa2a90a90d7c0a93016f1887b4b77ed0a3c4b684c3e
3
+ size 1009631
CtE0T4oBgHgl3EQfgQFr/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f67a6f5b1137022563619af99feff42e34e859b9eb1451a5f9a2c3182c99924
3
+ size 170137
E9FJT4oBgHgl3EQfCizA/content/tmp_files/2301.11430v1.pdf.txt ADDED
@@ -0,0 +1,1472 @@
1
+ arXiv:2301.11430v1 [math.AP] 26 Jan 2023
2
+ Vortex sheet solutions for the Ginzburg-Landau system
3
+ in cylinders: symmetry and global minimality
4
+ Radu Ignat∗
5
+ Mircea Rus†
6
+ January 30, 2023
7
+ Abstract
8
+ We consider the Ginzburg-Landau energy Eε for RM-valued maps defined in a
9
+ cylinder shape domain BN ×(0, 1)n satisfying a degree-one vortex boundary condition
10
+ on ∂BN × (0, 1)n in dimensions M ≥ N ≥ 2 and n ≥ 1.
11
+ The aim is to study
12
+ the radial symmetry of global minimizers of this variational problem. We prove the
13
+ following: if N ≥ 7, then for every ε > 0, there exists a unique global minimizer
14
+ which is given by the non-escaping radially symmetric vortex sheet solution uε(x, z) =
15
+ (fε(|x|) x
16
+ |x|, 0RM−N), ∀x ∈ BN that is invariant in z ∈ (0, 1)n. If 2 ≤ N ≤ 6 and M ≥
17
+ N + 1, the following dichotomy occurs between escaping and non-escaping solutions:
18
+ there exists εN > 0 such that
19
+ • if ε ∈ (0, εN), then every global minimizer is an escaping radially symmetric
20
+ vortex sheet solution of the form R˜uε where ˜uε(x, z) = ( ˜fε(|x|) x
21
+ |x|, 0RM−N−1, gε(|x|))
22
+ is invariant in z-direction with gε > 0 in (0, 1) and R ∈ O(M) is an orthogonal
23
+ transformation keeping invariant the space RN × {0RM−N};
24
+ • if ε ≥ εN, then the non-escaping radially symmetric vortex sheet solution
25
+ uε(x, z) = (fε(|x|) x
26
+ |x|, 0RM−N), ∀x ∈ BN, z ∈ (0, 1)n is the unique global minimizer;
27
+ moreover, there are no bounded escaping solutions in this case.
28
+ We also discuss the problem of vortex sheet SM−1-valued harmonic maps.
29
+ Keywords:
30
+ vortex, uniqueness, symmetry, minimizers, Ginzburg-Landau equation,
31
+ harmonic maps.
32
+ MSC: 35A02, 35B06, 35J50.
33
+ Contents
34
+ 1
35
+ Introduction and main results
36
+ 2
37
+ 1.1
38
+ Minimality of the RN-valued vortex sheet solution
39
+ . . . . . . . . . . . . . .
40
+ 2
41
+ 1.2
42
+ Escaping RM-valued vortex sheet solutions when M ≥ N + 1 . . . . . . . .
43
+ 5
44
+ ∗Institut de Math´ematiques de Toulouse & Institut Universitaire de France, UMR 5219, Universit´e de
45
+ Toulouse, CNRS, UPS IMT, F-31062 Toulouse Cedex 9, France. Email: [email protected]
46
+ †Department of Mathematics, Technical University of Cluj-Napoca, 400027 Cluj-Napoca, Romania.
47
48
+ 1
49
+
50
+ 2
51
+ The non-escaping vortex sheet solution. Proof of Theorems 1 and 3
52
+ 7
53
+ 3
54
+ Properties of escaping vortex sheet solutions when M ≥ N + 1
55
+ 11
56
+ 3.1
57
+ Minimality of escaping vortex sheet solutions . . . . . . . . . . . . . . . . .
58
+ 11
59
+ 3.2
60
+ Escaping radial profile . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
61
+ 14
62
+ 3.3
63
+ Proof of Theorem 4 . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
64
+ 16
65
+ A Appendix. Vortex sheet SM−1-valued harmonic maps in cylinders
66
+ 17
67
+ 1
68
+ Introduction and main results
69
+ In this paper, we consider the following Ginzburg-Landau type energy functional
70
+ Eε(u) =
71
+
72
+
73
+ �1
74
+ 2|∇u|2 + 1
75
+ 2ε2 W(1 − |u|2)
76
+
77
+ dX,
78
+ (1)
79
+ where ε > 0, X = (x, z) ∈ Ω = BN × (0, 1)n is a cylinder shape domain with BN the unit
80
+ ball in RN, n ≥ 1, N ≥ 2 and the potential W ∈ C2((−∞, 1]; R) satisfies
81
+ W(0) = 0, W(t) > 0 for all t ∈ (−∞, 1] \ {0} and W is convex.
82
+ (2)
83
+ (The prototype potential is W(t) = t2
84
+ 2 for t ≤ 1.) We investigate the global minimizers of
85
+ the energy Eε in the set of RN-valued maps:
86
+ AN := {u ∈ H1(Ω; RN) : u(x, z) = x for every x ∈ ∂BN = SN−1, z ∈ (0, 1)n}.
87
+ The boundary assumption u(x, z) = x for every x ∈ SN−1 and every z ∈ (0, 1)n is referred
88
+ in the literature as the degree-one vortex boundary condition.
89
+ The direct method in the calculus of variations yields the existence of a global minimizer
90
+ uε of Eε over AN for all range of ε > 0. Moreover, any minimizer uε satisfies |uε| ≤ 1 in
91
+ Ω, uε belongs to C1(Ω; RN) and solves the system of PDEs (in the sense of distributions)
92
+ with mixed Dirichlet-Neumann boundary conditions:
93
+
94
+
95
+
96
+ −∆uε = 1
97
+ ε2uε W ′(1 − |uε|2)
98
+ in Ω,
99
+ ∂uε
100
+ ∂z = 0
101
+ on BN × ∂(0, 1)n,
102
+ u(x, z) = x
103
+ on ∂BN × (0, 1)n.
104
+ (3)
105
+ 1.1
106
+ Minimality of the RN-valued vortex sheet solution
107
+ The first goal of this paper is to prove the uniqueness and radial symmetry of the global
108
+ minimizer of Eε in AN for all ε > 0 in dimensions N ≥ 7 and n ≥ 1. In fact, in these
109
+ dimensions, we show that the global minimizer of Eε in AN is unique and given by the
110
+ following radially symmetric critical point of Eε that is invariant in z: 1
111
+ uε(x, z) = fε(|x|) x
112
+ |x|
113
+ for all x ∈ BN and z ∈ (0, 1)n,
114
+ (4)
115
+ 1If n = 0 and N ≥ 2, then SO(N) induces a group action on AN given by u(x) �→ R−1u(Rx) for every
116
+ x ∈ BN, R ∈ SO(N) and u ∈ AN under which the energy Eε and the vortex boundary condition are
117
+ invariant. Then every bounded critical point of Eε in AN that is invariant under this SO(N) group action
118
+ has the form (4), see e.g. [8, Lemma A.4].
119
+ 2
120
+
121
+ where the radial profile fε : [0, 1] → R in r = |x| is the unique solution to the ODE:
122
+ � −f ′′
123
+ ε − N−1
124
+ r f ′
125
+ ε + N−1
126
+ r2 fε = 1
127
+ ε2fε W ′(1 − f 2
128
+ ε )
129
+ for r ∈ (0, 1),
130
+ fε(0) = 0, fε(1) = 1.
131
+ (5)
132
+ We recall that the unique radial profile fε satisfies fε > 0 and f ′
133
+ ε > 0 in (0, 1) (see
134
+ e.g. [7, 9, 8]). Note that the zero set of uε is given by the n-dimensional vortex sheet
135
+ {0RN } × (0, 1)n in Ω (in particular, if n = 0, it is a vortex point, while for n = 1, it is a
136
+ vortex filament); therefore, uε in (4) is called (radially symmetric) vortex sheet solution to
137
+ the Ginzburg-Landau system (3).
138
+ Theorem 1. Assume that W satisfies (2) and n ≥ 1. If N ≥ 7, then uε given in (4) is
139
+ the unique global minimizer of Eε in AN for every ε > 0.
140
+ The proof is reminiscent of the works of Ignat-Nguyen-Slastikov-Zarnescu [12, 11]
141
+ studying uniqueness and symmetry of minimizers of the Ginzburg-Landau functionals for
142
+ RM-valued maps defined on smooth N-dimensional domains, where M is not necessarily
143
+ equal to N. The idea is to analyze Eε(u) for an arbitrary map u and to exploit the convex-
144
+ ity of W to lower estimate the excess energy w.r.t. Eε(uε) by a suitable quadratic energy
145
+ functional depending on u − uε. This quadratic functional comes from the linearized PDE
146
+ at uε and can be handled by a factorization argument. The positivity of the excess energy
147
+ then follows by a Hardy-type inequality holding true only in high dimensions N ≥ 7. This
148
+ is similar to the result of J¨ager and Kaul [14] on the minimality of the equator map for
149
+ the harmonic map problem in dimension N ≥ 7 that is proved using a certain inequality
150
+ involving the sharp constant in the Hardy inequality.
151
+ We expect that our result remains valid in dimensions 2 ≤ N ≤ 6:
152
+ Open problem 2. Assume that W satisfies (2), n ≥ 1 and 2 ≤ N ≤ 6. Is it true that
153
+ for every ε > 0, uε given in (4) is the unique global minimizer of Eε in AN?
154
+ It is well known that the uniqueness of uε holds true for large enough ε > 0 in any
155
+ dimension N ≥ 2. Indeed, denoting by λ1 the first eigenvalue of −∆x in BN with zero
156
+ Dirichlet boundary condition, then for any ε >
157
+
158
+ W ′(1)/λ1, Eε is strictly convex in AN
159
+ (see e.g., [1, Theorem VIII.7], [12, Remark 3.3]) and thus has a unique critical point in
160
+ AN that is the global minimizer of our problem. We improve this result as follows: for
161
+ the radial profile fε in (5), we denote by ℓ(ε) the first eigenvalue of the operator
162
+ Lε = −∆x − 1
163
+ ε2 W ′(1 − f 2
164
+ ε )
165
+ (6)
166
+ acting on maps defined in BN with zero Dirichlet boundary condition. It is proved in [8,
167
+ Lemma 2.3] that if 2 ≤ N ≤ 6 and W ∈ C2((−∞, 1]) satisfies (2), then the first eigenvalue
168
+ ℓ(ε) is a continuous function in ε and there exists εN ∈ (0, ∞) such that
169
+ ℓ(ε) < 0 in (0, εN),
170
+ ℓ(εN) = 0
171
+ and
172
+ ℓ(ε) > 0 in (εN, ∞).
173
+ (7)
174
+ 3
175
+
176
+ Note that2 0 = ℓ(εN) > λ1 −
177
+ 1
178
+ ε2
179
+ N W ′(1) yielding
180
+ εN <
181
+
182
+ W ′(1)/λ1.
183
+ Theorem 3. Assume that W satisfies (2), n ≥ 1 and 2 ≤ N ≤ 6. If ε ≥ εN, then uε
184
+ given in (4) is a global minimizer of Eε in AN. Moreover, if either ε > εN, or (ε = εN
185
+ and W is in addition strictly convex), then uε is the unique global minimizer of Eε in AN.
186
+ The case ε < εN is still not solved as stated in Open Problem 2. Let us summarize
187
+ some known results:
188
+ I. The case of n = 0 and Ω = BN (we also discuss here the problem for Ω = RN). In
189
+ this case, the above question was raised in dimension N = 2 for the disk Ω = B2 in
190
+ the seminal book of Bethuel, Brezis and H´elein [1, Problem 10, page 139], and in general
191
+ dimensions N ≥ 2 and also for the blow-up limiting problem around the vortex point
192
+ (when the domain Ω is the whole space RN and by rescaling, ε can be assumed equal to 1)
193
+ in an article of Brezis [3, Section 2]. For sufficiently small ε > 0 and for the disk domain
194
+ Ω = B2, Pacard and Rivi`ere [20, Theorem 10.2] showed that Eε has a unique critical point
195
+ in A2 and so, it is given by the radially symmetric solution uε in (4) (for n = 0). For
196
+ N ≥ 7, Ω = BN and any ε > 0, it is proved in [11] that Eε has a unique minimizer in AN
197
+ which is given by the radially symmetric solution uε in (4) (for n = 0). For 2 ≤ N ≤ 6
198
+ and Ω = BN, Ignat-Nguyen [8] proved that for any ε > 0, uε is a local minimizer of Eε
199
+ in A (which is an extension of the result of Mironescu [18] in dimension N = 2). Also,
200
+ Mironescu [19] showed in dimension N = 2 that, when B2 is replaced by R2 and ε = 1, a
201
+ local minimizer of Eε satisfying a degree-one boundary condition at infinity is unique (up
202
+ to translation and suitable rotation). This was extended in dimension N = 3 by Millot and
203
+ Pisante [17] and in dimensions N ≥ 4 by Pisante [21] in the case of the blow-up limiting
204
+ problem on RN and ε = 1. All these results (holding for n = 0) are related to the study of
205
+ the limit problem obtained by sending ε → 0 when the Ginzburg-Landau problem on the
206
+ unit ball ‘converges’ to the harmonic map problem from BN into the unit sphere SN−1.
207
+ For that harmonic map problem, the vortex boundary condition yields uniqueness of the
208
+ minimizing harmonic SN−1-valued map x �→
209
+ x
210
+ |x| if N ≥ 3; this is proved by Brezis, Coron
211
+ and Lieb [4] in dimension N = 3 and by Lin [15] in any dimension N ≥ 3; we also mention
212
+ J¨ager and Kaul [14] in dimension N ≥ 7 for the equator map x ∈ BN �→ ( x
213
+ |x|, 0) ∈ SN.
214
+ II. The case of n ≥ 1 and Ω = BN × (0, 1)n. As we explain in Remark 6 below, for some
215
+ ε > 0, if the minimality of the radially symmetric solution uε in (4) holds in the case n = 0
216
+ (so, for Ω = BN), then this implies the minimality of uε in Ω = BN ×(0, 1)n also for every
217
+ dimension n ≥ 1. In particular, the result of Pacard-Rivi`ere [20, Theorem 10.2] for n = 0
218
+ and N = 2 yields the minimality of uε in (4) defined in B2×(0, 1)n for every n ≥ 1 if ε > 0
219
+ is sufficiently small. Also, the result of Ignat-Nguyen-Slastikov-Zarnescu [11, Theorem 1]
220
+ 2Indeed, if v ∈ H1
221
+ 0(BN) is a first eigenfunction of LεN in BN such that ∥v∥L2(BN ) = 1 then
222
+ λ1 ≤
223
+
224
+ BN |∇xv|2 dx = 1
225
+ ε2
226
+ N
227
+
228
+ BN W ′(1 − f 2
229
+ εN )v2 dx < W ′(1)
230
+ ε2
231
+ N
232
+ because ℓ(εN) = 0, 0 < fεN < 1 in (0, 1) and (2) implies W ′(0) = 0 and W ′(t) > 0 for t ∈ (0, 1].
233
+ 4
234
+
235
+ for n = 0, N ≥ 7 and any ε > 0 generalizes to dimension n ≥ 1 for Ω = BN × (0, 1)n (see
236
+ the proof of Theorem 1). We also mention the work of Sandier-Shafrir [24] where they
237
+ treat the case of topologically trivial R2-valued solutions in the domain Ω = R3 (see also
238
+ [5, 22] for vortex filament solutions).
239
+ 1.2
240
+ Escaping RM-valued vortex sheet solutions when M ≥ N + 1
241
+ In dimension 2 ≤ N ≤ 6 and for ε < εN given in (7), a different type of radially symmetric
242
+ vortex sheet solution appears provided that the target space has dimension M ≥ N + 1.
243
+ More precisely, we consider the energy functional Eε in (1) over the set of RM-valued maps
244
+ A := {u ∈ H1(Ω; RM) : u(x, z) = (x, 0RM−N ) on ∂BN = SN−1 ⊂ RM, z ∈ (0, 1)n}.
245
+ (8)
246
+ If M ≥ N + 1, the prototype of radially symmetric critical points of Eε in A has the
247
+ following form (invariant in z-direction): 3
248
+ ˜uε(x, z) = ( ˜fε(r) x
249
+ |x|, 0RM−N−1, gε(r)) ∈ A ,
250
+ x ∈ BN, z ∈ (0, 1)n, r = |x|,
251
+ (9)
252
+ where ( ˜fε, gε) satisfies the system of ODEs
253
+ − ˜f ′′
254
+ ε − N − 1
255
+ r
256
+ ˜f ′
257
+ ε + N − 1
258
+ r2
259
+ ˜fε = 1
260
+ ε2 W ′(1 − ˜f 2
261
+ ε − g2
262
+ ε) ˜fε
263
+ in (0, 1),
264
+ (10)
265
+ −g′′
266
+ ε − N − 1
267
+ r
268
+ g′
269
+ ε = 1
270
+ ε2 W ′(1 − ˜f 2
271
+ ε − g2
272
+ ε)gε
273
+ in (0, 1),
274
+ (11)
275
+ ˜fε(1) = 1 and gε(1) = 0.
276
+ (12)
277
+ We distinguish two type of radial profiles:
278
+ • the non-escaping radial profile ( ˜fε = fε, gε = 0) with the unique radial profile fε given
279
+ in (5); in this case, we say that ˜uε = (uε, 0RM−N ) is a non-escaping (radially symmetric)
280
+ vortex sheet solution where uε is given in (4).
281
+ • the escaping radial profile ( ˜fε, gε) with gε > 0 in (0, 1); in this case, we call an
282
+ escaping (radially symmetric) vortex sheet solution ˜uε in (9). In this case, ˜fε ̸= fε and
283
+ obviously, ( ˜fε, −gε) is another radial profile to (9)-(12).
284
+ The properties of such radial profiles (e.g., existence, uniqueness, minimality, mono-
285
+ tonicity) are analyzed in Theorem 9 below and are based on ideas developed by Ignat-
286
+ Nguyen [8].
287
+ Our main result proves the radial symmetry of global minimizers of Eε in A . More
288
+ precisely, the following dichotomy occurs at εN defined in (7): if ε < εN, then escaping
289
+ radially symmetric vortex sheet solutions exist and determine (up to certain orthogonal
290
+ transformations) the full set of global minimizers of Eε in A ; if instead ε ≥ εN, then the
291
+ non-escaping radially symmetric vortex sheet solution is the unique global minimizer of
292
+ Eε in A and no escaping radially symmetric vortex sheet solutions exist in this case.
293
+ 3If M = N + 1, then ˜uε(x, z) = ( ˜fε(r) x
294
+ |x|, gε(r)) for every x ∈ BN and z ∈ (0, 1)n. In fact, if n = 0 (so,
295
+ for Ω = BN), every bounded critical point of Eε in A that is invariant under the action of a special group
296
+ (isomorphic to SO(N)) has the form of ˜uε, see [8, Definition A.1, Lemma A.5].
297
+ 5
298
+
299
+ Theorem 4. Let n ≥ 1, 2 ≤ N ≤ 6, M ≥ N + 1, W ∈ C2((−∞, 1]) satisfy (2) and be
300
+ strictly convex. Consider εN ∈ (0, ∞) such that ℓ(εN) = 0 in (7). Then there exists an
301
+ escaping radially symmetric vortex sheet solution ˜uε in (9) with gε > 0 in (0, 1) if and
302
+ only if 0 < ε < εN. Moreover,
303
+ 1. if 0 < ε < εN, the escaping radially symmetric vortex sheet solution ˜uε is a global
304
+ minimizer of Eε in A and all global minimizers of Eε in A are radially symmetric
305
+ given by R˜uε where R ∈ O(M) is an orthogonal transformation of RM satisfying
306
+ Rp = p for all p ∈ RN × {0RM−N }.
307
+ In this case, the non-escaping vortex sheet
308
+ solution (uε, 0RM−N ) in (4) is an unstable critical point of Eε in A .
309
+ 2. if ε ≥ εN, the non-escaping vortex sheet solution (uε, 0RM−N ) in (4) is the unique
310
+ global minimizer of Eε in A . Furthermore, there are no bounded critical points wε
311
+ of Eε in A that escape in some direction e ∈ SM−1 (i.e., wε · e > 0 a.e. in Ω).
312
+ The result above holds also if n = 0, i.e., Ω = BN and the vortex sheets corresponding
313
+ to the above solutions become vortex points (see Theorem 10). It generalizes [12, Theorem
314
+ 1.1] that was proved in the case N = 2 and M = 3 (without identifying the meaning of
315
+ the dichotomy parameter εN in (7)). The dichotomy in Theorem 4 happens in dimensions
316
+ 2 ≤ N ≤ 6 because of the phenomenology occurring for the limit problem ε → 0. More
317
+ precisely, if M ≥ N + 1, then minimizing SM−1-valued harmonic maps in A are smooth
318
+ and escaping in a direction of SM−1 provided that N ≤ 6; if N ≥ 7, then there is a unique
319
+ minimizing SM−1-valued harmonic maps in A , non-escaping and singular, the singular
320
+ set being given by a vortex sheet of dimension n in Ω (see Theorem 11 in Appendix
321
+ below). This suggests why in dimension N ≥ 7 and for any ε > 0, there is no escaping
322
+ radially symmetric vortex sheet critical point ˜uε of Eε in A while the non-escaping vortex
323
+ sheet solution (uε, 0RM−N ) is the unique global minimizer of Eε in A (see Theorem 5 and
324
+ Remark 8 below).
325
+ The paper is meant to be self-contained and it is organized as follows. In Section 2, we
326
+ prove the minimality and the uniqueness results for the non-escaping radially symmetric
327
+ solution in Theorems 1 and 3; this is done in a more general setting by considering the
328
+ target dimension M ≥ N for the set of configurations A instead of AN. Section 3 is
329
+ devoted to characterize escaping vortex sheet solutions. First, we prove the minimality
330
+ of such bounded solutions stated in Theorem 7. Second, we prove existence, minimality
331
+ and uniqueness results for the escaping radial profile in Theorem 9. Finally, we prove our
332
+ main result on the dichotomy between escaping / non-escaping radially symmetric vortex
333
+ sheet solutions in Theorem 4. In Appendix, we prove the corresponding dichotomy result
334
+ for SM−1-valued harmonic maps in Theorem 11 which again is based on the minimality of
335
+ escaping SM−1-valued harmonic maps in Theorem 12.
336
+ Acknowledgment. R.I. is partially supported by the ANR projects ANR-21-CE40-0004
337
+ and ANR-22-CE40-0006-01. He also thanks for the hospitality of the Hausdorff Research
338
+ Institute for Mathematics in Bonn during the trimester “Mathematics for Complex Ma-
339
+ terials”.
340
+ 6
341
+
342
+ 2
343
+ The non-escaping vortex sheet solution. Proof of Theo-
344
+ rems 1 and 3
345
+ Theorem 1 will be obtained as a consequence of a stronger result on the uniqueness of
346
+ global minimizers of the RM-valued Ginzburg-Landau functional with M ≥ N ≥ 7. For
347
+ that, we consider the energy functional Eε in (1) over the set A defined in (8). The aim
348
+ is to prove the minimality and uniqueness of the vortex sheet solution (uε, 0RM−N ) where
349
+ uε given in (4) with the obvious identification uε ≡ (uε, 0RM−N ) if M = N, following the
350
+ ideas of Ignat-Nguyen-Slastikov-Zarnescu [12, 11].
351
+ Theorem 5. Assume that W satisfies (2) and n ≥ 1. If M ≥ N ≥ 7, then for every
352
+ ε > 0, (uε, 0RM−N ) given in (4) is the unique global minimizer of Eε in A .
353
+ Proof. To simplify notation, we identify
354
+ uε ≡ (uε, 0RM−N )
355
+ when
356
+ M ≥ N.
357
+ (13)
358
+ The proof will be done in several steps following the strategy in [12, Theorem 1.7], [11,
359
+ Theorem 1].
360
+ First, for an arbitrary competitor uε + v, we consider the excess energy
361
+ Eε(uε + v) − Eε(uε) for the critical point uε defined in (4) and show a lower estimate
362
+ by a quadratic energy functional Fε(v) coming from the operator Lε in (6). Second, we
363
+ show that Fε(v) ≥ 0 using the properties of the radial profile fε in (5) and a Hardy
364
+ decomposition method; this proves in particular that uε is a global minimizer of Eε over
365
+ A . Finally, by analyzing the zero excess energy states, we conclude to the uniqueness of
366
+ the global minimizer uε.
367
+ Step 1: Excess energy. For any v ∈ H1
368
+ 0(BN × Rn; RM), we have
369
+ Eε(uε + v) − Eε(uε) =
370
+
371
+
372
+
373
+ ∇uε · ∇v + 1
374
+ 2|∇v|2�
375
+ dxdz
376
+ + 1
377
+ 2ε2
378
+
379
+
380
+
381
+ W(1 − |uε + v|2) − W(1 − |uε|2)
382
+
383
+ dxdz.
384
+ Note that for every u ∈ A , uε−u can be extended to v ∈ H1
385
+ 0(BN ×Rn; RM). In particular,
386
+ v(·, z) ∈ H1
387
+ 0(BN, RM) for a.e. z ∈ (0, 1)n. The convexity of W yields
388
+ W(1 − |uε + v|2) − W(1 − |uε|2) ≥ −W ′(1 − |uε|2)(|uε + v|2 − |uε|2).
389
+ (14)
390
+ Combining the above relations, we obtain the following lower bound for the excess energy:
391
+ Eε(uε + v) − Eε(uε) ≥
392
+
393
+
394
+
395
+ ∇uε · ∇v − 1
396
+ ε2 W ′(1 − f 2
397
+ ε )uε · v
398
+
399
+ dxdz
400
+ +
401
+
402
+
403
+ �1
404
+ 2|∇v|2 − 1
405
+ 2ε2 W ′(1 − f 2
406
+ ε )|v|2�
407
+ dxdz
408
+ =
409
+
410
+
411
+ 1
412
+ 2|∇zv|2 dxdz +
413
+
414
+ (0,1)n
415
+ 1
416
+ 2Fε(v(·, z)) dz,
417
+ (15)
418
+ 7
419
+
420
+ where we used the PDE (3) and introduced the quadratic functional
421
+ Fε(Ψ) =
422
+
423
+ BN
424
+
425
+ |∇xΨ|2 − 1
426
+ ε2 W ′(1 − f 2
427
+ ε )|Ψ|2�
428
+ dx,
429
+ for all Ψ ∈ H1
430
+ 0(BN; RM). Note that the L2-gradient of Fε represents a part of the lin-
431
+ earization of the PDE (3) at uε and it is given by the operator Lε in (6). The rest of the
432
+ proof is devoted to show that for N ≥ 3:
433
+ Fε(ψ) ≥
434
+ �(N − 2)2
435
+ 4
436
+ − (N − 1)
437
+ � �
438
+ BN
439
+ ψ2
440
+ r2 dx,
441
+ ∀ψ ∈ H1
442
+ 0(BN)
443
+ yielding the conclusion for N ≥ 7 and also the inequality for the first eigenvalue ℓ(ε) of
444
+ the operator Lε in (6) in BN: 4
445
+ ℓ(ε) ≥ (N − 2)2
446
+ 4
447
+ − (N − 1) > 0,
448
+ ∀ε > 0
449
+ and
450
+ N ≥ 7.
451
+ To keep the paper self-contained, we explain in the following the simple idea used in
452
+ [12, 11].
453
+ Step 2: A factorization argument. As fε > 0 is a smooth positive radial profile in (0, 1),
454
+ we decompose every scalar test function ψ ∈ C∞
455
+ c (BN \ {0}; R) as follows
456
+ ψ(x) = fε(r)w(x),
457
+ ∀x ∈ BN \ {0}, r = |x|,
458
+ where w ∈ C∞
459
+ c (BN \ {0}; R). Integrating by parts (see e.g. [10, Lemma A.1]), we deduce:
460
+ Fε(ψ) =
461
+
462
+ BN Lεψ · ψ dx =
463
+
464
+ BN w2(Lεfε · fε) dx +
465
+
466
+ BN f 2
467
+ ε |∇xw|2 dx
468
+ =
469
+
470
+ BN f 2
471
+ ε
472
+
473
+ |∇xw|2 − N − 1
474
+ r2
475
+ w2
476
+
477
+ dx,
478
+ because Lεfε · fε = − N−1
479
+ r2 f 2
480
+ ε in BN by (5). Furthermore, we decompose
481
+ w = ϕg
482
+ in
483
+ BN \ {0}
484
+ with ϕ = |x|− N−2
485
+ 2
486
+ satisfying
487
+ −∆xϕ = (N − 2)2
488
+ 4|x|2
489
+ ϕ
490
+ in RN \ {0}
491
+ and g ∈ C∞
492
+ c (BN \ {0}; R). Then
493
+ |∇xw|2 = |∇xg|2ϕ2 + |∇xϕ|2g2 + 1
494
+ 2∇x(ϕ2) · ∇x(g2).
495
+ 4Observe the difference between dimension N ≥ 7 and the case of dimension 2 ≤ N ≤ 6 where we have
496
+ ℓ(ε) < 0 for ε < εN in (7); moreover, if N ≤ 6, then ℓ(ε) blows up as − 1
497
+ ε2 as ε → 0 (see [8, Lemma 2.3]).
498
+ 8
499
+
500
+ As |∇xϕ|2 = (N−2)2
501
+ 4|x|2 ϕ2 and ϕ2 is harmonic in BN \ {0} (recall that N ≥ 7), integration by
502
+ parts yields
503
+ Fε(ψ) =
504
+
505
+ BN f 2
506
+ ε
507
+
508
+ |∇xg|2ϕ2 + (N − 2)2
509
+ 4r2
510
+ ϕ2g2 − N − 1
511
+ r2
512
+ ϕ2g2
513
+
514
+ dx − 1
515
+ 2
516
+
517
+ BN ∇x(ϕ2) · ∇x(f 2
518
+ ε )g2 dx
519
+
520
+
521
+ BN f 2
522
+ ε |∇xg|2ϕ2 dx +
523
+ �(N − 2)2
524
+ 4
525
+ − (N − 1)
526
+ � �
527
+ BN
528
+ f 2
529
+ ε
530
+ r2 ϕ2g2 dx
531
+
532
+ �(N − 2)2
533
+ 4
534
+ − (N − 1)
535
+ � �
536
+ BN
537
+ ψ2
538
+ r2 dx ≥ 0,
539
+ (16)
540
+ where we used N ≥ 7 and 1
541
+ 2∇x(ϕ2)·∇x(f 2
542
+ ε ) = 2ϕϕ′fεf ′
543
+ ε ≤ 0 in BN \{0} because ϕ, fε, f ′
544
+ ε >
545
+ 0 and ϕ′ < 0 in (0, 1) (see e.g. [7, 9, 8]).
546
+ Step 3: We prove that Fε(Ψ) ≥ 0 for every Ψ ∈ H1
547
+ 0(BN; RM); moreover, Fε(Ψ) = 0 if and
548
+ only if Ψ = 0. Let Ψ ∈ H1
549
+ 0(BN; RM). As a point in RN has zero H1 capacity, a standard
550
+ density argument implies the existence of a sequence Ψk ∈ C∞
551
+ c (BN \ {0}; RM) such that
552
+ Ψk → Ψ in H1(BN, RM) and a.e. in BN. On the one hand, by definition of Fε, since
553
+ W ′(1 − f 2
554
+ ε ) ∈ L∞, we deduce that Fε(Ψk) → Fε(Ψ) as k → ∞. On the other hand, by
555
+ (16) and Fatou’s lemma, we deduce
556
+ lim inf
557
+ k→∞ Fε(Ψk) ≥
558
+ �(N − 2)2
559
+ 4
560
+ − (N − 1)
561
+
562
+ lim inf
563
+ k→∞
564
+
565
+ BN
566
+ |Ψk|2
567
+ r2
568
+ dx
569
+
570
+ �(N − 2)2
571
+ 4
572
+ − (N − 1)
573
+ � �
574
+ BN
575
+ |Ψ|2
576
+ r2 dx.
577
+ Therefore, we conclude that
578
+ Fε(Ψ) ≥
579
+ �(N − 2)2
580
+ 4
581
+ − (N − 1)
582
+ � �
583
+ BN
584
+ |Ψ|2
585
+ r2 dx ≥ 0,
586
+ ∀Ψ ∈ H1
587
+ 0(BN; RM).
588
+ Moreover, Fε(Ψ) = 0 if and only if Ψ = 0.
589
+ Step 4: Conclusion. By (15) and Step 3, we deduce that uε is a global minimizer of Eε
590
+ over A . For uniqueness, assume that ˆuε is another global minimizer of Eε over A . If
591
+ v := ˆuε − uε, then v can be extended in H1
592
+ 0(BN × Rn; RM) and by Steps 1 and 3, we have
593
+ that
594
+ 0 = Eε(ˆuε) − Eε(uε) ≥
595
+
596
+
597
+ 1
598
+ 2|∇zv|2 dxdz +
599
+
600
+ (0,1)n
601
+ 1
602
+ 2Fε(v(·, z)) dz ≥ 0,
603
+ which yields ∇zv = 0 a.e. in Ω and Fε(v(·, z)) = 0 for a.e. z ∈ (0, 1)n. In other words,
604
+ v = v(x) and Step 3 implies that v = 0, i.e., ˆuε = uε in Ω.
605
+ Remark 6. Theorem 5 reveals the following fact: if for n = 0 (i.e., Ω = BN) and some
606
+ ε > 0, a (radially symmetric) critical point ˆuε : BN → RM of Eε in A is proved to be a
607
+ global minimizer (and additionally, if one proves that it is the unique global minimizer),
608
+ then for any dimensions n ≥ 1 (i.e., Ω = BN ×(0, 1)n), this z-invariant solution ˆuε of (3)
609
+ 9
610
+
611
+ in BN × (0, 1)n is also a global minimizer (and additionally, it is the unique minimizer)
612
+ of Eε in A . This is because for every u : BN × (0, 1)n → RM with u ∈ A , then u(·, z)
613
+ satisfies the degree-one vortex boundary condition on ∂BN for every z ∈ (0, 1)n yielding
614
+ Eε(u) =
615
+
616
+
617
+ 1
618
+ 2|∇zu|2 dxdz +
619
+
620
+ (0,1)n Eε(u(·, z)) dz
621
+
622
+
623
+ (0,1)n Eε(ˆuε) dz = Eε(ˆuε);
624
+ the equality occurs only when u is z-invariant. Thus, if the uniqueness of the global mini-
625
+ mizer ˆuε holds in BN (i.e., n = 0), then this yields uniqueness of the global minimizer ˆuε
626
+ in Ω = BN × (0, 1)n (as a map independent of z-variable) for every n ≥ 1.
627
+ Proof of Theorem 3. We prove the result in the more general setting of RM-valued maps
628
+ u belonging to A for M ≥ N using the same identification (13). By Step 1 in the proof
629
+ of Theorem 5 (see (15)), the excess energy is estimated for every v ∈ H1
630
+ 0(BN × Rn; RM):
631
+ Eε(uε + v) − Eε(uε) ≥
632
+
633
+
634
+ 1
635
+ 2|∇zv|2 dxdz + 1
636
+ 2
637
+
638
+ (0,1)n < Lεv(·, z), v(·, z) > dz,
639
+ where Lε is the operator in (6) and < ·, · > denotes the duality pairing (H−1, H1
640
+ 0) in BN.
641
+ If ε ≥ εN, then ℓ(ε) ≥ 0 (by [8, Lemma 2.3]) and therefore, 5
642
+ < Lεv(·, z), v(·, z) > ≥ ℓ(ε)∥v(·, z)∥2
643
+ L2(BN ) ≥ 0
644
+ for a.e. z ∈ (0, 1)n,
645
+ (17)
646
+ where we used that v(·, z) ∈ H1
647
+ 0(BN; RM) for a.e. z ∈ (0, 1)n. Thus, uε is a minimizer
648
+ of Eε over A . It remains to prove uniqueness of the global minimizer. For that, if ˆuε is
649
+ another global minimizer of Eε over A , setting v := ˆuε − uε, then v can be extended in
650
+ H1
651
+ 0(BN × Rn; RM) and
652
+ 0 = Eε(ˆuε) − Eε(uε) ≥
653
+
654
+
655
+ 1
656
+ 2|∇zv|2 dxdz + ℓ(ε)
657
+ 2
658
+
659
+ (0,1)n
660
+
661
+ BN |v(x, z)|2 dxdz ≥ 0
662
+ (18)
663
+ because ℓ(ε) ≥ 0 for ε ≥ εN. Thus, equality holds in the above inequalities.
664
+ Case 1: ε > εN. In this case, ℓ(ε) > 0 and we conclude that v = 0 in Ω, i.e., ˆuε = uε in Ω.
665
+ 5 Indeed, for a scalar function v ∈ C∞
666
+ c (BN \ {0}, R), if ψ = ψ(r) > 0 is a radial first eigenfunction of
667
+ Lε in BN with zero Dirichlet data, i.e., Lεψ = ℓ(ε)ψ in BN, then the duality pairing (H−1, H1
668
+ 0) term in
669
+ BN writes (see e.g. [10, Lemma A.1]):
670
+ < Lεv, v > =
671
+
672
+ BN ψ2|∇( v
673
+ ψ )|2 dx +
674
+
675
+ BN ( v
676
+ ψ )2Lεψ · ψ dx =
677
+
678
+ BN ψ2|∇( v
679
+ ψ )|2 dx + ℓ(ε)∥v∥2
680
+ L2(BN ).
681
+ By a density argument, Fatou’s lemma yields for every scalar function v ∈ H1
682
+ 0(BN, R),
683
+ < Lεv, v > ≥
684
+
685
+ BN ψ2|∇( v
686
+ ψ )|2 dx + ℓ(ε)∥v∥2
687
+ L2(BN ).
688
+ 10
689
+
690
+ Case 2: ε = εN and W is in addition strictly convex. In this case, ℓ(ε) = 0 and by (18), v
691
+ is invariant in z, i.e., v = v(x) and equality holds in (17) and in (15), thus, equality holds
692
+ in (14). Note that by footnote 5 the equality in (17) holds if and only if v = λψ for some
693
+ λ ∈ RM, where ψ = ψ(r) is a radial first eigenfunction of Lε in BN with zero Dirichlet
694
+ data, in particular ψ > 0 in [0, 1) and ψ(1) = 0. Also, by the strict convexity of W, the
695
+ equality (14) is achieved if and only if |uε + v| = |uε| a.e. in Ω, that is, |v|2 + 2v · uε = 0
696
+ a.e. in BN. It yields
697
+ |λ|2ψ2 + 2fε(|x|)( x
698
+ |x|, 0RM−N ) · λψ = 0
699
+ for every x ∈ BN.
700
+ (19)
701
+ Dividing by ψ in BN, the continuity up to the boundary ∂BN leads to 2fε(|x|)(x, 0RM−N )·
702
+ λ = 0 for every x ∈ ∂BN since ψ = 0 on ∂BN. As fε(1) = 1, it follows that the first N
703
+ components of λ vanish. Coming back to (19), we conclude that |λ|2ψ2 = 0 in BN, i.e.,
704
+ λ = 0 and so, v = 0 and ˆuε = uε in Ω.
705
+ 3
706
+ Properties of escaping vortex sheet solutions when M ≥
707
+ N + 1
708
+ 3.1
709
+ Minimality of escaping vortex sheet solutions
710
+ In this section, we require the additional assumption of strict convexity of W in order to
711
+ determine the set of global minimizers of Eε over A in (8). However, W is assumed to be
712
+ only C1 not C2. We prove that every bounded solution to (3) escaping in some direction
713
+ is a global minimizer of Eε over A ; moreover, such global minimizer is unique up to an
714
+ orthogonal transformation of RM keeping invariant the space RN × {0RM−N }.
715
+ Theorem 7. We consider the dimensions n ≥ 1 and M > N ≥ 2, the potential W ∈
716
+ C1((−∞, 1], R) satisfying (2) and an escaping direction e ∈ SM−1. Fix any ε > 0 and let
717
+ wε ∈ H1 ∩ L∞(Ω, RM) be a critical point of the energy Eε in the set A which is positive
718
+ in the direction e inside Ω:
719
+ wε · e > 0 a.e. in Ω.
720
+ (20)
721
+ Then wε is a global minimizer of Eε in A . If in addition W is strictly convex, then all
722
+ minimizers of Eε in A are given by Rwε where R ∈ O(M) is an orthogonal transformation
723
+ of RM satisfying Rp = p for all p ∈ RN × {0RM−N }.
724
+ This result is reminiscent from [12, Theorem 1.3]. However, it doesn’t apply directly
725
+ as the domain Ω is not smooth here and the boundary condition is a mixed Dirichlet-
726
+ Neumann condition (w.r.t. Dirichlet boundary condition in [12]).
727
+ Proof. In the following, we denote the variable X = (x, z) ∈ Ω = BN × (0, 1)n. As a
728
+ critical point of Eε in the set A , wε : Ω → RM satisfies
729
+
730
+
731
+
732
+ −∆wε = 1
733
+ ε2wε W ′(1 − |wε|2)
734
+ in Ω,
735
+ ∂wε
736
+ ∂z = 0
737
+ on BN × ∂(0, 1)n,
738
+ wε(x, z) = (x, 0RM−N )
739
+ on ∂BN × (0, 1)n.
740
+ (21)
741
+ 11
742
+
743
+ In particular, ∆wε ∈ L∞(Ω) (as W ′ is continuous and wε ∈ L∞(Ω)); then standard elliptic
744
+ regularity for the mixed boundary conditions in (21) yields wε ∈ C1(¯Ω, RM). Thus, (20)
745
+ implies wε·e ≥ 0 in ¯Ω and the vortex boundary condition in A implies that e is orthogonal
746
+ to RN × {0RM−N }. By the invariance of the energy and the vortex boundary condition
747
+ under the transformation wε(X) �→ Rwε(X) for any R ∈ O(M) satisfying Rp = p for all
748
+ p ∈ RN × {0RM−N }, we know that Rwε is also a critical point of Eε over A ; thus, we can
749
+ assume that
750
+ e := eM = (0, . . . , 0, 1) ∈ RM.
751
+ (22)
752
+ We prove the result in several steps.
753
+ Step 1: Excess energy.
754
+ By Step 1 in the proof of Theorem 5, we have for any v ∈
755
+ H1
756
+ 0(BN × Rn, RM):
757
+ Eε(wε + v) − Eε(wε) ≥
758
+
759
+
760
+ �1
761
+ 2|∇v|2 − 1
762
+ 2ε2 W ′(1 − |wε|2)|v|2�
763
+ dX =: 1
764
+ 2Gε(v)
765
+ (23)
766
+ (note that Gε(v) is larger than the integration of Fε(v) in (15) over (0, 1)n as it contains
767
+ also the integration of |∇zv|2). If in addition W is strictly convex, then equality holds
768
+ above if and only if |wε(X) + v(X)| = |wε(X)| a.e. X ∈ Ω (by (14)).
769
+ Step 2: Global minimality of wε. It is enough to show that the quadratic energy Gε(v)
770
+ defined in (23) is nonnegative for any v ∈ H1
771
+ 0(BN ×Rn, RM). Denoting the M-component
772
+ of wε by φ := wε · eM, we know that φ ∈ C1(¯Ω), φ ≥ 0 in Ω (by (20)) and satisfies the
773
+ Euler-Lagrange equation in the sense of distributions:
774
+
775
+
776
+
777
+ −∆φ − 1
778
+ ε2W ′(1 − |wε|2)φ = 0 in Ω,
779
+ φ = 0 on ∂BN × (0, 1)n,
780
+ ∂φ
781
+ ∂z = 0 on BN × ∂(0, 1)n.
782
+ (24)
783
+ Note that by strong maximum principle, φ > 0 in Ω (as φ cannot be identically 0 in Ω
784
+ by (20)). Moreover, Hopf’s lemma yields φ > 0 on BN × ∂(0, 1)n as ∂φ
785
+ ∂z vanishes there.
786
+ Now, for any smooth map v ∈ C∞
787
+ c (BN × Rn; RM), we can define Ψ = v
788
+ φ ∈ C1(¯Ω; RM)
789
+ with Ψ = 0 in a neighborhood of ∂BN × (0, 1)n and integration by parts yields for every
790
+ component vj = φΨj with 1 ≤ j ≤ M (as in [10, Lemma A.1.]):
791
+ Gε(vj) =
792
+
793
+
794
+
795
+ |∇vj|2 − 1
796
+ ε2 W ′(1 − |wε|2)φ · φΨ2
797
+ j
798
+
799
+ dX
800
+ (24)
801
+ =
802
+
803
+
804
+
805
+ |∇(φΨj)|2 − ∇φ · ∇(φ Ψ2
806
+ j)
807
+
808
+ dX =
809
+
810
+
811
+ φ2|∇Ψj|2 dX.
812
+ As Gε is continuous in strong H1(Ω) topology (since W ′(1 − |wε|2) ∈ L∞(Ω)), by density
813
+ of C∞
814
+ c (BN × Rn; RM) in H1
815
+ 0(BN × Rn; RM), Fatou’s lemma yields
816
+ Gε(v) ≥
817
+
818
+
819
+ φ2|∇
820
+ �v
821
+ φ
822
+
823
+ |2 dX ≥ 0,
824
+ ∀v ∈ H1
825
+ 0(BN × Rn; RM).
826
+ 12
827
+
828
+ As a consequence of (23), we deduce that wε is a minimizer of Eε over A . Moreover,
829
+ Gε(v) = 0 if and only if there exists a (constant) vector λ ∈ RM such that v = λφ for a.e.
830
+ x ∈ Ω.
831
+ Step 3: Set of global minimizers. From now on, we assume that W is strictly convex and
832
+ denote wε = (wε,1, . . . , wε,M). Note that the map
833
+ ˜wε := (wε,1, . . . , wε,N, 0RM−N−1,
834
+
835
+ w2
836
+ ε,N+1 + · · · + w2
837
+ ε,M)
838
+ (25)
839
+ belongs to A , | ˜wε| = |wε| and |∇ ˜wε| ≤ |∇wε| in Ω, so Eε(wε) ≥ Eε( ˜wε) and
840
+
841
+ w2
842
+ ε,N+1 + · · · + w2
843
+ ε,M ≥ wε,M = φ > 0
844
+ in
845
+ Ω.
846
+ Hence, ˜wε is a minimizer of Eε on A (as wε minimizes Eε over A by Step 2). Therefore,
847
+ up to interchanging wε and ˜wε, we may assume
848
+ � wε,N+1 = · · · = wε,M−1 ≡ 0 in Ω
849
+ wε,M = φ
850
+ (20)
851
+ > 0 in Ω.
852
+ We now consider another minimizer Uε of Eε over A and denote v := Uε −wε ∈ H1
853
+ 0(BN ×
854
+ Rn; RM) after a suitable extension. From Steps 1 and 2 we know that Eε(Uε) = Eε(v +
855
+ wε) = Eε(wε), Gε(v) = 0, |v+wε| = |wε| a.e. in Ω and v = λφ for some λ = (λ1, . . . , λM) ∈
856
+ RM where we recall that φ = wε·eM. By continuity of wε and φ, the relation |v+wε| = |wε|
857
+ a.e. in Ω implies 2wε · v + |v|2 = 0 everywhere in Ω. Since v = λφ, dividing by φ > 0 in
858
+ Ω, we obtain
859
+ 2λ · wε + φ|λ|2 = 0 in Ω
860
+ (26)
861
+ and by continuity, the equality holds also on ∂Ω. As for every (x, z) ∈ ∂BN × (0, 1)n,
862
+ φ(x, z) = 0 and wε(x, z) = (x, 0RM−N ), we deduce that λ · (x, 0RM−N ) = 0 for every
863
+ x ∈ ∂BN. It follows that λ1 = λ2 = · · · = λN = 0 and therefore, recalling that wε,N+1 =
864
+ · · · = wε,M−1 = 0 in Ω, we have by (26):
865
+ 2λMφ + (λ2
866
+ N+1 + · · · + λ2
867
+ M)φ = 0 in Ω.
868
+ As φ > 0 in Ω, we obtain
869
+ λ2
870
+ N+1 + · · · + λ2
871
+ M−1 + (λM + 1)2 = 1;
872
+ hence we can find R ∈ O(M) such that Rp = p for all p ∈ RN × {0RM−N } and
873
+ ReM = (0, . . . , 0, λN+1, . . . , λM−1, λM + 1).
874
+ This implies Uε = wε+v = wε+λφ = Rwε as required. The converse statement is obvious:
875
+ if wε is a minimizer of Eε over A and R ∈ O(M) is a transformation fixing all points of
876
+ RN ×{0RM−N }, then Rwε is also a minimizer of Eε over A (because Eε and the boundary
877
+ condition in A are invariant under such orthogonal transformation R).
878
+ 13
879
+
880
+ Remark 8. Note that if n ≥ 1, M > N ≥ 7 and W satisfies (2) (not necessarily strictly
881
+ convex), then there are no bounded critical points of the energy Eε in the set A escaping in
882
+ a direction e ∈ SM−1. Indeed, if such an escaping critical point of Eε in A exists, then by
883
+ Theorem 7, this solution would be a global minimizer of Eε in A which is a contradiction
884
+ with the uniqueness of the global minimizer (uε, 0RM−N ) in (4) (that is non-escaping)
885
+ proved in Theorem 5.
886
+ 3.2
887
+ Escaping radial profile
888
+ Let M ≥ N + 1. We give a necessary and sufficient condition for the existence of an
889
+ escaping radial profile ( ˜fε, gε > 0) in (0, 1) to the system (9)–(12); we also prove uniqueness,
890
+ minimality and monotonicity of the escaping radial profile. For that, in the context of Eε
891
+ defined over A , we introduce the functional
892
+ Iε(f, g) =
893
+ 1
894
+ |SN−1|Eε
895
+
896
+ (f(r) x
897
+ |x|, 0RM−N−1, g(r))
898
+
899
+ = 1
900
+ 2
901
+ � 1
902
+ 0
903
+
904
+ (f ′)2 + (g′)2 + N − 1
905
+ r2
906
+ f 2 + 1
907
+ ε2 W(1 − f 2 − g2)
908
+
909
+ rN−1 dr
910
+ where (f, g) belongs to
911
+ B =
912
+
913
+ (f, g) : r
914
+ N−1
915
+ 2 f ′, r
916
+ N−3
917
+ 2 f, r
918
+ N−1
919
+ 2 g′, r
920
+ N−1
921
+ 2 g ∈ L2(0, 1), f(1) = 1, g(1) = 0
922
+
923
+ .
924
+ (27)
925
+ The following result is reminiscent from Ignat-Nguyen [8, Theorem 2.4] (for ˜W ≡ 0).
926
+ The proof of [8, Theorem 2.4] is rather complicated (as it is proved for some general
927
+ potentials ˜W). We present here a simple proof that works in our context:
928
+ Theorem 9. Let 2 ≤ N ≤ 6, M ≥ N + 1, W ∈ C2((−∞, 1]) satisfy (2) and be strictly
929
+ convex. Consider εN ∈ (0, ∞) in (7) such that ℓ(εN) = 0. Then the system (9)–(12) has
930
+ an escaping radial profile ( ˜fε, gε) with gε > 0 in (0, 1) if and only if 0 < ε < εN. Moreover,
931
+ in the case 0 < ε < εN,
932
+ 1. ( ˜fε, gε > 0) is the unique escaping radial profile of (9)–(12) and
933
+ ˜fε
934
+ r , gε ∈ C2([0, 1]),
935
+ ˜f 2
936
+ ε + g2
937
+ ε < 1, ˜fε > 0, ˜f ′
938
+ ε > 0, g′
939
+ ε < 0 in (0, 1);
940
+ 2. there are exactly two minimizers of Iε in B given by ( ˜fε, ±gε);
941
+ 3. the non-escaping radial profile (fε, 0) is an unstable critical point of Iε in B where
942
+ fε is the unique radial profile in (5).
943
+ Recall that for ε ≥ εN, the non-escaping radial profile (fε, 0) is the unique global
944
+ minimizer of Iε in B (by Theorem 3 whose proof yields the minimality of (uε, 0RM−N ) of
945
+ Eε in A ).
946
+ Proof of Theorem 9. First, we focus on the existence of escaping radial profiles of (9)–(12).
947
+ Note that the direct method in calculus of variations implies that Iε admits a minimizer
948
+ 14
949
+
950
+ ( ˜fε, gε) ∈ B.
951
+ Since ( ˜fε, gε) ∈ B, ( ˜fε, gε) ∈ C((0, 1]).
952
+ It follows that ( ˜fε, gε) satisfies
953
+ (10)–(12) in the weak sense, and so ˜fε, gε ∈ C2((0, 1]). Since (| ˜fε|, |gε|) is also a minimizer
954
+ of Iε in B, the above argument also shows that | ˜fε|, |gε| ∈ C2((0, 1]) satisfies (10)–(12).
955
+ Since | ˜fε|, |gε| ≥ 0 and ˜fε(1) = 1, the strong maximum principle yields | ˜fε| > 0 in (0, 1),
956
+ and either |gε| > 0 in (0, 1) or gε ≡ 0 in (0, 1). It follows that ˜fε > 0 in (0, 1), and there
957
+ are three alternatives: gε > 0 in (0, 1), gε < 0 in (0, 1) or gε ≡ 0 in (0, 1). Clearly, when
958
+ gε ≡ 0, ˜fε is equal to the unique radial profile fε in (5). By considering ( ˜fε, −gε) instead
959
+ of ( ˜fε, gε) if necessary, we assume in the sequel that gε ≥ 0.
960
+ Claim: if 0 < ε < εN, then gε > 0 in (0, 1) and (fε, 0) is an unstable critical point of Iε in
961
+ B.
962
+ Proof of Claim: We define the second variation of Iε at (fε, 0) as
963
+ Qε(α, β) = d2
964
+ dt2
965
+ ����
966
+ t=0
967
+
968
+
969
+ (fε, 0) + t(α, β)
970
+
971
+ =
972
+
973
+ BN
974
+
975
+ Lεα · α + Lεβ · β + N − 1
976
+ r2
977
+ α2 + 2
978
+ ε2 W ′′(1 − f 2
979
+ ε )f 2
980
+ ε α2�
981
+ dx,
982
+ for α, β ∈ C∞
983
+ c ((0, 1)) which extends by density to the Hilbert space
984
+ H = {(α, β) : (fε + α, β) ∈ B} with the norm
985
+ ∥(α, β)∥H := ∥(α x
986
+ |x|, β)∥H1(BN,RN+1).
987
+ As ε ∈ (0, εN), we have ℓ(ε) < 0 by (7). Taking β ∈ H1_0(BN) to be any first eigenfunction
+ of Lε in BN, which is radially symmetric, we have r^{(N−1)/2} β′, r^{(N−1)/2} β ∈ L2(0, 1), β(1) = 0 and
+ Qε(0, β) = ∫_{BN} Lεβ · β dx = ℓ(ε) ∫_{BN} β² dx < 0.
999
+ So, (fε, 0) is an unstable critical point of Iε in B if ε < εN. In particular, (fε, 0) does not
+ minimize Iε in B and therefore, by the above construction of the minimizer ( ˜fε, gε) of
+ Iε in B, we deduce that gε > 0. This proves the above Claim.
1002
+ Moreover, by [8, Lemmas 2.7 and A.5, Proposition 2.9] (for ˜W ≡ 0), we deduce that
+ ˜fε/r, gε ∈ C2([0, 1]), ˜fε² + gε² < 1, ˜fε′ > 0 and gε′ < 0 in (0, 1).
1009
+ To conclude, we distinguish two cases:
1010
+ Case 1: if ε ∈ (0, εN), the Claim yields the existence of an escaping radial profile ( ˜fε, gε > 0).
+ By [8, Lemma 2.7], every escaping radial profile ( ˜fε, gε > 0) is bounded (i.e., ˜fε² + gε² < 1
1014
+ in (0, 1)) and therefore, by Theorem 7, the corresponding (bounded) escaping critical point
1015
+ ˜uε in (9) is a global minimizer of Eε over A and the set of minimizers of Eε over A is then
1016
+ given by {R˜uε : R ∈ O(M), Rp = p, ∀p ∈ RN × {0RM−N }}. Therefore, ( ˜fε, ±gε) are the
1017
+ only two minimizers of Iε in B. In particular, this proves the uniqueness of the escaping
1018
+ radial profile ( ˜fε, gε > 0).
1019
+ Case 2: if ε ≥ εN, by the proof of Theorem 3, the non-escaping vortex sheet solution
1020
+ uε(x) ≡ (fε(|x|) x/|x|, 0RM−N ) (by (13)) is the unique minimizer of Eε over A . In particular,
1022
+ (fε, 0) is the unique minimizer of Iε in B, i.e., in the above construction of the minimizer
1023
1025
+ ( ˜fε, gε) of Iε in B, we have ˜fε = fε and gε = 0 in (0, 1). We claim that no escaping
1026
+ radial profile ( ˆfε, ˆgε > 0) exists if ε ≥ εN. Assume by contradiction that such an escaping
1027
+ radial profile ( ˆfε, ˆgε > 0) exists. The same argument presented in Case 1 would imply
1028
+ that ( ˆfε, ˆgε > 0) is a minimizer of Iε in B which contradicts the uniqueness of the global
1029
+ minimizer (fε, 0).
1030
+ 3.3  Proof of Theorem 4
1032
+ We now prove the main result:
1033
+ Proof of Theorem 4. By Theorem 9, the existence of an escaping radially symmetric so-
1034
+ lution ˜uε in (9) is equivalent to ε ∈ (0, εN). Moreover, in that case, the escaping radial
1035
+ profile ( ˜fε, gε > 0) is unique and bounded, i.e., ˜fε² + gε² < 1 in (0, 1).
1038
+ Case 1: if ε ∈ (0, εN), Theorem 7 implies that the (bounded) escaping radially symmetric
1039
+ critical point ˜uε in (9) is a global minimizer of Eε over A and every minimizer of Eε over
1040
+ A has the form R˜uε for some orthogonal transformation R ∈ O(M) keeping invariant
1041
+ the space RN × {0RM−N }. Moreover, by Theorem 9, the non-escaping radial profile (fε, 0)
1042
+ is proved to be an unstable critical point of Iε in B, so the non-escaping vortex sheet
1043
+ solution (uε, 0RM−N ) is an unstable critical point of Eε in A .
1044
+ Case 2: if ε ≥ εN, the proof of Theorem 3 implies that the non-escaping radially symmetric
1045
+ vortex sheet solution uε(x) ≡ (fε(|x|) x/|x|, 0RM−N ) (by (13)) is the unique minimizer of Eε
1047
+ over A . In this case, there is no bounded critical point wε of Eε over A that escapes in
1048
+ some direction e ∈ SM−1; indeed, if such (bounded) escaping solution wε satisfying (20)
1049
+ exists, then Theorem 7 would imply that wε is a global minimizer of Eε over A which
1050
+ contradicts that the non-escaping vortex sheet solution uε is the unique global minimizer
1051
+ of Eε over A .
1052
+ Theorem 4 holds also for the “degenerate” dimension n = 0. In this case, Ω = BN and
1053
+ vortex sheets are vortex points,
1054
+ Eε(u) = ∫_{BN} [ (1/2)|∇u|² + (1/(2ε²)) W(1 − |u|²) ] dx,
1062
+ A := {u ∈ H1(BN; RM) : u(x) = (x, 0RM−N ) on ∂BN = SN−1}
1063
+ and radially symmetric vortex critical points of Eε in A have the corresponding form in
1064
+ (9):
+ ˜uε(x) = ( ˜fε(r) x/|x|, 0RM−N−1, gε(r)) ∈ A ,   x ∈ BN, r = |x|,   (28)
1069
+ where the radial profiles ( ˜fε, gε) satisfy the system (10)-(12) and are described in Theo-
1070
+ rem 9; the non-escaping radially symmetric vortex solution is given here by
1071
+ uε(x) = (fε(|x|) x/|x|, 0RM−N )   for all x ∈ BN,   (29)
1075
+ where the radial profile fε is the unique solution to (5). We obtain the following result
1076
+ which generalizes [12, Theorem 1.1] that was proved in the case N = 2 and M = 3 (without
1077
+ identifying the meaning of the dichotomy parameter εN in (7)).
1078
1080
+ Theorem 10. Let 2 ≤ N ≤ 6, M ≥ N + 1, Ω = BN, W ∈ C2((−∞, 1]) satisfy (2) and
1081
+ be strictly convex. Consider εN ∈ (0, ∞) such that ℓ(εN) = 0 in (7). Then there exists an
1082
+ escaping radially symmetric vortex solution ˜uε in (28) with the radial profile ( ˜fε, gε > 0)
1083
+ given in Theorem 9 if and only if 0 < ε < εN. Moreover,
1084
+ 1. if 0 < ε < εN, ˜uε is a global minimizer of Eε in A and all global minimizers of
1085
+ Eε in A are radially symmetric given by R˜uε where R ∈ O(M) is an orthogonal
1086
+ transformation of RM satisfying Rp = p for all p ∈ RN ×{0RM−N }. In this case, the
1087
+ non-escaping vortex solution uε in (29) is an unstable critical point of Eε in A .
1088
+ 2. if ε ≥ εN, the non-escaping vortex solution uε in (29) is the unique global minimizer
1089
+ of Eε in A . Furthermore, there are no bounded critical points wε of Eε in A that
1090
+ escape in a direction e ∈ SM−1, i.e., wε · e > 0 a.e. in Ω.
1091
+ The proof follows by the same argument used for Theorem 4; the main difference is
+ that in the ball Ω = BN, a critical point wε of Eε in A satisfies the PDE system with
1093
+ Dirichlet boundary condition (instead of the mixed Dirichlet-Neumann condition in (21)):
1094
+ −∆wε = (1/ε²) wε W′(1 − |wε|²)   in BN,
+ wε(x) = (x, 0RM−N )   on ∂BN.
1099
+ A  Appendix. Vortex sheet SM−1-valued harmonic maps in cylinders
1102
+ In dimensions M > N ≥ 2 and n ≥ 1, for the cylinder shape domain Ω = BN × (0, 1)n,
1103
+ we consider the harmonic map problem for SM−1-valued maps u ∈ H1(Ω; SM−1) ∩ A
1104
+ associated to the Dirichlet energy
1105
+ E(u) = (1/2) ∫_Ω |∇u|² dx dz.
1110
+ Any critical point u : Ω → SM−1 of this problem satisfies
+ −∆u = u |∇u|²   in Ω,
+ ∂u/∂z = 0   on BN × ∂(0, 1)n,
+ u(x, z) = (x, 0RM−N )   on ∂BN × (0, 1)n.   (30)
1122
+ We will focus on radially symmetric vortex sheet SM−1-valued harmonic maps having the
1123
+ following form (invariant in z-direction):
1124
+ u(x, z) = (f(r) x/|x|, 0RM−N−1, g(r)) ∈ A ,   x ∈ BN, z ∈ (0, 1)n, r = |x|,   (31)
1128
+ where the radial profile (f, g) satisfies
1129
+ f² + g² = 1   in (0, 1),   (32)
1133
1135
+ and the system of ODEs:
1136
+ −f″ − ((N−1)/r) f′ + ((N−1)/r²) f = Γ(r) f   in (0, 1),   (33)
+ −g″ − ((N−1)/r) g′ = Γ(r) g   in (0, 1),   (34)
+ f(1) = 1 and g(1) = 0,   (35)
+ where
+ Γ(r) = (f′)² + ((N−1)/r²) f² + (g′)²
1156
+ is the Lagrange multiplier due to the unit length constraint in (32). As for the Ginzburg-
1157
+ Landau system, we distinguish two types of radial profiles:
1158
+ • the non-escaping radial profile ( ¯f ≡ 1, ¯g ≡ 0) yielding the non-escaping (radially
1159
+ symmetric) vortex sheet SM−1-valued harmonic map (also called “equator” map):
1160
+ ¯u(x, z) = ( x/|x|, 0RM−N ),   x ∈ BN, z ∈ (0, 1)n.   (36)
1164
+ Note that ¯u is singular and the singular set of this map is the vortex sheet {0RM−N }×(0, 1)n
1165
+ of dimension n in Ω. Also, observe that ¯u ∈ H1(Ω, SM−1) if and only if N ≥ 3.
1166
+ • the escaping radial profile (f, g) with g > 0 in (0, 1); in this case, it holds f(0) = 0,
1167
+ g(0) = 1 and we say that u in (31) is an escaping (radially symmetric) vortex sheet SM−1-
1168
+ valued harmonic map. Note that u is smooth for every dimension M > N ≥ 2 and n ≥ 1
1169
+ and the zero set of (u1, . . . , uN) is the vortex sheet {0RM−N } × (0, 1)n of dimension n in
1170
+ Ω. Obviously, (f, −g < 0) is another radial profile satisfying (32)-(35).
1171
+ The properties of such radial profiles are proved in [14] (see also [8, Theorem 2.6] for
1172
+ ˜W ≡ 0 in those notations). More precisely,
1173
+ (a) If N ≥ 7, the non-escaping radial profile ( ¯f ≡ 1, ¯g ≡ 0) is the unique minimizer of
1174
+ I(f, g) = (1/|SN−1|) E( f(r) x/|x|, 0RM−N−1, g(r) )
+         = (1/2) ∫_0^1 [ (f′)² + (g′)² + ((N−1)/r²) f² ] r^{N−1} dr,
+ where (f, g) belongs to B ∩ { (f, g) : f² + g² = 1 } with B defined in (27). Moreover,
1195
+ the system (32)–(35) has no escaping radial profile (f, g) with g > 0 in (0, 1).
1196
+ (b) If 2 ≤ N ≤ 6, then there exists a unique escaping radial profile (f, g) with g > 0
1197
+ satisfying (32)–(35). Moreover, (f, ±g) are the only two global minimizers of I in
1198
+ B ∩ { (f, g) : f² + g² = 1 }, f/r, g ∈ C∞([0, 1]), f(0) = 0, g(0) = 1, f > 0, f′ > 0 and
+ g′ < 0 in (0, 1). In addition, for 3 ≤ N ≤ 6, the non-escaping solution ( ¯f ≡ 1, ¯g ≡ 0)
+ is an unstable critical point of I in B ∩ { (f, g) : f² + g² = 1 }.6
1210
+ 6For N = 2, (1, 0) /∈ B; however, we can define the second variation of I at (1, 0) along directions (0, q)
1211
+ compactly supported in (0, 1):
1212
+ Q(0, q) = ∫_0^1 [ (q′)² − ((N−1)/r²) q² ] r^{N−1} dr,
1220
+ and one can prove the existence of q ∈ Lipc(0, 1) such that Q(0, q) < 0 (see e.g. [8, Remark 2.16]).
1221
1223
+ There is a large number of articles studying existence, uniqueness, regularity and
1224
+ stability of radially symmetric SM−1-valued harmonic maps (e.g., [13, 14, 25, 26, 23, 16,
1225
+ 12]). We summarize here the main result for our problem in the cylinder shape domain
1226
+ Ω = BN × (0, 1)n: if N ≤ 6, then minimizing SM−1-valued harmonic maps in A are
1227
+ smooth, radially symmetric and escaping in one-direction; if N ≥ 7, then there is a unique
1228
+ minimizing SM−1-valued harmonic map in A which is singular and given by the equator
1229
+ map ¯u in (36). 7
1230
+ Theorem 11. Let n ≥ 1, N ≥ 2, M ≥ N + 1 and Ω = BN × (0, 1)n. Then
1231
+ 1. if 2 ≤ N ≤ 6, then the escaping radially symmetric vortex sheet solution u in (31)
1232
+ with g > 0 is a minimizing SM−1-valued harmonic map in A and all minimizing
1233
+ SM−1-valued harmonic maps in A are smooth radially symmetric given by Ru where
1234
+ R ∈ O(M) satisfies Rp = p for all p ∈ RN ×{0RM−N }. In this case, the equator map
1235
+ ¯u in (36) is an unstable SM−1-valued harmonic map in A .
1236
+ 2. if N ≥ 7, the non-escaping vortex sheet solution ¯u in (36) is the unique minimizing
1237
+ SM−1-valued harmonic map in A . Moreover, there is no SM−1-valued harmonic
1238
+ map w in A escaping in a direction e ∈ SM−1, i.e., w · e > 0 a.e. in Ω.
1239
+ The main ingredient is the following result yielding minimality of escaping SM−1-valued
1240
+ harmonic maps. This is reminiscent of Sandier-Shafrir [23] (see also [12, Theorem 1.5]).
1241
+ Theorem 12. Let n ≥ 1, M > N ≥ 2 and Ω = BN × (0, 1)n.
1242
+ Assume that w ∈
1243
+ A ∩ H1(Ω, SM−1) is a SM−1-valued harmonic map satisfying (30) and
1244
+ w · e > 0 a.e. in Ω
1245
+ (37)
1246
+ in an escaping direction e ∈ SM−1. Then w is a minimizing SM−1-valued harmonic map
1247
+ in A and all minimizing SM−1-valued harmonic maps in A are of the form Rw where R ∈
1248
+ O(M) is an orthogonal transformation of RM satisfying Rp = p for all p ∈ RN ×{0RM−N }.
1249
+ Proof of Theorem 12. We give here a simple proof based on the argument in [12] that
1250
+ avoids the regularity results used in [23]. By the H1/2-trace theorem applied for w ∈
1251
+ H1(Ω, SM−1), (37) implies that w · e ≥ 0 on ∂BN × (0, 1)n. Combined with the vortex
1252
+ boundary condition in (30), we deduce that the escaping direction e has to be orthogonal
1253
+ to RN × {0RM−N } and up to a rotation, we can assume that e = eM (as in (22)). Then
1254
+ φ = w · eM > 0 a.e. in Ω satisfies
1255
+ −∆φ = |∇w|² φ in Ω,   ∂φ/∂z = 0 on BN × ∂(0, 1)n,   φ = 0 on ∂BN × (0, 1)n.   (38)
1258
+ We consider configurations8 ˜w = w + v : Ω → SM−1 with v ∈ H1_0(BN × Rn, RM) (in
+ particular, |v| ≤ 2 in Ω). Then
+ 2w · v + |v|² = 0   a.e. in Ω.   (39)
1264
+ 7We mention the paper of Bethuel-Brezis-Coleman-H´elein [2] about a similar phenomenology in a do-
1265
+ main Ω = (B2 \ Bρ) × (0, 1) ⊂ R3 where Bρ ⊂ R2 is the disk centered at 0 of radius ρ.
1266
+ 8Note that for any ˜w ∈ A ∩ H1(Ω, SM−1), the map ˜w − w has an extension in H1
1267
+ 0(BN × Rn, RM).
1268
1270
+ Using (30) and (39), we obtain
1271
+ 2 ∫_Ω ∇w · ∇v dx = 2 ∫_Ω |∇w|² w · v dx = − ∫_Ω |∇w|² |v|² dx,
+ yielding9
+ ∫_Ω |∇(w + v)|² dx − ∫_Ω |∇w|² dx = ∫_Ω ( |∇v|² − |∇w|² |v|² ) dx =: Q(v).   (40)
1292
+ To show that w is minimizing, we prove that Q(v) ≥ 0 for all v ∈ H1_0(BN × Rn, RM) ∩
+ L∞(Ω; RM) (note that this is a class larger than what we need, as we do not require
1295
+ that v satisfy the pointwise constraint (39)). For that, we take an arbitrary map
+ ˜v ∈ C∞_c(BN × Rn, RM) of support ω and decompose it as ˜v = φΨ in Ω. This decomposition
1298
+ makes sense as φ ≥ δ > 0 in ω ∩ Ω for some δ > 0 (which may depend on ω). Indeed, by
1299
+ (37) and (38), φ is a superharmonic function (i.e., −∆φ ≥ 0 in Ω) that belongs to H1(Ω).
1300
+ As ∂φ/∂z = 0 on BN × ∂(0, 1)n, φ can be extended by even mirror symmetry to the domain
1302
+ ˜Ω = BN × (−1, 2)n so that φ is superharmonic in ˜Ω. Thus, the weak Harnack inequality
1303
+ (see e.g. [6, Theorem 8.18]) implies that on the compact set ω ∩Ω in ˜Ω, we have φ ≥ δ > 0
1304
+ for some δ. So, ˜v = φΨ in Ω with Ψ = (Ψ1, . . . , ΨM) ∈ H1 ∩ L∞(Ω; RM) vanishing in a
1305
+ neighborhood of ∂BN × (0, 1)n. Then integration by parts yields for 1 ≤ j ≤ M:
1306
+ Q(˜vj) = ∫_Ω ( |∇˜vj|² − |∇w|² φ · φΨj² ) dx
+        = ∫_Ω ( |∇(φΨj)|² − ∇φ · ∇(φΨj²) ) dx   (using (38))
+        = ∫_Ω φ² |∇Ψj|² dx ≥ 0
+ for all ˜v ∈ C∞_c(BN × Rn, RM). Then for every v ∈ H1_0(BN × Rn, RM) ∩ L∞(Ω; RM), there
+ exists a sequence ˜vk ∈ C∞_c(BN × Rn, RM) such that ˜vk → v and ∇˜vk → ∇v in L2 and
1325
+ a.e. in BN × Rn and |˜vk| ≤ ∥v∥L∞(Ω) + 1 in Ω for every k. In particular, by the dominated
+ convergence theorem, we have Q(˜vk) → Q(v) thanks to (40). Thus, we deduce that for
1327
+ every compact ω ⊂ ˜Ω = BN × (−1, 2)n,
1328
+ Q(v) = lim_{k→∞} Q(˜vk) ≥ liminf_{k→∞} ∫_{ω∩Ω} φ² |∇(˜vk/φ)|² dx ≥ ∫_{ω∩Ω} φ² |∇(v/φ)|² dx ≥ 0,
1345
+ where we used Fatou’s lemma. In particular, w is a minimizing SM−1-valued harmonic
1346
+ map by (40) and Q(v) = 0 yields the existence of a vector λ ∈ RM such that v = λφ a.e.
1347
+ in Ω. Then the classification of the minimizing SM−1-valued harmonic maps follows by
1348
+ (39) as in Step 3 of the proof of Theorem 7.
1349
+ Proof of Theorem 11. 1. This part concerning the dimension 2 ≤ N ≤ 6 follows from
1350
+ Theorem 12 and the instability of the radial profile (1, 0) for I in B ∩ { (f, g) : f² + g² = 1 }
+ as explained above.
1355
+ 9Note that the functional Q represents the second variation of E at w, but here the map v is not
1356
+ necessarily orthogonal to w.
1357
1359
+ 2. This part for dimension N ≥ 7 follows the ideas in [14]. More precisely, calling
1360
+ X = (x, z) the variable in Ω, we have as in the proof of Theorem 12 for every v ∈
1361
+ H1_0(BN × Rn, RM) with |v + ¯u| = 1 in Ω:
+ ∫_Ω |∇(¯u + v)|² dX − ∫_Ω |∇¯u|² dX = ∫_Ω ( |∇v|² − |∇¯u|² |v|² ) dX
+   = ∫_Ω |∇zv|² dX + ∫_{(0,1)n} dz ∫_{BN} ( |∇xv|² − ((N−1)/|x|²) |v|² ) dx
+   ≥ ∫_Ω |∇zv|² dX + ( (N−2)²/4 − (N−1) ) ∫_Ω |v|²/|x|² dX ≥ 0
+ where we used the Hardy inequality for v(·, z) ∈ H1_0(BN, RM) for a.e. z ∈ (0, 1)n. This
1399
+ proves that ¯u is the unique minimizing SM−1-valued harmonic map in A .
1400
+ Combined
1401
+ with Theorem 12, we conclude that there is no escaping SM−1-valued harmonic map w in
1402
+ A .
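+ As a quick check of the dimension threshold used above (a fact not spelled out in the text), the Hardy constant satisfies
+ \[
+ \frac{(N-2)^2}{4} - (N-1) \;\ge\; 0
+ \iff N^2 - 8N + 8 \;\ge\; 0
+ \iff N \;\ge\; 4 + 2\sqrt{2} \approx 6.83,
+ \]
+ so for integer dimensions the quadratic form above is nonnegative exactly when N ≥ 7, consistent with the dichotomy
+ between parts 1 and 2 of Theorem 11.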
1403
+ References
1404
+ [1] F. Bethuel, H. Brezis and F. Hélein, Ginzburg-Landau vortices, Progress in Nonlinear Differential Equations and their Applications, 13. Birkhäuser Boston Inc., Boston, MA, 1994.
+ [2] F. Bethuel, H. Brezis, B.D. Coleman and F. Hélein, Bifurcation analysis of minimizing harmonic maps describing the equilibrium of nematic phases between cylinders, Arch. Rational Mech. Anal. 118 (1992), 149-168.
+ [3] H. Brezis, Symmetry in nonlinear PDEs, In Differential equations: La Pietra 1996 (Florence), vol. 65 of Proc. Sympos. Pure Math., Amer. Math. Soc., Providence, RI, 1999, pp. 1-12.
+ [4] H. Brezis, J.-M. Coron and E. H. Lieb, Harmonic maps with defects, Comm. Math. Phys. 107, 4 (1986), 649-705.
+ [5] J. Dávila, M. del Pino, M. Medina and R. Rodiac, Interacting helical vortex filaments in the three-dimensional Ginzburg-Landau equation, J. Eur. Math. Soc. (JEMS) 24 (2022), 4143-4199.
+ [6] D. Gilbarg and N. Trudinger, Elliptic partial differential equations of second order, 2nd ed., Springer, Berlin Heidelberg, 2001.
+ [7] R.-M. Hervé and M. Hervé, Etude qualitative des solutions réelles d'une équation différentielle liée à l'équation de Ginzburg-Landau, Ann. Inst. H. Poincaré Anal. Non Linéaire 11 (1994), pp. 427-440.
+ [8] R. Ignat and L. Nguyen, Local minimality of RN-valued and SN-valued Ginzburg-Landau vortex solutions in the unit ball BN, arXiv:2111.07669, accepted in Ann. Inst. H. Poincaré Anal. Non Linéaire, 2023.
+ [9] R. Ignat, L. Nguyen, V. Slastikov and A. Zarnescu, Uniqueness results for an ODE related to a generalized Ginzburg-Landau model for liquid crystals, SIAM J. Math. Anal. 46 (2014), pp. 3390-3425.
+ [10] R. Ignat, L. Nguyen, V. Slastikov and A. Zarnescu, Stability of the melting hedgehog in the Landau-de Gennes theory of nematic liquid crystals, Arch. Ration. Mech. Anal. 215, 2 (2015), 633-673.
+ [11] R. Ignat, L. Nguyen, V. Slastikov and A. Zarnescu, Uniqueness of degree-one Ginzburg-Landau vortex in the unit ball in dimensions N ≥ 7, C. R. Math. Acad. Sci. Paris 356 (2018), 922-926.
+ [12] R. Ignat, L. Nguyen, V. Slastikov and A. Zarnescu, On the uniqueness of minimisers of Ginzburg-Landau functionals, Ann. Sci. Éc. Norm. Supér. 53 (2020), 589-613.
+ [13] W. Jager and H. Kaul, Uniqueness and stability of harmonic maps and their Jacobi fields, Manuscripta Math. 28, 1-3 (1979), 269-291.
+ [14] W. Jager and H. Kaul, Rotationally symmetric harmonic maps from a ball into a sphere and the regularity problem for weak solutions of elliptic systems, J. Reine Angew. Math. 343 (1983), 146-161.
+ [15] F.-H. Lin, A remark on the map x/|x|, C. R. Acad. Sci. Paris Sér. I Math. 305, 12 (1987), 529-531.
+ [16] F.-H. Lin and C.Y. Wang, Stable stationary harmonic maps to spheres, Acta Math. Sin. (Engl. Ser.) 22 (2006), 319-330.
+ [17] V. Millot and A. Pisante, Symmetry of local minimizers for the three-dimensional Ginzburg-Landau functional, J. Eur. Math. Soc. (JEMS) 12, 5 (2010), 1069-1096.
+ [18] P. Mironescu, On the stability of radial solutions of the Ginzburg-Landau equation, J. Funct. Anal. 130 (1995), 334-344.
+ [19] P. Mironescu, Les minimiseurs locaux pour l'équation de Ginzburg-Landau sont à symétrie radiale, C. R. Acad. Sci. Paris Sér. I Math. 323, 6 (1996), 593-598.
+ [20] F. Pacard and T. Rivière, Linear and nonlinear aspects of vortices, vol. 39 of Progress in Nonlinear Differential Equations and their Applications. Birkhäuser Boston, Inc., Boston, MA, 2000. The Ginzburg-Landau model.
+ [21] A. Pisante, Two results on the equivariant Ginzburg-Landau vortex in arbitrary dimension, J. Funct. Anal. 260, 3 (2011), 892-905.
+ [22] E. Sandier, Ginzburg-Landau minimizers from Rn+1 to Rn and minimal connections, Indiana Univ. Math. J. 50 (2001), 1807-1844.
+ [23] E. Sandier and I. Shafrir, On the uniqueness of minimizing harmonic maps to a closed hemisphere, Calc. Var. Partial Differential Equations 2, 1 (1994), 113-122.
+ [24] E. Sandier and I. Shafrir, Small energy Ginzburg-Landau minimizers in R3, J. Funct. Anal. 272, 9 (2017), 3946-3964.
+ [25] R. Schoen and K. Uhlenbeck, Boundary regularity and the Dirichlet problem for harmonic maps, J. Differential Geom. 18, 2 (1983), 253-268.
+ [26] R. Schoen and K. Uhlenbeck, A regularity theory for harmonic maps, J. Differential Geometry 17 (1982), 307-335.
1471
E9FJT4oBgHgl3EQfCizA/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
ENE0T4oBgHgl3EQfgwE0/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1dfe34debd29fde60b0b80cc10e4ed8daab63ca564b150c29a2ad7b5b57c228
3
+ size 82516
ENE2T4oBgHgl3EQfSgdx/content/tmp_files/2301.03793v1.pdf.txt ADDED
@@ -0,0 +1,715 @@
 
1
+ ESTIMATION OF USER’S WORLD MODEL USING GRAPH2VEC ∗
2
+ Tatsuya Sakai,
3
+ Takayuki Nagai
4
+ Graduate School of Engineering Science, Osaka University
5
+ 1-3, Machikaneyama, Toyonaka, Osaka, Japan
6
7
+ ABSTRACT
8
+ To obtain advanced interaction between autonomous robots and users, robots should be able to
9
+ distinguish their state space representations (i.e., world models). Herein, a novel method was
10
+ proposed for estimating the user’s world model based on queries. In this method, the agent learns
11
+ the distributed representation of world models using graph2vec and generates concept activation
12
+ vectors that represent the meaning of queries in the latent space. Experimental results revealed that
13
+ the proposed method can estimate the user’s world model more efficiently than the simple method of
14
+ using the “AND” search of queries.
15
+ Keywords Autonomous robot · Explainability · Representation learning · User’s world model
16
+ 1
17
+ Introduction
18
+ Autonomous robots are increasingly being used in numerous applications. Currently, they assist humans in performing
19
+ tasks by executing commands. For autonomous robots performing sophisticated decisions, the blind execution of
20
+ commands is not always the best strategy. Moreover, in many situations, fully executing commands is difficult. These
21
+ autonomous robots should be able to explain the reasons for their decisions to gain user trust. Explainable autonomous
22
+ robots (XAR) are defined as robots that have these explanatory capabilities. The following four requirements have been
23
+ identified for the XARs [1]:
24
+ (1) Owning an interpretable decision-making space
25
+ (2) Estimation of the model of others
26
+ (3) Estimation of the information required for a user to estimate the policy of the robot
27
+ (4) Presentation to the user of explanation
28
+ This explanation mechanism is a mutual process between the robot and the user and is displayed in Fig. 1. The world
29
+ model refers to the correspondence between actions and state changes, that is, the internal model [2] that represents the
30
+ dynamics of the environment, and we do not distinguish between the world model and the environment in this paper.
31
+ “Policy sharing” in Fig.1 is a spontaneous presentation of information of a policy (e.g., presentation of a sequence of
32
+ actions to be taken), and an explanation is generated when a query to this information presentation is requested by
33
+ others.
34
+ Among the four requirements, the estimation of the other’s world model is particularly crucial for providing a user-
35
+ specific explanation. In the context of human–robot interaction, the importance of estimating the user’s internal state
36
+ has already been recognized. Gao et al. [3] and Clair et al. [4] proposed a framework for estimating plausible action
37
+ strategies based on the actions and interaction history of users. Huang et al. [5] and Lage et al. [6] focused on restoring
38
+ explanations to policies and advocated the importance of appropriately estimating the restoration algorithm of users
39
+ requesting explanation. These studies have focused on internal states, particularly policies, and planning algorithms.
40
+ ∗This paper is an extended (English translated) version of “T.Sakai, T.Horii, T.Nagai, Representation Learning of World Models
41
+ and Estimation of World Model of Others Using Graph2vec, Journal of RSJ, 40, 2, pp.166-169, 2022,” (in Japanese) ISSN 1884-7145,
42
+ Print ISSN 0289-1824, https://doi.org/10.7210/jrsj.40.166
43
+ arXiv:2301.03793v1 [cs.RO] 10 Jan 2023
44
+
45
+ Preprint
46
+ Figure 1: Explanation process as communication. To clarify elements of the explanation, observations/actions, and
47
+ interactions between world models of others are segregated in the figure.
48
+ Figure 2: Simplified explanation process considered in this paper. We only consider unidirectional explanation process
49
+ from robot to user.
50
+ However, real-world robots are designed to exhibit desired behavior in terms of algorithms and policies, and they
51
+ share the same final objective with their users. In these situations, the results of action decisions typically stem from
52
+ discrepancies in environmental awareness.
53
+ In this study, a novel method was proposed for estimating the user’s world model based on the robot’s world model
54
+ and the questions (queries) posed by the user. The proposed method can identify differences in the world models and
55
+ generate an explanation that resolves the discrepancy between the perception of the environment by the robot and the
56
+ user. In this study, we simplified the mechanism of Fig.1 as in Fig.2 2 and focused on estimating the world model of the
57
+ explained person as the user’s world model.
58
+ 2
59
+ Proposed method
60
+ In the proposed method, the world model of the user is estimated by the following procedure (Fig. 3).
61
+ (1) Acquisition of a distributed representation of the world model: A distributed representation of each world
62
+ model is obtained using a graph-structured world model.
63
+ (2) Acquisition of the query vector: Based on the query given by the user, the system acquires a direction vector
64
+ that represents the meaning of the query in the representation space of the world model.
65
+ (3) Estimation of user’s world model: The distributed representations of the world model and query vector are
66
+ used to estimate the user’s world model based on cosine similarity.
67
+ 2When the model of others outputs an explanation, the content of the explanation is determined by considering information
68
+ received from the world model.
69
+ 2
70
+
71
+ Preprint
72
+ Why donʼt you do
73
+ Agentʼs
74
+ 1
75
+ 2
76
+ 3
77
+ 4
78
+ 5
79
+ 6
80
+ 7
81
+ 8
82
+ 9
83
+ Userʼs
84
+ Figure 3: Schematic of our proposed method.
85
+
86
+
87
+
88
+ =
89
+ 1
90
+ 2
91
+
92
+ =
93
+ 1
94
+ 2
95
+
96
+ Figure 4: Learning of a distributed representation of the world model. The experience on the environment is used to
97
+ obtain graph-based world models. Then, after converting to WL labels, a distributed representation of each environment
98
+ is obtained.
99
+ The robot and user are assumed to share the same state space and measures; only the connection relation between states
100
+ is assumed to be unshared. As a distributed representation of the world model, the parameters of a model representing a
101
+ continuous state space, for example, [2], could be used. However, presenting the differences of the world model to the
102
+ user requires the discretization of the state transition structure; the parameter space of the model does not necessarily
103
+ represent the similarity of the state transition structure of the world model. Therefore, the world model is considered to
104
+ be a discretized graph for obtaining a distributed representation. The generation of explanations is outside the scope of
105
+ this study.
106
+ 2.1
107
+ Acquisition of a distributed representation of the world model
108
+ The learning process of a distributed representation of the world model is shown in Fig. 4. The robot learns a
109
+ representation space representing the similarity of environments based on its experiences. First, the robot acquires an
110
+ undirected graph representing the state transitions of each environment; simultaneously, it acquires policies through
111
+ reinforcement learning. Specifically, the robot assumes that the states whose transitions were observed during the policy
112
+ learning search are adjacent to each other and adds edges3.
113
+ After acquiring the undirected graphs of all environments, graph2vec is applied to acquire the distributed representation
114
+ of the graph of each environment [8]. Graph2vec is a method in which doc2vec [9] is used to acquire distributed
115
+ representations of graphs, and instead of predicting the occurrence of words, the occurrence of labels that represent
116
+ each subgraph is presented. Labels representing subgraphs are obtained using the Weisfeiler–Lehman (WL) relabeling
117
+ process [10]. In this process, the label of the next layer is determined by considering the labels of neighboring nodes.
118
+ Higher layer labels represent more global information regarding the graph.
119
+ 3This method is assumed to be used in a discrete state space; discretization of the state space is required for application to a real
120
+ robot. However, this measure is beyond the scope of this study. For discretization, the method proposed in [7], wherein the policy
121
+ implications of each state is considered, can be used.
122
+ 3
123
+
124
+ ------Preprint
125
+ Graph2vec allows graphs in which the same subgraphs occur, that is, graphs in environments with similar state transition
126
+ structures, to be embedded closer together in the representation space. Efficient search based on user queries becomes
127
+ possible by acquiring the representation space of the world model in advance.
128
+ When acquiring a world model, we explicitly provide an environment label to indicate the environment wherein the
129
+ experience occurs. If no environment label is given, a world model is obtained using temporally continuous experiences.
130
+ Furthermore, we consider that models with similar distributed representations represent the same environment and
131
+ merging multiple models may be effective in obtaining a world model with higher accuracy.
132
+ 2.2
133
+ Acquisition of the query vector
134
+ Based on the query given by the user, the robot acquires a direction vector that represents the meaning of the query in
135
+ the representation space of the world model. Kim et al. [11] focused on the middle layer of the neural network and
136
+ generated concept vectors (CAV: concept activation vectors) by calculating the difference in latent representations when
137
+ features that satisfy a specific concept and features that do not satisfy the concept are input. In this study, this method
138
+ was applied to define a query vector vquery as Eq. (1) by considering the difference of distributed representations
139
+ between environments that satisfy the query and those that do not. The query assumes the form “action aquery should
140
+ be selected in state squery.”
141
+ vquery = vpos − vneg,
142
+ (1)
143
+ where
144
+ vpos =
145
+
146
+ i vi · P(aquery|vi, squery)
147
+
148
+ i P(aquery|vi, squery)
149
+ ,
150
+ vneg =
151
+
152
+ i vi · (1 − P(aquery|vi, squery))
153
+
154
+ i(1 − P(aquery|vi, squery))
155
+ .
156
+ (2)
157
+ Here, vi represents a distributed representation of the i-th environment. To correspond to a policy in which actions
158
+ are selected probabilistically, the probability value of selecting an action is used as the coefficient of each distributed
159
+ representation vi. If necessary, the coefficients can be expressed as the binary values of {0, 1}. Note that if the state
160
+ Squery is not included in the undirected graph of the environment i, the CAV is calculated by excluding vi4.
161
+ 2.3
162
+ Estimation of user’s world model
163
+ Using the distributed representation of the world model and the query vector, the user’s world model was estimated
164
+ using cosine similarity. The likelihood of an environment i as an estimated environment is expressed by Eq. (3).
165
+ S(vi, vobs, Vq) =
166
+
167
+ j
168
+ similarity(vj
169
+ query, vi − vobs) − λ · distance(vi, vobs),
170
+ (3)
171
+ where vobs is a distributed representation of the environment currently observed by the agent, Vq is any number of query
172
+ vectors, and vj
173
+ query ∈ Vq is the j-th query vector. Furthermore, similarity(a, b) and distance(a, b) are functions that
174
+ output the cosine similarity [−1, 1] and the distance between vectors a and b in the representation space, respectively.
175
+ When reasoning in a real environment, the robot and the user observe almost the same environment. Therefore, because
176
+ their world models are similar, the similarity between the direction of each environment and the direction of the query
177
+ vector, as seen from vobs, is used as the estimation criterion. The coefficient λ is a hyperparameter that determines
178
+ how much distance between vectors is considered and represents the strength of the assumption that the observed
179
+ environment of the robot and the user are similar.
180
+ Using the definitions, the world model of others to be estimated is expressed by Eq. (4).
181
+ Env_est = arg max
182
+ i
183
+ S(vi, vobs, Vq)
184
+ (4)
185
+ In this study, we assume that the importance of all queries are equivalent and designed the evaluation function S as the
186
+ sum of the similarities for each query vector vj
187
+ query. However, in the real world, the importance of each query may
188
+ differ, in which case the similarity should be multiplied by a coefficient ρj.
189
+ 4When estimating the user’s world model, the likelihood S(vi, vobs, Vq) is computed for all environments i, including those that
190
+ do not contain the state squery.
191
+ 4
192
+
193
+ Preprint
194
+ 2.3.1
195
+ User vectors
196
+ In addition to the query vector, the user vector that represents “what kind of environment with the distributed repre-
197
+ sentation the user is likely to retain as a world model” can also be defined. The user vector is a vector that represents
198
+ how the user tends to misunderstand the environment and plays a role in adding this tendency to the result of the user’s
199
+ world model estimation.
200
+ vuser = vu_pos − vu_neg,
201
+ (5)
202
+ where
203
+ vu_pos =
204
+
205
+ i vi · P(vi)
206
+
207
+ i P(vi)
208
+ ,
209
+ vu_neg =
210
+
211
+ i vi · (1 − P(vi))
212
+
213
+ i(1 − P(vi))
214
+ .
215
+ (6)
216
+ P(vi) is the probability that the user estimates the environment corresponding to the distributed representation vi as
217
+ the current world model when no information about the current environment is given to the user. In this paper, P(vi) is
218
+ assumed to be known, and the estimation of P(vi) is outside the scope of this research.
219
+ 2.4
220
+ Explanation by language
221
+ Using a pre-trained language vector Eq. (7) in the representation space, the relationship between the world model
222
+ maintained by the agent and the user’s world model can also be explained by language.
223
+ vword(x) = average(vm − vn),
224
+ (7)
225
+ where vm and vn are distributed representations of environment pairs whose relation is represented by the language
226
+ x. By averaging the differences of distributed representations of environment pairs vm and vn that satisfy a specific
227
+ language description x, we can obtain a semantic vector represented by the language description x in the representation
228
+ space.
229
+ Using the language vector vword, the language describing the relationship between the world model maintained by the
230
+ agent and the user’s world model is represented by Eq. (8).
231
+ Explanation = arg max
232
+ x
233
+ similarity(vword(x), vEnv_est − vobs)).
234
+ (8)
235
+ By means of Eq. (8), the language x that represents the closest relationship between the current world models is selected
236
+ as the explanation.
237
+ 3
238
+ Experiment
239
+ The proposed method was applied to an agent that acquires action strategies by proximal policy optimization (PPO) in a
240
+ simulation environment, and its usefulness was evaluated. A partially modified version of the grid environment [12]
241
+ with multiple objects was used for the experiments (Fig. 5). In this environment, the agent (triangle) obtains a reward by
242
+ taking a key, opening a door, and reaching a goal in the lower right corner. The position of the goal remains unchanged,
243
+ but the positions of the key and the door change every trial. The agent has five actions, namely go straight, turn left,
244
+ turn right, take the key from the grid in front of it, and open the door. The agent observes the absolute position of the
245
+ key (x, y coordinates), the absolute position of the door (x, y coordinates), its own absolute position (x, y coordinates)
246
+ and orientation, holding/not holding the key, and opening/closing the door, for a total of nine dimensions.
247
+ 3.1
248
+ Experiment 1: Acquisition of a distributed representation of the world model
249
+ A graph representing the state transitions of each environment was obtained simultaneously with the learning of policies
250
+ by PPO, and graph2vec was used to obtain a 16-dimensional distributed representation. An example of the acquired
251
+ graph is shown in Fig.6. There are three edges that transition from the group of states before key acquisition to the
252
+ group of states after key acquisition because keys can be acquired from three directions. On the other hand, there is
253
+ only one edge that transitions from the group of states where the door is not open to the group of states where the door
254
+ is open, because the door can be opened from only one state.
255
+ 5
256
+
257
+ Preprint
258
+ Why donʼt you pick up
259
+ Figure 5: Estimation results of the user’s world model.
260
+ Table 1: Cumulative frequency and average value of the order in which the optimal environment appears. The number
261
+ of trials is 100 for each method.
262
+ Method
263
+ Order
264
+ 1
265
+ 2
266
+ 3
267
+ Order Average
268
+ Our method
269
+ 35
270
+ 49
271
+ 60
272
+ 7.1
273
+ Random
274
+ 6
275
+ 8
276
+ 11
277
+ 23.1
278
+ The representation space was compressed to eight dimensions using independent component analysis, and the visualized
279
+ results are displayed in Fig. 7. In this experiment, the absolute positions of the key and door were used as environment
280
+ labels, and the five-dimensional observations excluding them were used as node features. The experimental results
281
+ revealed that clusters are formed in the representation space based on the absolute positions of the key and door. In
282
+ particular, clusters related to the position of the door are apparent, which can be attributed to the fact that the surrounding
283
+ state transition relationship changes considerably compared with that of the key. In this experiment, the absolute
284
+ positions of keys and doors are used only for identifying the environmental graph to be updated and are not embedded
285
+ in the graph itself. Therefore, graph2vec created a representation space that appropriately reflects the differences in the
286
+ location information of keys and doors expressed in the state transition structure.
287
+ 3.2
288
+ Experiment 2: Estimation of the user’s world model
289
+ The query vector was applied to the obtained representation space to estimate the world model of others. In this
290
+ verification, we assume that questions were asked in the situations of acquiring a key and opening a door, and the query
291
+ “In the state Squery, we should {take the key/open the door}” was considered. Figure 5 displays the results of estimating
292
+ the others’ world model given the reference world model and query. The environment that satisfies the query has the
293
+ highest evaluation value, and the environment in which the key is located on the opposite side of the grid environment
294
+ has the lowest evaluation value. This result suggests that the obtained query vector is appropriate for estimating the
295
+ world model.
296
+ The optimal environment is defined as the agent’s world model with minimal modifications for satisfying the query. For
297
+ example, given the query “In state Squery, the agent should take the key,” the optimal environment is the environment
298
+ in which only the position of the key is changed to satisfy the query, whereas the position of the door is left unchanged.
299
+ For each randomly selected agent world model/query pair, the evaluation values for each environment obtained using
300
+ Eq. (3) were sorted in descending order to obtain the order of appearance of the optimal environment (Table 1). The
301
+ order of appearance of the environments that satisfy the query when sorted randomly is displayed for comparison. The
302
+ experimental results confirmed that the proposed method ranks the optimal environments higher.
303
+ 6
304
+
305
+ FoFo
306
+ --
307
+ ooPreprint
308
+ Figure 6: An example graph of the acquired world model. The blue nodes represent the state before the key acquisition.
309
+ The orange nodes represent the state after the key acquisition when the door is not open, and the green nodes represent
310
+ the state when the door is open.
311
+ Although the direct manipulation of the positions of keys and doors can change the order of the appearance of the
312
+ optimal environment to one, direct manipulation is not always possible in cases in which directly manipulatable
313
+ information is not given as a query. The proposed method estimates the optimal environment at an early stage, although
314
+ the state transition relations to be changed are not explicitly given. This property of the proposed method is crucial.
315
+ 3.3
316
+ Experiment 3: Validation with multiple queries
317
+ An explanatory agent A and an explained agent B are prepared, and the number of queries required for A to accurately
318
+ estimate B’s world model is evaluated. This experiment assumes a situation in which the user is asked to confirm the
319
+ correctness of the estimation results of the other’s world model through the presentation of an action sequence, which
320
+ improves the estimation accuracy (Fig. 8). The outline of the experiment is as follows:
321
+ (1) A and B share an initial state (absolute position and orientation of the agent, and the state of the key and door)
322
+ and an environment-policy pair that specifies the strategy to be used in specific environments. They have
323
+ arbitrary world models with different key and door positions.
324
+ (2) Here, A sets its own world model as the initial value of the other’s world model and presents the optimal
325
+ sequence of actions in that model in turn (policy sharing) 5.
326
+ (3) B adds the query “In state squery, action aquery should be chosen” when the given action differs from the
327
+ optimal action in its measure. The existing query is not deleted.
328
+ (4) A updates the other’s world model based on the query and presents the action sequences in the updated other’s
329
+ world model again in sequence with Squery as the initial state.
330
+ (5) Repeat (3) and (4) to evaluate the number of environment updates required for A to obtain B’s world model as
331
+ the other’s world model.
332
+ The environment selected once is not selected, and the second or subsequent candidate is adopted. If the same
333
+ environment has not been obtained after all the action sequences are presented, the environment is continuously updated
334
+ 5Policy sharing in this experiment (Fig. 8) is performed to confirm the estimation results of the other’s world model and differs
335
+ from the presentation of information about one’s own policy in Fig. 1 and Fig. 2.
336
+ 7
337
+
338
+ Preprint
339
+ Figure 7: Results of latent space visualization. Each data point is illustrated according to (a) X-coordinate of the key,
340
+ (b) Y-coordinate of the key, (c) X-coordinate of the door, and (d) Y-coordinate of the door.
341
+ Figure 8: Schematic of experiment 3. The agent transmits information on its policy to the user and updates the user’s
342
+ world model based on queries.
343
+ without increasing the number of queries. In this verification, the proposed method was compared with the “AND”
344
+ search of queries as a method of updating the world model of others. In practice, the following three methods are
345
+ compared.
346
+ Proposed method:
347
+ Select a plausible environment using Eq. (4).
348
+ AND search 1:
349
+ Randomly selects an environment from among the environments that satisfy the query.
350
+ AND search 2:
351
+ The environment is randomly selected with the constraint that “the optimal behavior for the policy B
352
+ is selected in all states from the initial state until reaching state squery”. Thus, it adds constraints and increases
353
+ the information provided compared with the two update methods described.
354
+ Experimental results revealed that the proposed method can estimate others’ world models with the fewest number
355
+ of updates (Table 2). The results of the corresponding two-tailed t-test revealed that t(100) = 8.07 and p < .01 for
356
+ the proposed method and AND search 1, and t(100) = 4.59 and p < .01 for the proposed method and AND search 2.
357
+ Thus, both significant differences were confirmed.
358
+ 8
359
+
360
+ V
361
+ V
362
+ M
363
+ V
364
+ V
365
+ V
366
+ V
367
+ V
368
+
369
+ VI
370
+ V
371
+ V
372
+ V
373
+ V
374
+ V
375
+ V
376
+ V
377
+ V
378
+ V
379
+ V
380
+
381
+ W
382
+ V
383
+ V
384
+ V
385
+ W
386
+
387
+ V
388
+
389
+ V
390
+ V
391
+ V
392
+ V
393
+ V
394
+ V
395
+ V
396
+ V
397
+ V
398
+ V
399
+ V
400
+
401
+ 1
402
+
403
+ I
404
+ I
405
+ V
406
+ △△
407
+
408
+ V
409
+
410
+
411
+
412
+ I
413
+
414
+
415
+ I
416
+ 1
417
+
418
+ 1
419
+ 口口
420
+
421
+ 口口
422
+
423
+
424
+ V
425
+ V
426
+ V
427
+ V
428
+ V
429
+ V
430
+ V
431
+ V
432
+ VV
433
+ V口
434
+ 0VX
435
+ W
436
+ V
437
+ V
438
+
439
+
440
+
441
+
442
+
443
+
444
+
445
+
446
+ 口Preprint
447
+ Table 2: Number of updates required to estimate the user’s world model.
448
+ Method
449
+ Number of updates
450
+ Standard deviation
451
+ Our method
452
+ 5.53
453
+ 4.83
454
+ AND search 1
455
+ 20.72
456
+ 17.43
457
+ AND search 2
458
+ 8.69
459
+ 4.71
460
+ Table 3: Comparison with the use of probabilistic evaluation.
461
+ Method
462
+ Nuber of updates
463
+ Standard deviation
464
+ Our method (λ = 0.05)
465
+ 3.93
466
+ 5.38
467
+ Our method (λ = 0)
468
+ 5.75
469
+ 7.11
470
+ Probabilistic value (λ = 0.05)
471
+ 7.13
472
+ 7.79
473
+ The most common information given directly is “AND search2”. By contrast, the proposed method can reduce the
474
+ number of updates by vectorizing queries and utilizing prior knowledge embedded in the representation space as
475
+ additional information.
476
+ 3.4
477
+ Experiment 4: Comparison with the use of probabilistic evaluation
478
+ We compare the proposed method with the case where the probability value of each environment satisfying the query is
479
+ used as the evaluation function instead of CAV. The proposed method uses Eq. (3) as the evaluation function, while the
480
+ comparison method uses Eq. (9).
481
+ S(vi, vobs, Vq) =
482
+
483
+ j
484
+ P(aquery|vi, squery) − λ · distance(vi, vobs).
485
+ (9)
486
+ The procedure for this experiment is the same as in experiment 3; however, the initial world model is not completely
487
+ random, and the coordinates of either the key or the door are assumed to be identical. This condition replicates the
488
+ assumption that the agent’s world model and the user’s world model are similar6.
489
+ Experimental results showed that the proposed method, which takes into account the distance in the representation
490
+ space (λ = 0.05), was able to estimate the user’s world model with the fewest number of updates (Table3). The results
491
+ of the corresponding two-tailed t test showed that the proposed method with λ = 0.05 compared to λ = 0 showed
492
+ t(99) = 4.59 and p < .005, while the proposed method with λ = 0.05 compared to the method using probability values
493
+ showed t(99) = 4.44 and p < .005, both of which are significantly different from each other7.
494
+ The proposed method, which takes into account the distance in the representation space, was able to estimate the
495
+ environment with a significantly smaller number of updates than the method using the same coordinates for either the
496
+ key or the door. Compared to the method using probability value as the evaluation function, the proposed method
497
+ was able to absorb small errors in probability values, resulting in a significantly smaller number of updates. When the
498
+ distance in the representation space corresponds to the similarity of the state transition structure between environments,
499
+ as in the present verification, it is effective to use CAV to obtain environments that are perpendicular to the virtual
500
+ separation boundary as the user’s world models.
501
+ 3.5
502
+ Experiment 5: Number of samples and accuracy of CAV
503
+ We evaluate the number of queries required to correctly estimate the world model when the number of samples (number
504
+ of environments) used to compute the CAV is reduced. The evaluation procedure is the same as in Experiment 4. In this
505
+ experiment, the maximum number of samples is 300 because 300 environments are embedded in the representation
506
+ space. We also set λ = 0.05.
507
+ 6Without this assumption, the world model with minimal modification to satisfy the query (the optimal environment) is not
508
+ necessarily the user’s world model. However, theoretically, when the evaluation value is calculated using Eq. (3), other environments
509
+ that satisfy the query will have a lower evaluation value compared to the optimal environment. Therefore, if the assumption that the
510
+ world models of the agent and user are similar cannot be made, it is desirable to use Eq. (9). However, in a real environment, the
511
+ world models of the agent and user are not completely independent, and similarity can be assumed.
512
+ 7Because the t test was applied twice in this verification, a significant difference was found at the significance level α = 0.01
513
+ based on the Bonferroni method.
514
+ 9
515
+
516
+ Preprint
517
+ Table 4: Relationship between the number of CAV samples and the order of appearance of the optimal environment.
518
+ Each value represents the cumulative frequency, and the number of trials is 100 for each.
519
+ Nuber of samples
520
+ Order of appearance
521
+ 1
522
+ 2
523
+ 3
524
+ 300
525
+ 40
526
+ 62
527
+ 69
528
+ 250
529
+ 44
530
+ 61
531
+ 64
532
+ 200
533
+ 42
534
+ 51
535
+ 62
536
+ 150
537
+ 40
538
+ 51
539
+ 55
540
+ 100
541
+ 35
542
+ 46
543
+ 54
544
+ 50
545
+ 9
546
+ 18
547
+ 26
548
+ Prior distribution 1
549
+ Prior distribution 2
550
+ P(door_y = 1) = 0.4
551
+ P(door_y = 2) = 0.3
552
+ P(door_y = 3) = 0.2
553
+ P(door_y = 4) = 0.1
554
+ P(door_y = 5) = 0.0
555
+ P(door_y = 6) = 0.0
556
+ P(door_y = 1) = 0.0
557
+ P(door_y = 2) = 0.1
558
+ P(door_y = 3) = 0.4
559
+ P(door_y = 4) = 0.4
560
+ P(door_y = 5) = 0.1
561
+ P(door_y = 6) = 0.0
562
+ Figure 9: The prior distribution used in experiment 6. The prior distribution for the y-coordinate of the door (in the
563
+ vertical direction) is defined.
564
+ The experimental results show that the accuracy deteriorates much more slowly up to 100 samples than when the CAV
565
+ is generated with 300 samples (Table 4). This suggests that a specific level of estimation accuracy can be maintained
566
+ even when the number of samples is reduced. The fact that the user’s world model is estimated taking into account
567
+ the distance in the representation space may also contribute to maintaining accuracy. On the other hand, the accuracy
568
+ dropped drastically when the number of samples was 50. This may be because of an increase in the number of trials in
569
+ which the number of positive data (the number of data satisfying the query) in the sample is very small.
570
+ 3.6
571
+ Experiment 6: Use of the user vector
572
+ We test whether the estimation accuracy of the other-world model can be improved by using the user vector defined
573
+ by Eq. (5), as opposed to using only CAV. In this verification, two types of prior distributions are defined for the
574
+ y-coordinate of the door (Fig. 9), and the user vector is calculated for each of them. The user vector is treated as one
575
+ of the query vectors in the evaluation value calculation for each environment. Note that λ = 0 is assumed in this
576
+ verification because there is no assumption that the world models held by agents A and B are similar8.
577
+ The estimation results of the other-world model for the same reference environment and query are shown in Fig. 10.
578
+ User vectors 1 and 2 correspond to prior distributions 1 and 2, respectively. It can be seen that while the results of the
579
+ inference of the door location are unstable when only the query vector is applied, the results of the inference using the
580
+ other vectors show that the door coordinates are concentrated in locations that have high probability values in the prior
581
+ distribution.
582
+ The same validation as in experiment 3 was conducted by applying the prior distribution shown in Fig. 9, and the
583
+ number of queries required for both distributions 1 and 2 was lower when using the user vector (Tab. 5). The results
584
+ 8If this experiment is conducted under the assumption that the world models of agents A and B are similar, it is necessary to set
585
+ the number of objects that determine the state transition structure of the environment to three or more and that they are placed at the
586
+ same coordinates as the current environment. Under these conditions, it is desirable to set λ = 0.05.
587
+ 10
588
+
589
+ Fo
590
+ -Preprint
591
+ Why don’t you
592
+ pick up the key
593
+ at the state?
594
+ Base environment
595
+ Query vector
596
+ Query vector
597
+ +User vector 1
598
+ Query vector
599
+ +User vector 2
600
+ Highest
601
+ Score
602
+ Lower
603
+ Figure 10: User vector and environments.
604
+ Table 5: Change in estimation accuracy when using user vectors. The number of trials is 100 for each.
+ Method | Distribution 1: Number of updates | Standard deviation | Distribution 2: Number of updates | Standard deviation
+ Query + User | 5.31 | 4.59 | 4.69 | 3.34
+ Query | 6.39 | 7.13 | 5.11 | 3.46
+ The results of the corresponding two-tailed t test showed that t(99) = 2.34 and p < .05 for distribution 1 and t(99) = 1.45 and p = 0.15 for distribution 2. These results suggest that the number of queries required for estimation can be reduced by using the user vector, although the size of the effect depends on the shape of the prior distribution.
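+ The analysis above is a paired (corresponding) two-tailed t test over the 100 trials. As a minimal sketch of that computation, assuming the per-trial update counts are available as two arrays, SciPy could be used as follows; the data below are synthetic placeholders, not the experimental measurements.
+
+ import numpy as np
+ from scipy import stats
+
+ rng = np.random.default_rng(0)
+ # Synthetic placeholders: in the experiment these would be the 100 paired per-trial
+ # numbers of updates with and without the user vector.
+ updates_query_user = rng.poisson(5.3, size=100)
+ updates_query = updates_query_user + rng.poisson(1.1, size=100)
+ t_stat, p_value = stats.ttest_rel(updates_query_user, updates_query)  # paired, two-tailed
+ print(f"t(99) = {t_stat:.2f}, p = {p_value:.4f}")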
+ 3.7 Experiment 7: Explanation by language
+ We test whether language vectors learned in advance can be used to correctly output language that describes the relationship between the agent’s world model and the user’s world model. The language vectors used in this study correspond to eight different explanations of the form “In the world model assumed by the user, the {key, door} is located more to the {upper, lower, right, left} side than in the world model maintained by the agent.” In the experiment, language explanations were first given to n pairs of world models with different coordinates of the key or door, and the language vectors were obtained using Eq. (7). We then generated linguistic explanations for randomly selected pairs of world models under the same conditions as those used to generate the language vectors and evaluated the percentage of explanations that correctly described the relationship between the world models (i.e., the percentage of correct responses).
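+ The following is a minimal NumPy sketch of this procedure: each language vector is learned as the average difference of world-model embeddings (Eq. (7)), and an explanation is generated by cosine similarity (Eq. (8)). The data layout (a dict mapping each explanation string to a list of embedding pairs) and the function names are assumptions for illustration.
+
+ import numpy as np
+
+ def learn_language_vectors(pairs_by_label):
+     # pairs_by_label: {explanation x: list of (v_m, v_n) embedding pairs whose relation
+     # is described by x}. Eq. (7): v_word(x) = average(v_m - v_n).
+     return {x: np.mean([v_m - v_n for v_m, v_n in pairs], axis=0)
+             for x, pairs in pairs_by_label.items()}
+
+ def explain(v_env_est, v_obs, language_vectors):
+     # Eq. (8): choose the explanation whose vector is most similar to v_Env_est - v_obs.
+     diff = v_env_est - v_obs
+     cos = lambda a, b: float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))
+     ranked = sorted(language_vectors, key=lambda x: cos(language_vectors[x], diff), reverse=True)
+     return ranked[0], ranked[:2]  # the "1st" answer and the "1st and 2nd" answers of Table 6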
+ The percentage of correct responses after 100 trials is shown in Table 6. Note that if more than one linguistic explanation correctly represented the relationship between the world models, all of them were considered correct answers. For example, if the key is located to the upper right, both “the key is on the right” and “the key is on the top” are treated as correct answers. The “1st” column in the table indicates the percentage of correct explanations generated by Eq. (8). The “1st and 2nd” column represents the percentage of cases in which both the first and the second most similar explanations were correct; it was evaluated only in cases where more than one language explanation was considered a correct answer. Although the number of world model pairs considered in this experiment is approximately 9000, the experimental results show that language explanations can be generated with high accuracy even when the number of data used for language vector acquisition is n = 1000. The accuracy was also maintained even when the number of data was reduced drastically to n = 100 and n = 50, suggesting that the learning in the representation space is effective. In this experiment, only eight language vectors were set, but more precise explanation generation is expected to become possible by defining more detailed language vectors. However, it should be noted that in such cases, sufficient training data is required.
+ Table 6: Accuracy of language description generation.
+ n | 1st | 1st and 2nd
+ 5000 | 0.89 | 0.67
+ 3000 | 0.89 | 0.73
+ 1000 | 0.88 | 0.68
+ 500 | 0.84 | 0.63
+ 300 | 0.80 | 0.57
+ 100 | 0.69 | 0.54
+ 50 | 0.60 | 0.37
+ 4 Conclusion
+ In this study, a novel method was proposed for estimating the user’s world model from the robot’s world model and the query given by the user, toward realizing XAR. The proposed method can estimate others’ world models more efficiently than using the “AND” search of queries. In future work, user vectors should be introduced, and methods for generating explanations using differences in world models should be devised.
+ Acknowledgments
+ This study was supported by the New Energy and Industrial Technology Development Organization (NEDO).
+ References
+ [1] Sakai, Tatsuya and Nagai, Takayuki, "Explainable Autonomous Robots: A Survey and Perspective," Advanced Robotics, 36(5-6), pp.219-238, 2022.
+ [2] Ha, David and Schmidhuber, Jürgen, "Recurrent World Models Facilitate Policy Evolution," In Advances in Neural Information Processing Systems 31, pp.2450-2462, 2018.
+ [3] Xiaofeng Gao, Ran Gong, Yizhou Zhao, et al. "Joint Mind Modeling for Explanation Generation in Complex Human-Robot Collaborative Tasks," In Proceedings of the 29th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp.1119-1126, 2020.
+ [4] A. S. Clair and M. Matarić, "How Robot Verbal Feedback Can Improve Team Performance in Human-Robot Task Collaborations," In Proceedings of the 10th ACM/IEEE International Conference on Human-Robot Interaction (HRI), pp.213-220, 2015.
+ [5] Sandy H. Huang, David Held, Pieter Abbeel, et al. "Enabling Robots to Communicate Their Objectives," ArXiv, abs/1702.03465, 2017.
+ [6] Isaac Lage, Daphna Lifschitz, Finale Doshi-Velez, et al. "Toward Robust Policy Summarization," In Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems (AAMAS ’19), pp.2081-2083, 2019.
+ [7] Lunjun Zhang, Gengcong Yang, and Bradly C. Stadie, "World Model as a Graph: Learning Latent Landmarks for Planning," ArXiv, abs/2011.12491, 2020.
+ [8] A. Narayanan, Mahinthan Chandramohan, R. Venkatesan, et al. "graph2vec: Learning distributed representations of graphs," ArXiv, abs/1707.05005, 2017.
+ [9] Quoc Le and Tomas Mikolov, "Distributed Representations of Sentences and Documents," In Proceedings of the 31st International Conference on Machine Learning, volume 32 of Proceedings of Machine Learning Research, pp.1188-1196, 2014.
+ [10] Nino Shervashidze, Pascal Schweitzer, Erik Jan van Leeuwen, et al. "Weisfeiler-Lehman Graph Kernels," Journal of Machine Learning Research, 12(77), pp.2539-2561, 2011.
+ [11] Been Kim, M. Wattenberg, J. Gilmer, et al. "Interpretability Beyond Feature Attribution: Quantitative Testing with Concept Activation Vectors (TCAV)," In ICML, 2018.
+ [12] Maxime Chevalier-Boisvert, Lucas Willems, and Suman Pal, "Minimalistic Gridworld Environment for OpenAI Gym," 2018.
+
ENE2T4oBgHgl3EQfSgdx/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,396 @@
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf,len=395
2
+ page_content='ESTIMATION OF USER’S WORLD MODEL USING GRAPH2VEC ∗ Tatsuya Sakai, Takayuki Nagai Graduate School of Engineering Science, Osaka University 1-3, Machikaneyama, Toyonaka, Osaka, Japan nagai@sys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
3
+ page_content='es.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
4
+ page_content='osaka-u.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
5
+ page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
6
+ page_content='jp ABSTRACT To obtain advanced interaction between autonomous robots and users, robots should be able to distinguish their state space representations (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
7
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
8
+ page_content=', world models).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
9
+ page_content=' Herein, a novel method was proposed for estimating the user’s world model based on queries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
10
+ page_content=' In this method, the agent learns the distributed representation of world models using graph2vec and generates concept activation vectors that represent the meaning of queries in the latent space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
11
+ page_content=' Experimental results revealed that the proposed method can estimate the user’s world model more efficiently than the simple method of using the “AND” search of queries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
12
+ page_content=' Keywords Autonomous robot · Explainability · Representation learning · User’s world model 1 Introduction Autonomous robots are increasingly being used in numerous applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
13
+ page_content=' Currently, they assist humans in performing tasks by executing commands.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
14
+ page_content=' For autonomous robots performing sophisticated decisions, the blind execution of commands is not always the best strategy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
15
+ page_content=' Moreover, in many situations, fully executing commands is difficult.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
16
+ page_content=' These autonomous robots should be able to explain the reasons for their decisions to gain user trust.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
17
+ page_content=' Explainable autonomous robots (XAR) are defined as robots that have these explanatory capabilities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
18
+ page_content=' The following four requirements have been identified for the XARs [1]: (1) Owning an interpretable decision-making space (2) Estimation of the model of others (3) Estimation of the information required for a user to estimate the policy of the robot (4) Presentation to the user of explanation This explanation mechanism is a mutual process between the robot and the user and is displayed in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
19
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
20
+ page_content=' The world model refers to the correspondence between actions and state changes, that is, the internal model [2] that represents the dynamics of the environment, and we do not distinguish between the world model and the environment in this paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
21
+ page_content=' “Policy sharing” in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
22
+ page_content='1 is a spontaneous presentation of information of a policy (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
23
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
24
+ page_content=', presentation of a sequence of actions to be taken), and an explanation is generated when a query to this information presentation is requested by others.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
25
+ page_content=' Among the four requirements, the estimation of the other’s world model is particularly crucial for providing a user- specific explanation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
26
+ page_content=' In the context of human–robot interaction, the importance of estimating the user’s internal state has already been recognized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
27
+ page_content=' Gao et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
28
+ page_content=' [3] and Clair et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
29
+ page_content=' [4] proposed a framework for estimating plausible action strategies based on the actions and interaction history of users.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
30
+ page_content=' Huang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
31
+ page_content=' [5] and Lage et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
32
+ page_content=' [6] focused on restoring explanations to policies and advocated the importance of appropriately estimating the restoration algorithm of users requesting explanation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
33
+ page_content=' These studies have focused on internal states, particularly policies, and planning algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
34
+ page_content=' ∗This paper is an extended (English translated) version of “T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
35
+ page_content='Sakai, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
36
+ page_content='Horii, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
37
+ page_content='Nagai, Representation Learning of World Models and Estimation of World Model of Others Using Graph2vec, Journal of RSJ, 40, 2, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
38
+ page_content='166-169, 2022,” (in Japanese) ISSN 1884-7145, Print ISSN 0289-1824, https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
39
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
40
+ page_content='7210/jrsj.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
41
+ page_content='40.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
42
+ page_content='166 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
43
+ page_content='03793v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
44
+ page_content='RO] 10 Jan 2023 Preprint Figure 1: Explanation process as communication.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
45
+ page_content=' To clarify elements of the explanation, observations/actions, and interactions between world models of others are segregated in the figure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
46
+ page_content=' Figure 2: Simplified explanation process considered in this paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
47
+ page_content=' We only consider unidirectional explanation process from robot to user.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
48
+ page_content=' However, real-world robots are designed to exhibit desired behavior in terms of algorithms and policies, and they share the same final objective with their users.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
49
+ page_content=' In these situations, the results of action decisions typically stem from discrepancies in environmental awareness.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
50
+ page_content=' In this study, a novel method was proposed for estimating the user’s world model based on the robot’s world model and the questions (queries) posed by the user.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
51
+ page_content=' The proposed method can identify differences in the world models and generate an explanation that resolves the discrepancy between the perception of the environment by the robot and the user.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
52
+ page_content=' In this study, we simplified the mechanism of Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
53
+ page_content='1 as in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
54
+ page_content='2 2 and focused on estimating the world model of the explained person as the user’s world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
55
+ page_content=' 2 Proposed method In the proposed method, the world model of the user is estimated by the following procedure (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
56
+ page_content=' 3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
57
+ page_content=' (1) Acquisition of a distributed representation of the world model: A distributed representation of each world model is obtained using a graph-structured world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
58
+ page_content=' (2) Acquisition of the query vector: Based on the query given by the user, the system acquires a direction vector that represents the meaning of the query in the representation space of the world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
59
+ page_content=' (3) Estimation of user’s world model: The distributed representations of the world model and query vector are used to estimate the user’s world model based on cosine similarity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
60
+ page_content=' 2When the model of others outputs an explanation, the content of the explanation is determined by considering information received from the world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
61
+ page_content=' 2 Preprint Why donʼt you do Agentʼs 1 2 3 4 5 6 7 8 9 Userʼs Figure 3: Schematic of our proposed method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
62
+ page_content=' … … … = 1 2 ⋮ = 1 2 ⋮ Figure 4: Learning of a distributed representation of the world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
63
+ page_content=' The experience on the environment is used to obtain graph-based world models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
64
+ page_content=' Then, after converting to WL labels, a distributed representation of each environment is obtained.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
65
+ page_content=' The robot and user are assumed to share the same state space and measures;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
66
+ page_content=' only the connection relation between states is assumed to be unshared.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
67
+ page_content=' As a distributed representation of the world model, the parameters of a model representing a continuous state space, for example, [2], could be used.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
68
+ page_content=' However, presenting the differences of the world model to the user requires the discretization of the state transition structure;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
69
+ page_content=' the parameter space of the model does not necessarily represent the similarity of the state transition structure of the world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
70
+ page_content=' Therefore, the world model is considered to be a discretized graph for obtaining a distributed representation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
71
+ page_content=' The generation of explanations is outside the scope of this study.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
72
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
73
+ page_content='1 Acquisition of a distributed representation of the world model The learning process of a distributed representation of the world model is shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
74
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
75
+ page_content=' The robot learns a representation space representing the similarity of environments based on its experiences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
76
+ page_content=' First, the robot acquires an undirected graph representing the state transitions of each environment;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
77
+ page_content=' simultaneously, it acquires policies through reinforcement learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
78
+ page_content=' Specifically, the robot assumes that the states whose transitions were observed during the policy learning search are adjacent to each other and adds edges3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
79
+ page_content=' After acquiring the undirected graphs of all environments, graph2vec is applied to acquire the distributed representation of the graph of each environment [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
80
+ page_content=' Graph2vec is a method in which doc2vec [9] is used to acquire distributed representations of graphs, and instead of predicting the occurrence of words, the occurrence of labels that represent each subgraph is presented.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
81
+ page_content=' Labels representing subgraphs are obtained using the Weisfeiler–Lehman (WL) relabeling process [10].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
82
+ page_content=' In this process, the label of the next layer is determined by considering the labels of neighboring nodes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
83
+ page_content=' Higher layer labels represent more global information regarding the graph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
84
+ page_content=' 3This method is assumed to be used in a discrete state space;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
85
+ page_content=' discretization of the state space is required for application to a real robot.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
86
+ page_content=' However, this measure is beyond the scope of this study.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
87
+ page_content=' For discretization, the method proposed in [7], wherein the policy implications of each state is considered, can be used.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
88
+ page_content=' 3 ------Preprint Graph2vec allows graphs in which the same subgraphs occur, that is, graphs in environments with similar state transition structures, to be embedded closer together in the representation space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
89
+ page_content=' Efficient search based on user queries becomes possible by acquiring the representation space of the world model in advance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
90
+ page_content=' When acquiring a world model, we explicitly provide an environment label to indicate the environment wherein the experience occurs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
91
+ page_content=' If no environment label is given, a world model is obtained using temporally continuous experiences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
92
+ page_content=' Furthermore, we consider that models with similar distributed representations represent the same environment and merging multiple models may be effective in obtaining a world model with higher accuracy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
93
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
94
+ page_content='2 Acquisition of the query vector Based on the query given by the user, the robot acquires a direction vector that represents the meaning of the query in the representation space of the world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
95
+ page_content=' Kim et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
96
+ page_content=' [11] focused on the middle layer of the neural network and generated concept vectors (CAV: concept activation vectors) by calculating the difference in latent representations when features that satisfy a specific concept and features that do not satisfy the concept are input.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
97
+ page_content=' In this study, this method was applied to define a query vector vquery as Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
98
+ page_content=' (1) by considering the difference of distributed representations between environments that satisfy the query and those that do not.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
99
+ page_content=' The query assumes the form “action aquery should be selected in state squery.” vquery = vpos − vneg, (1) where vpos = � i vi · P(aquery|vi, squery) � i P(aquery|vi, squery) , vneg = � i vi · (1 − P(aquery|vi, squery)) � i(1 − P(aquery|vi, squery)) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
100
+ page_content=' (2) Here, vi represents a distributed representation of the i-th environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
101
+ page_content=' To correspond to a policy in which actions are selected probabilistically, the probability value of selecting an action is used as the coefficient of each distributed representation vi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
102
+ page_content=' If necessary, the coefficients can be expressed as the binary values of {0, 1}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
103
+ page_content=' Note that if the state Squery is not included in the undirected graph of the environment i, the CAV is calculated by excluding vi4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
104
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
105
+ page_content='3 Estimation of user’s world model Using the distributed representation of the world model and the query vector, the user’s world model was estimated using cosine similarity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
106
+ page_content=' The likelihood of an environment i as an estimated environment is expressed by Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
107
+ page_content=' (3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
108
+ page_content=' S(vi, vobs, Vq) = � j similarity(vj query, vi − vobs) − λ · distance(vi, vobs), (3) where vobs is a distributed representation of the environment currently observed by the agent, Vq is any number of query vectors, and vj query ∈ Vq is the j-th query vector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
109
+ page_content=' Furthermore, similarity(a, b) and distance(a, b) are functions that output the cosine similarity [−1, 1] and the distance between vectors a and b in the representation space, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
110
+ page_content=' When reasoning in a real environment, the robot and the user observe almost the same environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
111
+ page_content=' Therefore, because their world models are similar, the similarity between the direction of each environment and the direction of the query vector, as seen from vobs, is used as the estimation criterion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
112
+ page_content=' The coefficient λ is a hyperparameter that determines how much distance between vectors is considered and represents the strength of the assumption that the observed environment of the robot and the user are similar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
113
+ page_content=' Using the definitions, the world model of others to be estimated is expressed by Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
114
+ page_content=' (4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
115
+ page_content=' Env_est = arg max i S(vi, vobs, Vq) (4) In this study, we assume that the importance of all queries are equivalent and designed the evaluation function S as the sum of the similarities for each query vector vj query.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
116
+ page_content=' However, in the real world, the importance of each query may differ, in which case the similarity should be multiplied by a coefficient ρj.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
117
+ page_content=' 4When estimating the user’s world model, the likelihood S(vi, vobs, Vq) is computed for all environments i, including those that do not contain the state squery.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
118
+ page_content=' 4 Preprint 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
119
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
120
+ page_content='1 User vectors In addition to the query vector, the user vector that represents “what kind of environment with the distributed repre- sentation the user is likely to retain as a world model” can also be defined.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
121
+ page_content=' The user vector is a vector that represents how the user tends to misunderstand the environment and plays a role in adding this tendency to the result of the user’s world model estimation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
122
+ page_content=' vuser = vu_pos − vu_neg, (5) where vu_pos = � i vi · P(vi) � i P(vi) , vu_neg = � i vi · (1 − P(vi)) � i(1 − P(vi)) .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
123
+ page_content=' (6) P(vi) is the probability that the user estimates the environment corresponding to the distributed representation vi as the current world model when no information about the current environment is given to the user.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
124
+ page_content=' In this paper, P(vi) is assumed to be known, and the estimation of P(vi) is outside the scope of this research.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
125
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
126
+ page_content='4 Explanation by language Using a pre-trained language vector Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
127
+ page_content=' (7) in the representation space, the relationship between the world model maintained by the agent and the user’s world model can also be explained by language.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
128
+ page_content=' vword(x) = average(vm − vn), (7) where vm and vn are distributed representations of environment pairs whose relation is represented by the language x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
129
+ page_content=' By averaging the differences of distributed representations of environment pairs vm and vn that satisfy a specific language description x, we can obtain a semantic vector represented by the language description x in the representation space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
130
+ page_content=' Using the language vector vword, the language describing the relationship between the world model maintained by the agent and the user’s world model is represented by Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
131
+ page_content=' (8).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
132
+ page_content=' Explanation = arg max x similarity(vword(x), vEnv_est − vobs)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
133
+ page_content=' (8) By means of Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
134
+ page_content=' (8), the language x that represents the closest relationship between the current world models is selected as the explanation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
135
+ page_content=' 3 Experiment The proposed method was applied to an agent that acquires action strategies by proximal policy optimization (PPO) in a simulation environment, and its usefulness was evaluated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
136
+ page_content=' A partially modified version of the grid environment [12] with multiple objects was used for the experiments (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
137
+ page_content=' 5).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
138
+ page_content=' In this environment, the agent (triangle) obtains a reward by taking a key, opening a door, and reaching a goal in the lower right corner.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
139
+ page_content=' The position of the goal remains unchanged, but the positions of the key and the door change every trial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
140
+ page_content=' The agent has five actions, namely go straight, turn left, turn right, take the key from the grid in front of it, and open the door.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
141
+ page_content=' The agent observes the absolute position of the key (x, y coordinates), the absolute position of the door (x, y coordinates), its own absolute position (x, y coordinates) and orientation, holding/not holding the key, and opening/closing the door, for a total of nine dimensions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
142
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
143
+ page_content='1 Experiment 1: Acquisition of a distributed representation of the world model A graph representing the state transitions of each environment was obtained simultaneously with the learning of policies by PPO, and graph2vec was used to obtain a 16-dimensional distributed representation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
144
+ page_content=' An example of the acquired graph is shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
145
+ page_content='6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
146
+ page_content=' There are three edges that transition from the group of states before key acquisition to the group of states after key acquisition because keys can be acquired from three directions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
147
+ page_content=' On the other hand, there is only one edge that transitions from the group of states where the door is not open to the group of states where the door is open, because the door can be opened from only one state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
148
+ page_content=' 5 Preprint Why donʼt you pick up Figure 5: Estimation results of the user’s world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
149
+ page_content=' Table 1: Cumulative frequency and average value of the order in which the optimal environment appears.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
150
+ page_content=' The number of trials is 100 for each method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
151
+ page_content=' Method Order 1 2 3 Order Average Our method 35 49 60 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
152
+ page_content='1 Random 6 8 11 23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
153
+ page_content='1 The representation space was compressed to eight dimensions using independent component analysis, and the visualized results are displayed in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
154
+ page_content=' 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
155
+ page_content=' In this experiment, the absolute positions of the key and door were used as environment labels, and the five-dimensional observations excluding them were used as node features.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
156
+ page_content=' The experimental results revealed that clusters are formed in the representation space based on the absolute positions of the key and door.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
157
+ page_content=' In particular, clusters related to the position of the door are apparent, which can be attributed to the fact that the surrounding state transition relationship changes considerably compared with that of the key.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
158
+ page_content=' In this experiment, the absolute positions of keys and doors are used only for identifying the environmental graph to be updated and are not embedded in the graph itself.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
159
+ page_content=' Therefore, graph2vec created a representation space that appropriately reflects the differences in the location information of keys and doors expressed in the state transition structure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
160
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
161
+ page_content='2 Experiment 2: Estimation of the user’s world model The query vector was applied to the obtained representation space to estimate the world model of others.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
162
+ page_content=' In this verification, we assume that questions were asked in the situations of acquiring a key and opening a door, and the query “In the state Squery, we should {take the key/open the door}” was considered.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
163
+ page_content=' Figure 5 displays the results of estimating the others’ world model given the reference world model and query.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
164
+ page_content=' The environment that satisfies the query has the highest evaluation value, and the environment in which the key is located on the opposite side of the grid environment has the lowest evaluation value.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
165
+ page_content=' This result suggests that the obtained query vector is appropriate for estimating the world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
166
+ page_content=' The optimal environment is defined as the agent’s world model with minimal modifications for satisfying the query.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
167
+ page_content=' For example, given the query “In state Squery, the agent should take the key,” the optimal environment is the environment in which only the position of the key is changed to satisfy the query, whereas the position of the door is left unchanged.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
168
+ page_content=' For each randomly selected agent world model/query pair, the evaluation values for each environment obtained using Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
169
+ page_content=' (3) were sorted in descending order to obtain the order of appearance of the optimal environment (Table 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
170
+ page_content=' The order of appearance of the environments that satisfy the query when sorted randomly is displayed for comparison.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
171
+ page_content=' The experimental results confirmed that the proposed method ranks the optimal environments higher.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
172
+ page_content=' 6 FoFo -- ooPreprint Figure 6: An example graph of the acquired world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
173
+ page_content=' The blue nodes represent the state before the key acquisition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
174
+ page_content=' The orange nodes represent the state after the key acquisition when the door is not open, and the green nodes represent the state when the door is open.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
175
+ page_content=' Although the direct manipulation of the positions of keys and doors can change the order of the appearance of the optimal environment to one, direct manipulation is not always possible in cases in which directly manipulatable information is not given as a query.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
176
+ page_content=' The proposed method estimates the optimal environment at an early stage, although the state transition relations to be changed are not explicitly given.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
177
+ page_content=' This property of the proposed method is crucial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
178
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
179
+ page_content='3 Experiment 3: Validation with multiple queries An explanatory agent A and an explained agent B are prepared, and the number of queries required for A to accurately estimate B’s world model is evaluated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
180
+ page_content=' This experiment assumes a situation in which the user is asked to confirm the correctness of the estimation results of the other’s world model through the presentation of an action sequence, which improves the estimation accuracy (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
181
+ page_content=' 8).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
182
+ page_content=' The outline of the experiment is as follows: (1) A and B share an initial state (absolute position and orientation of the agent, and the state of the key and door) and an environment-policy pair that specifies the strategy to be used in specific environments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
183
+ page_content=' They have arbitrary world models with different key and door positions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
184
+ page_content=' (2) Here, A sets its own world model as the initial value of the other’s world model and presents the optimal sequence of actions in that model in turn (policy sharing) 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
185
+ page_content=' (3) B adds the query “In state squery, action aquery should be chosen” when the given action differs from the optimal action in its measure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
186
+ page_content=' The existing query is not deleted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
187
+ page_content=' (4) A updates the other’s world model based on the query and presents the action sequences in the updated other’s world model again in sequence with Squery as the initial state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
188
+ page_content=' (5) Repeat (3) and (4) to evaluate the number of environment updates required for A to obtain B’s world model as the other’s world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
189
+ page_content=' The environment selected once is not selected, and the second or subsequent candidate is adopted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
190
+ page_content=' If the same environment has not been obtained after all the action sequences are presented, the environment is continuously updated 5Policy sharing in this experiment (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
191
+ page_content=' 8) is performed to confirm the estimation results of the other’s world model and differs from the presentation of information about one’s own policy in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
192
+ page_content=' 1 and Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
193
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
194
+ page_content=' 7 Preprint Figure 7: Results of latent space visualization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
195
+ page_content=' Each data point is illustrated according to (a) X-coordinate of the key, (b) Y-coordinate of the key, (c) X-coordinate of the door, and (d) Y-coordinate of the door.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
196
+ page_content=' Figure 8: Schematic of experiment 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
197
+ page_content=' The agent transmits information on its policy to the user and updates the user’s world model based on queries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
198
+ page_content=' without increasing the number of queries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
199
+ page_content=' In this verification, the proposed method was compared with the “AND” search of queries as a method of updating the world model of others.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
200
+ page_content=' In practice, the following three methods are compared.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
201
+ page_content=' Proposed method: Select a plausible environment using Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
202
+ page_content=' (4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
203
+ page_content=' AND search 1: Randomly selects an environment from among the environments that satisfy the query.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
204
+ page_content=' AND search 2: The environment is randomly selected with the constraint that “the optimal behavior for the policy B is selected in all states from the initial state until reaching state squery”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
205
+ page_content=' Thus, it adds constraints and increases the information provided compared with the two update methods described.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
206
+ page_content=' Experimental results revealed that the proposed method can estimate others’ world models with the fewest number of updates (Table 2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
207
+ page_content=' The results of the paired two-tailed t-test revealed that t(100) = 8.07 and p < .01 for the proposed method versus AND search 1, and t(100) = 4.59 and p < .01 for the proposed method versus AND search 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
212
+ page_content=' Thus, both significant differences were confirmed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
213
+ page_content=' [Figure omitted: grid-world panels; only figure residue appeared here.] Table 2: Number of updates required to estimate the user’s world model. Method / Number of updates / Standard deviation: Our method 5.53 / 4.83; AND search 1 20.72 / 17.43; AND search 2 8.69 / 4.71.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
+ page_content=' Table 3: Comparison with the use of probabilistic evaluation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
221
+ page_content=' Method / Number of updates / Standard deviation: Our method (λ = 0.05) 3.93 / 5.38; Our method (λ = 0) 5.75 / 7.11; Probabilistic value (λ = 0.05) 7.13 / 7.79.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
+ page_content=' Among the compared methods, “AND search 2” is the one that directly provides the most information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
230
+ page_content=' By contrast, the proposed method can reduce the number of updates by vectorizing queries and utilizing prior knowledge embedded in the representation space as additional information.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
231
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
232
+ page_content='4 Experiment 4: Comparison with the use of probabilistic evaluation We compare the proposed method with the case where the probability value of each environment satisfying the query is used as the evaluation function instead of CAV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
233
+ page_content=' The proposed method uses Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
234
+ page_content=' (3) as the evaluation function, while the comparison method uses Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
235
+ page_content=' (9).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
236
+ page_content=' S(vi, vobs, Vq) = Σ_j P(aquery | vi, squery) − λ · distance(vi, vobs). (9)' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
+ page_content=' The procedure for this experiment is the same as in experiment 3;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
238
+ page_content=' however, the initial world model is not completely random, and the coordinates of either the key or the door are assumed to be identical.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
239
+ page_content=' This condition replicates the assumption that the agent’s world model and the user’s world model are similar6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
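For concreteness, a small Python sketch of the probability-based evaluation of Eq. (9) is given below, next to an assumed cosine-similarity variant standing in for the CAV-based score of Eq. (3), which is not reproduced in this excerpt. The Euclidean distance used for distance(vi, vobs) and the policy.prob interface are assumptions made for illustration.

import numpy as np

def score_probabilistic(v_i, v_obs, queries, policy, lam=0.05):
    # Eq. (9): sum over queries of P(a_query | v_i, s_query), minus a distance penalty.
    prob_term = sum(policy.prob(q["action"], env=v_i, state=q["state"]) for q in queries)
    return prob_term - lam * np.linalg.norm(v_i - v_obs)

def score_cav(v_i, v_obs, query_vectors, lam=0.05):
    # Assumed stand-in for the CAV-based score of Eq. (3): alignment of the candidate
    # embedding with each query's concept vector, penalised by distance to the agent's embedding.
    def cos(a, b):
        return float(a @ b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12)
    return sum(cos(v_i, q) for q in query_vectors) - lam * np.linalg.norm(v_i - v_obs)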
240
+ page_content=' Experimental results showed that the proposed method, which takes into account the distance in the representation space (λ = 0.05), was able to estimate the user’s world model with the fewest number of updates (Table 3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
+ page_content=' The results of the paired two-tailed t test showed t(99) = 4.59 and p < .005 for the proposed method with λ = 0.05 compared to λ = 0, and t(99) = 4.44 and p < .005 for the proposed method with λ = 0.05 compared to the method using probability values; both differences are significant7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
249
+ page_content=' The proposed method, which takes into account the distance in the representation space, was able to estimate the environment with a significantly smaller number of updates than the method using the same coordinates for either the key or the door.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
250
+ page_content=' Compared to the method using probability value as the evaluation function, the proposed method was able to absorb small errors in probability values, resulting in a significantly smaller number of updates.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
251
+ page_content=' When the distance in the representation space corresponds to the similarity of the state transition structure between environments, as in the present verification, it is effective to use CAV to obtain environments that are perpendicular to the virtual separation boundary as the user’s world models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
252
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
253
+ page_content='5 Experiment 5: Number of samples and accuracy of CAV We evaluate the number of queries required to correctly estimate the world model when the number of samples (number of environments) used to compute the CAV is reduced.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
254
+ page_content=' The evaluation procedure is the same as in Experiment 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
255
+ page_content=' In this experiment, the maximum number of samples is 300 because 300 environments are embedded in the representation space.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
256
+ page_content=' We also set λ = 0.05.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
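One common way to obtain a CAV from the sampled environments, following the general recipe of concept activation vectors (reference [11]), is to fit a linear classifier separating embeddings of environments that satisfy the query from those that do not, and to take the unit normal of its decision boundary. The sketch below is illustrative; the exact training setup used in the paper is not specified in this excerpt.

import numpy as np
from sklearn.linear_model import LogisticRegression

def compute_cav(positive_embeddings, negative_embeddings):
    """positive: environments satisfying the query; negative: environments that do not."""
    X = np.vstack([positive_embeddings, negative_embeddings])
    y = np.array([1] * len(positive_embeddings) + [0] * len(negative_embeddings))
    clf = LogisticRegression(max_iter=1000).fit(X, y)
    w = clf.coef_[0]
    return w / np.linalg.norm(w)   # unit normal of the separating boundary, used as the query vector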
258
+ page_content=' (Footnote 6) Without this assumption, the world model with minimal modification to satisfy the query (the optimal environment) is not necessarily the user’s world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
259
+ page_content=' However, theoretically, when the evaluation value is calculated using Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
260
+ page_content=' (3), other environments that satisfy the query will have a lower evaluation value compared to the optimal environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
261
+ page_content=' Therefore, if the assumption that the world models of the agent and user are similar cannot be made, it is desirable to use Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
262
+ page_content=' (9).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
263
+ page_content=' However, in a real environment, the world models of the agent and user are not completely independent, and similarity can be assumed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
264
+ page_content=' (Footnote 7) Because the t test was applied twice in this verification, a significant difference was found at the significance level α = 0.01 based on the Bonferroni method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
266
+ page_content=' Table 4: Relationship between the number of CAV samples and the order of appearance of the optimal environment. Each value represents the cumulative frequency, and the number of trials is 100 for each. Number of samples: cumulative frequency at 1st / 2nd / 3rd appearance. 300: 40 / 62 / 69; 250: 44 / 61 / 64; 200: 42 / 51 / 62; 150: 40 / 51 / 55; 100: 35 / 46 / 54; 50: 9 / 18 / 26.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
+ page_content=' Prior distribution 1: P(door_y = 1) = 0.4, P(door_y = 2) = 0.3, P(door_y = 3) = 0.2, P(door_y = 4) = 0.1, P(door_y = 5) = 0.0, P(door_y = 6) = 0.0. Prior distribution 2: P(door_y = 1) = 0.0, P(door_y = 2) = 0.1, P(door_y = 3) = 0.4, P(door_y = 4) = 0.4, P(door_y = 5) = 0.1, P(door_y = 6) = 0.0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
+ page_content=' Figure 9: The prior distribution used in experiment 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
281
+ page_content=' The prior distribution for the y-coordinate of the door (in the vertical direction) is defined.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
282
+ page_content=' The experimental results show that, down to 100 samples, the accuracy deteriorates only gradually relative to the case where the CAV is generated with 300 samples (Table 4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
283
+ page_content=' This suggests that a specific level of estimation accuracy can be maintained even when the number of samples is reduced.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
284
+ page_content=' The fact that the user’s world model is estimated taking into account the distance in the representation space may also contribute to maintaining accuracy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
285
+ page_content=' On the other hand, the accuracy dropped drastically when the number of samples was 50.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
286
+ page_content=' This may be because of an increase in the number of trials in which the number of positive data (the number of data satisfying the query) in the sample is very small.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
287
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
288
+ page_content='6 Experiment 6: Use of the user vector We test whether the estimation accuracy of the other-world model can be improved by using the user vector defined by Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
289
+ page_content=' (5), as opposed to using only CAV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
290
+ page_content=' In this verification, two types of prior distributions are defined for the y-coordinate of the door (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
291
+ page_content=' 9), and the user vector is calculated for each of them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
292
+ page_content=' The user vector is treated as one of the query vectors in the evaluation value calculation for each environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
293
+ page_content=' Note that λ = 0 is assumed in this verification because there is no assumption that the world models held by agents A and B are similar8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
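A minimal sketch of how the user vector can be treated as one more query vector in the evaluation is shown below. Since Eq. (5) is not reproduced in this excerpt, the prior-weighted construction of the user vector is only an illustrative assumption; λ = 0 in this experiment, so no distance penalty term is included.

import numpy as np

def user_vector_from_prior(candidate_embeddings, prior_probs):
    # Hypothetical stand-in for Eq. (5): weight environment embeddings by the prior over door_y.
    v = np.zeros_like(candidate_embeddings[0], dtype=float)
    for e, p in zip(candidate_embeddings, prior_probs):
        v += p * e
    return v / (np.linalg.norm(v) + 1e-12)

def evaluate_with_user_vector(v_i, query_vectors, user_vector):
    def cos(a, b):
        return float(a @ b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12)
    vectors = list(query_vectors) + [user_vector]    # user vector treated as one more query vector
    return sum(cos(v_i, q) for q in vectors)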
294
+ page_content=' The estimation results of the other-world model for the same reference environment and query are shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
295
+ page_content=' 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
296
+ page_content=' User vectors 1 and 2 correspond to prior distributions 1 and 2, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
297
+ page_content=' It can be seen that while the results of the inference of the door location are unstable when only the query vector is applied, the results of the inference using the other vectors show that the door coordinates are concentrated in locations that have high probability values in the prior distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
298
+ page_content=' The same validation as in experiment 3 was conducted by applying the prior distribution shown in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
299
+ page_content=' 9, and the number of queries required for both distributions 1 and 2 was lower when using the user vector (Tab.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
300
+ page_content=' 5).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
301
+ page_content=' (Footnote 8) If this experiment is conducted under the assumption that the world models of agents A and B are similar, it is necessary to set the number of objects that determine the state transition structure of the environment to three or more and that they are placed at the same coordinates as the current environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
302
+ page_content=' Under these conditions, it is desirable to set λ = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
303
+ page_content='05.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
304
+ page_content=' Figure 10: User vector and environments. For the query “Why don’t you pick up the key at the state?”, the figure shows the base environment and the environments scored (from highest to lower score) using the query vector alone, the query vector + user vector 1, and the query vector + user vector 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
306
+ page_content=' Table 5: Change in estimation accuracy when using user vectors. The number of trials is 100 for each. Method / Distribution 1 (Number of updates, Standard deviation) / Distribution 2 (Number of updates, Standard deviation): Query + User 5.31, 4.59 / 4.69, 3.34; Query 6.39, 7.13 / 5.11, 3.46.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
+ page_content=' The results of the paired two-tailed t test showed that t(99) = 2.34 and p < .05 for distribution 1, and t(99) = 1.45 and p = 0.15 for distribution 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
321
+ page_content=' These results suggest that the number of queries required for estimation can be reduced by using the user vector, although the size of the effect depends on the shape of the prior distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
322
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
323
+ page_content='7 Experiment 7: Explanation by language We test whether the language vectors learned in advance can be used to correctly output language that describes the relationship between the agent’s world model and user’s world model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
324
+ page_content=' The language vectors used in this study correspond to eight different explanations of the form “In the world model assumed by the user, the {key, door} is located further {up, down, right, left} than in the world model maintained by the agent.” In the experiment, language explanations were first given to n pairs of world models with different coordinates of keys or doors, and the language vectors were obtained using Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
325
+ page_content=' (7).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
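The following sketch illustrates one plausible realization of the language-vector acquisition and explanation selection. Eqs. (7) and (8) are not reproduced in this excerpt, so the mean difference-vector construction and the cosine-based ranking below are assumptions made for illustration.

import numpy as np
from collections import defaultdict

def learn_language_vectors(labelled_pairs):
    """labelled_pairs: iterable of (agent_env_embedding, user_env_embedding, explanation)."""
    sums, counts = defaultdict(lambda: 0.0), defaultdict(int)
    for v_agent, v_user, text in labelled_pairs:
        sums[text] = sums[text] + (v_user - v_agent)   # accumulate difference vectors per explanation
        counts[text] += 1
    return {text: sums[text] / counts[text] for text in sums}

def explain(v_agent, v_user, language_vectors):
    diff = v_user - v_agent
    def cos(a, b):
        return float(a @ b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12)
    # Explanations ranked by similarity; the top entries correspond to "1st" and "2nd" in Table 6.
    return sorted(language_vectors, key=lambda t: cos(diff, language_vectors[t]), reverse=True)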
326
+ page_content=' We then generated linguistic explanations for randomly selected pairs of world models using the same conditions as those used to generate the language vectors and evaluated the percentage of the explanations that correctly explained the relationships between the world models (i.e., the percentage of correct responses).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
329
+ page_content=' The percentage of correct responses after 100 trials is shown in Table 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
330
+ page_content=' Note that if more than one linguistic explanation correctly represented the relationship between the world models, both were considered as correct answers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
331
+ page_content=' For example, if the key is located on the upper right, “the key is on the right” and “the key is on the top” are treated as correct answers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
332
+ page_content=' The “1st” in the table indicates the percentage of correct explanations generated by Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
333
+ page_content=' (8).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
334
+ page_content=' The “1st and 2nd” represents the percentage of correctness of the two languages that were the first and second most similar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
335
+ page_content=' Note that “1st and 2nd” was evaluated only in cases where more than one language explanation was considered to be a correct answer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
336
+ page_content=' Although the number of world model pairs considered in this experiment is approximately 9000, the experimental results show that language explanation generation is possible with high accuracy even when the number of data used for language vector acquisition is n = 1000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
337
+ page_content=' The accuracy was also maintained even when the number of data was extremely reduced to n = 100 and n = 50, suggesting that the learning in the representation space is effective.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
338
+ page_content=' In this experiment, only eight language vectors were set, but it is expected that more accurate explanation generation will become possible by setting more detailed language vectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
339
+ page_content=' However, it should be noted that in these cases, sufficient teacher data is required.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
340
+ page_content=' Table 6: Accuracy of language description generation. n / 1st / 1st and 2nd: 5000: 0.89 / 0.67; 3000: 0.89 / 0.73; 1000: 0.88 / 0.68; 500: 0.84 / 0.63; 300: 0.80 / 0.57; 100: 0.69 / 0.54; 50: 0.60 / 0.37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
+ page_content=' 4 Conclusion In this study, a novel method was proposed for estimating the user’s world model from the robot’s world model and the query given by the user to obtain XAR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
356
+ page_content=' The proposed method can estimate others’ world models more efficiently than using the “AND” search of queries.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
357
+ page_content=' In the future, user vectors should be introduced, and the methods for generating explanations using differences in world models should be devised.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
358
+ page_content=' Acknowledgments This study was supported by the New Energy and Industrial Technology Development Organization (NEDO).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
359
+ page_content=' References [1] Sakai, Tatsuya and Nagai, Takayuki, "Explainable Autonomous Robots: A Survey and Perspective," Advanced Robotics, 36(5-6), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
360
+ page_content='219-238, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
361
+ page_content=' [2] Ha, David and Schmidhuber, Jürgen, "Recurrent World Models Facilitate Policy Evolution," In Advances in Neural Information Processing Systems 31, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
362
+ page_content='2450-2462, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
363
+ page_content=' [3] Xiaofeng Gao, Ran Gong, Yizhou Zhao, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
364
+ page_content=' "Joint Mind Modeling for Explanation Generation in Complex Human-Robot Collaborative Tasks," In Proceedings of 29th IEEE International Conference on Robot and Human Interactive Communication (RO-MAN), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
365
+ page_content='1119-1126, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
366
+ page_content=' [4] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
367
+ page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
368
+ page_content=' Clair and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
369
+ page_content=' Matarić, "How Robot Verbal Feedback Can Improve Team Performance in Human-Robot Task Collaborations," In Proceedings of 10th ACM/IEEE International Conference on Human-Robot Interaction (HRI), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
370
+ page_content='213-220, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
371
+ page_content=' [5] Sandy H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
372
+ page_content=' Huang, David Held, Pieter Abbeel, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
373
+ page_content=' "Enabling Robots to Communicate Their Objectives," ArXiv, abs/1702.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
374
+ page_content='03465, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
375
+ page_content=' [6] Isaac Lage, Daphna Lifschitz, Finale Doshi-Velez, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
376
+ page_content=' "Toward Robust Policy Summarization," In Proceedings of the 18th International Conference on Autonomous Agents and MultiAgent Systems, AAMAS ’19, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
377
+ page_content='2081-2083, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
378
+ page_content=' [7] Lunjun Zhang, Gengcong Yang, and Bradly C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
379
+ page_content=' Stadie, "World Model as a Graph: Learning Latent Landmarks for Planning," ArXiv, abs/2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
380
+ page_content='12491, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
381
+ page_content=' [8] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
382
+ page_content=' Narayanan, Mahinthan Chandramohan, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
383
+ page_content=' Venkatesan, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
384
+ page_content=' "graph2vec: Learning distributed representations of graphs," ArXiv, abs/1707.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
385
+ page_content='05005, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
386
+ page_content=' [9] Quoc Le and Tomas Mikolov, "Distributed Representations of Sentences and Documents," In Proceedings of the 31st International Conference on Machine Learning, volume 32 of Proceedings of Machine Learning Research, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
387
+ page_content='1188-1196, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
388
+ page_content=' [10] Nino Shervashidze, Pascal Schweitzer, Erik Jan van Leeuwen, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
389
+ page_content=' "Weisfeiler-Lehman Graph Kernels," Journal of Machine Learning Research, 12(77), pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
390
+ page_content='2539–2561, 2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
391
+ page_content=' [11] Been Kim, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
392
+ page_content=' Wattenberg, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
393
+ page_content=' Gilmer, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
394
+ page_content=' "Interpretability Beyond Feature Attribution: Quantitative Testing with Concept Activation Vectors (TCAV)," In ICML, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
395
+ page_content=' [12] Maxime Chevalier-Boisvert, Lucas Willems, and Suman Pal, "Minimalistic Gridworld Environment for OpenAI Gym," 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
396
+ page_content=' 12' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ENE2T4oBgHgl3EQfSgdx/content/2301.03793v1.pdf'}
FNE1T4oBgHgl3EQfWwTK/content/2301.03119v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4117efbeceb1ded71f8ccbb1b5b38084ea41717ea17bc923951f1aee62044d2e
3
+ size 341025
FNE1T4oBgHgl3EQfWwTK/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13e251cf494b3a5e3f9cfb8467f72da414c17f97bc71d2bb32483bd51b19af9d
3
+ size 166183
FtAyT4oBgHgl3EQfSvdV/content/2301.00091v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1466037930e1c5fb7305bc83a9a419c46c96c70023891016748cfdb989b2b00
3
+ size 1200362
FtAyT4oBgHgl3EQfSvdV/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea231b9725e195b21fdc85de62f588b5278edd2799ec73255942a6c756780dd1
3
+ size 164543
GNAzT4oBgHgl3EQfxP7V/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0222c4d1af7b6abbae22a7f586ff4c5d81dd34fae1fbc248a58da07ca5436f23
3
+ size 2490413
HNAzT4oBgHgl3EQfHfvA/content/tmp_files/2301.01047v1.pdf.txt ADDED
@@ -0,0 +1,660 @@
 
 
 
 
1
+ A Theory of Human-Like Few-Shot Learning
2
+ Zhiying Jiang1, Rui Wang2, Dongbo Bu2, Ming Li1∗
3
+ 1David Cheriton School of Computer Science, University of Waterloo,
4
+ 200 University Ave W, Waterloo, ON N2L 3G1, Canada
5
+ 2Institute of Computing Technology, Chinese Academy of Science, Beijing, China
6
+ ∗To whom correspondence should be addressed; E-mail: [email protected]
7
+ We aim to bridge the gap between our common-sense few-sample human learn-
8
+ ing and large-data machine learning. We derive a theory of human-like few-
9
+ shot learning from the von Neumann-Landauer principle. Modelling human learn-
10
+ ing is difficult as how people learn varies from one to another. Under com-
11
+ monly accepted definitions, we prove that all human or animal few-shot learn-
12
+ ing, and major models including Free Energy Principle and Bayesian Program
13
+ Learning that model such learning, approximate our theory, under Church-
14
+ Turing thesis. We find that deep generative models like the variational autoencoder
15
+ (VAE) can be used to approximate our theory and perform significantly better
16
+ than baseline models including deep neural networks, for image recognition,
17
+ low resource language processing, and character recognition.
18
+ Introduction
19
+ During the past decade, fast progress in deep learning (1) has empowered computer speech
20
+ recognition, image processing, natural language processing, protein folding, game playing and
21
+ many other applications. However, this great progress falls short when we try to understand
22
+ our own learning mechanism: How to model human learning (2), (3), (4)?
23
+ Species in nature learn quickly to survive. When a dragonfly is hatched, within hours it
24
+ firms up its wings and then flies to catch mosquitoes; a newborn does not need tons of repeated
25
+ examples or transfer learning to identify an apple. Most human or animal learning exhibits a
26
+ mixture of inherited intelligence, few-shot learning without prior knowledge, as well as long
27
+ term many-shot learning. It is interesting to note that these learning programs are encoded in
28
+ our genomes but they are not all the same, even for individuals within the same species. The
29
+ diversity of these learning algorithms is vividly expressed by Spearman’s "g" factor (2).
30
+ Work in progress.
31
+ 1
32
+ arXiv:2301.01047v1 [cs.LG] 3 Jan 2023
33
+
34
+ Unlike data-laden, model-heavy, and energy-hungry deep learning approaches, most human
35
+ learning appears to be simple and easy. Merely scaling up current deep learning approaches may
36
+ not be sufficient for achieving human level intelligence. We miss certain major components
37
+ when modelling human or animal learning.
38
+ Diversity is one of the missing parts when modelling human or animal few-shot learning.
39
+ There are eight billion people on earth, each with a unique few-shot learning model (5). Even if
40
+ we just want to model one person, a single person often uses different parameters, features, and
41
+ perhaps different algorithms to deal with different learning tasks. Ideally we want a framework
42
+ that can cover the diversity in human and animal few-shot learning. Facing such a seemingly
43
+ formidable task, traditional thinking in machine learning will only lead us to various traps. To
44
+ avoid such traps we need to go back to the very first principles of physics.
45
+ Specifically, we start from an agreed-upon law in thermodynamics, to formally derive our
46
+ model for few-shot learning, and prove this is the optimal model within our framework in the
47
+ sense that all other models including human ones may be viewed as approximations to our
48
+ framework. We show a deep connection between our framework and the free energy principle (3)
49
+ and the Bayesian Program Learning model (4). By the end of this process, a component of data
50
+ compression during the inference phase of learning emerges as a key component of all few-shot
51
+ learning models.
52
+ First, we formalize our intuitive and commonly accepted concept of human-like few-shot
53
+ learning. For example, our definition below is consistent with what is used in (4), and in the
54
+ same spirit of (3).
55
+ Definition 1. Consider a universe Ω, partitioned into H disjoint concept classes: Ch, h =
56
+ 1, 2, . . . , H. Few-shot (k-shot) learning is described as follows:
57
+ 1. n elements in or outside Ω are given as unlabelled samples y1, . . . , yn;
58
+ 2. There are k labelled examples for each class Ch, for small k;
59
+ 3. The learning program, using a computable metric M, few-shot learns Ch, h = 1, 2, ...H,
60
+ if it uses the n unlabelled samples and k labelled samples and minimizes the objective
61
+ function:
62
+ Σ_{h=1}^{H} Σ_{i=1}^{|Ch|} M(xi, coreh) | y1, . . . , yn, xi ∈ Ch,
69
+ where coreh = ψ(k samples of Ch) representing a transformed representation of the k
70
+ labelled samples from Ch.
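As a minimal illustration of the objective in Definition 1, the sketch below leaves the metric M, the transform ψ that produces core_h from the k labelled samples, and the class assignment entirely to the particular learner; it is a generic restatement of the definition, not a specific implementation.

# Generic restatement of the k-shot objective in Definition 1; M and psi are learner-supplied.
def few_shot_objective(classes, labelled, unlabelled, M, psi):
    """classes: dict h -> members x_i assigned to C_h; labelled: dict h -> the k labelled samples."""
    cores = {h: psi(samples) for h, samples in labelled.items()}   # core_h = psi(k samples of C_h)
    total = 0.0
    for h, members in classes.items():
        for x in members:
            total += M(x, cores[h], unlabelled)                    # M(x_i, core_h | y_1, ..., y_n)
    return total   # the learner picks the assignment and metric parameters minimizing this value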
71
+ This definition covers most of our common sense few-shot learning scenarios and other
72
+ studies. In particular, this is the setting used for one-shot learning by (4). As independent individuals,
+ we do not all use the same metric, or even a similar metric, for few-shot learning. For example,
74
+ M. N. Hebart et al. (6) identified 49 highly reproducible dimensions for 1854 objects to measure
75
+ 2
76
+
77
+ their similarity. Different people can be equipped to better observe some of these dimensional
78
+ features.
79
+ We explain the intuition behind Definition 1 via a simple example. A human toddler may
80
+ have already seen many unlabelled samples of fruits which, for example, contains two classes:
81
+ apples and pears. Then given a new labelled sample from each class, the toddler learns how to
82
+ differentiate between these two fruits. The number of labelled data required for one to classify
83
+ may vary as people have different learning algorithms.
84
+ Current deep learning based approaches for few-shot learning generally depend on 1) many
85
+ auxiliary labelled training samples or task-specific data augmentation for transfer learning or
86
+ meta learning (7); or 2) very large scale self-supervised pre-training (8). These approaches thus
87
+ fall short of modelling few-shot learning as it occurs in humans and animals, as they can hardly account
88
+ for the diversity in learning algorithms and they either neglect the unsupervised scenario that
89
+ humans are mostly exposed to or use the scale of unlabelled data and training parameters that
90
+ are far beyond what creatures need.
91
+ Many attempts have been made to understand human learning through cognitive, biological,
92
+ and behavior sciences. Some studies have established basic principles a human learning model
93
+ should obey. One theory is the two-factor theory of intelligence by Charles Spearman in 1904 (2),
94
+ where the “g” factor is an indicator of the overall cognitive ability, and the “s” factor stands for
95
+ the aptitude that a person possesses in specific areas. As the “g” factor is genetically related (9), it
96
+ indicates the necessity of a learning theory that can account for the diversity in creatures’ learning
97
+ ability. Another theory is the Free Energy Principle by Karl Friston (3), which holds that human (and all
98
+ biological systems) learning tends to minimize the free energy between internal understanding in
99
+ the Bayesian sense (under the internally perceived distribution p) and that of the environmental event
100
+ (under distribution q), measured by KL-divergence (10). In a similar spirit, Lake, Salakhutdinov
101
+ and Tenenbaum (4) proposed a Bayesian program learning (BPL) model, learning a probabilistic
102
+ model for each concept and achieving human-level performance. Two articles by Schmidhuber (11)
103
+ and by Chater and Vitanyi (12) linked simplicity to human cognition and appreciation of arts.
104
+ Instead of exploring a biological basis for few-shot learning, we think it is possible to
105
+ mathematically derive an optimal framework that can unify the above theories. We further
106
+ demonstrate by experiments that our new model indeed works significantly better than other
107
+ classical deep learning neural networks for few-shot learning. As a byproduct of our new model,
108
+ a new concept class of "interestingness" is learned; this class implies where our appreciation
109
+ of art, music, science and games comes from. Extending this observation, some aspects of
110
+ consciousness may be modelled as a set of few-shot learned concepts. Consequently, we
111
+ hypothesize that the ability to label input data becomes a key step toward acquiring some aspects of
112
+ consciousness.
113
+ 3
114
+
115
+ A theory of few-shot learning
116
+ We mathematically derive an optimal few-shot learning model for Definition 1 that is effective
117
+ and is able to cover the enormous diversity that exists across different species. The task may appear to be
118
+ formidable because of conflicting and seemingly very general goals: each individual is allowed
119
+ to have a different learning model, yet our model has just one program to model everybody;
120
+ we do not yet exactly know the complete underlying biological mechanisms, yet we need to
121
+ implement the right functionality; there are an infinite number of models, yet we need to choose
122
+ one that is optimal; we are not really interested in “proposing models” out of the blue, yet we wish
123
+ our model to be a mathematical consequence of some basic laws of physics; the model needs to
124
+ be theoretically sound, yet practically useful.
125
+ For simplicity and readability, we begin with one-shot learning, k = 1 in Definition 1. Thus,
126
+ coreh in Definition 1 is just the single labelled sample xh. For larger k, coreh can be some form
127
+ of average of the k samples. As Definition 1 specifies, some unlabelled objects are assumed, and
+ it’s also possible to extend the definition by adding a distribution, learnt from either unlabelled or
+ labelled data, to Ω. Using a metric M that is responsible for the k-shot learning of an individual, the
130
+ learning system seeks to minimize the energy function
131
+ Σ_{h=1}^{H} Σ_{i=1}^{|Ch|} M(xi, xh | y1, . . . , yn),
138
+ or, assuming H(y1, . . . , yn) is a pre-trained model of y1, . . . , yn, or other labelled samples,
139
+ capturing the distribution.
140
+ Σ_{h=1}^{H} Σ_{i=1}^{|Ch|} M(xi, xh | H(y1, . . . , yn)),
147
+ Now the question is, what sort of M should we use? Indeed, this varies from person to person.
148
+ Can we unify all such measures, algorithms and inferences? Let’s go back to the fundamentals.
149
+ Principle 1 (von Neumann-Landauer Principle). Irreversibly processing 1 bit of information costs
150
+ 1kT; reversible computation is free.
151
+ Then for two objects x, y, the minimum energy needed to convert between x and y in our
152
+ brain is:
153
+ EU(x, y) = min{|p| : U(x, p) = y, U(y, p) = x},
154
+ where U is a universal Turing machine or our brain, assuming Church-Turing thesis. Since we
155
+ can prove a theorem showing all Universal Turing machines are equivalent modulo a constant and
156
+ efficiency, we will drop the index U (see (13)). To interpret, E(x, y) is the length of the shortest
157
+ program that reversibly converts between x and y. These bits used in the shortest program p
158
+ when they are erased will cost |p|kT of energy, according to John von Neumann and Rolf
+ Landauer’s law. This leads us to a fundamental theorem (14):
160
+ 4
161
+
162
166
+ Figure 1: Bipartite Graph
167
+ Theorem 1. E(x, y) = max{K(x|y), K(y|x)} + O(1).
168
+ K(x|y) is the Kolmogorov complexity of x given y, or informally, the length of the shortest
169
+ program that outputs x given input y (details are shown in (13)). As this theorem was proved
170
+ thirty years ago and it is vital in our theory, to help our readers, we will provide an intuitive but
171
+ less formal proof here.
172
+ Proof. By the definition of E(x, y), it follows that E(x, y) ≥ K(x|y) and E(x, y) ≥ K(y|x); thus we have E(x, y) ≥ max{K(x|y), K(y|x)}.
+ To prove the other direction, E(x, y) ≤ max{K(x|y), K(y|x)} + O(1), we need to construct a program p such that p outputs y on input x and outputs x on input y, and the length of p is bounded by max{K(x|y), K(y|x)} + O(1).
+ Let k1 = K(x|y) and k2 = K(y|x). Without loss of generality, assume k1 ≤ k2. We first define a bipartite graph {X, Y, E}, where X, Y = {0, 1}*, as shown in Figure 1, and E is a finite set of edges between X and Y defined as follows:
+ E = { {u, v} : u ∈ X, v ∈ Y, K(u|v) ≤ k1, K(v|u) ≤ k2 }.
+ Note that the particular edge (x, y) is in E. If we can find the edge (x, y), then given x, p can output y, and vice versa. So the idea of the proof is to partition E properly so that we can identify (x, y) easily. Two edges are disjoint if they do not share nodes on either end. A matching in graph theory is a set of disjoint edges in E.
+ Claim. E can be partitioned into at most 2^{k2+2} matchings.
+ Proof of Claim. Consider an edge (u, v) ∈ E. The degree of a node u ∈ X is bounded by 2^{k2+1} because there are at most 2^{k2+1} different strings v such that K(v|u) ≤ k2; accumulating the possible programs of length i = 1 to i = k2 gives ∑_{i=1}^{k2} 2^i = 2^{k2+1} − 2. Hence u belongs to at most 2^{k2+1} matchings. Similarly, a node v ∈ Y belongs to at most 2^{k1+1} matchings. We just need to put edge (u, v) in an unused matching. (End of Proof of Claim)
+ Let Mi be the matching that contains the edge (x, y). We now construct our program p, which operates as follows:
+ • Generate Mi following the proof of the Claim, i.e., by enumerating the matchings. This uses the information k1, k2, and i, with K(i) ≤ k2 + O(1).
+ • Given x, p uses Mi to output y, and given y, p uses Mi to output x.
+ A conditional version of Theorem 1, using the information in Definition 1, can be obtained: E(x, y | y1, . . . , yn) = max{K(x|y, y1, . . . , yn), K(y|x, y1, . . . , yn)}, conditioning on the unlabelled samples y1, . . . , yn. According to (14), this distance is universal, in the sense that E(x, y) is minimal among all computable distances:
+ Theorem 2. For any computable metric D, there is a constant c such that for all x, y, E(x, y) ≤ D(x, y) + c.
+ This theorem implies that if a metric D finds some similarity between x and y, so will E. Thus, the above theorem implies, up to some constant O(H),
+ ∑_{h=1}^{H} ∑_{i=1}^{|Ch|} E(xi ∈ Ch, coreh | y1, . . . , yn) ≤ ∑_{h=1}^{H} ∑_{i=1}^{|Ch|} M(xi ∈ Ch, coreh | y1, . . . , yn).
+ When the unlabelled samples y1, . . . , yn, plus other irrelevant historical labelled samples, are modelled by some model H such as a generative model (e.g., a VAE), the above inequality can be rewritten as:
+ ∑_{h=1}^{H} ∑_{i=1}^{|Ch|} E(xi ∈ Ch, coreh | H) ≤ ∑_{h=1}^{H} ∑_{i=1}^{|Ch|} M(xi ∈ Ch, coreh | H).   (1)
+ Thus, E gives the optimal metric for few-shot learning algorithms. Other algorithms satisfying Definition 1 are approximations to this optimal solution.1
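+ To make this concrete, here is a minimal sketch (our own illustration, not the authors' code) of approximating E(x, y) with an off-the-shelf compressor; using gzip and the concatenation-based estimate of the conditional complexities are assumptions of this sketch, not something prescribed by the paper.
+ import gzip
+ 
+ def c(data: bytes) -> int:
+     # compressed length in bytes: a crude stand-in for Kolmogorov complexity K
+     return len(gzip.compress(data))
+ 
+ def information_distance(x: bytes, y: bytes) -> int:
+     # approximate E(x, y) = max{K(x|y), K(y|x)} with a real compressor;
+     # K(x|y) is estimated by C(y + x) - C(y), the extra bits needed for x once y is seen
+     k_x_given_y = c(y + x) - c(y)
+     k_y_given_x = c(x + y) - c(x)
+     return max(k_x_given_y, k_y_given_x)
+ 
+ if __name__ == "__main__":
+     a = b"the quick brown fox jumps over the lazy dog " * 20
+     b = b"the quick brown fox jumps over the lazy cat " * 20
+     z = b"lorem ipsum dolor sit amet consectetur " * 20
+     print(information_distance(a, b))   # small: a and b share most structure
+     print(information_distance(a, z))   # larger: little shared structure
+ Any stronger compressor (or a learned generative model used as a compressor) can be dropped into c() without changing the rest of the sketch.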
+ In addition, we show our theory's deep connection to two well-established principles of learning in neuroscience and psychology. Friston's Free Energy Principle (FEP) (3), derived from the Bayesian brain hypothesis (15), states that the brain seeks to minimize surprise. Specifically, it assumes the brain has an internal state (a.k.a. a generative model) that implicitly models the environment according to the sensory data. Hidden (latent) variables need to be defined for the internal state, which are drawn from prior beliefs. Ideally, this prior knowledge is also modelled, which is made possible by hierarchical generative models. The free energy principle (FEP) is often interpreted as Bayesian optimization, using the Evidence Lower Bound ELBO = log p(x; θ) − D(q(z)∥p(z|x; θ)) as the optimization function. Here the evidence log p(x; θ) is the encoding length of x under probability p, and the Kullback-Leibler divergence term is the p-expected encoding length difference. This is half of Theorem 1, and FEP is asymmetric if we view it as a distance. However, symmetry is important to few-shot learning. For example, a scarlet king snake may look like a coral snake, but the latter certainly has more deadly features that the former lacks; the one-way compression K(ScarletKingSnake|CoralSnake) is not sufficient to distinguish the two. Likewise, although H. influenzae (genome size 1.8 million) and E. coli (genome size 5 million) are sister species, under an asymmetric measure E. coli would be much closer to a species with an empty genome G0, or to just a covid-19 genome (K(G0|E. coli)), than to H. influenzae (K(H. influenzae|E. coli)). A symmetric interpretation of Friston's FEP can be derived by requiring minimum conversion energy, as we show in Theorem 1.
+ 1Note that E is a metric: it is symmetric and satisfies the triangle inequality.
+ Figure 2: Illustration of our framework; the dashed line indicates an optional component when learning. (The figure shows a compressor fed by unlabeled data, a learnt distribution, and a test instance.)
+ Different individuals may use different compression algorithms to do data abstraction and inference. These algorithms can all be viewed as approximating E(x, y); some are more efficient than others in different situations. The individuals with better compression algorithms have a bigger “g” factor. Diversified compression algorithms also guarantee better survival chances for a community when facing a pandemic. As compression neural networks are genetically encoded, the “g” factor is thus inheritable. As illustrated in Figure 2, compression algorithms vary from one individual to another. The distribution of the data to be learnt is captured either implicitly or explicitly by creatures; those who can better utilize unlabelled data to capture the distribution may have a more efficient compression algorithm.
+ Experimental Results
278
+ Image Experiments
279
+ To approximate our universal few-shot learning model, we use a hierarchical VAE as the underlying model H in Inequality 1 to model the unlabelled samples y1, . . . , yn. This hierarchical structure coincides with the structure of our visual cortex and brain (16). According to integrated information theory (17), an input y may come from all sensing terminals: vision, hearing, smell, taste, and sensation. Often, creatures are exposed to an unsupervised environment where objects are unknown and unlabelled. Revisiting the negative ELBO, we can see that, in the context of FEP, it can be interpreted as changing perceptions to minimize discrepancy (minimize the KL divergence) or changing observations to maximize evidence. When creatures are exposed to a “tree” and do not fully realize what it is, the sensory information of the object is internalized with hidden states (inner beliefs) that describe how they believe a “tree” is generated. This generation process helps the creatures identify latent similarities among objects that belong to the same category, without full awareness. This “unconscious” training to generate helps the creatures categorize better in the future. When the identity of a “tree” is finally revealed, they can generalize quickly. This explains our rationale for using a VAE to process unlabelled samples. Consequently, the Kolmogorov complexity terms in Inequality 1 are naturally approximated by a VAE-based compressor (18).
+ To test the hypothesis, we carry out experiments on five datasets: MNIST, KMNIST, FashionMNIST, STL-10 and CIFAR-10. We first train a hierarchical VAE on unlabelled data to learn to generate x̂ that is as close to x as possible. This corresponds to the period when creatures are exposed to an environment without knowing the objects, implicitly learning the latent representation shared among objects. When the identities of the objects are revealed, a VAE-based universal compressor can be used to identify the new objects. Specifically, after training a hierarchical VAE in an unsupervised manner, we compare the energy function E between a labelled image and a test image, as in Definition 1. In our experiment, we use 5 labelled samples per class to test the classification accuracy. The energy function E relies on a compressor for its approximation; we thus use the bits-back argument to turn our trained VAE directly into the compressor of (18). Our results show that, using only 5 samples, our method outperforms traditional supervised models such as SVM, CNN, VGG and the Vision Transformer (ViT) on all five datasets. These supervised methods are chosen to represent different model complexities with a wide range of parameter counts. As we can see, when labelled data are scarce, supervised methods are not effective: complex models like VGG cannot perform better than an SVM, and this tendency is even more obvious for ViT without pre-training. The improvement that our method brings is more obvious on more complex datasets such as STL-10 and CIFAR-10. A similar result is also obtained in recent work across different shot settings (19).
+ We also compare with using the latent representation directly with a k-Nearest-Neighbor classifier, labelled as “Latent” in the table. The architecture and training procedure for the “Latent” method are exactly the same as for our method: we train on unlabelled data to generate the samples and then take the latent representation for classification. We can see that using the latent representation outperforms all supervised methods on four out of five datasets, but its accuracy is still much lower than our method's, indicating that our method better utilizes the generative model.
+ Text Experiments
322
+ Our theory is generally applicable, even without pre-training on unlabelled data. Here, we demonstrate significant advantages of our approach, with a simple compressor such as gzip, on lower-resource languages.
+ Languages with Abundant Resources
+ We first test our method on datasets with abundant resources. Specifically, we evaluate on three datasets: AG News, SogouNews and DBpedia.
+
330
+               MNIST      KMNIST     FashionMNIST  STL-10     CIFAR-10
+ SVM           69.4±2.2   40.3±3.6   67.1±2.1      21.3±2.8   21.1±1.9
+ CNN           72.4±3.5   41.2±1.9   67.4±1.9      24.8±1.5   23.4±2.9
+ VGG           69.4±5.7   36.4±4.7   62.8±4.1      20.6±2.0   22.2±1.6
+ ViT (disc)    58.8±4.6   35.8±4.1   61.5±2.2      24.2±2.5   22.3±1.8
+ Latent        73.6±3.1   48.1±3.3   69.5±3.5      31.5±3.7   22.2±1.6
+ Ours          77.6±0.4   55.4±4.3   74.1±3.2      39.6±3.1   35.3±2.9
+ Table 1: 5-shot image classification accuracy on five datasets.
+                AG News     SogouNews   DBpedia
+ fasttext       27.3±2.1    54.5±5.3    47.5±4.1
+ Bi-LSTM+Attn   26.9±2.2    53.4±4.2    50.6±4.1
+ HAN            27.4±2.4    42.5±7.2    35.0±1.2
+ W2V            38.8±18.6   14.4±0.5    32.5±11.3
+ BERT           80.3±2.6    22.1±4.1    96.4±4.1
+ Ours           58.7±4.8    64.9±6.1    62.2±2.2
+ Table 2: 5-shot text classification accuracy on three datasets.
+ Similar to image classification, we compare with supervised methods, including fasttext (20), BiLSTM (21) with an attention mechanism (22) and the Hierarchical Attention Network (HAN) (23), and with non-parametric methods that use Word2Vec (W2V) (24) as the representation. We also compare with pre-trained language models like BERT (25). We use five labelled samples per class (5-shot) for all methods.
+ Surprisingly, even without any pre-training and with a simple compressor like gzip, our method outperforms all non-pretrained supervised methods and non-parametric methods in the low-data regime. This indicates that the compressor serves as an efficient way to capture regularity and that our information distance is effective in comparing similarity based on the essential information. When comparing with pre-trained models like BERT, we can see that our method is significantly higher on SogouNews, a special dataset that contains Pinyin, a phonetic romanization of Chinese, which can be viewed as an Out-Of-Distribution (OOD) dataset as it uses the same alphabet as English corpora.
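+ For illustration, a 5-shot text classifier of the kind evaluated above can be sketched with nothing but gzip; the helper names and the toy snippets below are our own and are not taken from the paper.
+ import gzip
+ 
+ def dist(x: str, y: str) -> int:
+     # approximate information distance between two texts via gzip
+     cx = len(gzip.compress(x.encode()))
+     cy = len(gzip.compress(y.encode()))
+     cxy = len(gzip.compress((x + " " + y).encode()))
+     cyx = len(gzip.compress((y + " " + x).encode()))
+     return max(cxy - cx, cyx - cy)
+ 
+ def five_shot_classify(test_text: str, support: dict) -> str:
+     # support maps a class label to its (up to 5) labelled example texts;
+     # predict the class whose closest labelled sample minimizes the distance
+     best_label, best_d = None, float("inf")
+     for label, samples in support.items():
+         for s in samples:
+             d = dist(test_text, s)
+             if d < best_d:
+                 best_label, best_d = label, d
+     return best_label
+ 
+ # toy usage with hypothetical snippets
+ support = {
+     "sports": ["the team won the championship game last night",
+                "the striker scored twice in the second half"],
+     "business": ["shares fell after the quarterly earnings report",
+                  "the central bank raised interest rates again"],
+ }
+ print(five_shot_classify("the goalkeeper saved a late penalty", support))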
+ Low-Resource Languages
+ Sufficiently pre-trained language models are exceptional few-shot learners (8). However, when faced with low-resource data or distributions that are significantly different from any pre-training data, those pre-trained language models lose their advantage over our method. We compare our method with BERT on four different low-resource language datasets: Kinyarwanda, Kirundi, Swahili and Filipino. These datasets are curated to use the Latin alphabet, the same as English corpora.
+               Kinnews     Kirnews     Swahili      Filipino
+ BERT          24.0±6.0    38.6±10.0   39.6±9.6     40.9±5.8
+ mBERT         22.9±6.6    32.4±7.1    55.8±16.9    46.5±4.8
+ Ours          45.8±6.5    54.1±5.6    62.7±7.2     65.2±4.8
+ Table 3: 5-shot text classification accuracy on low-resource datasets.
+ BERT has performed extremely well, as shown in Table 2, thanks to pre-training on billions of tokens. However, when facing low-resource datasets, BERT performs significantly worse than our method using only gzip, as we can see in Table 3, whether we use the multilingual pre-trained version or the original one. Note that mBERT is pre-trained on 104 languages, including Swahili and Tagalog (on which Filipino is based). As we can see on Swahili and Filipino, mBERT performs better than BERT, but is still significantly worse than our method.
+ Omniglot one-shot-classification dataset
449
+ Figure 3: Distance between two Bezier curves
450
+ In (4), a one-shot learning framework, Bayesian program learning (BPL), was proposed. It learns a simple probabilistic model for each concept. Taking a negative logarithm converts a Bayesian formula to a description-length paradigm, hence BPL can be viewed as one particular approximation to our theory. Here we provide another simple approximation of our theory for the Omniglot one-shot-classification dataset of (4).
+ Our system first decomposes a given character into strokes, then computes E(a, b) between characters a and b, using all their possible stroke decompositions. We describe how to calculate E(a, b) here; the details of the decomposition program are given in Appendix A.
+ 1. Fit each stroke by a Bezier curve;
+ 2. Ensure that the numbers of points on the two curves are the same; the algorithm uses an equal-split method to select the same number of points on each curve (Figure 3);
+ 3. Ensure that the areas of the convex hulls and the barycenters of the compared characters are the same;
+ 4. Use the maximum Cartesian distance between parallel points on the two Bezier curves to approximate the minimum encoding distance between the two curves, as shown in Figure 3;
+ 5. Choose the character with the minimum distance; a sketch of the curve-distance computation is given below.
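+ A rough sketch of steps 1-4 follows; the Bernstein-form sampling, the crude barycenter/scale normalization standing in for step 3, and the made-up strokes are our own simplifications, not the authors' implementation.
+ import numpy as np
+ from math import comb
+ 
+ def bezier_points(control_pts, n=50):
+     # sample n points along a Bezier curve given its control points (Bernstein form)
+     control_pts = np.asarray(control_pts, dtype=float)
+     k = len(control_pts) - 1
+     t = np.linspace(0.0, 1.0, n)[:, None]
+     basis = [comb(k, i) * (1 - t) ** (k - i) * t ** i for i in range(k + 1)]
+     return sum(b * p for b, p in zip(basis, control_pts))
+ 
+ def normalize(pts):
+     # shift the barycenter to the origin and scale by the mean radius
+     pts = pts - pts.mean(axis=0)
+     scale = np.linalg.norm(pts, axis=1).mean() or 1.0
+     return pts / scale
+ 
+ def curve_distance(ctrl_a, ctrl_b, n=50):
+     # max Cartesian distance between parallel (equally indexed) sample points
+     a = normalize(bezier_points(ctrl_a, n))
+     b = normalize(bezier_points(ctrl_b, n))
+     return float(np.max(np.linalg.norm(a - b, axis=1)))
+ 
+ # toy usage with two made-up strokes
+ stroke1 = [(0, 0), (1, 2), (3, 2), (4, 0)]
+ stroke2 = [(0, 0), (1, 2.5), (3, 1.5), (4, 0)]
+ print(curve_distance(stroke1, stroke2))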
+ This simple implementation achieves 92.25% 20-way 1-shot accuracy on this dataset. The point here is to demonstrate that various approximations of our theory work, rather than to compare accuracies. The 96.75% of (4) and our 92.25% might simply correspond to two different individuals with different compression algorithms.
+ Unification
493
+ Our framework can unify other popular deep-neural-network approaches to few-shot learning.
494
+ Siamese Network: A Siamese network uses twin subnetworks to rank the similarity between two inputs in order to learn useful features. M here is often a contrastive loss. This framework shows strong performance in one-shot image recognition (26).
+ Prototypical Network: Prototypical networks (27) propose to optimize the distance metric M directly by learning coreh in representation space, where coreh is represented as the mean of the embedded support samples.
+ Bi-Encoder: In the context of natural language processing, one of the dominant structures is the bi-encoder design, with each encoder being a pre-trained language model. For example, in information retrieval, Dense Passage Retrieval (DPR), with two encoders encoding the query and the document respectively, has become the new state of the art. To capture semantic similarity, sentenceBERT (28) also adopts the bi-encoder design and has become one of the most prevalent methods for semantic textual similarity. M in both cases can be either the cosine similarity or the Euclidean distance between the representations learned through pre-trained models.
+ Information Distance based Methods: Hundreds of algorithms were published, before the deep learning era, on parameter-free data mining, clustering, anomaly detection and classification using the information distance E (29–34), with a comprehensive list in (13). Recently, (19) combined the information distance with deep neural networks to leverage the generalizability of few-shot image classification. That work shows that, with the help of deep generative models, unlabelled data can be better utilized for few-shot learning under our framework.
+ Conclusion and a discussion on consciousness
514
+ We have defined human-like few-shot learning and derived an optimal form of such few-shot learning. Note that there is an interesting difference between our theory and classical learning theory. In classical learning theory, it is well known that if we compress training data to a smaller consistent description, whether it is a classical Bayesian network or a deep neural network (13, 35), we achieve learning. In this paper, we demonstrate that compression is also important in the inference stage, especially when there are not enough labelled data to train a small model. On the biological side, compression circuits using predictive coding in the human cortex have been studied by (36). Experiments have also strongly supported our theory. We expect more practical systems approximating our theory to be implemented to solve commonplace few-shot learning problems when the large amounts of labelled data required for deep learning are lacking. We now wish to explore two consequences of our few-shot learning model for consciousness.
+ A binary classifier of interestingness
529
+ Our few-shot learning model has a by-product. We have proved that compression is a universal goal that few-shot learning algorithms approximate. This immediately implies a (subconscious) binary classifier: if something is compressed, then something interesting happens, and attention is given. It turns out that this “interestingness” has been theoretically studied as logical depth, first proposed by Charles Bennett (13). According to Bennett, a structure is deep if it is superficially random but subtly redundant. When few-shot learning happens, significant compression happens, and these deep objects gain attention. Such a binary classifier might explain our appreciation of art, music, games, and science, since these all share the common feature of dealing with non-trivially compressible objects: whether it is a shorter description of the data that gives rise to Newton's laws (13), or a piece of art or music that is itself compressible or that reminds us of something we have experienced before, hence very compressible, we feel we understand it and hence appreciate it. Science is nothing but compressing data into simpler descriptions of nature.
+ Consciousness and the ability of labelling data
542
+ Do other species have consciousness? It is difficult to answer this question, as consciousness is not testable. Thomas Nagel (37) remarked that we will never know if a bat is conscious because we are not bats.
+ Consider an alternative, data-driven approach that asks what a species can do instead of how it feels. That is, if we treat some aspects of consciousness as a collection of learned concepts, then, given a compression network, the ability to acquire the relevant concepts becomes a matter of labelling relevant data. We know that learning and consciousness are both located in the posterior cortex region (38). This is in agreement with observations of injured patients who lost consciousness, and with the “bistable perception” training results obtained with monkeys (39).
+ Varieties of consciousness are being pragmatically studied (40). These include: 1) the ability to consciously perceive the environment; 2) the ability to evaluate conscious emotions; 3) the ability to have a unified conscious experience; 4) the ability to integrate across time as a continuous stream, one moment flowing into the next; 5) the conscious awareness of oneself as distinct from the outside world. Many of these abilities may be seen as few-shot learnable concepts, given properly labelled data.
+
559
+ Different animals exhibit various levels of such consciousness by passing certain tests. For example, chimpanzees, dolphins, Asian elephants, and magpies can recognize themselves by passing mirror-mark tests. The corvids display some emotions and are able to plan ahead. Octopuses have powerful perceptual faculties, obtaining and processing data independently with each tentacle. Experimentally, awareness emerges when information travels back and forth between brain areas (41) instead of along a linear chain of command.
+ According to our theory, the brain really only needs a universal compressor to compress information, regardless of whether there is one processor in the head or several processors in the tentacles (in the case of cephalopods). Thus we can conjecture that “consciousness” is then a matter of the ability to label the data from sensory terminals. Food or enemies in the environment are easy to label. Emotional labelling requires some level of abstraction. Self-awareness of “me” versus “others” is then just another trainable binary classifier, depending on whether the species is able to do “displaced reference” mental labelling. Other than human beings, only orangutans are known to have limited displaced-reference ability (42).
+ Thus we have reduced the non-testable question of whether an animal has consciousness in some aspects to whether it is able to label the corresponding data properly.
+ Acknowledgement
576
+ We thank Dr. Hang Li for suggestions and bringing (43) to our attention and Dr. Amy Sun for
577
+ bringing (44) to our attention. The work is supported in part by Canada’s NSERC operating grant
578
+ OGP0046506, Canada Research Chair Program, and the Leading Innovative and Entrepreneur
579
+ teams program of Zhejiang, number 2019R02002, and NSFC grant 61832019.
580
+ References and Notes
581
+ 1. Y. LeCun, Y. Bengio, G. Hinton, Nature 521, 436 (2015).
582
+ 2. C. Spearman (1961).
583
+ 3. K. Friston, Nature Review Neuroscience 11, 21 (2010).
584
+ 4. B. M. Lake, R. Salakhutdinov, J. B. Tenenbaum, Science 350, 1332 (2015).
585
+ 5. E. Stern, npj Science of Learning 2, 1 (2017).
586
+ 6. M. Hebart, C. Zheng, F. Pereira, C. Baker, Nature, Human Behaviour pp. 1173–1185 (2020).
587
+ 7. C. Finn, P. Abbeel, S. Levine, International conference on machine learning (PMLR, 2017),
588
+ pp. 1126–1135.
589
+ 8. T. Brown, et al., Advances in neural information processing systems 33, 1877 (2020).
590
+ 9. T. J. Bouchard Jr, Annals of Human Biology 36, 527 (2009).
593
+ 10. M. N. Bernstein, mbernste.github.io/posts/elbo/ .
594
+ 11. J. Schmidhuber, arXiv:0812.4360v2 [cs.AI] (2009).
595
+ 12. N. Chater, P. Vitányi, Trends in Cognitive Sciences 7, 19 (2003).
596
+ 13. M. Li, P. Vitányi, An Introduction to Kolmogorov Complexity and Its Applications (Springer-
597
+ Verlag, 1993, 1997, 2008, 2019).
598
+ 14. C. Bennett, P. Gács, M. Li, P. Vitányi, W. Zurek, IEEE Trans. Inform. Theory 44, 1407
599
+ (1998).
600
+ 15. D. C. Knill, A. Pouget, TRENDS in Neurosciences 27, 712 (2004).
601
+ 16. K. Friston, PLoS computational biology 4, e1000211 (2008).
602
+ 17. C. Koch, G. Tononi, Scientific American 304 (2011).
603
+ 18. J. Townsend, T. Bird, D. Barber, International Conference on Learning Representations
604
+ (2018).
605
+ 19. Z. Jiang, Y. Dai, J. Xin, M. Li, J. Lin, Advances in Neural Information Processing Systems
606
+ (2022).
607
+ 20. A. Joulin, E. Grave, P. B. T. Mikolov, EACL 2017 p. 427 (2017).
608
+ 21. M. Schuster, K. K. Paliwal, IEEE transactions on Signal Processing 45, 2673 (1997).
609
+ 22. Y. Wang, M. Huang, X. Zhu, L. Zhao, Proceedings of the 2016 conference on empirical
610
+ methods in natural language processing (2016), pp. 606–615.
611
+ 23. Z. Yang, et al., Proceedings of the 2016 conference of the North American chapter of
612
+ the association for computational linguistics: human language technologies (2016), pp.
613
+ 1480–1489.
614
+ 24. T. Mikolov, K. Chen, G. Corrado, J. Dean, arXiv preprint arXiv:1301.3781 (2013).
615
+ 25. J. Devlin, M.-W. Chang, K. Lee, K. Toutanova, Proceedings of the 2019 Conference of
616
+ the North American Chapter of the Association for Computational Linguistics: Human
617
+ Language Technologies, Volume 1 (Long and Short Papers) (2019), pp. 4171–4186.
618
+ 26. G. Koch, R. Zemel, R. Salakhutdinov, et al., ICML deep learning workshop (Lille, 2015),
619
+ vol. 2, p. 0.
620
+ 27. J. Snell, K. Swersky, R. Zemel, Advances in neural information processing systems 30
623
+ (2017).
624
+ 28. N. Reimers, I. Gurevych, Proceedings of the 2019 Conference on Empirical Methods
625
+ in Natural Language Processing and the 9th International Joint Conference on Natural
626
+ Language Processing (EMNLP-IJCNLP) (2019), pp. 3982–3992.
627
+ 29. M. Li, et al., Bioinformatics 17, 149 (2001).
628
+ 30. E. Keogh, S. Lonardi, C. A. Ratanamahatana, Proceedings of the tenth ACM SIGKDD
629
+ international conference on Knowledge discovery and data mining (2004), pp. 206–215.
630
+ 31. C. H. Bennett, M. Li, B. Ma, Scientific American 288, 76 (2003).
631
+ 32. M. Nykter, et al., Physical review letters 100, 058702 (2008).
632
+ 33. D. Benedetto, E. Caglioti, V. Loreto, Physical Review Letters 88, 048702 (2002).
633
+ 34. M. Nykter, et al., Proceedings of the National Academy of Sciences 105, 1897 (2008).
634
+ 35. Y. Bengio, et al., Foundations and trends® in Machine Learning 2, 1 (2009).
635
+ 36. R. P. Rao, D. H. Ballard, Nature neuroscience 2, 79 (1999).
636
+ 37. T. Nagel, Readings in Philosophy of Psychology (1974).
637
+ 38. C. Koch, Scientific American. (2018).
638
+ 39. G. Miller, Science 309, 79 (2005).
639
+ 40. J. Birch, A. Schnell, N. Clayton, Trends in cognitive sciences (2020).
640
+ 41. M. Boly, et al., Science 332 (May, 2011).
641
+ 42. H. Lyn, et al., Animal Cognition 17 (2014).
642
+ 43. Y. Ma, D. Tsao, H. Shum (2022).
643
+ 44. F. Scherr, C. Stöckl, W. Maass, BioRxiv (2020).
644
+ A. Algorithm for extracting strokes from a character
+ Repeat the following, using depth-first search, until all pixels of the character are marked:
+ (1) Extract the skeleton so that the stroke width is 1 pixel. Then convert the image to a graph and shrink adjacent cross points.
+ (2) Randomly select an endpoint as the starting point, with endpoints at the top left having a greater chance of being selected. Walk until a cross point or an endpoint is reached. If there is a circle, select a cross point, or a top-left point if there is no cross point. Record this stroke and mark it on the character. Allow a small number of already-marked pixels to make the decomposition more natural.
+ (3) When meeting a cross point, randomly choose between two cases: pen-up and turning. Pen-up means the end of a stroke; go to step (2) with the marked graph. Turning means continuation, hence repeat step (2). If walking to an endpoint, attempt to turn by going back to find new unmarked pixels within some small number of pixels, or directly end the stroke and repeat step (2) with the marked graph. A simplified code sketch follows.
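+ A highly simplified sketch of the stroke-extraction idea, assuming scikit-image for skeletonization; the greedy endpoint and cross-point handling below is our own approximation of the randomized procedure described above, not the authors' program.
+ import numpy as np
+ from skimage.morphology import skeletonize
+ 
+ def neighbors(p, skel):
+     r, c = p
+     out = []
+     for dr in (-1, 0, 1):
+         for dc in (-1, 0, 1):
+             if dr == dc == 0:
+                 continue
+             q = (r + dr, c + dc)
+             if 0 <= q[0] < skel.shape[0] and 0 <= q[1] < skel.shape[1] and skel[q]:
+                 out.append(q)
+     return out
+ 
+ def extract_strokes(binary_img):
+     # skeletonize, then repeatedly walk from an endpoint (preferring top-left)
+     # until a cross point or endpoint, marking visited pixels
+     skel = skeletonize(binary_img.astype(bool))
+     visited = np.zeros_like(skel, dtype=bool)
+     strokes = []
+     while True:
+         unvisited = [tuple(p) for p in np.argwhere(skel & ~visited)]
+         if not unvisited:
+             break
+         endpoints = [p for p in unvisited if len(neighbors(p, skel)) == 1]
+         start = min(endpoints or unvisited)   # lexicographic order = top-left bias
+         stroke, cur = [start], start
+         visited[cur] = True
+         while True:
+             nxt = [q for q in neighbors(cur, skel) if not visited[q]]
+             if not nxt or len(neighbors(cur, skel)) > 2:
+                 break          # endpoint reached, or cross point: pen-up
+             cur = nxt[0]
+             visited[cur] = True
+             stroke.append(cur)
+         strokes.append(stroke)
+     return strokes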
+
HNAzT4oBgHgl3EQfHfvA/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
HdE1T4oBgHgl3EQfFgNH/content/2301.02902v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef4de03d8aed531c616bf7f65e989d4b613eedddb7e1a62dead3f75611952b21
3
+ size 4350921
HdE1T4oBgHgl3EQfFgNH/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c6f8cf20d7bca7740ee2e50bd92f11f746040d89a873023d31d717a65989e31
3
+ size 2883629
I9E2T4oBgHgl3EQfpAhk/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df791b06146d2ca8297a80b0ae0e233c4013bedd2dcb6b9f17fa90a464c0078f
3
+ size 4784173
ItA0T4oBgHgl3EQfCP98/content/2301.01987v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6930cfd3ba00d108b3884187a9155945d0cfee623d85961910337cb9fbe99fe
3
+ size 844784
ItA0T4oBgHgl3EQfCP98/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca7c298a37eef70607c6325ab4773821a033a7d4cbef533509721fd11e5f4ce6
3
+ size 3932205
ItA0T4oBgHgl3EQfCP98/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b39e79b4c4b4171b43e62780446966c521b97d930284dc8e1b7736565a7de6f7
3
+ size 145319
JdAzT4oBgHgl3EQfyP4s/content/tmp_files/2301.01749v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
JdAzT4oBgHgl3EQfyP4s/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
LdFOT4oBgHgl3EQf0DSF/content/tmp_files/2301.12934v1.pdf.txt ADDED
@@ -0,0 +1,1228 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ IEEE ROBOTICS AND AUTOMATION LETTERS. PREPRINT VERSION. JANUARY, 2023
2
+ 1
3
+ Coarse-to-fine Hybrid 3D Mapping System with
4
+ Co-calibrated Omnidirectional Camera and
5
+ Non-repetitive LiDAR
6
+ Ziliang Miao1, Buwei He1, Wenya Xie1, Wenquan Zhao1, Xiao Huang1, Jian Bai2, and Xiaoping Hong1
7
+ Abstract—This paper presents a novel 3D mapping robot with
8
+ an omnidirectional field-of-view (FoV) sensor suite composed of a
9
+ non-repetitive LiDAR and an omnidirectional camera. Thanks to
10
+ the non-repetitive scanning nature of the LiDAR, an automatic
11
+ targetless co-calibration method is proposed to simultaneously
12
+ calibrate the intrinsic parameters for the omnidirectional camera
13
+ and the extrinsic parameters for the camera and LiDAR, which
14
+ is crucial for the required step in bringing color and texture
15
+ information to the point clouds in surveying and mapping
16
+ tasks. Comparisons and analyses are made to target-based
17
+ intrinsic calibration and mutual information (MI)-based extrinsic
18
+ calibration, respectively. With this co-calibrated sensor suite,
19
+ the hybrid mapping robot integrates both the odometry-based
20
+ mapping mode and stationary mapping mode. Meanwhile, we
21
+ proposed a new workflow to achieve coarse-to-fine mapping,
22
+ including efficient and coarse mapping in a global environment
23
+ with odometry-based mapping mode; planning for viewpoints in
24
+ the region-of-interest (ROI) based on the coarse map (relies on the
25
+ previous work [1]); navigating to each viewpoint and performing
26
+ finer and more precise stationary scanning and mapping of the
27
+ ROI. The fine map is stitched with the global coarse map,
28
+ which provides a more efficient and precise result than the
29
+ conventional stationary approaches and the emerging odometry-
30
+ based approaches, respectively.
31
+ Index Terms—Mapping, Robotic Systems, Omnidirectional
32
+ Vision, Calibration and Identification, SLAM.
33
+ I. INTRODUCTION
34
+ T
35
+ HREE-DIMENSIONAL scanning (obtain the raw points)
36
+ and mapping (register or stitch the points into a
37
+ point cloud map) are becoming increasingly important in
38
+ robotics [2], digital construction [3], and virtual reality [4],
39
+ where digitization of the physical 3D space could provide
40
+ tremendous insights in modeling, planning, management,
41
+ optimization, and quality assurance. Photogrammetry has been
42
+ developed to capture the 3D world. However, its application
43
+ has been limited in aviation settings where accurate GPS
44
+ Manuscript received: Nov. 21, 2022; Revised Jan. 19, 2023; Accepted Jan.
45
+ 28, 2023.
46
+ This paper was recommended for publication by Editor Javier Civera
47
+ upon
48
+ evaluation
49
+ of
50
+ the
51
+ Associate
52
+ Editor
53
+ and
54
+ Reviewers’
55
+ comments.
56
+ This work was supported by Shenzhen Science and Technology Project
57
+ (JSGG20211029095803004,
58
+ JSGG20201103100401004)
59
+ and
60
+ SUSTech
61
+ startup fund. (Ziliang Miao and Buwei He contributed equally to this work;
62
+ Corresponding author: Xiaoping Hong)
63
+ 1These authors are with School of System Design and Intelligent
64
+ Manufacturing (SDIM), Southern University of Science and Technology
65
+ (SUSTech),
66
+ China
67
68
69
+ 2Jian Bai is with State Key Laboratory of Modern Optical Instrumentation,
70
+ Zhejiang University, China
71
+ Digital Object Identifier (DOI): see top of this page.
72
+ RTK signals are required. Recently, the need for large-scale
73
+ mapping of building environments has been rising, mainly
74
+ due to the requirements from Building Information Modeling
75
+ (BIM) systems. Thanks to the availability of emerging 3D
76
+ robotic LiDAR sensors [5], [6], Mobile Laser Scanner (MLS)
77
+ systems are increasingly adopted [7] (Fig. 1a, #3 and #4),
78
+ where point clouds from these sensors could be registered
79
+ to the global frame through sensor motion estimation (i.e.,
80
+ odometry) at each instance. However, due to the movement
81
+ nature, such approaches largely depend on estimations of
82
+ temporal characteristics such as translation and rotation, or
83
+ spatial characteristics such as sensor FoV and landmark
84
+ coverages. The results vary from scan to scan with no
85
+ guarantee of precision. Hence, a more robust and precise
86
+ method is desired.
87
+ On
88
+ the
89
+ other
90
+ hand,
91
+ the
92
+ traditional
93
+ Terrestrial
94
+ Laser
95
+ Scanner (TLS) has been employed in many precision-stringent
96
+ applications (Fig. 1a, #1 and #2). The TLS-based stationary
97
+ mapping is usually inefficient (due to the accurate but slow
98
+ laser rotation) but could provide precise results. Viewpoints
99
+ (also known as stationary scanning locations) need to be
100
+ carefully planned to ensure the spatial coverage and enough
101
+ overlapping regions of adjacent viewpoints to make accurate
102
+ point cloud stitching [8], but on the other hand, as fewer as
103
+ possible to reduce scanning time and cost. The planning for
104
+ viewpoints largely relies on the overall layout of the scene,
105
+ which has been done by human experience so far [9].
106
+ Fig. 1. 3D mapping systems: (a) the current TLS (#1 FARO Focus Premium, #2 LEICA BLK360) and MLS (#3 LEICA BLK2GO, #4 NavVis VLX) systems; (b) the proposed hybrid mapping robotic system, consisting of an omnidirectional camera, a Livox Mid-360 LiDAR with integrated IMU, a gimbal mount, and a synchronized mobile platform.
122
+ Combining the strengths of both worlds would be ideal for large-scale 3D mapping applications. As shown in Fig. 1b,
124
+ the proposed hybrid mapping robot is developed carrying
130
+ a gimbal mount and a novel sensor suite consisting of an
131
+ omnidirectional non-repetitive Livox Mid-360 LiDAR1 and
132
+ an omnidirectional camera. The sensors’ FoV and the non-
133
+ repetitive scanning nature are shown in Fig. 2a. In the
134
+ odometry-based mapping mode, the sensor suite is kept
135
+ horizontal by fixing the gimbal mount to coarsely and
136
+ efficiently map the entire space with the mobile platform.
137
+ Based on the coarse map, a few viewpoints are planned for the
138
+ stationary mapping of targeted ROIs. In the stationary mapping
139
+ mode, the robot will navigate and stay still at each viewpoint,
140
+ performing 360°×300° scanning by traversing the vertical FoV
141
+ through the gimbal mount. These precise scans are registered
142
+ with each other and then stitched with the pre-generated coarse
143
+ map forming a global map with fine ROIs.
144
+ The main contributions of this work are as follows:
145
+ 1) The
146
+ first
147
+ hybrid
148
+ 3D
149
+ mapping
150
+ robot
151
+ system
152
+ that
153
+ integrates odometry-based and stationary mapping modes
154
+ is proposed. The consistency of point clouds in two
155
+ modes can be guaranteed with the single omnidirectional
156
+ non-repetitive Livox Mid-360 LiDAR.
157
+ 2) An omnidirectional camera is introduced in the proposed
158
+ system to complement the omnidirectional LiDAR.
159
+ A
160
+ novel
161
+ automatic
162
+ targetless
163
+ co-calibration
164
+ method
165
+ is proposed to simultaneously calibrate the intrinsic
166
+ parameters and the extrinsic parameters.
167
+ 3) An automated coarse-to-fine hybrid mapping workflow is
168
+ demonstrated, including odometry-based coarse mapping
169
+ in the global environment, planning for the viewpoints
170
+ in the ROIs, and finer stationary mapping at viewpoints.
171
+ The entire project is open-sourced on GitHub2 to aid the
172
+ development of this emerging field.
173
+ II. RELATED WORKS
174
+ A. Mapping Solutions
175
+ 3D mapping solutions are of great interest in many
176
+ emerging fields [3]. TLS-based and MLS-based approaches
177
+ are commonly adopted.
178
+ The traditional TLS-based approach uses a heavy-duty
179
+ single-laser scanner and traverses the entire FoV through
180
+ step-wise rotations about the horizontal and vertical axes.
181
+ It provides sufficiently dense points with good precision.
182
+ However, this method is slow and laborious. It has to be
183
+ repeated on many viewpoints, which need to be chosen wisely
184
+ because a lack of viewpoints will cause missing information
185
+ in the desired ROI, while the excess of viewpoints will lead
186
+ to longer scanning hours and poorer efficiency. Currently,
187
+ viewpoints planning relies on human intuition or experiences,
188
+ making it challenging to plan effectively in large and complex
189
+ working environments like the construction scenes [9].
190
+ On the contrary, the MLS-based approach provides real-
191
+ time scanning and mapping results as the LiDAR moves.
192
+ The current MLS devices are classified by their usage
193
+ configurations, such as handheld (Fig. 1a, #3), backpack
194
+ 1The authors gratefully acknowledge Livox Technology for the equipment
195
+ support.
196
+ 2https://github.com/ZiliangMiao/Hybrid Mapping Cocalibration.git
197
+ (Fig. 1a, #4), and trolley. Most of these mobile systems rely on
198
+ conventional LiDARs (16, 32, or 64 lines) and construct the
199
+ 3D map by registering the point cloud with LiDAR odometry
200
+ or LiDAR-IMU odometry. Such mobile systems greatly speed
201
+ up the mapping process without planning for viewpoints.
202
+ However, it cannot replace the TLS-based approaches due to
203
+ insufficient mapping precision and sparse point clouds [3]. The
204
+ repetitive scanning nature of mechanical LiDAR is unsuitable
205
+ for stationary scanning due to limited FoV coverage (20%
206
+ coverage for 32-line LiDAR). Therefore, the indispensable
207
+ motion for more coverage will cause errors in pose estimation,
208
+ which are accumulated throughout the process, limiting the
209
+ usage in high-precision applications.
210
+ Both TLS-based and MLS-based approaches have their
211
+ unique advantages and drawbacks. It is desired to devise
212
+ a mechanism to combine both modes. For example, a
213
+ combination of TLS and MLS is used to solve the registration
214
+ problem between non-overlapping spaces [8] or use TLS scans
215
+ as references to MLS mapping registration to achieve low
216
+ mapping errors [10]. Moreover, MLS is also used to provide a
217
+ 3D map to solve the viewpoints planning problem of TLS [9].
218
+ However, all these methods are based on heterogeneous
219
+ sensors for different modes, with different synchronization,
220
+ data structure, and protocols, which are difficult to construct
221
+ a one-stop mapping robot with a streamlined and automated
222
+ workflow.
223
+ The unique non-repetitive scanning nature of the Livox
224
+ LiDAR provides a combination of an instantaneous high
225
+ density at a short time interval for odometry (with effective
226
+ point density as 32-line LiDAR within 0.1 seconds) and an
227
+ image-level resolution at relatively long time intervals for
228
+ scanning (within 3 seconds, as shown in Fig. 2b), which makes
229
+ it surprisingly suitable for such hybrid working mechanism.
230
+ The
231
+ feature
232
+ provides
233
+ sufficiently
234
+ good
235
+ performance
236
+ in
237
+ odometry scenarios [11] and a dense FoV coverage for image-
238
+ like feature processing [6], [12], [13]. In this paper, the two
239
+ working modes are integrated into the same robot, ensuring
240
+ overall mapping efficiency and precision with an automated
241
+ coarse-to-fine hybrid mapping workflow.
242
+ B. Calibration Methods
243
+ In addition to LiDAR, Cameras are usually required
244
+ in
245
+ 3D
246
+ mapping
247
+ systems
248
+ to
249
+ give
250
+ an
251
+ overview
252
+ of
253
+ the
254
+ mapped environment [14]. Cameras could provide high-quality
255
+ geometric, color, and texture information [15], which enables
256
+ further modeling and rendering [16] of the point clouds
257
+ and permits tasks in object detection, segmentation, and
258
+ classification [17]. Meanwhile, for autonomous navigation, the
259
+ camera is also vital to visual-LiDAR odometry through sensor
260
+ fusion [4]. All these functions would rely on the accurate
261
+ calibration of the intrinsic parameters of the camera and
262
+ extrinsic parameters between the cameras and LiDAR [15].
263
+ Traditionally, multiple cameras are usually required to
264
+ be complementary to the omnidirectional FoV of LiDAR.
265
+ This work employs an omnidirectional camera over the
266
+ traditional multi-camera vision to avoid bulky construction,
267
+ high cost, shutter synchronization, and cascaded extrinsic
268
+
269
+ calibrations. The intrinsic and extrinsic parameters of this
272
+ novel omnidirectional sensor suite are essentially needed.
273
+ The intrinsic parameters of the omnidirectional camera
274
+ must be well calibrated since these types usually possess
275
+ much larger and more complex distortions than pin-hole
276
+ cameras [18]. In [18]–[20], higher-order polynomial-based
277
+ intrinsic
278
+ models
279
+ are
280
+ introduced
281
+ with
282
+ many
283
+ degrees
284
+ of
285
+ freedom to obtain satisfactory results. A popular OcamCalib
286
+ toolbox based on the checkerboard is provided [19]. These
287
+ methods could be susceptible to over-fitting with high-order
288
+ polynomials and often require evenly distributed artificial
289
+ targets and dense features across the entire space. Typically,
290
+ these calibration processes are manual and could lead to
291
+ tedious procedures with a large margin of error. Additionally,
292
+ the omnidirectional camera in our work is constructed with
293
+ a refractive-reflective geometry to capture a ring-like FoV
294
+ beyond 180°. This construction makes intrinsic calibration
295
+ even more difficult. An accurate, automatic, and targetless
296
+ calibration method is desired.
297
+ The
298
+ extrinsic
299
+ calibration
300
+ method
301
+ between
302
+ the
303
+ omnidirectional camera and LiDAR has only been explored
304
+ in [21] using edge correspondence to match point clouds
305
+ and images. The bearing angle images highlight the edge
306
+ features, which are manually positioned. Targetless extrinsic
307
+ calibration methods for monocular cameras and LiDAR have
308
+ been developed recently. With the non-repetitive LiDARs,
309
+ CamVox [12] could project the image-like LiDAR point
310
+ clouds onto the camera image plane and extract edge pixels
311
+ using the grayscale images based on reflectivity and depth.
312
+ The method proposed in [13] uses voxels to extract the edge
313
+ points in 3D space and classifies the edges based on depth
314
+ continuity. Both methods work well with conventional pin-hole
315
+ cameras and need to be extended toward the omnidirectional
316
+ cameras with significantly larger distortions. An additional
317
+ targetless extrinsic calibration method employing mutual
318
+ information (MI) is also developed [22], which maximizes
319
+ the intensity correlations of LiDAR and camera. However,
320
+ the misrepresented information caused by lighting conditions,
321
+ surface
322
+ reflection
323
+ properties,
324
+ and
325
+ spectral
326
+ reflectance
327
+ disagreement could result in worse calibration than the
328
+ edge-based methods.
329
+ In the proposed targetless co-calibration method, the high-
330
+ resolution dense point cloud of the non-repetitive scanning
331
+ LiDAR gives abundant and ground-truth-level features, which
332
+ eliminates the artificial targets and manual involvement and
333
+ reduces the error caused by insufficient coverage and sparse
334
+ features of the targets. With the co-calibration method, the
335
+ intrinsic and extrinsic parameters are obtained simultaneously
336
+ and can be re-calibrated fast and reliably in work scenes.
337
+ III. PROPOSED SYSTEM
338
+ A. Co-calibrated Omnidirectional Sensor Suite
339
+ The Livox Mid-360 LiDAR has a 360° × 55° FoV and
340
+ features a non-repetitive scanning pattern, with increasingly
341
+ denser points over time (the coverage of FoV approaches
342
+ 100%), as shown in Fig. 2b. The unique feature specifically
343
+ benefits both odometry-based and stationary mapping modes.
344
+ The omnidirectional camera provides color information of
345
+ the surroundings and has a corresponding 360° × 70° FoV
346
+ (Fig. 2a). Both sensors are synchronized and are mounted
347
+ on a two-axis gimbal (Fig. 1) to extend the scanning FoV
348
+ to 360° × 300°.
349
+ Fig. 2. Configuration of the sensors: (a) omnidirectional camera (about -10° to +60° vertical FoV) and Livox Mid-360 LiDAR (about -7° to +52°), both on the gimbal mount; (b) point cloud accumulation at T = 0.1 s, 0.5 s and 3.0 s due to the non-repetitive scanning nature of the Livox LiDAR. (Color represents reflectivity of LiDAR points.)
368
+ Fig. 3. Proposed co-calibration process. * The grayscale value indicates the average reflectivity of the projected LiDAR points within a pixel.
380
+ The co-calibration simultaneously obtains the intrinsic
381
+ (camera) and extrinsic (camera-LiDAR) parameters, defined
382
+ respectively as Θ ≜ [u0, v0, c, d, e, a0, . . . , an]T and ∆ ≜
383
+ [α, β, γ, tx, ty, tz]T, which will be introduced later. With
384
+
385
+ the unique benefit of the non-repetitive scanning LiDAR,
394
+ an extremely dense point cloud is always available, which
395
+ provides a 3D ground truth of the environment. This high-
396
+ resolution point cloud could be projected onto the 2D image
397
+ plane with pixel values from LiDAR reflectivity, from which
398
+ clear edge features could be extracted. To align the edges
399
+ from LiDAR and the camera, the co-calibration iteratively
400
+ maximizes the correspondence of projected LiDAR edge
401
+ points with the omnidirectional camera edge pixels. Kernel
402
+ Density Estimation (KDE) is employed to estimate the camera
403
+ edge distribution with different distribution smoothness (by
404
+ varying bandwidth coefficient) to obtain global optimum.
405
+ The entire process of co-calibration can be divided into the
406
+ following two steps (Fig. 3):
407
+ 1) Edge Extraction: Edge extractions are performed for
408
+ both camera and LiDAR. For the camera, exposure fusion [23]
409
+ is adopted to enhance the dynamic range of images to capture
410
+ more details for low and high-brightness objects. Canny edge
411
+ extraction [24] is performed on the enhanced image, with
412
+ edge points Q = [q1, q2 . . . , qn]. For LiDAR, since the
413
+ FoV is smaller, point clouds scanned from different pitch
414
+ angles are stitched together. The stitching is performed by the
415
+ generalized iterative closest point (GICP) algorithm [25] with
416
+ the initial transformation given by the state of the gimbal.
417
+ The stitched point cloud with reflectivity is then projected
418
+ to an image plane with the azimuthal angle and elevation
419
+ angle as the coordinates, generating a grayscale image by
420
+ taking the average reflectivity of the projected LiDAR points
421
+ within each pixel. The Canny edge extraction is performed
422
+ on this grayscale image. Uniform sampling is performed in
423
+ each stage to remove the non-uniform point distribution. The
424
+ edge pixels are then identified in the original 3D point cloud
425
+ P = [LP1, LP2 . . . , LPm].
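+ A minimal sketch of the two edge-extraction branches, assuming OpenCV's Mertens exposure fusion and Canny; the thresholds, image sizes and spherical-grid resolution are illustrative choices, not the paper's settings.
+ import cv2
+ import numpy as np
+ 
+ def camera_edges(exposure_stack):
+     # fuse a bracketed exposure stack (Mertens exposure fusion), then run Canny
+     fused = cv2.createMergeMertens().process(exposure_stack)   # float32, roughly [0, 1]
+     fused8 = np.clip(fused * 255.0, 0, 255).astype(np.uint8)
+     gray = cv2.cvtColor(fused8, cv2.COLOR_BGR2GRAY)
+     return cv2.Canny(gray, 50, 150)
+ 
+ def lidar_reflectivity_edges(points, reflectivity, h=512, w=2048):
+     # project LiDAR points onto an (elevation, azimuth) grid, average the
+     # reflectivity per pixel, and run Canny on the resulting grayscale image
+     x, y, z = points[:, 0], points[:, 1], points[:, 2]
+     az = np.arctan2(y, x)
+     el = np.arcsin(z / np.linalg.norm(points, axis=1))
+     col = ((az + np.pi) / (2 * np.pi) * (w - 1)).astype(int)
+     row = ((np.pi / 2 - el) / np.pi * (h - 1)).astype(int)
+     refl_sum, count = np.zeros((h, w)), np.zeros((h, w))
+     np.add.at(refl_sum, (row, col), reflectivity)
+     np.add.at(count, (row, col), 1.0)
+     gray = np.where(count > 0, refl_sum / np.maximum(count, 1), 0)
+     gray = (255 * gray / max(gray.max(), 1e-6)).astype(np.uint8)
+     return cv2.Canny(gray, 50, 150)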
426
+ 2) Iterative Optimization:
427
+ The iterative optimization is
428
+ performed in the omnidirectional image space. The LiDAR
429
+ edge points are projected to the image coordinates through
430
+ the following equations:
431
+ CP = C
432
+ LT(LP; ∆) = C
433
+ LR · LP + C
434
+ Lt, LP ∈ P,
435
+ (1)
436
+ p = Π(CP; Θ) =
437
+ �c
438
+ d
439
+ e
440
+ 1
441
+ � �r cos φ − u0
442
+ r sin φ − v0
443
+
444
+ ,
445
+ (2)
446
+ r = F(θ; a0, . . . , an) = a0 + a1θ1 + . . . + anθn,
447
+ (3)
448
+ θ = arccos(
449
+ z
450
+
451
+ x2 + y2 + z2 ),
452
+ (4)
453
+ φ = arccos(
454
+ x
455
+
456
+ x2 + y2 ),
457
+ (5)
458
+ where CP and LP denote the 3D point coordinates in camera
459
+ and LiDAR coordinate systems, respectively, and they are
460
+ related through the extrinsic transformation C
461
+ LT(LP; ∆), i.e.,
462
+ rotation C
463
+ LR and translation C
464
+ Lt with the extrinsic parameters
465
+ ∆. The symbol p denotes the location of the point in the
466
+ camera image space, and Π(CP; Θ) expresses the intrinsic
467
+ transformation from
468
+ CP
469
+ =
470
+ [x, y, z]T (3D point) to p
471
+ (2D point), with the distortion correction matrix
472
+
473
+ c
474
+ d
475
+ e
476
+ 1
477
+
478
+ .
479
+ The pixel radius r from the image center [u0, v0]T is
480
+ transformed from the elevation angles θ by a polynomial
481
+ function F(θ; a0, . . . , an) in the camera model; θ and φ are the
482
+ elevation and azimuth angle of CP (Note the omnidirectional
483
+ camera features a ring image).
484
+ To facilitate the alignment between the camera edges
485
+ and the LiDAR edges, the camera edge distribution with
486
+ nonparametric probability density function is constructed
487
+ with the Gaussian Kernel by Kernel Density Estimation
488
+ (KDE) [26]. The optimization is based on maximizing the
489
+ probabilities of the projected LiDAR edge points onto the
490
+ camera edge distribution:
491
+ ˆΘ, ˆ∆ = arg max
492
+ Θ, ∆
493
+ 1
494
+ n
495
+ m
496
+
497
+ i=1
498
+ || ˆf(pi; h, Q)||2,
499
+ (6)
500
+ ˆf(pi; h, Q) = 1
501
+ nh
502
+ n
503
+
504
+ j=1
505
+ K
506
+ �pi − qj
507
+ h
508
+
509
+ ,
510
+ (7)
511
+ K(x) =
512
+ 1
513
+
514
+ 2π det(Σ)e− 1
515
+ 2 (x−µ)TΣ−1(x−µ),
516
+ (8)
517
+ µ = [0, 0]T, Σ = I2×2,
518
+ (9)
519
+ where h denotes the bandwidth of the KDE.
520
+ Several rounds of iterative optimization with reducing
521
+ bandwidth are carried out to approach the correct calibration
522
+ values smoothly. At the start of the process, the bandwidth
523
+ is set at a large number to get a continuous and smooth
524
+ cost function, which allows the optimization to approach
525
+ the optimal region quickly without many local optima.
526
+ Then the bandwidth is reduced gradually to increase the
527
+ gradient, ensuring a sensitive optimization around the optimum
528
+ (optimization of the x-axis translation is shown in Fig. 4).
529
+ 0.20 0.25
530
+ 0.30
531
+ 0.35
532
+ Bandwidth=16
533
+ Bandwidth=4
534
+ Bandwidth=1
535
+ 1
536
+ 0
537
+ Normalized cost
538
+ Translation in the x-axis (m)
539
+ (a)
540
+ 0.265 0.275 0.285 0.295
541
+ Translation in the x-axis (m)
542
+ 1
543
+ 2
544
+ 4
545
+ 3
546
+ (b)
547
+ Fig. 4.
548
+ Iterative optimization with the reducing KDE bandwidth: (a) the
549
+ normalized cost w.r.t. the translation in the x-axis under the different values
550
+ of bandwidth; (b) zoom in to a sub-region of (a) to demonstrate the iterative
551
+ process.
552
+ The
553
+ optimization
554
+ uses
555
+ the
556
+ Levenberg-Marquardt
557
+ method
558
+ implemented
559
+ in
560
+ Ceres-solver
561
+ [27].
562
+ For
563
+ computational
564
+ efficiency,
565
+ the
566
+ parabolic
567
+ Epanechnikov
568
+ kernel K(x) =
569
+ 3
570
+ 4(1 − xTx) can be substituted for the
571
+ Gaussian kernel.
572
+ B. Coarse-to-fine Hybrid Mapping
573
+ The coarse-to-fine hybrid mapping workflow is outlined in
574
+ Fig. 5. With the co-calibration and synchronization, all the
575
+ obtained LiDAR points are represented in both coordinates
576
+ and color. Odometry/SLAM methods are used as a backbone
577
+ to provide localization in both coarse and fine mapping. We
578
+ used FAST-LIO (LiDAR-Inertial odometry [11]) in our current
579
+ system, but the choice is not limited; other odometry/SLAM methods could be utilized as well.
+ Fig. 5. Proposed coarse-to-fine hybrid mapping workflow. The odometry/SLAM serves as a
+ backbone to provide localization results.
+ At the coarse mapping
593
+ stage, the robot obtains the localization and motion results
594
+ from the odometry, from which the scanned points are
595
+ converted and registered to the global map. Based on the
596
+ coarse map, a few viewpoints for stationary mapping are
597
+ planned for the targeted ROIs, following previous work that considers constraints such as
+ range, grazing angle, FoV, and overlap [1]. The robot then navigates to the generated
+ viewpoints one-by-one through the backbone odometry/SLAM and performs fine mapping at each
+ of them.
602
+ At each viewpoint, stationary scans are performed at several gimbal states, with overlapping
+ FoV regions between adjacent states, together covering a large overall FoV (360° × 300°).
+ These point clouds are pre-registered based on the
606
+ gimbal angles (as initial angles) at each viewpoint. The scans
607
+ from all the viewpoints are then combined with the global
608
+ coarse map based on robot localization (again provided by the
609
+ LiDAR-Inertial odometry) as the initial state for optimization.
610
+ Finally, the GICP [25] algorithm is used to optimize all the
611
+ localization results and gimbal states and refine all stationary
612
+ scans and the coarse map to form the fine map. Notably, either odometry or SLAM methods can
+ serve as the localization backbone; although SLAM adds loop-closure capability that plain
+ odometry lacks, the final GICP optimization refines the localization to a much better result
+ in either case.
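+ A condensed sketch of the per-viewpoint refinement step is given below. It uses Open3D's
+ point-to-point ICP as a stand-in for the GICP [25] used in the actual system, and the 5 cm
+ voxel size and 0.5 m correspondence distance are assumed values.
+ ```python
+ import numpy as np
+ import open3d as o3d
+ 
+ def refine_viewpoint(scans, gimbal_poses, odom_pose, coarse_map_pts, voxel=0.05):
+     """Pre-register the stationary scans of one viewpoint with their gimbal
+     angles, then refine the merged cloud against the coarse map, starting
+     from the LiDAR-inertial odometry pose of that viewpoint."""
+     merged = o3d.geometry.PointCloud()
+     for pts, T in zip(scans, gimbal_poses):          # T: 4x4 initial transform from the gimbal angles
+         pc = o3d.geometry.PointCloud()
+         pc.points = o3d.utility.Vector3dVector(np.asarray(pts, dtype=float))
+         pc.transform(T)
+         merged += pc
+     merged = merged.voxel_down_sample(voxel)
+ 
+     target = o3d.geometry.PointCloud()
+     target.points = o3d.utility.Vector3dVector(np.asarray(coarse_map_pts, dtype=float))
+ 
+     # Stand-in for the GICP refinement of the viewpoint pose against the coarse map.
+     result = o3d.pipelines.registration.registration_icp(
+         merged, target, 0.5, np.asarray(odom_pose, dtype=float))
+     return result.transformation
+ ```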
617
+ IV. EXPERIMENTS AND RESULTS
618
+ A. Co-calibration Results
619
+ The effectiveness of the proposed co-calibration method is
620
+ demonstrated in three natural scenes, as shown in Fig. 6. The
621
+ projection error (in pixels) is defined as:
622
+ e = (1/n) Σ_{i=1}^{n} d(pi; Q), (10)
629
+ where d computes the distance from the projected LiDAR point pi to the nearest point in the
+ target set Q. Note that the
631
+ largest 10% of the distances are considered outliers with no
632
+ correspondences and are eliminated. Overall, the co-calibration
633
+ works well in all scenes with projection errors on the order of
634
+ 3 pixels or less. The colorized point clouds after co-calibration
635
+ also show much better consistency, as seen in Fig. 6b.
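+ A minimal sketch of this trimmed metric (Eq. 10), assuming the LiDAR edge points have already
+ been projected to pixel coordinates:
+ ```python
+ import numpy as np
+ from scipy.spatial import cKDTree
+ 
+ def trimmed_projection_error(lidar_edge_px, cam_edge_px, trim=0.10):
+     """Mean pixel distance from each projected LiDAR edge point to its nearest
+     camera edge point, discarding the largest `trim` fraction of the distances
+     as outliers without correspondences."""
+     dists, _ = cKDTree(cam_edge_px).query(lidar_edge_px)
+     keep = int(np.ceil((1.0 - trim) * len(dists)))
+     return float(np.sort(dists)[:keep].mean())
+ ```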
636
+ Fig. 6. Co-calibration results in three scenes: (a) aligned LiDAR edge points (red) on camera
+ images; (b) comparison of colorized point clouds before and after co-calibration, with the
+ average projection errors in pixels (7.15 → 3.17, 7.36 → 2.85, and 7.08 → 2.63).
641
+ We further compare our co-calibration results with the classical target-based intrinsic
+ calibration [19], [28], and the state-of-the-art MI-based extrinsic calibration [22],
+ respectively, as shown below.
651
+ 1) Analysis of the Intrinsic Results: As a comparison, the
652
+ target-based intrinsic calibration for omnidirectional cameras
653
+ is performed [19]. Thirty checkerboards are manually selected
654
+ as a reference set (Fig. 7a). As the number and position
655
+ of the targets affect the calibration profoundly, we evaluate
656
+ the calibration result as a function of the targets’ number
657
+ and randomly select a specific number of checkerboards
658
+ from the reference set for calibration (repeated 100 times
659
+ independently). The mean reprojection error is used to
660
+ represent the calibration accuracy. The results in Fig. 7b
661
+ show that as the number of checkerboards increases, the calibration becomes more accurate and
+ better converged, likely because more checkerboards increase the FoV coverage and the feature
+ point density and thus the effectiveness of the
665
+ target-based method. However, it is labor-intensive to place
666
+ many checkerboards uniformly and densely around the sensor
667
+ and manually select the appropriate ones, which may be
668
+ impossible in the field. The co-calibration method, on the
669
+ contrary, employs dense LiDAR points as abundant, well-
670
+ covered, and accurate features; and the elimination of artificial
671
+ targets and human involvement enables an accurate, efficient,
672
+ and field-friendly approach. Our co-calibration result yields
673
+ a significantly improved performance on the same reference
674
+ set, compared with the conventional method (orange and blue boxplots in Fig. 7b, respectively).
676
+ 2) Analysis of the Extrinsic Results: The mutual information (MI)-based extrinsic calibration
+ method utilizes the fact that the reflectivity of LiDAR points and the corresponding grayscale
+ intensity values of camera pixels are correlated, since both capture the spectral response of
+ the object at light frequencies (LiDAR 905 nm, camera 400-800 nm) which are usually similar.
+ These values
693
+ are then used to calibrate the extrinsic parameters between the camera and LiDAR by maximizing
+ the MI of the two distributions [22]. Fig. 8 shows the comparison of the two optimization
+ methods in terms of the normalized costs over the different extrinsic parameters. The proposed
+ co-calibration method shows a much more sensitive and reliable gradient in the cost function
+ near the optimum than the MI-based method.
+ Fig. 7. Comparison with the target-based intrinsic calibration: (a) the poses of the thirty
+ checkerboards; (b) boxplots of projection errors of target-based calibration (blue) and the
+ proposed co-calibration (orange).
769
+ Fig. 8. Comparisons of the normalized cost function between the proposed method and the
+ MI-based method for each extrinsic parameter (rotation and translation in the x-, y-, and
+ z-axes). The optimal values should lie in the gray areas estimated based on manufacturing.
835
+ The inaccurate calibration result of the MI-based method
836
+ could be attributed mainly to three reasons: the lighting
837
+ conditions, the surface reflection properties, and the spectral
838
+ reflectance disagreement. The camera’s light source Ii is the
839
+ external ambient lighting which does not change with the
840
+ camera pose. On the contrary, LiDAR uses an active laser from
841
+ the sensor and therefore differs significantly from the camera,
842
+ as shown in Fig. 9a. Besides the lighting, the surfaces of the
843
+ objects are important. The detected intensity could be modeled
844
+ as follows:
845
+ Ir = Kd · Ii · f(θ),
846
+ (11)
847
+ where Ir and Ii indicate the reflection intensity and incident
848
+ intensity, respectively, Kd
849
+ is the reflectance, and f(θ)
850
+ describes the surface properties of the object with respect to
851
+ incident angle θ. For most objects, the surface is Lambertian
852
+ (diffusive), and in that case, f(θ) = cos θ. However, many
853
+ surfaces do not follow this property: the reflection can be specular, so that the LiDAR
+ collects hardly any signal, or retroreflective, so that most of the energy is directed back
+ toward the LiDAR and yields a strong intensity, as on traffic signs and warning stickers,
+ whose LiDAR intensities therefore show a contrast that differs from the camera intensities
+ (red boxes in Fig. 9b). Additionally,
860
+ the spectral reflectance of objects at various light wavelengths
861
+ could be different. For instance, materials composed of plant
862
+ fibers show a large reflectance at around 905 nm, even those
863
+ dyed in black colors. As a result, no contrast could be seen in
864
+ LiDAR intensities of materials with different colors, as shown
865
+ in green boxes in Fig. 9b. All three factors mentioned above
866
+ could cause significant differences in intensity response from
867
+ the LiDAR and the camera and reduce the applicability of the
868
+ MI-based method.
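+ The qualitative behaviour described above can be mimicked with a toy version of Eq. (11); the
+ branch structure and the retroreflective case below are illustrative assumptions, not a
+ physical model from the paper.
+ ```python
+ import numpy as np
+ 
+ def lidar_return_intensity(K_d, I_i, theta, surface="lambertian"):
+     """Toy Eq. (11): I_r = K_d * I_i * f(theta) for three surface types."""
+     theta = np.asarray(theta, dtype=float)
+     if surface == "lambertian":
+         f = np.cos(theta)                                  # diffuse surface
+     elif surface == "specular":
+         f = np.where(np.isclose(theta, 0.0), 1.0, 0.0)     # almost nothing returns to the LiDAR
+     else:  # "retro": traffic signs, warning stickers
+         f = np.ones_like(theta)                            # most energy comes straight back
+     return K_d * I_i * f
+ ```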
869
+ Fig. 9. Analysis of the MI-based extrinsic calibration: (a) the types of reflection of the
+ LiDAR and camera w.r.t. the rough surface and the retroreflective surface; (b) the
+ inconsistent intensity cases between LiDAR and camera, including retroreflection cases (red
+ boxes) and the special spectral reflectance cases (green boxes).
894
+ B. Coarse-to-fine Hybrid Mapping Results
895
+ The proposed coarse-to-fine hybrid mapping method is
896
+ demonstrated in an academic building on the SUSTech
897
+ campus. The global coarse map is generated by Fast-LIO in
898
+ ten minutes, and the ROI is selected based on this global
899
+ coarse map (Fig. 10a). In this case, five viewpoints are properly planned in this ROI
+ (Fig. 10b), and stationary scanning is performed for three minutes at each of them (Fig. 10c).
902
+ Plane thickness could be used as a quantitative metric for
903
+ precision evaluation and comparison between coarse and fine
904
+ mapping. Local planes with a small third eigenvalue λ3 are
905
+ selected by diagonalizing the covariance matrix. Assuming
906
+ the points along the plane’s normal direction follow the
907
+ Gaussian distribution (corresponding to the third eigenvalue
908
+ λ3 with the normal direction of the plane defined by its
909
+ eigenvector), we could set the thickness of the plane as 4√λ3.
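+ A short sketch of this thickness metric; the neighbourhood selection is left to the caller,
+ and 4·sqrt(λ3) covers roughly ±2 standard deviations of the assumed Gaussian spread along the
+ normal.
+ ```python
+ import numpy as np
+ 
+ def plane_thickness(patch_points):
+     """Thickness of a locally planar patch as 4 * sqrt(lambda3), where lambda3
+     is the smallest eigenvalue of the patch covariance matrix and its
+     eigenvector approximates the plane normal."""
+     pts = np.asarray(patch_points, dtype=float)
+     cov = np.cov(pts.T)                       # 3x3 covariance of the patch
+     lam3 = np.linalg.eigvalsh(cov)[0]         # eigenvalues in ascending order
+     return 4.0 * np.sqrt(lam3)
+ ```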
910
913
+ Fig. 10. Coarse-to-fine hybrid mapping: (a) odometry-based global coarse mapping; (b) coarse
+ map of the selected ROI, with markers indicating the planned viewpoints; (c) fine map of the
+ ROI, where the color indicates the scans from the respective viewpoints.
921
+ TABLE I
+ SPECS COMPARISON OF CURRENT MAPPING SYSTEMS
+ (columns: Proposed | #1 FARO Focus Premium 150 | #2 LEICA BLK360 | #3 LEICA BLK2GO | #4 NavVis VLX)
+ Type: Hybrid Mapping | TLS | TLS | MLS | MLS
+ FoV: 360° × 300° | 360° × 300° | 360° × 300° | 360° × 270° | 360° × 30° (×2)
+ Range: 0.1-40 m | 0.5-150 m | 0.5-45 m | 0.5-25 m | 0.9-100 m
+ PPS: 200,000 pts/s | 2,000,000 pts/s | 680,000 pts/s | 420,000 pts/s | 300,000 pts/s (×2)
+ Precision: ∼40 mm (coarse), ∼20 mm (fine) | ∼1 mm [29] | ∼20 mm [30] | ∼20 mm [30] | 15-50 mm (walls, 80.5%) [31]
+ Accuracy: ∼10 mm (coarse), ∼2 mm (fine) | ∼1 mm [29] | ∼1 mm [30] | ∼30 mm [30] | 15-50 mm (beams, 98.2%) [31]
+ Registration: Odometry + Optimization | Optimization | Optimization | Odometry/SLAM | Odometry/SLAM
+ Work Manner: Mobile Robot | Manual (tripod) | Manual (tripod) | Manual (handheld) | Manual (backpack)
+ Viewpoints Planning: Coarse map-based | Intuition-based | Intuition-based | No need | No need
+ Vision: 1 omni camera | 1 camera | 3 cameras | 3 cameras | 4 cameras
994
+ The coarse and fine maps of the three different scenes are shown in Fig. 11a, where the zoomed
+ views of selected planes (in top view) illustrate the point cloud quality. The quantitative
+ evaluations of the plane thickness (the mapping precision) in these scenes are summarized in
+ Fig. 11b. Besides precision (the spread of the data), accuracy (correctness) is also important
+ to examine. Fig. 11c illustrates the measurement accuracy, compared to results from a TLS
+ system, which we regard as ground truth. It is evident that both the precision and accuracy of
+ fine mapping outperform those of coarse mapping. Although odometry-based coarse mapping
+ performs well in best-case scenarios, fine mapping improves it significantly in the average
+ and worst-case scenarios, which are the main concerns of the surveying and mapping industry.
1009
+ Fig. 11. Comparison of coarse and fine mapping: (a) coarse and fine maps in three scenes
+ (scene #1 is from Fig. 10b; scenes #2 and #3 are new). The left column shows the large-scale
+ coarse map, and the right column shows the zoomed-in coarse and fine maps in top view (to
+ visualize wall thickness) and third-person view (to visualize the scene); (b) mapping
+ precision for the three scenes; (c) mapping accuracy for the three scenes; (d) top view of the
+ colorized fine map; (e) third-person view of the colorized ROI.
+ With the accurate co-calibration results, LiDAR points can be colorized from the image
+ information through the transformation in Eqn. 1 and Eqn. 2. Fig. 11d shows the
1052
+ colorized hybrid mapping, and Fig. 11e illustrates the fine
1053
+ mapping of the zoomed-in ROI. The coarse-to-fine map with great precision and accurate
+ colorization paves the way for higher precision with a single unified setup and workflow. It
+ benefits industries requiring both efficiency and accuracy, such as construction automation
+ and building inspection.
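+ A minimal sketch of the colorization step is shown below; the `project` callable stands in
+ for the projection model of Eqns. (1)-(5), the nearest-pixel lookup is our simplification, and
+ occlusion handling is omitted.
+ ```python
+ import numpy as np
+ 
+ def colorize_points(points_L, image, project):
+     """Attach RGB colours from the omnidirectional image to LiDAR points.
+ 
+     points_L : (N, 3) points in the LiDAR frame.
+     image    : (H, W, 3) camera image.
+     project  : callable mapping one 3D LiDAR point to a pixel (u, v).
+     """
+     h, w = image.shape[:2]
+     colors = np.zeros((len(points_L), 3), dtype=image.dtype)
+     valid = np.zeros(len(points_L), dtype=bool)
+     for i, P in enumerate(points_L):
+         u, v = project(P)
+         ui, vi = int(round(u)), int(round(v))
+         if 0 <= ui < w and 0 <= vi < h:        # nearest-pixel lookup
+             colors[i] = image[vi, ui]
+             valid[i] = True
+     return colors, valid
+ ```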
1058
+ Lastly, a detailed comparison of the proposed system
1059
+ with the current widely used TLS and MLS systems
1060
+ (shown in Fig. 1a) is made in Table I, where several key
1061
1089
+ parameters are listed. The most crucial difference is that the
1090
+ proposed system integrates two working modes in a single
1091
+ streamlined workflow, ensuring overall mapping efficiency
1092
+ and precision/accuracy. All other systems are either TLS, which only works in stationary mode,
+ or MLS, which only works in mobile mode. Due to this capability, it is the first robotic
+ system that allows automatic viewpoint planning instead of human intuition-based viewpoint
+ selection. In addition, the mobile
1097
+ robot could navigate itself with overall good localization and
1098
+ provide good initial states for fine map optimization. The
1099
+ mapping precision and accuracy of the proposed system are
1100
+ also compared with these systems [29]–[31]. The proposed system achieves performance close to
+ the LEICA TLS while offering the mobility of an MLS, in line with the purpose of the system.
1104
+ V. CONCLUSION
1105
+ This paper proposed a coarse-to-fine hybrid 3D mapping
1106
+ robotic system based on an omnidirectional camera and a non-
1107
+ repetitive Livox LiDAR. A hybrid mapping approach with both
1108
+ odometry-based and stationary mapping modes is integrated
1109
+ into one mobile mapping robot, achieving a streamlined
1110
+ and automated mapping workflow with the assurance of
1111
+ efficiency and mapping precision and accuracy. Meanwhile,
1112
+ the proposed automatic and targetless co-calibration method
1113
+ provides accurate parameters to generate colorized mapping.
1114
+ Specifically, the calibration is based on edges extracted
1115
+ from camera images and LiDAR reflectivity, and the result
1116
+ is compared with the mutual-information-based calibration
1117
+ method, which underperformed, possibly due to differences between the LiDAR and the camera in
+ light sources, surface reflection properties, and spectral reflectance. In future work, more
+ sophisticated planning strategies could be developed to jointly optimize scanning time and
+ spatial coverage. We believe this new
1123
+ automated mapping robot will open up a new horizon for
1124
+ surveying and inspection robotics.
1125
+ REFERENCES
1126
+ [1] P. S. Blaer and P. K. Allen, “View planning and automated data
1127
+ acquisition for three-dimensional modeling of complex sites,” Journal
1128
+ of Field Robotics, vol. 26, no. 11-12, pp. 865–891, 2009.
1129
+ [2] C. Debeunne and D. Vivet, “A review of visual-lidar fusion based
1130
+ simultaneous localization and mapping,” Sensors, vol. 20, no. 7, p. 2068,
1131
+ 2020.
1132
+ [3] V. V. Lehtola, H. Kaartinen, A. Nüchter, R. Kaijaluoto, A. Kukko,
1133
+ P. Litkey, E. Honkavaara, T. Rosnell, M. T. Vaaja, J.-P. Virtanen et al.,
1134
+ “Comparison of the selected state-of-the-art 3d indoor scanning and
1135
+ point cloud generation methods,” Remote sensing, vol. 9, no. 8, p. 796,
1136
+ 2017.
1137
+ [4] J. Lin and F. Zhang, “R3LIVE: A robust, real-time, RGB-colored, LiDAR-inertial-visual
+ tightly-coupled state estimation and mapping package,” in 2022 International Conference on
+ Robotics and Automation (ICRA). IEEE, 2022, pp. 10672–10678.
1141
+ [5] B. Schwarz, “Mapping the world in 3d,” Nature Photonics, vol. 4, no. 7,
1142
+ pp. 429–430, 2010.
1143
+ [6] Z. Liu, F. Zhang, and X. Hong, “Low-cost retina-like robotic lidars
1144
+ based on incommensurable scanning,” IEEE/ASME Transactions on
1145
+ Mechatronics, vol. 27, no. 1, pp. 58–68, 2021.
1146
+ [7] R. Otero, S. Lagüela, I. Garrido, and P. Arias, “Mobile indoor mapping
1147
+ technologies: A review,” Automation in Construction, vol. 120, p.
1148
+ 103399, 2020.
1149
+ [8] A. Keitaanniemi, J.-P. Virtanen, P. Rönnholm, A. Kukko, T. Rantanen,
1150
+ and M. T. Vaaja, “The Combined Use of SLAM Laser Scanning and
1151
+ TLS for the 3D Indoor Mapping,” Buildings, vol. 11, no. 9, 2021.
1152
+ [9] A. Aryan, F. Bosché, and P. Tang, “Planning for terrestrial laser scanning
1153
+ in construction: A review,” Automation in Construction, vol. 125, p.
1154
+ 103551, 2021.
1155
+ [10] J. Shao, W. Zhang, N. Mellado, N. Wang, S. Jin, S. Cai, L. Luo,
1156
+ T. Lejemble, and G. Yan, “Slam-aided forest plot mapping combining
1157
+ terrestrial and mobile laser scanning,” ISPRS Journal of Photogrammetry
1158
+ and Remote Sensing, vol. 163, pp. 214–230, 2020.
1159
+ [11] W. Xu, Y. Cai, D. He, J. Lin, and F. Zhang, “Fast-lio2: Fast direct
1160
+ lidar-inertial odometry,” IEEE Transactions on Robotics, 2022.
1161
+ [12] Y. Zhu, C. Zheng, C. Yuan, X. Huang, and X. Hong, “Camvox: A
1162
+ low-cost and accurate lidar-assisted visual slam system,” in 2021 IEEE
1163
+ International Conference on Robotics and Automation (ICRA).
1164
+ IEEE,
1165
+ 2021, pp. 5049–5055.
1166
+ [13] C. Yuan, X. Liu, X. Hong, and F. Zhang, “Pixel-level extrinsic
1167
+ self calibration of high resolution lidar and camera in targetless
1168
+ environments,” IEEE Robotics and Automation Letters, vol. 6, no. 4,
1169
+ pp. 7517–7524, 2021.
1170
+ [14] I. Puente, H. González-Jorge, J. Martínez-Sánchez, and P. Arias,
1171
+ “Review of mobile mapping and surveying technologies,” Measurement,
1172
+ vol. 46, no. 7, pp. 2127–2145, 2013.
1173
+ [15] J. S. Berrio, M. Shan, S. Worrall, and E. Nebot, “Camera-lidar
1174
+ integration: Probabilistic sensor fusion for semantic mapping,” IEEE
1175
+ Transactions on Intelligent Transportation Systems, 2021.
1176
+ [16] A. Javaheri, C. Brites, F. Pereira, and J. Ascenso, “Point cloud
1177
+ rendering after coding: Impacts on subjective and objective quality,”
1178
+ IEEE Transactions on Multimedia, vol. 23, pp. 4049–4064, 2020.
1179
+ [17] A. Jaakkola, J. Hyyppä, A. Kukko, X. Yu, H. Kaartinen, M. Lehtomäki,
1180
+ and Y. Lin, “A low-cost multi-sensoral mobile mapping system and its
1181
+ feasibility for tree measurements,” ISPRS journal of Photogrammetry
1182
+ and Remote Sensing, vol. 65, no. 6, pp. 514–522, 2010.
1183
+ [18] J. Kannala and S. S. Brandt, “A generic camera model and calibration
1184
+ method for conventional, wide-angle, and fish-eye lenses,” IEEE
1185
+ transactions on pattern analysis and machine intelligence, vol. 28, no. 8,
1186
+ pp. 1335–1340, 2006.
1187
+ [19] D. Scaramuzza, A. Martinelli, and R. Siegwart, “A toolbox for easily
1188
+ calibrating omnidirectional cameras,” in 2006 IEEE/RSJ International
1189
+ Conference on Intelligent Robots and Systems.
1190
+ IEEE, 2006, pp. 5695–
1191
+ 5701.
1192
+ [20] K. Kanatani, “Calibration of ultrawide fisheye lens cameras by
1193
+ eigenvalue minimization,” IEEE Transactions on Pattern Analysis and
1194
+ Machine Intelligence, vol. 35, no. 4, pp. 813–822, 2012.
1195
+ [21] D. Scaramuzza, A. Harati, and R. Siegwart, “Extrinsic self calibration
1196
+ of a camera and a 3d laser range finder from natural scenes,” in 2007
1197
+ IEEE/RSJ International Conference on Intelligent Robots and Systems,
1198
+ 2007, pp. 4164–4169.
1199
+ [22] G. Pandey, J. R. McBride, S. Savarese, and R. M. Eustice, “Automatic
1200
+ targetless extrinsic calibration of a 3d lidar and camera by maximizing
1201
+ mutual information,” in Twenty-Sixth AAAI Conference on Artificial
1202
+ Intelligence, 2012.
1203
+ [23] T. Mertens, J. Kautz, and F. Van Reeth, “Exposure fusion,” in 15th
1204
+ Pacific Conference on Computer Graphics and Applications (PG’07).
1205
+ IEEE, 2007, pp. 382–390.
1206
+ [24] L. Ding and A. Goshtasby, “On the canny edge detector,” Pattern
1207
+ recognition, vol. 34, no. 3, pp. 721–725, 2001.
1208
+ [25] A. Segal, D. Haehnel, and S. Thrun, “Generalized-icp.” Robotics:
1209
+ science and systems, vol. 2, no. 4, p. 435, 2009.
1210
+ [26] G. R. Terrell and D. W. Scott, “Variable kernel density estimation,” The
1211
+ Annals of Statistics, pp. 1236–1265, 1992.
1212
+ [27] S. Agarwal, K. Mierle, and The Ceres Solver Team, “Ceres Solver,” 2022.
1213
+ [Online]. Available: https://github.com/ceres-solver/ceres-solver
1214
+ [28] S. Urban, J. Leitloff, and S. Hinz, “Improved wide-angle, fisheye and
1215
+ omnidirectional camera calibration,” ISPRS Journal of Photogrammetry
1216
+ and Remote Sensing, vol. 108, pp. 72–79, 2015.
1217
+ [29] FARO, “Faro focus premium: Capture with confidence and connect
1218
+ your world faster,” 2022, https://media.faro.com/-/media/Project/FARO/
1219
+ FARO/FARO/Resources/1 BROCHURE/2022/FARO-Sphere/AEC
1220
+ Focus-Premium/3154 Brochure FocusPremium AEC ENG LT.pdf.
1221
+ [30] A. Dlesk, K. Vach, J. Šedina, and K. Pavelka, “Comparison of Leica
+ BLK360 and Leica BLK2GO on chosen test objects,” ISPRS Annals of
1223
+ Photogrammetry, Remote Sensing & Spatial Information Sciences, 2022.
1224
+ [31] S. De Geyter, J. Vermandere, H. De Winter, M. Bassier, and
1225
+ M. Vergauwen, “Point cloud validation: On the impact of laser scanning
1226
+ technologies on the semantic segmentation for bim modeling and
1227
+ evaluation,” Remote Sensing, vol. 14, no. 3, p. 582, 2022.
1228
+
LdFOT4oBgHgl3EQf0DSF/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
MdFRT4oBgHgl3EQf2zi9/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c96323e44276d564e4993dbfe719d2cdaf4c0c781071c7c81251db6d8a09ed8
3
+ size 5373997