jackkuo committed (verified)
Commit 066c2f6 · 1 Parent(s): e0b4fd5

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.

Files changed (50):
  1. .gitattributes +63 -0
  2. 0NFIT4oBgHgl3EQf2ysh/content/2301.11378v1.pdf +3 -0
  3. 0NFIT4oBgHgl3EQf2ysh/vector_store/index.faiss +3 -0
  4. 0NFIT4oBgHgl3EQf2ysh/vector_store/index.pkl +3 -0
  5. 0dE2T4oBgHgl3EQfNAY2/content/2301.03730v1.pdf +3 -0
  6. 0dE2T4oBgHgl3EQfNAY2/vector_store/index.faiss +3 -0
  7. 0dE2T4oBgHgl3EQfNAY2/vector_store/index.pkl +3 -0
  8. 1NFIT4oBgHgl3EQf3ivh/content/2301.11382v1.pdf +3 -0
  9. 1NFIT4oBgHgl3EQf3ivh/vector_store/index.faiss +3 -0
  10. 1NFIT4oBgHgl3EQf3ivh/vector_store/index.pkl +3 -0
  11. 2NFQT4oBgHgl3EQfFTWv/content/tmp_files/2301.13241v1.pdf.txt +2042 -0
  12. 2NFQT4oBgHgl3EQfFTWv/content/tmp_files/load_file.txt +0 -0
  13. 2tAyT4oBgHgl3EQfovik/vector_store/index.pkl +3 -0
  14. 2tE4T4oBgHgl3EQf0A0W/content/tmp_files/2301.05278v1.pdf.txt +1972 -0
  15. 2tE4T4oBgHgl3EQf0A0W/content/tmp_files/load_file.txt +0 -0
  16. 39FQT4oBgHgl3EQfHTWW/vector_store/index.faiss +3 -0
  17. 4dFQT4oBgHgl3EQf4Ta7/vector_store/index.faiss +3 -0
  18. 6dE0T4oBgHgl3EQffAAW/content/tmp_files/2301.02397v1.pdf.txt +3265 -0
  19. 6dE0T4oBgHgl3EQffAAW/content/tmp_files/load_file.txt +0 -0
  20. 6tE4T4oBgHgl3EQf1w38/content/tmp_files/2301.05294v1.pdf.txt +1742 -0
  21. 6tE4T4oBgHgl3EQf1w38/content/tmp_files/load_file.txt +0 -0
  22. 79E2T4oBgHgl3EQfPgaW/content/tmp_files/2301.03760v1.pdf.txt +1920 -0
  23. 79E3T4oBgHgl3EQfqQqR/content/tmp_files/2301.04650v1.pdf.txt +1863 -0
  24. 79E3T4oBgHgl3EQfqQqR/content/tmp_files/load_file.txt +0 -0
  25. 8NAyT4oBgHgl3EQfQvar/content/2301.00053v1.pdf +3 -0
  26. 8NAyT4oBgHgl3EQfQvar/vector_store/index.faiss +3 -0
  27. 8NAyT4oBgHgl3EQfQvar/vector_store/index.pkl +3 -0
  28. 8NE1T4oBgHgl3EQfTwPq/content/2301.03083v1.pdf +3 -0
  29. 8NE1T4oBgHgl3EQfTwPq/vector_store/index.pkl +3 -0
  30. 9NAyT4oBgHgl3EQf3Plu/content/2301.00765v1.pdf +3 -0
  31. A9AzT4oBgHgl3EQfTPz-/content/2301.01248v1.pdf +3 -0
  32. AdFLT4oBgHgl3EQfxDCd/content/tmp_files/2301.12166v1.pdf.txt +1051 -0
  33. AdFLT4oBgHgl3EQfxDCd/content/tmp_files/load_file.txt +0 -0
  34. B9FAT4oBgHgl3EQfsh6e/content/2301.08659v1.pdf +3 -0
  35. B9FAT4oBgHgl3EQfsh6e/vector_store/index.pkl +3 -0
  36. BNAyT4oBgHgl3EQf4PoQ/content/2301.00781v1.pdf +3 -0
  37. BNAyT4oBgHgl3EQf4PoQ/vector_store/index.faiss +3 -0
  38. BNAyT4oBgHgl3EQf4PoQ/vector_store/index.pkl +3 -0
  39. C9FRT4oBgHgl3EQfwjj-/content/2301.13639v1.pdf +3 -0
  40. C9FRT4oBgHgl3EQfwjj-/vector_store/index.pkl +3 -0
  41. CdE0T4oBgHgl3EQfQADn/content/2301.02188v1.pdf +3 -0
  42. CdE0T4oBgHgl3EQfQADn/vector_store/index.faiss +3 -0
  43. CdE0T4oBgHgl3EQfQADn/vector_store/index.pkl +3 -0
  44. CtE0T4oBgHgl3EQfgQFr/vector_store/index.faiss +3 -0
  45. DdE2T4oBgHgl3EQf9gki/vector_store/index.pkl +3 -0
  46. ENE0T4oBgHgl3EQfgwE0/content/2301.02421v1.pdf +3 -0
  47. ENE0T4oBgHgl3EQfgwE0/vector_store/index.faiss +3 -0
  48. FNE1T4oBgHgl3EQfWwTK/vector_store/index.faiss +3 -0
  49. FtAyT4oBgHgl3EQfSvdV/vector_store/index.faiss +3 -0
  50. FtFKT4oBgHgl3EQfbC5a/content/tmp_files/2301.11810v1.pdf.txt +923 -0
.gitattributes CHANGED
@@ -2306,3 +2306,66 @@ btFJT4oBgHgl3EQf9S1J/content/2301.11687v1.pdf filter=lfs diff=lfs merge=lfs -text
  ItA0T4oBgHgl3EQfCP98/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
  cdE2T4oBgHgl3EQfwgij/content/2301.04102v1.pdf filter=lfs diff=lfs merge=lfs -text
  PNE4T4oBgHgl3EQfkQ1N/content/2301.05149v1.pdf filter=lfs diff=lfs merge=lfs -text
+ l9A0T4oBgHgl3EQfJf8H/content/2301.02089v1.pdf filter=lfs diff=lfs merge=lfs -text
+ ENE0T4oBgHgl3EQfgwE0/content/2301.02421v1.pdf filter=lfs diff=lfs merge=lfs -text
+ FtAyT4oBgHgl3EQfSvdV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ WNFJT4oBgHgl3EQfOyzK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ P9FRT4oBgHgl3EQf6Tj7/content/2301.13676v1.pdf filter=lfs diff=lfs merge=lfs -text
+ ENE0T4oBgHgl3EQfgwE0/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ utE0T4oBgHgl3EQfsQFW/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ utE0T4oBgHgl3EQfsQFW/content/2301.02576v1.pdf filter=lfs diff=lfs merge=lfs -text
+ btFJT4oBgHgl3EQf9S1J/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ u9E2T4oBgHgl3EQf2ggs/content/2301.04161v1.pdf filter=lfs diff=lfs merge=lfs -text
+ pNE1T4oBgHgl3EQfiATe/content/2301.03248v1.pdf filter=lfs diff=lfs merge=lfs -text
+ ItFAT4oBgHgl3EQfux4k/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ztAzT4oBgHgl3EQf8P7y/content/2301.01903v1.pdf filter=lfs diff=lfs merge=lfs -text
+ ztAzT4oBgHgl3EQf8P7y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ftAzT4oBgHgl3EQfafzZ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ C9FRT4oBgHgl3EQfwjj-/content/2301.13639v1.pdf filter=lfs diff=lfs merge=lfs -text
+ kdE1T4oBgHgl3EQfggQS/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 4dFQT4oBgHgl3EQf4Ta7/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ B9FAT4oBgHgl3EQfsh6e/content/2301.08659v1.pdf filter=lfs diff=lfs merge=lfs -text
+ pNE1T4oBgHgl3EQfiATe/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ PNE4T4oBgHgl3EQfkQ1N/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ tNE3T4oBgHgl3EQf9guV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ u9E2T4oBgHgl3EQf2ggs/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 39FQT4oBgHgl3EQfHTWW/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ P9E2T4oBgHgl3EQfVwcE/content/2301.03825v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 0dE2T4oBgHgl3EQfNAY2/content/2301.03730v1.pdf filter=lfs diff=lfs merge=lfs -text
+ CdE0T4oBgHgl3EQfQADn/content/2301.02188v1.pdf filter=lfs diff=lfs merge=lfs -text
+ jtE0T4oBgHgl3EQfYQDo/content/2301.02306v1.pdf filter=lfs diff=lfs merge=lfs -text
+ MNAzT4oBgHgl3EQfkf3z/content/2301.01534v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 8NAyT4oBgHgl3EQfQvar/content/2301.00053v1.pdf filter=lfs diff=lfs merge=lfs -text
+ FNE1T4oBgHgl3EQfWwTK/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ BNAyT4oBgHgl3EQf4PoQ/content/2301.00781v1.pdf filter=lfs diff=lfs merge=lfs -text
+ Z9FPT4oBgHgl3EQfvDVz/content/2301.13158v1.pdf filter=lfs diff=lfs merge=lfs -text
+ vtA0T4oBgHgl3EQfMP8I/content/2301.02128v1.pdf filter=lfs diff=lfs merge=lfs -text
+ cNE0T4oBgHgl3EQfWQCi/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ z9FJT4oBgHgl3EQfjSwc/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ztAyT4oBgHgl3EQf0_ll/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 0NFIT4oBgHgl3EQf2ysh/content/2301.11378v1.pdf filter=lfs diff=lfs merge=lfs -text
+ CdE0T4oBgHgl3EQfQADn/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ IdFJT4oBgHgl3EQfFywB/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ INE2T4oBgHgl3EQfUAe8/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ jtE0T4oBgHgl3EQfYQDo/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 9NAyT4oBgHgl3EQf3Plu/content/2301.00765v1.pdf filter=lfs diff=lfs merge=lfs -text
+ PdAzT4oBgHgl3EQfIvsJ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ CtE0T4oBgHgl3EQfgQFr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ k9A0T4oBgHgl3EQfI__f/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ A9AzT4oBgHgl3EQfTPz-/content/2301.01248v1.pdf filter=lfs diff=lfs merge=lfs -text
+ INE2T4oBgHgl3EQfUAe8/content/2301.03809v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 8NAyT4oBgHgl3EQfQvar/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 8NE1T4oBgHgl3EQfTwPq/content/2301.03083v1.pdf filter=lfs diff=lfs merge=lfs -text
+ BNAyT4oBgHgl3EQf4PoQ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 0dE2T4oBgHgl3EQfNAY2/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ IdFJT4oBgHgl3EQfFywB/content/2301.11444v1.pdf filter=lfs diff=lfs merge=lfs -text
+ tdE5T4oBgHgl3EQfmg_h/content/2301.05680v1.pdf filter=lfs diff=lfs merge=lfs -text
+ V9AyT4oBgHgl3EQfV_fO/content/2301.00156v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 1NFIT4oBgHgl3EQf3ivh/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ Z9FPT4oBgHgl3EQfvDVz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ MNAzT4oBgHgl3EQfkf3z/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ftAzT4oBgHgl3EQfafzZ/content/2301.01371v1.pdf filter=lfs diff=lfs merge=lfs -text
+ e9E5T4oBgHgl3EQfhA8F/content/2301.05637v1.pdf filter=lfs diff=lfs merge=lfs -text
+ V9AyT4oBgHgl3EQfV_fO/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 1NFIT4oBgHgl3EQf3ivh/content/2301.11382v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 0NFIT4oBgHgl3EQf2ysh/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
0NFIT4oBgHgl3EQf2ysh/content/2301.11378v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26c66fbfd83dceeeee2e21570addeb75b727793df70b218c3b889bbcad3badaa
+ size 3329359
0NFIT4oBgHgl3EQf2ysh/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34ce310991e1a71682ac7469b9d3b7ef7940432a9aee7a6ea411e90518590020
+ size 2687021
0NFIT4oBgHgl3EQf2ysh/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4495069109d076e485ea7447b116d63d13fa4c0396e6419192bdfcfb6eae53e9
+ size 109725
0dE2T4oBgHgl3EQfNAY2/content/2301.03730v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce67f739df978c90a2bbd2c5e684248b7de2c50801d63e7644d8940b7dbb458a
+ size 1181857
0dE2T4oBgHgl3EQfNAY2/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b776c8354de9c9615e5d50c07657ed2656558496084d9dcaf48f8560ed56d4e4
+ size 3932205
0dE2T4oBgHgl3EQfNAY2/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:372a5cd76265282dad8e5f59ed43020ffe655c84923ada1f73f0fa85c4c30430
+ size 144087
1NFIT4oBgHgl3EQf3ivh/content/2301.11382v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5dfaeb35eca4d6404ac2abe85e3ab1d0236577524f06c8c3f5ead33184b309c4
+ size 898198
1NFIT4oBgHgl3EQf3ivh/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1aaca5305c993c532f445096720d7c64c875818c925da1bb204f6e98cc4137e3
+ size 3801133
1NFIT4oBgHgl3EQf3ivh/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:effe0bdcde68123ca9b7b4b9bcbb9a666e5b8bbdf1e190b8df8ad64c08be078a
+ size 135584
2NFQT4oBgHgl3EQfFTWv/content/tmp_files/2301.13241v1.pdf.txt ADDED
@@ -0,0 +1,2042 @@
SpinQ: Compilation strategies for scalable spin-qubit architectures

N. Paraskevopoulos (1,2), F. Sebastiano (1,2), C. G. Almudever (3), and S. Feld (1,2)

(1) Quantum and Computer Engineering Department, Delft University of Technology, 2628 CD Delft, The Netherlands
(2) QuTech, Delft University of Technology, 2628 CJ Delft, The Netherlands
(3) Computer Engineering Department, Technical University of Valencia, Camino de Vera, s/n, 46022 València, Spain

In most qubit realizations, prototype devices are available and are already utilized in both industry and academic research. Despite these devices being severely constrained, hardware- and algorithm-aware quantum circuit mapping techniques have been developed to enable successful algorithm executions during the NISQ era, targeting mostly technologies with high qubit counts. Much less attention has been paid to compilation methods for quantum processors based on spin qubits, due to the scarce availability of experimental devices and their small sizes. However, given their high scalability potential and their rapid progress, it is timely to start exploring quantum circuit mapping solutions for these spin-qubit devices. In this work, we discuss the unique mapping challenges of a scalable spin-qubit crossbar architecture with shared control [1] and introduce SpinQ, the first native compilation framework for scalable spin-qubit architectures, which maps quantum algorithms onto this crossbar architecture. At the core of SpinQ is the Integrated Strategy, which addresses the unique operational constraints of the crossbar while considering compilation (execution time) scalability, having an O(n) computational complexity. To evaluate the performance of SpinQ on this novel architecture, we compiled a broad set of well-defined quantum circuits and performed an in-depth analysis based on multiple metrics such as gate overhead, depth overhead, and estimated success probability, which in turn allowed us to derive unique mapping and architectural insights. Finally, we propose mapping technique improvements for the crossbar architecture that could increase algorithm success rates and potentially inspire further research on quantum circuit mapping techniques for other scalable spin-qubit architectures.
I. INTRODUCTION

The prospect of quantum computing advantage is steadily becoming a reality [2–4]. The community is anticipating further advances that will allow quantum computing systems to become practical and to reach computational advantage [5]. With such advancements, quantum computing systems are expected to solve a plethora of classically intractable problems. Until then, current quantum systems belong to the so-called Noisy Intermediate-Scale Quantum (NISQ) era [6], in which devices can only handle small-sized quantum circuits. This is due to limitations in the number of qubits and to high operational errors, the latter causing rapid deterioration of quantum information. Combined with further hardware constraints, such as cross-talk and limited classical-control resources [7, 8], successful quantum circuit execution is a difficult feat. Scientists, both in academia and in industry, face major engineering challenges in building both the hardware and the corresponding system software.

During the NISQ era, there have been significant efforts [9–19] to extract the most out of these resource-constrained and error-prone quantum computing systems. One approach is to develop hardware- and algorithm-aware quantum circuit mapping techniques to maximize performance. In general terms, mapping refers to the process of modifying (potentially hardware-agnostic) quantum circuits in such a way that they can be run on a given quantum computing device, respecting all of its constraints while optimizing performance (e.g., algorithm success rate). So far, most mapping techniques have been developed for superconducting and ion-trap qubit devices, as they are nowadays among the most well-recognized and most-developed qubit implementation technologies in terms of qubit counts and availability to users. However, spin qubits emerge as a promising technology for scaling up quantum computing systems, mainly due to their high integration potential [20–25]. Therefore, the scientific community is envisioning two-dimensional spin-qubit architectural proposals that could alleviate some of the major challenges towards scalability. Recently, a crossbar array [26] has been experimentally demonstrated, showing great promise for architectures with shared control. Such scalable architectural designs come with a new set of hardware constraints for which novel quantum circuit mapping techniques need to be developed.

In this paper, we present SpinQ, the first native compilation framework focusing on scalable spin-qubit architectures. To this purpose, we target the so-called crossbar architecture proposed in [1]. By creating a deep understanding of its operational constraints, we draw a clear picture of the unique mapping challenges that arise in comparison to other qubit technologies. We discuss and implement possible mapping solutions based on [27, 28] while improving those works from a scalability standpoint, and we emphasize the importance of an extensive performance evaluation of novel mapping techniques. Note that this compilation framework will not only allow quantum algorithm executions on scalable spin-qubit hardware but, more importantly, will also help to form insights on the behaviour and performance of this new breed of architectures and provide design guidelines for future developments.

The main contributions of this paper are:

1. An in-depth analysis of mapping challenges in order to create novel mapping techniques for spin-qubit crossbar architectures.
2. SpinQ, the first native compilation framework dedicated to scalable spin-qubit architectures, which utilizes a more scalable compilation strategy compared to previous proposals.
3. A thorough performance analysis of the main sources of gate/depth overhead and of the estimated success probability when mapping well-defined quantum algorithms on the crossbar architecture.
4. Algorithmic- and hardware-specific mapping insights for the crossbar architecture and other spin-qubit architectures.

The remainder of this paper is structured as follows. In Sec. II the current progress and challenges of scalable spin-qubit architectures are presented. In Sec. III the crossbar architecture is introduced as a potential candidate for scaling quantum devices in two dimensions, together with its native operations. In Sec. IV we comprehensively analyse the unique challenges of mapping quantum algorithms on the crossbar architecture, which require novel mapping techniques. In Sec. V we introduce SpinQ, the first native compilation framework for scalable spin-qubit architectures. In Sec. VII we thoroughly analyze the performance of SpinQ when mapping a broad and well-defined range of quantum algorithms on the crossbar architecture, after which we form architectural and mapping insights. In Sec. VIII we discuss potential improvements of our compilation strategy and compare its computational complexity to previous proposals. Finally, we conclude our work in Sec. IX.
II. SPIN QUBITS AS A SCALABLE PLATFORM

To fulfil the promise [6] of quantum computers being machines that solve some classically intractable problems, substantial system sizes have to be reached, i.e., a large number of qubits [8, 29]. It still remains to be seen which qubit implementation technologies (e.g., superconducting circuits, trapped ions, quantum dots, photonics, or defect-based qubits in nitrogen-vacancy diamond centres) will succeed in scaling up quantum computing systems with high-quality qubits [30, 31]. Spin qubits in quantum dots are a promising technology for scalable quantum computers due to the maturity of the semiconductor industry, the capability of high integration on a single die compared to other qubit technologies, long coherence times, and the ability to operate at super-kelvin temperatures [20–25].

Despite the advantages just mentioned, several challenges remain today towards scaling spin-qubit devices in a sustainable manner. One major challenge is the wiring scheme between the quantum processor and the classical interface, the so-called interconnect bottleneck [22]. Formally, the interconnect bottleneck is described by Rent's exponent [32], which is a measure of optimization of the wiring scheme in both classical and quantum processors. The existing scheme in most quantum devices of having at least one control line per qubit is not scalable in the long term. This is mostly because dilution refrigerators have an upper limit on I/O cable capacity and because more cables progressively make it harder to reach the desired milli-kelvin temperatures due to higher heat dissipation. Therefore, qubit architectures and classical-control electronics have to support multi-qubit shared control that requires a sub-linear number of control lines with an increasing number of qubits. In other words, each control line needs to address multiple qubits to effectively mitigate the interconnect bottleneck when scaling up quantum hardware.

Going a step further, the inability to achieve a scalable wiring scheme also originates from the low device uniformity achieved by today's fabrication tools. In most cases, this implies that qubits cannot be made homogeneous enough to control them effectively in a scalable architecture. The low uniformity results in resonance frequency deviations or other control variations. This means that, in an inhomogeneous device, a driving signal for a particular operation will have to vary from one qubit to another to obtain the same outcome [1, 22, 33]. This makes it difficult to successfully control many qubits with the same control line, thus contributing to the wiring-scheme challenge (i.e., the interconnect bottleneck).

There have been significant efforts [1, 22, 32, 34–38] to reduce the number of control lines reaching the qubits as devices become ever denser. Such efforts take advantage of the miniaturization capabilities of spin qubits and the large-scale integration of solid-state circuits to address the aforementioned challenges. However, current experimental work has primarily focused on one-dimensional spin-qubit arrays of small sizes [22], which are not easily scalable. Recently, a 2×2 spin-qubit processor [39] and a 4×4 spin-qubit device based on a crossbar architecture [1, 26] with shared control have demonstrated the potential to scale spin-qubit devices in two dimensions. As the technology advances and further reduces Rent's exponent, there will be a need to effectively map quantum algorithms on two-dimensional devices such as the crossbar architecture, which comes not only with limited qubit connectivity but also with a new set of constraints. Therefore, there is an opportunity to explore its mapping challenges and propose novel solutions.

However, the sample space of these proposals is sparse and lacks a detailed description of hardware constraints. Combined with the lack of available devices for testing, this leads to the absence of a proper evaluation tool capable of benchmarking various quantum algorithms. As a result, mapping techniques have not been studied as much as for other qubit technologies such as superconducting and ion traps, and it remains unclear whether existing techniques are applicable. Even if such techniques were realized, they could be incompatible with existing quantum compilation frameworks made for other qubit technologies, due to the completely different development requirements imposed by the particular spin-qubit constraints and their scalability prospects. In other words, a dedicated compilation framework for spin-qubit architectures with a focus on scalability is still missing. All these obstacles make it difficult to evaluate and compare various architectural proposals under relevant application categories.
[FIG. 1: Schematic overview of the crossbar architecture and operational control lines [1].]
III. THE CROSSBAR ARCHITECTURE

The crossbar architecture for arranging spin qubits was introduced in [1] as a scalable solution to the interconnect bottleneck. Inspired by the crossbar architecture used in today's classical processors [1, 34], it adopts a similar characteristic, namely shared control. This leads to a quadratic reduction in control lines per qubit [28] and opens up the possibility of integrating up to 1,000 qubits in a single package. Qubits are defined by electron (or hole) spin states in Si-based quantum dots. Figure 1 shows a schematic overview of the crossbar architecture, in which each site (circle) represents a quantum dot, some of which are occupied by spin qubits (numbered, green circles). Spin qubits are usually sparsely initialized in a checkerboard pattern to reduce potential cross-talk and to allow for long-range entanglement through shuttling qubits across the array [1]. Finally, the crossbar architecture requires high uniformity in the fabrication of materials to minimize operational errors. Fortunately, it is possible to mitigate such errors, or even make them vanish, by operating the crossbar at low magnetic fields and with proper tuning (e.g., separated resonance frequencies between columns). Furthermore, a crossbar module is envisioned to be self-contained and to be duplicated in a network of modules. This can provide the means to realize quantum error correction (QEC) in large-scale systems, enabled by fast-shuttling, low-error communication links. In this crossbar architecture, three different kinds of shared control lines are used to perform operations on the qubits: vertical (column line, CL), horizontal (row line, RL), and diagonal (qubit line, QL). Notably, each line affects all the sites it is connected to. For instance, in Fig. 1 line QL−2 affects the sites in which spin qubits 5 and 7 reside. This imposes restrictions on the parallelization of instructions, which we will discuss in Sec. IV. Below, we abstractly describe the control properties for executing gates native to the crossbar architecture; a more detailed explanation is provided in [28].

Two-qubit gates. Two two-qubit gates, CPHASE and √SWAP, are supported by the crossbar, with the latter being chosen for this work due to its higher operational fidelity and faster execution time according to [1]. A √SWAP can be performed when two qubits are vertically adjacent (i.e., in the same column) and the horizontal barrier between them is lowered. The QL lines going through the two qubits then need to be at the same voltage potential for a specific duration of time to complete the √SWAP.

Qubit shuttling. In the crossbar architecture, qubits can be moved around by performing shuttling operations. In this operation, the vertical and horizontal lines are used as barrier gates. Lowering or raising these barriers can create pathways through which qubits can move (shuttle) from one site to another with the use of DC signals on the diagonal lines. Fig. 2 shows an example of shuttling, in which spin qubit 3 is moved one site to the left. Although this architecture can support gate-based communication with two subsequent √SWAP gates, as in superconducting qubits, shuttling qubits is preferred due to its higher operation fidelity and shorter execution time. It should be noted that shuttling horizontally, i.e., between columns, causes a Z-phase rotation, which should be mitigated by timing such operations well [1]. In the crossbar architecture, single-qubit rotations fall into two categories: Z-phase rotations and X or Y rotations.

Z-phase rotations. Z-phase qubit rotations are controlled by a well-timed qubit shuttle to and from a neighbouring column [1, 27, 28]. This is due to the differences in Zeeman energy from column to column, which impose an alternating magnetic field on the qubits, thus rotating them around the Z axis. When this shuttle is timed correctly, it rotates the qubit into the intended Z state. The diagonal qubit line provides the means to address multiple qubits, thus enabling parallel Z-phase shifts across the topology.

X or Y rotations. For X or Y rotations, either all qubits belonging to red-coloured columns or all qubits in blue-coloured columns are rotated (see Fig. 1). This is called semi-global qubit rotation and is implemented by electron spin resonance [40].

Measurement. The readout process allows for local single-qubit measurements based on the Pauli Spin Blockade (PSB) process [41]. With this process, the measurement outcome is determined by whether a qubit shuttle towards a horizontally adjacent ancilla qubit was successful or not.
IV. QUANTUM CIRCUIT MAPPING CHALLENGES OF THE CROSSBAR ARCHITECTURE

The quantum circuit mapping process plays an essential role in the successful execution of algorithms on a quantum computer. It consists of a cascade of routines that transform a (potentially hardware-agnostic) quantum circuit into a hardware-compatible version. However, current NISQ quantum processors are severely constrained and cannot yet run useful applications successfully. Examples of hardware constraints are low qubit connectivity, cross-talk, a reduced primitive gate set, low coherence times, fabrication imperfections, and limited classical-control resources. Therefore, a mapping process needs to consider such limitations and optimize performance as much as possible to increase the algorithm's success rate. So far, a plethora of solutions have been proposed, which differ in strategy, methodology, and the performance metrics they optimize [9–19, 42].

Mapping techniques have mostly been developed for superconducting and ion-trap qubit devices; as of now, there is not much focus on spin-qubit architectures and their particular characteristics. Although spin qubits are in a rather early development stage, their scalability potential is undeniable, and it is therefore timely to lay the groundwork for developing novel mapping techniques and to inspire further research. As previously mentioned, in this work we focus on the crossbar architecture, which comes with a unique set of constraints that affect the parallelization of quantum operations, the application of X or Y rotations on single qubits, and the routing of qubits (i.e., moving qubits around).
1. Parallelization of quantum operations

[FIG. 2: Shuttling example of qubit 3 moved one site to the left: the barrier CL0 between the origin and destination sites is lowered and the voltage of QL−1 is larger than that of QL0.]
Most of the operation parallelization restrictions come from the fact that control lines are shared among multiple qubits, while each line has a specific role and relation to the others. It should also be noted that most operations must be implemented with strict pulse durations and time intervals depending on the site that is addressed [1], due to fabrication imperfections [28]. Although such pulse durations have to be carefully considered in the mapping process by providing recent calibration data [13, 17, 18], in this work we consider an ideal crossbar architecture, as such data are not available yet. Despite that, the mapping techniques proposed in this work are compatible with such considerations, which can be added once calibration data become available.

[FIG. 3: Parallelizing the shuttles of qubits 3 and 6 is not allowed due to violation of constraints.]

To better illustrate the conditions and constraints that apply when trying to parallelize quantum operations, let us consider the following example in which two shuttles are performed in parallel. As shown in Fig. 2, the following requirements must be fulfilled to shuttle qubit 3 one site to the left:

1. The destination site must not be occupied by another qubit.
2. The barrier between the destination and origin sites must be lowered. This is depicted as a dashed vertical CL0 line.
3. All barriers surrounding the origin and destination sites must be raised. This is shown as solid red RL lines (RL0 and RL1) and blue CL lines (CL1 and the always-raised left-most CL line).
4. The voltage on the QL line of the destination site (QL−1) must be higher than that on the QL line of the origin site (QL0). This is shown as QL−1 > QL0 in the top right of Fig. 2.
5. To prevent the other qubits in these two columns from shuttling, the voltages on their QL lines must be higher than those of their adjacent empty sites. This is depicted as voltage-level relations between QL lines. Note that QLs with no voltage relations are irrelevant for this particular shuttle operation.

Now, assume a shuttle of qubit 6 to the right (as depicted in Fig. 3) in parallel to the left shuttle of qubit 3. This implies that all previously listed requirements (of qubit 3) need to be satisfied along with the new ones (of qubit 6). However, the fourth requirement cannot be satisfied, as the QL0 > QL1 relation we had before would have to be changed to QL0 < QL1. If this change were allowed, we would violate the fifth requirement of the first shuttle and, as a consequence, qubit 1 would shuttle to the right. Therefore, we cannot shuttle qubits 3 and 6 at the same time.
Thus, we see that scheduling parallel gates in the crossbar requires the strict simultaneous satisfaction of all signal requirements of each gate. Any violation of these conditions could result in the shuttling of unwanted qubits, in unwanted qubit interactions, or in unknown qubit states. As seen in the previous example, performing quantum operations in parallel without affecting other qubits and while meeting all signal requirements is not always possible, regardless of qubit distance. In fact, it does not matter how far qubits are from each other, but whether control lines are shared between them and whether their operational requirements and relations match. Unlike in the more popular qubit architectures based on superconducting circuits or ion traps, this form of operational constraint is unique: on the one hand, sharing control lines tackles the interconnect bottleneck; on the other hand, it intrinsically constrains the parallelization capabilities.

Finally, in other qubit architectures it is possible to perform different gate types in parallel. In the crossbar architecture, this is not always the case. For example, applying single-qubit gates and shuttling operations at the same time is not possible (see Fig. 4a), because the former requires the CL lines to carry an alternating-current (AC) signal, while the latter requires DC signals for raising or lowering the barriers.
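In practice, such a check boils down to verifying that the control-line settings demanded by two candidate operations can coexist. The sketch below encodes each shuttle as a set of lowered/raised barriers plus pairwise QL voltage relations, and rejects pairs whose requirements contradict each other; the data structures and helper names are illustrative and not taken from SpinQ.

```python
# Illustrative compatibility check for scheduling two shuttles in the same cycle.
# Line names follow the labels of Figs. 1-3; the representation is a sketch, not
# SpinQ's internal data model.

def shuttle_requirements(lowered, raised, ql_relations):
    """ql_relations: pairs (higher, lower) meaning V(higher) > V(lower)."""
    return {"lowered": set(lowered), "raised": set(raised), "ql": set(ql_relations)}

def compatible(req_a, req_b):
    # A barrier cannot be lowered for one shuttle and raised for the other.
    if (req_a["lowered"] & req_b["raised"]) or (req_b["lowered"] & req_a["raised"]):
        return False
    # QL voltage relations must not directly contradict each other
    # (a full check would also reject cyclic chains of relations).
    relations = req_a["ql"] | req_b["ql"]
    return not any((lo, hi) in relations for (hi, lo) in relations)

# Left shuttle of qubit 3 (Fig. 2): requirements 2-5 from the list above.
left_q3 = shuttle_requirements(
    lowered=["CL0"], raised=["RL0", "RL1", "CL1"],
    ql_relations=[("QL-1", "QL0"), ("QL0", "QL1")])
# Right shuttle of qubit 6 (Fig. 3): its destination QL must exceed its origin QL
# (its barrier settings are omitted here for brevity).
right_q6 = shuttle_requirements(lowered=[], raised=[], ql_relations=[("QL1", "QL0")])

print(compatible(left_q3, right_q6))   # False: the two shuttles cannot share a cycle
```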
2. Application of X or Y rotations on single qubits

As established in Sec. III, X or Y qubit rotations are implemented semi-globally, meaning that either all qubits in the odd or all qubits in the even column parity are rotated. However, during an arbitrary cycle of an algorithm's execution, not all qubits in the odd or even columns should be rotated. Therefore, to compensate for unwanted X or Y rotations, one has to adopt a specific rotation scheme such that only the targeted qubits are rotated. In this work, we have implemented the scheme introduced in [28]. We illustrate how it works in Fig. 4, in which we are interested in rotating only qubit 5. This is another unique characteristic of this architecture, as additional gates are needed to perform single-qubit rotations on specific qubits, which imposes new challenges on the mapping process.
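The scheme of Fig. 4 amounts to a fixed four-instruction sequence: rotate the target's whole column parity, shuttle the target to the other parity, apply the inverse rotation to the original parity, and shuttle the target back. The sketch below emits that sequence; the instruction names and column bookkeeping are illustrative placeholders, not SpinQ's actual intermediate representation.

```python
# Sketch of the semi-global rotation scheme of [28] (cf. Fig. 4) for rotating a single
# qubit; instruction names are illustrative.

def rotate_single_qubit(target, axis, angle, column):
    parity = column % 2                      # column parity addressed by the AC signal
    # In practice the shuttle direction depends on which neighbouring site is empty, and
    # horizontal shuttles must be timed to avoid unwanted Z-phase rotations (Sec. III).
    return [
        ("semi_global_rotation", parity, axis, +angle),   # Step 1: rotate the whole parity
        ("shuttle", target, "left"),                      # Step 2: move target to the other parity
        ("semi_global_rotation", parity, axis, -angle),   # Step 3: undo the rotation on the rest
        ("shuttle", target, "right"),                     # Step 4: return target to its column
    ]

# Rotating only qubit 5 (assumed to sit in column 2) by X(pi/2):
for instruction in rotate_single_qubit(5, "X", 1.5708, column=2):
    print(instruction)
# One logical rotation becomes four instructions, i.e. at least 3 extra gates (cf. Sec. V B).
```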
3. Routing of qubits

While we previously described the operational constraints on parallelizing various gates in the crossbar architecture, we now expand specifically on the qubit routing challenges. Routing a qubit in the crossbar means that an electron (or a hole) is physically "pushed" to an empty site (i.e., an empty quantum dot). This mechanism is similar to a quantum charge-coupled device (QCCD) ion-trap device, in which ions are shuttled through a common channel from trap to trap, assuming sufficient destination trap capacity [43]. The QCCD architecture and the crossbar architecture fundamentally differ in topology, but both require special algorithms or additional routing routines to maintain control of qubit positions and avoid potential conflicts.

Focusing on the crossbar, shuttling a qubit depends not only on specific control-signal requirements and available empty sites but also on the positions of other qubits. We illustrate this with an example in Fig. 5, in which a vertical shuttle operation of qubit 3 is indicated by a black arrow. In this case, the horizontal barrier RL0 has to be lowered and the QL lines have to be pulsed with certain voltage relations to allow for correct shuttling. However, an unwanted interaction between two other qubits in the same row pair (qubits 2 and 4, circled) is concurrently caused, regardless of the QL2 and QL3 relation. Analogously, the same issue exists for a horizontal shuttle when two horizontally adjacent qubits reside in the same columns where the shuttle takes place [27, 28]. Lastly, there can be a blocked-path conflict, where there is no empty site for a qubit to shuttle to.

Therefore, a dedicated qubit routing algorithm for the crossbar architecture has to be developed to avoid collisions, blocked paths, and unwanted interactions. Furthermore, even with such a dedicated routing algorithm, the same conflicts have to be considered and prevented when scheduling gates in parallel. For that, control signals and qubit positions must be carefully monitored within the mapping process. From the description above, it is clear that the routing and scheduling processes need to work jointly in a strategy that avoids conflicts and optimizes the algorithm success rate. This is part of SpinQ, presented in the following section.
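A routing pass therefore has to screen every proposed shuttle for exactly these situations. The sketch below checks a vertical shuttle for a blocked destination and for pairs of qubits in other columns that would be coupled by the lowered row barrier (the Fig. 5 situation); the grid representation and function names are illustrative.

```python
# Conflict screening for a vertical (upward) shuttle in the crossbar, cf. Fig. 5.
# `positions` maps qubit -> (row, column); names and conventions are illustrative.

def vertical_shuttle_conflicts(positions, target):
    occupied = {pos: q for q, pos in positions.items()}
    r, c = positions[target]
    dest = (r + 1, c)
    if dest in occupied:
        return [f"blocked path: site {dest} is occupied by qubit {occupied[dest]}"]
    conflicts = []
    # The lowered RL barrier spans the whole row boundary, so any other column holding
    # qubits on both sides of it would suffer an unwanted two-qubit interaction.
    for col in {cc for (_, cc) in occupied} - {c}:
        if (r, col) in occupied and (r + 1, col) in occupied:
            conflicts.append(
                f"unwanted interaction between qubits {occupied[(r, col)]} "
                f"and {occupied[(r + 1, col)]}")
    return conflicts

# Toy layout: shuttling qubit 3 vertically would couple qubits 2 and 4, which sit on
# opposite sides of the same row barrier in another column.
print(vertical_shuttle_conflicts({3: (0, 2), 2: (0, 0), 4: (1, 0)}, target=3))
```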
V. SPINQ – THE FIRST NATIVE COMPILATION FRAMEWORK FOR SCALABLE SPIN-QUBIT ARCHITECTURES

In this work, we present the first native compilation framework, SpinQ, dedicated to compiling and mapping quantum circuits onto scalable spin-qubit architectures, such as the previously described crossbar. We have based our mapping techniques on the previous works [27, 28] while improving them from a scalability standpoint.

Fig. 6 shows the schematic structure of our framework. As input, SpinQ accepts QASM-format files that describe quantum circuits (used as benchmarks) in a device-independent manner. To increase flexibility, custom operations and their particular attributes can be defined in a hardware architectural configuration file. It can include operational attributes such as gate durations, the mathematical description of the unitary matrices, associated gate fidelities, and architectural constraints, among others. The compiler itself consists of a series of steps (called passes) to decompose gates, route qubits, and schedule instructions. To address the unique mapping constraints of the crossbar architecture, we have conceptualized and developed the Integrated Strategy. We did so not necessarily to maximize the performance of the algorithms when executed on the crossbar, but rather to study how they behave on such an architecture and to focus on the scalability potential of spin-qubit technologies (see also Sec. VIII). The compiler's output is a QASM file that is compatible with execution on the given crossbar architecture. Optionally, a verification step can take place to ensure that the compiled circuit meets all operational constraints of the crossbar architecture without any conflicts. This step is implemented to be able to check the compatibility of architectural proposals that have not been physically realized yet. Finally, several performance metrics are extracted from the compiled circuit to evaluate algorithm performance. In the next sections, we further discuss each of the elements of the compiler.

[FIG. 4: Single-qubit gate on qubit 5. (a) Step 1: AC signals through the CL lines induce magnetic fields on qubits 1, 5, 6 and 2, thus changing their state. The direction and frequency of these signals determine which columns (red or blue) and what rotation (X or Y gate) will be applied to the corresponding qubits. (b) Step 2: The targeted qubit 5 is moved with a shuttle operation to a different column parity. For this operation, the orthogonal lines (CL and RL) open and close as barriers and the diagonal lines (QL) create potential gradients to allow qubit 5 to move (shuttle). Note that the QL lines need to have voltage relations with their neighbouring QL lines. (c) Step 3: An inverse rotation is applied to qubits 1, 6 and 2, similarly to Step 1. (d) Step 4: Target qubit 5 is moved back to its initial position with a shuttle operation.]

[FIG. 5: Example of a conflict: the operational requirements of shuttling qubit 3 downwards have lowered the RL0 barrier, thus causing an unwanted interaction between qubits 2 and 4.]
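To make the role of the architectural configuration file concrete, the snippet below sketches the kind of information it could carry. The field names and all numeric values are hypothetical placeholders for illustration; they are not taken from [1] or from the actual SpinQ configuration format.

```python
# Hypothetical architectural description of a crossbar device (illustrative only).
crossbar_config = {
    "topology": {"rows": 4, "columns": 4, "qubit_pattern": "checkerboard"},
    "native_gates": {
        "sqrt_swap":            {"qubits": 2, "duration_ns": 20,   "fidelity": 0.999},
        "shuttle":              {"qubits": 1, "duration_ns": 10,   "fidelity": 0.9999},
        "semi_global_rotation": {"qubits": "column parity", "duration_ns": 500, "fidelity": 0.999},
        "z_rotation":           {"qubits": 1, "duration_ns": 10,   "fidelity": 0.9999},
        "measurement":          {"qubits": 1, "duration_ns": 1000, "fidelity": 0.99},
    },
    "constraints": {
        "shared_control_lines": ["CL", "RL", "QL"],
        "one_gate_type_per_cycle": True,   # cf. the Integrated Strategy in Sec. V A
    },
}
```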
A. Compilation steps

The compiler consists of the following steps:

[FIG. 6: Overview of the SpinQ framework proposed in this paper.]

Decomposition of quantum gates. Input QASM quantum circuits are transformed into a custom intermediate representation (IR) data format. Quantum gates are then decomposed into gates native to the architecture, based on the decomposition sequences specified in the architectural configuration file.

Physical initialization of spin qubits. A checkerboard pattern has been proposed [42] to allow space for qubits and ancilla qubits to move [27, 28]. The physical space between the qubits not only facilitates shuttling qubits while avoiding possible conflicts but also reduces crosstalk and enables surface-code error correction [1]. As we will discuss later, maintaining this placement throughout a circuit's execution plays an integral role in our compilation strategy. Having said that, initializing qubits in alternative patterns and changing them during execution is possible. This flexibility can be particularly advantageous for highly specialized mapping techniques for the crossbar as well as for spin-qubit architectures in general.

Virtual-to-physical qubit initial placement. The current version of SpinQ associates the virtual qubits of an algorithm with physical qubits (placed in the checkerboard pattern) in a one-to-one manner by numbering the physical qubits from left to right and from bottom to top (as shown in Fig. 1). In the results sections VII and VIII, we provide insights on how common initial placement algorithms can be adapted to improve the performance of spin-qubit architectures (such as the crossbar).
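A minimal sketch of this one-to-one placement, assuming a grid indexed by (row, column) with qubits sitting on the even-parity checkerboard sites (helper names are illustrative):

```python
# One-to-one virtual-to-physical initial placement: physical qubits sit on the
# checkerboard sites of a rows x cols crossbar and are numbered from left to right and
# from bottom to top (row 0 taken as the bottom row, cf. Fig. 1); virtual qubit i is
# simply assigned to physical qubit i.

def checkerboard_sites(rows, cols):
    # Assumption for this sketch: sites with even (row + col) hold qubits when idle.
    return [(r, c) for r in range(rows) for c in range(cols) if (r + c) % 2 == 0]

def initial_placement(num_virtual_qubits, rows, cols):
    sites = checkerboard_sites(rows, cols)
    if num_virtual_qubits > len(sites):
        raise ValueError("circuit needs more qubits than the crossbar provides")
    return {v: sites[v] for v in range(num_virtual_qubits)}

print(initial_placement(4, rows=4, cols=4))
# {0: (0, 0), 1: (0, 2), 2: (1, 1), 3: (1, 3)}
```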
Integrated Strategy for Routing and Scheduling. As explained in Sec. IV, both the routing and the scheduling techniques must avoid conflicts. To do so, a specific strategy needs to be followed. There can be various strategies for various goals, with trade-offs between performance and compilation time. The presented Integrated Strategy tilts towards minimizing compilation time, while having good prospects of being competitive against other solutions that focus on algorithm performance, as will be discussed in Sec. VIII.

Firstly, in the Integrated Strategy, the checkerboard-pattern qubit placement [1], also known as the "idle configuration" in [28], should be maintained as much as possible. This provides at least two empty sites for every qubit to move towards at the beginning of each cycle. To maintain the checkerboard pattern throughout circuit execution when routing for two-qubit gates, a conflict-free shuttle-based SWAP technique can be used [27], as shown in Fig. 7. Note that this movement of qubits results in a gate overhead of 4 (i.e., 4 shuttle operations) but a depth overhead of only 2, as the two shuttle pairs can always be executed in parallel. To bring two qubits to the appropriate sites and allow a two-qubit interaction, multiple shuttle-based SWAPs might be performed. For that, we have implemented a shortest-path algorithm based on the Manhattan distance. When one of the qubits has reached the desired position, the next step is a horizontal shuttle, either to the left or to the right, after which the target and control qubits are vertically adjacent for the interaction and the checkerboard pattern is temporarily broken. After the √SWAP, a shuttle instruction returns the qubit to its previous position and the checkerboard pattern is restored. Note that the aforementioned process can only be executed successfully in that particular order; otherwise there can be a routing conflict.
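A back-of-the-envelope reconstruction of this routing cost, under the assumptions stated above (both qubits start on same-parity checkerboard sites, and detours around occupied sites or signal conflicts are ignored); the helper below is illustrative and not SpinQ's actual shortest-path routine:

```python
# Routing cost of bringing two qubits together under the Integrated Strategy, derived
# from the Manhattan distance between their checkerboard sites (illustrative sketch).
# Shuttle-based SWAPs (4 shuttles, 2 cycles each) move one qubit diagonally until the
# pair are diagonal neighbours; one horizontal shuttle then makes them vertically
# adjacent for the sqrt(SWAP), and a final shuttle restores the checkerboard pattern.

def routing_cost(pos_a, pos_b):
    row_gap = abs(pos_a[0] - pos_b[0])
    col_gap = abs(pos_a[1] - pos_b[1])
    swaps = max(row_gap, col_gap) - 1          # shuttle-based SWAPs until diagonal adjacency
    return {
        "manhattan_distance": row_gap + col_gap,
        "shuttle_based_swaps": swaps,
        "extra_shuttles": 4 * swaps + 2,       # + the shuttle before and after the sqrt(SWAP)
        "extra_cycles": 2 * swaps + 2,
    }

print(routing_cost((0, 0), (3, 3)))
# {'manhattan_distance': 6, 'shuttle_based_swaps': 2, 'extra_shuttles': 10, 'extra_cycles': 6}
```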
So far, we have only discussed a routing technique for bringing qubits together to perform two-qubit gates. However, qubit routing is also needed for applying X or Y rotations to specific qubits and for the shuttle-based Z rotations, as discussed in Sec. III. As a consequence, the idle configuration should be maintained when routing for these gates as well. But once again, routing for single-qubit gates before the scheduling stage can be problematic, as it can cause conflicts. For that reason, the second consideration of the Integrated Strategy is the integration of single-qubit gate routing within the scheduling stage, hence the name "integrated".

The Integrated Strategy then continues with two passes. In the first pass, the scheduler parallelizes X or Y gates in an ideal manner and schedules Z gates individually, ignoring any potential conflicts. This is no different from the single-qubit gate scheduling processes proposed for other qubit architectures. The difference lies in the second pass, which integrates the routing procedures for the X, Y and Z gates. The second pass iterates over each cycle produced by the first pass. For each cycle, there are two possibilities: (a) if no conflicts are detected when scheduling the shuttle instructions required for each single-qubit gate, the new shuttle instructions are inserted between the current cycle and the next; (b) if conflicts are detected, the subset of problematic gates is removed and stored. Once the non-problematic gate subset has been scheduled according to case (a), the problematic subset is recalled and iterated over again. This time it constitutes a conflict-free cycle and is scheduled according to case (a). In this way, the second pass loops in total two times whenever a conflict is detected.
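The two-case logic of the second pass can be sketched as follows; `expand` (the shuttles a gate needs) and `conflicts_with` (the signal-compatibility check of Sec. IV) stand in for SpinQ's internal routines and are illustrative:

```python
# Sketch of the second pass: gates whose shuttle instructions conflict with those
# already scheduled in the cycle are deferred and scheduled as one extra cycle.

def schedule_cycle(gates, expand, conflicts_with):
    scheduled, deferred = [], []
    for gate in gates:
        shuttles = expand(gate)                                   # shuttles this gate needs
        if any(conflicts_with(shuttles, other) for other in scheduled):
            deferred.append(gate)                                 # case (b): store the gate
        else:
            scheduled.append(shuttles)                            # case (a): no conflict
    return scheduled, deferred

def second_pass(cycles, expand, conflicts_with):
    output = []
    for gates in cycles:
        ok, deferred = schedule_cycle(gates, expand, conflicts_with)
        output.append(ok)
        if deferred:                                              # re-iterate the deferred subset
            output.append(schedule_cycle(deferred, expand, conflicts_with)[0])
    return output

# Toy demo: each gate needs a set of control lines; two gates conflict if they share one.
needs = {"X(q1)": {"CL0", "QL0"}, "X(q2)": {"CL2", "QL2"}, "Z(q3)": {"QL0"}}
print(second_pass([["X(q1)", "X(q2)", "Z(q3)"]], needs.get, lambda a, b: bool(a & b)))
# X(q1) and X(q2) share a cycle; Z(q3) is deferred to an additional cycle.
```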
Overall, the current implementation does not parallelize gates of different types in the same cycle; each cycle is thus dedicated to one instruction type. Fortunately, the strategy described above and the extensions suggested in Sec. VIII can be adapted to a real setup. As explained in Sec. IV, a fabricated crossbar device will most likely have material imperfections, thus requiring pulse calibration per site. As pointed out in [28, 44], pulsing control lines prematurely to account for material variations could cause unwanted interactions. Since, however, the Integrated Strategy (or an extension thereof) exclusively schedules gates of the same type in each cycle, fine-tuning the pulses within a cycle is possible before moving to the next.
+ B.
816
+ Performance metrics
817
+ We will now introduce the metrics used in this work to eval-
818
+ uate the performance of SpinQ when mapping different algo-
819
+ rithms on the crossbar architecture.
820
+ Gate overhead. One commonly used metric to evaluate
821
+ the performance of a mapper and its underlying architecture
822
+ is gate overhead. We calculate it as the percentage ratio of
823
+ additional gates inserted by the mapper to the number of gates
824
+ after decomposition. We do not count decomposition gate
825
+ overhead as it is always proportional to the number of gates.
826
+ Getting a clear view of the various sources of gate overhead
827
+ will help to form useful insights. Note that, unlike supercon-
828
+ ducting architectures where gate overhead results from rout-
829
+ ing instructions (i.e. SWAP gates) for performing two-qubit
830
+ gates, in the crossbar, it can be caused by single-qubit gates as
831
+ well. The main sources of gate overhead are the following:
832
+ • 4 additional shuttle instructions per shuttle-based
833
+ SWAP for two-qubit gates
834
+ • At least 3 additional instructions for each X or Y rota-
835
+ tion gate within the semi-global rotation scheme
836
+ • 2 additional shuttle instructions for each two-qubit gate
837
+ • 1 additional shuttle operation for each Z rotation gate
838
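+ As a simple illustration of the gate-overhead metric defined above (a sketch,
+ not SpinQ's reporting code):
+
+ def gate_overhead(decomposed_gates, mapped_gates):
+     """Percentage of mapper-inserted gates relative to the decomposed circuit."""
+     return 100.0 * (mapped_gates - decomposed_gates) / decomposed_gates
+
+ # Example: a 200-gate decomposed circuit that grows to 700 gates after mapping
+ # has a gate overhead of 250%.
+ assert gate_overhead(200, 700) == 250.0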
+ Depth overhead. Another commonly used metric to eval-
839
+ uate the performance of a mapper and its underlying architec-
840
+ ture is the depth overhead of a circuit. The depth of a circuit is
841
+
842
+ [Fig. 7 graphics: crossbar grid with column lines CL0–CL2, row lines RL0–RL2 and qubit lines QL-3–QL3; panels (a) Horizontal shuttling and (b) Vertical shuttling. Caption below.]
886
+ FIG. 7: Shuttle-based SWAP for two-qubit gate routing: With this technique, two diagonally neighbouring qubits exchange
887
+ their position by consecutively performing two horizontal and two vertical shuttles.
888
+ equal to the minimum number of time steps required when
889
+ executing gates in parallel [9, 10, 45–47]. We calculate depth
890
+ overhead as the percentage ratio of additional depth pro-
891
+ duced by the mapper to the circuit depth after decomposition.
892
+ Note that the initial circuit depth is calculated after scheduling
893
+ the circuit only by its gate dependencies, meaning without any
894
+ architectural constraints. The main sources of depth overhead
895
+ are:
896
+ • At least 3 additional cycles for each X or Y rotation
897
+ gate due to the semi-global rotation scheme
898
+ • 2 additional cycles per shuttle-based SWAP for two-
899
+ qubit gates
900
+ • 2 additional cycles for each two-qubit gate
901
+ • 1 additional cycle for each Z rotation gate
902
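+ The baseline depth used for this metric (scheduling by gate dependencies
+ only, with no architectural constraints) can be computed with a simple
+ as-soon-as-possible pass; the sketch below is illustrative, not SpinQ's code.
+
+ def circuit_depth(gates):
+     """gates: iterable of tuples of qubit indices, in program order."""
+     busy_until = {}                  # qubit index -> first free cycle
+     depth = 0
+     for qubits in gates:
+         start = max((busy_until.get(q, 0) for q in qubits), default=0)
+         for q in qubits:
+             busy_until[q] = start + 1
+         depth = max(depth, start + 1)
+     return depth
+
+ def depth_overhead(decomposed_depth, mapped_depth):
+     return 100.0 * (mapped_depth - decomposed_depth) / decomposed_depth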
+ Estimated Success Probability. A key metric to assess the
903
+ performance not only of the compiler but in general of a quan-
904
+ tum computing system is the algorithm success rate. From an
905
+ experimental point of view, the algorithm success rate is cal-
906
+ culated by executing the algorithm several times on a given
907
+ (real) quantum processor and creating the distribution of suc-
908
+ cessful executions, based on the expected measurement. An
909
+ alternative way to calculate the success rate without the need
910
+ for a real quantum processor is by using a numerical approxi-
911
+ mation method. One of the most commonly used methods to
912
+ do so is considering the estimated success probability (ESP)
913
+ of an algorithm [48]:
914
+ ESP = \prod_i \prod_j \text{gate fidelity}_{i,j}    (1)
921
+ where i represents the ith time step and j the jth gate in the
922
+ ith time step.
923
+ This method is far more efficient compared to using a
924
+ Hamiltonian model. However, the accuracy of the estima-
925
+ tion can be low due to its simplicity. To expand it, we have
926
+ considered a per-type and per-location variability of gate fi-
927
+ delities, based on a normal distribution. This implies that, for
928
+ instance, a two-qubit gate (√SWAP) will have lower fidelity
931
+ than a single-qubit gate and that the actual fidelity will depend
932
+ on the exact location in the topology. These expansions con-
933
+ stitute a more realistic, i.e., closer to a real device, estimation
934
+ of circuit success probability:
935
+ ESP = \prod_i \prod_j \text{gate fidelity}_{i,j}^{x,y}    (2)
943
+ where i represents the ith time step, j the jth gate in the ith
944
+ time step, and x, y are the physical qubit(s) coordinates.
945
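+ The extended estimate of Eq. (2) can be computed as in the following sketch;
+ the nominal fidelities and the spread of the normal distribution below are
+ illustrative assumptions, not device data.
+
+ import random
+
+ NOMINAL_FIDELITY = {"x": 0.9999, "y": 0.9999, "z": 0.9999,
+                     "shuttle": 0.9999, "sqswap": 0.9998}
+
+ def sample_fidelity_map(rows, cols, sigma=5e-5, seed=0):
+     """Draw one per-type, per-location fidelity for every site of the grid."""
+     rng = random.Random(seed)
+     return {(g, x, y): min(rng.gauss(f, sigma), 1.0)
+             for g, f in NOMINAL_FIDELITY.items()
+             for x in range(cols) for y in range(rows)}
+
+ def estimated_success_probability(schedule, fidelity):
+     """schedule: list of time steps, each a list of (gate_type, x, y) tuples."""
+     esp = 1.0
+     for step in schedule:
+         for gate_type, x, y in step:
+             esp *= fidelity[(gate_type, x, y)]
+     return esp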
+ Compilation time. In this work, we are not only inter-
946
+ ested in building mapping techniques themselves, but also in
947
+ their scalability potential. This necessitates that our proposed
948
+ SpinQ strategy should remain efficient for a variety of quan-
949
+ tum circuit parameters (e.g., number of qubits or percentage
950
+ of two-qubit gates). By measuring the compilation time for
951
+ mapping quantum circuits, we get a reference of the scalabil-
952
+ ity of our implementations.
953
+ C.
954
+ Verification
955
+ A verification tool is important to this work due to the
956
+ lack of a working device for real-system testing. The tool
957
+ searches for mismatches between all shuttling sequences
958
+ and the qubits’ position history stored during compilation. It
959
+ also checks for conflicts, architectural constraint violations
960
+ and state-vector mismatches between and within each stage of the
961
+ mapper. The latter uses the Qiskit Aer library [49].
962
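+ The state-vector check can be implemented along the following lines (a
+ compact sketch using qiskit.quantum_info rather than SpinQ's exact Qiskit Aer
+ integration; it also ignores any final qubit permutation the mapper may add):
+
+ from qiskit import QuantumCircuit
+ from qiskit.quantum_info import Statevector, state_fidelity
+
+ def same_state(before: QuantumCircuit, after: QuantumCircuit, tol=1e-9):
+     """True if both circuits prepare the same state from |0...0>."""
+     fid = state_fidelity(Statevector.from_instruction(before),
+                          Statevector.from_instruction(after))
+     return abs(fid - 1.0) < tol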
+
963
964
+ VI.
965
+ EXPERIMENTAL METHODOLOGY
966
+ Benchmarks. We have generated 3,630 random uniform
+ algorithms [50] containing X, Y, Z and √SWAP gates (all
970
+ native to the crossbar architecture) to be used as benchmarks.
971
+ With this set, we can vary on demand the number of gates,
972
+ number of qubits, and percentage of two-qubit gates.
973
+ For
974
+ example, a random uniform benchmark with 50% of two-
975
+ qubit gates relative to single-qubit gates will have 33.33% of
976
+ X or Y gates, 33.33% of Z gates, and 33.33% of two-qubit
977
+ gates. Generating synthetic circuits provides a well-controlled
978
+ benchmark collection from which we can better understand
979
+ results and form insights. Moreover, we use real benchmarks
980
+ from the RevLib library in a [5 - 1400] gate range [51]. Quan-
981
+ tum circuits from this library are often used in related quantum
982
+ circuit compilation works [9, 11, 12] and it consists of quan-
983
+ tum algorithms with parameters ranging from 3 to 16 qubits,
984
+ 18.75% to 100% of two-qubit gates, and 5 to 512,064 gates.
985
+ Finally, we also consider quantum circuits from the Qlib li-
986
+ brary [52], which contains real quantum algorithms of increas-
987
+ ing size.
988
+ Benchmarks characterization. When it comes to perfor-
989
+ mance evaluation, it is important to not only consider proper-
990
+ ties of the crossbar architecture but also the characteristics of
991
+ quantum circuits. The simplest and most commonly [14] used
992
+ parameters of quantum circuits are number of qubits, number
993
+ of gates, and absolute or relative (i.e., percentage) number of
994
+ two-qubit gates. However, relying on only these three characteristics can
995
+ be misleading for two reasons. Firstly, two benchmarks, for
996
+ instance, could have the same parameter values but heavily
997
+ differ in the circuit’s structure [14]. If one of them has
+ all pairs of qubits interacting with each other, it will require more
+ routing than the other, which might have the same number of
+ interactions but between only one pair of qubits. The
1001
+ structure of a quantum circuit is derived from its qubit inter-
1002
+ action graph (QIG) which represents the number and distri-
1003
+ bution of interactions (i.e., two-qubit gates) between virtual
1004
+ qubits. Several internal circuit parameters can be extracted
1005
+ from the QIG that better distil its properties [14]. Having said
1006
+ that, we analyze QIGs visually only, as this is still an active
1007
+ field of research [14]. Despite that, we can nonetheless make
1008
+ concrete conclusions and form insights, making visual QIG
1009
+ assessments a viable tool to characterize algorithms. The sec-
1010
+ ond reason is that initial gates can be decomposed to natively
1011
+ supported instructions for the underlying architecture. This
1012
+ means that the number of gates and ratios (percentages) be-
1013
+ tween each gate type can differ from the initial set to the actual
1014
+ executable set, meaning that evaluations can become more ac-
1015
+ curate when accounting for the decomposed set.
1016
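+ A qubit interaction graph can be extracted from a gate list with a few lines
+ of Python (an illustrative sketch; edge weights count two-qubit gates):
+
+ from collections import Counter
+
+ def qubit_interaction_graph(gates):
+     """gates: iterable of tuples of virtual-qubit indices, one tuple per gate."""
+     qig = Counter()
+     for qubits in gates:
+         if len(qubits) == 2:
+             qig[tuple(sorted(qubits))] += 1
+     return qig
+
+ # Two circuits with the same number of two-qubit gates but different structure:
+ spread = [(0, 1), (1, 2), (2, 3), (3, 0)]   # interactions over four pairs
+ single = [(0, 1), (0, 1), (0, 1), (0, 1)]   # all interactions on one pair
+ print(qubit_interaction_graph(spread), qubit_interaction_graph(single))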
+ Experimental Setup. We run SpinQ on a laptop with an
1017
+ Intel(R) Core(TM) i7-3610QM CPU @ 3.20GHz and 16GB
1018
+ DDR3 memory. SpinQ is written in Python 3.9.6.
1019
+ VII.
1020
+ EVALUATION AND ANALYSIS
1021
+ In this Section, we present an in-depth performance anal-
1022
+ ysis of SpinQ when mapping a broad range of quantum al-
1023
+ gorithms on the crossbar architecture. We then form architec-
1024
+ tural and mapping insights for each performance metric. More
1025
+ specifically, gate overhead and corresponding insights are pre-
1026
+ sented in Sec. VII A and VII B, depth overhead in Sec. VII C
1027
+ and VII D, and ESP in Sec. VII E and VII F. Finally, we show
1028
+ results regarding compilation time of SpinQ in Sec. VII G to
1029
+ assess its scalability.
1030
+ A.
1031
+ Gate Overhead
1032
+ To start with, we analyse the gate overhead trend in a wide
1033
+ range of quantum algorithms. In Fig. 8 we have mapped ran-
1034
+ dom uniform circuits on the crossbar architecture. Focusing
1035
+ on Fig. 8a, which reaches up to 25 qubits, we observe that as
1036
+ we go from low to high number of qubits and from low to high
1037
+ percentage of two-qubit gates, the gate overhead increases
1038
+ (from blue to red color). More precisely, higher qubit counts
1039
+ imply larger crossbar topologies, thus potentially longer rout-
1040
+ ing distances, i.e., more shuttle-based SWAPs. Furthermore,
1041
+ higher percentages of two-qubit gates potentially lead to more
1042
+ routing of qubits. These observations verify that the main
1043
+ source of gate overhead is indeed the routing of qubits for
1044
+ two-qubit gates (see Sec. V A). We also notice that the num-
1045
+ ber of gates has a small but noticeable influence on the gate
1046
+ overhead. To further observe the trend when increasing the
1047
+ number of qubits, we changed the range of qubits from [3
1048
+ – 25] to [25 – 99] in Fig. 8b. We see once more that the
1049
+ gate overhead increases as we go from low to high number
1050
+ of qubits and percentage of two-qubit gates. As expected, the
1051
+ gate overhead, shown on the color bars, of the [25 – 99] qubit
1052
+ range is on average 102.49% higher than that of the [3 – 25]
1053
+ qubit range because of the increased routing distances.
1054
+ So far, the above random algorithms were generated to have
1055
+ control of different circuit parameters (i.e., number of qubits
1056
+ and gates and two-qubit gate percentage) in a way to broadly
1057
+ cover the parameter space up to certain boundaries. How-
1058
+ ever, they might not be representative of real algorithms from
1059
+ a circuit structure point of view (e.g., how two-qubit gates
1060
+ are distributed among qubits or the degree of operation par-
1061
+ allelism). Therefore, we then mapped real algorithms from
1062
+ the RevLib and Qlib libraries resulting in the gate overhead
1063
+ shown in Fig. 9, Fig. 10, and Fig. 11. In Fig. 9 we can
1064
+ observe that benchmarks “cluster” together in similar colours,
1065
+ namely shades of blue, green, yellow and red. This implies
1066
+ that similar benchmarks, meaning with similar parameters and
1067
+ structure, have similar gate overhead. Note that whereas ran-
1068
+ dom uniform algorithms have all the same circuit structure
1069
+ because of the way they are generated, RevLib algorithms
1070
+ present different structural parameters not only compared to
1071
+ the randomly generated circuits but also between them. For
1072
+ this reason, correlations such as the higher the number of
1073
+ qubits and percentage of two-qubit gates gets, the higher the
1074
+ gate overhead will be, are not as evident as before (i.e. for
1075
+ random circuits).
1076
+ To further analyse how structural circuit parameters impact
1077
+ the gate overhead, we mapped algorithms with similar number
1078
+ of gates, qubits, percentage of two-qubit gates and QIG from
1079
+
1080
+ [Fig. 8 plots. (a) Qubits [3 – 25]: gate overhead MAX=1114.28, AVG=473.69, MED=423.23, MIN=124.53. (b) Qubits [25 – 99]: gate overhead MAX=2416.29, AVG=959.18, MED=871.93, MIN=65.03. Axes: gates (before decomp.), qubits, 2-Q gate percentage (before decomp.); colour scale: Gate Overhead [%]. Caption below.]
1137
+ FIG. 8: Resulting gate overhead when 3, 630 random uniform quantum algorithms are mapped onto the crossbar architecture.
1138
+ The three axes correspond to benchmark characteristics, namely, the number of gates [50 - 20,000], number of qubits [3 - 99]
1139
+ (split into two subfigures), and two-qubit gate percentage [0 – 100].
1140
+ the Qlib library onto the crossbar architecture (see Fig. 10).
1141
+ With these simulations, we also want to perform a scalability
1142
+ analysis of the algorithms which is not possible with RevLib
1143
+ circuits. First, note that the Cuccaro Adder (top line in Fig.
1144
+ 10) has a small drop in the percentage of two-qubit gates that
1145
+ goes from 71.43% to 66.75% when increasing in size (num-
1146
+ ber of qubits) whereas the Vbe Adder (bottom line) main-
1147
+ tains a lower percentage of 50% for the same increase in size.
1148
+ One can immediately observe that the Cuccaro Adder shows a
1149
+ higher gate overhead up to 284% due to the higher two-qubit
1150
+ gate percentage compared to the 271% of Vbe Adder, match-
1151
+ ing the conclusions made in Fig. 8. However, as we empha-
1152
+ sized above, in the case of real algorithms comparisons can
1153
+ only be properly made when looking not only at their circuit
1154
+ parameters but also at their more structural ones such as the
1155
+ QIG.
1156
+ For this reason, in Fig. 11 we show the derived QIGs from
1157
+ Vbe Adders’ 40-qubit circuit, Cuccaro Adders’ 38-qubit cir-
1158
+ cuit and Cuccaro Multipliers’ 21-qubit circuit alongside their
1159
+ gate overhead in relation to the number of qubits and percent-
1160
+ age of two-qubit gates. In these QIGs, nodes correspond to
1161
+ qubits and edges to qubit interactions, i.e., two-qubit gates.
1162
+ The particular size selection of these QIGs was made to easily
1163
+ show their structure. We immediately observe similarities in
1164
+ the QIGs of the two Adders as the distribution of interactions
1165
+ is almost identical. More specifically, we see 2 to 3 inter-
1166
+ actions per qubit on average, mostly with qubits of nearby logical
1167
+ qubit number. Therefore, we can conclude that the higher gate
1168
+ overhead of Cuccaro Adder is due to the higher percentage of
1169
+ two-qubit gates, compared to Vbe Adder.
1170
+ However, note that the Cuccaro Multiplier has the highest
1171
+ gate overhead of all three (309%) despite having a lower two-
1172
+ qubit gate percentage than the Cuccaro Adder. The reason be-
1173
+ hind this is the difference in its QIG, which is much more con-
1174
+ nected implying a denser qubit interaction distribution com-
1175
+ pared to the others. Because of this, more routing is needed to
1176
+ connect (nearly) all qubits across the entire topology.
1177
+ B.
1178
+ Insights from gate overhead analysis
1179
+ Accounting for the routing constraints, as discussed in Sec.
1180
+ IV, mapping on the crossbar architecture is not a trivial task.
1181
+ In fact, we have emphasized the importance of conceptu-
1182
+ alizing and developing new routing techniques that specif-
1183
+ ically can address the unique mapping challenges of spin-
1184
+ qubit architectures. More specifically, with the adoption of
1185
+ the checkerboard pattern combined with the shuttle-based
1186
+ SWAPs, we can provide a scalable solution of qubit routing
1187
+ for two-qubit gates. Additionally, the complexity only scales
1188
+ with the number of two-qubit gates, making it a viable
1189
+ solution for large-scale implementation. However, this tech-
1190
+ nique makes two-qubit gate routing the highest source of gate
1191
+ overhead and it can dramatically increase it with higher qubit
1192
+ counts and a higher percentage of two-qubit gates (see Fig.
1193
+ 8 and 10). Moreover, in Fig. 11 we saw that gate overhead
1194
+ can also be increased by a more connected QIG even if other
1195
+ circuit parameter values are comparatively lower. This shows
1196
+
1197
+ [Fig. 9 plot: gate overhead of the Integrated Strategy, MAX=306.6, AVG=210.59, MED=205.72, MIN=167.0. Axes: gates (before decomp.), qubits, 2-Q gate percentage (before decomp.). Caption below.]
1234
+ FIG. 9: Resulting gate overhead when mapping quantum
1235
+ algorithms from the RevLib library onto the crossbar
1236
+ architecture. The three axes correspond to benchmark
1237
+ characteristics, namely, number of gates [5 - 1400], number
1238
+ of qubits [3 - 16] and two-qubit gate percentage [18.75 -
1239
+ 100].
1240
+ the importance of basing circuit performance evaluation not
1241
+ only on simple circuit parameters but also on other ‘hidden’
1242
+ structural characteristics such as the qubit interaction distribu-
1243
+ tion. Having said that, the second biggest source of gate over-
1244
+ head originates from X or Y qubit rotations, as it produces at
1245
+ least 3 additional gates compared to 4 additional gates for each
1246
+ shuttle-based SWAP. This is due to the unprecedented semi-
1247
+ global rotation scheme which is the first time that single-qubit
1248
+ gates require additional instructions (i.e., produce gate over-
1249
+ head) compared to other qubit architectures. The previous
1250
+ two facts inspire novel mapping techniques for the crossbar
1251
+ architecture (and potentially for other spin-qubit architectures
1252
+ with similar characteristics) that can increase performance,
1253
+ namely:
1254
+ 1. Developing a routing solution dedicated to accounting
1255
+ for potential conflicts and constraints can reduce the
1256
+ gate overhead resulting from the shuttle-based SWAPs.
1257
+ Such a generalized routing algorithm could also include
1258
+ SWAP interactions (two consecutive √SWAPs) and
1261
+ CPHASE interactions. For instance, there can be sce-
1262
+ narios in which choosing a noisier two-qubit interaction,
+ for the purpose of avoiding an upcoming conflict,
+ could result in a higher ESP. Additionally, such a heuris-
1265
+ tic algorithm can allow multiple control or target qubits
1266
+ ([10]) to be shuttled around the topology allowing for
1267
+ parallelization of many two-qubit gates while avoiding
1268
+ high error variability in the topology [18]. However,
1269
+ [Fig. 10 plot: gate overhead of the Cuccaro and Vbe Adders, colour scale 200–280%. Axes: gates (before decomp.), qubits, 2-Q gate percentage (before decomp.). Caption below.]
1295
+ FIG. 10: Resulting gate overhead when mapping the Cuccaro
1296
+ Adder (top line of data points) and the Vbe Adder (bottom)
1297
+ quantum algorithms from the Qlib library onto the crossbar
1298
+ architecture. The three axes correspond to benchmark
1299
+ characteristics, namely, number of gates [4 - 385], number of
1300
+ qubits [4 - 130] and two-qubit gate percentage [50 - 71.43].
1301
+ such a solution must be implemented with complexity
1302
+ in mind such that it will not make it unviable on large
1303
+ scale.
1304
+ 2. A more efficient routing algorithm for single-qubit
1305
+ gates can significantly reduce the gate overhead, such
1306
+ that a specific rotation scheme to rotate targeted qubits
1307
+ is used less often. Such an algorithm can route qubits to
1308
+ the appropriate odd or even columns before the execu-
1309
+ tion of single-qubit gates without the need to apply any
1310
+ scheme afterwards (see the example in Sec. IV).
1311
+ 3. Combining the previous two points, there can be a uni-
1312
+ fied algorithm implementing both.
1313
+ In such an algo-
1314
+ rithm, upcoming routing for single-qubit gates is ac-
1315
+ counted for when routing for two-qubit gates, and vice
1316
+ versa.
1317
+ 4. Finally, an initial placement algorithm can take into ac-
1318
+ count not only two-qubit gates but single-qubit gates as
1319
+ well. Since the positions of qubits influence the gate
1320
+ overhead resulting from single-qubit gates (due to the
1321
+ semi-global rotation scheme), an extension of an initial
1322
+ placement algorithm accounting for single-qubit gates
1323
+ can reduce the gate overhead.
1324
+ Last but not least, we have emphasized that to concretely
1325
+ evaluate results, there has to be sufficient characterization of
1326
+
1327
+ [Fig. 11 panels: Vbe Adder, Cuccaro Adder and Cuccaro Multiplier QIGs with their gate overhead. Caption below.]
1331
+ FIG. 11: Resulting gate overhead when the Vbe Adder, Cuccaro Adder and Cuccaro Multiplier from the Qlib library are
1332
+ mapped onto the crossbar architecture alongside their qubit interaction graphs (QIGs) consisting of 40, 38 and 21 qubits,
1333
+ respectively. The y-axis represents the two-qubit gate percentage and the x-axis the number of qubits. We see that gate overhead is
1334
+ influenced not only by the number of qubits and two-qubit gate percentage but also by the qubit interaction distribution.
1335
+ benchmarks, especially when evaluating novel architectures
1336
+ and mapping techniques. In our analysis, we did not rely only
1337
+ on simple benchmark parameters, such as the percentage of
1338
+ two-qubit gates, but also on the internal structure of bench-
1339
+ marks using the qubit interaction graph (QIG).
1340
+ C.
1341
+ Depth Overhead
1342
+ This time, we analyse the depth overhead when mapping
1343
+ onto the crossbar the same random uniform benchmark set as
1344
+ in Fig. 8. In Fig. 12, it can be observed that the trend (colours)
1345
+ of the depth overhead changes for different ranges of number
1346
+ of qubits as shown in the two subfigures. Knowing that the
1347
+ main source of depth overhead originates from X or Y gates
1348
+ (at least 3 additional cycles), we expect the depth overhead to
1349
+ become higher in lower regions of two-qubit gate percentage.
1350
+ That is observed in Fig. 12a, where the number of qubits goes
1351
+ up to 25. However, moving on to Fig. 12b, we see that this
1352
+ trend changes. Now, due to the higher number of qubits, rout-
1353
+ ing distances have increased, thus routing for two-qubit gates
1354
+ dominates the depth overhead. This is apparent by its increase
1355
+ (from blue to red colour) as we go from lower qubit counts to
1356
+ higher qubit counts, and as we go from low to higher percent-
1357
+ age of two-qubit gates. Finally, this fact is also apparent in
1358
+ the absolute values of depth overhead of the two subfigures.
1359
+ Note also that the number of gates has a slight influence on
1360
+ the depth overhead, but it is not as relevant as the other char-
1361
+ acteristics discussed above.
1362
+ Moving on, Fig. 13 shows the depth overhead of a Cuc-
1363
+ caro Adder when scaling it up from 4 to 130 qubits. In the
1364
+ range of 4 to 20 qubits, we observe an increase in depth over-
1365
+ head as the percentage of two-qubit gates decreases, which
1366
+ aligns with the remarks about the main source of depth over-
1367
+ head (i.e., the X or Y gates). Then, for an increasing number
1368
+ of qubits (from 20 qubits on) and at an almost constant two-
1369
+ qubit gate percentage (67%), the depth overhead increases at
1370
+ a slower rate. Here we conclude, once again, that two-qubit
1371
+ gate routing starts to dominate the depth overhead as routing
1372
+ distances become larger.
1373
+ In most previous works, the amount of two-qubit gates is
1374
+ the main circuit characteristic to anticipate how much qubit
1375
+ routing will be needed for a specific quantum algorithm and
1376
+ therefore the major and only source of gate/depth overhead.
1377
+ However, in the crossbar architecture, and potentially in other
1378
+ spin-qubit crossbar designs, single-qubit gates can also con-
1379
+
1380
+ 1334
1381
+ 33
1382
+ 36
1383
+ 33.31
1384
+ 28
1385
+ 22
1386
+ 1913
1387
+ Gates (before decomp.)
1388
+ 0 25005000750010000
1389
+ 12500
1390
+ 15000
1391
+ 17500
1392
+ 20000
1393
+ Qubits
1394
+ 5
1395
+ 10
1396
+ 15
1397
+ 20
1398
+ 25
1399
+ 2-Q Gate Percentage (before decomp.)
1400
+ 0
1401
+ 20
1402
+ 40
1403
+ 60
1404
+ 80
1405
+ 100
1406
+ MAX=6290.98, AVG=2217.92, MED=2132.74, MIN=262.0
1407
+ Depth Overhead [%]
1408
+ 1000
1409
+ 2000
1410
+ 3000
1411
+ 4000
1412
+ 5000
1413
+ 6000
1414
+ (a)
1415
+ Gates (before decomp.)
1416
+ 0 25005000750010000
1417
+ 12500
1418
+ 15000
1419
+ 17500
1420
+ 20000
1421
+ Qubits
1422
+ 30
1423
+ 40
1424
+ 50
1425
+ 60
1426
+ 70
1427
+ 80
1428
+ 90
1429
+ 100
1430
+ 2-Q Gate Percentage (before decomp.)
1431
+ 0
1432
+ 20
1433
+ 40
1434
+ 60
1435
+ 80
1436
+ 100
1437
+ MAX=27786.21, AVG=12530.1, MED=11934.55, MIN=2250.0
1438
+ Depth Overhead [%]
1439
+ 5000
1440
+ 10000
1441
+ 15000
1442
+ 20000
1443
+ 25000
1444
+ (b)
1445
+ FIG. 12: Resulting depth overhead when 3, 630 random uniform quantum algorithms are mapped onto the crossbar
1446
+ architecture. The three axes correspond to benchmark characteristics, namely, number of gates [50 - 20,000], number of qubits
1447
+ [3 - 99] (split into two subfigures), and two-qubit gate percentage [0% – 100%].
1448
+ tribute to this overhead as discussed. It is then important to
1449
+ have a closer look at the X or Y rotation gate percentage and
1450
+ further analyse how it impacts the depth overhead. Addition-
1451
+ ally, after the gate decomposition step, the percentages and
1452
+ ratios between all gate types are changed. To illustrate this,
1453
+ imagine a quantum circuit that originally consists of a low
1454
+ number of CNOT gates and no Z gates. After the decompo-
1455
+ sition to gates supported by the crossbar architecture, the per-
1456
+ centage of Z rotation gates will increase, and consequently,
1457
+ the two-qubit gate percentage will decrease, as CNOT gates
1458
+ are decomposed as Ry( π
1459
+ 2 ), two
1460
+
1461
+ SWAP, S, S†, Ry( −π
1462
+ 2 ).
1463
+ Thus, it is relevant to consider this change in gate percentage
1464
+ in our analysis as ultimately the executable circuit will only
1465
+ consist of native gates. To summarize, as overhead comes
1466
+ from mapping different types of gates on the crossbar, indi-
1467
+ vidually distinguishing between them, in particular after de-
1468
+ composition, can increase the accuracy of our evaluations.
1469
+ To illustrate the previous point, in Fig. 14 we show the
1470
+ depth overhead of the Cuccaro Adder (upper dots) and the
1471
+ Vbe Adder (lower dots) with the same ranges as in Fig. 10.
1472
+ Note that the y-axis corresponds to the percentage of X or Y
1473
+ rotation gates after decomposition. From this new perspective,
1474
+ we clearly see their difference in actual (i.e., executed by the
1475
+ architecture) X or Y rotation gate percentage. On average
1476
+ the depth overhead of the Vbe adder is 196% higher than the
1477
+ Cuccaro Adder for the same range of qubits. As explained
1478
+ before, the highest source of depth overhead comes from X
1479
+ or Y rotations gates, which explains the large depth overhead
1480
+ difference between those two algorithms.
1481
+ D.
1482
+ Insights from depth overhead analysis
1483
+ From the previous analysis, we can observe that trends can
1484
+ change based on the parameter ranges of benchmarks. This
1485
+ is because different sources of depth overhead contribute with
1486
+ different rates based on the number of qubits (i.e., crossbar
1487
+ size). More specifically, the overhead contribution resulting
1488
+ from mapping X/Y gates was higher up to a certain number
1489
+ of qubits, after which it was exceeded by the contribution rate of
1490
+ two-qubit gates. We saw that exceeding a threshold of more
1491
+ than 20 qubits increases the depth overhead at a steadier pace,
1492
+ which specifically favoured scalability for Cuccaro Adder in
1493
+ Fig. 13 and 14. It is expected, however, that with different
1494
+ algorithms, there will be different trends. With such observa-
1495
+ tions, we stress the importance of distinguishing between all
1496
+ gate types and especially after decomposition to better under-
1497
+ stand the performance impact of mapping. With that knowl-
1498
+ edge, we can create better mapping techniques and/or make
1499
+ an informed selection of algorithms to execute.
1500
+ As stated before, the fact that gate overhead can result from
1501
+ mapping single-qubit gates is unprecedented. Furthermore,
1502
+ we notice that mapping both single- and two-qubit gates re-
1503
+ quires additional shuttles and they produce the highest gate
1504
+ and depth overhead. Therefore, novel mapping techniques
1505
+ minimizing all qubit movements (shuttles) can increase per-
1506
+ formance substantially, such as the ones discussed in Sec.
1507
+ VII B. From an architectural point of view, since the shuttle
1508
+ operation is so relevant, there have to be as few operational
1509
+
1510
+ [Fig. 13 plot: depth overhead of the Cuccaro Adder, MAX=586.97, AVG=563.11, MED=570.28, MIN=450.0. Axes: gates (before decomp.), qubits, 2-Q gate percentage (before decomp.). Caption below.]
1539
+ FIG. 13: Resulting depth overhead when Cuccaro Adder
1540
+ from the Qlib library is mapped onto the crossbar
1541
+ architecture. The three axes correspond to benchmark
1542
+ characteristics, namely, number of gates [4 - 385], number of
1543
+ qubits [4 - 130] and two-qubit gate percentage [66.75 -
1544
+ 71.43].
1545
+ [Fig. 14 plot: depth overhead (450–750%) vs. number of qubits and X/Y gate percentage (after decomp.). Caption below.]
1569
+ FIG. 14: Resulting depth overhead when Cuccaro Adder
1570
+ (bottom line of data points) and Vbe Adder (top) from the
1571
+ Qlib library are mapped onto the crossbar architecture. The
1572
+ y-axis represents the X or Y gate percentage, and the x-axis
1573
+ the number of qubits.
1574
+ constraints as possible when mapping them.
1575
+ E.
1576
+ Estimated Success Probability
1577
+ In this section, we will show how the success probability of
1578
+ an algorithm drops after mapping it to the crossbar architec-
1579
+ ture. Before we continue, we have to mention that even with
1580
+ operational fidelities as high as 99.99% for single-qubit gates
1581
+ and shuttles (as suggested in [1]) and 99.98% for
1582
+
1583
+ SWAPs,
1584
+ the ESP drops drastically to 0 in most algorithms with a high
1585
+ number of gates.
1586
+ For that reason, we focused only on the
1587
+ Bernstein-Vazirani algorithm, as it has a low percentage of
1588
+ two-qubit gates (usually there are only one or two CNOTs),
1589
+ therefore the error is mostly introduced by single-qubit gates.
1590
+ [Fig. 15 plot: ESP (%) vs. number of gates before and after mapping; curves: Original ESP and ESP. Caption below.]
1613
+ FIG. 15: Estimated success probability (ESP) before and
1614
+ after compilation of Bernstein-Vazirani algorithm from 2 to
1615
+ 129 qubits.
1616
+ Fig. 15 shows the ESP of the Bernstein-Vazirani algorithm
1617
+ when scaling it from 2 to 129 qubits. The red line “Origi-
1618
+ nal ESP” refers to the ESP before mapping, and the blue line
1619
+ “ESP” refers to the ESP after mapping. We observe a sharp ESP
1620
+ decrease approaching 10% for 267 gates after mapping with
1621
+ a slope rate of −0.6 which is caused by the increased num-
1622
+ ber of gates. For 529 gates after mapping we obtained a 0%
1623
+ ESP. Another reason for the ESP decrease is the semi-global
1624
+ single-qubit rotation; for each of the X or Y gates contained
1625
+ in the circuit (after decomposition), all qubits in odd or even
1626
+ columns are rotated (even the ones that are not targeted for
1627
+ rotation). This is further explained in Sec. IV 2.
1628
+
1629
1630
+ F.
1631
+ Insights from Estimated Success Probability analysis
1632
+ Our estimated success probability (Eq. 2), although sim-
1633
+ ple, approximates a worst-case-scenario algorithm success
1634
+ rate.
1635
+ We observed a rapid decline in ESP in a minimally
1636
+ connected algorithm (mostly X or Y rotation gates), even
1637
+ though our equation did not include decoherence-induced er-
1638
+ rors [28, 44]. The main reason for this decrease is the result-
1639
+ ing overhead when implementing single-qubit gates on spe-
1640
+ cific qubits given the semi-global rotation scheme. Note that
1641
+ in this case, all qubits in either column parities are rotated thus
1642
+ each contributing to this ESP drop. Therefore, it is essential
1643
+ to determine which algorithms could take advantage of the
1644
+ semi-global control and/or develop architecture-specific map-
1645
+ ping techniques to minimize the need for a scheme.
1646
+ On real NISQ quantum devices there are other sources of
1647
+ noise that impact algorithm execution. Fortunately, it
1648
+ is expected that processors will gradually become more ro-
1649
+ bust with better fabrication tolerances, that improved error-
1650
+ mitigation and mapping techniques will be developed, and that ul-
1651
+ timately quantum error correction protocols will be used. It
1652
+ remains challenging, however, to accurately simulate errors in
1653
+ large-scale devices to derive an algorithm’s success probability.
1654
+ G.
1655
+ Compilation time
1656
+ [Fig. 16 plot: compilation time [s] vs. number of gates (up to 20,000), one curve per qubit count from 3 to 93. Caption below.]
1684
+ FIG. 16: Compilation time when mapping random uniform
1685
+ algorithms with 50% of two-qubit gates onto the crossbar
1686
+ architecture. We observe a linear relation which makes
1687
+ SpinQ suitable for scalable spin-qubit crossbar architectures.
1688
+ Finally, we measure the compilation time of our solution
1689
+ to evaluate its scalability. The compilation time of SpinQ In-
1690
+ tegrated Strategy can be seen in Fig. 16 for a subset of the
1691
+ random uniform circuits that have been used in Fig. 8 and 12.
1692
+ This subset consists of circuits with only 50% of two-qubit
1693
+ gates. With this subset we map the same number of gates for
1694
+ each gate type, thus all internal SpinQ processes are weighted
1695
+ equally. We observe a linear increase in compilation time in
1696
+ relation to the number of gates for each qubit count. This im-
1697
+ plies that our strategy is suited for scalable spin-qubit crossbar
1698
+ architectures. Improvements can be directed towards reducing
1699
+ the slopes for each qubit count.
1700
+ VIII.
1701
+ DISCUSSION AND FUTURE DIRECTIONS
1702
+ TABLE I: Computational complexity comparison between
1703
+ compilation strategies for the crossbar architecture [1]. With
1704
+ n we denote the number of gates in a quantum circuit.
1705
+ Strategy                      Complexity
+ Backtrack [27]                O(n^3)
+ Suffer a side effect [27]     O(n^2 log(n))
+ Avoid the deadlock [27]       O(n)
+ Integrated (ours)             O(n)
1715
+ Integrated strategy improvements. There can be a few
1716
+ extensions to the Integrated Strategy that can provide better
1717
+ performance (less overhead and higher ESP). These improve-
1718
+ ments can be divided into two categories: a) improvements
1719
+ that increase complexity marginally and b) improvements that
1720
+ will increase complexity substantially. It is important to make
1721
+ this differentiation because on large scale we have to consider
1722
+ the trade-off between complexity (computation time as sizes
1723
+ increase) and performance (less overhead and higher ESP).
1724
+ Improvements in category (a) will involve a constraint and
1725
+ conflict check for any shuttle-based gate type to enable com-
1726
+ plete parallelization of all single-qubit gates within the second
1727
+ pass. Note that, once again, each cycle remains dedicated to
1728
+ one gate type, therefore, fine-tuning pulse durations in real
1729
+ devices is still possible.
1730
+ Category (b), in turn, consists of all heuris-
1731
+ tic mapping algorithms (routing and initial placement) dis-
1732
+ cussed in Sections VII B, VII D and VII F, which can be ex-
1733
+ tended to other scalable spin-qubit architectures. This will en-
1734
+ able complete parallelization of two-qubit gates and less rout-
1735
+ ing for both, one- and two-qubit gates.
1736
+ Strategy Comparisons. As we discussed in Sec. IV, the
1737
+ crossbar architecture comes with constraints that prevent full
1738
+ parallelization of quantum instructions. The crossbar, how-
1739
+ ever, may run into two types of conflicts (unwanted interactions
1740
+ or blocked paths), even if all constraints are respected. For
1741
+ that reason, there must be some kind of compilation strategy
1742
+ between the scheduler and the router to prevent conflicts. In
1743
+ this work, we have implemented the Integrated strategy which
1744
+ is different from the three strategies suggested in [27]. Ta-
1745
+ ble I compares the computational complexity of these three
1746
+ strategies with our own. The backtrack strategy suggested in
1747
+ [27] avoids conflicts by trying a different scheduling combi-
1748
+
1749
1750
+ nation. If after repeating this process the scheduler has back-
1751
+ tracked to the first instruction of the cycle (no more schedul-
1752
+ ing combinations), a new routing path is given by the rout-
1753
+ ing algorithm and the scheduling is repeated. This strategy
1754
+ can be quite complex as, in the worst case, it can un-route
1755
+ and un-schedule all the gates going back to a completely un-
1756
+ mapped circuit. An improved version of this strategy, called
1757
+ suffer a side effect, is a special case of the former and it
1758
+ is only preferred whenever a corresponding conflict can be
1759
+ corrected and if the correction is less costly than only fol-
1760
+ lowing the “backtracking” strategy. The final strategy, and
1761
+ the one implemented in [27], is called avoid the deadlock.
1762
+ This strategy, similar to our Integrated strategy, tries to
1763
+ avoid conflicts by parallelizing only X or Y gates. In this
1764
+ way, √SWAPs and shuttle operations cannot cause a con-
1767
+ flict. However, in this strategy there is no synergy between the
1768
+ routing and scheduling stages as our Integrated strategy has,
1769
+ therefore, there is little flexibility for improvements, and per-
1770
+ formance cannot be easily improved while keeping the same
1771
+ complexity. Our strategy is able to maintain the same O(n)
1772
+ complexity even after improvements.
1773
+ General discussion.
1774
+ When developing novel mapping
1775
+ techniques for scalable quantum computing architectures such
1776
+ as the Si spin-qubit crossbar, two main factors have to be considered:
1777
+ scalability and adaptability. As spin-qubit fabrication capa-
1778
+ bilities are improving, new architectural designs with maybe
1779
+ higher qubit counts will be explored. Therefore, from a com-
1780
+ putation/compilation time point of view, mapping techniques
1781
+ should be as scalable as the underlying technology. Practi-
1782
+ cally, this implies that highly sophisticated and more complex
1783
+ mapping techniques might be excellent for a particular archi-
1784
+ tecture and up to a certain number of qubits, but could be
1785
+ impractical for more qubits or even unusable for another ar-
1786
+ chitecture. In addition, as we are slowly exiting the NISQ
1787
+ era, quantum technologies will become more robust, espe-
1788
+ cially with the use of quantum error correction techniques. By
1789
+ that time, optimizing mapping techniques for specific hard-
1790
+ ware and/or algorithms might not be as relevant as today; what
+ will matter is how fast and adaptable they are to a plethora of quan-
+ tum algorithms and increased numbers of qubits.
1793
+ IX.
1794
+ CONCLUSION
1795
+ Different quantum circuit mapping techniques have been
1796
+ developed to deal with the limitations that current quantum
1797
+ hardware presents and are being consistently improved to ex-
1798
+ pand its computational capabilities by achieving increasingly better
1799
+ algorithm success rates. The most advanced mapping meth-
1800
+ ods focus on ion-trap and superconducting devices due to
1801
+ their ‘maturity’ compared with other quantum technologies.
1802
+ However, spin-qubit-based processors have a great potential
1803
+ to rapidly scale and the first 2D crossbar architectures have
1804
+ been recently demonstrated. In this work, we focused on the
1805
+ quantum circuit mapping challenges of the newly emerging
1806
+ spin qubit technology for which highly-specialized mapping
1807
+ techniques are needed to take advantage of its operational
1808
+ abilities. Specifically, we used the crossbar architecture as
1809
+ a stepping stone to explore novel mapping solutions while
1810
+ focusing on scalability. The crossbar architecture adopts a
1811
+ shared-control scheme, thus making it a great candidate to
1812
+ tackle the interconnect bottleneck.
1813
+ On that note, we have
1814
+ developed SpinQ, the first native compilation framework for
1815
+ spin-qubit architectures, which we used to analyze the perfor-
1816
+ mance of synthetic and real quantum algorithms on the cross-
1817
+ bar architecture. Through our analysis, we tried to inspire
1818
+ novel algorithm- and hardware-specific mapping techniques
1819
+ that can possibly increase the performance while taking into
1820
+ account the compilation scalability. We also emphasized the
1821
+ importance of characterizing benchmarks before and after de-
1822
+ composition and by including their QIG structure to better
1823
+ evaluate results.
1824
+ X.
1825
+ ACKNOWLEDGEMENT
1826
+ This work is part of the research program OTP with project
1827
+ number 16278, which is (partly) financed by the Netherlands
1828
+ Organisation for Scientific Research (NWO). This work has
1829
+ also been partially supported by the Spanish Ministerio de
1830
+ Ciencia e Innovación, European ERDF under grant PID2021-
1831
+ 123627OB-C51 (CGA). We thank Menno Veldhorst and Hans
1832
+ van Someren for their fruitful discussions.
1833
+
1834
1835
+ [1] R. Li, L. Petit, D. P. Franke, J. P. Dehollain, J. Helsen,
1836
+ M. Steudtner, N. K. Thomas, Z. R. Yoscovits, K. J. Singh,
1837
+ S. Wehner, et al., A crossbar network for silicon quantum dot
1838
+ qubits, Science advances 4, eaar3960 (2018).
1839
+ [2] F. Arute, K. Arya, R. Babbush, D. Bacon, J. C. Bardin,
1840
+ R. Barends, R. Biswas, S. Boixo, F. G. Brandao, D. A. Buell,
1841
+ et al., Quantum supremacy using a programmable supercon-
1842
+ ducting processor, Nature 574, 505 (2019).
1843
+ [3] L. S. Madsen, F. Laudenbach, M. F. Askarani, F. Rortais,
1844
+ T. Vincent, J. F. Bulmer, F. M. Miatto, L. Neuhaus, L. G. Helt,
1845
+ M. J. Collins, et al., Quantum computational advantage with a
1846
+ programmable photonic processor, Nature 606, 75 (2022).
1847
+ [4] H.-Y. Huang, M. Broughton, J. Cotler, S. Chen, J. Li,
1848
+ M. Mohseni, H. Neven, R. Babbush, R. Kueng, J. Preskill,
1849
+ et al., Quantum advantage in learning from experiments, Sci-
1850
+ ence 376, 1182 (2022).
1851
+ [5] S. Bravyi, O. Dial, J. M. Gambetta, D. Gil, and Z. Nazario,
1852
+ The future of quantum computing with superconducting qubits,
1853
+ Journal of Applied Physics 132, 160902 (2022).
1854
+ [6] J. Preskill, Quantum computing in the nisq era and beyond,
1855
+ Quantum 2, 79 (2018).
1856
+ [7] T. D. Ladd, F. Jelezko, R. Laflamme, Y. Nakamura, C. Monroe,
1857
+ and J. L. O’Brien, Quantum computers, nature 464, 45 (2010).
1858
+ [8] C. G. Almudever, L. Lao, X. Fu, N. Khammassi, I. Ashraf,
1859
+ D. Iorga, S. Varsamopoulos, C. Eichler, A. Wallraff, L. Geck,
1860
+ et al., The engineering challenges in quantum computing, in
1861
+ Design, Automation & Test in Europe Conference & Exhibition
1862
+ (DATE), 2017 (IEEE, 2017) pp. 836–845.
1863
+ [9] A. Zulehner, A. Paler, and R. Wille, An efficient methodology
1864
+ for mapping quantum circuits to the ibm qx architectures, IEEE
1865
+ Transactions on Computer-Aided Design of Integrated Circuits
1866
+ and Systems 38, 1226 (2018).
1867
+ [10] L. Lao, H. van Someren, I. Ashraf, and C. G. Almudever, Tim-
1868
+ ing and resource-aware mapping of quantum circuits to super-
1869
+ conducting processors, IEEE Transactions on Computer-Aided
1870
+ Design of Integrated Circuits and Systems (2021).
1871
+ [11] P. Murali, J. M. Baker, A. Javadi-Abhari, F. T. Chong, and
1872
+ M. Martonosi, Noise-adaptive compiler mappings for noisy
1873
+ intermediate-scale quantum computers, in Proceedings of the
1874
+ Twenty-Fourth International Conference on Architectural Sup-
1875
+ port for Programming Languages and Operating Systems
1876
+ (2019) pp. 1015–1029.
1877
+ [12] L. Lao and D. E. Browne, 2qan: A quantum compiler for 2-
1878
+ local qubit hamiltonian simulation algorithms, in Proceedings
1879
+ of the 49th Annual International Symposium on Computer Ar-
1880
+ chitecture (2022) pp. 351–365.
1881
+ [13] S. Nishio, Y. Pan, T. Satoh, H. Amano, and R. V. Meter, Extract-
1882
+ ing success from ibm’s 20-qubit machines using error-aware
1883
+ compilation, ACM Journal on Emerging Technologies in Com-
1884
+ puting Systems (JETC) 16, 1 (2020).
1885
+ [14] M. Bandic, S. Feld, and C. G. Almudever, Full-stack quan-
1886
+ tum computing systems in the nisq era: algorithm-driven and
1887
+ hardware-aware compilation techniques, in 2022 Design, Au-
1888
+ tomation & Test in Europe Conference & Exhibition (DATE)
1889
+ (IEEE, 2022) pp. 1–6.
1890
+ [15] P. Murali, N. M. Linke, M. Martonosi, A. J. Abhari, N. H.
1891
+ Nguyen, and C. H. Alderete, Full-stack, real-system quantum
1892
+ computer studies: Architectural comparisons and design in-
1893
+ sights, in 2019 ACM/IEEE 46th Annual International Sympo-
1894
+ sium on Computer Architecture (ISCA) (IEEE, 2019) pp. 527–
1895
+ 540.
1896
+ [16] N. Quetschlich, L. Burgholzer, and R. Wille, Predicting
1897
+ good quantum circuit compilation options, arXiv preprint
1898
+ arXiv:2210.08027 (2022).
1899
+ [17] M. Steinberg, S. Feld, C. G. Almudever, M. Marthaler,
1900
+ and J.-M. Reiner, A noise-aware qubit mapping algorithm
1901
+ evaluated via qubit interaction-graph criteria, arXiv preprint
1902
+ arXiv:2103.15695 (2021).
1903
+ [18] S. S. Tannu and M. K. Qureshi, Not all qubits are created equal:
1904
+ a case for variability-aware policies for nisq-era quantum com-
1905
+ puters, in Proceedings of the Twenty-Fourth International Con-
1906
+ ference on Architectural Support for Programming Languages
1907
+ and Operating Systems (2019) pp. 987–999.
1908
+ [19] M. G. Pozzi, S. J. Herbert, A. Sengupta, and R. D. Mullins, Us-
1909
+ ing reinforcement learning to perform qubit routing in quantum
1910
+ compilers, arXiv preprint arXiv:2007.15957 (2020).
1911
+ [20] F. A. Zwanenburg, A. S. Dzurak, A. Morello, M. Y. Simmons,
1912
+ L. C. L. Hollenberg, G. Klimeck, S. Rogge, S. N. Coppersmith,
1913
+ and M. A. Eriksson, Silicon quantum electronics, Rev. Mod.
1914
+ Phys. 85, 961 (2013).
1915
+ [21] D. Loss and D. P. DiVincenzo, Quantum computation with
1916
+ quantum dots, Phys. Rev. A 57, 120 (1998).
1917
+ [22] L. Vandersypen, H. Bluhm, J. Clarke, A. Dzurak, R. Ishihara,
1918
+ A. Morello, D. Reilly, L. Schreiber, and M. Veldhorst, Interfac-
1919
+ ing spin qubits in quantum dots and donors—hot, dense, and
1920
+ coherent, npj Quantum Information 3, 1 (2017).
1921
+ [23] M. Veldhorst, C. Yang, J. Hwang, W. Huang, J. Dehollain,
1922
+ J. Muhonen, S. Simmons, A. Laucht, F. Hudson, K. M. Itoh,
1923
+ et al., A two-qubit logic gate in silicon, Nature 526, 410 (2015).
1924
+ [24] D. Zajac, T. Hazard, X. Mi, K. Wang, and J. R. Petta, A re-
1925
+ configurable gate architecture for si/sige quantum dots, Applied
1926
+ Physics Letters 106, 223507 (2015).
1927
+ [25] T. Watson, S. Philips, E. Kawakami, D. Ward, P. Scarlino,
1928
+ M. Veldhorst, D. Savage, M. Lagally, M. Friesen, S. Copper-
1929
+ smith, et al., A programmable two-qubit quantum processor in
1930
+ silicon, nature 555, 633 (2018).
1931
+ [26] F. Borsoi, N. W. Hendrickx, V. John, S. Motz, F. van Riggelen,
1932
+ A. Sammak, S. L. de Snoo, G. Scappucci, and M. Veldhorst,
1933
+ Shared control of a 16 semiconductor quantum dot crossbar ar-
1934
+ ray, arXiv preprint arXiv:2209.06609 (2022).
1935
+ [27] A. Morais Tejerina, Mapping quantum algorithms in a crossbar
1936
+ architecture (2019).
1937
+ [28] J. Helsen, M. Steudtner, M. Veldhorst, and S. Wehner, Quantum
1938
+ error correction in crossbar architectures, Quantum Science and
1939
+ Technology 3, 035005 (2018).
1940
+ [29] C. Gidney and M. Ekerå, How to factor 2048 bit rsa integers in
1941
+ 8 hours using 20 million noisy qubits, Quantum 5, 433 (2021).
1942
+ [30] S. Resch and U. R. Karpuzcu, Quantum computing: an overview across the system stack, arXiv preprint arXiv:1905.07240 (2019).
1959
+ [31] A. Chatterjee, P. Stevenson, S. De Franceschi, A. Morello, N. P.
1960
+ de Leon, and F. Kuemmeth, Semiconductor qubits in practice,
1961
+ Nature Reviews Physics 3, 157 (2021).
1962
+ [32] D. P. Franke, J. S. Clarke, L. M. Vandersypen, and M. Veld-
1963
+ horst, Rent’s rule and extensibility in quantum computing, Mi-
1964
+ croprocessors and Microsystems 67, 1 (2019).
1965
+ [33] M. Meyer, C. Déprez, T. R. van Abswoude, D. Liu, C.-A. Wang,
1966
+ S. Karwal, S. Oosterhout, F. Borsoi, A. Sammak, N. W. Hen-
1967
+ drickx, et al., Electrical control of uniformity in quantum dot
1968
+ devices, arXiv preprint arXiv:2211.13493 (2022).
1969
+ [34] J. M. Boter, J. P. Dehollain, J. P. Van Dijk, Y. Xu, T. Hensgens,
1970
+ R. Versluis, H. W. Naus, J. S. Clarke, M. Veldhorst, F. Sebas-
1971
+
1972
1973
+ tiano, et al., Physical Review Applied 18, 024053 (2022).
1974
+ [35] C. D. Hill, E. Peretz, S. J. Hile, M. G. House, M. Fuechsle,
1975
+ S. Rogge, M. Y. Simmons, and L. C. Hollenberg, A surface code
1976
+ quantum computer in silicon, Science advances 1, e1500707
1977
+ (2015).
1978
+ [36] B. Paquelet Wuetz, P. Bavdaz, L. Yeoh, R. Schouten, H. Van
1979
+ Der Does, M. Tiggelman, D. Sabbagh, A. Sammak, C. G. Al-
1980
+ mudever, F. Sebastiano, et al., Multiplexed quantum transport
1981
+ using commercial off-the-shelf cmos at sub-kelvin tempera-
1982
+ tures, npj Quantum Information 6, 1 (2020).
1983
+ [37] S. Pauka, K. Das, R. Kalra, A. Moini, Y. Yang, M. Trainer,
1984
+ A. Bousquet, C. Cantaloube, N. Dick, G. Gardner, et al., A
1985
+ cryogenic interface for controlling many qubits, arXiv preprint
1986
+ arXiv:1912.01299 (2019).
1987
+ [38] M. Veldhorst, H. Eenink, C.-H. Yang, and A. S. Dzurak, Silicon
1988
+ cmos architecture for a spin-based quantum computer, Nature
1989
+ communications 8, 1 (2017).
1990
+ [39] N. W. Hendrickx, W. I. Lawrie, M. Russ, F. van Riggelen,
1991
+ S. L. de Snoo, R. N. Schouten, A. Sammak, G. Scappucci, and
1992
+ M. Veldhorst, A four-qubit germanium quantum processor, Na-
1993
+ ture 591, 580 (2021).
1994
+ [40] M. Veldhorst, J. Hwang, C. Yang, A. Leenstra, B. de Ronde,
1995
+ J. Dehollain, J. Muhonen, F. Hudson, K. M. Itoh, A. Morello,
1996
+ et al., An addressable quantum dot qubit with fault-tolerant
1997
+ control-fidelity, Nature nanotechnology 9, 981 (2014).
1998
+ [41] T. Fujita, T. A. Baart, C. Reichl, W. Wegscheider, and L. M. K.
1999
+ Vandersypen, Coherent shuttle of electron-spin states, npj
2000
+ Quantum Information 3, 1 (2017).
2001
+ [42] G. Li, Y. Ding, and Y. Xie, Tackling the qubit mapping problem
2002
+ for nisq-era quantum devices, in Proceedings of the Twenty-
2003
+ Fourth International Conference on Architectural Support for
2004
+ Programming Languages and Operating Systems (2019) pp.
2005
+ 1001–1014.
2006
+ [43] C. D. Bruzewicz, J. Chiaverini, R. McConnell, and J. M. Sage,
2007
+ Trapped-ion quantum computing: Progress and challenges, Ap-
2008
+ plied Physics Reviews 6, 021314 (2019).
2009
+ [44] Y. Kharkov, A. Ivanova, E. Mikhantiev, and A. Kotelnikov, Ar-
2010
+ line benchmarks: Automated benchmarking platform for quan-
2011
+ tum compilers, arXiv preprint arXiv:2202.14025 (2022).
2012
+ [45] A. Sinha, U. Azad, and H. Singh, Qubit routing using graph
2013
+ neural network aided monte carlo tree search, in Proceedings of
2014
+ the AAAI Conference on Artificial Intelligence, Vol. 36 (2022)
2015
+ pp. 9935–9943.
2016
+ [46] M. Bandic, H. Zarein, E. Alarcon, and C. G. Almudever, On
2017
+ structured design space exploration for mapping of quantum al-
2018
+ gorithms, in 2020 XXXV conference on design of circuits and
2019
+ integrated systems (DCIS) (IEEE, 2020) pp. 1–6.
2020
+ [47] S. Herbert and A. Sengupta, Using reinforcement learning to
2021
+ find efficient qubit routing policies for deployment in near-term
2022
+ quantum computers, arXiv preprint arXiv:1812.11619 (2018).
2023
+ [48] D. M. A. L. Valada, Predicting the fidelity of quantum circuits
2024
+ search for better metrics for the qubit mapping problem (2020).
2025
+ [49] IBM, Qiskit Aer library, https://qiskit.org/documentation/apidoc/aer_library.html (2022).
2032
+ [50] S. Sivarajah, S. Dilkes, A. Cowtan, W. Simmons, A. Edging-
2033
+ ton, and R. Duncan, t|ket⟩: A retargetable compiler for nisq
2034
+ devices, Quantum Science and Technology (2020).
2035
+ [51] R. Wille, D. Große, L. Teuber, G. W. Dueck, and R. Drech-
2036
+ sler, Revlib: An online resource for reversible functions and re-
2037
+ versible circuits, in 38th International Symposium on Multiple
2038
+ Valued Logic (ismvl 2008) (IEEE, 2008) pp. 220–225.
2039
+ [52] C.-C. Lin, A. Chakrabarti, and N. K. Jha, Qlib: Quantum mod-
2040
+ ule library, ACM Journal on Emerging Technologies in Com-
2041
+ puting Systems (JETC) 11, 1 (2014).
2042
+
2NFQT4oBgHgl3EQfFTWv/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
2tAyT4oBgHgl3EQfovik/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d1919dd2d47eb1aea95c922f1684f09e247bebb8204b2c0ec855ed938b39c0b
3
+ size 178800
2tE4T4oBgHgl3EQf0A0W/content/tmp_files/2301.05278v1.pdf.txt ADDED
@@ -0,0 +1,1972 @@
1
+ MIXED VOLUMES OF NORMAL COMPLEXES
2
+ LAUREN NOWAK, PATRICK O’MELVENY, AND DUSTIN ROSS
3
+ Abstract. Normal complexes are orthogonal truncations of polyhedral fans. In this paper,
4
+ we develop the study of mixed volumes for normal complexes. Our main result is a sufficiency
5
+ condition that ensures when the mixed volumes of normal complexes associated to a given fan
6
+ satisfy the Alexandrov–Fenchel inequalities. By specializing to Bergman fans of matroids, we
7
+ give a new proof of the Heron–Rota–Welsh Conjecture as a consequence of the Alexandrov–
8
+ Fenchel inequalities for normal complexes.
9
+ 1. Introduction
10
+ The Alexandrov–Fenchel inequalities lie at the heart of convex geometry, asserting that,
11
+ for any convex bodies P♥, P♦, P3 . . . , Pd ∈ Rd, their mixed volumes satisfy
12
+ MVol(P♥, P♦, P3, . . . , Pd)^2 ≥ MVol(P♥, P♥, P3, . . . , Pd) MVol(P♦, P♦, P3, . . . , Pd).
13
+ This paper is centered around developing an analogue of the Alexandrov–Fenchel inequalities
14
+ in a decidedly nonconvex setting. The geometric objects of interest to us are normal com-
15
+ plexes, which were recently introduced by A. Nathanson and the third author [NR21]. Given
16
+ a pure simplicial fan Σ, a normal complex associated to Σ is, roughly speaking, a polyhedral
17
+ complex obtained by truncating each cone of Σ with half-spaces perpendicular to the rays of
18
+ Σ. The choice of where to place the truncating half-spaces results in a family of normal com-
19
+ plexes associated to each fan Σ, and the question that motivates this work is: for a given fan
20
+ Σ, do the mixed volumes of the associated normal complexes satisfy the Alexandrov–Fenchel
21
+ inequalities? Our main result (Theorem 5.1) describes two readily verifiable conditions on
22
+ Σ that guarantee an affirmative answer to this question.
23
+ One of the motivations for studying mixed volumes of normal complexes is that, in the
24
+ special setting of tropical fans, they correspond to mixed degrees of divisors in associated
25
+ Chow rings. Thus, Alexandrov–Fenchel inequalities for normal complexes lead to nontrivial
26
+ numerical inequalities in these Chow rings. A class of tropical fans that have garnered a
27
+ great deal of attention in recent years are Bergman fans of matroids, and one application
28
+ of our main result (Theorem 6.2) is that normal complexes associated to Bergman fans of
29
+ matroids satisfy the Alexandrov–Fenchel inequalities. Translating these inequalities back to
30
+ matroid Chow rings, we obtain a volume-theoretic proof of the log-concavity of characteristic
31
+ polynomials of matroids, a result that was conjectured by Heron, Rota, and Welsh [Rot71,
32
+ Her72, Wel76] and first proved by Adiprasito, Huh, and Katz [AHK18].
33
+ 1
34
+ arXiv:2301.05278v1 [math.CO] 12 Jan 2023
35
+
36
+ 2
37
+ L. NOWAK, P. O’MELVENY, AND D. ROSS
38
+ 1.1. Overview of the paper. We begin in Section 2 by briefly recalling the construction
39
+ of normal complexes and their volumes. Normal complexes, denoted CΣ,∗(z), depend on
40
+ a marked simplicial d-fan Σ in a vector space NR with an inner product ∗ ∈ Inn(NR), as
41
+ well as a choice of pseudocubical truncating values z ∈ Cub(Σ, ∗) ⊆ RΣ(1). The volume of
42
+ CΣ,∗(z), denoted VolΣ,ω,∗(z), where ω is a weight function on the top-dimensional cones of
43
+ Σ, is defined as the weighted sum of the volumes of the maximal polytopes in CΣ,∗(z). We
44
+ recall the main result of [NR21], which asserts that, if (Σ, ω) is a tropical fan, then
45
+ (1.1)
+ VolΣ,ω,∗(z) = degΣ,ω(D(z)^d)
+ where
+ D(z) = \sum_{ρ∈Σ(1)} zρ Xρ ∈ A1(Σ).
52
+ In Section 3, we introduce mixed volumes of normal complexes CΣ,∗(z1), . . . , CΣ,∗(zd),
53
+ denoted MVolΣ,ω,∗(z1, . . . , zd), which are weighted sums of mixed volumes of maximal poly-
54
+ topes. Analogous to mixed volumes in convex geometry, we show that mixed volumes of
55
+ normal complexes are characterized by being symmetric, multilinear, and normalized by vol-
56
+ ume (Proposition 3.1). Furthermore, we prove that mixed volumes are nonnegative on the
57
+ pseudocubical cone Cub(Σ, ∗) and positive on the cubical cone Cub(Σ, ∗) (Proposition 3.5).
58
+ For all tropical fans (Σ, ω), we leverage (1.1) to show (Theorem 3.6) that
59
+ (1.2)
60
+ MVolΣ,ω,∗(z1, . . . , zd) = degΣ,ω(D(z1) · · · D(zd)).
61
+ In Section 4, we develop the face structure of normal complexes, closely paralleling the
62
+ classical face structure of polytopes. In particular, the faces of a normal complex CΣ,∗(z)
63
+ are indexed by cones τ ∈ Σ, and each face is obtained as the intersection of CΣ,∗(z) with the
64
+ truncating hyperplanes indexed by the rays of τ. We describe how each face can, itself, be
65
+ viewed as a normal complex associated to the star fan Στ, and use this to define (mixed)
66
+ volumes of faces.
67
+ Our main result of this section (Proposition 4.13), shows how mixed
68
+ volumes of normal complexes can be computed in terms of mixed volumes of facets.
69
+ In Section 5, we introduce what it means for a triple (Σ, ω, ∗) to be AF—namely, that the
70
+ mixed volumes of cubical values satisfy the Alexandrov–Fenchel inequalities. Our main result
71
+ (Theorem 5.1), inspired by work of Cordero-Erausquin, Klartag, Merigot, and Santambrogio
72
+ [CEKMS19] and Br¨and´en and Leake [BL21], states that (Σ, ω, ∗) is AF if (i) all star fans Στ
73
+ of dimension at least three remain connected after removing the origin and (ii) the quadratic
74
+ volume polynomials associated to the two-dimensional star fans of Σ have exactly one positive
75
+ eigenvalue. In fact, under these conditions, we argue that the volume polynomial VolΣ,ω,∗(z)
76
+ is Cub(Σ, ∗)-Lorentzian, which then implies that (Σ, ω, ∗) is AF.
77
+ In Section 6, we briefly recall relevant notions regarding matroids and Bergman fans, and
78
+ then we use Theorem 5.1 to prove that Bergman fans of matroids are AF (Theorem 6.2).
79
+
80
+ MIXED VOLUMES OF NORMAL COMPLEXES
81
+ 3
82
+ We conclude the paper by deducing the Heron–Rota–Welsh Conjecture as a consequence of
83
+ the Alexandrov–Fenchel inequalities for normal complexes.
84
+ 1.2. Relation to other work. Since the original proof of the Heron–Rota–Welsh Conjec-
85
+ ture by Adiprasito, Huh, and Katz [AHK18], there have been a number of alternative proofs,
86
+ generalizations, and exciting related developments (an incomplete list includes [BHM+22,
87
+ BHM+20, BES20, ADH20, AP20, AP21, BH20, AGV21, ALGV19, ALGV18, CP21]). We
88
+ view the volume-theoretic approach in this paper as a new angle from which to view log-
89
+ concavity of characteristic polynomials of matroids, but we also want to acknowledge that
90
+ our methods share features of and are indebted to the approaches of several other teams
91
+ of mathematicians. In particular, our methods rely on the Chow-theoretic interpretation of
92
+ characteristic polynomials of matroids, proved by Huh and Katz [HK12], which was central
93
+ in the original proof of Adiprasito, Huh, and Katz [AHK18], as well as in the subsequent
94
+ proofs by Braden, Huh, Matherne, Proudfoot, and Wang [BHM+22] and Backman, Eur, and
95
+ Simpson [BES20]. In addition, our methods prove that volume polynomials are Lorentzian,
96
+ which is also a central feature in the methods of both Backman, Eur, and Simpson [BES20]
97
+ and Br¨and´en and Leake [BL21]. We note that, while the methods of [BES20] and [BL21]
98
+ seem to be tailored primarily for matroids, our methods readily extend to the more general
99
+ setting of tropical intersection theory (this extension will be spelled out in a forthcoming
100
+ work of the third author). By adding a new volume-theoretic approach to the Heron–Rota–
101
+ Welsh Conjecture to the literature, we hope that this paper will serve to welcome a new
102
+ batch of geometrically-minded folks into the fold of this flourishing area of research, opening
103
+ the door for further developments.
104
+ 1.3. Acknowledgements. The authors would like to express their gratitude to Federico
105
+ Ardila, Matthias Beck, Emily Clader, Chris Eur, and Serkan Ho¸sten for sharing insights
106
+ related to this project.
107
+ This work was supported by a grant from the National Science
108
+ Foundation: DMS-2001439.
109
+ 2. Background on normal complexes
110
+ In this section, we establish notation, conventions, and preliminary results regarding poly-
111
+ hedral fans and normal complexes.
112
+ 2.1. Fan definitions and conventions. Let NR be a real vector space of dimension n.
113
+ Given a polyhedral fan Σ ⊆ NR, we denote the k-dimensional cones of Σ by Σ(k). Let ⪯
114
+ denote the face containment relation among the cones of Σ, and for each cone σ ∈ Σ, let
115
+ σ(k) ⊆ Σ(k) denote the k-dimensional faces of σ. For any cone σ, let σ◦ denote the relative
116
+ interior of σ and denote the linear span of σ by Nσ,R ⊆ NR.
117
+
118
+ 4
119
+ L. NOWAK, P. O’MELVENY, AND D. ROSS
120
+ We say that a fan Σ is pure if all of the maximal cones in Σ have the same dimension.
121
+ We say that Σ is marked if we have chosen a distinguished generating vector 0 ̸= uρ ∈ ρ for
122
+ each ray ρ ∈ Σ(1). Henceforth, we assume that all fans are pure, polyhedral, and marked,
123
+ and we use the term d-fan to refer to a pure, polyhedral, marked fan of dimension d.
124
+ We say that Σ is simplicial if dim(Nσ,R) = |σ(1)| for all σ ∈ Σ. The faces of a simplicial
125
+ cone σ are in bijective correspondence with the subsets of σ(1). For every face containment
126
+ τ ⪯ σ in a simplicial fan Σ, let σ \ τ denote the face of σ with rays σ(1) \ τ(1). Given two
127
+ faces τ, π ⪯ σ, denote by τ ∪ π the face of σ with rays τ(1) ∪ π(1).
128
+ Given a simplicial d-fan Σ and a weight function ω : Σ(d) → R>0, we say that the pair
+ (Σ, ω) is a tropical fan if it satisfies the weighted balancing condition:
+ \sum_{σ∈Σ(d), τ≺σ} ω(σ) u_{σ\τ} ∈ Nτ,R   for all   τ ∈ Σ(d − 1).
136
+ While the definition of tropical fans can be generalized to nonsimplicial fans, we will assume
137
+ throughout this paper that all tropical fans are simplicial. If ω(σ) = 1 for all σ ∈ Σ(d), we
138
+ say that Σ is balanced and we omit ω from the notation.
139
+ 2.2. Chow rings and degree maps. Let MR denote the dual of NR and let ⟨−, −⟩ be the
140
+ duality pairing. Given a simplicial fan Σ ⊆ NR, the Chow ring of Σ is defined by
141
+ A•(Σ) ..= R
142
+
143
+ xρ | ρ ∈ Σ(1)
144
+
145
+ I + J
146
+ where
147
+ I ..=
148
+
149
+ xρ1 · · · xρk | R≥0{ρ1, . . . , ρk} /∈ Σ
150
+
151
+ and
152
+ J ..=
153
+ � �
154
+ ρ∈Σ(1)
155
+ ⟨v, uρ⟩xρ
156
+ ���� v ∈ MR
157
+
158
+ .
159
+ As both I and J are homogeneous, the Chow ring A•(Σ) is a graded ring, and we denote
160
+ by Ak(Σ) the subgroup of homogeneous elements of degree k. We denote the generators of
161
+ A•(Σ) by Xρ ..= [xρ] ∈ A1(Σ), and for any σ ∈ Σ(k), we define
162
+ Xσ ..= \prod_{ρ∈σ(1)} Xρ ∈ Ak(Σ).
166
+ If Σ is a simplicial d-fan, then every element of Ak(Σ) can be written as a linear combination
167
+ of Xσ with σ ∈ Σ(k) (see, for example, [AHK18, Proposition 5.5]). It follows that Ak(Σ) = 0
168
+ for all k > d. If, in addition, (Σ, ω) is tropical, then there is a well-defined degree map
169
+ degΣ,ω : Ad(Σ) → R
170
+ such that degΣ,ω(Xσ) = ω(σ) for every σ ∈ Σ(d) (see, for example, [AHK18, Proposition 5.6]).
171
+
172
+ MIXED VOLUMES OF NORMAL COMPLEXES
173
+ 5
174
+ 2.3. Normal complexes. We now recall the construction of normal complexes from [NR21].
175
+ In addition to a simplicial d-fan Σ ⊆ NR, the normal complex construction requires an
176
+ additional choice of an inner product ∗ ∈ Inn(NR) and a value z ∈ RΣ(1). Given such a ∗
177
+ and z, we define a set of hyperplanes and half-spaces in NR associated to each ρ ∈ Σ by
178
+ Hρ,∗(z) ..= {v ∈ NR | v ∗ uρ = zρ}   and   H^−_{ρ,∗}(z) ..= {v ∈ NR | v ∗ uρ ≤ zρ}.
182
+ We then define polytopes Pσ,∗(z), one for each σ ∈ Σ, by
183
+ Pσ,∗(z) ..= σ ∩ \bigcap_{ρ∈σ(1)} H^−_{ρ,∗}(z).
188
+ Notice that Pσ,∗(z) is simply a truncation of the cone σ by hyperplanes that are normal to the
189
+ rays of σ—what it means to be normal is determined by ∗, and the locations of the normal
190
+ hyperplanes along the rays of the cone are determined by z. We would like to construct a
191
+ polytopal complex from these polytopes, but in general, they do not meet along faces. To
192
+ ensure that they meet along faces, we require a compatibility between z and ∗.
193
+ For each σ ∈ Σ, let wσ,∗(z) ∈ Nσ,R be the unique vector such that wσ,∗(z) ∗ uρ = zρ for all
194
+ ρ ∈ σ(1). That such a vector exists and is unique follows from the fact that the vectors uρ
195
+ with ρ ∈ σ(1) are linearly independent—this is equivalent to the simplicial hypothesis. We
196
+ then say that z is cubical (pseudocubical) with respect to (Σ, ∗) if
197
+ wσ,∗(z) ∈ σ◦
198
+ (wσ,∗(z) ∈ σ)
199
+ for all
200
+ σ ∈ Σ.
201
+ In other words, the pseudocubical values are those values of z for which the truncating
202
+ hyperplanes intersect within each cone, and the cubical values are those for which they
203
+ intersect in the relative interior of each cone. The collection of cubical values are denoted
204
+ Cub(Σ, ∗) ⊆ RΣ(1) and the pseudocubical values are denoted Cub(Σ, ∗) ⊆ RΣ(1).
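+ To illustrate these definitions in the simplest case, let Σ ⊆ NR = R^2 be the fan consisting of the
+ cone σ = Cone(e1, e2) and its faces, with markings uρ1 = e1 and uρ2 = e2 and with ∗ the usual dot
+ product. Then wσ,∗(z) = (zρ1, zρ2), so z is cubical exactly when zρ1, zρ2 > 0 (and pseudocubical when
+ zρ1, zρ2 ≥ 0), and in that case Pσ,∗(z) is the rectangle [0, zρ1] × [0, zρ2].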
205
+ We now summarize key results from [NR21] that will be necessary for the developments
206
+ in this paper (see [NR21, Propositions 3.2, 3.3, and 3.7 ]).
207
+ Proposition 2.1. Let Σ ⊆ NR be a simplicial d-fan and let ∗ ∈ Inn(NR) be an inner product.
208
+ (1) The set Cub(Σ, ∗) ⊆ RΣ(1) is a polyhedral cone with Cub(Σ, ∗)◦ = Cub(Σ, ∗).
209
+ (2) For z ∈ Cub(Σ, ∗), the vertices of Pσ,∗(z) are {wτ,∗(z) | τ ⪯ σ}.
210
+ (3) For z ∈ Cub(Σ, ∗), the polytopes Pσ,∗(z) meet along faces.
211
+ For any polytope P, let \widetilde{P} denote the set of all faces of P. The third part of Proposition 2.1
+ implies that
+ CΣ,∗(z) ..= \bigcup_{σ∈Σ(d)} \widetilde{Pσ,∗(z)}
218
+ is a polytopal complex whenever z ∈ Cub(Σ, ∗), and this polytopal complex is called the
219
+ normal complex of Σ with respect to ∗ and z.
220
+
221
+ 6
222
+ L. NOWAK, P. O’MELVENY, AND D. ROSS
223
+ Below, we depict a two-dimensional tropical fan and an associated normal complex. The
224
+ fan is comprised of nine two-dimensional cones glued along faces, and each of these nine
225
+ cones corresponds to a quadrilateral in the normal complex.
226
+ The next pair of images depict a three-dimensional fan comprised of two maximal cones
227
+ meeting along a two-dimensional face, and a corresponding normal complex. While this
228
+ fan is not tropical, the reader is welcome to view this image as just one small piece of a
229
+ three-dimensional tropical fan in some higher-dimensional vector space.
230
+ 2.4. Volumes of normal complexes. Let Σ ⊆ NR be a simplicial d-fan, ∗ ∈ Inn(NR)
231
+ an inner product, and z ∈ Cub(Σ, ∗) a pseudocubical value. Informally, the volume of the
232
+ normal complex CΣ,∗(z) is the sum of the volumes of the polytopes Pσ,∗(z) with σ ∈ Σ(d);
233
+ however, some care is required in specifying what we mean by volume in each subspace Nσ,R.
234
+ For each cone σ ∈ Σ, define the discrete subgroup
235
+ Nσ ..= spanZ(uρ | ρ ∈ σ(1)) ⊆ NR,
236
+ and let Mσ denote its dual: Mσ ..= HomZ(Nσ, Z) ⊆ Mσ,R ..= HomR(Nσ,R, R). Using the inner
237
+ product ∗, we can identify Mσ,R with Nσ,R and thus, we can view Mσ as a lattice in Nσ,R.
238
+ For each σ ∈ Σ, let
239
+ Volσ : { polytopes in Nσ,R } → R≥0
247
+ be the volume function determined by the property that a fundamental simplex of the lattice
248
+ Mσ ⊆ Nσ,R has unit volume. Define the volume of the normal complex CΣ,∗(z), denoted
249
+ VolΣ,∗(z) for brevity, as the sum of the volumes of the constituent d-dimensional polytopes:
250
+ VolΣ,∗(z) ..= \sum_{σ∈Σ(d)} Volσ(Pσ,∗(z)).
254
+ In slightly more generality, suppose that ω : Σ(d) → R>0 is a weight function on the maximal
255
+ cones of Σ. The volume of the normal complex CΣ,∗(z) weighted by ω is defined by
256
+ VolΣ,ω,∗(z) ..= \sum_{σ∈Σ(d)} ω(σ) Volσ(Pσ,∗(z)).
260
+ The main result of [NR21] is a Chow-theoretic interpretation of the weighted volumes of
261
+ normal complexes, valid whenever (Σ, ω) is tropical.
262
+ Theorem 2.2 ([NR21, Theorem 6.3]). Let (Σ, ω) be a tropical d-fan, ∗ ∈ Inn(NR) an inner
263
+ product, and z ∈ Cub(Σ, ∗) a pseudocubical value. Then
264
+ VolΣ,ω,∗(z) = degΣ,ω(D(z)^d)
+ where
+ D(z) = \sum_{ρ∈Σ(1)} zρ Xρ ∈ A1(Σ).
270
+ 3. Mixed Volumes of Normal Complexes
271
+ Our first aim in this paper is to enhance Theorem 2.2 to a statement about mixed volumes.
272
+ In order to do this, we briefly recall the classical theory of mixed volumes, for which we
273
+ recommend the comprehensive text by Schneider [Sch14] as a reference.
274
+ 3.1. Mixed volumes of polytopes. Mixed volumes are the natural result of combining the
275
+ notion of volume with the operation of Minkowski addition. We start with a d-dimensional
276
+ real vector space V and a volume function Vol : {polytopes in V } → R≥0. The mixed
277
+ volume function
278
+ MVol : {polytopes in V }d → R≥0
279
+ is the unique function determined by the following three properties.
280
+ • (Symmetry) For any permutation π ∈ Sd,
281
+ MVol(P1, . . . , Pd) = MVol(π(P1, . . . , Pd)).
282
+ • (Multilinearity) For any i = 1, . . . , d and λ ∈ R≥0,
283
+ MVol(P1, . . . , λPi + P ′
284
+ i, . . . , Pd) = λ MVol(P1, . . . , Pi, . . . , Pd)
285
+ + MVol(P1, . . . , P ′
286
+ i, . . . , Pd),
287
+
288
+ 8
289
+ L. NOWAK, P. O’MELVENY, AND D. ROSS
290
+ where the linear combination of polytopes is defined by
291
+ λPi + P ′
292
+ i = {λv + w | v ∈ Pi, w ∈ P ′
293
+ i}.
294
+ • (Normalization) For any polytope P,
295
+ MVol(P, . . . , P) = Vol(P).
296
+ That such a mixed volume function exists and is unique is due to Minkowski [Min03], who
297
+ proved that such a function exists more generally for convex bodies, not just for polytopes.
298
+ 3.2. Mixed volumes of normal complexes. We now define a notion of mixed volumes
299
+ of normal complexes. Let Σ ⊆ NR be a simplicial d-fan and let ∗ ∈ Inn(NR) be an inner
300
+ product. Given pseudocubical values z1, . . . , zd ∈ Cub(Σ, ∗), we define the mixed volume
301
+ of the normal complexes CΣ,∗(z1), . . . , CΣ,∗(zd), denoted MVolΣ,∗(z1, . . . , zd) for brevity,
302
+ by
303
+ MVolΣ,∗(z1, . . . , zd) ..= \sum_{σ∈Σ(d)} MVolσ(Pσ,∗(z1), . . . , Pσ,∗(zd)).
307
+ In other words, the mixed volume is the sum of the mixed volumes of the polytopes associated
308
+ to the top-dimensional cones of Σ. More generally, if ω : Σ(d) → R>0 is a weight function,
309
+ then the mixed volume of the normal complexes CΣ,∗(z1), . . . , CΣ,∗(zd) weighted by
310
+ ω is defined by
311
+ MVolΣ,ω,∗(z1, . . . , zd) ..= \sum_{σ∈Σ(d)} ω(σ) MVolσ(Pσ,∗(z1), . . . , Pσ,∗(zd)).
315
+ In order to verify that this is a meaningful notion of mixed volumes for normal complexes,
316
+ we check that it is characterized by an analogue of the three characterizing properties of
317
+ mixed volumes of polytopes.
318
+ Proposition 3.1. Let Σ ⊆ NR be a simplicial d-fan, ∗ ∈ Inn(NR) an inner product, and
319
+ ω : Σ(d) → R>0 a weight function.
320
+ (1) For any z1, . . . , zd ∈ Cub(Σ, ∗) and π ∈ Sd,
321
+ MVolΣ,ω,∗(z1, . . . , zd) = MVolΣ,ω,∗(π(z1, . . . , zd)).
322
+ (2) For any i = 1, . . . , d, and for any z1, . . . , zi, z′
323
+ i, . . . , zd ∈ Cub(Σ, ∗) and λ ∈ R≥0,
324
+ MVolΣ,ω,∗(z1, . . . , λzi + z′
325
+ i, . . . , zd) = λ MVolΣ,ω,∗(z1, . . . , zi, . . . , zd)
326
+ + MVolΣ,ω,∗(z1, . . . , z′
327
+ i, . . . , zd).
328
+ (3) For any z ∈ Cub(Σ, ∗),
329
+ MVolΣ,ω,∗(z, . . . , z) = VolΣ,ω,∗(z).
330
+ Moreover, any function Cub(Σ, ∗)d → R≥0 satisfying Properties (1) – (3) must be MVolΣ,ω,∗.
331
+
332
+ MIXED VOLUMES OF NORMAL COMPLEXES
333
+ 9
334
+ Proof. Given that
335
+ MVolΣ,ω,∗(z1, . . . , zd) =
336
+
337
+ σ∈Σ(d)
338
+ ω(σ) MVolσ(Pσ,∗(z1), . . . , Pσ,∗(zd))
339
+ and the summands in the right-hand side are simply mixed volumes of polytopes, Proper-
340
+ ties (1) and (3) follow from the symmetry and normalization properties of mixed volumes in
341
+ the polytope setting. Moreover, once we prove that
342
+ (3.2)
343
+ Pσ,∗(λz + z′) = λPσ,∗(z) + Pσ,∗(z′)
344
+ for all z, z′ ∈ Cub(Σ, ∗) and λ ∈ R≥0, then Property (2) also follows from the multilinearity
345
+ property of mixed volumes in the polytope setting. Thus, it remains to prove (3.2), which
346
+ we accomplish by proving both inclusions.
347
+ First, suppose that v ∈ Pσ,∗(λz + z′). By Proposition 2.1, the vertices of Pσ,∗(λz + z′) are
348
+ {wτ,∗(λz + z′) | τ ⪯ σ}, so we can write v as a convex combination:
349
+ (3.3)
350
+ v =
351
+
352
+ τ⪯σ
353
+ aτ wτ,∗(λz + z′)
354
+ for some
355
+ aτ ∈ R≥0
356
+ with
357
+
358
+ τ⪯σ
359
+ aτ = 1.
360
+ To prove that v ∈ λPσ,∗(z) + Pσ,∗(z′), our next step is to prove that the vertices are linear:
361
+ (3.4)
362
+ wτ,∗(λz + z′) = λwτ,∗(z) + wτ,∗(z′).
363
+ Since wτ,∗(λz + z′) is the unique vector in Nτ,R with wτ,∗(λz + z′) ∗ uρ = (λz + z′)ρ for
364
+ all ρ ∈ τ(1), proving (3.4) amounts to proving that λwτ,∗(z) + wτ,∗(z′) also satisfies these
365
+ equations. Using bilinearity of the inner product and the definition of the w vectors, we have
366
+ (λwτ,∗(z) + wτ,∗(z′)) ∗ uρ = λwτ,∗(z) ∗ uρ + wτ,∗(z′) ∗ uρ
367
+ = λzρ + z′
368
+ ρ
369
+ = (λz + z′)ρ.
370
+ Therefore, (3.4) holds, and substituting (3.4) into (3.3) implies that
371
+ v = λ
372
+
373
+ τ⪯σ
374
+ aτwτ,∗(z) +
375
+
376
+ τ⪯σ
377
+ aτwτ,∗(z′) ∈ λPσ,∗(z) + Pσ,∗(z′).
378
+ To prove the other inclusion, suppose that v ∈ λPσ,∗(z) + Pσ,∗(z′). Then v = λw + w′ for
379
+ some w ∈ Pσ,∗(z) and w′ ∈ Pσ,∗(z′). This means that w, w′ ∈ σ and, in addition, w ∗ uρ ≤ zρ
+ and w′ ∗ uρ ≤ z′ρ for all ρ ∈ σ(1). Since σ is a cone, v = λw + w′ ∈ σ and, for every ρ ∈ σ(1),
382
+ we have
383
+ v ∗ uρ = (λw + w′) ∗ uρ
384
+ = λw ∗ uρ + w′ ∗ uρ
385
+ ≤ λzρ + z′
386
+ ρ,
387
+
388
+ 10
389
+ L. NOWAK, P. O’MELVENY, AND D. ROSS
390
+ from which we conclude that v ∈ Pσ,∗(λz + z′).
391
+ Finally, to prove the final assertion of the proposition, suppose that F : Cub(Σ, ∗)d → R≥0
392
+ satisfies Properties (1) – (3). Our goal is to prove that F(z1, . . . , zd) = MVolΣ,ω,∗(z1, . . . , zd)
393
+ for any pseudocubical values z1, . . . , zd ∈ Cub(Σ, ∗).
394
+ Set z = λ1z1 + · · · + λdzd with
395
+ λ1, . . . , λd ∈ R≥0 arbitrary. Property (3) implies that
396
+ F(z, . . . , z) = VolΣ,ω,∗(z) = MVolΣ,ω,∗(z, . . . , z).
397
+ Using Properties (1) and (2) we can expand both the left- and right-hand sides of this
398
+ equation as polynomials in λ1, . . . , λd:
+ \sum_{k1,...,kd} \binom{d}{k1, . . . , kd} F(\underbrace{z1, . . . , z1}_{k1}, . . . , \underbrace{zd, . . . , zd}_{kd}) λ1^{k1} · · · λd^{kd}
+ = \sum_{k1,...,kd} \binom{d}{k1, . . . , kd} MVolΣ,ω,∗(\underbrace{z1, . . . , z1}_{k1}, . . . , \underbrace{zd, . . . , zd}_{kd}) λ1^{k1} · · · λd^{kd}
438
+ Equating the coefficients of λ1 · · · λd in these two polynomials leads to the desired conclusion:
439
+ F(z1, . . . , zd) = MVolΣ,ω,∗(z1, . . . , zd).
440
+
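+ The proof above is the classical polarization argument, and it can be made computational: d! times
+ the mixed volume is the coefficient of λ1 · · · λd in VolΣ,ω,∗(λ1z1 + · · · + λdzd), which can be extracted by
+ the alternating-sum formula
+ d! · MVolΣ,ω,∗(z1, . . . , zd) = \sum_{S ⊆ {1,...,d}} (−1)^{d−|S|} VolΣ,ω,∗(\sum_{i∈S} zi).
+ The short Python sketch below (illustrative only, not from the paper) applies this formula to any
+ user-supplied volume polynomial; the callable volume_fn is a hypothetical stand-in for VolΣ,ω,∗.
+ from itertools import combinations
+ from math import factorial
+ def mixed_volume(volume_fn, zs):
+     """Polarize a degree-d volume polynomial; zs is a list of d value vectors."""
+     d = len(zs)
+     total = 0.0
+     for k in range(d + 1):
+         for subset in combinations(range(d), k):
+             # coordinatewise sum of the chosen value vectors (empty sum = zero vector)
+             s = [sum(zs[i][j] for i in subset) for j in range(len(zs[0]))]
+             total += (-1) ** (d - k) * volume_fn(s)
+     return total / factorial(d)
+ For instance, with volume_fn = lambda z: z[0] * z[1] and d = 2, the sketch returns
+ (z1[0]·z2[1] + z1[1]·z2[0])/2, matching the rectangle computation above.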
441
+ Our methods for studying Alexandrov–Fenchel inequalities will also require the following
442
+ positivity result.
443
+ Proposition 3.5. Let Σ ⊆ NR be a simplicial d-fan, ∗ ∈ Inn(NR) an inner product, and
444
+ ω : Σ(d) → R>0 a weight function. Then
445
+ MVolΣ,ω,∗(z1, . . . , zd) ≥ 0
446
+ for all
447
+ z1, . . . , zd ∈ Cub(Σ, ∗)
448
+ and
449
+ MVolΣ,ω,∗(z1, . . . , zd) > 0
450
+ for all
451
+ z1, . . . , zd ∈ Cub(Σ, ∗).
452
+ Proof. The first statement follows from the definition of MVolΣ,ω,∗ and the nonnegativity
453
+ of mixed volumes of polytopes [Sch14, Theorem 5.1.7]. For the second statement, we first
454
+ observe that z ∈ Cub(Σ, ∗) implies that Pσ,∗(z) has dimension d for every σ ∈ Σ(d), which
455
+ follows from the fact that Pσ,∗(z) is combinatorially equivalent to a d-cube [NR21, Propo-
456
+ sition 3.8]. Thus, the second statement follows from the fact that mixed volumes of full-
457
+ dimensional polytopes are strictly positive [Sch14, Theorem 5.1.8].
458
+
459
+ 3.3. Mixed volumes and mixed degrees. We now extend Theorem 2.2 to give a Chow-
460
+ theoretic interpretation of mixed volumes of normal complexes associated to tropical fans.
461
+ Theorem 3.6. Let (Σ, ω) be a tropical d-fan, let ∗ ∈ Inn(NR) be an inner product, and let
462
+ z1, . . . , zd ∈ Cub(Σ, ∗) be pseudocubical values. Then
463
+ MVolΣ,ω,∗(z1, . . . , zd) = degΣ,ω(D(z1) · · · D(zd)).
464
+
465
+ MIXED VOLUMES OF NORMAL COMPLEXES
466
+ 11
467
+ Proof. By Proposition 3.1, it suffices to prove that the function
468
+ Cub(Σ, ∗)d → R≥0
469
+ (z1, . . . , zd) ↦ degΣ,ω(D(z1) · · · D(zd))
470
+ is symmetric, multilinear, and normalized by VolΣ,ω,∗. Symmetry follows from the fact that
471
+ A•(Σ) is a commutative ring, multilinearity follows from the fact that degΣ,ω : Ad(Σ) → R
472
+ is a linear map, and normalization is the content of Theorem 2.2.
473
+
474
+ 4. Faces of Normal Complexes
475
+ In this section, we develop a face structure for normal complexes, analogous to the face
476
+ structure of polytopes. Parallel to the polytope case, we will see that each face is obtained
477
+ by intersecting the normal complex with supporting hyperplanes, that each face can, itself,
478
+ be viewed as a normal complex, and that a face of a face is, itself, a face. We then prove fun-
479
+ damental properties relating (mixed) volumes of normal complexes to the (mixed) volumes
480
+ of their facets, which perfectly parallel central results in the classical polytope setting.
481
+ 4.1. Orthogonal decompositions. The face construction for normal complexes makes
482
+ heavy use of an orthogonal decomposition of NR associated to each cone τ ∈ Σ, which
483
+ we now describe. Associated to each τ ∈ Σ, we have already met the subspace Nτ,R ⊆ NR,
484
+ which is the linear span of τ, and we now introduce notation for the quotient space
485
+ N^τ_R ..= NR/Nτ,R.
+ With the inner product ∗, we may identify N^τ_R as the orthogonal complement of Nτ,R:
+ N^τ_R = N^⊥_{τ,R} = {v ∈ NR | v ∗ u = 0 for all u ∈ Nτ,R} ⊆ NR,
+ allowing us to decompose NR as an orthogonal sum NR = Nτ,R ⊕ N^τ_R. We denote the
+ orthogonal projections onto the factors of this decomposition by pr_τ and pr^τ.
496
+ As we will see below, given a normal complex CΣ,∗(z) and a cone τ ∈ Σ, we will associate
497
+ a face Fτ(CΣ,∗(z)), and this face will lie in the space N τ
498
+ R.
499
+ In order to help the reader
500
+ digest the construction of Fτ(CΣ,∗(z)) and its subsequent interpretation as a normal complex,
501
+ we henceforth make the convention that τ superscripts will be used exclusively for objects
502
+ associated to the vector space N τ
503
+ R. For example, Στ will denote a fan in N τ
504
+ R and ∗τ will denote
505
+ an inner product on N τ
506
+ R.
507
+ 4.2. Faces of normal complexes. There are two primary steps in the face construction
508
+ for normal complexes. The first step is completely analogous to the polytope setting: we
509
+ intersect the normal complex with a collection of supporting hyperplanes to obtain a sub-
510
+ complex. However, in order to view this resulting subcomplex as a normal complex itself,
511
+
512
+ 12
513
+ L. NOWAK, P. O’MELVENY, AND D. ROSS
514
+ the second step of the construction requires us to translate this polytopal subcomplex to the
515
+ origin, where we can then endow it with the structure of a normal complex inside N τ
516
+ R.
517
+ Let Σ ⊆ NR be a simplicial d-fan, ∗ ∈ Inn(NR) an inner product, and z ∈ Cub(Σ, ∗) a
518
+ pseudocubical value. For each cone τ ∈ Σ, define the neighborhood of τ in Σ by
519
+ NτΣ ..= {π | π ⪯ σ for some σ ∈ Σ with τ ⪯ σ}.
520
+ To illustrate this definition, we have darkened the neighborhood of the ray ρ in the following
521
+ two-dimensional fan.
522
523
+ Notice that NτΣ is, itself, a simplicial d-fan in NR whose cones are a subset of Σ, and the
524
+ maximal cones of NτΣ comprise all of the maximal cones of Σ that contain τ. Since every
525
+ maximal cone σ ∈ NτΣ(d) contains τ as a face, it follows from the definitions that each
526
+ hyperplane Hρ,∗(z) with ρ ∈ τ(1) is a supporting hyperplane of Pσ,∗(z):
527
+ Pσ,∗(z) ⊆ H−
528
+ ρ,∗(z)
529
+ for all
530
+ σ ∈ NτΣ(d)
531
+ and
532
+ ρ ∈ τ(1).
533
+ Thus, for each σ ∈ NτΣ(d), we obtain a face of Pσ,∗(z) by intersecting with all of these
534
+ hyperplanes:
535
+ Fτ(Pσ,∗(z)) ..= Pσ,∗(z) ∩
536
+
537
+ ρ∈τ(1)
538
+ Hρ,∗(z).
539
+ The collection of these polytopes along with all of their faces forms a polytopal subcomplex
540
+ of CΣ,∗(z), which we denote
541
+ Fτ(CΣ,∗(z)) ..=
542
+
543
+ σ∈NτΣ(d)
544
+
545
+ Fτ(Pσ,∗(z)).
546
+ To illustrate how the polytopal subcomplex Fτ(CΣ,∗(z)) is constructed in a concrete exam-
547
+ ple, the following image depicts a two-dimensional normal complex where we have darkened
548
+ the collection of maximal polytopes associated to the neighborhood of a ray ρ. We have
549
+ also drawn in the hyperplane associated to ρ. The intersection of the hyperplane and the
550
+ darkened polytopes is Fρ(CΣ,∗(z)), which, in this example, is a polytopal complex comprised
551
+ of three line segments meeting at the point wρ,∗(z).
552
+
553
+ [Figure: the supporting hyperplane Hρ,∗(z) meeting the darkened polytopes in the subcomplex Fρ(CΣ,∗(z)).]
558
+ One might be tempted to call Fτ(CΣ,∗(z)) a “face” of CΣ,∗(z); however, a drawback would
559
+ be that Fτ(CΣ,∗(z)) is not, itself, a normal complex (all normal complexes contain the origin,
560
+ for example, while Fτ(CΣ,∗(z)) generally does not).
561
+ Thus, our construction involves one
562
+ more step, which is to translate Fτ(CΣ,∗(z)) by the vector wτ,∗(z). Notice that, tracking
563
+ back through the definitions, there is an identification of affine subspaces
564
+
565
+ ρ∈τ(1)
566
+ Hρ,∗(z) = N τ
567
+ R + wτ,∗(z).
568
+ Since Fτ(CΣ,∗(z)) is, by definition, contained in the left-hand side, it follows that its trans-
569
+ lation by −wτ,∗(z) is a polytopal complex in N τ
570
+ R. We define the face of CΣ,∗(z) associated
571
+ to τ ∈ Σ to be this polytopal complex:
572
+ F^τ(CΣ,∗(z)) ..= Fτ(CΣ,∗(z)) − wτ,∗(z) ⊆ N^τ_R.
574
+ The face associated to the ray ρ in our running example is depicted below inside N^ρ_R.
+ [Figure: N^ρ_R = Hρ,∗(z) − wρ,∗(z) and the face F^ρ(CΣ,∗(z)) = Fρ(CΣ,∗(z)) − wρ,∗(z).]
580
+ The next pair of images depicts the subcomplex Fρ(CΣ,∗(z)) ⊆ CΣ,∗(z) and, after trans-
581
+ lating to the origin, the face Fρ(CΣ,∗(z)), where ρ is a ray of a three-dimensional fan.
582
+
583
589
+ In the following subsections, it will also be useful to have notation for translates of the
590
+ polytopes Fτ(Pσ,∗(z)). We define
591
+ Fτ(Pσ,∗(z)) ..= Fτ(Pσ,∗(z)) − wτ,∗(z).
592
+ In terms of these translated polytopes, we can write the τ-face of CΣ,∗(z) as
593
+ Fτ(CΣ,∗(z)) =
594
+
595
+ σ∈NτΣ(d)
596
+
597
+ Fτ(Pσ,∗(z)).
598
+ 4.3. Faces as normal complexes. Our aim in this subsection is to realize each face
599
+ Fτ(CΣ,∗(z)) as a normal complex. In order to do so, we require several ingredients; namely,
600
+ we require a marked, pure, simplicial fan Στ in N τ
601
+ R, an inner product ∗τ on N τ
602
+ R, and a
603
+ pseudocubical value zτ ∈ Cub(Στ, ∗τ). We now define each of these ingredients.
604
+ For each cone τ ∈ Σ, define the star of Σ at τ ∈ Σ to be the fan in N τ
605
+ R comprised of all
606
+ projections of cones in the neighborhood of τ:
607
+ Στ ..= {prτ(π) | π ∈ NτΣ}.
608
+ The star of a two-dimensional fan Σ at a ray ρ is depicted below.
609
+ In the image, there
610
+ are three two-dimensional cones in the neighborhood of ρ that are projected onto three
611
+ one-dimensional cones that comprise the maximal cones in the star fan Σρ.
612
+ [Figure: the fan Σ ⊆ NR and its star Σρ ⊆ N^ρ_R at the ray ρ.]
616
+ Henceforth, we use the shorthand πτ = prτ(π).
617
+
618
+ MIXED VOLUMES OF NORMAL COMPLEXES
619
+ 15
620
+ Given any cone πτ ∈ Στ with π ∈ NτΣ, we can also view πτ as the projection of the larger
621
+ cone σ = π ∪ τ ∈ NτΣ. Note that σ is the unique maximal cone in NτΣ that projects onto
622
+ πτ, from which it follows that each cone in Στ is the projection of a distinguished cone in
623
+ NτΣ. In other words, there is a bijection
624
+ {σ ∈ NτΣ | τ ⪯ σ} → Στ
625
+ σ ↦ σ^τ.
+ From the assumptions that Σ is a simplicial d-fan, it follows that Στ is a simplicial fan in N^τ_R
628
+ that is pure of dimension dτ = d − dim(τ). Moreover, the simplicial hypothesis on Σ implies
629
+ that each ray η ∈ Στ(1) is the projection of a unique ray ˆη ∈ NτΣ(1), and we can use this
630
+ to mark each ray η ∈ Στ(1) with the vector prτ(uˆη).
631
+ We now have a marked, pure, simplicial fan in N τ
632
+ R, so it remains to define an inner product
633
+ and pseudocubical value. The inner product ∗τ ∈ Inn(N τ
634
+ R) is simply defined as the restriction
635
+ of the inner product ∗ ∈ Inn(NR) to the subspace N τ
636
+ R. Lastly, given any z ∈ RΣ(1), we define
637
+ zτ ∈ RΣτ(1) by the rule
638
+
639
+ η = zˆη − wτ,∗(z) ∗ uˆη,
640
+ where, as before, ˆη ∈ NτΣ(1) is the unique ray with prτ(ˆη) = η.
641
+ We now have all the ingredients necessary to state and prove the following result, which
642
+ asserts that faces of normal complexes are, themselves, normal complexes.
643
+ Proposition 4.1. Let Σ ⊆ NR be a simplicial d-fan, ∗ ∈ Inn(NR) an inner product, and
644
+ τ ∈ Σ a cone. If z ∈ RΣ(1) is (pseudo)cubical with respect to (Σ, ∗), then zτ is (pseudo)cubical
645
+ with respect to (Στ, ∗τ) and
646
+ Fτ(CΣ,∗(z)) = CΣτ,∗τ(zτ).
647
+ We note that the first statement—that zτ is (pseudo)cubical—is necessary in order for
648
+ CΣτ,∗τ(zτ) to even be well-defined. Proposition 4.1 is a statement about normal complexes,
649
+ or equivalently, about the polytopes that comprise those complexes.
650
+ In order to prove
651
+ Proposition 4.1, we first prove the following key lemma, which concerns just the vertices of
652
+ the polytopes.
653
+ Lemma 4.2. Let Σ ⊆ NR be a simplicial d-fan, ∗ ∈ Inn(NR) an inner product, and τ ∈ Σ
654
+ a cone. For any σ ∈ Σ with τ ⪯ σ, we have
655
+ prτ(wσ,∗(z)) = wσ,∗(z) − wτ,∗(z) = wστ,∗τ(zτ).
656
+ Proof. We start by establishing the first equality.
657
+ To do so, we begin by arguing that
658
+ wσ,∗(z) − wτ,∗(z) ∈ N τ
659
+ R.
660
+ Since N τ
661
+ R = N ⊥
662
+ τ,R, it suffices to prove that wσ,∗(z) − wτ,∗(z) is
663
+
664
+ 16
665
+ L. NOWAK, P. O’MELVENY, AND D. ROSS
666
+ orthogonal to the basis {uρ | ρ ∈ τ(1)} ⊆ Nτ,R. By definition of the w vectors and the
667
+ assumption that τ ⪯ σ, we compute
668
+ (wσ,∗(z) − wτ,∗(z)) ∗ uρ = zρ − zρ = 0
669
+ for all
670
+ ρ ∈ τ(1),
671
+ from which it follows that wσ,∗(z) − wτ,∗(z) ∈ N τ
672
+ R. Since NR = Nτ,R ⊕ N τ
673
+ R, the orthogonal
674
+ decomposition wσ,∗(z) = wτ,∗(z) + (wσ,∗(z) − wτ,∗(z)) then implies that
675
+ (4.3)
676
+ prτ(wσ,∗(z)) = wτ,∗(z)
677
+ and
678
+ prτ(wσ,∗(z)) = wσ,∗(z) − wτ,∗(z).
679
+ To prove that wσ,∗(z) − wτ,∗(z) = wστ,∗τ(zτ), we now argue that wσ,∗(z) − wτ,∗(z) is an
680
+ element of Nστ,R and is a solution of the equations defining wστ,∗τ(zτ):
681
+ (4.4)
682
+ v ∗τ uη = zτ
683
+ η
684
+ for all
685
+ η ∈ στ(1).
686
+ To check that wσ,∗(z) − wτ,∗(z) ∈ Nστ,R, we start by observing that we can write
687
+ wσ,∗(z) =
688
+
689
+ ρ∈σ(1)
690
+ aρ uρ
691
+ for some values aρ ∈ R, in which case
692
+ wσ,∗(z) − wτ,∗(z) = prτ(wσ,∗(z))
693
+ =
694
+
695
+ ρ∈σ(1)\τ(1)
696
+ aρ prτ(uρ)
697
+ =
698
+
699
+ η∈στ(1)
700
+ aˆη uη,
701
+ where the first equality uses (4.3), the second uses that prτ vanishes on Nτ,R, and the third
702
+ uses that the rays of στ are in natural bijection with σ(1) \ τ(1). Lastly, we peel back the
703
+ definitions to check that wσ,∗(z) − wτ,∗(z) is a solution of Equations (4.4):
704
+ (wσ,∗(z) − wτ,∗(z)) ∗τ uη = (wσ,∗(z) − wτ,∗(z)) ∗ (uˆη − prτ(uˆη))
705
+ = wσ,∗(z) ∗ uˆη − wτ,∗(z) ∗ uˆη −
706
+
707
+ wσ,∗(z) − wτ,∗(z)
708
+
709
+ ∗ prτ(uˆη)
710
+ = zˆη − wτ,∗(z) ∗ uˆη
711
+ = zτ
712
+ η,
713
+ where the first equality uses the orthogonal decomposition of uˆη and the fact that ∗τ is
714
+ just the restriction of ∗, the second equality uses linearity of the inner product, and the
715
+ third equality uses the definition of wσ,∗(z) along with the fact that the vectors prτ(uˆη) and
716
+ wσ,∗(z) − wτ,∗(z) = prτ(wσ,∗(z)) are in orthogonal subspaces.
717
+
718
+
719
+ MIXED VOLUMES OF NORMAL COMPLEXES
720
+ 17
721
+ Proof of Proposition 4.1. To prove the first statement in the cubical setting, assume that
722
+ z ∈ RΣ(1) is cubical. This means that, for every σ ∈ Σ, we can write
723
+ wσ,∗(z) =
724
+
725
+ ρ∈σ(1)
726
+ aρuρ
727
+ for some positive values aρ ∈ R>0. Consider any cone of Στ, which we can write as στ with
728
+ τ ⪯ σ. Applying the lemma, we then see that
729
+ wστ,∗τ(zτ) = prτ(wσ,∗(z))
730
+ =
731
+
732
+ ρ∈σ(1)\τ(1)
733
+ aρ prτ(uρ)
734
+ =
735
+
736
+ η∈στ(1)
737
+ aˆη uη.
738
+ This shows that wστ,∗τ(zτ) can be written as a positive combination of the ray generators of
739
+ στ, proving that zτ ∈ Cub(Στ, ∗τ). The proof in the pseudocubical setting is identical but
740
+ with “positive” replaced by “nonnegative.”
741
+ To prove that
742
+ Fτ(CΣ,∗(z)) = CΣτ,∗τ(zτ),
743
+ it suffices to identify the maximal polytopes in these complexes. In other words, we must
744
+ prove that, for every σ ∈ NτΣ(d), we have
745
+ (4.5)
746
+ Fτ(Pσ,∗(z)) = Pστ,∗τ(zτ).
747
+ To prove (4.5), we analyze the vertices of these polytopes.
748
+ By Proposition 2.1, the vertices of Pσ,∗(z) are {wπ,∗(z) | π ⪯ σ}. Since
749
+ Fτ(Pσ,∗(z)) = Pσ,∗(z) ∩
750
+
751
+ ρ∈τ(1)
752
+ Hρ,∗(z),
753
+ it follows that the vertices of Fτ(Pσ,∗(z)) are
754
+ {wπ,∗(z) | π ⪯ σ and wπ,∗(z) ∗ uρ = zρ for all ρ ∈ τ(1)}.
755
+ If a cone π ⪯ σ satisfies wπ,∗(z)∗uρ = zρ for all ρ ∈ τ(1), then the definition of the w-vectors
756
+ implies that wπ,∗(z) = wπ∪τ,∗(z), and it follows that the vertices of Fτ(Pσ,∗(z)) are
757
+ Vert
758
+
759
+ Fτ(Pσ,∗(z))
760
+
761
+ = {wπ,∗(z) | τ ⪯ π ⪯ σ}.
762
+ Upon translating by wτ,∗(z) to get from Fτ(Pσ,∗(z)) to F τ(Pσ,∗(z)), we see that
763
+ Vert
764
+
765
+ F τ(Pσ,∗(z))
766
+
767
+ = {wπ,∗(z) − wτ,∗(z) | τ ⪯ π ⪯ σ}
768
+ = {wπτ,∗τ(zτ) | πτ ⪯ στ}
769
+ = Vert
770
+
771
+ Pστ,∗τ(zτ))
772
+
773
+ ,
774
+
775
+ 18
776
+ L. NOWAK, P. O’MELVENY, AND D. ROSS
777
+ where the second equality is an application of Lemma 4.2 and the third is an application
778
+ of Proposition 2.1. Having matched the vertices of the polytopes in (4.5), the equality of
779
+ polytopes then follows.
780
+
781
+ The importance of Proposition 4.1 is that it allows us to endow each of the faces of a
782
+ normal complex with the structure of a normal complex, and in particular, it then allows
783
+ us to compute (mixed) volumes of faces. More specifically, if ω : Σ(d) → R>0 is a weight
784
+ function, then we obtain a weight function ωτ : Στ(dτ) → R>0 defined by ωτ(στ) = ω(σ) for
785
+ all σ ∈ Σ(d). The volume of the face Fτ(CΣ,∗(z)) weighted by ω is
786
+ VolΣτ,ωτ,∗τ(zτ).
787
+ Similarly, the mixed volume of the faces Fτ(CΣ,∗(z1)), . . . Fτ(CΣ,∗(zdτ)) weighted by ω is
788
+ MVolΣτ,ωτ,∗τ(zτ
789
+ 1, . . . , zτ
790
+ dτ).
791
+ In the next two subsections, we use these concepts to prove fundamental results relating
792
+ (mixed) volumes of normal complexes to the (mixed) volumes of their facets. In making
793
+ arguments using mixed volumes, it will be useful to consider facets of facets; as such, the
794
+ next result—asserting that the face of a face of a normal complex is a face of the original
795
+ normal complex—will be useful.
796
+ Proposition 4.6. Let Σ ⊆ NR be a simplicial d-fan, ∗ ∈ Inn(NR) an inner product, and
797
+ z ∈ Cub(Σ, ∗) a pseudocubical value. If τ, π ∈ Σ with τ ⪯ π, then
798
+ Fπτ(Fτ(CΣ,∗(z))) = Fπ(CΣ,∗(z)).
799
+ Proof. By Proposition 4.1, the claim in this proposition is equivalent to
800
+ Fπτ(CΣτ,∗τ(zτ)) = CΣπ,∗π(zπ).
801
+ It suffices to match the maximal polytopes in these complexes, so we must prove:
802
+ (4.7)
803
+ Fπτ(Pστ,∗τ(zτ)) = Pσπ,∗π(zπ)
804
+ for all
805
+ σ ∈ Σ(d)
806
+ with
807
+ τ ⪯ σ.
808
+ The vertices of the polytope in the left-hand side of (4.7) are
809
+ {wµτ,∗τ(zτ) − wπτ,∗τ(zτ) | πτ ⪯ µτ ⪯ στ}
810
+ while the vertices in the right-hand side of (4.7) are
811
+ {wµπ,∗π(zπ) | µπ ⪯ σπ}.
812
+
813
+ MIXED VOLUMES OF NORMAL COMPLEXES
814
+ 19
815
+ Notice that both sets of vertices are indexed by µ ∈ Σ with π ⪯ µ ⪯ σ, and we have
816
+ wµτ,∗τ(zτ) − wπτ,∗τ(zτ) = prπτ(wµτ,∗τ(zτ))
817
+ = prπτ(prτ(wµ,∗(z)))
818
+ = prπ(wµ,∗(z))
819
+ = wµπ,∗π(zπ),
820
+ where the first, second, and fourth equalities are Lemma 4.2, while the second is the obser-
821
+ vation that the projection prπ can be broken up into two steps: prπ = prπτ ◦ prτ. Thus, the
822
+ vertices of the polytopes in (4.7) match up, and the proposition follows.
823
+
824
+ 4.4. Volumes and facets. This subsection is devoted to proving the following result, which
825
+ relates the volume of a normal complex to the volumes of its facets.
826
+ Proposition 4.8. Let Σ ⊆ NR be a simplicial d-fan with weight function ω : Σ(d) → R>0,
827
+ let ∗ ∈ Inn(NR) be an inner product, and let z ∈ Cub(Σ, ∗) be a pseudocubical value. Then
828
+ VolΣ,ω,∗(z) = \sum_{ρ∈Σ(1)} zρ VolΣρ,ωρ,∗ρ(z^ρ).
832
+ The sum in the right-hand side of the theorem corresponds to decomposing the normal
833
+ complex into pyramids over its facets, as depicted in the next image.
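+ Continuing the quadrant example (with trivial weight ω ≡ 1): the rectangle Pσ,∗(z) = [0, zρ1] × [0, zρ2]
+ has lattice-normalized volume VolΣ,∗(z) = 2 zρ1 zρ2 (a fundamental simplex has volume 1, so a unit square
+ has volume 2), each facet is a segment of lattice length zρ2 or zρ1, and the right-hand side of
+ Proposition 4.8 gives zρ1 · zρ2 + zρ2 · zρ1 = 2 zρ1 zρ2, as expected.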
834
+ Proposition 4.8 follows from the following lemma relating the volume function Volσ on
835
+ Nσ,R to the volume function Volσρ on the hyperplane Nσρ,R ⊆ Nσ,R.
836
+ Lemma 4.9. Under the hypotheses of Proposition 4.8, let σ ∈ Σ(d) and ρ ∈ σ(1). For any
837
+ polytope P ⊆ Nσρ,R and a ∈ R≥0, we have
838
+ Volσ( conv(0, P + a uρ) ) = a (uρ ∗ uρ) · Volσρ(P).
843
+ For intuition, we note that the polytope conv(0, P + auρ) appearing in the left-hand side
844
+ of Lemma 4.9 is obtained from the polytope P by first translating P along the ray ρ, which
845
+ is orthogonal to Nσρ,R, then taking the convex hull with the origin, the result of which can
846
+ be thought of as a pyramid with P as base and the origin as apex. The right-hand side can
847
+
848
850
+ then be thought of as a “base-times-height” formula for the volume of this pyramid, where
851
+ the “height” of the vector auρ is a(uρ ∗ uρ). We now make this informal discussion precise.
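+ For instance, in the quadrant example with σ = Cone(e1, e2), ρ the ray of e1, and P the segment from 0
+ to c e2, the polytope conv(0, P + a e1) is the triangle with vertices 0, a e1, and a e1 + c e2; its normalized
+ volume is ac, which agrees with a (uρ ∗ uρ) · Volσρ(P) = a · 1 · c.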
852
+ Proof of Lemma 4.9. Let {vη | η ∈ σ(1)} ⊆ Mσ be the dual basis of {uη | η ∈ σ(1)} ⊆ Nσ,
853
+ defined uniquely by the equations
854
+ vη ∗ uµ =
855
+
856
+
857
+
858
+ 1
859
+ µ = η
860
+ 0
861
+ µ ̸= η.
862
+ Recall that each ray generator of σρ is of the form prρ(uη) for a unique η ∈ σ(1) \ {ρ}; we
863
+ claim that the dual vector of prρ(uη) in Mσρ is vη—in other words, the dual vector of prρ(uη)
864
+ is the same as the dual vector of uη. To verify this, note that, for any η, µ ∈ σ(1) \ {ρ}, we
865
+ have
866
+ prρ(uη) ∗ρ vµ = (uη − prρ(uη)) ∗ vµ
867
+ = uη ∗ vµ
868
+ =
869
+
870
+
871
+
872
+ 1
873
+ µ = η
874
+ 0
875
+ µ ̸= η,
876
+ where the first equality uses the decomposition of uη into its orthogonal components, along
877
+ with the fact that ∗ρ is just the restriction of ∗, and the second equality uses that prρ(uη) is
878
+ a multiple of uρ, along with uρ ∗ vµ = 0.
879
+ Using these dual bases, we define simplices in each of the vector spaces Nσ,R and Nσρ,R by
880
+ ∆(σ) = conv(0, {vη | η ∈ σ(1)}) ⊆ Nσ,R
881
+ and
882
+ ∆(σρ) = conv(0, {vη | η ∈ σ(1) \ {ρ}}) ⊆ Nσρ,R.
883
+ By our convention on how volumes are normalized in Nσ,R and Nσρ,R, along with our verifi-
884
+ cation above that {vη | η ∈ σ(1) \ {ρ}} is the dual basis of the ray generators of σρ, these
885
+ simplices have unit volume:
886
+ Volσ(∆(σ)) = Volσρ(∆(σρ)) = 1.
887
+ Notice that ∆(σρ) is a facet of ∆(σ) and we can write ∆(σ) = conv(vρ, ∆(σρ)). If we project
888
+ the vertex vρ of ∆(σ) onto the line spanned by ρ, we obtain a new simplex
889
+ ∆1(σ) = conv(prρ(vρ), ∆(σρ)).
890
+ Since the projection prρ is parallel to the facet ∆(σρ), it follows that
891
+ Volσ(∆1(σ)) = Volσ(∆(σ)) = Volσρ(∆(σρ)).
892
+
893
+ MIXED VOLUMES OF NORMAL COMPLEXES
894
+ 21
895
+ Now define a new simplex by sliding the vertex prρ(vρ) along ρ to the new vertex auρ:
896
+ ∆2(σ) = conv(auρ, ∆(σρ)).
897
+ By the standard projection formula, we have prρ(vρ) = uρ/(uρ ∗ uρ), from which we see that ∆2(σ)
900
+ is obtained from ∆1(σ) by scaling the height of the vertex prρ(vρ) by a factor of a(uρ ∗ uρ).
901
+ It follows that the volume also scales by a(uρ ∗ uρ):
902
+ Volσ(∆2(σ)) = a(uρ ∗ uρ) · Volσ(∆1(σ)) = a(uρ ∗ uρ) · Volσρ(∆(σρ)).
903
+ More concisely, we have proved that
904
+ (4.10)
905
+ Volσ
906
+
907
+ conv(auρ, P)
908
+
909
+ = a(uρ ∗ uρ) · Volσρ(P)
910
+ when P = ∆(σρ).
911
+ As a visual aid, we have depicted below the sequence of polytopes from the above discussion
912
+ in the specific setting of a two-dimensional cone σ, which we have visualized in R2 with the
913
+ usual dot product.
+ [Figure: the cone σ with rays ρ and η, the dual vector vη, and the simplices ∆(σ), ∆1(σ), and ∆2(σ) over the common base ∆(σρ) ⊆ Nσρ,R.]
954
+ We now extend (4.10) to any simplex P ⊆ Nσρ,R. To do so, first note that a simplex P can
955
+ be obtained from the specific simplex ∆(σρ) by a composition of a translation and a linear
956
+ transformation on Nσρ,R. Translating P within Nσρ,R does not affect the volume on either
957
+
958
+ 22
959
+ L. NOWAK, P. O’MELVENY, AND D. ROSS
960
+ side of (4.10). Given a linear transformation T, on the other hand, we can extend it to a
961
+ linear transformation �T on Nσ,R by simply fixing the vector uρ, in which case we have
962
+ �T(conv(auρ, P)) = conv(auρ, T(P)).
963
+ Since det( �T) = det(T) and linear transformations scale volumes by the absolute values of
964
+ their determinants, we conclude that the equality in (4.10) is preserved upon taking linear
965
+ transforms of P:
966
+ Volσ
967
+
968
+ conv(auρ, T(P))
969
+
970
+ = Volσ
971
+ � �T(conv(auρ, P))
972
+
973
+ = | det( �T)| Volσ
974
+
975
+ conv(auρ, P)
976
+
977
+ = | det(T)| · a(uρ ∗ uρ) · Volσρ(P)
978
+ = a(uρ ∗ uρ) · Volσρ(T(P)).
979
+ Knowing that (4.10) holds for simplices, we extend it to arbitrary polytopes P ⊆ Nσρ,R
980
+ by triangulating P and applying (4.10) to each simplex in the triangulation. The lemma
981
+ then follows from (4.10) along with the observation that conv(auρ, P) is just a reflection of
982
+ conv(0, P + auρ), so has the same volume.
983
+
984
+ We now use Lemma 4.9 to prove Proposition 4.8.
985
+ Proof of Proposition 4.8. For each top-dimensional cone σ ∈ Σ(d) and ρ ∈ σ(1), consider
986
+ the polytope face Fρ(Pσ,∗(z)) ⊆ Pσ,∗(z). By definition, we have
987
+ Fρ(Pσ,∗(z)) = F^ρ(Pσ,∗(z)) + wρ,∗(z).
+ Noting that wρ,∗(z) = (zρ/(uρ ∗ uρ)) uρ, Lemma 4.9 computes the volume of the pyramid conv(0, Fρ(Pσ,∗(z))):
+ (4.11)
+ Volσ( conv(0, Fρ(Pσ,∗(z))) ) = zρ Volσρ(F^ρ(Pσ,∗(z))) = zρ Volσρ(Pσρ,∗ρ(z^ρ)),
997
+ where the second equality is an application of (4.5).
998
+ Next, note that we can decompose each polytope Pσ,∗(z) into pyramids over the faces
999
+ Fρ(Pσ,∗(z)) with ρ ∈ σ(1), implying that
1000
+ (4.12)
+ Volσ(Pσ,∗(z)) = \sum_{ρ∈σ(1)} Volσ( conv(0, Fρ(Pσ,∗(z))) ).
1012
+ We then compute:
1013
+ VolΣ,ω,∗(z) = \sum_{σ∈Σ(d)} ω(σ) Volσ(Pσ,∗(z))
+ = \sum_{σ∈Σ(d)} ω(σ) \sum_{ρ∈σ(1)} Volσ( conv(0, Fρ(Pσ,∗(z))) )
+ = \sum_{σ∈Σ(d)} ω(σ) \sum_{ρ∈σ(1)} zρ Volσρ(Pσρ,∗ρ(z^ρ))
+ = \sum_{ρ∈Σ(1)} zρ \sum_{σρ∈Σρ(d−1)} ωρ(σρ) Volσρ(Pσρ,∗ρ(z^ρ))
+ = \sum_{ρ∈Σ(1)} zρ VolΣρ,ωρ,∗ρ(z^ρ),
1045
+ where the first equality is the definition of VolΣ,ω,∗(z), the second and third are (4.12) and
1046
+ (4.11), respectively, the fourth follows from the definition of ωρ and the fact that cones in
1047
+ Σρ(d − 1) are in bijection with the cones in Σ(d) containing ρ via σρ ↔ σ, and the fifth is
1048
+ the definition of VolΣρ,ωρ,∗ρ(zρ).
1049
+
1050
+ 4.5. Mixed volumes and facets. The aim of this subsection is to enhance Proposition 4.8
1051
+ to the following more general statement about mixed volumes. See [Sch14, Lemma 5.1.5] for
1052
+ the analogous result in the classical setting of strongly isomorphic polytopes.
1053
+ Proposition 4.13. Let Σ ⊆ NR be a simplicial d-fan with weight function ω : Σ(d) → R>0,
1054
+ let ∗ ∈ Inn(NR) be an inner product, and let z1, . . . , zd ∈ Cub(Σ, ∗) be pseudocubical values.
1055
+ Then
1056
+ MVolΣ,ω,∗(z1, . . . , zd) = \sum_{ρ∈Σ(1)} z1,ρ MVolΣρ,ωρ,∗ρ(z^ρ_2, . . . , z^ρ_d).
1062
+ Proof. We proceed by induction on d.
1063
+ If d = 1, then mixed volumes are just volumes,
1064
+ in which case Proposition 4.13 is a special case of Proposition 4.8.
1065
+ Assume, now, that
1066
+ Proposition 4.13 holds in dimension less than d > 1. Define
1067
+ F(z1, . . . , zd) = \sum_{ρ∈Σ(1)} z1,ρ MVolΣρ,ωρ,∗ρ(z^ρ_2, . . . , z^ρ_d).
1073
+ To prove that F = MVolΣ,∗,ω, Proposition 3.1 tells us that it suffices to prove that F is (1)
1074
+ symmetric, (2) multilinear, and (3) normalized correctly with respect to volume; we check
1075
+ these properties in reverse order.
1076
+
1077
+ 24
1078
+ L. NOWAK, P. O’MELVENY, AND D. ROSS
1079
+ To check (3), we note that
1080
+ F(z, . . . , z) = \sum_{ρ∈Σ(1)} zρ MVolΣρ,ωρ,∗ρ(z^ρ, . . . , z^ρ) = \sum_{ρ∈Σ(1)} zρ VolΣρ,ωρ,∗ρ(z^ρ) = VolΣ,ω,∗(z),
1089
+ where the first equality is the definition of F, the second is Proposition 3.1 Part (3), and the
1090
+ third is Proposition 4.8.
1091
+ To check (2), there are two cases to consider: linearity in the first coordinate and linearity
1092
+ in every other coordinate. Linearity in the first coordinate follows quickly from the definition
1093
+ of F, while linearity in every other coordinate follows from Proposition 3.1 Part (2) applied
1094
+ to (Σρ, ∗ρ, ωρ).
1095
+ Finally, to check (1), we first note that Proposition 3.1 Part (1) applied to (Σρ, ∗ρ, ωρ)
1096
+ implies that F is symmetric in the entries z2, . . . , zd. Thus, it remains to prove that F is
1097
+ invariant under transposing z1 and z2. To do so, we first apply the induction hypothesis to
1098
+ the mixed volumes appearing in the definition of F to obtain
1099
+ (4.14)
+ F(z1, . . . , zd) = \sum_{ρ∈Σ(1)} z1,ρ \sum_{η^ρ∈Σ^ρ(1)} z^ρ_{2,η^ρ} MVolΣρ,η,ωρ,η,∗ρ,η(z^{ρ,η}_3, . . . , z^{ρ,η}_d),
1110
+ where, to avoid the proliferation of parentheses and superscripts, we have written, for exam-
1111
+ ple, Σρ,η as short-hand for (Σρ)ηρ. Notice that the mixed volumes appearing in the right-hand
1112
+ side of (4.14) are mixed volumes associated to faces of faces. Proposition 4.6 tells us that
1113
+ the ηρ-face of the ρ-face of a normal complex is the same as the τ face of the original normal
1114
+ complex, where τ ∈ Σ(2) is the 2-cone containing ρ and η as rays. Therefore,
1115
+ MVolΣρ,η,ωρ,η,∗ρ,η(zρ,η
1116
+ 3 , . . . , zρ,η
1117
+ d ) = MVolΣτ,ωτ,∗τ(zτ
1118
+ 3, . . . , zτ
1119
+ d).
1120
+ Keeping in mind that each 2-cone τ appears twice in (4.14), once for each ordering of the
1121
+ rays, we have
1122
+ F(z1, . . . , zd) = \sum_{τ∈Σ(2), τ(1)={ρ,η}} ( z1,ρ z^ρ_{2,η^ρ} + z1,η z^η_{2,ρ^η} ) MVolΣτ,ωτ,∗τ(z^τ_3, . . . , z^τ_d).
1131
+ Therefore, it remains to prove that z1,ρ z^ρ_{2,η^ρ} + z1,η z^η_{2,ρ^η} is invariant under transposing 1 and
+ 2. Computing directly from the definition of z^ρ, we have
+ z1,ρ z^ρ_{2,η^ρ} + z1,η z^η_{2,ρ^η} = z1,ρ ( z2,η − wρ,∗(z2) ∗ uη ) + z1,η ( z2,ρ − wη,∗(z2) ∗ uρ ),
1146
+ from which we see that it suffices to prove that both
1147
+ z1,ρwρ,∗(z2) ∗ uη
1148
+ and
1149
+ z1,ηwη,∗(z2) ∗ uρ
1150
+
1151
+ MIXED VOLUMES OF NORMAL COMPLEXES
1152
+ 25
1153
+ are invariant under transposing 1 and 2. This invariance follows from the computations
1154
+ wρ,∗(z2) =
1155
+ z2,ρ
1156
+ uρ ∗ uρ
1157
+
1158
+ and
1159
+ wη,∗(z2) =
1160
+ z2,η
1161
+ uη ∗ uη
1162
+ uη.
1163
+
1164
The following analytic consequence of Proposition 4.13 will be useful in our computations in the next section.

Corollary 4.15. In addition to the hypotheses of Proposition 4.13, assume that Cub(Σ, ∗) is nonempty. Then for any fixed z_1, . . . , z_k ∈ Cub(Σ, ∗), we have

    \frac{∂}{∂z_ρ} MVol_{Σ,ω,∗}(z_1, . . . , z_k, \underbrace{z, . . . , z}_{d−k}) = (d − k) \, MVol_{Σ^ρ,ω^ρ,∗^ρ}(z^ρ_1, . . . , z^ρ_k, \underbrace{z^ρ, . . . , z^ρ}_{d−k−1}).

Proof. The assumption that Cub(Σ, ∗) ≠ ∅ implies that MVol_{Σ,ω,∗}(z_1, . . . , z_k, z, . . . , z) is a degree d − k polynomial in R[z_ρ | ρ ∈ Σ(1)], so the derivatives are well-defined. Proposition 4.13 and symmetry of mixed volumes imply that

    \frac{∂}{∂z_{i,ρ}} MVol_{Σ,ω,∗}(z_1, . . . , z_d) = MVol_{Σ^ρ,ω^ρ,∗^ρ}(z^ρ_1, . . . , z^ρ_{i−1}, z^ρ_{i+1}, . . . , z^ρ_d).

Viewing MVol_{Σ,ω,∗}(z_1, . . . , z_k, z, . . . , z) as the composition of MVol_{Σ,ω,∗}(z_1, . . . , z_d) with the specialization z_{k+1} = · · · = z_d = z, the result then follows from the multivariable chain rule. □
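To spell out that final application of the chain rule, the following display (added here as a reading aid; it is not part of the original argument and only restates the two facts established in the proof) records the computation explicitly:

    \frac{∂}{∂z_ρ} MVol_{Σ,ω,∗}(z_1, . . . , z_k, \underbrace{z, . . . , z}_{d−k})
        = \sum_{i=k+1}^{d} \left. \frac{∂}{∂z_{i,ρ}} MVol_{Σ,ω,∗}(z_1, . . . , z_d) \right|_{z_{k+1} = \cdots = z_d = z}
        = (d − k) \, MVol_{Σ^ρ,ω^ρ,∗^ρ}(z^ρ_1, . . . , z^ρ_k, \underbrace{z^ρ, . . . , z^ρ}_{d−k−1}),

since each of the d − k specialized slots contributes the same term after the substitution z_{k+1} = · · · = z_d = z.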
5. Alexandrov–Fenchel inequalities

One of the most consequential properties of mixed volumes of polytopes (or, more generally, of mixed volumes of convex bodies) is the Alexandrov–Fenchel inequalities. Given polytopes P_1, . . . , P_d in a d-dimensional real vector space V with volume function Vol, the Alexandrov–Fenchel inequalities state that

    MVol(P_1, P_2, P_3, . . . , P_d)^2 ≥ MVol(P_1, P_1, P_3, . . . , P_d) \, MVol(P_2, P_2, P_3, . . . , P_d)

(see, for example, [Sch14, Theorem 7.3.1] for a proof and historical references). It is our aim in this section to study Alexandrov–Fenchel inequalities in the setting of mixed volumes of normal complexes.

Let Σ ⊆ N_R be a simplicial d-fan, ω : Σ(d) → R_{>0} a weight function, and ∗ ∈ Inn(N_R) an inner product. We say that the triple (Σ, ω, ∗) is Alexandrov–Fenchel, or just AF for short, if Cub(Σ, ∗) ≠ ∅ and

    MVol_{Σ,ω,∗}(z_1, z_2, z_3, . . . , z_d)^2 ≥ MVol_{Σ,ω,∗}(z_1, z_1, z_3, . . . , z_d) \, MVol_{Σ,ω,∗}(z_2, z_2, z_3, . . . , z_d)

for all z_1, . . . , z_d ∈ Cub(Σ, ∗). In this section, we prove the following result, which provides sufficient conditions for proving that a triple (Σ, ω, ∗) is AF.

Theorem 5.1. Let Σ ⊆ N_R be a simplicial d-fan, ω : Σ(d) → R_{>0} a weight function, and ∗ ∈ Inn(N_R) an inner product such that Cub(Σ, ∗) ≠ ∅. The triple (Σ, ω, ∗) is AF if the following two conditions are satisfied:
(i) Σ^τ \ {0} is connected for any cone τ ∈ Σ(k) with k ≤ d − 3;
(ii) Hess( Vol_{Σ^τ,ω^τ,∗^τ}(z) ) has exactly one positive eigenvalue for any τ ∈ Σ(d − 2).

Remark 5.2. Condition (i) in Theorem 5.1 can be thought of as requiring that the fan Σ does not have any "pinch" points. For example, in dimension four, this condition rules out fans that locally look like a pair of four-dimensional cones meeting along a ray, because the star fan associated to that ray would comprise two three-dimensional cones that meet only at the origin.

Remark 5.3. Condition (ii) of Theorem 5.1 concerns only the two-dimensional stars of Σ. Since the volume polynomial of a two-dimensional fan is a quadratic form, the Hessians appearing in Condition (ii) are constant matrices. Condition (ii) can be viewed as an analogue of the Brunn–Minkowski inequality for polygons. For an example of a two-dimensional (tropical) fan that does not satisfy Condition (ii), see [BH17].
5.1. Proof of Theorem 5.1. Our proof of Theorem 5.1 is largely inspired by a proof of the classical Alexandrov–Fenchel inequalities recently developed by Cordero-Erausquin, Klartag, Merigot, and Santambrogio [CEKMS19], for which the key geometric input is Proposition 4.13. While the arguments in [CEKMS19] can be employed in this setting more-or-less verbatim, we present a more streamlined proof using ideas regarding Lorentzian polynomials recently developed by Brändén and Leake [BL21]. Before presenting a proof of Theorem 5.1, we pause to introduce key ideas regarding Lorentzian polynomials.

5.1.1. Lorentzian polynomials on cones. One way to view the AF inequalities is as the nonpositivity of the determinant of the 2 × 2 matrix

    \begin{pmatrix} MVol_{Σ,ω,∗}(z_1, z_1, z_3, . . . , z_d) & MVol_{Σ,ω,∗}(z_1, z_2, z_3, . . . , z_d) \\ MVol_{Σ,ω,∗}(z_2, z_1, z_3, . . . , z_d) & MVol_{Σ,ω,∗}(z_2, z_2, z_3, . . . , z_d) \end{pmatrix},

and this nonpositivity is equivalent to the matrix having exactly one positive eigenvalue. Lorentzian polynomials are a clever tool for capturing the essence of this observation, and are therefore a natural setting for understanding AF-type inequalities.

Our discussion of Lorentzian polynomials follows Brändén and Leake [BL21]. Suppose that C ⊆ R^n_{>0} is a nonempty open convex cone, and let f ∈ R[x_1, . . . , x_n] be a homogeneous polynomial of degree d. For each i = 1, . . . , n and v = (v_1, . . . , v_n) ∈ R^n, we use the following shorthand for partial and directional derivatives:

    ∂_i = \frac{∂}{∂x_i}    and    ∂_v = \sum_{i=1}^{n} v_i ∂_i.

We say that f is C-Lorentzian if, for all v_1, . . . , v_d ∈ C,
(P) ∂_{v_1} · · · ∂_{v_d} f > 0, and
(H) Hess(∂_{v_3} · · · ∂_{v_d} f) has exactly one positive eigenvalue.
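As a minimal illustration (added here for orientation; it is not taken from the original text), take n = d = 2, C = R^2_{>0}, and f(x_1, x_2) = x_1 x_2. For v_1, v_2 ∈ C,

    ∂_{v_1} ∂_{v_2} f = v_{1,1} v_{2,2} + v_{1,2} v_{2,1} > 0,    Hess(f) = \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix},

and the Hessian has eigenvalues ±1, so both (P) and (H) hold and f is C-Lorentzian. By contrast, f(x_1, x_2) = x_1^2 + x_2^2 satisfies (P) but fails (H), since its Hessian has two positive eigenvalues.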
To relate Lorentzian polynomials back to AF-type inequalities, we recall the following key observation (see [BH20, Proposition 4.4]).

Lemma 5.4. Let C ⊆ R^n_{>0} be a nonempty open convex cone, and let f ∈ R[x_1, . . . , x_n] be C-Lorentzian. Then for all v_1, v_2, v_3, . . . , v_d ∈ C, we have

    \big( ∂_{v_1} ∂_{v_2} ∂_{v_3} · · · ∂_{v_d} f \big)^2 ≥ \big( ∂_{v_1} ∂_{v_1} ∂_{v_3} · · · ∂_{v_d} f \big) \big( ∂_{v_2} ∂_{v_2} ∂_{v_3} · · · ∂_{v_d} f \big).

Proof. Consider the symmetric 2 × 2 matrix

    M = \begin{pmatrix} ∂_{v_1} ∂_{v_1} ∂_{v_3} · · · ∂_{v_d} f & ∂_{v_1} ∂_{v_2} ∂_{v_3} · · · ∂_{v_d} f \\ ∂_{v_2} ∂_{v_1} ∂_{v_3} · · · ∂_{v_d} f & ∂_{v_2} ∂_{v_2} ∂_{v_3} · · · ∂_{v_d} f \end{pmatrix}.

By (P), the entries of M are positive, so the Perron–Frobenius Theorem implies that M has at least one positive eigenvalue. On the other hand, M is a principal minor of Hess(∂_{v_3} · · · ∂_{v_d} f), which, by (H), has exactly one positive eigenvalue; thus, it follows from Cauchy's Interlacing Theorem that M has at most one positive eigenvalue. Therefore M has exactly one positive eigenvalue, implying that the determinant of M is nonpositive, proving the lemma. □

The following result, proved by Brändén and Leake [BL21], is particularly useful for the study of Lorentzian polynomials on cones. We view this result as an effective implementation of the key insights in [CEKMS19]; in essence, it eliminates the need for one of the induction parameters in [CEKMS19] because that induction parameter is captured within the recursive nature of Lorentzian polynomials.

Lemma 5.5 ([BL21], Proposition 2.4). Let C ⊆ R^n_{>0} be a nonempty open convex cone, and let f ∈ R[x_1, . . . , x_n] be a homogeneous polynomial of degree d. If
(1) ∂_{v_1} · · · ∂_{v_d} f > 0 for all v_1, . . . , v_d ∈ C,
(2) Hess( ∂_{v_1} · · · ∂_{v_{d−2}} f ) is irreducible¹ and has nonnegative off-diagonal entries for all v_1, . . . , v_{d−2} ∈ C, and
(3) ∂_i f is C-Lorentzian for all i = 1, . . . , n,
then f is C-Lorentzian.

¹An n × n matrix M is irreducible if the associated adjacency graph (the undirected graph on n labeled vertices with an edge between the i-th and j-th vertices whenever the (i, j) entry of M is nonzero) is connected.
5.1.2. Lorentzian volume polynomials. We now discuss how the above discussion of Lorentzian polynomials on cones can be used to study mixed volumes of normal complexes. Let Σ ⊆ N_R be a simplicial d-fan, ω : Σ(d) → R_{>0} a weight function, and ∗ ∈ Inn(N_R) an inner product. We assume that Cub(Σ, ∗) ≠ ∅, in which case the function Vol_{Σ,ω,∗} : Cub(Σ, ∗) → R is a homogeneous polynomial of degree d in R[z_ρ | ρ ∈ Σ(1)]. By Proposition 3.1(3), we have

    Vol_{Σ,ω,∗}(z) = MVol_{Σ,ω,∗}(z, . . . , z).

It then follows from Proposition 3.1(1) and (2) (and the chain rule) that

    (5.6)    ∂_{z_1} · · · ∂_{z_k} Vol_{Σ,ω,∗}(z) = \frac{d!}{(d − k)!} MVol_{Σ,ω,∗}(z_1, . . . , z_k, \underbrace{z, . . . , z}_{d−k})

for any z_1, . . . , z_k ∈ Cub(Σ, ∗). In particular, in order to prove that (Σ, ω, ∗) is AF, we now see that it suffices (by Lemma 5.4) to prove that Vol_{Σ,ω,∗} is Cub(Σ, ∗)-Lorentzian. Thus, Theorem 5.1 is a consequence of the following stronger result.

Theorem 5.7. Let Σ ⊆ N_R be a simplicial d-fan, ω : Σ(d) → R_{>0} a weight function, and ∗ ∈ Inn(N_R) an inner product such that Cub(Σ, ∗) ≠ ∅. Then Vol_{Σ,ω,∗} is Cub(Σ, ∗)-Lorentzian if the following two conditions are satisfied:
(i) Σ^τ \ {0} is connected for any cone τ ∈ Σ(k) with k ≤ d − 3;
(ii) Hess( Vol_{Σ^τ,ω^τ,∗^τ}(z) ) has exactly one positive eigenvalue for any τ ∈ Σ(d − 2).

Proof. We prove Theorem 5.7 by induction on d.

First consider the base case d = 2 (in which case Condition (i) is vacuous). Note that Vol_{Σ,ω,∗} satisfies (P) by (5.6) and the positivity of mixed volumes (Proposition 3.5), while (H) for Vol_{Σ,ω,∗} is equivalent to Condition (ii). Therefore, Theorem 5.7 holds when d = 2.

Now let d > 2 and assume (Σ, ω, ∗) satisfies Conditions (i) and (ii) in Theorem 5.7. To prove that Vol_{Σ,ω,∗} is Cub(Σ, ∗)-Lorentzian, we use Lemma 5.5. Translating the three conditions of Lemma 5.5 using (5.6), we must prove that
(1) MVol_{Σ,ω,∗}(z_1, . . . , z_d) > 0 for all z_1, . . . , z_d ∈ Cub(Σ, ∗),
(2) Hess( MVol_{Σ,ω,∗}(z_1, . . . , z_{d−2}, z, z) ) is irreducible and has nonnegative off-diagonal entries for all z_1, . . . , z_{d−2} ∈ Cub(Σ, ∗), and
(3) ∂_ρ Vol_{Σ,ω,∗}(z) is Cub(Σ, ∗)-Lorentzian for all ρ ∈ Σ(1).

Note that (1) is just the positivity of mixed volumes (Proposition 3.5). To prove (3), note that Proposition 3.1(3) and Corollary 4.15 (with k = 0) together imply that

    ∂_ρ Vol_{Σ,ω,∗}(z) = d \, Vol_{Σ^ρ,ω^ρ,∗^ρ}(z^ρ).

Applying the induction hypothesis to (Σ^ρ, ω^ρ, ∗^ρ), which we can do because any star fan of Σ^ρ is a star fan of Σ, so our assumption that (Σ, ω, ∗) satisfies the two conditions of Theorem 5.7 implies that (Σ^ρ, ω^ρ, ∗^ρ) also satisfies them, we conclude that ∂_ρ Vol_{Σ,ω,∗}(z) is Lorentzian, verifying (3).

Finally, to prove (2), we use Corollary 4.15 to compute

    ∂_ρ MVol_{Σ,ω,∗}(z_1, . . . , z_{d−2}, z, z) = 2 \, MVol_{Σ^ρ,ω^ρ,∗^ρ}(z^ρ_1, . . . , z^ρ_{d−2}, z^ρ).

If τ ∈ Σ(2) with rays ρ and η, then

    z^ρ_{η^ρ} = z_η − w_{ρ,∗}(z) ∗ u_η = z_η − \frac{u_ρ ∗ u_η}{u_ρ ∗ u_ρ} z_ρ,

from which it follows that

    (5.8)    ∂_η ∂_ρ MVol_{Σ,ω,∗}(z_1, . . . , z_{d−2}, z, z) = 2 \, MVol_{Σ^τ,ω^τ,∗^τ}(z^τ_1, . . . , z^τ_{d−2}).

On the other hand, if ρ and η do not lie on a common cone τ ∈ Σ(2), then

    (5.9)    ∂_η ∂_ρ MVol_{Σ,ω,∗}(z_1, . . . , z_{d−2}, z, z) = 0.

The positivity of mixed volumes for cubical values, along with (5.8) and (5.9), then implies that Hess( MVol_{Σ,ω,∗}(z_1, . . . , z_{d−2}, z, z) ) has nonnegative off-diagonal entries that are positive whenever the row and column index are the rays of a cone τ ∈ Σ(2). The first condition in Theorem 5.7 implies that we can travel from any ray of Σ to any other ray by passing only through the relative interiors of one- and two-dimensional cones, which then implies that Hess( MVol_{Σ,ω,∗}(z_1, . . . , z_{d−2}, z, z) ) is irreducible, concluding the proof. □
6. Application: the Heron–Rota–Welsh conjecture

As an application of our developments regarding mixed volumes of normal complexes, we show in this section how Theorem 5.1 can be used to prove the Heron–Rota–Welsh conjecture, which states that the coefficients of the characteristic polynomial of any matroid are log-concave. The bridge between matroids and mixed volumes is the Bergman fan; we begin this section by briefly recalling relevant notions regarding matroids and Bergman fans.

6.1. Matroids and Bergman fans. A (loopless) matroid M = (E, L) consists of a finite set E, called the ground set, and a collection of subsets L ⊆ 2^E, called flats, which satisfy the following three conditions:
(F1) ∅ ∈ L,
(F2) if F_1, F_2 ∈ L, then F_1 ∩ F_2 ∈ L, and
(F3) if F ∈ L, then every element of E \ F is contained in exactly one flat that is minimal among the flats that strictly contain F.
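For concreteness, here is a small running example (added for illustration and not part of the original text): the uniform matroid U_{2,3}, in standard notation, on E = {1, 2, 3}, whose flats are

    L = \{ ∅, \{1\}, \{2\}, \{3\}, \{1, 2, 3\} \}.

Axioms (F1) and (F2) are immediate, and (F3) can be checked directly: each element of E lies in exactly one minimal flat strictly containing ∅ (the corresponding singleton), and for a singleton flat the unique flat strictly containing it is E.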
We do not give a comprehensive overview of matroids; rather, we settle for a brief introduction of key concepts. For a more complete treatment, see Oxley's book [Oxl11].

The closure of a set S ⊆ E, denoted cl(S), is the smallest flat containing S. A set I ⊆ E is called independent if cl(I_1) ⊊ cl(I_2) for any I_1 ⊊ I_2 ⊆ I. The rank of a set S ⊆ E, denoted rk(S), is the maximum size of an independent subset of S, and the rank of M, denoted rk(M), is defined to be the rank of E. While we have chosen to characterize matroids in terms of their flats, we note that matroids can also be characterized in terms of their independent sets or their rank function.

A flag of flats (of length k) in M is a chain of the form

    F = (F_1 ⊊ · · · ⊊ F_k)    with    F_1, . . . , F_k ∈ L.

It can be checked from the matroid axioms that every maximal flag has one flat of each rank 0, . . . , rk(M). We let Δ_M denote the set of flags of flats, which naturally has the structure of a simplicial complex of dimension rk(M) + 1. Since every maximal flag contains ∅ and E, we often restrict our attention to studying proper flats. We use the notation L^∗ = L \ {∅, E} for the set of proper flats and Δ^∗_M for the set of flags of proper flats, which is a simplicial complex of dimension rk(M) − 1.

Given a matroid M, consider the vector space R^E with basis {v_e | e ∈ E}. For each subset S ⊆ E, define

    v_S = \sum_{e ∈ S} v_e ∈ R^E.

Set N_R = R^E / R v_E and denote the image of v_S in the quotient space N_R by u_S. For each flag F = (F_1 ⊊ · · · ⊊ F_k) ∈ Δ^∗_M, define a polyhedral cone

    σ_F = R_{≥0}\{u_{F_1}, . . . , u_{F_k}\} ⊆ N_R.

The Bergman fan of M, denoted Σ_M, is the polyhedral fan

    Σ_M = \{ σ_F | F ∈ Δ^∗_M \}.

Note that Σ_M is simplicial, pure of dimension d = rk(M) − 1, and marked by the vectors u_F.
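Continuing the illustration with U_{2,3} (again our addition), the proper flats are the three singletons, so in N_R = R^{\{1,2,3\}} / R v_{\{1,2,3\}} the Bergman fan consists of the zero cone together with three rays:

    Σ_{U_{2,3}} = \{ 0, \ R_{≥0} u_{\{1\}}, \ R_{≥0} u_{\{2\}}, \ R_{≥0} u_{\{3\}} \},    with    u_{\{1\}} + u_{\{2\}} + u_{\{3\}} = 0 in N_R.

It is pure of dimension d = rk(U_{2,3}) − 1 = 1, and the displayed relation is an instance of the balancing condition discussed in the next paragraph.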
Consider a cone σ_F ∈ Σ_M(d − 1) corresponding to a flag

    F = (F_1 ⊊ · · · ⊊ F_{k−1} ⊊ F_{k+1} ⊊ · · · ⊊ F_d)    with    rk(F_i) = i.

The d-cones containing σ_F are indexed by flats F with F_{k−1} ⊊ F ⊊ F_{k+1}. If there are ℓ such flats, then (F3) implies that

    \sum_{F ∈ L, \ F_{k−1} ⊊ F ⊊ F_{k+1}} u_F = (ℓ − 1) u_{F_{k−1}} + u_{F_{k+1}}.

Since the right-hand side lies in N_{σ_F,R}, this observation implies that Σ_M is balanced (tropical with weights all equal to 1).

In order to check that Bergman fans are AF, we require a working understanding of the star fans of Bergman fans. Consider a cone σ_F associated to a flag F = (F_1 ⊊ · · · ⊊ F_k). Set F_0 = ∅ and F_{k+1} = E, and for each j = 0, . . . , k consider the matroid minor M[F_j, F_{j+1}], which is the matroid on ground set F_{j+1} \ F_j with flats of the form F \ F_j where F is a flat of M satisfying F_j ⊆ F ⊆ F_{j+1}. Notice that the star fan Σ^{σ_F}_M lives in the quotient space

    N^{σ_F}_R = \frac{N_R}{R\{u_{F_1}, . . . , u_{F_k}\}} = \frac{R^E}{R\{v_{F_1}, . . . , v_{F_{k+1}}\}} = \bigoplus_{j=0}^{k} \frac{R^{F_{j+1} \setminus F_j}}{R v_{F_{j+1} \setminus F_j}},

and one checks that this natural isomorphism of vector spaces identifies the star of Σ_M at σ_F as the product of the Bergman fans of the associated matroid minors:

    (6.1)    Σ^{σ_F}_M = \prod_{j=0}^{k} Σ_{M[F_j, F_{j+1}]}.
6.2. Bergman fans are AF. We are now ready to use Theorem 5.1 to prove that Bergman fans of matroids are AF.

Theorem 6.2. Let M be a matroid of rank d + 1 and let Σ_M ⊆ N_R be the associated Bergman fan. If ∗ ∈ Inn(N_R) is any inner product with Cub(Σ_M, ∗) ≠ ∅, then (Σ_M, ∗) is AF.

Remark 6.3. We are assuming the weight function ω is equal to 1 because, as noted in the previous subsection, Σ_M is balanced. Thus, we omit ω from the notation in this section.

To prove Theorem 6.2, we verify the two conditions of Theorem 5.1. We accomplish this through the following three lemmas. The first lemma verifies that Bergman fans satisfy (a slight strengthening of) Condition (i) of Theorem 5.1.

Lemma 6.4. Σ^{σ_F}_M \ {0} is connected for any cone σ_F ∈ Σ_M(k) with k ≤ d − 2.

Proof. We begin by arguing that Σ_M \ {0} is connected for any matroid of rank at least 3. It suffices to prove that, for any two rays ρ_F, ρ_{F′} ∈ Σ_M(1) associated to flats F, F′ ∈ L^∗, there are sequences ρ_1, . . . , ρ_ℓ ∈ Σ_M(1) and τ_1, . . . , τ_{ℓ+1} ∈ Σ_M(2) such that

    ρ_F ≺ τ_1 ≻ ρ_1 ≺ · · · ≻ ρ_ℓ ≺ τ_{ℓ+1} ≻ ρ_{F′}.

If F ∩ F′ = G ≠ ∅, then G ∈ L^∗ by (F2) and the following is such a sequence:

    ρ_F ≺ τ_{G⊊F} ≻ ρ_G ≺ τ_{G⊊F′} ≻ ρ_{F′}.

If, on the other hand, F ∩ F′ = ∅, choose rank-one flats G ⊆ F and G′ ⊆ F′. By (F3), there is exactly one rank-two flat H that contains G and G′, so we can construct a sequence

    (ρ_F ≺ τ_{G⊊F} ≻) ρ_G ≺ τ_{G⊊H} ≻ ρ_H ≺ τ_{G′⊊H} ≻ ρ_{G′} (≺ τ_{G′⊊F′} ≻ ρ_{F′}),

where the parenthetical pieces should be omitted if G = F or G′ = F′.

Now consider any star fan Σ^{σ_F}_M where F = (F_1 ⊊ · · · ⊊ F_k) with k ≤ d − 2. Notice that such a star fan has dimension at least two, and we can write it as a product of Bergman fans of matroid minors

    Σ^{σ_F}_M = \prod_{j=0}^{k} Σ_{M[F_j, F_{j+1}]}.

Consider two rays ρ, ρ′ ∈ Σ^{σ_F}_M(1). If the two rays happen to come from different factors in the product, then we can connect them through the sequence ρ ≺ ρ × ρ′ ≻ ρ′. If, on the other hand, they lie in the same factor, there are two cases to consider. If the matroid minor of the factor that the rays lie in has rank at least 3, then the rays can be connected via the argument above. If, on the other hand, that matroid minor has rank 2, then one of the other matroid minors must also have rank at least 2. Choosing any ray ρ′′ in the Bergman fan of the second matroid minor, we can connect ρ and ρ′ through the sequence ρ ≺ ρ × ρ′′ ≻ ρ′′ ≺ ρ′ × ρ′′ ≻ ρ′. □
In order to verify Condition (ii) of Theorem 5.1, there are two cases to consider, depending on whether the two-dimensional star fan in question is, itself, a Bergman fan, or whether it is the product of two one-dimensional Bergman fans. In both cases, we use the fact that, in order to prove that the Hessian of a quadratic form f ∈ R[x_1, . . . , x_n] has exactly one positive eigenvalue, it suffices (by Sylvester's Law of Inertia) to find an invertible change of variables y_1(x), . . . , y_n(x) such that

    f = \sum_{i=1}^{n} a_i y_i(x)^2

with exactly one positive a_i. We now consider the two cases in the following two lemmas.

Lemma 6.5. If M is a rank-three matroid, then the Hessian of deg_{Σ_M}(D(z)^2) has exactly one positive eigenvalue.

Proof. For a flat F ∈ L^∗, we use the shorthand X_F = X_{ρ_F} and z_F = z_{ρ_F}. In order to compute deg_{Σ_M}(D(z)^2), we must compute deg_{Σ_M}(X_F X_G) for any two flats F, G ∈ L^∗. If F ⊊ G, then the degree is one, by definition of the degree function, and if F and G are incomparable, then the degree is zero. Thus, it remains to compute the degree of the squared terms. Using the definition of A^•(Σ_M) and the flat axioms, the reader is encouraged to verify that deg_{Σ_M}(X_F^2) = 1 − |{G ∈ L^∗ | F ⊊ G}| if rk(F) = 1 and deg_{Σ_M}(X_G^2) = −1 if rk(G) = 2. It follows that

    deg_{Σ_M}(D(z)^2) = 2 \sum_{F,G ∈ L^∗, \ F ⊊ G} z_F z_G + \sum_{F ∈ L^∗, \ rk(F)=1} z_F^2 − \sum_{F,G ∈ L^∗, \ F ⊊ G} z_F^2 − \sum_{G ∈ L^∗, \ rk(G)=2} z_G^2.

By creatively organizing the terms, we can rewrite this as

    deg_{Σ_M}(D(z)^2) = \Big( \sum_{F ∈ L^∗, \ rk(F)=1} z_F \Big)^2 − \sum_{G ∈ L^∗, \ rk(G)=2} \Big( z_G − \sum_{F ∈ L^∗, \ F ⊊ G} z_F \Big)^2,

where the only key matroid assertion used in the equivalence of these two formulas is that there exists a unique rank-two flat containing any two distinct rank-one flats. Sylvester's Law of Inertia implies that the Hessian of this quadratic form has exactly one positive eigenvalue. □
Lemma 6.6. If M and M′ are rank-two matroids, then the Hessian of deg_{Σ_M × Σ_{M′}}(D(z)^2) has exactly one positive eigenvalue.

Proof. By definition of A^•(Σ_M × Σ_{M′}), the reader is encouraged to verify that

    deg_{Σ_M × Σ_{M′}}(X_ρ X_η) = \begin{cases} 0 & ρ, η ∈ Σ_M(1) \text{ or } ρ, η ∈ Σ_{M′}(1), \\ 1 & ρ ∈ Σ_M(1) \text{ and } η ∈ Σ_{M′}(1). \end{cases}

Therefore,

    deg_{Σ_M × Σ_{M′}}(D(z)^2) = \sum_{ρ ∈ Σ_M(1), \ η ∈ Σ_{M′}(1)} 2 z_ρ z_η,

which can be rewritten as

    deg_{Σ_M × Σ_{M′}}(D(z)^2) = \frac{1}{2} \Big( \sum_{ρ ∈ Σ_M(1)} z_ρ + \sum_{η ∈ Σ_{M′}(1)} z_η \Big)^2 − \frac{1}{2} \Big( \sum_{ρ ∈ Σ_M(1)} z_ρ − \sum_{η ∈ Σ_{M′}(1)} z_η \Big)^2.

Sylvester's Law of Inertia implies that the Hessian of this quadratic form has exactly one positive eigenvalue. □

We now have all the ingredients we need to prove Theorem 6.2.

Proof of Theorem 6.2. We prove that Bergman fans satisfy the two conditions of Theorem 5.1. That Bergman fans satisfy Condition (i) is the content of Lemma 6.4. To prove Condition (ii), we first note that, since Bergman fans are balanced, their star fans are also balanced, so Theorem 2.2 implies that the volume polynomials in Condition (ii) are independent of ∗ and are equal to

    deg_{Σ^{σ_F}_M}(D(z)^2).

By the product decomposition of star fans given in (6.1), Σ^{σ_F}_M is either a two-dimensional Bergman fan or a product of two one-dimensional Bergman fans; in the former case, the Hessian of the volume polynomial has exactly one positive eigenvalue by Lemma 6.5, and in the latter case, by Lemma 6.6. □
6.3. Revisiting the Heron–Rota–Welsh Conjecture. The characteristic polynomial of a matroid M = (E, L) can be defined by

    χ_M(λ) = \sum_{S ⊆ E} (−1)^{|S|} λ^{rk(M) − rk(S)}.

It can be checked that χ_M(λ) has a root at λ = 1 for any positive-rank matroid, and the reduced characteristic polynomial is defined by

    \overline{χ}_M(λ) = \frac{χ_M(λ)}{λ − 1}.

We use the notation μ_a(M) and \overline{μ}_a(M) for the (unsigned) coefficients of these polynomials:

    χ_M(λ) = \sum_{a=0}^{rk(M)} (−1)^a μ_a(M) λ^{rk(M) − a}    and    \overline{χ}_M(λ) = \sum_{a=0}^{rk(M)−1} (−1)^a \overline{μ}_a(M) λ^{rk(M) − 1 − a}.

The Heron–Rota–Welsh Conjecture, developed in [Rot71, Her72, Wel76], asserts that the sequence of nonnegative integers μ_0(M), . . . , μ_{rk(M)}(M) is unimodal and log-concave:

    0 ≤ μ_0(M) ≤ · · · ≤ μ_k(M) ≥ · · · ≥ μ_{rk(M)}(M) ≥ 0    for some    k ∈ {0, . . . , rk(M)}

and

    μ_k(M)^2 ≥ μ_{k−1}(M) μ_{k+1}(M)    for every    k ∈ {1, . . . , rk(M) − 1}.
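As a quick illustration of these definitions (added here; it plays no role in the argument), take the uniform matroid U_{3,4} of rank 3 on four elements:

    χ_{U_{3,4}}(λ) = λ^3 − 4λ^2 + 6λ − 3 = (λ − 1)(λ^2 − 3λ + 3),    \overline{χ}_{U_{3,4}}(λ) = λ^2 − 3λ + 3,

so (μ_0, μ_1, μ_2, μ_3) = (1, 4, 6, 3) and (\overline{μ}_0, \overline{μ}_1, \overline{μ}_2) = (1, 3, 3). Both coefficient sequences are unimodal and log-concave; for instance, 4^2 ≥ 1 · 6, 6^2 ≥ 4 · 3, and 3^2 ≥ 1 · 3.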
The Heron–Rota–Welsh Conjecture was first proved by Adiprasito, Huh, and Katz [AHK18]. Our aim here is to show how this result also follows from the developments in this paper.

It is elementary to check that the unimodality and log-concavity of the coefficients of the characteristic polynomial is implied by the analogous properties for the coefficients of the reduced characteristic polynomial. The bridge from characteristic polynomials to the content of this paper, then, is a result of Huh and Katz [HK12, Proposition 5.2] (see also [AHK18, Proposition 9.5] and [DR22, Proposition 3.11]), which asserts that

    \overline{μ}_a(M) = deg_{Σ_M}(α^{d−a} β^a),

where rk(M) = d + 1 and α, β ∈ A^1(Σ_M) are defined by

    α = \sum_{e_0 ∈ F} X_F    and    β = \sum_{e_0 ∉ F} X_F

for some e_0 ∈ E (these Chow classes are independent of the choice of e_0).

Choose any e_0 ∈ E, and let ∗ ∈ Inn(N_R) be the inner product with orthonormal basis {u_e | e ≠ e_0} ⊆ N_R = R^E / R u_E. For two flats F_1, F_2 ∈ L^∗, we compute

    u_{F_1} ∗ u_{F_2} = \begin{cases} |F_1 ∩ F_2| & e_0 ∉ F_1 \text{ and } e_0 ∉ F_2, \\ −|F_1 ∩ F_2^c| & e_0 ∉ F_1 \text{ and } e_0 ∈ F_2, \\ |F_1^c ∩ F_2^c| & e_0 ∈ F_1 \text{ and } e_0 ∈ F_2. \end{cases}

Define z^α, z^β ∈ R^{Σ_M(1)} = R^{L^∗} by

    z^α_F = \begin{cases} 1 & e_0 ∈ F, \\ 0 & e_0 ∉ F, \end{cases}    and    z^β_F = \begin{cases} 1 & e_0 ∉ F, \\ 0 & e_0 ∈ F, \end{cases}

so that D(z^α) = α and D(z^β) = β in A^1(Σ_M). The following lemma allows us to connect characteristic polynomials to mixed volumes of normal complexes.
Lemma 6.7. z^α, z^β ∈ Cub(Σ_M, ∗).

Proof. We must argue that w_{σ,∗}(z^α), w_{σ,∗}(z^β) ∈ σ for every cone σ ∈ Σ_M. Consider a flag F = (F_1 ⊊ · · · ⊊ F_k) corresponding to a cone σ_F ∈ Σ_M. It suffices to prove that

    (6.8)    w_{σ_F,∗}(z^α) = \begin{cases} \frac{1}{|F_k^c|} u_{F_k} & e_0 ∈ F_k, \\ 0 & e_0 ∉ F_k, \end{cases}

and

    (6.9)    w_{σ_F,∗}(z^β) = \begin{cases} \frac{1}{|F_1|} u_{F_1} & e_0 ∉ F_1, \\ 0 & e_0 ∈ F_1. \end{cases}

We verify (6.8); the verification of (6.9) is similar.

To verify (6.8), first suppose that e_0 ∈ F_k. Then for any j = 1, . . . , k, it follows from the definition of ∗ that

    u_{F_k} ∗ u_{F_j} = \begin{cases} |F_k^c| & e_0 ∈ F_j, \\ 0 & e_0 ∉ F_j. \end{cases}

Using this, we verify that \frac{1}{|F_k^c|} u_{F_k} satisfies the defining equations of w_{σ_F,∗}(z^α):

    \frac{1}{|F_k^c|} u_{F_k} ∗ u_{F_j} = z^α_{F_j}    for all    j = 1, . . . , k.

Now suppose that e_0 ∉ F_k. Then e_0 ∉ F_j for any j = 1, . . . , k, so z^α_{F_j} = 0. Thus, the defining equations for w_{σ_F,∗}(z^α) become

    w_{σ_F,∗}(z^α) ∗ u_{F_j} = 0    for all    j = 1, . . . , k,

showing that w_{σ_F,∗}(z^α) = 0. □
It follows from Theorem 3.6 that the coefficients of the reduced characteristic polynomial have a volume-theoretic interpretation:

    \overline{μ}_a(M) = MVol_{Σ_M,∗}(\underbrace{z^α, . . . , z^α}_{d−a}, \underbrace{z^β, . . . , z^β}_{a}).

By [NR21, Proposition 7.4], we know that Cub(Σ_M, ∗) ≠ ∅, and since the cubical cone is the interior of the pseudocubical cone, we may approximate the pseudocubical values z^α, z^β with cubical values z^α_t, z^β_t ∈ Cub(Σ_M, ∗) such that

    \lim_{t → 0} z^α_t = z^α    and    \lim_{t → 0} z^β_t = z^β.

Define

    \overline{μ}^t_a(M) = MVol_{Σ_M,∗}(\underbrace{z^α_t, . . . , z^α_t}_{d−a}, \underbrace{z^β_t, . . . , z^β_t}_{a}).

By Theorem 6.2, we know that (Σ_M, ∗) is AF, and the AF inequalities applied to the mixed volumes \overline{μ}^t_a(M) imply that the sequence \overline{μ}^t_0(M), . . . , \overline{μ}^t_d(M) is log-concave. Since mixed volumes of cubical values are positive (Proposition 3.5), and since all log-concave sequences of positive values are unimodal, we see that the sequence \overline{μ}^t_0(M), . . . , \overline{μ}^t_d(M) is also unimodal. Since both unimodality and log-concavity are preserved under limits, we conclude that

    \overline{μ}_0(M), . . . , \overline{μ}_d(M)

is unimodal and log-concave, verifying the Heron–Rota–Welsh Conjecture.
References

[ADH20] F. Ardila, G. Denham, and J. Huh. Lagrangian geometry of matroids. Preprint: arXiv:2004.13116, 2020.
[AGV21] N. Anari, S. O. Gharan, and C. Vinzant. Log-concave polynomials, I: entropy and a deterministic approximation algorithm for counting bases of matroids. Duke Math. J., 170(16):3459–3504, 2021.
[AHK18] K. Adiprasito, J. Huh, and E. Katz. Hodge theory for combinatorial geometries. Ann. of Math. (2), 188(2):381–452, 2018.
[ALGV18] N. Anari, K. Liu, S. O. Gharan, and C. Vinzant. Log-concave polynomials III: Mason's ultra-log-concavity conjecture for independent sets of matroids. Preprint: arXiv:1811.01600, 2018.
[ALGV19] N. Anari, K. Liu, S. O. Gharan, and C. Vinzant. Log-concave polynomials II: High-dimensional walks and an FPRAS for counting bases of a matroid. In STOC'19—Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing, pages 1–12. ACM, New York, 2019.
[AP20] O. Amini and M. Piquerez. Hodge theory for tropical varieties. Preprint: arXiv:2007.07826, 2020.
[AP21] O. Amini and M. Piquerez. Homology of tropical fans. Preprint: arXiv:2105.01504, 2021.
[BES20] S. Backman, C. Eur, and C. Simpson. Simplicial generation of Chow rings of matroids. Sém. Lothar. Combin., 84B:Art. 52, 11, 2020.
[BH17] F. Babaee and J. Huh. A tropical approach to a generalized Hodge conjecture for positive currents. Duke Math. J., 166(14):2749–2813, 2017.
[BH20] P. Brändén and J. Huh. Lorentzian polynomials. Ann. of Math. (2), 192(3):821–891, 2020.
[BHM+20] T. Braden, J. Huh, J. P. Matherne, N. Proudfoot, and B. Wang. Singular Hodge theory for combinatorial geometries. Preprint: arXiv:2010.06088, 2020.
[BHM+22] T. Braden, J. Huh, J. P. Matherne, N. Proudfoot, and B. Wang. A semi-small decomposition of the Chow ring of a matroid. Adv. Math., 409(part A):Paper No. 108646, 49, 2022.
[BL21] P. Brändén and J. Leake. Lorentzian polynomials on cones and the Heron-Rota-Welsh conjecture. Preprint: arXiv:2110.08647, 2021.
[CEKMS19] D. Cordero-Erausquin, B. Klartag, Q. Merigot, and F. Santambrogio. One more proof of the Alexandrov-Fenchel inequality. C. R. Math. Acad. Sci. Paris, 357(8):676–680, 2019.
[CP21] S. H. Chan and I. Pak. Log-concave poset inequalities. Preprint: arXiv:2110.10740, 2021.
[DR22] J. Dastidar and D. Ross. Matroid psi classes. Selecta Math. (N.S.), 28(3):Paper No. 55, 38, 2022.
[Her72] A. P. Heron. Matroid polynomials. In Combinatorics (Proc. Conf. Combinatorial Math., Math. Inst., Oxford, 1972), pages 164–202, 1972.
[HK12] J. Huh and E. Katz. Log-concavity of characteristic polynomials and the Bergman fan of matroids. Math. Ann., 354(3):1103–1116, 2012.
[Min03] H. Minkowski. Volumen und Oberfläche. Math. Ann., 57(4):447–495, 1903.
[NR21] R. Nathanson and D. Ross. Tropical fans and normal complexes. Preprint: arXiv:2110.08647, 2021.
[Oxl11] J. Oxley. Matroid theory, volume 21 of Oxford Graduate Texts in Mathematics. Oxford University Press, Oxford, second edition, 2011.
[Rot71] G.-C. Rota. Combinatorial theory, old and new. In Actes du Congrès International des Mathématiciens (Nice, 1970), Tome 3, pages 229–233, 1971.
[Sch14] R. Schneider. Convex bodies: the Brunn-Minkowski theory, volume 151 of Encyclopedia of Mathematics and its Applications. Cambridge University Press, Cambridge, expanded edition, 2014.
[Wel76] D. J. A. Welsh. Matroid theory. Academic Press [Harcourt Brace Jovanovich, Publishers], London-New York, 1976. L. M. S. Monographs, No. 8.
Department of Mathematics, University of Washington
Email address: [email protected]

Department of Mathematics, San Francisco State University
Email address: [email protected]

Department of Mathematics, San Francisco State University
Email address: [email protected]
2tE4T4oBgHgl3EQf0A0W/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
39FQT4oBgHgl3EQfHTWW/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f3ddb8d94d1a3ed38935976f66005a08153115ef5ea5b9e398bf4af2f806811
3
+ size 3014701
4dFQT4oBgHgl3EQf4Ta7/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:49ebe7f41c146bf922a4067a9bba65b9ecdd0577d9c9fcf99969e4c9d2084834
3
+ size 8126509
6dE0T4oBgHgl3EQffAAW/content/tmp_files/2301.02397v1.pdf.txt ADDED
@@ -0,0 +1,3265 @@
arXiv:2301.02397v1 [math.AP] 6 Jan 2023

Fine boundary regularity for fully nonlinear mixed local-nonlocal problems

MITESH MODASIYA AND ABHROJYOTI SEN

Abstract. We consider Dirichlet problems for fully nonlinear mixed local-nonlocal non-translation invariant operators. For a bounded C^2 domain Ω ⊂ R^d, let u ∈ C(R^d) be a viscosity solution of such a Dirichlet problem. We obtain global Lipschitz regularity and fine boundary regularity for u by constructing appropriate sub- and supersolutions coupled with a weak version of the Harnack inequality. We apply these results to obtain Hölder regularity of Du up to the boundary.

1. Introduction and main results

In this article, for a bounded C^2 domain Ω ⊂ R^d we establish the boundary regularity of the solution u to the in-equations

    (1.1)    Lu + C_0 |Du| ≥ −K in Ω,    Lu − C_0 |Du| ≤ K in Ω,    u = 0 in Ω^c,

where C_0, K ≥ 0 and L is a fully nonlinear integro-differential operator of the form

    (1.2)    Lu(x) := L[x, u] = \sup_{θ ∈ Θ} \inf_{ν ∈ Γ} \big\{ \operatorname{Tr} a_{θν}(x) D^2 u(x) + I_{θν}[x, u] \big\},

for some index sets Θ, Γ. The coefficient a_{θν} : Ω → R^{d×d} is a matrix valued function and I_{θν} is a nonlocal operator defined as

    (1.3)    I_{θν} u(x) := I_{θν}[x, u] = \int_{R^d} \big( u(x + y) − u(x) − 1_{B_1}(y) Du(x) · y \big) N_{θν}(x, y) \, dy.

The above in-equations (1.1) are motivated by Hamilton-Jacobi equations of the form

    Iu(x) := \sup_{θ ∈ Θ} \inf_{ν ∈ Γ} \{ L_{θν} u(x) + f_{θν}(x) \} = 0,    where

    (1.4)    L_{θν} u(x) = \operatorname{Tr} a_{θν}(x) D^2 u(x) + I_{θν}[x, u] + b_{θν}(x) · Du(x),

and b_{θν}(·) and f_{θν}(·) are bounded functions on Ω. These linear operators (1.4) are extended generators for a wide class of d-dimensional Feller processes (more precisely, jump diffusions), and the nonlinear operator Iu(·) has its connection to stochastic control problems and differential games (see [12,13] and the references therein). The first term in (1.4) represents the diffusion, the second term represents the jump part of a Feller process, and the third represents the drift. We refer to [1, 14, 15] and the references therein for more on the connections between the operators of the form (1.4) and stochastic differential equations. For precise applications of these types of operators in finance and biological models, we refer to [20,23,24] and the references therein.

Department of Mathematics, Indian Institute of Science Education and Research, Dr. Homi Bhabha Road, Pune 411008, India. Email: [email protected]

2020 Mathematics Subject Classification. Primary: 35D40, 47G20, 35J60, 35B65.
Key words and phrases. Operators of mixed order, viscosity solution, fine boundary regularity, fully nonlinear integro-PDEs, Harnack inequality, gradient estimate.
We set the following assumptions on the coefficient a_{θν}(·) and the kernel N_{θν}(x, y) throughout this article.

Assumption 1.1.
(a) a_{θν}(·) are uniformly continuous and bounded in \bar{Ω}, uniformly in θ, ν for θ ∈ Θ, ν ∈ Γ. Furthermore, a_{θν}(·) satisfies the uniform ellipticity condition λI ≤ a_{θν}(·) ≤ ΛI for some 0 < λ ≤ Λ, where I denotes the d × d identity matrix.
(b) For each θ ∈ Θ, ν ∈ Γ, N_{θν} is a measurable function on Ω × R^d, and for some α ∈ (0, 2) there exists a kernel k that is measurable in R^d \ {0} such that for any θ ∈ Θ, ν ∈ Γ, x ∈ Ω, we have

    0 ≤ N_{θν}(x, y) ≤ k(y)    and    \int_{R^d} (1 ∧ |y|^α) k(y) \, dy < +∞,

where we denote p ∧ q := min{p, q} for p, q ∈ R.

Let us comment briefly on Assumption 1.1. The uniform continuity of a_{θν}(·) is required for the stability of viscosity sub- and supersolutions under appropriate limits and is useful in Lemma A.1, which is a key step for proving interior C^{1,γ} regularity (cf. Lemma 2.1). Assumption 1.1(b) includes a large class of kernels. We mention some of them below.
Example 1.1. Consider the following kernels N_{θν}(x, y):

(i) N_{θν}(x, y) = \frac{1}{|y|^{d+σ}} for σ ∈ (0, 2). Clearly we can take k(y) = \frac{1}{|y|^{d+σ}}, and \int_{R^d} (1 ∧ |y|^α) k(y) \, dy is finite for α ∈ [1 + σ/2, 2); a short verification is sketched after this example.

(ii) N_{θν}(x, y) = \sum_{i=1}^{∞} \frac{a_i}{|y|^{d+σ_i}} for σ_i ∈ (0, 2), σ_0 = \sup_i σ_i < 2 and \sum_{i=1}^{∞} a_i = 1. Similarly, taking N_{θν}(x, y) = k(y) we can see that \int_{R^d} (1 ∧ |y|^α) k(y) \, dy < +∞ for α ∈ [1 + σ_0/2, 2).

(iii) N_{θν}(x, y) = \begin{cases} \frac{(1 − \log |y|)^β}{|y|^{d+σ}} & \text{for } 0 < |y| ≤ 1, \\ \frac{(1 + \log |y|)^{−β}}{|y|^{d+σ}} & \text{for } |y| ≥ 1, \end{cases} where σ ∈ (0, 2).

    (a) For 2(2 − σ) > β ≥ 0, taking N_{θν}(x, y) = k(y) we have \int_{R^d} (1 ∧ |y|^α) k(y) \, dy < +∞ for α ∈ [1 + σ/2 + β/4, 2).
    (b) For −σ < β < 0, taking N_{θν}(x, y) = k(y) we have \int_{R^d} (1 ∧ |y|^α) k(y) \, dy < +∞ for α ∈ [1 + σ/2, 2).

    Proof of (a):

        \int_{R^d} (1 ∧ |y|^α) k(y) \, dy = \int_{|y| ≤ 1} \frac{|y|^α (1 − \log |y|)^β}{|y|^{d+σ}} \, dy + \int_{|y| > 1} \frac{(1 + \log |y|)^{−β}}{|y|^{d+σ}} \, dy := I_1 + I_2.

    Using (1 − \log |y|) ≤ \frac{1}{\sqrt{|y|}} + 1 and the convexity of ξ(t) = t^p for p ≥ 1, we get

        (1 − \log |y|)^β ≤ C \Big( \frac{1}{|y|^{β/2}} + 1 \Big).

    Therefore

        I_1 ≤ \int_{|y| ≤ 1} \frac{C \, dy}{|y|^{β/2 + d + σ − α}} + \int_{|y| ≤ 1} \frac{C \, dy}{|y|^{d + σ − α}} < +∞ for α ∈ [1 + σ/2 + β/4, 2),

    and

        I_2 ≤ \int_{|y| > 1} \frac{dy}{|y|^{d+σ}} < +∞.

    Proof of (b): Since β < 0 in this case, we have (1 − \log |y|)^β ≤ 1 and I_1 < +∞ for α ∈ [1 + σ/2, 2). To estimate I_2, observe (1 + \log |y|)^{−β} ≤ (1 + |y|)^{−β} and

        I_2 ≤ C \int_{|y| > 1} \frac{1 + |y|^{−β}}{|y|^{d+σ}} \, dy < +∞    since σ > −β.

(iv) N_{θν}(x, y) = \frac{Ψ(1/|y|^2)}{|y|^{d+σ(x,y)}}, where σ : R^d × R^d → R satisfies

        0 < σ^− := \inf_{(x,y) ∈ R^d × R^d} σ(x, y) ≤ \sup_{(x,y) ∈ R^d × R^d} σ(x, y) =: σ^+ < 2,

    and Ψ is a Bernstein function (for several examples of such functions, see [50]) vanishing at zero. Furthermore, Ψ is non-decreasing, concave and satisfies a weak upper scaling property, i.e., there exist µ ≥ 0 and c ∈ (0, 1] such that

        Ψ(λx) ≤ c λ^µ Ψ(x)    for x ≥ s_0 > 0, λ ≥ 1.

    For µ < 2(2 − σ^+), we can take

        k(y) = \begin{cases} \frac{Ψ(1)}{|y|^{d + 2µ + σ^+}} & \text{if } 0 < |y| ≤ 1, \\ \frac{Ψ(1)}{|y|^{d + σ^−}} & \text{if } |y| > 1, \end{cases}

    and \int_{R^d} (1 ∧ |y|^α) k(y) \, dy < +∞ for α ∈ [1 + µ + σ^+/2, 2).
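For the reader's convenience, we sketch the integrability claim in item (i); this computation is an added remark and is not part of the original text (c_d below denotes our shorthand for the surface measure of the unit sphere S^{d−1}):

    \int_{R^d} (1 ∧ |y|^α) \frac{dy}{|y|^{d+σ}} = \int_{|y| ≤ 1} \frac{dy}{|y|^{d + σ − α}} + \int_{|y| > 1} \frac{dy}{|y|^{d+σ}} = c_d \Big( \frac{1}{α − σ} + \frac{1}{σ} \Big) < +∞    whenever α > σ,

and every α ∈ [1 + σ/2, 2) satisfies α > σ because σ < 2.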
The main purpose of this article is to establish global Lipschitz regularity and boundary regularity of the solutions satisfying (1.1) under Assumption 1.1. In the regularity theory for linear elliptic equations, Hölder estimates play a key role, and they can be obtained by using the Harnack inequality. The pioneering contributions are by De Giorgi, Nash and Moser [29, 42, 45], who proved C^α regularity for solutions to second order elliptic equations in divergence form with measurable coefficients under the assumption of uniform ellipticity. For equations in non-divergence form, the corresponding regularity theory was established by Krylov and Safonov [41]. We refer to [16] for a comprehensive overview of the regularity theory for fully nonlinear elliptic equations. In [40], Krylov studied the boundary regularity for local second order elliptic equations in non-divergence form with bounded measurable coefficients. He obtained the Hölder regularity of u/δ up to the boundary, where δ denotes the distance function, i.e., δ(x) = dist(x, Ω^c).
Turning our attention towards nonlocal equations, the first Hölder estimates and Harnack inequalities for s-harmonic functions were proved by Bass and Kassmann [3–5]; however, their approach was purely probabilistic. In the analytic setting, Silvestre [52] proved Hölder continuity of u satisfying (1.3) under some structural assumptions on the operator and kernel related to the assumptions of Bass and Kassmann. Analogous to the local case [40], in the nonlocal setting, for a bounded domain Ω ⊂ R^d with C^{1,1} boundary, the first result concerning boundary regularity of u solving the Dirichlet problem for (−∆)^s with bounded right hand side was obtained by Ros-Oton and Serra [46], who established Hölder regularity of u/δ^s up to the boundary. This result is proved by using a method of Krylov (see [33]). The idea is to obtain a bound for u by a constant multiple of δ^s, which controls the oscillation of u/δ^s near the boundary ∂Ω. The Hölder regularity of u/δ^s is established (i) for more general nonlocal linear operators on C^{1,α} domains in [48], (ii) for smooth domains with smooth right hand side in [30,31], (iii) for kernels with variable order in [34], and (iv) for the Dirichlet problem for the fractional p-Laplacian in [32].
In a seminal paper, Caffarelli and Silvestre [17] studied the regularity theory for fully nonlinear integro-differential equations of the form \sup_{θ ∈ Θ} \inf_{ν ∈ Γ} I[x, u], where I[x, u] is given by (1.3). By obtaining a nonlocal ABP estimate, they established Hölder regularity and a Harnack inequality when N_{θν}(y) (here N_{θν}(y) denotes the x-independent form of N_{θν}(x, y)) is positive, symmetric and comparable with the kernel of the fractional Laplacian. From the large literature extending the work of Caffarelli and Silvestre [17], we mention [36], where the authors considered integro-PDEs with regularly varying kernels, [9,19,37], where regularity results are obtained for symmetric and non-symmetric stable-like operators, and [35] for kernels with variable order. A recent paper [38] also studies Hölder regularity and a scale invariant Harnack inequality under a weak scaling condition on the kernel. Boundary regularity results for fully nonlinear integro-differential equations were obtained by Ros-Oton and Serra in [47]. They considered a restricted class of kernels L_∗ where N_{θν}(x, y) is x-independent and of the following form:

    N_{θν}(y) := \frac{µ(y/|y|)}{|y|^{d+2s}}    with µ ∈ L^∞(S^{d−1}),

satisfying µ(θ) = µ(−θ) and λ ≤ µ ≤ Λ, where 0 < λ ≤ Λ are the ellipticity constants. An interesting feature of L_∗ is that

    L (x_d)_+^s = 0 in {x_d > 0}    for all L ∈ L_∗,

which is useful to construct barriers in their case. Note that our operators do not enjoy such a property, since they combine components of different orders. Furthermore, under Assumption 1.1 the nonlocal part (1.3) is not scale invariant in our case; that is, one may not find any 0 ≤ β ≤ 2 such that I_{θν}[x, u(r·)] = r^β I_{θν}[rx, u(·)] for any 0 < r < 1.
Recently, the mathematical study of mixed local-nonlocal integro-differential equations has received considerable attention; see, for instance, [2,6–8,10,26]. Regularity results and a Harnack inequality for mixed fractional p-Laplace equations were recently obtained in [27,28]. The interior C^α regularity theory for HJBI-type integro-PDEs has been studied by Mou [43]. He obtained Hölder regularity for viscosity solutions under a uniform ellipticity condition and a slightly weaker condition on the kernels compared to Assumption 1.1(b), namely \int_{R^d} (1 ∧ |y|^2) k(y) \, dy < +∞. More recently, global Lipschitz regularity (compare it with Biagi et al. [7]) and fine boundary regularity have been obtained for linear mixed local-nonlocal operators in [11]. Since the nonlocal operator applied to the distance function becomes singular near the boundary for a certain range of the order of the kernel, one of the main challenges was to construct appropriate sub- and supersolutions and prove an oscillation lemma following [46]. To carry out such an analysis, along with several careful estimates, the authors borrowed a Harnack inequality from [25]. Note that for fully nonlinear mixed operators of the form (1.2) no such Harnack inequality is available in the literature.
In this contribution, we continue the study started in [11] to obtain the boundary regularity for fully nonlinear integro-differential problems of the form (1.1). Below, we present our first result, the Lipschitz regularity of u satisfying (1.1) up to the boundary. Note that (1.5) can be achieved under weaker assumptions on the domain and kernel: for this result, we only assume that ∂Ω is C^{1,1} and that \int_{R^d} (1 ∧ |y|^2) k(y) \, dy < +∞.

Theorem 1.1. Let Ω be a bounded C^{1,1} domain in R^d and u be a continuous function which solves the in-equations (1.1) in the viscosity sense. Then u is in C^{0,1}(R^d) and there exists a constant C, depending only on d, Ω, λ, Λ, C_0 and \int_{R^d} (1 ∧ |y|^2) k(y) \, dy, such that

    (1.5)    ∥u∥_{C^{0,1}(R^d)} ≤ C K.

To prove Theorem 1.1, the first step is to show that the distance function δ(x) = dist(x, Ω^c) can be used as a barrier to u in Ω. Once this is done, we can complete the proof by considering different cases depending on the distance between any two points in Ω or their distance from ∂Ω and combining |u| ≤ Cδ with an interior C^{1,γ}-estimate for scaled operators (cf. Lemma 2.1).
Next we show the fine boundary regularity, that is, the Hölder regularity of u/δ up to the boundary.

Theorem 1.2. Suppose that Assumption 1.1 holds. Let Ω be a bounded C^2 domain and u be a viscosity solution to the in-equations (1.1). Then there exists κ ∈ (0, \hat{α}) such that

    (1.6)    ∥u/δ∥_{C^κ(Ω)} ≤ C_1 K

for some constant C_1, where κ, C_1 depend on d, Ω, C_0, Λ, λ, α and \int_{R^d} (1 ∧ |y|^α) k(y) \, dy. Here \hat{α} is given by

    \hat{α} = \begin{cases} 1 & \text{if } α ∈ (0, 1], \\ \frac{2 − α}{2} & \text{if } α ∈ (1, 2). \end{cases}

To prove Theorem 1.2, following [46] we prove an oscillation lemma (cf. Proposition 4.1). For this, we first need to construct sub- and supersolutions carefully, since I_{θν}δ becomes singular near the boundary ∂Ω for α ∈ (1, 2). Then we use a "weak version" of the Harnack inequality (cf. Theorem 4.1). This weak version of the Harnack inequality is new and needed to be developed due to the unavailability of a classical Harnack inequality. Also, we must point out that one needs to bypass the use of the comparison principle [10, Theorem 5.1] in such an analysis, since the mentioned theorem is for translation invariant linear operators. For non-translation invariant operators, such a comparison principle is unavailable; see Remark 2.1 for details.
+ Now applying (1.6), we prove the Hölder regularity of Du up to the boundary.
327
+ Theorem 1.3. Suppose that Assumption 1.1 holds and Ω be a bounded C2 domain. Then for any
328
+ viscosity solution u to the in-equations (1.1) we have
329
+ ||Du||Cη(Ω) ≤ CK,
330
+ for some η ∈ (0, 1) and C, depending only on d, Ω, C0, Λ, λ, α and
331
+ ´
332
+ Rd(1 ∧ |y|α)k(y)dy.
333
The interior C1,η-regularity for fully nonlinear integro-differential equations was studied in [17] by introducing a new ellipticity class in which the kernels are C1 away from the origin. Kriventsov [39] extended this result without the additional assumption on the kernels (sometimes referred to as rough kernels); see also [49] for the parabolic version. For HJBI-type integro-PDEs, interior C1,η-regularity was established by Mou and Zhang [44], and for the mixed local-nonlocal fractional p-Laplacian see [22]. The C1,η-regularity up to the boundary for linear mixed local-nonlocal operators was recently obtained in [11].
The rest of the article is organized as follows. In Section 2 we introduce the necessary preliminaries and collect the auxiliary results used throughout the article. In Section 3 we prove Theorem 1.1, Theorem 1.2 is proved in Section 4, and Theorem 1.3 in Section 5. Lastly, in Appendix A, following an approximation and scaling argument, we give a proof of the C1,γ regularity for scaled operators, i.e., Lemma 2.1.
+ 2. Notation and preliminary results
346
+ This section sets the notation which we use throughout the paper and collects the necessary results.
347
2.1. Notations and Definitions. We start by fixing the notation. We use Br(x) to denote the open ball of radius r > 0 centred at a point x ∈ Rd, and for x = 0 we write Br := Br(0). For any subset U ⊆ Rd and α ∈ (0, 1), we denote by Cα(U) the space of all bounded, α-Hölder continuous functions on U, equipped with the norm
∥f∥_{Cα(U)} := sup_{x∈U} |f(x)| + sup_{x,y∈U, x ̸= y} |f(x) − f(y)| / |x − y|^α .
Note that for α = 1, C0,1(U) denotes the space of all Lipschitz continuous functions on U. The space of all bounded functions with bounded α-Hölder continuous derivatives is denoted by C1,α(U), with the norm
∥f∥_{C1,α(U)} := sup_{x∈U} |f(x)| + ∥Df∥_{Cα(U)}.
We use USC(Rd), LSC(Rd), C(Rd), Cb(Rd), Md to denote, respectively, the spaces of upper semicontinuous functions, lower semicontinuous functions, continuous functions, bounded continuous functions on Rd, and d × d symmetric matrices.
Now let us introduce the scaled operators. For 0 < s ≤ 1, we define the scaled version of (1.2) as follows:
Ls[x, u] = sup_{θ∈Θ} inf_{ν∈Γ} { Tr aθν(sx)D^2u(x) + I^s_θν[x, u] },
where
I^s_θν[x, u] = ∫_{Rd} ( u(x + y) − u(x) − 1_{B_{1/s}}(y)∇u(x) · y ) s^{d+2} Nθν(sx, sy) dy.
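For later use we record how these operators behave under dilation; the identity below is exactly the computation carried out in the proof of Theorem 1.1 and used again in Lemma 4.7: if v(x) = u(sx), then Dv(x) = sDu(sx), D^2v(x) = s^2 D^2u(sx) and
I^s_θν[x, v] = s^2 Iθν[sx, u],  so that  Ls[x, v] = s^2 L[sx, u].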
Next we define the extremal Pucci operators for the second order term and for the nonlocal term:
P+u(x) := sup{ Tr(A D^2u(x)) : A ∈ Md, λI ≤ A ≤ ΛI },
P−u(x) := inf{ Tr(A D^2u(x)) : A ∈ Md, λI ≤ A ≤ ΛI },
and
P+_{k,s}u(x) := ∫_{Rd} ( u(x + y) − u(x) − 1_{B_{1/s}}(y)∇u(x) · y )^+ s^{d+2} k(sy) dy,
P−_{k,s}u(x) := − ∫_{Rd} ( u(x + y) − u(x) − 1_{B_{1/s}}(y)∇u(x) · y )^− s^{d+2} k(sy) dy.
We denote P+_{k,1} = P+_k and P−_{k,1} = P−_k.
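We also recall, only for the reader's convenience, the standard pointwise form of the local extremal operators, obtained by diagonalizing D^2u(x): if e_1, . . . , e_d denote the eigenvalues of D^2u(x), then
P+u(x) = Λ Σ_{e_i>0} e_i + λ Σ_{e_i<0} e_i   and   P−u(x) = λ Σ_{e_i>0} e_i + Λ Σ_{e_i<0} e_i.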
We recall the definition of viscosity sub- and supersolutions. First of all, we say that a function ϕ touches u from above (below) at x if, for some small r > 0,
ϕ(x) = u(x) and u(y) ≤ (≥) ϕ(y) for all y ∈ Br(x).
Definition 2.1. A function u ∈ USC(Rd) ∩ L∞(Rd) (resp. u ∈ LSC(Rd) ∩ L∞(Rd)) is said to be a viscosity subsolution (resp. supersolution) to (1.1) if, whenever ϕ touches u from above (resp. below) at a point x ∈ Ω for some bounded test function ϕ ∈ C2(Br(x)) ∩ C(Rd), the function
v = ϕ in Br(x) and v = u in Rd \ Br(x)
satisfies Lv(x) + C0|Dv(x)| ≥ −K (resp. Lv(x) − C0|Dv(x)| ≤ K).
2.2. Auxiliary lemmas. We collect some preliminary results here. The first result is the interior C1,γ regularity for the scaled operator Ls.
Lemma 2.1. Let 0 < s ≤ 1 and let u ∈ L∞(Rd) ∩ C(Rd) solve the in-equations
Ls[x, u] + C0 s|Du(x)| ≥ −K in B2,
Ls[x, u] − C0 s|Du(x)| ≤ K in B2,    (2.1)
in the viscosity sense. Then there exist constants 0 < γ < 1 and C > 0, independent of s, such that
∥u∥_{C1,γ(B1)} ≤ C( ∥u∥_{L∞(Rd)} + K ),
where γ and C depend only on d, λ, Λ, C0 and ∫_{Rd}(1 ∧ |y|^2)k(y)dy.
+ Proof. The proof essentially uses the approximation arguments for nonlocal equations [18] and we
444
+ postpone it to Appendix A.
445
+
446
Now we present a maximum principle type result similar to [10, Theorem 5.2]. We include the proof here for the reader’s convenience.
Lemma 2.2. Let u be a bounded function on Rd which is in USC(Ω) and satisfies P+u + P+_k u + C0|Du| ≥ 0 in Ω. Then supΩ u ≤ supΩc u.
Proof. From [43, Lemma 5.5] we can find a non-negative function χ ∈ C2(¯Ω) ∩ Cb(Rd) satisfying
P+χ + P+_k χ + C0|Dχ| ≤ −1 in Ω.
Note that, since χ ∈ C2(¯Ω), the above inequality holds in the classical sense. For ε > 0 we let
φM(x) = M + εχ(x).
Then P+φM + P+_k φM + C0|DφM| ≤ −ε in Ω.
Let M0 be the smallest value of M for which φM ≥ u in Rd. We show that M0 ≤ supΩc u. Suppose, to the contrary, that M0 > supΩc u. Then there must be a point x0 ∈ Ω for which u(x0) = φM0(x0): otherwise, using the upper semicontinuity of u, we could find M1 < M0 such that φM1 ≥ u in Rd, which contradicts the minimality of M0. Now φM0 touches u from above at x0, and thus, by the definition of viscosity subsolution, we would have P+φM0(x0) + P+_k φM0(x0) + C0|DφM0(x0)| ≥ 0. This is a contradiction. Therefore M0 ≤ supΩc u, which implies that for every x ∈ Rd
u ≤ φM0 ≤ M0 + ε sup_{Rd} χ ≤ supΩc u + ε sup_{Rd} χ.
The result follows by letting ε → 0.
Remark 2.1. Although we have the above maximum principle, one cannot simply compare two viscosity sub- and supersolutions of the operator (1.2). More precisely, if u, v are bounded functions with u ∈ USC(Rd), v ∈ LSC(Rd) satisfying
Lu + C|Du| ≥ f and Lv + C|Dv| ≤ g in Ω
in the viscosity sense, for two continuous functions f, g and some C ≥ 0, then L(u − v) + C|D(u − v)| ≥ f − g need not hold in Ω. However, if one of them is C2, then we have
P+(u − v) + P+_k (u − v) + C|D(u − v)| ≥ f − g in Ω.
Indeed, without loss of generality assume v ∈ C2(Ω), and let ϕ be a C2 test function that touches u − v from above at x ∈ Ω; then clearly ϕ + v touches u from above at x. By the definition of viscosity subsolution we have L(ϕ + v)(x) + C|D(ϕ + v)(x)| ≥ f(x), which implies
P+ϕ(x) + P+_k ϕ(x) + Lv(x) + C|Dϕ(x)| + C|Dv(x)| ≥ f(x),
and hence we obtain
P+ϕ(x) + P+_k ϕ(x) + C|Dϕ(x)| ≥ f(x) − g(x).
+
493
+ 3. Global Lipschitz regularity
494
+ In this section we establish the Lipschitz regularity of the solution u up to the boundary. We start
495
+ by showing that the distance function δ(x) is a barrier to u.
496
Lemma 3.1. Let Ω be a bounded C1,1 domain in Rd and u be a continuous function which solves (1.1) in the viscosity sense. Then there exists a constant C, which depends only on d, λ, Λ, C0, diam(Ω), the radius of the uniform exterior sphere and ∫_{Rd}(1 ∧ |y|^2)k(y)dy, such that
|u(x)| ≤ CKδ(x) for all x ∈ Ω.    (3.1)
Proof. First we show that
|u(x)| ≤ κK,  x ∈ Rd,    (3.2)
for some constant κ. From [43, Lemma 5.5] there exists a non-negative function χ ∈ C2(¯Ω) ∩ Cb(Rd), with inf_{Rd} χ > 0, satisfying
P+χ + P+_k χ + C0|Dχ| ≤ −1 in Ω.
We define ψ = Kχ, which gives inf_{Rd} ψ ≥ 0 and
P+ψ + P+_k ψ + C0|Dψ| ≤ −K in Ω.
Then, by using Remark 2.1, we get
P+(u − ψ) + P+_k (u − ψ) + C0|D(u − ψ)| ≥ 0 in Ω.
Now applying Lemma 2.2 to u − ψ we obtain
supΩ (u − ψ) ≤ supΩc (u − ψ) ≤ 0.
Note that in the second inequality above we used u = 0 in Ωc. This proves that u ≤ ψ in Rd. A similar calculation with −u also gives −u ≤ ψ in Rd. Thus
|u| ≤ ( sup_{Rd} |χ| ) K in Rd,
which gives (3.2).
Now we shall prove (3.1). Since ∂Ω is C1,1, Ω satisfies a uniform exterior sphere condition. Let r◦ be a radius for which the uniform exterior sphere condition holds. From [43, Lemma 5.4] there exists a bounded, Lipschitz continuous function ϕ, with Lipschitz constant r◦^{−1}, satisfying
ϕ = 0 in ¯Br◦,  ϕ > 0 in (¯Br◦)^c,  ϕ ≥ ε in (B_{(1+δ)r◦})^c,
P+ϕ + P+_k ϕ + C0|Dϕ| ≤ −1 in B_{(1+δ)r◦} \ ¯Br◦,
for some constants ε, δ depending on C0, d, λ, Λ and ∫_{Rd}(1 ∧ |y|^2)k(y)dy. Furthermore, ϕ is C2 in B_{(1+δ)r◦} \ ¯Br◦. For any point y ∈ ∂Ω we can find another point z ∈ Ωc such that Br◦(z) ⊂ Ωc touches ∂Ω at y. Let w(x) = ε^{−1}κK ϕ(x − z). Then also P+w + P+_k w + C0|Dw| ≤ −K, and hence, by Remark 2.1,
P+(u − w) + P+_k (u − w) + C0|D(u − w)| ≥ 0 in B_{(1+δ)r◦}(z) ∩ Ω.
Since, by (3.2), u − w ≤ 0 in (B_{(1+δ)r◦}(z) ∩ Ω)^c, applying Lemma 2.2 to u − w it follows that u(x) ≤ w(x) in Rd. Repeating a similar calculation for −u, we conclude that |u(x)| ≤ w(x) in Rd. Since this relation holds for any y ∈ ∂Ω, taking x ∈ Ω with dist(x, ∂Ω) < r◦ one can find y ∈ ∂Ω satisfying dist(x, ∂Ω) = |x − y| < r◦. Then using the previous estimate we obtain
|u(x)| ≤ ε^{−1}κK ϕ(x − z) ≤ ε^{−1}κK ( ϕ(x − z) − ϕ(y − z) ) ≤ ε^{−1}κK r◦^{−1} dist(x, ∂Ω),
which gives (3.1).
+
572
+ Now we are ready to prove that u ∈ C0,1(Rd).
573
+ Proof of Theorem 1.1. Let x0 ∈ Ω and s ∈ (0, 1] be such that 2s = dist(x0, ∂Ω) ∧ 1. Without loss of
574
+ any generality, we assume x0 = 0. Define v(x) = u(sx) in Rd. Using Lemma 3.1 we already have
575
+ |u(x)| ≤ C1Kδ(x), from that one can deduce
576
+ |v(x)| ≤ C1 Ks(1 + |x|)
577
+ for all x ∈ Rd,
578
+ (3.3)
579
+ for some constant C1 independent of s. We recall the scaled operator
580
+ Is
581
+ θν[x, f] :=
582
+ ˆ
583
+ Rd(f(x + y) − f(x) − 1B 1
584
+ s (y)∇f(x) · y)sd+2Nθν(sx, sy)dy.
585
+ To compute Ls[x, v] + C0s|Dv(x)| in B2, first we observe that D2v(x) = s2D2u(sx) and Dv(x) =
586
+ sDu(sx). Also
587
+ Is
588
+ θν[x, v] = s2
589
+ ˆ
590
+ Rd(v(x + y) − v(x) − 1B 1
591
+ s (y)∇v(x) · y)Nθν(sx, sy)sddy
592
+ = s2
593
+ ˆ
594
+ Rd(u(sx + sy) − u(sx) − 1B1(sy)∇u(sx) · sy)Nθν(sx, sy)sddy = s2Iθν[sx, u].
595
+
596
+ BOUNDARY REGULARITY
597
+ 9
598
+ Thus, it follows from (1.1) that
599
+ Ls[x, v] + C0s|Dv(x)| ≥ −Ks2
600
+ in
601
+ B2,
602
+ Ls[x, v] − C0s|Dv(x)| ≤ Ks2
603
+ in
604
+ B2.
605
+ (3.4)
606
+ Now consider a smooth cut-off function ϕ, 0 ≤ ϕ ≤ 1, satisfying
607
+ ϕ =
608
+
609
+ 1
610
+ in B3/2,
611
+ 0
612
+ in Bc
613
+ 2.
614
+ Let w = ϕv.
615
+ Clearly, ((ϕ − 1)v)(y) = 0 for all y ∈ B3/2, which gives D((ϕ − 1)v) = 0 and
616
+ D2((ϕ − 1)v) = 0 in x ∈ B3/2. Since w = v + (ϕ − 1)v, from (3.4) we obtain
617
+ Ls[x, w] + C0s|Dw(x)| ≥ −Ks2 − | sup
618
+ θ∈Θ
619
+ inf
620
+ ν∈Γ Is
621
+ θν[x, (ϕ − 1)v)]|
622
+ in
623
+ B1,
624
+ Ls[x, w] − C0s|Dw(x)| ≤ Ks2 + | sup
625
+ θ∈Θ
626
+ inf
627
+ ν∈Γ Is
628
+ θν[x, (ϕ − 1)v)]|
629
+ in
630
+ B1.
631
+ (3.5)
632
+ Again, since (ϕ − 1)v = 0 in B3/2, we have in B1 that
633
+ |Is
634
+ θν[x, (ϕ − 1)v]| =
635
+ ���
636
+ ˆ
637
+ |y|≥1/2
638
+ ((ϕ − 1)v)(x + y) − ((ϕ − 1)v)(x))sd+2Nθν(sx, sy)dy
639
+ ���
640
+
641
+ ˆ
642
+ |y|≥1/2
643
+ |v(x + y)|sd+2Nθν(sx, sy)dy + |v(x)|
644
+ ˆ
645
+ |y|≥1/2
646
+ sd+2Nθν(sx, sy)dy
647
+ := I1 + I2.
648
+ Since x ∈ B1, using sd+2Nθν(sx, sy) ≤ sd+2k(sy) and (3.3) we can have the following estimate,
649
+ I2 ≤ 2C1Ks
650
+ ˆ
651
+ Rd(1 ∧ |y|2)dy.
652
+ Now write
653
+ I1 =
654
+ ˆ
655
+ 1/2≤|y|≤1/s
656
+ |v(x + y)|sd+2Nθν(sx, sy)dy +
657
+ ˆ
658
+ |y|≥1/s
659
+ |v(x + y)|sd+2Nθν(sx, sy)dy
660
+ = Is,1 + Is,2 .
661
+ Let us first estimate Is,1. Since x ∈ B1 and |y| ≥ 1
662
+ 2 we have 1 + |x + y| ≤ 5|y|. By using this estimate
663
+ and (3.3) we obtain
664
+ Is,1 = sd+2
665
+ ˆ
666
+ 1
667
+ 2≤|y|≤ 1
668
+ s
669
+ |v(x + y)|Nθν(sx, sy)dy
670
+ ≤ 5C1K
671
+ ˆ
672
+ 1
673
+ 2≤|y|≤ 1
674
+ s
675
+ |sy|sd+2k(sy)dy ≤ 5C1Ks
676
+ ˆ
677
+ s
678
+ 2 ≤|z|≤1
679
+ |sz|k(z)dz
680
+ ≤ C2s
681
+ ˆ
682
+ s
683
+ 2 ≤|z|≤1
684
+ |z|2k(z)dz ≤ C2s
685
+ ˆ
686
+ Rd(1 ∧ |y|2)k(z)dz ≤ C3s,
687
+ for some constants C3. For Is,2, a change of variable and (3.2) gives
688
+ Is,2 ≤ κs2K
689
+ ˆ
690
+ s|y|>1
691
+ sdk(ry)dy = κs2K
692
+ ˆ
693
+ |y|>1
694
+ k(y)dy
695
+ ≤ κs2K
696
+ ˆ
697
+ Rd(1 ∧ |y|2)k(y)dy ≤ C4s2K
698
+ for some constant C4. Therefore, putting the estimates of I1 and I2 in (3.5) we obtain
699
+ Ls[x, w] + C0s|Dw(x)| ≥ −C5Ks
700
+ in
701
+ B1,
702
Ls[x, w] − C0s|Dw(x)| ≤ C5Ks
703
+ in
704
+ B1,
705
+ (3.6)
706
+
707
+ 10
708
+ BOUNDARY REGULARITY
709
+ for some constant C5. Now applying Lemma 2.1, from (3.6) we have
710
+ ∥v∥C1(B 1
711
+ 2 ) ≤ C6
712
+
713
+ ∥v∥L∞(B2) + sK
714
+
715
+ (3.7)
716
+ for some constant C6. From (3.3) and (3.7) we then obtain
717
+ sup
718
+ y∈Bs/2(x),y̸=x
719
+ |u(x) − u(y)|
720
+ |x − y|
721
+ ≤ C7K,
722
+ (3.8)
723
+ for some constant C7.
724
+ Now we can complete the proof. Note that if |x − y| ≥ 1
725
+ 8, then
726
+ |u(x) − u(y)|
727
+ |x − y|
728
+ ≤ 2κK,
729
+ by (3.2). So we consider |x − y| < 1
730
+ 8. If |x − y| ≥ 8−1(δ(x) ∨ δ(y)), then using Lemma 3.1 we get
731
+ |u(x) − u(y)|
732
+ |x − y|
733
+ ≤ 4CK(δ(x) + δ(y))(δ(x) ∨ δ(y))−1 ≤ 8CK.
734
+ Now let |x − y| < 8−1 min{δ(x) ∨ δ(y), 1}. Then either y ∈ B δ(x)∧1
735
+ 8
736
+ (x) or x ∈ B δ(y)∧1
737
+ 8
738
+ (y). Without
739
+ loss of generality, we suppose y ∈ B δ(x)∧1
740
+ 8
741
+ (x). From (3.8) we get
742
+ |u(x) − u(y)|
743
+ |x − y|
744
+ ≤ C7K.
745
+ This completes the proof.
746
+
747
+ 4. Fine boundary regularity
748
+ Aim of this section is to prove Theorem 1.2. Since u is Lipschitz, (1.1) can be written as
749
+ |Lu| ≤ CK in Ω,
750
+ and u = 0 in Ωc.
751
+ We start by constructing subsolutions which will be useful later on to prove oscillation lemma.
752
+ Lemma 4.1. There exists a constant ˜κ, which depends only on d, λ, Λ,
753
+ ´
754
+ Rd(1∧ |y|2)k(y)dy, such that
755
+ for any r ∈ (0, 1], we have a bounded radial function φr satisfying
756
+
757
+
758
+
759
+
760
+
761
+
762
+
763
+
764
+
765
+ P−φr + P−
766
+ k φr ≥ 0
767
+ in B4r \ ¯Br,
768
+ 0 ≤ φr ≤ ˜κr
769
+ in Br,
770
+ φr ≥ 1
771
+ ˜κ(4r − |x|)
772
+ in B4r \ Br,
773
+ φr ≤ 0
774
+ in Rd \ B4r.
775
+ Moreover, φr ∈ C2(B4r \ ¯Br).
776
+ Proof. We use the same subsolution constructed in [11] and show that it is indeed a subsolution
777
+ with respect to minimal Pucci operators. Fix r ∈ (0, 1] and define vr(x) = e−ηq(x) − e−η(4r)2, where
778
+ q(x) = |x|2 ∧ 2(4r)2 and η > 0. Clearly, 1 ≥ vr(0) ≥ vr(x) for all x ∈ Rd. Thus using the fact that
779
+ 1 − e−ξ ≤ ξ for all ξ ≥ 0 we have
780
+ vr(x) ≤ 1 − e−η(4r)2 ≤ η(4r)2,
781
+ (4.1)
782
+ Again for x ∈ B4r \ Br, we have that
783
+ vr(x) = e−η(4r)2(eη((4r)2−q(x)) − 1) ≥ ηe−η(4r)2((4r)2 − |x|2)
784
+ = ηe−η(4r)2(4r + |x|)(4r − |x|) ≥ 5ηre−η(4r)2(4r − |x|).
785
+ (4.2)
786
+
787
+ BOUNDARY REGULARITY
788
+ 11
789
+ Fix x ∈ B4r \ ¯Br. We start by estimating the local minimal Pucci operator P− of v. Using rotational
790
+ symmetry we may always assume x = (l, 0, · · · , 0) Then
791
+ ∂ivr(x) = −2ηe−η|x|2xi =
792
+
793
+ −2ηe−η|x|2l
794
+ i = 1,
795
+ 0
796
+ i ̸= 1
797
+ and
798
+ ∂ijvr(x) =
799
+
800
+ 4η2x2
801
+ i e−η|x|2 − 2ηe−η|x|2
802
+ i = j,
803
+ 4η2xixje−η|x|2
804
+ i ̸= j.
805
+ =
806
+
807
+
808
+
809
+
810
+
811
+ 4η2l2e−η|x|2 − 2ηe−η|x|2
812
+ i = j = 1,
813
+ −2ηe−η|x|2
814
+ i = j ̸= 1,
815
+ 0
816
+ i ̸= j.
817
+ By the above calculation, for any x ∈ B4r \ ¯Br, choosing η > 1
818
+ r2 we have
819
+ P−vr(x) = λ4η2l2e−η|x|2 − λ2ηe−η|x|2 − Λ(d − 1)2ηe−η|x|2
820
+ ≥ λ4η2l2e−η|x|2 − dΛ2ηe−η|x|2.
821
+ Now to determine nonlocal minimal Pucci operator, using the convexity of exponential map we get,
822
+ e−η|x+y|2 − e−η|x|2 + 2η1{|y|≤1}y · xe−η|x|2
823
+ ≥ −ηe−η|x|2 �
824
+ |x + y|2 − |x|2 − 21{|y|≤1}y · x
825
+
826
+ .
827
+ Since P−
828
+ k vr = P−
829
+ k (vr + e−η(4r)2) and using above inequality we obtain
830
+ P−
831
+ k (e−ηq(·))(x) = −
832
+ ˆ
833
+ Rd
834
+
835
+ e−ηq(x+y) − e−ηq(x) − 1B1(y)∇e−ηq(x) · y
836
+ �−
837
+ k(y)dy
838
+ ≥ −ηe−η|x|2 ˆ
839
+ |y|≤r
840
+
841
+ |x + y|2 − |x|2 − 2y · x
842
+
843
+ k(y)dy
844
+
845
+ ˆ
846
+ r<|y|≤1
847
+ ���e−η(|x|2+2(4r)2) − e−η|x|2 + 2ηy · xe−η|x|2��� k(y)dy
848
+
849
+ ˆ
850
+ |y|>1
851
+ ���e−η(|x|2+2(4r)2) − e−η|x|2��� k(y)dy
852
+ ≥ −ηe−η|x|2
853
+ �ˆ
854
+ |y|<r
855
+ |y|2k(y)dy +
856
+ ˆ
857
+ r<|y|≤1
858
+ ������
859
+ 1 − e−2η(4r)2
860
+ η
861
+ ����� + 2|x · y|
862
+
863
+ k(y)dy
864
+
865
+ − ηe−η|x|2 ˆ
866
+ |y|>1
867
+ �����
868
+ 1 − e−2η(4r)2
869
+ η
870
+ ����� k(y)dy
871
+ ≥ −ηe−η|x|2
872
+ �ˆ
873
+ |y|<r
874
+ |y|2k(y)dy +
875
+ ˆ
876
+ r<|y|≤1
877
+ 43|y|2k(y)dy +
878
+ ˆ
879
+ |y|>1
880
+ 2(4r)2k(y)dy
881
+
882
+ ≥ −ηe−η|x|243
883
+ ˆ
884
+ Rd(1 ∧ |y|2)k(y)dy,
885
+ where in the second line we used |x + y|2 ∧ 2(4r)2 ≤ |x|2 + 2(4r)2. Combining the above estimates
886
+ we see that, for x ∈ B4r \ ¯Br,
887
+ P −vr(x) + P −
888
+ k vr(x) ≥ ηe−η|x|2�
889
+ 4ηλ|x|2 − 2dΛ − 43
890
+ ˆ
891
+ Rd(1 ∧ |y|2)k(y)dy
892
+
893
+ ≥ ηe−η|x|2�
894
+ 4ηλr2 − 2dΛ − 43
895
+ ˆ
896
+ Rd(1 ∧ |y|2)k(y)dy
897
+
898
+ .
899
+
900
+ 12
901
+ BOUNDARY REGULARITY
902
+ Thus, finally letting η =
903
+ 1
904
+ λr2(2dΛ + 43 ´
905
+ Rd(1 ∧ |y|2)k(y)dy), we obtain
906
P−vr + P−_k vr > 0
907
+ in B4r \ ¯Br.
908
+ Note that the final choice of η is admissible since
909
+ 1
910
+ λr2(2dΛ + 43 ´
911
+ Rd(1 ∧ |y|2)k(y)dy) >
912
+ 1
913
+ r2. Now set
914
+ φr = rvr and the result follows from (4.1)-(4.2).
915
+
916
+ Next we prove a weak version of Harnack inequality.
917
+ Theorem 4.1. Let s ∈ (0, 1], α′ = 1∧(2−α) and u be a continuous non-negative function satisfying
918
+ P−u + P−
919
+ k,su ≤ C0s1+α′,
920
+ P+u + P+
921
+ k,su ≥ −C0s1+α′
922
+ in B2.
923
+ Furthermore if supRd |u| ≤ M0 and |u(x)| ≤ M0s(1 + |x|) for all x ∈ Rd, then
924
+ u(x) ≤ C(u(0) + (M0 ∨ C0)s1+α′)
925
+ for every x ∈ B 1
926
+ 2 and for some constant C which only depends on λ, Λ, d,
927
+ ´
928
+ Rd(1 ∧ |y|α)k(y)dy.
929
+ Proof. Dividing by u(0)+(M0 ∨C0)s1+α′, it can be easily seen that supRd |u| ≤ s−(1+α′) and |u(x)| ≤
930
+ s−α′(1 + |x|) for all x ∈ Rd and u satisfies
931
+ P−u + P−
932
+ k,su ≤ 1,
933
+ P+u + P+
934
+ k,su ≥ −1.
935
+ Fix ε > 0 from [43, Corollary 3.14] and let γ = d
936
+ ε. Let
937
+ t0 := min
938
+
939
+ t : u(x) ≤ ht(x) := t(1 − |x|)−γ for all x ∈ B1
940
+
941
+ .
942
+ Clearly this set is nonempty since u(0) ≤ 1, thus t0 exist. Let x0 ∈ B1 be such that u(x0) = ht0(x0).
943
+ Let η = 1 − |x0| be the distance of x0 from ∂B1. For r = η
944
+ 2 and x ∈ Br(x0), we can write
945
+ Br(x0) =
946
+
947
+ u(x) ≤ u(x0)
948
+ 2
949
+
950
+
951
+
952
+ u(x) > u(x0)
953
+ 2
954
+
955
+ := A + ˜A.
956
+ The goal is to estimate |Br(x0)| in terms of |A| and | ˜A|. Proceeding this way, we show that t0 < C
957
+ for some universal C which, in turn, implies that u(x) < C(1 − |x|)−γ. This would prove our result.
958
+ Next, Using [43, Corollary 3.14] we obtain
959
+ | ˜A ∩ B1| ≤ C
960
+ ����
961
+ 2
962
+ u(x0)
963
+ ����
964
+ ε
965
+ ≤ Ct−ε
966
+ 0 ηd ,
967
+ whereas |Br| = ωd(η/2)d. In particular,
968
+ �� ˜A ∩ Br(x0)
969
+ �� ≤ Ct−ε
970
+ 0 |Br|.
971
+ (4.3)
972
+ So if t0 is large, ˜A can cover only a small portion of Br(x0). We shall show that for some δ > 0,
973
+ independent of t0 we have
974
+ |A ∩ Br(x0)| ≤ (1 − δ)|Br|,
975
+ which will provide an upper bound on t0 completing the proof. We start by estimating |A ∩ Bθr(x0)|
976
+ for θ > 0 small. For every x ∈ Bθr(x0) we have
977
+ u(x) ≤ ht0(x) ≤ t0
978
+ �2η − θη
979
+ 2
980
+ �−γ
981
+ ≤ u(x0)
982
+
983
+ 1 − θ
984
+ 2
985
+ �−γ
986
+ ,
987
+ with
988
+
989
+ 1 − θ
990
+ 2
991
+
992
+ close to 1. Define
993
+ v(x) :=
994
+
995
+ 1 − θ
996
+ 2
997
+ �−γ
998
+ u(x0) − u(x).
999
+ Then we get v ≥ 0 in Bθr(x0) and also P−v + P−
1000
+ k,sv ≤ 1 as P+u + P+
1001
+ k,su ≥ −1.
1002
+
1003
+ BOUNDARY REGULARITY
1004
+ 13
1005
+ We would like to apply [43, Corollary 3.14] to v, but v need not be non-negative in the whole of
1006
+ Rd. Thus we consider the positive part of v, i.e, w = v+ and find an upper bound of P−w + P−
1007
+ k,sw.
1008
+ Since v− is C2 in B θr
1009
+ 4 (x0), we have
1010
+ P−w(x) + P−
1011
+ k,sw(x) ≤ [P−v(x) + P−
1012
+ k,sv(x)] + [P+v−(x) + P+
1013
+ k,sv−(x)] ≤ 1 + P+v−(x) + P+
1014
+ k,sv−(x).
1015
+ (4.4)
1016
+ Also, using v−(x) = Dv−(x) = D2v−(x) = 0 for all x ∈ B θr
1017
+ 4 (x0), we get
1018
+ P+v−(x) + P+
1019
+ k,sv−(x) =
1020
+ ˆ
1021
+ Rd∩{v(x+y)≤0}
1022
+ v−(x + y)sd+2k(sy)dy.
1023
+ (4.5)
1024
+ Now plugging (4.5) into (4.4), for all x ∈ B θr
1025
+ 4 (x0) we obtain
1026
+ P−w(x) + P−
1027
+ k,sw(x) ≤ 1 +
1028
+ ˆ
1029
+ Rd\B θr
1030
+ 2
1031
+ (x−x0)
1032
+
1033
+ u(x + y) −
1034
+
1035
+ 1 − θ
1036
+ 2
1037
+ �−γ
1038
+ u(x0)
1039
+ �+
1040
+ sd+2k(sy)dy
1041
+ ≤ 1 +
1042
+ ˆ
1043
+ Rd\B θr
1044
+ 2
1045
+ (x−x0)
1046
+ |u(x + y)| sd+2k(sy)dy +
1047
+ ˆ
1048
+ Rd\B θr
1049
+ 2
1050
+ (x−x0)
1051
+ �����
1052
+
1053
+ 1 − θ
1054
+ 2
1055
+ �−γ
1056
+ u(x0)
1057
+ ����� sd+2k(sy)dy
1058
+ ≤ 1 +
1059
+ ˆ
1060
+ Rd\B θr
1061
+ 4
1062
+ |u(x + y)| sd+2k(sy)dy +
1063
+ ˆ
1064
+ Rd\B θr
1065
+ 4
1066
+ �����
1067
+
1068
+ 1 − θ
1069
+ 2
1070
+ �−γ
1071
+ u(x0)
1072
+ ����� sd+2k(sy)dy := 1 + I1 + I2.
1073
+ Estimate of I1: Let us write
1074
+ I1 =
1075
+ ˆ
1076
+ θr
1077
+ 4 ≤|y|≤ 1
1078
+ s
1079
+ |u(x + y)| sd+2k(sy)dy +
1080
+ ˆ
1081
+ |y|≥ 1
1082
+ s
1083
+ |u(x + y)| sd+2k(sy)dy := I11 + I12.
1084
+ Simply using change of variable and supRd |u| ≤ s−(1+α′), we obtain
1085
+ I12 ≤
1086
+ ˆ
1087
+ |z|≥1
1088
+ k(z)dz.
1089
+ Now we estimate I11 using |u(x)| ≤ s−α′(1 + |x|) for all x ∈ Rd.
1090
+ I11 ≤
1091
+ ˆ
1092
+ θr
1093
+ 4 ≤|y|≤ 1
1094
+ s
1095
+ (1 + |x + y|) sd+2−α′k(sy)dy
1096
+ ≤ 5
1097
+ 4
1098
+ ˆ
1099
+ θr
1100
+ 4 ≤|y|≤ 1
1101
+ s
1102
+ sd+2−α′k(sy)dy +
1103
+ ˆ
1104
+ θr
1105
+ 4 ≤|y|≤ 1
1106
+ s
1107
+ sd+2−α′|y|k(sy)dy .
1108
+ We consider two cases. First consider the case α′ = 1 so α ≤ 1. This implies
1109
+ I11 ≤ 5
1110
+ 4
1111
+ ˆ
1112
+ θrs
1113
+ 4 ≤|z|≤1
1114
+ sk(z)dz +
1115
+ ˆ
1116
+ θrs
1117
+ 4 ≤|z|≤1
1118
+ |z|k(z)dz ≤ 6(θr)−1
1119
+ ˆ
1120
+ Rd(1 ∧ |z|α)k(z)dz.
1121
+ Now consider the case α′ = 2 − α, and hence α > 1. In this case
1122
+ I11 ≤ 5
1123
+ 4
1124
+ ˆ
1125
+ θr
1126
+ 4 ≤|y|≤ 1
1127
+ s
1128
+ sαsdk(sy)dy +
1129
+ ˆ
1130
+ θr
1131
+ 4 ≤|y|≤ 1
1132
+ s
1133
+ sα−1|sy|sdk(sy)dy
1134
+ = 5 · 4α−1(θr)−α
1135
+ ˆ
1136
+ θrs
1137
+ 4 ≤|z|≤1
1138
+ �θr
1139
+ 4 s
1140
+ �α
1141
+ k(z)dz +
1142
+ �θr
1143
+ 4
1144
+ �1−α ˆ
1145
+ θrs
1146
+ 4 ≤|z|≤1
1147
+ �θr
1148
+ 4 s
1149
+ �α−1
1150
+ |z|k(z)dz
1151
+ ≤ C(θr)−2
1152
+ ˆ
1153
+ Rd(1 ∧ |z|α)k(z)dz .
1154
+
1155
+ 14
1156
+ BOUNDARY REGULARITY
1157
+ Combining the estimates of I11 and I12, we get
1158
+ I1 ≤ C(θr)−2
1159
+ ˆ
1160
+ Rd(1 ∧ |z|α)k(z)dz.
1161
+ Estimate of I2: If α′ = 1, then α ≤ 1 and using |u(x0)| ≤ s−α′(1 + |x0|) we have
1162
+ I2 :=
1163
+ ˆ
1164
+ Rd\B θr
1165
+ 4
1166
+ �����
1167
+
1168
+ 1 − θ
1169
+ 2
1170
+ �−γ
1171
+ u(x0)
1172
+ ����� sd+2k(sy)dy ≤ C
1173
+ ˆ
1174
+ Rd\B θr
1175
+ 4
1176
+ sd+2−α′k(sy)dy
1177
+ = C
1178
+ ˆ
1179
+ Rd\B θrs
1180
+ 4
1181
+ sk(z)dz ≤ C
1182
+ �ˆ
1183
+ θrs
1184
+ 4 ≤|z|≤1
1185
+ sk(z)dz +
1186
+ ˆ
1187
+ |z|≥1
1188
+ sk(z)dz
1189
+
1190
+ ≤ C
1191
+
1192
+ 4
1193
+ θr
1194
+ ˆ
1195
+ θrs
1196
+ 4 ≤|z|≤1
1197
+ |z|αk(z)dz +
1198
+ ˆ
1199
+ |z|>1
1200
+ k(z)dz
1201
+
1202
+ ≤ C(θr)−1
1203
+ ˆ
1204
+ Rd(1 ∧ |y|α)k(z)dz.
1205
+ If α′ = 2 − α then α > 1. In that case, using similar calculation as above we have
1206
+ I2 :=
1207
+ ˆ
1208
+ Rd\B θr
1209
+ 4
1210
+ �����
1211
+
1212
+ 1 − θ
1213
+ 2
1214
+ �−γ
1215
+ u(x0)
1216
+ ����� sd+2k(sy)dy ≤ C
1217
+ ˆ
1218
+ Rd\B θr
1219
+ 4
1220
+ sd+αk(sy)dy
1221
+ = C
1222
+ ˆ
1223
+ Rd\B θrs
1224
+ 4
1225
+ sαk(z)dz ≤ C(θr)−α
1226
+ ˆ
1227
+ Rd(1 ∧ |y|α)k(z)dz.
1228
+ Since α ∈ (0, 2), combining the above estimates we obtain
1229
+ P−w + P−
1230
+ k,sw ≤
1231
+ C
1232
+ (θr)2
1233
+ in B θr
1234
+ 4 (x0) .
1235
+ Now using [43, Corollary 3.14] for w we get
1236
+ |A ∩ B θr
1237
+ 8 (x0)| =
1238
+ ����
1239
+
1240
+ w ≥ u(x0)((1 − θ/2)−γ − 1/2)
1241
+
1242
+ ∩ B θr
1243
+ 8 (x0)
1244
+ ����
1245
+ ≤ C(θr)d
1246
+
1247
+ inf
1248
+ B θr
1249
+ 8
1250
+ (x0) w + θr
1251
+ 8 ·
1252
+ C
1253
+ (θr)2
1254
+ �ε
1255
+ ·
1256
+
1257
+ u(x0)((1 − θ/2)−γ − 1/2)
1258
+ �−ε
1259
+ ≤ C(θr)d� �
1260
+ (1 − θ
1261
+ 2)−γ − 1
1262
+ 2
1263
+
1264
+ + C
1265
+ 8 (θr)−1t−1
1266
+ 0 (2r)d�ε
1267
+ ≤ C(θr)d ��
1268
+ (1 − θ/2)−γ − 1
1269
+ �ε + C0(θr)−εt−ε
1270
+ 0 rdε�
1271
+ .
1272
+ Now let us choose θ > 0 small enough (independent of t0) to satisfy
1273
+ C(θr)d �
1274
+ (1 − θ/2)−γ − 1
1275
+ �ε ≤ 1
1276
+ 4|B θr
1277
+ 8 (x0)| .
1278
+ With this choice of θ if t0 becomes large, then we also have
1279
+ C(θr)dθ−εr(n−1)εt−ε
1280
+ 0
1281
+ ≤ 1
1282
+ 4|B θr
1283
+ 8 (x0)| ,
1284
+ and hence
1285
+ |A ∩ B θr
1286
+ 8 (x0)| ≤ 1
1287
+ 2|B θr
1288
+ 8 (x0)| .
1289
+ This estimate of course implies that
1290
+ | ˜A ∩ B θr
1291
+ 8 (x0)| ≥ C2|Br|,
1292
+ but this is contradicting (4.3). Therefore t0 cannot be large and this completes the proof.
1293
+
1294
+
1295
+ BOUNDARY REGULARITY
1296
+ 15
1297
Corollary 4.1. Let u satisfy the conditions of Theorem 4.1. Then the following holds.
1298
+ sup
1299
+ B 1
1300
+ 4
1301
+ u ≤ C
1302
+
1303
+ inf
1304
+ B 1
1305
+ 4
1306
+ u + (M0 ∨ C0)s1+α′
1307
+
1308
+ .
1309
+ Proof. Take any point x0 ∈ B 1
1310
+ 4 such that u(x0) = infB 1
1311
+ 4 u(x). Clearly B 1
1312
+ 4 ⊂ B 1
1313
+ 2(x0). Defining
1314
+ ˜u(x) := u(x + x0) and applying Theorem 4.1 on ˜u we find
1315
+ ˜u(x) ≤ C
1316
+
1317
+ ˜u(0) + (M0 ∨ C0)s1+α′�
1318
+ in B 1
1319
+ 2 .
1320
+ This implies
1321
+ sup
1322
+ B 1
1323
+ 4
1324
+ u(x) ≤
1325
+ sup
1326
+ B 1
1327
+ 2 (x0)
1328
+ u(x) ≤ C
1329
+
1330
+ inf
1331
+ B 1
1332
+ 4
1333
+ u(x) + (M0 ∨ C0)s1+α′
1334
+
1335
+ .
1336
+ This proves the claim.
1337
+
1338
+ Now we will give some auxiliary lemmas which will be used to construct appropriate supersolutions
1339
+ that are crucial to prove the oscillation estimate.
1340
+ Lemma 4.2. Let Ω be a bounded C2 domain in Rd, then for any 0 < ǫ < 1, we have the following
1341
+ estimate
1342
+ ��Iθν(δ1+ǫ)
1343
+ �� ≤ C
1344
+
1345
+ 1 + 1(1,2)(α)δ1−α�
1346
+ in Ω,
1347
+ (4.6)
1348
+ where C > 0 depends only on d, Ω and
1349
+ ´
1350
+ Rd(1 ∧ |y|α)k(y)dy.
1351
Proof. Since δ ∈ C0,1(Rd)∩C2(¯Ω) [21, Theorem 5.4.3], using the Lipschitz continuity of δ1+ǫ near the
1352
+ origin and boundedness away from the origin we can easily obtain the estimate (4.6) for α ∈ (0, 1].
1353
+ Next consider the case α ∈ (1, 2). For any x ∈ Ω we have
1354
+ ��Iθν(δ1+ǫ)(x)
1355
+ �� ≤
1356
+ ˆ
1357
+ Rd
1358
+ ��δ1+ǫ(x + y) − δ1+ǫ(x) − 1B1(y)y · ∇δ1+ǫ(x)
1359
+ �� k(y)dy
1360
+ =
1361
+ ˆ
1362
+ |y|< δ(x)
1363
+ 2
1364
+ +
1365
+ ˆ
1366
+ δ(x)
1367
+ 2 ≤|y|≤1
1368
+ +
1369
+ ˆ
1370
+ |y|>1
1371
+ := I1 + I2 + I3 .
1372
+ Since |y| ≤ δ(x)
1373
+ 2
1374
+ and δ(x) < 1, we have the following estimate on I1.
1375
+ ��δ1+ǫ(x + y) − δ1+ǫ(x) − 1B1(y)y · ∇δ1+ǫ(x)
1376
+ �� ≤ ||δ1+ǫ||C2(B δ(x)
1377
+ 2
1378
+ (x))|y|2
1379
+ ≤ 4C
1380
+ ||δ||C2(¯Ω)
1381
+ δ(x)1−ǫ |y|2 ≤ 4C
1382
+ ||δ||C2(¯Ω)δ(x)2−α
1383
+ δ(x)1−ǫ
1384
+ |y|α.
1385
+ This implies
1386
+ I1 ≤ 4C||δ||C2(¯Ω)δ(x)1+ǫ−α
1387
+ ˆ
1388
+ Rd |y|αk(y)dy ≤ 4C0C||δ||C2(¯Ω)δ(x)1+ǫ−α.
1389
+ (4.7)
1390
+ Again for I2 we have
1391
+ I2 ≤ C
1392
+ ˆ
1393
+ δ(x)
1394
+ 2 ≤|y|≤1
1395
+ |y|k(y)dy ≤
1396
+ �Cδ(x)
1397
+ 2
1398
+ �1−α ˆ
1399
+ δ(x)
1400
+ 2 ≤|y|≤1
1401
+ |y|αk(y)dy
1402
+
1403
+ �Cδ(x)
1404
+ 2
1405
+ �1−α ˆ
1406
+ Rd (1 ∧ |y|α) k(y)dy.
1407
+ Finally,
1408
+ I3 =
1409
+ ˆ
1410
+ |y|>1
1411
+ |δ1+ǫ(x + y) − δ1+ǫ(x)|k(y)dy ≤ 2(diam Ω)1+ǫ
1412
+ ˆ
1413
+ Rd(1 ∧ |y|α)k(y)dy.
1414
+ (4.8)
1415
+ Combining (4.7)-(4.8) we obtain (4.6).
1416
+
1417
+
1418
+ 16
1419
+ BOUNDARY REGULARITY
1420
+ Next we obtain an estimate on minimal Pucci operator P− applied on δ1+ǫ.
1421
+ Lemma 4.3. Let Ω be a bounded C2 domain in Rd, then for any 0 < ǫ < 1, we have the following
1422
+ estimate
1423
+ P− �
1424
+ δ1+ǫ�
1425
+ ≥ C1 · ǫδǫ−1 − C2 in Ω,
1426
+ where C1, C2 depends only on d, Ω, λ, Λ.
1427
+ Proof. Since ∂Ω is C2, we have δ1+ǫ ∈ C2(Ω) and for any x ∈ Ω
1428
+ ∂2
1429
+ ∂xi∂xj
1430
+ δ1+ǫ(x) = (1 + ǫ)
1431
+
1432
+ δǫ(x)
1433
+ ∂2
1434
+ ∂xi∂xj
1435
+ δ(x) + ǫδǫ−1(x)∂δ(x)
1436
+ ∂xi
1437
+ · ∂δ(x)
1438
+ ∂xj
1439
+
1440
+ := A + B
1441
+ where A, B are two d × d matrices given by
1442
+ A := (ai,j)1≤i,j≤d = (1 + ǫ)δǫ(x)
1443
+ ∂2
1444
+ ∂xi∂xj
1445
+ δ(x)
1446
+ and
1447
+ B := (bi,j)1≤i,j≤d = (1 + ǫ)ǫδǫ−1(x)∂δ(x)
1448
+ ∂xi
1449
+ · ∂δ(x)
1450
+ ∂xj
1451
+ .
1452
+ Note that B is a positive definite matrix and ||A|| ≤ d2(1 + ǫ)(diam Ω)ǫ||δ||C2(¯Ω). Therefore we have
1453
+ P−(δ1+ǫ(x)) = P−(A + B) ≥ P−(B) + P−(A)
1454
+ ≥ P−(B) − d2Λ(1 + ǫ)(diam Ω)ǫ||δ||C2(¯Ω)
1455
+ ≥ ǫ(1 + ǫ)δǫ−1(x)λ|Dδ(x)|2 − d2Λ(1 + ǫ)(diam Ω)ǫ||δ||C2(¯Ω)
1456
≥ C1 · ǫδ^{ǫ−1}(x) − C2.
1457
+
1458
+ Next we obtain an estimate on Lδ in Ω.
1459
+ Lemma 4.4. Let Ω be a bounded C2 domain in Rd. Then we have the following estimate
1460
+ |Lδ| ≤ C(1 + 1(1,2)δ1−α) in Ω,
1461
+ (4.9)
1462
+ where constant C depends only on d, Ω, λ, Λ and
1463
+ ´
1464
+ Rd(1 ∧ |y|α)k(y)dy.
1465
+ Proof. First of all, for all x ∈ Ω we have
1466
+ |Lδ(x)| ≤ sup
1467
+ θ,ν
1468
+ | Tr(aθν(x)D2δ(x))| + sup
1469
+ θ,ν
1470
+ |Iθνδ(x)| ≤ κ + sup
1471
+ θ,ν
1472
+ |Iθνδ(x)|,
1473
+ (4.10)
1474
+ for some constant κ, depending on Ω and uniform bound of aθν. For α ∈ (0, 1], (4.9) follows from the
1475
+ same arguments of Lemma 4.2. For α ∈ (1, 2), it is enough to obtain the estimate (4.9) for all x ∈ Ω
1476
+ such that δ(x) < 1. We follow the similar calculation as in Lemma 4.2 and get
1477
+ |Iθνδ(x)| ≤
1478
+ ˆ
1479
+ Rd |δ(x + y) − δ(x) − 1B1(y)y · ∇δ(x)|k(y)dy
1480
+ =
1481
+ ˆ
1482
+ |y|≤ δ(x)
1483
+ 2
1484
+ +
1485
+ ˆ
1486
+ δ(x)
1487
+ 2 <|y|<1
1488
+ +
1489
+ ˆ
1490
+ |y|>1
1491
+ and
1492
+ |Iθνδ(x)| ≤ κ1
1493
+ ˆ
1494
+ Rd(1 ∧ |y|α)k(y)dyδ1−α(x)
1495
+ for some constant κ1. Inserting these estimates in (4.10) we obtain
1496
+ |Lδ(x)| ≤ κ2δ1−α(x)
1497
+ for some constant κ2 and (4.9) follows.
1498
+
1499
+
1500
+ BOUNDARY REGULARITY
1501
+ 17
1502
+ Let us now define the sets that we use for our oscillation estimates. We borrow the notations of
1503
+ [46].
1504
+ Definition 4.1. Let κ ∈ (0, 1
1505
+ 16) be a fixed small constant and let κ′ = 1/2 + 2κ. Given a point
1506
+ x0 ∈ ∂Ω and R > 0, we define
1507
+ DR = DR(x0) = BR(x0) ∩ Ω,
1508
+ and
1509
+ D+
1510
+ κ′R = D+
1511
+ κ′R(x0) = Bκ′R(x0) ∩ {x ∈ Ω : (x − x0) · n(x0) ≥ 2κR} ,
1512
+ where n(x0) is the unit inward normal at x0. For any bounded C1,1-domain, we know that there
1513
+ exists ρ > 0, depending on Ω, such that the following inclusions hold for each x0 ∈ ∂Ω and R ≤ ρ:
1514
+ BκR(y) ⊂ DR(x0)
1515
+ for all y ∈ D+
1516
+ κ′R(x0),
1517
+ (4.11)
1518
+ and
1519
+ B4κR(y∗ + 4κRn(y∗)) ⊂ DR(x0),
1520
+ and
1521
+ BκR(y∗ + 4κRn(y∗)) ⊂ D+
1522
+ κ′R(x0)
1523
+ (4.12)
1524
+ for all y ∈ DR/2, where y∗ ∈ ∂Ω is the unique boundary point satisfying |y − y∗| = dist(y, ∂Ω). Note
1525
+ that, since R ≤ ρ, y ∈ DR/2 is close enough to ∂Ω and hence the point y∗ + 4κR n(y∗) belongs to the
1526
+ line joining y and y∗.
1527
+ Remark 4.1. In the remaining part of this section, we fix ρ > 0 to be a small constant depending
1528
+ only on Ω, so that (4.11)-(4.12) hold whenever R ≤ ρ and x0 ∈ ∂Ω. Also, every point on ∂Ω can be
1529
+ touched from both inside and outside Ω by balls of radius ρ. We also fix σ > 0 small enough so that
1530
+ for 0 < r ≤ ρ and x0 ∈ ∂Ω we have
1531
+ Bηr(x0) ∩ Ω ⊂ B(1+σ)r(z) \ ¯Br(z)
1532
+ for
1533
+ η = σ/8, σ ∈ (0, γ),
1534
+ for any x′ ∈ ∂Ω ∩ Bηr(x0), where Br(z) is a ball contained in Rd \ Ω that touches ∂Ω at point x′.
1535
+ In the following lemma, using Lemma 4.2 and Lemma 4.3 we construct supersolutions. We denote
1536
+ Ωρ := {x ∈ Ω| dist(x, Ωc) < ρ}.
1537
+ Lemma 4.5. Let Ω be a bounded C2 domain in Rd and α ∈ (1, 2), then there exist ρ1 > 0 and a C2
1538
+ function φ1 satisfying
1539
+
1540
+
1541
+
1542
+
1543
+
1544
+ P+φ1(x) + P+
1545
+ k φ1(x) ≤ −Cδ− α
1546
+ 2 (x)
1547
+ in
1548
+ Ωρ1,
1549
+ C−1δ(x) ≤ φ1(x) ≤ Cδ(x)
1550
+ in
1551
+ Ω,
1552
+ φ1(x) = 0
1553
+ in
1554
+ Rd \ Ω,
1555
+ where the constants ρ1 and C depend only on d, α, Ω, λ, Λ and
1556
+ ´
1557
+ Rd(1 ∧ |y|α)k(y)dy.
1558
+ Proof. Let ǫ = 2−α
1559
+ 2
1560
+ and c =
1561
+ 1
1562
+ (diam Ω)2 , and define
1563
+ φ1(x) = δ(x) − cδ1+ǫ(x).
1564
+ Since both δ and δ1+ǫ are in C2(Ω), we have P+φ1(x) ≤ P+δ(x) − cP−δ1+ǫ(x). Then by Lemma 4.3
1565
+ and supθν | Tr(aθν(x)D2δ(x))| ≤ ˜C, we get for all x ∈ Ωρ
1566
+ P+φ1(x) ≤ P+δ(x) − cP−δ1+ǫ(x) ≤ C − c(C1 · ǫδǫ−1(x)).
1567
+ Similarly for all x ∈ Ωρ, using Lemma 4.2 and Lemma 4.4 we get
1568
+ P+
1569
+ k φ1(x) ≤ |P+
1570
+ k δ(x)| + c|P−
1571
+ k δ1+ǫ(x)| ≤ C2δ1−α(x).
1572
+ Combining the above inequalities we have
1573
+ P+φ1(x) + P+
1574
+ k φ1(x) ≤ C − cC1ǫδǫ−1(x) + C2δ1−α(x)
1575
+ ≤ −δǫ−1(x)
1576
+ � C1(2 − α)
1577
+ 2(diam Ω)2 − Cδ
1578
+ α
1579
+ 2 (x) − C2δ
1580
+ 2−α
1581
+ 2 (x)
1582
+
1583
+ ,
1584
+
1585
+ 18
1586
+ BOUNDARY REGULARITY
1587
+ for all x ∈ Ωρ. Now choose 0 < ρ1 ≤ ρ < 1 such that
1588
+ � C1(2 − α)
1589
+ 2(diam Ω)2 − Cρ
1590
+ α
1591
+ 2
1592
+ 1 − C2ρ
1593
+ 2−α
1594
+ 2
1595
+ 1
1596
+
1597
+ ≥ C1(2 − α)
1598
+ 4(diam Ω)2 .
1599
+ Thus for all x ∈ Ωρ1, we have
1600
+ P+φ1(x) + P+
1601
+ k φ1(x) ≤ − C1(2 − α)
1602
+ 4(diam Ω)2 δ− α
1603
+ 2 (x).
1604
+ Finally the construction of φ1 immediately gives us that
1605
+ C−1δ(x) ≤ φ1(x) ≤ Cδ(x)
1606
+ in
1607
+ Ω,
1608
+ and φ1 = 0 in Ωc. This completes the proof of the lemma.
1609
+
1610
+ As mentioned earlier, the key step of proving Theorem 1.2 is to obtain the oscillation lemma
1611
+ Proposition 4.1. For this we next prove two preparatory lemmas. In the first lemma we obtain a
1612
+ lower bound of infD R
1613
+ 2
1614
+ u
1615
+ δ whereas the second lemma controls supD+
1616
+ κ′R
1617
+ u
1618
+ δ by using that lower bound.
1619
+ Lemma 4.6. Let α ∈ (0, 2) and Ω be a bounded C2 domain in Rd. Also, let u be such that u ≥ 0 in
1620
+ Rd, and |Lu| ≤ C2(1 + 1(1,2)(α)δ1−α) in DR, for some constant C2. If ˆα is given by
1621
+ ˆα =
1622
+
1623
+ 1
1624
+ if α ∈ (0, 1],
1625
+ 2−α
1626
+ 2
1627
+ if α ∈ (1, 2),
1628
+ then there exists a positive constant C depending only on d, Ω, Λ, λ, α,
1629
+ ´
1630
+ Rd(1 ∧ |y|α)k(y)dy, such that
1631
+ inf
1632
+ D+
1633
+ κ′R
1634
+ u
1635
+ δ ≤ C
1636
+
1637
+ inf
1638
+ D R
1639
+ 2
1640
+ u
1641
+ δ + C2Rˆα
1642
+
1643
+ (4.13)
1644
+ for all R ≤ ρ0, where the constant ρ0 depends only on d, Ω, λ, Λ, α and
1645
+ ´
1646
+ Rd(1 ∧ |y|α)k(y)dy.
1647
+ Proof. Suppose R ≤ ηρ, where ρ is given by Remark 4.1 and η ≤ 1 be some constant that will be
1648
+ chosen later. Define m = infD+
1649
+ κ′ R
1650
+ u/δ ≥ 0. Let us first observe that by (4.11) we have,
1651
+ u ≥ mδ ≥ m (κR)
1652
+ in D+
1653
+ κ′R.
1654
+ (4.14)
1655
+ Moreover by (4.12), for any y ∈ DR/2, we have either y ∈ D+
1656
+ κ′R or δ(y) < 4κR. If y ∈ D+
1657
+ κ′R, then by
1658
+ the definition of m we get m ≤ u(y)/δ(y).
1659
+ Next we consider δ(y) < 4κR. Let y∗ be the nearest point to y on ∂Ω, i.e, dist(y, ∂Ω) = |y − y∗|
1660
+ and define ˜y = y∗ + 4κR n(y∗). Again by (4.12), we have
1661
+ B4κR(˜y) ⊂ DR and BκR(˜y) ⊂ D+
1662
+ κ′R.
1663
+ Denoting r = κR and using the subsolution constructed in Lemma 4.1, define ˜φr(x) := 1
1664
+ ˜κφr(x − ˜y).
1665
+ We will consider two cases.
1666
+ Case 1: α ∈ (0, 1]. Take r′ = R
1667
+ η . Since r′ ≤ ρ, points of ∂Ω can be touched by exterior ball of radius
1668
+ r′. In particular, for y∗ ∈ ∂Ω, we can find a point z ∈ Ωc such that ¯Br′(z) ⊂ Ωc touches ∂Ω at y∗.
1669
+ Now from [43, Lemma 5.4] there exists a bounded, Lipschitz continuous function ϕr′, with Lipschitz
1670
+ constant 1
1671
+ r′ , that satisfies
1672
+
1673
+
1674
+
1675
+
1676
+
1677
+ ϕr′ = 0,
1678
+ in
1679
+ ¯Br′,
1680
+ ϕr′ > 0,
1681
+ in
1682
+ ¯Bc
1683
+ r′,
1684
+ P+ϕr′ + P+
1685
+ k ϕr′ ≤ −
1686
+ 1
1687
+ (r′)2 ,
1688
+ in
1689
+ B(1+σ)r′ \ ¯Br′,
1690
+
1691
+ BOUNDARY REGULARITY
1692
+ 19
1693
+ for some constant σ, independent of r′. Without any loss of any generality we may assume σ ≤ γ
1694
+ (see Remark 4.1). Then setting η = σ
1695
+ 8 and using Remark 4.1, we have
1696
+ DR ⊂ B(1+σ)r′(z) \ Br′(z)
1697
+ and by (4.12) we have
1698
+ B4r(˜y) \ Br(˜y) ⊂ DR ⊂ B(1+σ)r′(z) \ Br′(z).
1699
+ We show that v(x) = m˜φr(x) − C2(r′)2ϕr′(x − z) is an appropriate subsolution. Since both ˜φr and
1700
+ ϕr′ are C2 functions in B4r(˜y) \ ¯Br(˜y), we conclude that v is C2 function in B4r(˜y) \ ¯Br(˜y). For
1701
+ x ∈ B4r(˜y) \ ¯Br(˜y),
1702
+ P−v(x) + P−
1703
+ k v(x) ≥ m
1704
+
1705
+ P− ˜φr(x) + P−
1706
+ k ˜φr(x)
1707
+
1708
+ − C2(r′)2 �
1709
+ P+ϕr′(x − z) + P+
1710
+ k ϕr′(x − z)
1711
+
1712
+ ≥ C2.
1713
+ Therefore by Remark 2.1 we have
1714
+ P+(v − u) + P+
1715
+ k (v − u) ≥ 0 in B4r(˜y) \ ¯Br(˜y).
1716
+ Furthermore, using (4.14) and u ≥ 0 in Rd we obtain u(x) ≥ m˜φr(x) − C2(r′)2ϕr′(x − z) in
1717
+
1718
+ B4r(˜y) \ ¯Br(˜y)
1719
+ �c . Hence an application of maximum principle (cf.
1720
+ Lemma 2.2) gives u ≥ v in
1721
+ Rd. Now for y ∈ DR/2, using the Lipschitz continuity of ϕr′ we get
1722
+ m˜φr(y) ≤ u(y) + C2(r′)2 [ϕr′(y − z) − ϕr′(y∗ − z)] ≤ u(y) + C2r′ · δ(y)
1723
+ and as y lies on the line segment joining y∗ to ˜y we get
1724
+ u(y)
1725
+ δ(y) + C2r′ ≥
1726
+ m
1727
+ (˜κ)2 .
1728
+ This gives
1729
+ inf
1730
+ D+
1731
+ κ′R
1732
+ u
1733
+ δ ≤ C
1734
+
1735
+ inf
1736
+ DR/2
1737
+ u
1738
+ δ + C2
1739
+ R
1740
+ η
1741
+
1742
+ and finally choosing ρ0 = ηρ we have (4.13).
1743
+ Case 2: α ∈ (1, 2). Let ρ1 as in Lemma 4.5 and consider R ≤ ρ1 < 1. Here we aim to construct
1744
+ an appropriate subsolution using ˜φr(x) and supersolution constructed in Lemma 4.5. Since δ(x) ≤ 1
1745
+ in DR, we have |Lu(x)| ≤ C2(1 + δ1−α(x)) ≤ 2C2δ1−α(x) in DR. Also by Lemma 4.5, we have a
1746
+ bounded function φ1 which is C2 in Ωρ1 ⊃ DR and satisfies
1747
+ P+φ1(x) + P+
1748
+ k φ1(x) ≤ −Cδ− α
1749
+ 2 (x) = −C
1750
+ 1
1751
+ δ
1752
+ 2−α
1753
+ 2 (x)
1754
+ δ1−α(x) ≤ −C
1755
+ Rˆα δ1−α(x),
1756
+ for all x ∈ DR. Now we define the subsolutions as
1757
+ v(x) = m˜φr(x) − µ Rˆαφ1(x),
1758
+ where the constant µ is chosen suitably so that P−v(x) + P−
1759
+ k v(x) ≥ 2C2δ1−α(x) in B4r(˜y) \ ¯Br(˜y)
1760
+ (i.e. µ = 2C2
1761
+ C ). Also u ≥ v in (B4r(˜y) \ ¯Br(˜y))c. Using the same calculation as previous case for v − u
1762
+ and maximum principle Lemma 2.2 we derive that u ≥ v in Rd. Again, repeating the arguments of
1763
+ Case 1 we get
1764
+ inf
1765
+ D+
1766
+ κ′R
1767
+ u
1768
+ δ ≤ C
1769
+
1770
+ inf
1771
+ D R
1772
+ 2
1773
+ u
1774
+ δ + 2C2Rˆα
1775
+
1776
+ .
1777
+ Choosing ρ0 = ηρ ∧ ρ1 completes the proof.
1778
+
1779
+ Lemma 4.7. Let α′ = 1 ∧ (2 − α) and Ω be a bounded C2 domain in Rd. Also, let u be a bounded
1780
+ continuous function such that u ≥ 0 and u ≤ M0δ(x) in Rd, and |Lu| ≤ C2(1 + 1(1,2)(α)δ1−α) in
1781
+
1782
+ 20
1783
+ BOUNDARY REGULARITY
1784
+ DR, for some constant C2. Then, there exists a positive constant C, depending only on d, λ, Λ, Ω and
1785
+ ´
1786
+ Rd(1 ∧ |y|α)k(y)dy, such that
1787
+ sup
1788
+ D+
1789
+ κ′R
1790
+ u
1791
+ δ ≤ C
1792
+
1793
+ inf
1794
+ D+
1795
+ κ′R
1796
+ u
1797
+ δ + (M0 ∨ C2)Rα′
1798
+
1799
+ (4.15)
1800
+ for all R ≤ ρ, where constant ρ is given by Remark 4.1.
1801
+ Proof. We will use the weak Harnack inequality proved in Theorem 4.1 to show (4.15). Let R ≤ ρ.
1802
+ Then for each y ∈ D+
1803
+ κ′R, we have BκR(y) ⊂ DR. Hence we have |Lu| ≤ C2(1 + 1(1,2)(α)δ1−α(x)) in
1804
+ BκR(y). Without loss of generality, we may assume y = 0. Let s = κR and define v(x) = u(sx) for
1805
+ all x ∈ Rd. Then, it can be easily seen that
1806
+ s2L[sx, u] = Ls[x, v] := sup
1807
+ θ∈Θ
1808
+ inf
1809
+ ν∈Γ
1810
+
1811
+ Tr aθν(sx)D2v(x) + Is
1812
+ θν[x, v]
1813
+
1814
+ for all x ∈ B2.
1815
+ This gives
1816
+ |Ls[x, v]| ≤ C2s2(1 + 1(1,2)(α)δ1−α(sx))
1817
+ ≤ C2
1818
+
1819
+ s2 + 1(1,2)(α)s2 (κR)1−α�
1820
+ ≤ C2s1+α′ ,
1821
+ in B2 where α′ = 1 ∧ (2 − α). In second line, we used that for each x ∈ BκR, |sx| < κR and hence
1822
+ δ(sx) > κR
1823
+ 2 = s
1824
+ 2. From u ≤ M0δ(x) we have v(y) ≤ M0 diam Ω and v(y) ≤ M0s(1 + |y|) in whole Rd.
1825
+ Hence by Corollary 4.1, we obtain
1826
+ sup
1827
+ B 1
1828
+ 4
1829
+ v ≤ C
1830
+
1831
+ inf
1832
+ B 1
1833
+ 4
1834
+ v + (M0 ∨ C2)s1+α′
1835
+
1836
+ ,
1837
+ where constant C does not depend on s, M0, C2. This of course, implies
1838
+ sup
1839
+ B κR
1840
+ 64
1841
+ (y)
1842
+ u ≤ C
1843
+
1844
+ inf
1845
+ B κR
1846
+ 64
1847
+ (y) u + (M0 ∨ C2)R1+α′
1848
+
1849
+ ,
1850
+ for all y ∈ D+
1851
+ κ′R. Now cover D+
1852
+ κ′R by a finite number of balls BκR/64(yi), independent of R, to obtain
1853
+ sup
1854
+ D+
1855
+ κ′R
1856
+ u ≤ C
1857
+
1858
+ inf
1859
+ D+
1860
+ κ′R
1861
+ u + (M0 ∨ C2)R1+α′
1862
+
1863
+ .
1864
+ Then (4.15) follows since κR/2 ≤ δ ≤ 3κR/2 in D+
1865
+ κ′R.
1866
+
1867
+ Now we are ready to prove the oscillation lemma.
1868
+ Proposition 4.1. Let u be a bounded continuous function such that |Lu| ≤ K in Ω, for some constant
1869
+ K, and u = 0 in Ωc. Given any x0 ∈ ∂Ω, let DR be as in the Definition 4.1. Then for some τ ∈ (0, ˆα)
1870
+ there exists C, dependent on Ω, d, λ, Λ, α and
1871
+ ´
1872
+ Rd(1 ∧ |y|α)k(y)dy but not on x0, such that
1873
+ sup
1874
+ DR
1875
+ u
1876
+ δ − inf
1877
+ DR
1878
+ u
1879
+ δ ≤ CKRτ
1880
+ (4.16)
1881
+ for all R ≤ ρ0, where ρ0 > 0 is a constant depending only on Ω, d, λ, Λ, α and
1882
+ ´
1883
+ Rd(1 ∧ |y|α)k(y)dy.
1884
+ Proof. For the proof we follow a standard method, similar to [46], with the help of Lemmas 4.4, 4.6,
1885
+ and 4.7. Fix x0 ∈ ∂Ω and consider ρ0 > 0 to be chosen later. With no loss of generality, we assume
1886
+ x0 = 0. In view of (3.2), we only consider the case K > 0. By considering u/K instead of u, we
1887
+ may assume that K = 1, that is, |Lu| ≤ 1 in Ω. From Theorem 1.1 we note that ||u||C0,1(Rd) ≤ C1.
1888
+ Below, we consider two cases.
1889
+
1890
+ BOUNDARY REGULARITY
1891
+ 21
1892
+ Case 1: For α ∈ (0, 1], Iθνu is classically defined and |Iθνu| ≤ ˜C in Ω for all θ and ν. Consequently,
1893
+ one can combine the nonlocal term on the rhs and only deal with local nonlinear operator ˜L[x, u] :=
1894
+ supθ∈Θ infν∈Γ
1895
+
1896
+ Tr aθν(x)D2u(x)
1897
+
1898
+ . In this case the proof is simpler and can be done following the
1899
+ same method as for the local case. However, the method we use below would also work with an
1900
+ appropriate modification.
1901
+ Case 2: Now we deal with the case α ∈ (1, 2). We show that there exists K > 0, ρ1 ∈ (0, ρ0) and
1902
+ τ ∈ (0, 1), dependent only on Ω, d, λ, Λ, α and
1903
+ ´
1904
+ Rd(1 ∧ |y|α)k(y)dy, and monotone sequences {Mk}
1905
+ and {mk} such that, for all k ≥ 0,
1906
+ Mk − mk =
1907
+ 1
1908
+ 4kτ ,
1909
+ −1 ≤ mk ≤ mk+1 < Mk+1 ≤ Mk ≤ 1,
1910
+ (4.17)
1911
+ and
1912
+ mk ≤ K−1 u
1913
+ δ ≤ Mk
1914
+ in
1915
+ DRk,
1916
+ where
1917
+ Rk = ρ1
1918
+ 4k .
1919
+ (4.18)
1920
+ Note that (4.18) is equivalent to the following
1921
+ mkδ ≤ K−1u ≤ Mkδ,
1922
+ in
1923
+ BRk,
1924
+ where
1925
+ Rk = ρ1
1926
+ 4k .
1927
+ (4.19)
1928
+ Next we construct monotone sequences {Mk} and {mk} by induction.
1929
+ The existence of M0 and m0 such that (4.17) and (4.19) hold for k = 0 is guaranteed by Lemma 3.1.
1930
+ Assume that we have the sequences up to Mk and mk. We want to show the existence of Mk+1 and
1931
+ mk+1 such that (4.17)-(4.19) hold. We set
1932
+ uk = 1
1933
+ Ku − mkδ.
1934
+ Note that to apply Lemma 4.7 we need uk to be nonnegative in Rd. Therefore we shall work with
1935
+ u+
1936
+ k , the positive part of uk. Let uk = u+
1937
+ k − u−
1938
+ k and by the induction hypothesis,
1939
+ u+
1940
+ k = uk
1941
+ and
1942
+ u−
1943
+ k = 0
1944
+ in
1945
+ BRk.
1946
+ (4.20)
1947
+ We need to find a lower bound on uk. Since uk ≥ 0 in BRk and uk is Lipschitz in Rd, we get for
1948
+ x ∈ Bc
1949
+ Rk that
1950
+ uk(x) = uk(Rkxu) + uk(x) − uk(Rkxu) ≥ −CL|x − Rkxu|,
1951
+ (4.21)
1952
+ where zu =
1953
+ 1
1954
+ |z|z for z ̸= 0 and CL denotes a Lipschitz constant of uk which can be chosen independent
1955
+ of k. Using Lemma 3.1 we also have |uk| ≤ K−1 + diam(Ω) = C1 for all x ∈ Rd. Thus using (4.20)
1956
+ and (4.21) we calculate L[x, u−
1957
+ k ] in D Rk
1958
+ 2 . Let x ∈ DRk/2. By (4.20), D2u−
1959
+ k (x) = 0. Then
1960
+ 0 ≤ Iθν[x, u−
1961
+ k ] =
1962
+ ˆ
1963
+ x+y̸∈BRk
1964
+ u−
1965
+ k (x + y)Nθν(x, y)dy
1966
+
1967
+ ˆ
1968
+
1969
+ |y|≥ Rk
1970
+ 2 ,x+y̸=0
1971
+ � u−
1972
+ k (x + y)k(y)dy
1973
+ ≤ CL
1974
+ ˆ
1975
+ � Rk
1976
+ 2 ≤|y|≤1, x+y̸=0
1977
+
1978
+ ���(x + y) − Rk(x + y)u
1979
+ ���k(y)dy + C1
1980
+ ˆ
1981
+ |y|≥1
1982
+ k(y)dy
1983
+ ≤ CL
1984
+ ˆ
1985
+ Rk
1986
+ 2 ≤|y|≤1
1987
+ (|x| + Rk) k(y) dy + CL
1988
+ ˆ
1989
+ Rk
1990
+ 2 ≤|y|≤1
1991
+ |y|k(y) dy + C1
1992
+ ˆ
1993
+ Rd(1 ∧ |y|α)k(y) dy
1994
+ ≤ κ3
1995
+ �ˆ
1996
+ Rd(1 ∧ |y|α)k(y) dy
1997
+ � �
1998
+ R1−α
1999
+ k
2000
+ + 1
2001
+
2002
+ ≤ κ4R1−α
2003
+ k
2004
+ ,
2005
+ (4.22)
2006
+ for some constants κ3, κ4, independent of k.
2007
+
2008
+ 22
2009
+ BOUNDARY REGULARITY
2010
+ Now we write u+
2011
+ k = K−1u − mkδ + u−
2012
+ k . Since δ is C2 and u−
2013
+ k = 0 in D Rk
2014
+ 2 , first note that
2015
+ Lu+
2016
+ k ≤ K−1 − (P− + P−
2017
+ k )(mkδ) + (P+ + P+
2018
+ k )(u−
2019
+ k ),
2020
+ Lu+
2021
+ k ≥ −K−1 − (P+ + P+
2022
+ k )(mkδ) + (P− + P−
2023
+ k )(u−
2024
+ k ).
2025
+ Using Lemma 4.4 and (4.22) in the above estimate we have
2026
+ |Lu+
2027
+ k | ≤ K−1 + mkCδ1−α + κ4(Rk)1−α in D Rk
2028
+ 2 .
2029
+ (4.23)
2030
+ Since ρ1 ≥ Rk ≥ δ in DRk, for α > 1, we have R1−α
2031
+ k
2032
+ ≤ δ1−α, and hence, from (4.23), we have
2033
+ |Lu+
2034
+ k | ≤
2035
+
2036
+ K−1[(ρ1)]α−1 + C + κ4
2037
+
2038
+ δ1−α(x) := κ5δ1−α(x)
2039
+ in
2040
+ DRk/2.
2041
+ Now we are in a position to apply Lemmas 4.6 and 4.7. Recalling that
2042
+ u+
2043
+ k = uk = K−1u − mkδ
2044
+ in
2045
+ DRk,
2046
+ and using Lemma 3.1 we also have |u+
2047
+ k | ≤ |uk| ≤ (K−1 + 1)δ(x) = C1δ(x) for all x ∈ Rd. We get
2048
+ from Lemmas 4.6 and 4.7 that
2049
+ sup
2050
+ D+
2051
+ κ′Rk/2
2052
+
2053
+ K−1 u
2054
+ δ − mk
2055
+
2056
+ ≤ C
2057
+
2058
+ inf
2059
+ D+
2060
+ κ′Rk/2
2061
+
2062
+ K−1 u
2063
+ δ − mk
2064
+
2065
+ + (κ5 ∨ C1)Rˆα
2066
+ k
2067
+
2068
+ ≤ C
2069
+
2070
+ inf
2071
+ DRk/4
2072
+
2073
+ K−1 u
2074
+ δ − mk
2075
+
2076
+ + (κ5 ∨ C1)Rˆα
2077
+ k
2078
+
2079
+ .
2080
+ (4.24)
2081
+ Repeating a similar argument for the function ˜uk = Mkδ − K−1u, we find
2082
+ sup
2083
+ D+
2084
+ κ′Rk/2
2085
+
2086
+ Mk − K−1 u
2087
+ δ
2088
+
2089
+ ≤ C
2090
+
2091
+ inf
2092
+ DRk/4
2093
+
2094
+ Mk − K−1 u
2095
+ δ
2096
+
2097
+ + (κ5 ∨ C1)Rˆα
2098
+ k
2099
+
2100
+ .
2101
+ (4.25)
2102
+ Combining (4.24) and (4.25) we obtain
2103
+ Mk − mk ≤ C
2104
+
2105
+ inf
2106
+ D+
2107
+ Rk/4
2108
+
2109
+ Mk − K−1 u
2110
+ δ
2111
+
2112
+ + inf
2113
+ D+
2114
+ Rk/4
2115
+
2116
+ K−1 u
2117
+ δ − mk
2118
+
2119
+ + (κ5 ∨ C1)Rˆα
2120
+ k
2121
+
2122
+ = C
2123
+
2124
+ inf
2125
+ DRk+1
2126
+ K−1 u
2127
+ δ − sup
2128
+ DRk+1
2129
+ K−1 u
2130
+ δ + Mk − mk + (κ5 ∨ C1)Rˆα
2131
+ k
2132
+
2133
+ .
2134
+ (4.26)
2135
+ Putting Mk − mk =
2136
+ 1
2137
+ 4τk in (4.26), we have
2138
+ sup
2139
+ DRk+1
2140
+ K−1 u
2141
+ δ −
2142
+ inf
2143
+ DRk+1
2144
+ K−1 u
2145
+ δ ≤
2146
+ �C − 1
2147
+ C
2148
+ 1
2149
+ 4τk + (κ5 ∨ C1)Rˆα
2150
+ k
2151
+
2152
+ =
2153
+ 1
2154
+ 4τk
2155
+ �C − 1
2156
+ C
2157
+ + (κ5 ∨ C1)Rˆα
2158
+ k4τk�
2159
+ .
2160
+ (4.27)
2161
+ Since Rk = ρ1
2162
+ 4k for ρ1 ∈ (0, ρ0), we can choose ρ0 and τ small so that
2163
+ �C − 1
2164
+ C
2165
+ + (κ5 ∨ C1)Rˆα
2166
+ k 4τk�
2167
+ ≤ 1
2168
+ 4τ .
2169
+ Putting in (4.27) we obtain
2170
+ sup
2171
+ DRk+1
2172
+ K−1 u
2173
+ δ −
2174
+ inf
2175
+ DRk+1
2176
+ K−1 u
2177
+ δ ≤
2178
+ 1
2179
+ 4τ(k+1) .
2180
+ Thus we find mk+1 and Mk+1 such that (4.17) and (4.18) hold.
2181
+ It is easy to prove (4.16) from
2182
+ (4.17)-(4.18).
2183
+
2184
+ Next we establish Hölder regularity of u/δ up to the boundary, that is Theorem 1.2.
2185
+
2186
+ BOUNDARY REGULARITY
2187
+ 23
2188
+ Proof of Theorem 1.2. Replacing u by
2189
+ u
2190
+ CK we may assume that |Lu| ≤ 1 in Ω. Let v = u/δ. From
2191
+ Lemma 3.1 we then have
2192
+ ∥v∥L∞(Ω) ≤ C,
2193
+ for some constant C and from Theorem 1.1 we have
2194
+ ∥u∥C0,1(Rd) ≤ C.
2195
+ (4.28)
2196
+ Also from Proposition 4.1 for each x0 ∈ ∂Ω and for all r > 0 we have
2197
+ sup
2198
+ Dr(x0)
2199
+ v −
2200
+ inf
2201
+ Dr(x0) v ≤ Crτ.
2202
+ (4.29)
2203
+ where Dr(x0) = Br(x0) ∩ Ω as before. To complete the proof we shall show that
2204
+ sup
2205
+ x,y∈Ω,x̸=y
2206
+ |v(x) − v(y)|
2207
+ |x − y|κ
2208
+ ≤ C,
2209
+ (4.30)
2210
+ for some κ > 0.
2211
+ Let r = |x − y| and there exists x0, y0 ∈ ∂Ω such that δ(x) = |x − x0| and
2212
+ δ(y) = |y − y0|. If r ≥ 1
2213
+ 8, then
2214
+ |v(x) − v(y)|
2215
+ |x − y|κ
2216
+ ≤ 2 · 8κ||v||L∞(Ω).
2217
+ If r < 1
2218
+ 8 and r ≥ 1
2219
+ 8(δ(x) ∨ δ(y))p for some p > 2 then clearly y ∈ Bκr1/p(x0) for some κ > 0. Now
2220
+ using (4.29) we obtain
2221
+ |v(x) − v(y)| ≤
2222
+ sup
2223
+ Dκr1/p(x0)
2224
+ v −
2225
+ inf
2226
+ Dκr1/p(x0) v ≤ Cκrτ/p.
2227
+ If r < 1
2228
+ 8 and r < 1
2229
+ 8(δ(x) ∨ δ(y))p, then r < 1
2230
+ 8(δ(x) ∨ δ(y)) and this implies y ∈ B 1
2231
+ 8(δ(x)∨δ(y))(x) or
2232
+ x ∈ B 1
2233
+ 8(δ(x)∨δ(y))(y). Without loss of any generality assume δ(x) ≥ δ(y) and y ∈ B δ(x)
2234
+ 8 (x). Using
2235
+ (4.28) and the Lipschitz continuity of δ, we get
2236
+ |v(x) − v(y)| =
2237
+ ����
2238
+ u(x)
2239
+ δ(x) − u(y)
2240
+ δ(y)
2241
+ ���� ≤ M(K, diam Ω)r
2242
+ δ(x) · δ(y)
2243
+ .
2244
+ Also we have (8r)1/pδ(y) < δ(x) · δ(y). This implies
2245
+ |v(x) − v(y)| ≤ M(K, diam Ω)r
2246
+ δ(x) · δ(y)
2247
+ < M(K, diam Ω)
2248
+ 81/p
2249
+ · r1−1/p
2250
+ δ(y) .
2251
+ Now if r < 1
2252
+ 8(δ(y))p then one obtains
2253
+ |v(x) − v(y)| < M(K, diam Ω)
2254
+ 81/p
2255
+ · r1−1/p
2256
+ δ(y)
2257
+ ≤ Cr1−2/p.
2258
+ On the other hand, if r ≥ 1
2259
+ 8(δ(y))p, since δ(y) >
2260
+ 1
2261
+ 64δ(x) we have r ≥ 1
2262
+ 8 ·
2263
+ � 1
2264
+ 64
2265
+ �p (δ(x))p and this case
2266
+ can be treated as previous. Therefore choosing κ = (1 − 2
2267
+ p) ∧ τ
2268
+ p we conclude (4.30). This completes
2269
+ the proof.
2270
+
2271
+ 5. Global Hölder regularity of the gradient
2272
+ In this section we prove the Hölder regularity of Du up to the boundary. First, let us recall
2273
+ L[x, u] = sup
2274
+ θ∈Θ
2275
+ inf
2276
+ ν∈γ
2277
+
2278
+ Tr aθν(x)D2u(x) + Iθν[x, u]
2279
+
2280
+ .
2281
+ We denote v = u
2282
+ δ . Following [11], next we obtain the in-equations satisfied by v.
2283
+
2284
+ 24
2285
+ BOUNDARY REGULARITY
2286
Lemma 5.1. Let Ω be a bounded C2 domain in Rd. If |Lu| ≤ K in Ω and u = 0 in Ωc, then we have
2287
+ Lv + 2K0d2 |Dδ|
2288
+ δ
2289
+ |Dv| ≥ 1
2290
+ δ
2291
+
2292
+ − K − |v|(P + + P +
2293
+ k )δ − sup
2294
+ θ,ν
2295
+ Zθν[v, δ]
2296
+
2297
+ ,
2298
+ Lv − 2K0d2 |Dδ|
2299
+ δ
2300
+ |Dv| ≤ 1
2301
+ δ
2302
+
2303
+ K − |v|(P − + P −
2304
+ k )δ − inf
2305
+ θ,ν Zθν[v, δ]
2306
+
2307
+ (5.1)
2308
+ for some K0, where
2309
+ Zθν[v, δ](x) =
2310
+ ˆ
2311
+ Rd(v(y) − v(x))(δ(y) − δ(x))Nθν(x, y − x)dy.
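For the reader's convenience we record the elementary product rule that underlies (5.1) and its proof below; it is obtained by expanding the differences directly, whenever all three integrals are finite: for ψ ∈ C2(Br(x)) ∩ L∞(Rd) and x ∈ Ω,
Iθν[x, δψ] = δ(x) Iθν[x, ψ] + ψ(x) Iθν[x, δ] + Zθν[ψ, δ](x).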
2312
+ Proof. First note that, since u ∈ C1(Ω) by Lemma 2.1, we have v ∈ C1(Ω). Therefore, Zθν[v, δ] is
2313
+ continuous in Ω. Consider a test function ψ ∈ C2(Ω) that touches v from above at x ∈ Ω. Define
2314
+ ψr(z) =
2315
+
2316
+ ψ(z)
2317
+ in Br(x),
2318
+ v(z)
2319
+ in Bc
2320
+ r(x).
2321
+ By our assertion, we have ψr ≥ v for all r small. To verify the first inequality in (5.1) we must show
2322
+ that
2323
+ L[x, ψr] + 2k0d2 |Dδ(x)|
2324
+ δ(x)
2325
+ · |Dψr(x)| ≥
2326
+ 1
2327
+ δ(x)[−K − |v(x)|(P+ + P+
2328
+ k )δ(x) − sup
2329
+ θ,ν
2330
+ Zθν[v, δ](x)],
2331
+ (5.2)
2332
+ for some r small. We define
2333
+ ˜ψr(z) =
2334
+
2335
+ δ(z)ψ(z)
2336
+ in Br(x),
2337
+ u(z)
2338
+ in Bc
2339
+ r(x).
2340
+ Then, ˜ψr ≥ u for all r small. Since |Lu| ≤ K and δψr = ˜ψr, we obtain at a point x
2341
+ −K ≤L[x, ˜ψr]
2342
+ = sup
2343
+ θ∈Θ
2344
+ inf
2345
+ ν∈γ
2346
+
2347
+ δ(x)
2348
+
2349
+ Tr aθν(x)D2ψr(x) + Iθνψr(x)
2350
+
2351
+ + ψr(x)
2352
+
2353
+ Tr aθν(x)D2δ(x) + Iθνδ(x)
2354
+
2355
+ + Tr
2356
+ � �
2357
+ aθν(x) + aT
2358
+ θν(x)
2359
+
2360
+ · (Dδ(x) ⊗ Dψr(x))
2361
+
2362
+ + Zθν[ψr, δ](x)
2363
+
2364
+ ≤ δ(x)L[x, ψr] + sup
2365
+ θ,ν
2366
+
2367
+ |ψr(x)|
2368
+
2369
+ Tr aθν(x)D2δ(x) + Iθνδ(x)
2370
+
2371
+ + Tr
2372
+ ��
2373
+ aθν(x) + aT
2374
+ θν(x)
2375
+
2376
+ · (Dδ(x) ⊗ Dψr(x))
2377
+
2378
+ + Zθν[ψr, δ](x)
2379
+
2380
+ ≤ δ(x)L[x, ψr] + |v(x)|
2381
+
2382
+ P+ + P+
2383
+ k
2384
+
2385
+ δ(x) + 2K0d2|Dδ(x)| · |Dψr(x)| + sup
2386
+ θ,ν
2387
+ Zθν[ψr, δ](x),
2388
+ for all r small and some constant K0, where Dδ(x)⊗Dψr(x) :=
2389
+
2390
+ ∂δ
2391
+ ∂xi · ∂ψr
2392
+ ∂xj
2393
+
2394
+ i,j . Rearranging the terms
2395
+ we have
2396
+ − K − |v(x)|
2397
+
2398
+ P+ + P+
2399
+ k
2400
+
2401
+ δ(x) − sup
2402
+ θ,ν
2403
+ Zθν[ψr, δ](x) ≤ δ(x)L[x, ψr] + 2K0d2|Dδ(x)| · |Dψr(x)|.
2404
+ (5.3)
2405
+ Let r1 ≤ r. Since ψr is decreasing with r, we get from (5.3) that
2406
+ δ(x)L[x, ψr] + 2K0d2|Dδ(x)| · |Dψr(x)| ≥ δ(x)L[x, ψr1] + 2K0d2|Dδ(x)| · |Dψr1(x)|
2407
+ ≥ lim
2408
+ r1→0
2409
+
2410
+ −K − |v(x)|
2411
+
2412
+ P+ + P+
2413
+ k
2414
+
2415
+ δ(x) − sup
2416
+ θ,ν
2417
+ Zθν[ψr1, δ](x)
2418
+
2419
+ =
2420
+
2421
+ −K − |v(x)|
2422
+
2423
+ P+ + P+
2424
+ k
2425
+
2426
+ δ(x) − sup
2427
+ θ,ν
2428
+ Zθν[v, δ](x)
2429
+
2430
+ ,
2431
+
2432
+ BOUNDARY REGULARITY
2433
+ 25
2434
+ by dominated convergence theorem. This gives (5.2). Similarly we can verify the second inequality
2435
+ of (5.1).
2436
+
2437
Next we obtain the following estimate on v, away from the boundary. Denote Ωσ = {x ∈ Ω :
2438
+ dist(x, Ωc) ≥ σ}.
2439
Lemma 5.2. Let Ω be a bounded C2 domain in Rd. If |Lu| ≤ K in Ω and u = 0 in Ωc, then for some
2440
+ constant C it holds that
2441
+ ∥Dv∥L∞(Ωσ) ≤ CKσκ−1
2442
+ for all σ ∈ (0, 1).
2443
+ (5.4)
2444
+ Furthermore, there exists η ∈ (0, 1) such that for any x ∈ Ωσ and 0 < |x − y| ≤ σ/8 we have
2445
+ |Dv(y) − Dv(x)|
2446
+ |x − y|η
2447
+ ≤ CKσκ−1−η,
2448
+ for all σ ∈ (0, 1).
2449
+ Proof. Using Lemma 5.1 we have
+ Lv + 2K0 d^2 (|Dδ|/δ) |Dv| ≥ (1/δ) ( −K − |v|(P+ + P+_k)δ − sup_{θ,ν} Zθν[v, δ] ),
+ Lv − 2K0 d^2 (|Dδ|/δ) |Dv| ≤ (1/δ) ( K − |v|(P− + P−_k)δ − inf_{θ,ν} Zθν[v, δ] ) (5.5)
+ in Ω. Fix a point x0 ∈ Ωσ and define w(x) = v(x) − v(x0). From (5.5) we then obtain
+ Lw + 2K0 d^2 (|Dδ|/δ) |Dw| ≥ −(1/δ) K − ℓ1,
+ Lw − 2K0 d^2 (|Dδ|/δ) |Dw| ≤ (1/δ) K + ℓ2 (5.6)
+ in Ω, where
+ ℓ1(x) = (1/δ(x)) ( |w(x)|(P+ + P+_k)δ(x) + sup_{θ,ν} Zθν[w, δ](x) + |v(x0)|(P+ + P+_k)δ(x) )
+ and
+ ℓ2(x) = (1/δ(x)) ( |w(x)|(P− + P−_k)δ(x) − inf_{θ,ν} Zθν[w, δ](x) − |v(x0)|(P− + P−_k)δ(x) ).
+ We set r = σ/2 and claim that
+ ∥ℓi∥L∞(Br(x0)) ≤ κ1 σ^(κ−2) for all σ ∈ (0, 1) and i = 1, 2, (5.7)
+ for some constant κ1. Let us denote
+ ξ±_1 = |w(x)|(P± + P±_k)δ / δ, ξ2 = (1/δ) sup_{θ,ν} Zθν[w, δ], ξ±_3 = |v(x0)|(P± + P±_k)δ / δ, ξ4 = (1/δ) inf_{θ,ν} Zθν[v, δ].
+ Recall that κ ∈ (0, α̂). Since ∥P±δ∥L∞(Ω) < ∞ and ∥P±_k δ∥L∞(Ωσ) ≲ 1 + 1_(1,2)(α) δ^(1−α) (cf. Lemma 4.4), and since ∥v∥L∞(Rd) < ∞ and ∥w∥L∞(Br(x0)) ≲ r^κ, it follows that
+ ∥ξ±_3∥L∞(Br(x0)) ≲ 1/σ if α ∈ (0, 1] and ≲ 1/σ^α if α ∈ (1, 2), hence ∥ξ±_3∥L∞(Br(x0)) ≲ σ^(κ−2),
+ and
+ ∥ξ±_1∥L∞(Br(x0)) ≲ σ^κ/δ^2 if α ∈ (0, 1] and ≲ σ^κ/δ^α if α ∈ (1, 2), hence ∥ξ±_1∥L∞(Br(x0)) ≲ σ^(κ−2).
+ Next we estimate ξ2 and ξ4. Let x ∈ Br(x0) and denote r̂ = δ(x)/4. Note that
+ δ(x) ≥ δ(x0) − |x − x0| ≥ 2r − r = r, so that r̂ ≥ r/4.
+ Since u ∈ C1(Ω) by Lemma 2.1 and |u| ≤ Cδ in Rd by Lemma 3.1, we have
+ |Dv| ≤ |Du/δ| + |u Dδ/δ^2| ≲ 1/δ(x) in Br̂(x). (5.8)
+ Now we calculate
+ |Zθν[w, δ](x)| ≤ ∫_{Rd} |δ(x) − δ(y)| |v(x) − v(y)| k(y − x) dy = ∫_{Br̂(x)} + ∫_{B1(x)\Br̂(x)} + ∫_{B1(x)^c} = I1 + I2 + I3.
+ To estimate I1, first we consider α ≤ 1. Since δ is Lipschitz continuous and v is bounded on Rd, I1 can be written as
+ I1 = ∫_{Br̂(x)} (|δ(x) − δ(y)|/|x − y|) |v(x) − v(y)| |x − y| k(y − x) dy ≲ ∫_{Br̂(x)} |x − y|^α k(y − x) dy ≤ ∫_{Rd} (1 ∧ |z|^α) k(z) dz.
+ For α ∈ (1, 2), using the Lipschitz continuity of δ and (5.8) we get
+ I1 = ∫_{Br̂(x)} (|δ(x) − δ(y)|/|x − y|) (|v(x) − v(y)|/|x − y|) |x − y|^α |x − y|^(2−α) k(y − x) dy ≲ (r̂^(2−α)/δ(x)) ∫_{Br̂(x)} |x − y|^α k(y − x) dy ≲ δ(x)^(1−α) ∫_{Rd} (1 ∧ |z|^α) k(z) dz ≲ σ^(κ−1).
+ Bounds on I2 can be computed as follows: for α ≤ 1 we write
+ I2 = ∫_{B1(x)\Br̂(x)} |δ(x) − δ(y)| |v(x) − v(y)| k(y − x) dy ≲ ∫_{B1(x)\Br̂(x)} |x − y|^α k(y − x) dy ≲ ∫_{Rd} (1 ∧ |z|^α) k(z) dz,
+ where we used |δ(x) − δ(y)| ≲ |x − y| and ∥v∥L∞(Rd) < ∞. For α ∈ (1, 2) we can compute I2 as
+ ∫_{B1(x)\Br̂(x)} |δ(x) − δ(y)| |v(x) − v(y)| k(y − x) dy ≲ ∫_{B1(x)\Br̂(x)} |x − y|^(1−α) |x − y|^α k(y − x) dy ≲ δ(x)^(1−α) ∫_{Rd} (1 ∧ |z|^α) k(z) dz ≲ σ^(κ−1).
+ Moreover, since δ and v are bounded in Rd, we get I3 ≤ κ3. Combining the above estimates we obtain ∥ξi∥L∞(Br(x0)) ≲ σ^(κ−2) for i = 2, 4. Thus the claim (5.7) is established.
+ Let us now define ζ(z) = w((r/2)z + x0). Letting b(z) = Dδ((r/2)z + x0) / (2δ((r/2)z + x0)), it follows from (5.6) that
+ L̃r ζ + K0 d^2 r b(z) · |Dζ| ≥ −(r^2/4) ( (1/δ)K + l1 ) ((r/2)z + x0), (5.9)
+ L̃r ζ − K0 d^2 r b(z) · |Dζ| ≤ (r^2/4) ( (1/δ)K + l2 ) ((r/2)z + x0)
+ in B2(0), where
+ L̃r[x, u] := sup_{θ∈Θ} inf_{ν∈Γ} { Tr ( aθν((r/2)x + x0) D^2 u(x) ) + Ĩr_θν[x, u] }
+ and Ĩr_θν is given by
+ Ĩr_θν[x, f] = ∫_{Rd} ( f(x + y) − f(x) − 1_{B_{1/r}}(y) ∇f(x) · y ) (r/2)^(d+2) Nθν((r/2)x + x0, ry) dy.
+ Consider a cut-off function ϕ satisfying ϕ = 1 in B_{3/2} and ϕ = 0 in B2^c. Defining ζ̃ = ζϕ we get from (5.9) that
+ L̃r[z, ζ̃] + K0 d^2 r b(z) · |Dζ̃(z)| ≥ −(r^2/4) ( K/δ + |l1| ) ((r/2)z + x0) − | sup_{θ∈Θ} inf_{ν∈Γ} Ĩr_θν[z, (ϕ − 1)ζ] |,
+ L̃r[z, ζ̃] − K0 d^2 r b(z) · |Dζ̃(z)| ≤ (r^2/4) ( K/δ + |l1| ) ((r/2)z + x0) + | sup_{θ∈Θ} inf_{ν∈Γ} Ĩr_θν[z, (ϕ − 1)ζ] |
+ in B1. Since ∥rb∥L∞(B1(0)) ≤ κ3 for all σ ∈ (0, 1), applying Lemma 2.1 we obtain, for some η ∈ (0, 1),
+ ∥Dζ∥Cη(B1/2(0)) ≤ κ6 ( ∥ζ̃∥L∞(Rd) + κ4 σ + κ5 σ^κ ), (5.10)
+ for some constant κ6 independent of σ ∈ (0, 1), where we used |Ĩr_θν[z, (ϕ − 1)ζ]| ≲ σ (cf. the proof of Theorem 1.1) and |l1|((r/2)· + x0) ≲ σ^(κ−2).
+ Since v is in Cκ(Rd), it follows that
+ ∥ζ̃∥L∞(Rd) = ∥ζ̃∥L∞(B2) ≤ ∥ζ∥L∞(B2) ≲ r^κ.
+ Putting these estimates in (5.10) and calculating the gradient at z = 0 we obtain
+ |Dv(x0)| ≲ σ^(κ−1) for all σ ∈ (0, 1),
+ which proves the estimate (5.4).
+ For the second part, compute the Hölder ratio with Dζ(0) − Dζ(z), where z = (2/r)(y − x0) for |x0 − y| ≤ σ/8. This completes the proof.
+
+ Now we can complete the proof of Theorem 1.3. If u is a solution of the inequation (1.1), then using Theorem 1.1 we have |Lu| ≤ CK. The proof can then be obtained by following the same lines as in [11, Theorem 1.3]. We present it here for the sake of completeness.
+ Proof of Theorem 1.3. Since u = vδ it follows that
+ Du = v Dδ + δ Dv.
+ Since δ ∈ C2(Ω̄), it follows from Theorem 1.2 that v Dδ ∈ Cκ(Ω̄). Thus, we only need to concentrate on ϑ = δ Dv. Consider η from Lemma 5.2; with no loss of generality, we may fix η ∈ (0, κ).
+ For |x − y| ≥ (1/8)(δ(x) ∨ δ(y)) it follows from (5.4) that
+ |ϑ(x) − ϑ(y)| / |x − y|^η ≤ CK (δ^κ(x) + δ^κ(y)) (δ(x) ∨ δ(y))^(−η) ≤ 2CK.
+ So consider the case |x − y| < (1/8)(δ(x) ∨ δ(y)). Without loss of generality, we may assume that |x − y| < (1/8)δ(x). Then
+ (9/8)δ(x) ≥ |x − y| + δ(x) ≥ δ(y) ≥ δ(x) − |x − y| ≥ (7/8)δ(x).
+ By Lemma 5.2, it follows that
+ |ϑ(x) − ϑ(y)| / |x − y|^η ≤ |Dv(x)| |δ(x) − δ(y)| / |x − y|^η + δ(y) |Dv(x) − Dv(y)| / |x − y|^η ≲ δ(x)^(κ−1) δ(x)^(1−η) + δ(y) δ(x)^(κ−1−η) ≤ CK.
+ This completes the proof.
+
+ A. Appendix
+ In this section we aim to present a proof of Lemma 2.1. For this purpose, we first introduce the scaled operator. Let x0 ∈ Ω and r > 0; we define the doubly scaled operator as
+ Lr,s(x0)[x, u] = sup_{θ∈Θ} inf_{ν∈Γ} { Tr aθν(sr(x − x0) + sx0) D^2 u(x) + Ir,s_θν(x0)[x, u] } (A.1)
+ where
+ Ir,s_θν(x0)[x, u] = ∫_{Rd} ( u(x + y) − u(x) − 1_{B_{1/(sr)}}(y) ∇u(x) · y ) r^(d+2) s^(d+2) Nθν(rs(x − x0) + sx0, sry) dy.
+ Further, we define
+ L0,s(x0)[x, u] := sup_{θ∈Θ} inf_{ν∈Γ} { Tr aθν(sx0) D^2 u(x) }. (A.2)
+ Now we give the definition of weak convergence of operators.
+ Definition A.1. Let Ω ⊂ Rd be open and 0 < r < 1. A sequence of operators Lm is said to converge weakly to L in Ω if, for any test function ϕ ∈ L∞(Rd) ∩ C2(Br(x0)) for some Br(x0) ⊂ Ω, we have
+ Lm[x, ϕ] → L[x, ϕ] uniformly in B_{r/2}(x0) as m → ∞.
+ The next lemma is a slightly modified version of [44, Lemma 4.1] which can be proved by similar arguments.
+ Lemma A.1. For any x0 ∈ B1, r > 0 and 0 < s < 1, let Lr,s(x0) and L0,s(x0) be given by (A.1) and (A.2), respectively, where Assumption 1.1 is satisfied by the corresponding coefficients with Ω = B2. Moreover, for given M, ε > 0 and a modulus of continuity ρ, there exist r0, η > 0, independent of x0 and s, such that if
+ (i) r < r0 and L0,s(x0)[x, u] = 0 in B1,
+ (ii) Lr,s(x0)[x, u] + C0 rs |Du(x)| ≥ −η in B1, Lr,s(x0)[x, u] − C0 rs |Du(x)| ≤ η in B1, and u = v on ∂B1,
+ (iii) |u(x)| + |v(x)| ≤ M in Rd and |u(x) − u(y)| + |v(x) − v(y)| ≤ ρ(|x − y|) for all x, y ∈ B1,
+ then we have |u − v| ≤ ε in B1.
+ It is worth mentioning that in [44] the authors have set a uniform continuity assumption on the nonlocal kernels Nθν(x, y) (for the precise assumption, see Assumption (C) of [44, p. 391]), which is a standard assumption to make for the stability property of viscosity solutions. Namely, if we have a sequence of integro-differential operators Lm converging weakly to L in Ω and a sequence of subsolutions (or supersolutions) in Ω converging locally uniformly on any compact subset of Ω, then the limit is also a subsolution (or supersolution) with respect to L. However, in the case of the operator Lr,s defined in (A.1), the nonlocal term Ir,s_θν can be treated as a lower order term that converges to zero as r → 0, without any kind of continuity assumptions on the nonlocal kernels Nθν.
+ Now we give the proof of Lemma 2.1.
+ Proof of Lemma 2.1. We will closely follow the proof of [44, Theorem 4.1]. Fix any x0 ∈ B1 and let Lrk,s(x0) and L0,s(x0) be given by (A.1) and (A.2), respectively. Then by [44, Lemma 3.1], as rk → 0 we have
+ Lrk,s(x0) → L0,s(x0)
+ in the sense of Definition A.1. By interior regularity [16, Corollary 5.7], L0,s(x0) has a C1,β estimate for a universal constant β > 0. Now without loss of any generality we may assume that x0 = 0. Also, dividing u by ∥u∥L∞(Rd) + K in (2.1), we may assume that K = 1 and ∥u∥L∞(Rd) ≤ 1.
+ Using the Hölder regularity [44, Lemma 2.1], we have u ∈ Cβ(B1). Following [18, Theorem 52], we will show that there exist δ, µ ∈ (0, 1/4), independent of s, and a sequence of linear functions lk(x) = ak + bk x such that
+ (i) sup_{B_{2δµ^k}} |u − lk| ≤ µ^(k(1+γ)),
+ (ii) |ak − ak−1| ≤ µ^((k−1)(1+γ)),
+ (iii) µ^(k−1) |bk − bk−1| ≤ C µ^((k−1)(1+γ)),
+ (iv) |u − lk| ≤ µ^(−k(γ′−γ)) δ^(−(1+γ′)) |x|^(1+γ′) for x ∈ B^c_{2δµ^k}, (A.3)
+ where 0 < γ < γ′ < β do not depend on s. We plan to proceed by induction. When k = 0, since ∥u∥L∞(Rd) ≤ 1, (A.3) holds with l−1 = l0 = 0. Assume (A.3) holds for some k; we shall show (A.3) for k + 1.
+ Let ξ : Rd → [0, 1] be a continuous function such that ξ(x) = 1 for x ∈ B3 and ξ(x) = 0 for x ∈ B4^c. Let us define
+ wk(x) = (u − ξ lk)(δµ^k x) / µ^(k(1+γ)).
+ We claim that there exists a universal constant C > 0 such that for all k we have
+ Lrk,s[x, wk] − C0 rk s |Dwk(x)| ≤ C δ^2 µ^(k(1−γ)) ≤ C δ^2,
+ Lrk,s[x, wk] + C0 rk s |Dwk(x)| ≥ −C δ^2 µ^(k(1−γ)) ≥ −C δ^2, (A.4)
+ in B2 in the viscosity sense. Let φ ∈ C2(B2) ∩ C(Rd) which touches wk from below at x′ in B2. Let
+ ψ(x) := µ^(k(1+γ)) φ(x/(δµ^k)) + ξ lk(x).
+ Then ψ ∈ C2(B_{2δµ^k}) ∩ C(Rd) is bounded and touches u from below at δµ^k x′. Taking rk = δµ^k, we have
+ Irk,s_θν[x′, φ] = δ^2 µ^(k(1−γ)) Is_θν[rk x′, ψ − ξ lk].
+ Thus we get
+ Lrk,s[x′, φ] − C0 rk s |Dφ(x′)|
+ = δ^2 µ^(k(1−γ)) ( sup_{θ∈Θ} inf_{ν∈Γ} { Tr aθν(s rk x′) D^2 ψ(rk x′) + Is_θν[rk x′, ψ − ξ lk] } − s C0 |Dψ(rk x′) − bk| )
+ ≤ δ^2 µ^(k(1−γ)) ( Ls[rk x′, ψ] − s C0 |Dψ(rk x′)| + sup_{θ∈Θ} inf_{ν∈Γ} {−Is_θν[rk x′, ξ lk]} + s C0 |bk| )
+ ≤ C δ^2 µ^(k(1−γ)) ≤ C δ^2.
+ In the second to last inequality we used that Ls[x, u] − C0 s |Du(x)| ≤ 1, that |ak| and |bk| are uniformly bounded, and that for all x′ ∈ B2 the quantity sup_{θ∈Θ} inf_{ν∈Γ} {−Is_θν[rk x′, ξ lk]} is bounded independently of s and k. Thus we have proved
+ Lrk,s[x, wk] − C0 rk s |Dwk(x)| ≤ C δ^2 in B2,
+ in the viscosity sense. Similarly the other inequality in (A.4) can be proven.
+ Define w′k(x) := max{ min{wk(x), 1}, −1 }. We see that w′k is uniformly bounded independently of k. We claim that in B_{3/2},
+ Lrk,s[x, w′k] − C0 rk s |Dw′k(x)| ≤ C δ^2 + ω1(δ),
+ Lrk,s[x, w′k] + C0 rk s |Dw′k(x)| ≥ −C δ^2 − ω1(δ). (A.5)
+ Now take any bounded φ ∈ C2(B2) ∩ C(Rd) that touches w′k from below at x′ in B3/2. By the definition of w′k, in B2 we have |wk| = |w′k| ≤ 1 and φ touches wk from below at x′. Hence
+ sup_{θ∈Θ} inf_{ν∈Γ} { Tr aθν(s rk x′) D^2 φ(x′) + ∫_{B1/2} ( φ(x′ + z) − φ(x′) − 1_{B_{1/(rs)}}(z) Dφ(x′) · z ) (rk s)^(d+2) Nθν(rk s x, s rk z) dz
+ + ∫_{Rd\B1/2} ( wk(x′ + z) − w′k(x′ + z) + w′k(x′ + z) − φ(x′) − 1_{B_{1/(rs)}}(z) Dφ(x′) · z ) (rk s)^(d+2) Nθν(rk s x, s rk z) dz } − C0 rk s |Dφ(x′)| ≤ C δ^2.
+ Therefore by Definition 2.1 of viscosity supersolution and using the bounds on the kernel we get the following estimate:
+ Lrk,s[x, w′k] − C0 rk s |Dw′k(x)| ≤ ∫_{Rd\B1/2} | wk(x′ + z) − w′k(x′ + z) | (rk s)^(d+2) k(rk s z) dz + C δ^2
+ in the viscosity sense. By the inductive assumptions, ak and bk are uniformly bounded. Since ∥u∥L∞(Rd) ≤ 1 and ξ lk is uniformly bounded, |wk| ≤ C µ^(−k(1+γ)) in Rd. Using (iv) from (A.3) we have
+ |wk(x)| = (u − ξ lk)(rk x) / µ^(k(1+γ)) ≤ (1/rk)^(1+γ′) |rk x|^(1+γ′) = |x|^(1+γ′)
+ for any x ∈ B2^c ∩ B_{2/rk}. Again, for any x ∈ B^c_{2/rk} we find
+ |wk(x)| ≤ C µ^(−k(1+γ′)) µ^(−k(γ−γ′)) ≤ C µ^(−k(1+γ′)) ≤ C (δ^(1+γ′)/2) |x|^(1+γ′) ≤ C |x|^(1+γ′).
+ Now, since w′k is uniformly bounded, we have for x ∈ B2^c,
+ |wk| + |w′k − wk| ≤ C min{ |x|^(1+γ′), µ^(−k(1+γ)) }. (A.6)
+ For x′ ∈ B3/2, using (A.6) we have the following estimate:
+ ∫_{Rd} | wk(x′ + z) − w′k(x′ + z) | (rk s)^(d+2) k(rk s z) dz
+ ≤ ∫_{{z : |x′+z| ≥ 2} ∩ B_{1/rk}} | wk − w′k |(x′ + z) (rk s)^(d+2) k(rk s z) dz + δ^2 µ^(k(1−γ)) ∫_{B^c_{1/rk}} (rk s)^(d+2) k(rk s z) / (δ µ^(−k))^2 dz
+ ≤ C ( ∫_{B^c_{1/2} ∩ B_{1/√rk}} |z|^2 (rk s)^(d+2) k(rk s z) dz + rk^((1−γ′)/2) ∫_{B^c_{1/√rk} ∩ B_{1/rk}} |z|^2 (rk s)^(d+2) k(rk s z) dz + δ^2 µ^(k(1−γ)) ∫_{B^c_s} s^2 k(z) dz )
+ ≤ C ( ∫_{B_{√rk}} |y|^2 k(y) dy + ( rk^((1−γ′)/2) + δ^2 µ^(k(1−γ)) ) ∫_{Rd} (1 ∧ |y|^2) k(y) dy ).
+ Hence,
+ ∫_{Rd} | wk(x′ + z) − w′k(x′ + z) | k_{rk,s}(z) dz ≤ C̃ ( ∫_{B_√δ} |y|^2 k(y) dy + δ^((1−γ′)/2) + δ^2 ) = ω1(δ),
+ where ω1(δ) → 0 as δ → 0. Therefore we have proved Lrk,s[x, w′k] − C0 rk s |Dw′k(x)| ≤ C δ^2 + ω1(δ). The other inequality of (A.5) can be proved in a similar manner.
+ Since w′k satisfies the equation (A.5), by [44, Lemma 2.1] we have ∥w′k∥Cβ(B1) ≤ M1 for some M1 independent of k, s. Now we consider a function h which solves
+ L0,s(x0)[x, h] = 0 in B1, h = w′k on ∂B1.
+ Existence of such h can be seen from [51, Theorem 1]. Moreover, using [51, Theorem 2] we have ∥h∥Cα(B1) ≤ M2, where α < β/2 and M2 is independent of k, s. Now for any 0 < ε < 1, let r0 := r0(ε) and η := η(ε) be as given in Lemma A.1. Also, for x ∈ B1 and δ := δ(ε) ≤ r0, we have
+ Lrk,s[x, w′k] + C0 rk s |Dw′k(x)| ≥ −η,
+ Lrk,s[x, w′k] − C0 rk s |Dw′k(x)| ≤ η.
+ Therefore by Lemma A.1 we conclude |w′k − h| ≤ ε in B1. Again, by using [16, Corollary 5.7], we have h ∈ C1,β(B1/2) and we can take a linear part l(x) := a + bx of h at the origin. By the C1,β estimate of L0,s(x0) and |w′k| ≤ 1 in B1 we obtain that the coefficients of l, i.e. a, b, are bounded independently of k, s. Further, for x ∈ B1/2 we have
+ |h(x) − l(x)| ≤ C1 |x|^(1+β),
+ where C1 is independent of k, s. Hence, using the previous estimate we get
+ |w′k(x) − l(x)| ≤ ε + C1 |x|^(1+β) in B1/2.
+ Again using (A.6) and |wk| ≤ 1 in B2 we have
+ |wk(x) − l(x)| ≤ 1 + |a| + |b| ≤ C2 in B1,
+ |wk(x) − ξ(δµ^k x) l(x)| ≤ C |x|^(1+γ′) + C3 |x| in B1^c.
+ Next, defining
+ lk+1(x) := lk(x) + µ^(k(1+γ)) l(δ^(−1) µ^(−k) x),
+ wk+1(x) := (u − ξ lk+1)(δµ^(k+1) x) / µ^((k+1)(1+γ)),
+ and following the proof of [44, Theorem 4.1], we conclude that (A.3) holds for k + 1. This completes the proof.
+
+ Acknowledgement. We thank Anup Biswas for several helpful discussions during the preparation of this article. Mitesh Modasiya is partially supported by a CSIR PhD fellowship (File no. 09/936(0200)/2018-EMR-I).
+ References
3163
+ [1] D. Applebaum: Lévy processes and stochastic calculus. Second edition. Cambridge Studies in Advanced Mathe-
3164
+ matics, 116. Cambridge University Press, Cambridge, 2009. xxx+460 pp. ISBN: 978-0-521-73865-1
3165
+ [2] G. Barles, E. Chasseigne and C. Imbert: Lipschitz regularity of solutions for mixed integro-differential equations.
3166
+ J. Differential Equations 252 (2012), no. 11, 6012–6060.
3167
+ [3] R.F. Bass and M. Kassmann: Moritz Hölder continuity of harmonic functions with respect to operators of variable
3168
+ order. Comm. Partial Differential Equations 30 (2005), no. 7-9, 1249–1259.
3169
+ [4] R.F. Bass and M. Kassmann: Harnack inequalities for non-local operators of variable order. Trans. Amer. Math.
3170
+ Soc. 357 (2005), no. 2, 837–850.
3171
+ [5] R.F. Bass and D.A. Levin: Harnack inequalities for jump processes. Potential Anal. 17 (2002), no. 4, 375–388.
3172
+ [6] S. Biagi, S. Dipierro, E. Valdinoci and E. Vecchi: A Faber-Krahn inequality for mixed local and nonlocal operators.
3173
+ To appear in Journal d’Analyse Mathématique.
3174
+ [7] S. Biagi, S. Dipierro, E. Valdinoci and E. Vecchi: Mixed local and nonlocal elliptic operators: regularity and
3175
+ maximum principles, Communications in Partial Differential Equations 47 (2022), no. 3, 585–629
3176
+ [8] S. Biagi, E. Vecchi, S. Dipierro and E. Valdinoci: Semilinear elliptic equations involving mixed local and nonlocal
3177
+ operators, Proceedings of the Royal Society of Edinburgh Section A: Mathematics, DOI:10.1017/prm.2020.75
3178
+ [9] A. Biswas and M. Modasiya: Regularity results of nonlinear perturbed stable-like operators. Differential Integral
3179
+ Equations 33 (2020), no. 11-12, 597-624.
3180
+ [10] A. Biswas and M. Modasiya: Mixed local-nonlocal operators: maximum principles, eigenvalue problems and their
3181
+ applications, preprint, 2021. arXiv: 2110.06746
3182
+ [11] A.Biswas, M. Modasiya and A. Sen: Boundary regularity of mixed local-nonlocal operators and its application.
3183
+ Annali di Matematica (2022). https://doi.org/10.1007/s10231-022-01256-0
3184
+ [12] A. Biswas and S. Khan: Existence-Uniqueness of nonlinear integro-differential equations with drift in Rd, preprint
3185
+ (2022), https://doi.org/10.48550/arXiv.2206.13797.
3186
+ [13] I.H. Biswas: On zero-sum stochastic differential games with jump-diffusion driven state: a viscosity solution frame-
3187
+ work. SIAM J. Control Optim. 50 (2012), no. 4, 1823–1858.
3188
+ [14] B.Böttcher; R.L. Schilling and J.Wang: Lévy matters. III. Lévy-type processes: construction, approximation and
3189
+ sample path properties. With a short biography of Paul Lévy by Jean Jacod. Lecture Notes in Mathematics, 2099.
3190
+ Lévy Matters. Springer, Cham, 2013. xviii+199 pp. ISBN: 978-3-319-02683-1; 978-3-319-02684-8.
3191
+ [15] L. A. Caffarelli: Non-local diffusions, drifts and games. Nonlinear partial differential equations, 37–52, Abel Symp.,
3192
+ 7, Springer, Heidelberg, 2012
3193
+ [16] L. A. Caffarelli and X. Cabré : Fully nonlinear elliptic equations. American Mathematical Society Colloquium
3194
+ Publications, 43. American Mathematical Society, Providence, RI, 1995. vi+104 pp. ISBN: 0-8218-0437-5
3195
+ [17] L. A. Caffarelli and L. Silvestre: Regularity theory for fully nonlinear integro-differential equations, Comm. Pure
3196
+ Appl. Math. 62 (2009), 597–638.
3197
+ [18] L. A. Caffarelli and L. Silvestre: Regularity results for nonlocal equations by approximation, Arch. Ration. Mech.
3198
+ Anal. 200 (2011), no. 1, 59–88
3199
+ [19] H. Chang Lara and G. Dávila: Regularity for solutions of nonlocal, nonsymmetric equations. Ann. Inst. H. Poincaré
3200
+ C Anal. Non Linéaire 29 (2012), no. 6, 833–859.
3201
+ [20] R. Cont and P. Tankov: Financial Modelling with Jump Processes. Chapman and Hall, 552 pages. ISBN
3202
+ 9781584884132.
3203
+ [21] M. C. Delfour and J.-P. Zolésio: Shapes and geometries. Metrics, analysis, differential calculus, and optimization.
3204
+ Second edition. Advances in Design and Control, 22. Society for Industrial and Applied Mathematics (SIAM),
3205
+ Philadelphia, PA, 2011. xxiv+622 pp.
3206
+
3207
+ BOUNDARY REGULARITY
3208
+ 33
3209
+ [22] C. De Filippis and G. Mingione: Gradient regularity in mixed local and nonlocal problems. Mathematische Annalen.
3210
+ DOI: https://doi.org/10.1007/s00208-022-02512-7
3211
+ [23] S.Dipierro and E.Valdinoci: Description of an ecological niche for a mixed local/nonlocal dispersal: an evolution
3212
+ equation and a new Neumann condition arising from the superposition of Brownian and Lévy processes. Phys. A
3213
+ 575 (2021), Paper No. 126052, 20 pp.
3214
+ [24] S. Dipierro; E.P. Lippi and E. Valdinoci :(Non)local logistic equations with Neumann conditions. arXiv:2101.02315.
3215
+ [25] M. Foondun: Harmonic functions for a class of integro-differential operators, Potential Anal. 31(1) (2009), 21–44
3216
+ [26] M.G. Garroni and J.L. Menaldi: Second order elliptic integro-differential problems, Chapman & Hall/CRC Re-
3217
+ search Notes in Mathematics, 430. Chapman & Hall/CRC, Boca Raton, FL, 2002. xvi+221 pp.
3218
+ [27] P. Garain and J. Kinnunen: On the regularity theory for mixed local and nonlocal quasilinear elliptic equations,
3219
+ to appear in Transactions of the AMS, 2022
3220
+ [28] P. Garain and E. Lindgren: Higher Hölder regularity for mixed local and nonlocal degenerate elliptic equations.
3221
+ Preprint. Arxiv: 2204.13196
3222
+ [29] E. DeGiorgi: Sulla differenziabilità e l’analiticità delle estremali degli integrali multipli regolari. (Italian) Mem.
3223
+ Accad. Sci. Torino. Cl. Sci. Fis. Mat. Nat. (3) 3 1957 25–43.
3224
+ [30] G. Grubb: Local and nonlocal boundary conditions for µ-transmission and fractional elliptic pseudodifferential
3225
+ operators. Anal. PDE 7 (2014), no. 7, 1649–1682.
3226
+ [31] G. Grubb: Fractional Laplacians on domains, a development of Hörmander’s theory of µ-transmission pseudodif-
3227
+ ferential operators. Adv. Math. 268 (2015), 478–528.
3228
+ [32] A. Iannizzotto, S. Mosconi and M. Squassina: Fine boundary regularity for the degenerate fractional p-Laplacian.
3229
+ J. Funct. Anal. 279 (2020), no. 8, 108659, 54 pp.
3230
+ [33] J. L. Kazdan: Prescribing The Curvature Of A Riemannian Manifold, CBMS Reg. Conf. Ser. Math.57, Amer.
3231
+ Math. Soc., Providence, 1985.
3232
+ [34] M. Kim, P. Kim, J. Lee and K-A Lee: Boundary regularity for nonlocal operators with kernel of variable orders.
3233
+ J. Funct. Anal. 277 (2019), no. 1, 279–332.
3234
+ [35] M.Kim and K-A. Lee: Regularity for fully nonlinear integro-differential operators with kernels of variable orders.
3235
+ Nonlinear Anal. 193 (2020), 111312, 27 pp.
3236
+ [36] S.Kim, Y-C. Kim and K-A Lee: Regularity for fully nonlinear integro-differential operators with regularly varying
3237
+ kernels. Potential Anal. 44 (2016), no. 4, 673–705.
3238
+ [37] Y-C. Kim and K-A. Lee: Regularity results for fully nonlinear integro-differential operators with nonsymmetric
3239
+ positive kernels. Manuscripta Math. 139 (2012), no. 3-4, 291–319.
3240
+ [38] S. Kitano: Harnack inequalities and Hölder estimates for fully nonlinear integro-differential equations with weak
3241
+ scaling conditions. https://doi.org/10.48550/arXiv.2207.02617
3242
+ [39] D.Kriventsov: C1,α interior regularity for nonlinear nonlocal elliptic equations with rough kernels. Comm. Partial
3243
+ Differential Equations 38 (2013), no. 12, 2081–2106.
3244
+ [40] N. Krylov: Boundedly inhomogeneous elliptic and parabolic equations in a domain, Izv. Akad. Nauk SSSR Ser.
3245
+ Mat. 47 (1983), 75–108.
3246
+ [41] N.V. Krylov and M.V. Safonov: An estimate for the probability of a diffusion process hitting a set of positive
3247
+ measure. (Russian) Dokl. Akad. Nauk SSSR 245 (1979), no. 1, 18–20.
3248
+ [42] J. Moser: A Harnack inequality for parabolic differential equations. Comm. Pure Appl. Math. 17 (1964), 101–134.
3249
+ [43] C. Mou: Existence of Cα solutions to integro-PDEs, Calc. Var. Partial Diff. Equ. 58(4) (2019) , 1–28
3250
+ [44] C. Mou and Y.P. Zhang: Regularity Theory for Second Order Integro-PDEs, Potential Anal 54 (2021), 387–407
3251
+ [45] J. Nash: Continuity of solutions of parabolic and elliptic equations. Amer. J. Math. 80 (1958), 931–954.
3252
+ [46] X. Ros-Oton and J. Serra: The Dirichlet problem for the fractional Laplacian: regularity up to the boundary, J.
3253
+ Math. Pures Appl. 101 (2014), 275–302.
3254
+ [47] X. Ros-Oton and J. Serra: Boundary regularity for fully nonlinear integro-differential equations, Duke Math. J.
3255
+ 165 (2016), 2079–2154.
3256
+ [48] X. Ros-Oton and J. Serra: Boundary regularity estimates for nonlocal elliptic equations in C1 and C1,α domains,
3257
+ Annali di Matematica Pura ed Applicata 196 (2017), 1637–1668.
3258
+ [49] J. Serra: Regularity for fully nonlinear nonlocal parabolic equations with rough kernels. Calc. Var. Partial Differ-
3259
+ ential Equations 54 (2015), no. 1, 615–629.
3260
+ [50] R. Schilling, R. Song, and Z. Vondraček: Bernstein Functions, Walter de Gruyter, 2010.
3261
+ [51] B. Sirakov: Solvability of uniformly elliptic fully nonlinear PDE. Arch. Ration. Mech. Anal. 195 (2010), no. 2,
3262
+ 579–607.
3263
+ [52] L.Silvestre: Hölder estimates for solutions of integro-differential equations like the fractional Laplace. Indiana Univ.
3264
+ Math. J. 55 (2006), no. 3, 1155–1174.
3265
+
6dE0T4oBgHgl3EQffAAW/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
6tE4T4oBgHgl3EQf1w38/content/tmp_files/2301.05294v1.pdf.txt ADDED
@@ -0,0 +1,1742 @@
+ Learning to Control and Coordinate Hybrid Traffic Through Robot Vehicles at Complex and Unsignalized Intersections
+ Dawei Wang,1 Weizi Li,2 Lei Zhu,3 Jia Pan1
+ 1The University of Hong Kong
+ 2The University of Memphis
+ 3The University of North Carolina at Charlotte
+ arXiv:2301.05294v1 [cs.LG] 12 Jan 2023
+
+ Abstract
+ Intersections are essential road infrastructures for traffic in modern metropolises; however, they can also be the bottleneck of traffic flows due to traffic incidents or the absence of traffic coordination mechanisms such as traffic lights. Thus, various control and coordination mechanisms that go beyond traditional control methods have been proposed to improve the efficiency of intersection traffic. Amongst these methods, the control of foreseeable hybrid traffic that consists of human-driven vehicles (HVs) and robot vehicles (RVs) has recently emerged. We propose a decentralized reinforcement learning approach for the control and coordination of hybrid traffic at real-world, complex intersections, a topic that has not been previously explored. Comprehensive experiments are conducted to show the effectiveness of our approach. In particular, we show that using 5% RVs, we can prevent congestion formation inside the intersection under the actual traffic demand of 700 vehicles per hour. In contrast, without RVs, congestion starts to develop when the traffic demand reaches as low as 200 vehicles per hour. Further performance gains (reduced waiting time of vehicles at the intersection) are obtained as the RV penetration rate increases. When there exist more than 50% RVs in traffic, our method starts to outperform traffic signals on the average waiting time of all vehicles at the intersection. Our method is also robust against both blackout events and sudden RV percentage drops, and enjoys excellent generalizability, which is illustrated by its successful deployment in two unseen intersections.
+
+ 1 Introduction
+ Uninterrupted traffic flows are the beating heart of cities. They are not only the driving force for socio-economic development but also an assurance for essential supplies' delivery to the populace during emergent events. However, even with existing traffic control and management
+ methods (such as traffic signals, ramp meters, street signs, and tolls) working at their full capacity, traffic delays and congestion are still a worldwide problem causing more than $100 billion in external costs annually (1). Given that urbanization and motorization are projected to continue rising in the decades to come (2, 3), there is an immediate need for better design and management of our traffic systems.
+ Traffic is an interplay between vehicles and road infrastructure. Contemporary urban road networks largely consist of linearly-coupled road segments connected with intersections. The key to this design's functionality is the intersection, where traffic flows from different directions can interchange and disperse. Any incident at the intersection can block traffic on all connecting roads and cause traffic spillback over further upstream roads. It is not uncommon to observe an entire city's traffic becoming paralyzed because of the breakdown of major intersections. Unfortunately, intersections are prone to traffic incidents due to their varied (and potentially complex) topology and conflicting traffic streams. In the U.S., nearly half of all crashes take place at intersections (4). Additionally, extreme weather and energy shortages can take down our power grids, causing the main intersection control method, traffic signals, to be absent for days, if not weeks. This leaves traffic stranded and bound to congest (5–7). This leaves us with the question: how to ensure traffic flows uninterrupted at intersections?
+ While exhaustive transport policies and control methods exist to contain traffic delays and congestion, technological advancements such as connected and autonomous vehicles (CAVs) offer us new opportunities. Recent studies (8, 9) have demonstrated the possibilities of using self-driving robot vehicles (RVs) to enhance traffic throughput at intersections; however, these studies assume that all vehicles present are mutually connected and centrally controlled, a condition that may not be realized in the near future. The adoption of vehicles with different levels of autonomy has been and will continue to be gradual. A mixture of human-driven vehicles (HVs) and RVs, i.e., hybrid traffic, will long be experienced before the advent of fully autonomous transportation systems. Although hybrid traffic can be much more challenging to model and control compared to 100% RVs (considering the diversity and suboptimality of the human drivers' behaviors), we may still be able to regulate it by first algorithmically determining the behaviors of the RVs and then using them to influence nearby HVs (10). Existing studies have demonstrated the potential of this hybrid system's control in scenarios such as ring roads, figure-eight roads (10), highway bottleneck and merge (11, 12), two-way intersections (13), and roundabouts (14). However, most of these scenarios do not embed real-world complexity, and the number of vehicles that can potentially be in conflict is small.
+ In this research, we study the control and coordination of hybrid traffic at intersections. Given the importance of intersections, various traffic control mechanisms have been developed (15), with three approaches being the most prominent.
+ • Traffic signal control (16–18): a well-studied topic. Nevertheless, as we mentioned before, traffic lights are vulnerable to extreme conditions, and thus cannot guarantee uninterrupted traffic flows at intersections.
+ • Autonomous Intersection Management Systems (AIMS) (19, 20): a robust approach even during emergent events. However, this approach assumes all vehicles are centrally controlled and thus is not applicable to hybrid traffic.
+ • Reinforcement learning (RL): RL has shown great potential in high-dimensional, multi-agent control tasks (21–25) in recent years. It is a promising tool for hybrid traffic control because its model-free design copes with the absence of effective models for hybrid traffic. To date, most successful examples of hybrid traffic control (including the studies we mentioned before (10–14)) take this approach (26).
+ While significant progress has been made, none of the above-mentioned studies addresses hybrid traffic at real-world, complex intersections where a large number of vehicles can potentially be in conflict under actual traffic demands. Our study subjects include four real-world intersections, along with their actual traffic data, from Colorado Springs, CO, USA.¹ The intersection layout and reconstructed traffic are shown in Fig. 1. The comparison of our work and some example studies of intersections is illustrated in Fig. 2. To the best of our knowledge, our work is the first to control and coordinate hybrid traffic at unsignalized intersections with both complicated topology and real-world traffic demands.
+ ¹ https://coloradosprings.gov/
+
+ Figure 1: Our study subjects include four complex intersections at Colorado Springs, CO, USA. The traffic is reconstructed using the actual traffic data collected at these intersections.
+
+ Figure 2: Comparison of some state-of-the-art studies on intersection traffic control. Ours and Yan are the only two studies applying RL to hybrid traffic control at intersections; between the two, ours is the only method based on real GIS data. Note that since not all measurements are provided, the shown features of each study are our best estimates after examining the study. For complete information, we refer readers to COOR-PLT (27), DASMC (28), Yang (9), Malikopoulos (20), Mirheli (29), Chen (30), Yan (13), and Miculescu (19).
+ [Figure 2 plot: intersection capacity (number of vehicles) versus number of in-flow lanes; markers distinguish control method (learning vs. other), control subject (hybrid traffic vs. all vehicles), and GIS data (real vs. simulation).]
+
+ The control and coordination of intersection traffic pose many challenges, which include the varied topology of intersections, changing traffic demands, and conflicting traffic streams. We propose a decentralized RL approach to handle these challenges. Our approach's pipeline is shown in Fig. 3. First, after entering the control zone, each RV is controlled using our method and is assumed to obtain a full observation of the traffic condition within the control zone.
+ Next, each RV encodes the perceived traffic condition into a fixed-length representation. This representation contains traffic information of eight moving directions (see Fig. 3a). For each direction, both macroscopic traffic features, such as queue length and waiting time, and microscopic traffic features, such as vehicles' locations inside the intersection, are recorded. The representation is then adopted by each RV in front of the intersection entrance line to make a high-level decision, 'Stop' or 'Go' (see Fig. 3b). The high-level decisions from different RVs in front of the entrance line are shared and coordinated via vehicle-to-vehicle (V2V) communication (Fig. 3c). Lastly, each RV travels through the intersection by fulfilling its high-level decision using a low-level control mechanism described in Sec. 4.5.2.
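+ To make the encoding concrete, the sketch below illustrates one way such a fixed-length observation and a high-level 'Stop'/'Go' decision could be assembled. It is only an illustrative Python sketch, not the paper's implementation: the feature names, the cap on the number of tracked vehicles per direction, and the policy interface are assumptions made here for clarity.
+ import numpy as np
+
+ DIRECTIONS = ["E-C", "E-L", "W-C", "W-L", "N-C", "N-L", "S-C", "S-L"]  # eight moving directions
+ MAX_VEHICLES = 5   # assumed cap on tracked vehicles inside the intersection per direction
+
+ def encode_observation(traffic):
+     """Build a fixed-length vector from per-direction traffic statistics.
+     `traffic` maps a direction to a dict with 'queue_length', 'waiting_time',
+     and 'positions' (normalized locations of vehicles inside the intersection)."""
+     features = []
+     for d in DIRECTIONS:
+         info = traffic.get(d, {"queue_length": 0, "waiting_time": 0.0, "positions": []})
+         features.append(info["queue_length"])          # macroscopic: queue length
+         features.append(info["waiting_time"])          # macroscopic: accumulated waiting time
+         pos = list(info["positions"])[:MAX_VEHICLES]   # microscopic: vehicle locations
+         pos += [0.0] * (MAX_VEHICLES - len(pos))       # zero-pad to a fixed length
+         features.extend(pos)
+     return np.asarray(features, dtype=np.float32)
+
+ def high_level_action(policy, observation):
+     """Each RV at the entrance line picks 'Stop' or 'Go'; decisions are then
+     shared with nearby RVs over V2V and checked for conflicts before execution."""
+     go_probability = policy(observation)               # assumed: policy returns P(Go)
+     return "Go" if go_probability > 0.5 else "Stop"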
+ Figure 3: The pipeline of our approach. a. The traffic condition of an intersection is encoded by each RV inside the control zone to be a fixed-length representation. This representation contains both macroscopic traffic features, such as queue length and waiting time, and microscopic traffic features, such as vehicles' locations along each traffic moving direction (E, W, N, and S represent east, west, north, and south, respectively; C means cross and L means left-turn). b. The traffic-condition representation is then used by each RV in front of the entrance line to decide either 'Stop' or 'Go' at the high level. c. These high-level decisions of the RVs are communicated and coordinated to ensure conflict-free movements inside the intersection.
+
+ We conduct various experiments to evaluate our approach using the reconstructed traffic from real-world traffic data in SUMO (31). Our results show that with 50% or more RVs, our method outperforms traffic light control in terms of efficiency. In general, better performance is gained as the RV penetration rate increases from 50% to 100%. For example, the average waiting time is reduced by 16.08%, 40.29%, and 45.01% compared to traffic light control at intersection 229 when the RV penetration rate is 50%, 70%, and 90%, respectively. With 100% RVs, our method can reduce the average waiting time of the entire intersection traffic by up to 75% compared to traffic light control and 96% compared to the traffic light absence baseline. These results demonstrate the effectiveness of our approach. In addition, we analyze the reward function by Yan and Wu (13) and justify the design rationale of our reward function. We show that our local reward alternates between conflicting moving directions and grants a direction with a long-waiting queue the priority to travel. We also show that our global reward reflects the traffic condition of the entire intersection in a timely fashion. Then, we explore the relationship between traffic demands, congestion, and RV penetration rates. The results show that with just 5% RVs, we can prevent congestion at the intersection under the actual traffic demand of 700 v/h. In contrast, without RVs, congestion emerges when the traffic demand is higher than 200
+ v/h. Lastly, we test the robustness and generalizability of our approach. For robustness, we first conduct a 'blackout' experiment in which traffic lights suddenly stop working. During such an event, the RVs act as self-organized, movable 'traffic lights' to coordinate the traffic and prevent congestion. Second, we examine the impact of sudden RV rate drops. The results demonstrate that even with a 60% drop (from 100% to 40%), our method can still maintain stable and efficient traffic flows at the intersection. For generalizability, we deploy our method (without refining) in two unseen intersections: not only does our method prevent congestion, but starting from 50%–60% RVs, our method surpasses traffic light control on saving the average waiting time of all vehicles at the two intersections. The details of these results are introduced next.
+
+ 2 Results
+ In this section, we first introduce the baselines for evaluating our method. Then, we present the overall results of our evaluations at four real-world intersections. Next, we present a series of experiments to analyze our reward function and discuss the insights of its design. After that, we explore the relationship between traffic demands, congestion, and RV penetration rates. Lastly, we demonstrate the robustness and generalizability of our approach to blackout events and unseen intersections, respectively.
+
+ 2.1 Baselines
+ To evaluate our method, we compare it with the following four baselines.
+ • TL: the actual traffic signal program deployed in the city of Colorado Springs, CO, USA.
+ • NoTL: no traffic light control; all traffic signals are off.
+ • Yan (13): the state-of-the-art multi-agent RL traffic controller with a 100% RV penetration rate. In order to use this approach, we make necessary changes to it to accommodate the varied intersection topology by extending the network input to the maximum number of incoming lanes in our work.
+ • Yang (9): the state-of-the-art CAV control method for hybrid traffic at unsignalized intersections.
+
+ 2.2 Overall Performance
+ We evaluate our approach under RV penetration rates ranging from 40% to 100%. At each rate, we conduct ten experiments and report the averaged results. In each experiment, HVs are constructed using real-world traffic turning count data (see Sec. 4.2 for details). However, the behavior and location of each HV are stochastic. Each experiment runs for 1000 steps (1 step = 1 second in simulation). We use all four intersections shown in Fig. 1 for our experiments. The features of these intersections are given in Tab. S1.
+ The overall results, measured using reduced average waiting time in percentage, are listed in Table 1. The waiting time of a vehicle is the time that the vehicle spends in front of the entrance line waiting to enter the intersection. The average waiting time of a moving direction is then the average of the waiting times of all vehicles along that direction. The average waiting time of an intersection is the average of the waiting times of all vehicles at the intersection. Overall, when the RV penetration rate is 50% or higher, our method outperforms traffic signals. Additionally, better performance is gained, in general, as the RV penetration rate increases. This shows that the more RVs can interact with their nearby HVs, the more stable the regulation and coordination of the entire traffic can be.
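+ As a concrete illustration of how these per-direction and per-intersection averages can be computed from per-vehicle measurements, a minimal sketch is given below. It assumes hypothetical inputs (a mapping from each vehicle to its moving direction and its accumulated waiting time at the entrance line, e.g. as reported by a simulator such as SUMO); it is not the paper's evaluation code.
+ from collections import defaultdict
+
+ def average_waiting_times(vehicle_direction, vehicle_waiting_time):
+     """Return (per-direction average waiting time, intersection-wide average)."""
+     per_direction = defaultdict(list)
+     for vehicle_id, direction in vehicle_direction.items():
+         per_direction[direction].append(vehicle_waiting_time[vehicle_id])
+     direction_avg = {d: sum(t) / len(t) for d, t in per_direction.items()}
+     all_times = list(vehicle_waiting_time.values())
+     intersection_avg = sum(all_times) / len(all_times) if all_times else 0.0
+     return direction_avg, intersection_avg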
+ Figure 4: The overall results measured in average waiting time at intersection 229. The RIGHT sub-figures are zoomed-in versions of the LEFT sub-figures, obtained by excluding NoTL and Yan. In general, as the RV penetration rate equals or passes 50%, our method achieves consistently better performance over the other four baselines.
+
+ Table 1: Reduced average waiting time (in percentage) at each intersection under various RV penetration rates. When the RV penetration rate is 50% or higher, our method outperforms traffic signals across the board. In general, more time is saved as the RV penetration rate increases.
+ Intersection | TL vs. RVs: 50% | 60% | 70% | 80% | 90% | 100% | NoTL vs. RVs: 100%
+ 229 | 16.08% | 44.02% | 40.29% | 58.35% | 45.01% | 68.62% | 97.09%
+ 449 | 22.67% | 15.21% | 32.56% | 40.47% | 43.06% | 39.71% | 75.03%
+ 332 | 8.50% | 1.20% | 35.22% | 31.86% | 52.34% | 61.15% | 78.80%
+ 334 | 57.42% | 41.88% | 59.51% | 61.72% | 64.67% | 69.71% | 64.43%
+
+ In Fig. 4, we show the detailed performance at intersection 229. The results include two parts. The first part (the top row of the four figures) shows the average waiting time along the eight traffic moving directions. Note that in the actual traffic data, some directions do not have traffic (e.g., E-L for 229) and thus are excluded from the results. The second part (the bottom row of the four figures) reports the influence of different RV penetration rates on the average waiting time. In the same way, Fig. S2, S3, and S4 illustrate the detailed performance at intersections 449, 332, and 334, respectively.
+ Intersection 229. As shown in Fig. 4, for all moving directions, NoTL and Yan perform the worst and are excluded from the zoomed-in sub-figures in the top RIGHT row. From the zoomed-in sub-figure, we can see that the average waiting time is significantly reduced when the RV penetration rate is increased from 40% to 50% (except for W-L). Another major reduction of the waiting time is observed when the RV penetration rate further increases to 60%. For S-C, S-L, W-L, N-C, and N-L, approximately 40% to 60% additional saving in waiting time is achieved when the RV penetration rate increases from 90% to 100%. TL and Yang have similar performance on most moving directions, except for W-L and E-C, where TL performs much worse than Yang. In general, our method starts to outperform TL and Yang when the RV penetration rate is 50% or higher.
+
+ Figure 5: Traffic congestion levels at intersection 229 under different control mechanisms. Our approach with 80% RVs consistently achieves lower levels of congestion than Yang and TL. Unlike Yang and TL, which control intersection traffic using fixed phases, our method learns to use adaptive phases for control based on traffic conditions.
+
+ We further show traffic congestion levels of intersection 229 during our evaluation in Fig. 5. The congestion level is defined as AVT/Threshold, where AVT denotes the average waiting time of all vehicles of a moving direction, and Threshold is for normalization. For the results shown in Fig. 5, Threshold is set to 40, which is the maximum average waiting time during our evaluation at intersection 229. The results illustrate that traffic controlled using our method achieves much lower congestion levels than Yang and TL. In addition, our method can flexibly coordinate conflicting moving directions based on varied traffic conditions, unlike Yang and TL, which employ fixed-phase coordination. These results hint that varied phases of control can positively influence the efficiency of intersection traffic.
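+ For instance, with Threshold = 40 s, a moving direction whose vehicles wait 20 s on average at a given step has a congestion level of 20/40 = 0.5. A minimal sketch of this normalization (the threshold value is the one stated in the text; everything else is generic) is:
+ def congestion_level(avg_waiting_time, threshold=40.0):
+     """Normalized congestion level of one moving direction (AVT / Threshold)."""
+     return avg_waiting_time / threshold
+
+ print(congestion_level(20.0))  # 0.5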
+ Intersection 449. In general, results similar to those of intersection 229 are observed, as shown in Fig. S2. For most moving directions, the performances of Yan and NoTL are worse than ours, except for the direction W-L. Our method with 50% RVs or higher outperforms Yang and TL in nearly all cases.
+ Intersection 332. The results are shown in Fig. S3. We can see that the average waiting time decreases as the RV penetration rate increases from 40% to 100%. Similar to intersections 229 and 449, Yan and NoTL are worse than Yang and TL, as well as our method with an RV penetration rate of 40% or higher, except for the S-C direction. Compared with Yang and TL, our method with at least 70% RVs can outperform them at all moving directions.
+ Intersection 334. The results are shown in Fig. S4. In general, the average waiting time decreases as the RV penetration rate increases. There is an interesting phenomenon where
+ the median of the average waiting time of NoTL is lower than that of TL. This is because intersection 334 has a lower traffic demand than the other three intersections: the peak flow is 515 vehicles per lane per hour, compared to around 700 for the other three intersections. This lowers the chance of congestion inside the intersection and makes the absence of traffic lights less of an obstacle for efficient traffic flows.
+ It is worth mentioning that, across all results of all intersections, the average waiting time of all vehicles may not monotonically decrease when the RV penetration rate increases. The median waiting time of a higher RV percentage can be lower than the median waiting time of a lower RV percentage, e.g., 60% RVs vs. 50% RVs at intersection 449. This is because during repeated experiments, while traffic demands are matched between simulations, the actual data, behaviors, and positions of individual vehicles are stochastic. These unpredictable factors can lead to a large variance in performance.
+ 2.3 Hybrid Reward
+ Reward design is essential to RL. A poorly designed reward can result in inferior performance on a control task; however, it is non-trivial to design a reward for complex tasks that reflects all desiderata of a task and benefits the convergence of the learning process. Our task is intrinsically complex: varied topology and conflicting traffic streams can lead to conflicts inside the intersection, and the use of real-world traffic data can lead to unpredictable and unstable inflow/outflow for the road network.
+ To resolve conflicting movements within the intersection and avoid the negative impact of traffic jams on the learning process, we design our reward function by fusing the collision punishment and the conflict punishment to prevent intersection conflicts. Our insight is to split the reward function into two parts: a local reward and a global reward. The local reward quantifies the influence of each RV's actions on the waiting time and queue length of the traffic on its own moving direction, while the global reward concerns the performance of the whole intersection and encourages RVs from conflicting directions to cooperate (32). According to our experiments, our hybrid reward enables effective and efficient interchange of traffic streams at the intersection. More details of our hybrid reward are presented in Sec. 4.4.3.
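+ Since the exact formulas live in Sec. 4.4.3 (not reproduced in this section), the following Python sketch only illustrates the structure described above: a local term driven by the queue length and waiting time of the RV's own moving direction, and a global term that is positive when intersection-wide congestion eases and negative when it worsens. The weighting coefficients and the use of a simple difference for the global term are assumptions made for illustration.
+ def local_reward(own_queue_length, own_waiting_time, w_queue=1.0, w_wait=1.0):
+     """Penalize long queues and long waits on the RV's own moving direction."""
+     return -(w_queue * own_queue_length + w_wait * own_waiting_time)
+
+ def global_reward(prev_total_waiting, curr_total_waiting):
+     """Positive when congestion over the whole intersection eases, negative when it worsens."""
+     return prev_total_waiting - curr_total_waiting
+
+ def hybrid_reward(own_queue_length, own_waiting_time,
+                   prev_total_waiting, curr_total_waiting,
+                   w_local=0.5, w_global=0.5):
+     return (w_local * local_reward(own_queue_length, own_waiting_time)
+             + w_global * global_reward(prev_total_waiting, curr_total_waiting))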
+ To illustrate why our reward function works for large-scale traffic scenarios, we show an example global reward on a logarithmic scale in the bottom LEFT row of Fig. S1. As shown by the results, our global reward responds to the change of traffic conditions swiftly and thus is a timely indicator for the learning process. The global reward is defined in Sec. 4.4.3. When congestion eases, it will be positive; on the other hand, it will be negative if congestion worsens. Regarding the local reward, we show that it alternates between conflicting moving directions in the MIDDLE and RIGHT sub-figures of Fig. S1. Since the local reward focuses on the traffic condition of each RV's own moving direction, the RV is encouraged to release a long-waiting queue to cross the intersection. Again, the local reward is detailed in Sec. 4.4.3.
+ We also analyze the limitation of the state-of-the-art method's reward function in Text S1, which further supports the design rationale of our reward function.
+ 2.4
495
+ Traffic Demands, Congestion, and RV Percentages
496
+ In previous sections, we show that under real-world traffic demands with traffic signals off,
497
+ congestion starts to develop along with the average waiting time of all vehicles increasing sig-
498
+ nificantly. In this section, we use intersection 229 as the testbed to further explore the relation-
499
+ ship of traffic demands and congestion. The results of our first set of experiments are shown
500
+ in Fig. S5 LEFT. By increasing the traffic demand from 150 v/h to 300 v/h under no traffic
501
+ lights and no RVs, we can see that starting from 200 v/h, congestion starts to form (reflected by
502
+ the low average speed of all vehicles at the intersection). For comparison, we show the actual
503
+ 14
504
+
505
+ Figure 6: Comparison between traffic conditions with and without RVs during a blackout event
506
+ at the intersection 229. The blackout event occurs at the 5-minute mark, when all traffic signals
507
+ stop working. For traffic without RVs, congestion quickly forms within the next 15 minutes. In
508
+ contrast, for traffic with RVs controlled using our approach, no congestion is observed.
509
+ traffic demand at intersection 229 of ∼700 v/h. Although the real-world demand is significantly
510
+ higher than the 200 v/h demand that causes congestion, no congestion forms with just 5% RVs
511
+ deployed in traffic. Fig. S5 RIGHT additionally shows that the minimal RV penetration rate
512
+ needed to prevent congestion under the real-world traffic demand at the intersection is 5%.
513
+ 2.5
514
+ Robustness
515
+ To demonstrate the robustness of our approach, we simulate several blackout events, during
516
+ which all traffic signals are off. The results comparing no RVs and 50% RVs are shown in
517
+ Fig. 6. In Fig. S6, the blackout event occurs at the 100th step. We can observe that if there are
518
+ no RVs, the average waiting time increases significantly due to traffic jams when traffic lights
519
+ are absent. Once the traffic lights are turned off, the intersection is fully congested. In contrast,
520
+ with 50% RVs, the average waiting time remains stable during the blackout event. In essence,
521
+ RVs controlled using our method perform like ‘self-organized traffic lights’ to coordinate the
522
+ traffic at the intersection and prevent gridlocks.
523
+ [Figure 6 panels omitted: intersection snapshots for RV 0% / HV 100% and RV 50% / HV 50%, with the traffic signal on and then off.]
+ In Fig. S7, we show the impact of sudden RV percentage drops on the intersection traffic.
556
+ These sudden drops can be caused by unstable V2V communication, other unforeseeable soft-
557
+ ware failures, or humans taking over the control. The ‘offline’ RVs are taken over by Intelligent
558
+ Driver Model (IDM) (33), which is used for all HVs. All drops occur at the 100th step. As
559
+ expected, the average waiting time of all vehicles at the intersection increases. Nevertheless,
560
+ our method can successfully and quickly stabilize the system and keep the average waiting
+ time below certain thresholds.
562
+ 2.6
563
+ Generalization
564
+ To evaluate the generalizability of our approach, we test on two unseen intersections. These two
565
+ intersections are also taken from the city of Colorado Springs and are shown in Fig. S8. Notice
566
+ that one test scenario is a three-legged intersection, which has a different topology than all our
567
+ training intersections (which are all four-legged). The detailed parameters of the intersections
568
+ are shown in Tab. S2.
569
+ For the unseen four-legged intersection, we directly deploy our trained model without re-
570
+ fining it. The result is illustrated in Fig. S9. Our method works well and beats the traffic light
571
+ control baseline when the RV penetration rate is 60% or higher. With 100% RVs, our method
572
+ can reduce the average waiting time by almost 80% compared to the traffic light control base-
573
+ line.
574
+ We also deploy our trained model without refining it on the unseen three-legged intersection.
575
+ Since it is a three-legged intersection, there are only four directions that need to be coordinated,
576
+ namely S-C, S-L, W-L, and N-C. We set the corresponding input values of other directions (ap-
577
+ pearing in four-legged intersections) to zero for our RL policy. The result is shown in Fig. S10.
578
+ The intersection is jammed when the traffic lights are absent. The average waiting time of the
579
+ NoTL baseline is much higher than others. Although our RL model has never seen this inter-
580
582
+ section and the traffic demand, it still manages to coordinate the traffic and prevent congestion
583
+ at the intersection. Our approach outperforms the traffic light control baseline when the RV
584
+ penetration rate is 50% or higher. Our method with 100% RVs can reduce the average waiting
585
+ time by ∼30% compared to the traffic light control baseline. These results demonstrate the
586
+ excellent generalizability of our approach.
587
+ 3
588
+ Conclusion
589
+ We propose a decentralized RL approach for the control and coordination of hybrid traffic at
590
+ real-world and unsignalized intersections. Our approach consists of three novel techniques to
591
+ handle the complexities of intersections: 1) an encoder to convert the traffic status into a fixed-
592
+ length representation, 2) a hybrid reward function that suits large-scale intersectional traffic,
593
+ and 3) a coordination mechanism to ensure conflict-free movements. Our method is the first
594
+ to control hybrid traffic under real-world traffic conditions at complex intersections. Various
595
+ experiments are conducted to show the effectiveness, robustness, and generalizability of our
+ approach. Detailed analyses are also presented to justify the design choices of the components of
597
+ our method.
598
+ In the future, we would like to further improve our method in three aspects. First, the
599
+ learning algorithm could use a hierarchical design so that the low-level control (e.g., longitu-
600
+ dinal and lateral acceleration) also becomes the RL policy’s output. Second, we want to ease
601
+ the coordination mechanism so that vehicles have more freedom to move inside the intersec-
602
+ tion. Nevertheless, we anticipate that certain prior knowledge remains necessary for ensuring
603
+ no-conflict movements. Finally, we would like to combine our approach with traffic flow pre-
604
+ diction to further improve the performance of coordination: real-world traffic demands fluctuate
605
+ over time, thus accurate flow predictions are very useful in enhancing the effectiveness of the
606
+ control and coordination of intersection traffic.
607
+ 17
608
+
609
+ 4
610
+ Methodology
611
+ 4.1
612
+ Intersection Topology and Conflicting Traffic Streams
613
+ For a common four-legged intersection, there are four moving directions: eastbound (E), west-
614
+ bound (W), northbound (N), and southbound (S); and three turning options at the intersection:
615
+ left (L), right (R), and cross (C). As an example, we use E-L and E-C to denote left-turning
616
+ traffic and crossing traffic that travel eastbound before entering the intersection, respectively.
617
+ The complete notation is shown in Fig. 3a. Inside the intersection, conflicts may occur among
618
+ the moving directions. Here, we define ‘conflict’ as two moving directions intersecting each
619
+ other, e.g., E-C and N-C. It is worth noting that since the right-turning traffic will not enter the in-
620
+ tersection (or will only occupy the intersection for a short period of time), we do not coordinate
621
+ right-turning traffic with traffic from other directions. Our experiments show that this empirical
622
+ choice has minimal effects on the control and coordination of intersection traffic.
623
+ In summary, we consider eight traffic streams that can potentially cause conflicts: E-L, E-
624
+ C, W-L, W-C, N-L, N-C, S-L, and S-C. We further define the conflict-free movement set C =
625
+ {(S-C, N-C), (W-C, E-C), (S-L, N-L), (E-L, W-L), (S-C, S-L), (E-C, E-L), (N-C, N-L), (W-C,
626
+ W-L)}. For each pair in C, the two traffic streams will not conflict with each other; however,
627
+ conflicts can potentially occur in the remaining traffic stream pairs.
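+ For concreteness, the conflict-free set C and the corresponding conflict test can be encoded in a few
+ lines of Python. The sketch below uses the stream notation of this section; the data structure and
+ function names are illustrative assumptions rather than the exact implementation.
+ # Conflict-free movement pairs C (Sec. 4.1), stored as unordered pairs.
+ CONFLICT_FREE = {
+     frozenset(p) for p in [
+         ("S-C", "N-C"), ("W-C", "E-C"), ("S-L", "N-L"), ("E-L", "W-L"),
+         ("S-C", "S-L"), ("E-C", "E-L"), ("N-C", "N-L"), ("W-C", "W-L"),
+     ]
+ }
+
+ def may_conflict(stream_a, stream_b):
+     """Two distinct traffic streams may conflict unless their pair is in C."""
+     if stream_a == stream_b:
+         return False
+     return frozenset((stream_a, stream_b)) not in CONFLICT_FREE
+
+ # Example: E-C and N-C intersect each other, whereas S-C and N-C do not.
+ assert may_conflict("E-C", "N-C") and not may_conflict("S-C", "N-C")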
628
+ 4.2
629
+ Traffic Reconstruction and Simulation
630
+ In order for robot vehicles to interact with human-driven vehicles under real-world traffic condi-
631
+ tions, we need to first reconstruct traffic using actual traffic data and then carry out high-fidelity
632
+ simulations. We reconstruct the intersection traffic using turning count data at each intersection
633
+ provided by the city of Colorado Springs, CO, USA2. The turning count data records the num-
634
+ ber of vehicles moving in a particular direction at the intersection and is collected via in-road
635
+ 2https://coloradosprings.gov/
636
638
+ sensors such as infrastructure-mounted radars.
639
+ Given the GIS data (traffic data and digital map), we pursue traffic simulations in SUMO (31),
640
+ a widely-adopted high-fidelity traffic simulation platform. The reconstructed traffic and traffic
641
+ simulations are showcased in Fig. 1. In SUMO, a directed graph is used to describe the simu-
642
+ lation area. Each edge of the graph represents a road segment with an ID and a vehicle’s route
643
+ is defined by a list of edge IDs. Vehicles are then routed using jtrrouter3 in SUMO based on
+ the turning count data. By default, jtrrouter will select edges that are close to the intersection
645
+ as the starting and ending edges of a route. This type of route can be extremely short and affect
646
+ the simulation fidelity. To mitigate this issue, we adjust the vehicle routes by proposing more
647
+ proper edges for the vehicles to enter and leave the network. Specifically, for the traffic stream
648
+ on the main road that connects the four intersections, we assign the starting and ending edges
649
+ of the routes to the boundary of the main road; for the traffic stream on other roads, the start-
650
+ ing edges are moved to the successor upstream intersection and the ending edges are moved to
651
+ the successor downstream intersection. After re-assigning the starting edge and ending edge of
652
+ each route, ‘extra traffic counts’ can occur. For example, a vehicle traveling through intersec-
653
+ tion 334 from northbound can also travel through intersections 229, 449, and 332, contributing
654
+ to the northbound count for all four intersections. To alleviate this problem, we consider the
655
+ coordination of traffic flows among adjacent intersections to avoid traffic double-counting, and
656
+ then refine the number of routes to ensure the turning counts in the simulation match the actual
657
+ turning count data. Fig. 1 shows the four intersections in our study. To evaluate whether the
658
+ simulated flow resembles the real-world flow in terms of turning counts, we adopt the absolute
659
+ percentage error (APE):
660
+ APE = | TC_real − TC_sim | / TC_real ,    (1)
664
+ where TCreal and TCsim are the turning counts from the real-world traffic and simulated traffic,
665
+ 3https://sumo.dlr.de/docs/jtrrouter.html
666
+ respectively. As a result, the APE scores for intersections 229, 449, 332, and 334 are 0.22, 0.21,
669
+ 0.16, and 0.17, respectively. Since APE = 0 means the exact match of simulated and real-world
670
+ traffic, these low APE scores (i.e., ∼ 0.2) verify the fidelity of our simulations.
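+ As a sanity check, Eq. 1 can be evaluated per intersection from the aggregated turning counts. The
+ short Python sketch below assumes the counts are available as plain dictionaries keyed by movement,
+ which is an illustrative simplification rather than our actual post-processing pipeline.
+ def ape(tc_real, tc_sim):
+     """Absolute percentage error of Eq. 1 over the aggregated turning counts."""
+     total_real = sum(tc_real.values())
+     total_sim = sum(tc_sim.get(k, 0) for k in tc_real)
+     return abs(total_real - total_sim) / total_real
+
+ # Illustrative numbers only: a ~20% mismatch, similar to the scores reported above.
+ print(ape({"E-C": 400, "N-C": 300}, {"E-C": 330, "N-C": 230}))  # 0.2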
671
+ 4.3
672
+ Hybrid Traffic Generation
673
+ To create a mixture of robot and human-driven vehicles, at each time step, newly spawned
674
+ vehicles are randomly assigned to be either a robot vehicle (RV) or a human-driven vehicle
675
+ (HV) according to a pre-specified RV penetration rate. For an HV, the longitudinal acceleration
676
+ is computed using Intelligent Driver Model (IDM) (33). For an RV, when it is outside the
677
+ control zone, IDM is again used to determine the longitudinal acceleration; if it is inside the
678
+ control zone, the high-level decisions ‘Stop’ and ‘Go’ are determined by the RL policy, while its
679
+ low-level longitudinal acceleration is determined by the control method described in Sec. 4.5.2.
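+ A minimal sketch of this spawning rule is given below; the type labels and the random source are
+ assumptions for illustration, and in practice the assignment is applied to vehicles as the simulator
+ spawns them.
+ import random
+
+ def assign_vehicle_type(rv_penetration_rate, rng=random):
+     """Label a newly spawned vehicle as an RV with the given probability,
+     otherwise as an IDM-controlled HV."""
+     return "RV" if rng.random() < rv_penetration_rate else "HV"
+
+ # Example: roughly half of the spawned vehicles become RVs at 50% penetration.
+ types = [assign_vehicle_type(0.5) for _ in range(10000)]
+ print(types.count("RV") / len(types))  # close to 0.5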
680
+ 4.4
681
+ Decentralized RL For Hybrid Traffic
682
+ We formulate the control of RVs at the intersection as a POMDP, which consists of a 7-tuple
+ (S, A, T , R, Ω, O, γ), where S is a set of states (s ∈ S), A is a set of actions (a ∈ A), T is the
+ transition probabilities between states T (s′ | s, a), R is the reward function (S × A → R), Ω is
+ a set of observations o ∈ Ω, O is the set of conditional observation probabilities, and γ ∈ [0, 1)
+ is a discount factor. In our task, at each time t, when an RV i enters the control zone of an
+ intersection, its action a^t_i is determined based on its observation (of the current traffic condition)
+ o^t_i, which is a partial observation of the traffic state s^t_i at the intersection. Such a problem can be
+ solved using RL (34), where the policy πθ is a neural network trained using the following loss:
+ L = ( R_{t+1} + γ_{t+1} q_¯θ( S_{t+1}, arg max_{a′} q_θ(S_{t+1}, a′) ) − q_θ(S_t, A_t) )^2 .    (2)
705
+ In Eq. 2, q denotes the estimated value from the value network, θ and ¯θ respectively represent
706
+ the value network and the target network. The target network is a periodic copy of the value
707
709
+ network, which is not directly optimized during training. Next, we detail the components (i.e.,
710
+ action space, observation space, reward function) as well as the whole pipeline of our decen-
711
+ tralized RL algorithm.
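+ To make Eq. 2 concrete, the PyTorch-style sketch below computes the squared temporal-difference
+ error with the action selected by the online network and evaluated by the target network. It covers
+ only the double-Q term written in Eq. 2; the distributional and noisy-network components of Rainbow
+ DQN (Sec. 4.4.4) are omitted, and all tensor shapes and names are illustrative assumptions.
+ import torch
+
+ def td_loss(q_net, target_net, s_t, a_t, r_tp1, s_tp1, gamma):
+     """Squared TD error of Eq. 2 with double-Q action selection."""
+     # q_theta(S_t, A_t): Q-values of the taken actions, shape (batch,).
+     q_sa = q_net(s_t).gather(1, a_t.unsqueeze(1)).squeeze(1)
+     with torch.no_grad():
+         a_star = q_net(s_tp1).argmax(dim=1, keepdim=True)        # arg max_a' q_theta
+         q_next = target_net(s_tp1).gather(1, a_star).squeeze(1)  # evaluated by q_theta_bar
+     td = r_tp1 + gamma * q_next - q_sa
+     return (td ** 2).mean()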
712
+ 4.4.1
713
+ Action Space
714
+ Since our focus is to control hybrid traffic via the influence of RVs to HVs in a more advanta-
715
+ geous way than traffic lights, we design the action space of RVs to only consist of high-level
716
+ decisions. To be specific, an RV's action a^t_i determines at time t whether the RV i shall pass the
+ entrance line of an intersection or stop at the entrance line to block its following vehicles:
+ a^t_i ∈ A = {Stop, Go}.    (3)
722
+ When the RL policy grants ‘Go’, the RV will enter the intersection; instead, if the RL policy
723
+ decides ‘Stop’, the RV will decelerate and stop at the entrance line.
724
+ 4.4.2
725
+ Observation Space
726
+ In order to develop a general RL policy that can handle varied intersection topology and the
727
+ number of connecting lanes, we encode the traffic conditions observed by each RV to a fixed-
728
+ length representation. Specifically, the observation of each RV in the control zone (starts at 30m
729
+ before the entrance line) includes three elements:
730
+ • The status of the RV. The status includes one feature—the distance, denoted as d^t_i, be-
+ tween the RV i's position and the entrance line of the intersection.
733
+ • Traffic condition outside the intersection (but inside the control zone). As introduced
734
+ in Sec. 4.1, we categorize traffic streams into eight movement groups. We compute the
735
+ queue length l_{t,j} and the average waiting time w_{t,j} of each group j at time t. This is
736
738
+ to quantify the anisotropic congestion levels at an intersection. These features can be
739
+ conveniently shared among all vehicles in the control zone via local communication.
740
+ • Traffic condition inside the intersection. We design an ‘occupancy map’ m_{t,j} for each
741
+ moving direction j inside the intersection. As shown in Fig. S11, for each direction, an
742
+ inner lane is divided into 10 equal segments. If a vehicle’s position falls into a segment,
743
+ that segment is considered occupied and its value is set to 1. An empty segment’s value
744
+ is set to 0.
745
+ Overall, the observation of RV i at time t is defined as:
746
+ o^t_i = ⊕_{j=1}^{J} ⟨ l_{t,j}, w_{t,j} ⟩ ⊕_{j=1}^{J} ⟨ m_{t,j} ⟩ ⊕ ⟨ d^t_i ⟩,    (4)
752
+ where ⊕ is the concatenation operator and J = 8 is the number of traffic moving directions at
753
+ the intersection.
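+ A minimal sketch of assembling o^t_i according to Eq. 4 is given below. It assumes a fixed ordering
+ of the eight movement groups and plain Python containers for the per-direction statistics; both are
+ illustrative choices rather than the exact implementation.
+ import numpy as np
+
+ DIRECTIONS = ["E-L", "E-C", "W-L", "W-C", "N-L", "N-C", "S-L", "S-C"]  # J = 8
+
+ def occupancy_map(positions, lane_length, num_segments=10):
+     """Binary occupancy m_{t,j} of an inner lane split into equal segments."""
+     seg_len = lane_length / num_segments
+     occ = [0] * num_segments
+     for pos in positions:  # distance travelled along the inner lane, in meters
+         occ[min(int(pos // seg_len), num_segments - 1)] = 1
+     return occ
+
+ def build_observation(queue_len, wait_time, occupancy, ego_distance):
+     """Concatenate <l_{t,j}, w_{t,j}>, the occupancy maps m_{t,j}, and d^t_i."""
+     lw = [v for d in DIRECTIONS for v in (queue_len[d], wait_time[d])]
+     maps = [bit for d in DIRECTIONS for bit in occupancy[d]]
+     return np.array(lw + maps + [ego_distance], dtype=np.float32)
+
+ # The resulting vector has 8*2 + 8*10 + 1 = 97 entries.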
754
+ 4.4.3
755
+ Hybrid Reward
756
+ To encourage the RV to consider not only its own efficiency but also the efficiency of the entire
757
+ intersection traffic, we design a hybrid reward function for the RV taking the following form:
758
+ r(s^t, a^t, s^{t+1}) = λ_L r_L + λ_G r_G,    (5)
760
+ where r_L is the local reward, r_G is the global reward, and λ_L and λ_G are the coefficients of the
761
+ two rewards, respectively.
762
+ The local reward r_L is defined as
+ r_L = { 0,                                                        if a^t = Stop
+         −100,                                                     else if a conflict occurs
+         ( OF_j(s^t, s^{t+1}) + QL_j(s^{t+1}) ) · AW_j(s^{t+1}),   otherwise }    (6)
779
+ where OF_j(s^t, s^{t+1}) denotes the outflow, i.e., the number of vehicles entering the intersection,
+ along the movement direction j during the time period [t, t + 1]. QL_j(s^{t+1}) is the queue length of
+ the traffic waiting at the jth moving direction to enter the intersection. AW_j(s^t) is the average
784
+ waiting time of all vehicles in the corresponding jth queue at time step t. If the current action is
785
+ ‘Stop’, the local reward is 0. If the current action is ‘Go’, but the RV’s movement conflicts with
786
+ other vehicles passing through the intersection, it will be punished with −100. On the other
787
+ hand, if the RV’s ‘Go’ action does not conflict with other vehicles, it will get a positive reward
788
+ value because OF_j(s^t, s^{t+1}), QL_j(s^{t+1}), and AW_j(s^t) are non-negative.
789
+ The global reward r_G is defined as
+ r_G = Σ_{j=1}^{J} ( QL_j(s^t) · AW_j(s^t) ) − Σ_{j=1}^{J} ( QL_j(s^{t+1}) · AW_j(s^{t+1}) ),    (7)
800
+ where the left side of the minus sign is the summation of the average waiting time multiplied
801
+ by the queue length of each direction j at t, which measures the severity of traffic congestion;
802
+ the right side of the minus sign measures the severity of traffic congestion at t + 1. Hence,
803
+ the global reward reveals the change in traffic congestion during one time step. The design
804
+ of Eq. 7 is inspired by the observation that both waiting time and queue length (35, 36) have
805
+ been adopted to quantify traffic congestion. However, we find that either metric alone is less
806
+ informative to quantify the congestion level formed by hybrid traffic at complex intersections.
807
+ While there are infinitely many ways to combine both metrics to form the reward, we choose a
808
+ non-linear approach, i.e., multiplying them together, over other linear options. This is because
809
+ hybrid traffic represents an unstable and non-preemptive system. The relationship between
810
+ waiting time and queue length does not obey Little's law (37) and thus does not take a
811
+ linear form. Extensive experiments show that our hybrid reward enables effective interchanges
812
+ of traffic streams at the intersection. The analysis of the hybrid reward is elaborated in Sec. 2.3.
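+ The reward of Eqs. 5–7 can be sketched as follows. The per-direction statistics passed in (outflow,
+ queue length, and average waiting time) are assumed to be extracted from the simulator, and λ_L and
+ λ_G are left as free coefficients as in Eq. 5; the function boundaries are illustrative.
+ def local_reward(action, conflict, outflow_j, queue_len_next_j, avg_wait_next_j):
+     """Local reward r_L of Eq. 6 for the RV's own moving direction j."""
+     if action == "Stop":
+         return 0.0
+     if conflict:
+         return -100.0
+     return (outflow_j + queue_len_next_j) * avg_wait_next_j
+
+ def global_reward(queue_len, avg_wait, queue_len_next, avg_wait_next):
+     """Global reward r_G of Eq. 7: the decrease in congestion over one step."""
+     before = sum(queue_len[j] * avg_wait[j] for j in queue_len)
+     after = sum(queue_len_next[j] * avg_wait_next[j] for j in queue_len_next)
+     return before - after
+
+ def hybrid_reward(r_local, r_global, lam_l, lam_g):
+     """Hybrid reward of Eq. 5."""
+     return lam_l * r_local + lam_g * r_global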
813
+ 4.4.4
814
+ RL Algorithm
815
+ For the actual RL algorithm, we adopt Rainbow DQN (34). Rainbow DQN is a state-of-the-art
816
+ technique that combines the novel designs of six extensions of the original DQN algorithm (38),
+ including prioritized experience replay (39), double DQN (40), dueling network (41), distribu-
+ tional RL algorithm (42), noisy network (43), and multi-step learning. By incorporating the advantages of these
821
+ DQN variants, Rainbow DQN achieves the best performance on the Atari benchmark (34). We
822
+ use Rainbow DQN and the hybrid reward function to centrally train all RVs. During execution,
823
+ each RV executes its own policy and all RVs share the same policy, i.e., the same neural network
824
+ architecture and weights.
825
+ To be specific, the policy πθ is represented by a neural network with three fully connected
826
+ (FC) layers. Each FC layer contains 512 hidden units and uses a rectified linear unit (ReLU)
827
+ as the activation layer. For training, the learning rate is set to 1e−3, the discount factor is set
828
+ to 0.99, and the batch size of each policy update is 2048. We train our model using a PC with
829
+ Intel i9-9900K and NVIDIA GeForce RTX 2080Ti. The training time varies due to different
830
+ RV penetration rates, but in general 48 hours are expected for a policy to converge.
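+ For reference, the value-network backbone described above (fully connected layers of 512 hidden
+ units with ReLU activations) can be written compactly as below. The two-dimensional output
+ corresponds to {Stop, Go}; the Rainbow-specific heads (noisy and distributional layers) are not
+ shown, and the exact layer arrangement is one possible reading of the text rather than the released model.
+ import torch.nn as nn
+
+ def build_value_network(obs_dim, num_actions=2):
+     """Three 512-unit fully connected layers with ReLU, as described in Sec. 4.4.4."""
+     return nn.Sequential(
+         nn.Linear(obs_dim, 512), nn.ReLU(),
+         nn.Linear(512, 512), nn.ReLU(),
+         nn.Linear(512, 512), nn.ReLU(),
+         nn.Linear(512, num_actions),
+     )
+
+ # Training hyper-parameters reported in the text.
+ LEARNING_RATE = 1e-3
+ DISCOUNT_FACTOR = 0.99
+ BATCH_SIZE = 2048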
831
+ 4.5
832
+ Traffic Coordination and Low-level Control
833
+ 4.5.1
834
+ Resolving Conflicting Traffic Streams
835
+ Conflicting movements are the most critical aspect of intersection traffic, as they can cause grid-
836
+ locks not only locally at each intersection but potentially over the entire traffic network (9).
837
+ Although our approach punishes conflicting movements (through the hybrid reward function),
838
+ learning-based autonomous systems that are simultaneously effective and provably safe remain
839
+ an open problem (44). So, conflicts can still occur and subsequently affect the training ef-
840
+ ficiency. To ensure our approach is conflict-free, we introduce a coordination mechanism to
841
+ post-process the decisions returned by the RL policy. This mechanism is applied only to the
842
+ traffic stream pairs that are not defined in the conflict-free movement set C.
843
+ First, each RV obtains its ‘Stop’ or ‘Go’ decision by the RL policy. Then, the RV broadcasts
844
+ its decision among all RVs inside the control zone. Next, each RV (ego RV) at the entrance line
845
847
+ compares its decision with all other RVs in the control zone. This comparison results in three
848
+ conditions:
849
+ • If the vehicles inside the intersection are on the conflicting stream of ego RV, the ego RV
850
+ is not permitted to enter the intersection.
851
+ • If the vehicles inside the intersection are not on the conflicting stream of the ego RV, but
852
+ multiple RVs at the entrance line on conflicting streams receive the decision ‘Go’ in the
853
+ same time step (a potential conflict decision), a priority score is calculated as the product
854
+ of average waiting time and queue length. The RV with the highest score can enter the
855
+ intersection, while other RVs should wait at the entrance line.
856
+ • If the vehicles inside the intersection are not on the conflicting stream of the ego RV
857
+ and there are no potential conflict decisions for the ego RV, the ego RV will execute its
858
+ decision.
859
+ Since the hybrid reward function contains a punishment term for conflict decisions, the
860
+ agent will learn to avoid conflicts during training. In Fig. S12 LEFT, we show that the number
861
+ of conflict decisions decreases as the training progresses, and the trend stabilizes at a low level
862
+ after the corresponding policy converges. We also investigate the conflict rate computed as the
863
+ number of conflict decisions divided by the number of RVs inside the control zone. As shown in
864
+ Fig. S12 RIGHT, the conflict rate of either 60% RVs or 80% RVs tends to converge around 5%,
865
+ while the conflict rate of 100% RVs approaches 0 after 500 steps. The results demonstrate the
866
+ effectiveness of the RL policy in coordinating intersection traffic and the infrequent use cases
867
+ of the coordination mechanism introduced in this section.
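+ A simplified sketch of this conflict-resolution step is given below. The inputs (per-RV ‘Go’ requests,
+ per-direction queue lengths and waiting times, and a check against vehicles already inside the
+ intersection) are assumptions about how the shared information is organized, and ties between equal
+ priority scores are not handled here.
+ def resolve_go_decisions(go_requests, queue_len, avg_wait, blocked, may_conflict):
+     """Post-process the RL decisions of RVs waiting at the entrance line.
+
+     go_requests: dict {rv_id: moving direction} for RVs whose decision is 'Go'.
+     blocked(d): True if vehicles already inside the intersection conflict with d.
+     may_conflict(d1, d2): True if the two streams are not in the set C (Sec. 4.1).
+     """
+     granted = {}
+     for rv, d in go_requests.items():
+         if blocked(d):
+             granted[rv] = False  # condition 1: wait at the entrance line
+             continue
+         # condition 2: competing 'Go' decisions on conflicting streams
+         rivals = [d2 for rv2, d2 in go_requests.items()
+                   if rv2 != rv and may_conflict(d, d2)]
+         priority = queue_len[d] * avg_wait[d]
+         granted[rv] = all(priority >= queue_len[d2] * avg_wait[d2] for d2 in rivals)
+     return granted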
868
+ 4.5.2
869
+ Low-level Control of RVs
870
+ While the RL policy makes the high-level decisions ‘Stop’ and ‘Go’, low-level controls are
871
+ needed to complement an RV for traveling through the intersection.
872
+ 25
873
+
874
+ • Route planning. The route of a vehicle is planned during the traffic reconstruction phase
875
+ (discussed in Sec. 4.2). There is no re-planning of a vehicle’s route during the simulation
876
+ phase.
877
+ • Longitudinal acceleration. For RVs receiving the decision ‘Go’, their longitudinal accel-
878
+ eration will be set to the vehicle's maximum acceleration a^t = a_max; for RVs receiving the
+ decision 'Stop', they will slow down and stop at the entrance line using the deceleration
+ a^t = −v^2 / (2 · d_front), where d_front is the distance to the entrance line. Note that other deceleration
883
+ computing methods can be adopted to replace our formula.
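+ The two low-level rules above can be summarized in a few lines; the current speed v, the distance
+ d_front, and a_max are assumed to be provided by the simulator, and, as noted, other deceleration
+ laws could be substituted.
+ def longitudinal_acceleration(decision, v, d_front, a_max):
+     """'Go' -> full acceleration; 'Stop' -> decelerate to rest at the entrance line."""
+     if decision == "Go":
+         return a_max
+     # a^t = -v^2 / (2 * d_front) brings the RV to a stop exactly at the line.
+     return -(v ** 2) / (2.0 * max(d_front, 1e-6))  # guard against d_front = 0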
884
+ 4.6
885
+ Assumptions of RVs
886
+ It is important to note that the RVs defined in this project are different from the conventional
887
+ set-ups of autonomous vehicles (AVs), which are equipped with a complete suite of perception-
888
+ to-planning modules. Our RVs focus on the high-level decisions of ‘Stop’ and ‘Go’ and only
889
+ require a certain form of V2V communication to obtain vehicles’ positions inside the control
890
+ zone. Other types of sensors such as cameras and lidars are unnecessary. Thus, our learning
891
+ process is different from a typical training process (end-to-end or otherwise) of autonomous
892
+ driving. Another important difference between our RV and the conventional AV is that our RV
893
+ does not exclude humans but can keep humans in the loop: humans can execute the low-level
894
+ controls of an RV while the machine learning module suggests the ‘Stop’ or ‘Go’ decisions.
895
+ Monetary incentive mechanisms can be established to encourage humans to follow the sug-
896
+ gestions and contribute to more efficient traffic systems. Even if the suggestions are not
+ always followed, the hybrid traffic system can still benefit from the proposed control mecha-
898
+ nism as the RV penetration rate steadily increases in the expected future (see Sec. 2.5). Overall,
899
+ the above-mentioned characteristics make our RVs applicable to all levels of vehicle autonomy
900
+ and a more practical solution for facilitating intersection traffic than using fully equipped AVs.
901
+ 26
902
+
903
+ References
904
+ 1. David Schrank, Bill Eisele, Tim Lomax, and Jim Bak. Urban mobility scorecard. Texas
905
+ A&M Transportation Institute and INRIX, 2021.
906
+ 2. United Nations. World urbanization prospects: The 2018 revision (st/esa/ser.a/420). De-
907
+ partment of Economic and Social Affairs, Population Division, New York: United Nations,
908
+ 2019.
909
+ 3. Mallory Trouve, Gaele Lesteven, and Fabien Leurent.
910
+ Worldwide investigation of pri-
911
+ vate motorization dynamics at the metropolitan scale. Transportation Research Procedia,
912
+ 48:3413–3430, 2020.
913
+ 4. Eun-Ha Choi. Crash factors in intersection-related crashes: An on-scene perspective. Na-
914
+ tional Highway Traffic Safety Administration, U.S. Department of Transportation, 2010.
915
+ 5. Associated Press. Power still out to 50k customers, days after memphis storm.
+ https://www.usnews.com/news/best-states/tennessee/articles/2022-02-07/power-still-out-to-60k-customers-days-after-memphis-storm, February 2022.
931
+ 6. Ben Winck. Get ready for blackouts from london to la, as the global energy crisis overwhelms
+ grids and sends energy prices skyrocketing. https://www.businessinsider.com/global-europe-energy-crisis-power-electricity-outages-blackouts-energy-grid-2022-9?op=1, September 2022.
958
+ 7. Rachel Ramirez. Power outages are on the rise, led by texas, michigan and california. here's
+ what's to blame. https://www.cnn.com/2022/09/14/us/power-outages-rising-extreme-weather-climate/index.html, September 2022.
964
+ 8. Guni Sharon and Peter Stone. A protocol for mixed autonomous and human-operated ve-
965
+ hicles at intersections. In International Conference on Autonomous Agents and Multiagent
966
+ Systems, pages 151–167, 2017.
967
+ 9. Hao Yang and Ken Oguchi. Intelligent vehicle control at signal-free intersection under
968
+ mixed connected environment. IET Intelligent Transport Systems, 14(2):82–90, 2020.
969
+ 10. Cathy Wu, Abdul Rahman Kreidieh, Kanaad Parvate, Eugene Vinitsky, and Alexandre M
970
+ Bayen. Flow: A modular learning framework for mixed autonomy traffic. IEEE Transac-
971
+ tions on Robotics, 38(2):1270–1286, 2022.
972
+ 11. Eugene Vinitsky, Kanaad Parvate, Aboudy Kreidieh, Cathy Wu, and Alexandre Bayen.
973
+ Lagrangian control through deep-rl: Applications to bottleneck decongestion. In IEEE
974
+ International Conference on Intelligent Transportation Systems, pages 759–765, 2018.
975
+ 12. Shuo Feng, Xintao Yan, Haowei Sun, Yiheng Feng, and Henry X Liu. Intelligent driving
976
+ intelligence test for autonomous vehicles with naturalistic and adversarial environment.
977
+ Nature communications, 12(1):1–14, 2021.
978
+ 13. Zhongxia Yan and Cathy Wu. Reinforcement learning for mixed autonomy intersections.
979
+ In IEEE International Intelligent Transportation Systems Conference, pages 2089–2094,
980
+ 2021.
981
+ 14. Kathy Jang, Eugene Vinitsky, Behdad Chalaki, Ben Remer, Logan Beaver, Andreas A Ma-
982
+ likopoulos, and Alexandre Bayen. Simulation to scaled city: zero-shot policy transfer for
983
+ 28
984
+
985
+ traffic control via autonomous vehicles. In ACM/IEEE International Conference on Cyber-
986
+ Physical Systems, pages 291–300, 2019.
987
+ 15. Jackeline Rios-Torres and Andreas A Malikopoulos. A survey on the coordination of con-
988
+ nected and automated vehicles at intersections and merging at highway on-ramps. IEEE
989
+ Transactions on Intelligent Transportation Systems, 18(5):1066–1077, 2016.
990
+ 16. PB Hunt, DI Robertson, RD Bretherton, and RI Winton.
991
+ Scoot-a traffic responsive
992
+ method of coordinating signals. Technical report, Transport and Road Research Laboratory
993
+ (TRRL), United Kingdom, 1981.
994
+ 17. Mohammed A Hadi and Charles E Wallace. Hybrid genetic algorithm to optimize signal
995
+ phasing and timing. Transportation Research Record, (1421):104–112, 1993.
996
+ 18. Mohammed A Hadi and Charles E Wallace. Optimization of signal phasing and timing
997
+ using cauchy simulated annealing. Transportation Research Record, 1456:64–71, 1994.
998
+ 19. David Miculescu and Sertac Karaman. Polling-systems-based autonomous vehicle coor-
999
+ dination in traffic intersections with no traffic signals. IEEE Transactions on Automatic
1000
+ Control, 65(2):680–694, 2019.
1001
+ 20. Andreas A Malikopoulos, Christos G Cassandras, and Yue J Zhang.
1002
+ A decentralized
1003
+ energy-optimal control framework for connected automated vehicles at signal-free inter-
1004
+ sections. Automatica, 93:244–256, 2018.
1005
+ 21. Bowen Baker, Ingmar Kanitscheider, Todor Markov, Yi Wu, Glenn Powell, Bob McGrew,
1006
+ and Igor Mordatch. Emergent tool use from multi-agent autocurricula. In International
1007
+ Conference on Learning Representations, 2020.
1008
+ 29
1009
+
1010
+ 22. Oriol Vinyals, Igor Babuschkin, Wojciech M Czarnecki, Michaël Mathieu, Andrew Dudzik,
1011
+ Junyoung Chung, David H Choi, Richard Powell, Timo Ewalds, Petko Georgiev, et al.
1012
+ Grandmaster level in starcraft ii using multi-agent reinforcement learning.
1013
+ Nature,
1014
+ 575(7782):350–354, 2019.
1015
+ 23. Christopher Berner, Greg Brockman, Brooke Chan, Vicki Cheung, Przemysław Dębiak,
1016
+ Christy Dennison, David Farhi, Quirin Fischer, Shariq Hashme, Chris Hesse, et al. Dota 2
1017
+ with large scale deep reinforcement learning. arXiv preprint arXiv:1912.06680, 2019.
1018
+ 24. William H Guss, Brandon Houghton, Nicholay Topin, Phillip Wang, Cayden Codel,
1019
+ Manuela Veloso, and Ruslan Salakhutdinov. Minerl: A large-scale dataset of minecraft
1020
+ demonstrations. In International Joint Conference on Artificial Intelligence, pages 2442–
1021
+ 2448, 2019.
1022
+ 25. Maximilian Jaritz, Raoul De Charette, Marin Toromanoff, Etienne Perot, and Fawzi
1023
+ Nashashibi. End-to-end race driving with deep reinforcement learning. In IEEE Inter-
1024
+ national Conference on Robotics and Automation, pages 2070–2075, 2018.
1025
+ 26. Zhongxia Yan, Abdul Rahman Kreidieh, Eugene Vinitsky, Alexandre M Bayen, and Cathy
1026
+ Wu. Unified automatic control of vehicular systems with reinforcement learning. IEEE
1027
+ Transactions on Automation Science and Engineering, 2022.
1028
+ 27. Duowei Li, Jianping Wu, Feng Zhu, Tianyi Chen, and Yiik Diew Wong.
1029
+ Coor-plt: A
1030
+ hierarchical control model for coordinating adaptive platoons of connected and autonomous
1031
+ vehicles at signal-free intersections based on deep reinforcement learning. arXiv preprint
1032
+ arXiv:2207.07195, 2022.
1033
+ 30
1034
+
1035
+ 28. Anye Zhou, Srinivas Peeta, Menglin Yang, and Jian Wang. Cooperative signal-free inter-
1036
+ section control using virtual platooning and traffic flow regulation. Transportation research
1037
+ part C: emerging technologies, 138:103610, 2022.
1038
+ 29. Amir Mirheli, Mehrdad Tajalli, Leila Hajibabai, and Ali Hajbabaie. A consensus-based
1039
+ distributed trajectory control in a signal-free intersection. Transportation research part C:
1040
+ emerging technologies, 100:161–176, 2019.
1041
+ 30. Xiaolong Chen, Manjiang Hu, Biao Xu, Yougang Bian, and Hongmao Qin. Improved
1042
+ reservation-based method with controllable gap strategy for vehicle coordination at non-
1043
+ signalized intersections.
1044
+ Physica A: Statistical Mechanics and its Applications, page
1045
+ 127953, 2022.
1046
+ 31. Michael Behrisch, Laura Bieker, Jakob Erdmann, and Daniel Krajzewicz.
1047
+ SUMO–
1048
+ simulation of urban mobility: an overview. In International Conference on Advances in
1049
+ System Simulation, 2011.
1050
+ 32. Behrad Toghi, Rodolfo Valiente, Dorsa Sadigh, Ramtin Pedarsani, and Yaser P Fallah. Al-
1051
+ truistic maneuver planning for cooperative autonomous vehicles using multi-agent advan-
1052
+ tage actor-critic. In IEEE/CVF Conference on Computer Vision and Pattern Recognition,
1053
+ Workshop on Autonomous Driving: Perception, Prediction and Planning, 2021.
1054
+ 33. Martin Treiber, Ansgar Hennecke, and Dirk Helbing. Congested traffic states in empirical
1055
+ observations and microscopic simulations. Physical review E, 62(2):1805, 2000.
1056
+ 34. Matteo Hessel, Joseph Modayil, Hado Van Hasselt, Tom Schaul, Georg Ostrovski, Will
1057
+ Dabney, Dan Horgan, Bilal Piot, Mohammad Azar, and David Silver. Rainbow: Com-
1058
+ bining improvements in deep reinforcement learning. In AAAI Conference on Artificial
1059
+ Intelligence, 2018.
1060
+ 31
1061
+
1062
+ 35. Rusheng Zhang, Akihiro Ishikawa, Wenli Wang, Benjamin Striner, and Ozan K Tonguz.
1063
+ Using reinforcement learning with partial vehicle detection for intelligent traffic signal con-
1064
+ trol. IEEE Transactions on Intelligent Transportation Systems, 22(1):404–415, 2020.
1065
+ 36. Martin Gregurić, Miroslav Vujić, Charalampos Alexopoulos, and Mladen Miletić. Appli-
1066
+ cation of deep reinforcement learning in traffic signal control: An overview and impact of
1067
+ open traffic data. Applied Sciences, 10(11):4011, 2020.
1068
+ 37. John DC Little and Stephen C Graves. Little’s law. In Building intuition, pages 81–100.
1069
+ Springer, 2008.
1070
+ 38. Volodymyr Mnih, Koray Kavukcuoglu, David Silver, Andrei A Rusu, Joel Veness, Marc G
1071
+ Bellemare, Alex Graves, Martin Riedmiller, Andreas K Fidjeland, Georg Ostrovski, et al.
1072
+ Human-level control through deep reinforcement learning. nature, 518(7540):529–533,
1073
+ 2015.
1074
+ 39. Tom Schaul, John Quan, Ioannis Antonoglou, and David Silver. Prioritized experience
1075
+ replay. In International Conference on Learning Representations, 2016.
1076
+ 40. Hado Van Hasselt, Arthur Guez, and David Silver. Deep reinforcement learning with dou-
1077
+ ble q-learning. In AAAI Conference on Artificial Intelligence, page 2094–2100, 2016.
1078
+ 41. Ziyu Wang, Tom Schaul, Matteo Hessel, Hado Hasselt, Marc Lanctot, and Nando Freitas.
1079
+ Dueling network architectures for deep reinforcement learning. In International Confer-
1080
+ ence on Machine Learning, pages 1995–2003, 2016.
1081
+ 42. Marc G Bellemare, Will Dabney, and Rémi Munos. A distributional perspective on rein-
1082
+ forcement learning. In International Conference on Machine Learning, pages 449–458,
1083
+ 2017.
1084
+ 32
1085
+
1086
+ 43. Meire Fortunato, Mohammad Gheshlaghi Azar, Bilal Piot, Jacob Menick, Ian Osband, Alex
1087
+ Graves, Vlad Mnih, Remi Munos, Demis Hassabis, Olivier Pietquin, et al. Noisy networks
1088
+ for exploration. International Conference on Learning Representations, 2018.
1089
+ 44. Shangding Gu, Long Yang, Yali Du, Guang Chen, Florian Walter, Jun Wang, Yaodong
1090
+ Yang, and Alois Knoll. A review of safe reinforcement learning: Methods, theory and
1091
+ applications. arXiv preprint arXiv:2205.10330, 2022.
1092
+ 33
1093
+
1094
+ Supplementary Materials
1095
+ The supplementary materials include:
1096
+ Text S1: Analysis of the Reward Function in Yan and Wu (13).
1097
+ Fig. S1: Comparison between our hybrid reward and the reward function in Yan and Wu (13).
1098
+ Fig. S2: The evaluation results at the intersection 449.
1099
+ Fig. S3: The evaluation results at the intersection 332.
1100
+ Fig. S4: The evaluation results at the intersection 334.
1101
+ Fig. S5: The relationship between traffic demand, RV penetration rates and traffic conges-
1102
+ tion.
1103
+ Fig. S6: Traffic light blackout experiments.
1104
+ Fig. S7: RV ‘offline’ experiments.
1105
+ Fig. S8: The two unseen intersections used in our testing.
1106
+ Fig. S9: The evaluation results at the unseen four-legged intersection.
+ Fig. S10: The evaluation results at the unseen three-legged intersection.
1108
+ Fig. S11: The illustration of the occupancy map.
1109
+ Fig. S12: The number of conflict decisions during training and testing.
1110
+ Tab. S1: The features of four main intersections (229, 449, 332, 334).
1111
+ Tab. S2: The details of the two unseen intersections used in generalization experiments.
1112
+ Text S1
1113
+ Analysis of the Reward Function in Yan and Wu (13)
1114
+ In this part, we show why the reward function of the state-of-the-art technique by Yan and
1115
+ Wu (13) is difficult to scale to large-scale traffic scenarios.
1116
+ The reward function by Yan and Wu (13) takes the form R_Yan = outflow(s^t, s^{t+1}) −
+ collision(s^t, s^{t+1}), where outflow(s^t, s^{t+1}) denotes the number of vehicles exiting the net-
1118
1120
+ Figure S1: TOP-LEFT: Reward by Yan and Wu (13) calculated with the NoTL baseline. The
1121
+ average speed of all vehicles at the intersection is also plotted for comparison. The decrease
+ of the average speed indicates the formation of congestion. As a result, the Yan Reward does not
+ reflect the intersection congestion in a timely manner. BOTTOM-LEFT: The accumulated global
+ reward of ours, which responds to the intersection congestion swiftly. MIDDLE and RIGHT: the local reward
1125
+ of a pair of conflicting moving directions. The alternating patterns show the effectiveness of the
1126
+ local reward in enabling interchanges of traffic flows at the intersection.
1127
+ work from t to t + 1, and collision(st, st+1) is the number of collisions in the network from t
1128
+ to t + 1. We record this reward during the evaluation of the NoTL baseline to analyze its char-
1129
+ acteristics. The results are shown in TOP-LEFT and BOTTOM-LEFT of Fig. S1. As expected,
1130
+ for the NoTL baseline (with 100% HVs), congestion is formed at the intersection, which is
1131
+ reflected by the average speed of all vehicles decreasing to 0. However, RY an (Yan Reward)
1132
+ does not reflect the change of traffic conditions timely. This is because the outflow of a network
1133
+ is a delayed indicator: the congestion inside the intersection does not prohibit the vehicles that
1134
+ have already exited the intersection to continue contributing to the outflow. The delayed reward
1135
+ increases the difficulty of learning since an episode is likely to terminate due to the congestion
1136
+ before the reward eventually reflects it.
1137
+ [Figure S1 plot content omitted: the panels show the Yan Reward and average speed under NoTL (TOP-LEFT), our global reward on a log scale (BOTTOM-LEFT), and the local rewards of conflicting direction pairs (MIDDLE and RIGHT) over simulation steps.]
+ Intersection    Num. incoming lanes    Num. non-empty lanes    Traffic demand (v/h * lane)
+ 229             21                     19                      694
+ 449             19                     18                      620
+ 332             18                     17                      662
+ 334             16                     14                      515
1226
+ Table S1: Intersection features. Among four intersections, 229 is the busiest one with the
1227
+ highest traffic demand (i.e., 694 vehicles per lane per hour) and the most non-empty lanes (i.e.,
1228
+ 19).
1229
+ Figure S2: The overall results measured in average waiting time at the intersection 449. The
1230
+ RIGHT sub-figures are zoomed-in versions of the LEFT sub-figures by excluding NoTL and
1231
+ Yan. With 60% or more RVs, our method consistently outperforms all other baseline methods.
1232
+ [Plot content omitted: axis labels and legend entries (average waiting time; NoTL, Yan, Yang, TL, RV 40%–100%) only.]
+ Figure S3: The overall results measured in average waiting time at the intersection 332. The
1280
+ RVFigure S3: The overall results measured in average waiting time at the intersection 332. The
1281
+ RIGHT sub-figures are zoomed-in versions of the LEFT sub-figures by excluding NoTL and
1282
+ Yan. Generally speaking, NoTL and Yan do not perform very well. Our method starts to
1283
+ outperform TL and Yang when the RV penetration rate is 70% or higher.
1284
+ Intersection    Topology        Num. lanes    Num. non-empty lanes    Traffic demand (v/h * lane)
+ 140             four-legged     24            24                      537
+ 205             three-legged    10            10                      700
1301
+ Table S2: Details of the two unseen intersections used in our testing.
1302
+ [Plot content omitted: axis labels and legend entries (average waiting time; NoTL, Yan, Yang, TL, RV 40%–100%) only.]
+ Figure S4: The overall results measured in average waiting time at the intersection 334. The
1345
+ RVFigure S4: The overall results measured in average waiting time at the intersection 334. The
1346
+ RIGHT sub-figures are zoomed-in versions of the LEFT sub-figures by excluding NoTL and
1347
+ Yan. In general, our method with 50% RVs or more outperforms all four baselines.
1348
+ [Plot content omitted: axis labels and legend entries (average waiting time; NoTL, Yan, Yang, TL, RV 40%–100%) only.]
+ Figure S5: LEFT: The solid lines represent no traffic lights and no RVs. The congestion starts
1392
+ to form when the demand is over 200 v/h. The real-world demand, denoted by the dashed line,
+ which is about 700 v/h, does not cause congestion because 5% RVs are deployed in traffic.
1394
+ RIGHT: Analyzing the influence of low RV penetration rates on traffic. As a result, 5% is the
1395
+ minimum to prevent congestion. For both figures, the study subject is the intersection 229.
1396
+ Figure S6: Blackout experiments. We simulate blackout events (traffic signals are off) at in-
1397
+ tersections 229, 332, 449, and 334 (from left to right) since the 100th step. Without any RV, a
1398
+ gridlock will form at the intersection causing the average waiting time of all vehicles to increase
1399
+ rapidly. In contrast, with 50% RVs, no gridlock is formed and the waiting times of all vehicles
1400
+ at the intersection remain low and stable.
1401
+ [Plot content omitted: axis labels and legend entries (average waiting time over steps; RV 0% and RV 50%) only.]
+ Figure S7: RV ‘offline’ experiments. The RV penetration rate drops from 100% to various
1502
+ percentages at intersections 229, 332, 449, and 334 (from left to right) since the 100th step.
1503
+ The ‘offline’ RVs are taken over by the IDM model. A pure HV scenario (100% HVs) is
1504
+ also included for comparison. As a result, even if the RV penetration rate reduces to 40%,
1505
+ our method can still maintain stable average waiting times of all vehicles at the intersection,
1506
+ reflecting no occurrence of gridlocks.
1507
+ Figure S8: The two unseen intersections used in our testing (left is four-legged and right is
1508
+ three-legged).
1509
+ [Plot content and map-viewer residue omitted.]
+ Figure S9: The overall results measured in average waiting time at the intersection 140 (unseen).
1590
+ The RIGHT sub-figures are zoomed-in versions of the LEFT sub-figures by excluding NoTL
1591
+ and RV percentages 40% and 50%. Starting from 60% RVs, our method beats the TL baseline.
1592
+ With 100% RVs, our method can reduce the average waiting time by ∼80% compared to TL.
1593
+ [Plot content omitted: axis labels and legend entries (average waiting time; TL, NoTL, RV 40%–100%) only.]
+ Figure S10: The overall results measured in average waiting time at the intersection 205 (un-
1649
+ seen). This is a three-legged intersection and thus only four directions are shown. The RIGHT
1650
+ sub-figures are zoomed-in versions of the LEFT sub-figures by excluding NoTL. Our approach
1651
+ starts to outperform the TL baseline when RVs are 50% or more.
1652
+ [Plot content omitted: axis labels and legend entries only.]
+ Figure S11: An illustration of the occupancy map along the moving direction W-L. The inner
1708
+ lanes are divided into 10 segments. Each segment has an associated binary label representing
1709
+ ‘free’ (green dot) or ‘occupied’ (red dot).
1710
+ [Figure S12 axis labels: Num. of Conflict Decisions, Conflict Rate (%), Step (s), Epoch.]
1714
+ Figure S12: LEFT: The number of conflict decisions decreases as the learning progresses. For
1715
+ all three RV penetration rates, the trend stabilizes at a low level. RIGHT: The conflict rate (=
1716
+ num. of conflict decisions / num. of RVs within the control zone) is low for any RV penetration
1717
+ rate—around 5% for 60% RVs and 80% RVs, and close to 0 for 100% RVs.
1718
6tE4T4oBgHgl3EQf1w38/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
79E2T4oBgHgl3EQfPgaW/content/tmp_files/2301.03760v1.pdf.txt ADDED
@@ -0,0 +1,1920 @@
1
+ 1
2
+ Over-The-Air Adversarial Attacks on Deep
3
+ Learning Wi-Fi Fingerprinting
4
+ Fei Xiao, Yong Huang, Member, IEEE, Yingying Zuo, Wei Kuang, Wei Wang, Senior Member, IEEE
5
+ Abstract—Empowered by deep neural networks (DNNs), Wi-
6
+ Fi fingerprinting has recently achieved astonishing localization
7
+ performance to facilitate many security-critical applications in
8
+ wireless networks, but it is inevitably exposed to adversarial
9
+ attacks, where subtle perturbations can mislead DNNs to wrong
10
+ predictions. Such vulnerability provides new security breaches to
11
+ malicious devices for hampering wireless network security, such
12
+ as malfunctioning geofencing or asset management. The prior
13
+ adversarial attack on localization DNNs uses additive perturba-
14
+ tions on channel state information (CSI) measurements, which is
15
+ impractical in Wi-Fi transmissions. To transcend this limitation,
16
+ this paper presents FooLoc, which fools Wi-Fi CSI fingerprinting
17
+ DNNs over the realistic wireless channel between the attacker and
18
+ the victim access point (AP). We observe that though uplink CSIs
19
+ are unknown to the attacker, the accessible downlink CSIs could
20
+ be their reasonable substitutes at the same spot. We thoroughly
21
+ investigate the multiplicative and repetitive properties of over-the-
22
+ air perturbations and devise an efficient optimization problem to
23
+ generate imperceptible yet robust adversarial perturbations. We
24
+ implement FooLoc using commercial Wi-Fi APs and Wireless
25
+ Open-Access Research Platform (WARP) v3 boards in offline
26
+ and online experiments, respectively. The experimental results
27
+ show that FooLoc achieves overall attack success rates of about
28
+ 70% in targeted attacks and of above 90% in untargeted attacks
29
+ with small perturbation-to-signal ratios of about -18 dB.
30
+ Index Terms—Adversarial attack, indoor localization, deep
31
+ learning
32
+ I. Introduction
33
+ In wireless networks, accurate device location information
34
+ is increasingly desired to support many security-critical appli-
35
+ cations, such as device authentication and access control [1],
36
+ [2]. To achieve this, Wi-Fi fingerprint based indoor localization
37
+ recently has gained astonishing performance via benefiting
38
+ from the advances in deep neural networks (DNNs) [3], [4],
39
+ [5], [6], which, however, are shown to be susceptible to
40
+ adversarial attacks [7], [8], [9]. In such attacks, minimal
41
+ perturbations on genuine input samples can steer DNNs
42
+ catastrophically away from true predictions. By exploiting
43
+ these vulnerabilities, malicious devices have the potential to
44
+ manipulate their localization results and cause the breakdown
45
+ of wireless geofencing [10], [11], asset management, and so
46
+ on. Thus, it is of great importance to investigate the extent
47
+ This work was supported by the Henan Province Key R&D Program with
48
+ Grant 221111210400. (Corresponding author: Yong Huang.)
49
+ Y. Huang is with the School of Cyber Science and Engineering, Zhengzhou
50
+ University, Zhengzhou 450002, China (e-mail:[email protected]).
51
+ F. Xiao is with Business School, Hubei University and School of Manage-
52
+ ment, Huazhong University of Science and Technology, Wuhan 430074, China
53
+ (e-mail: [email protected]).
54
+ Y. Zuo, W. Kuang and W. Wang are with the School of Electronic Information
55
+ and Communications, Huazhong University of Science and Technology, Wuhan
56
+ 430074, China (e-mail:{yingyingzuo, kuangwei, weiwangw}@hust.edu.cn).
57
+ to which DNN powered indoor localization is vulnerable to
58
+ adversarial attacks in the real world.
59
+ Despite the great importance, no existing study explores
60
+ over-the-air adversarial attacks on indoor localization DNNs in
61
+ the physical world. The prior work [12] investigates adversarial
62
+ attacks on indoor localization DNNs and simply adds perturba-
63
+ tion signals to original signals, much like generating adversarial images in the computer vision domain. However, additive perturbations cannot characterize the impact of Wi-Fi training
66
+ signals on CSI measurements, thus rendering them infeasible
67
+ in over-the-air attacks. Moreover, these approaches [13], [14]
68
+ trigger attacks by directly converting genuine CSI fingerprints
69
+ into targeted ones, which are suitable for attacking single-
70
+ antenna APs. Yet, they are physically unrealizable in widely-
71
+ used multi-antenna Wi-Fi systems due to the one-to-many
72
+ relationship between transmitting and receiving signals. In
73
+ addition, this study [15] proposes a CSI randomization approach
74
+ to distort device location information. Though this approach
75
+ can trigger untargeted adversarial attacks, it lacks the capability
76
+ of misleading location predictions close to chosen spots, i.e.,
77
+ targeted attacks. In addition, the random perturbations are not
78
+ smooth and will cause significant disturbance in the original
79
+ signals, rendering them easy to detect. Thus, no existing
80
+ work is suitable for launching adversarial attacks on Wi-Fi
81
+ fingerprinting DNNs in the real world.
82
+ In this paper, we investigate a new type of adversarial attack
83
+ that deceives indoor localization DNNs over realistic wireless
84
+ channels. In particular, our attack model includes a Wi-Fi AP
85
+ and an attacker. The AP holds a well-trained DNN for indoor
86
+ localization using uplink CSI signatures as inputs. The attacker,
87
+ i.e., a malicious client device, manipulates its Wi-Fi training
88
+ signals and transmits them to the AP over the air, with the
89
+ purpose of fooling the localization DNN. In this way, the
90
+ AP receives the falsified signals from the attacker, generates
91
+ perturbed uplink CSI signatures, and feeds them into the DNN
92
+ for device localization. As demonstrated in Fig. 1, over-the-air
93
+ attacks can raise severe security issues in wireless networks. An
94
+ outside attacker can be empowered to break the geofencing of a
95
+ Wi-Fi AP by camouflaging itself within authorized areas to gain
96
+ wireless connectivity. Moreover, an attacker can bypass Sybil
97
+ attack detection to deplete valuable bandwidth by pretending to be multiple fake clients at the same location [16], [17].
99
+ We argue that the major obstacle to realizing such over-the-
100
+ air adversarial attacks is that the uplink CSI estimated at the
101
+ victim AP is unknown to the attacker and thus effective channel
102
+ perturbations cannot be generated before each attack. To tackle
103
+ this problem, we observe that the similarity between uplink
104
+ and downlink CSIs can be exploited for launching adversarial
105
+ arXiv:2301.03760v1 [cs.CR] 10 Jan 2023
106
+
+ Fig. 1. Attack cases with over-the-air adversarial attacks: an outside attacker breaking the geofencing of an AP's authorized area (left), and an attacker bypassing Sybil attack detection by impersonating fake clients toward the AP's localization DNN (right).
122
+ attacks over the air. In Wi-Fi networks, downlink CSIs can be
123
+ easily obtained from the AP’s broadcasting packets, such as
124
+ beacon frames. When one attacker stays at one spot, its uplink
125
+ and downlink transmissions would experience similar multipath
126
+ propagations and thus have similar CSI fingerprints [18]. Hence,
127
+ the attacker can take advantage of accessible and informative
128
+ downlink CSIs to generate adversarial perturbations locally
129
+ without knowing the exact uplink CSIs that are fed into
130
+ localization DNNs by the AP.
131
+ Toward this end, we present FooLoc, a novel system that
132
+ fools localization DNNs by launching over-the-air adversar-
133
+ ial attacks. Specifically, before each attack, FooLoc takes
134
+ obtainable downlink CSIs as a reasonable substitute of the
135
+ corresponding uplink ones and trains an adversarial perturbation
136
+ locally. Then, it applies the well-trained perturbation on its
137
+ own transmitted signals for manipulating the corresponding
138
+ uplink CSI signatures received by the AP. In this way, FooLoc
139
+ is capable of deceiving the localization DNN to output desired
140
+ yet wrong location estimates over real wireless channels.
141
+ To realize the above idea, we address the following two
142
+ challenges.
143
+ 1) How to design realizable adversarial perturbations
144
+ that are suitable for Wi-Fi transmissions? Most adversarial
145
+ attacks are based on additive perturbations and require the
146
+ ability to individually alter each element of an input sample,
147
+ which, however, is physically unrealizable for over-the-air
148
+ perturbations. Specifically, in Wi-Fi communications, a physical
149
+ layer training symbol has a multiplicative relationship with a
150
+ channel response in the frequency domain [18], thus rendering
151
+ additive perturbations on Wi-Fi CSIs infeasible. Moreover, for
152
+ a multi-antenna receiver, one training symbol of each subcarrier
153
+ corresponds to multiple received symbols during channel
154
+ estimation, implying a one-to-many relationship between the
155
+ elements of one perturbation and one CSI measurement. Based
156
+ on the discovered multiplicative and repetitive properties, we
157
+ formulate the novel over-the-air perturbations on uplink CSIs
158
+ and further derive the adversarial perturbations for targeted
159
+ and untargeted attacks on indoor localization DNNs.
160
+ 2) How to efficiently craft imperceptible yet robust adver-
161
+ sarial perturbations under environmental noise? Due to the
162
+ random nature of environmental noise, two CSI measurements
163
+ from the same spot are unlikely to be exactly the same.
164
+ Consequently, one perturbation that is generated for one
165
+ specific CSI may not generalize well to another one. To
166
+ tackle this challenge, we propose a generalized objective
167
+ function integrating both targeted and untargeted attacks and
168
+ reasonably formulate the adversarial perturbation generation as
169
+ a box-constrained optimization problem. In this optimization
170
+ problem, we ensure the robustness of adversarial perturbations
171
+ by seeking a universal perturbation that works well on all
172
+ CSI measurements from the same spot and guarantee their
173
+ imperceptibility by maximizing the perturbation smoothness
174
+ and limiting the perturbation strength at the same time.
175
+ Moreover, to ease the difficulty of problem optimization, we
176
+ further transform the constrained problem into an equivalent
177
+ unconstrained one.
178
+ Summary of Results. We implement FooLoc using com-
179
+ mercial Wi-Fi APs for offline experiments and Wireless
180
+ Open-Access Research Platform (WARP) v3 boards [19] for
181
+ online experiments. In offline experiments, FooLoc obtains
182
+ attack success rates (ASRs) of 73.0% and 93.4% for targeted
183
+ and untargeted attacks, respectively, on average. In online
184
+ experiments, FooLoc achieves mean ASRs of 71.6% and 99.5%
185
+ for targeted and untargeted attacks, respectively. Moreover,
186
+ FooLoc has small perturbation-to-signal ratios (PSRs) of about
187
+ -18 dB in two settings.
188
+ Contributions. The main contributions of this work are
189
+ summarized as follows.
190
+ • We propose FooLoc, which exploits the similarity be-
191
+ tween uplink and downlink CSIs to launch over-the-air
192
+ adversarial attacks on Wi-Fi localization DNNs.
193
+ • We discover the multiplicative and repetitive impacts of
194
+ over-the-air perturbations on CSI fingerprints in Wi-Fi
195
+ localization systems.
196
+ • We propose an efficient algorithm to generate impercepti-
197
+ ble and robust adversarial perturbations against localization
198
+ DNNs over realistic Wi-Fi channels.
199
+ • We implement FooLoc on both commercial Wi-Fi APs and
200
+ WARP wireless platforms, respectively, to demonstrate its
201
+ effectiveness in different environments.
202
+ II. Attack Model and Wi-Fi CSI Signatures
203
+ A. Adversarial Attacks on Indoor Localization
204
+ In this paper, we consider a general Wi-Fi network, where
205
+ one fixed AP with multiple antennas provides wireless connec-
206
+ tivity for many single-antenna clients, such as smartphones and
207
+ vacuum robots. The AP has the capability of device localization
208
+ for delivering location based services, such as user monitoring
209
+ and access control. Moreover, we focus on deep learning (DL)
210
+ based indoor localization systems, which exploit accessible and
211
+ fine-grained Wi-Fi CSIs as location fingerprints. Considering
212
+ the randomness of CSI phases, most fingerprinting systems
213
+ rely on CSI amplitudes [3], [20]. Hence, such DL models are
214
+ assumed to accept CSI amplitudes as input features and output
215
+ 2D continuous-valued location estimations.
216
+ To fool such localization systems in reality, we consider the
217
+ over-the-air adversarial attacks by exploiting the vulnerabilities
218
+ of DNNs [7]. In this scenario, a malicious attacker, as a client
219
+ device, can not directly manipulate the input values of DL
220
+ models used by the AP. Instead, it can attack a DL model
221
+ only via modifying its own transmitted Wi-Fi signals. In this
222
+ paper, we mainly consider white-box DL models, of which
223
+ the attacker knows their exact structures as well as trained
224
+ parameters. For black-box models that are unknown to the
225
+
+ Fig. 2. Uplink and downlink CSI measurements (amplitude vs. subcarrier index) at three different spots. The distances of the 1st spot to the 2nd and 3rd spots are 0.3 m and 1.2 m, respectively.
282
+ attacker, we will discuss the feasibility of triggering adversarial
283
+ attacks on them in our offline experiment. Furthermore, the
284
+ attacker has no access to uplink CSI measurements that are used
285
+ for model training and testing. Yet, it has the ability to move
286
+ in the targeted area and collect corresponding downlink CSI
287
+ measurements. For example, the attacker could be a vacuum
288
+ robot, which moves between different spots to automatically
289
+ collect Wi-Fi CSI fingerprints [21], [22].
290
+ In addition, we assume that the attacker knows its own
291
+ location information when launching adversarial attacks for
292
+ misleading location based services provided by the AP. More-
293
+ over, we consider targeted and untargeted adversarial attacks
294
+ on localization DNNs. Specifically, in targeted attacks, the
295
+ attacker aims to force the localization model to output a location
296
+ estimate that is as close as possible to a chosen spot. When
297
+ comes to untargeted attacks, it only wants to be localized far
298
+ away from its true location.
299
+ Such over-the-air adversarial attacks can be exploited to
300
+ deceive localization DNNs [3], [20] for hampering security of
301
+ wireless networks. The example attack scenarios include 1)
302
+ breaking geofencing: a Wi-Fi AP holds a device localization
303
+ model and provides wireless connectivity only to clients that
304
+ are within a certain area. In this scenario, an attacker stays
305
+ outside of the area and can trigger over-the-air adversarial
306
+ attacks to camouflage itself inside authorized areas for gaining
307
+ wireless connectivity; 2) bypassing Sybil attacker detection: a
308
+ Wi-Fi AP uses a localization model to detect potential Sybil
309
+ attackers based on their locations. Using over-the-air adversarial
310
+ attacks, an attacker can masquerade as many fictitious clients that
311
+ are seemingly from different locations to deplete valuable
312
+ bandwidth at a low cost.
313
+ B. Wi-Fi CSI Fingerprints
314
+ Basically, channel state information characterizes the signal
315
+ propagation among a pair of Wi-Fi transceivers in a certain
316
+ environment. The IEEE 802.11n/ac/ax Wi-Fi protocols divide
317
+ a Wi-Fi channel into K orthogonal subcarriers and assign K
318
+ pre-defined long training field signals (LTFs) for them. For
319
+ the k-th subcarrier, the transmitter sends a training signal sk,
320
+ and accordingly the receiver obtains a signal yk. With the
321
+ knowledge of sk, the receiver can estimate the current channel
322
+ response hk between them as
+ h_k = y_k / s_k.   (1)
325
+ Due to multipath effects, each channel response hk can be
326
+ further modeled as the composition of one direct path and
327
+ multiple reflected ones [18], which can be formulated as
+ h_k = α_0 e^{j2π τ_0 f_k} + Σ_l α_l e^{j2π τ_l f_k} + n_k,   (2)
333
+ where nk is the complex Gaussian noise. Moreover, α0 and
334
+ τ0 represent the signal propagation attenuation and time delay
335
+ of the direct path, respectively, and αl and τl are those of
336
+ the l-th reflected path. From the above equation, we can
337
+ see that Wi-Fi CSI measurements are highly dependent on
338
+ transceiver locations as well as environmental reflectors. For
339
+ a fixed-position AP-client pair, uplink and downlink signals
340
+ would travel through the alike line-of-sight distances as well
341
+ as similar incident-reflecting paths. The above geometric
342
+ properties together contribute to nearly-identical path loss and
343
+ time delay, thus resulting in similar channel responses. Such
344
+ similarity enables an adversary to replace unknown uplink
345
+ CSIs with the corresponding downlink ones for generating
346
+ adversarial perturbations.
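+ As a quick sanity check of this reciprocity argument, the following minimal NumPy sketch (ours, not from the paper; the path gains, delays, and noise levels are assumed values) simulates Eq. (2) for an uplink and a downlink channel that share the same multipath parameters but have independent noise, and compares their amplitude profiles.
```python
import numpy as np

# Simulate Eq. (2): same multipath parameters for uplink and downlink,
# independent complex Gaussian noise per measurement (all values assumed).
rng = np.random.default_rng(0)
K = 56                                    # number of subcarriers
f = 2.412e9 + 312.5e3 * np.arange(K)      # assumed 20 MHz subcarrier grid
alphas = np.array([1.0, 0.4, 0.2])        # direct path + two reflections
taus = np.array([20e-9, 45e-9, 80e-9])    # path delays (seconds)

def channel(noise_std=0.02):
    h = sum(a * np.exp(1j * 2 * np.pi * t * f) for a, t in zip(alphas, taus))
    noise = noise_std * (rng.standard_normal(K) + 1j * rng.standard_normal(K))
    return h + noise

h_up, h_down = channel(), channel()       # reciprocal paths, independent noise
corr = np.corrcoef(np.abs(h_up), np.abs(h_down))[0, 1]
print(f"uplink/downlink amplitude correlation: {corr:.3f}")
```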
347
+ We conduct some preliminary studies to verify the similarity
348
+ between paired uplink and downlink CSI fingerprints. To
349
+ do this, we use two off-the-shelf Wi-Fi APs with Atheros
350
+ CSI Tool [23] to record CSI measurements of 56 subcarriers.
351
+ In our experiments, we fix one AP at a certain location
352
+ and place the other at three different spots. As plotted in
353
+ Fig. 2, we can observe that similar change patterns are shared
354
+ in uplink and downlink measurements corresponding to the
355
+ same spot. This is because when the locations of two APs
356
+ are fixed, the uplink and downlink signals would experience
357
+ similar multipath propagations as indicated in Eq. (2). It is
358
+ worth noting that the occurrence of multiple clusters of CSI
359
+ measurements in each subfigure is caused by automatic gain
360
+ control on the receiver side for maintaining a suitable power
361
+ level. In addition, it also can be found that the similarity
362
+ in CSI measurements increases as the distance between two
363
+ spots decreases. The above observations verify that uplink and
364
+ downlink CSI measurements are highly similar, providing an
365
+ exciting opportunity to launch over-the-air adversarial attacks
366
+ on DL indoor localization systems.
367
+ III. Over-The-Air Adversarial Attacks
368
+ A. Overview of FooLoc
369
+ FooLoc is a novel system that fools Wi-Fi CSI fingerprinting
370
+ localization DNNs via launching over-the-air adversarial attacks.
371
+ As depicted in Fig. 3, FooLoc runs on the attacker and helps it to
372
+ spoof the localization DNN used by the AP. Specifically, before
373
+ each attack, the attacker first stays at one spot and receives
374
+ downlink packets, such as beacon and acknowledgment (ACK)
375
+ frames [24], from the targeted AP. Then, FooLoc generates a
376
+ set of well-crafted adversarial weights based on its knowledge
377
+ of the victim model. After that, it multiplies the adversarial
378
+ weights with genuine LTFs and sends their product results to
379
+ the AP over the air. Once receiving these signals, the AP feeds
+ Fig. 3. Workflow of FooLoc for launching over-the-air adversarial attacks on DL indoor localization: the attacker collects downlink CSIs, generates adversarial weights via over-the-air perturbation design and adversarial weight optimization, multiplies them with LTFs, and the AP feeds the resulting perturbed CSIs to its localization DNN.
411
+ the perturbed CSI signatures to its DL localization model,
412
+ which will consequently output a wrong estimation that is
413
+ desired by the attacker. The main advantages of FooLoc are
414
+ that it has small perturbations with respect to original signals
415
+ and remains unharmful to message demodulation at the AP.
416
+ As shown in Fig. 3, the core components of FooLoc include
417
+ Over-The-Air Perturbation Design and Adversarial Weight
418
+ Optimization.
419
+ • Over-The-Air Perturbation Design. First, we investigate
420
+ the multiplicative and repetitive properties of over-the-air
421
+ perturbations and formalize their impacts on uplink CSI
422
+ measurements. Then, we define the notions of adversarial
423
+ examples as well as targeted and untargeted adversarial
424
+ attacks on wireless localization. Additionally, we prove
425
+ that our adversarial perturbation remains unharmful to the
426
+ payload decoding at the AP.
427
+ • Adversarial Weight Optimization. First, we detail our
428
+ attack strategy and propose a generalized objective func-
429
+ tion that integrates both targeted and untargeted attacks.
430
+ Then, adversarial attacks on DL localization models are
431
+ formulated as a box-constrained problem that minimizes
432
+ the objective function while satisfying the constraints of
433
+ robustness, imperceptibility as well as efficiency. Moreover,
434
+ we carefully transform the above constrained problem into
435
+ an equivalent unconstrained one for easing the difficulty
436
+ of problem optimization.
437
+ B. Over-The-Air Perturbation Design
438
+ In this subsection, we first investigate the unique multiplica-
439
+ tive and repetitive properties of over-the-air perturbations and
440
+ define adversarial examples in indoor localization.
441
+ Multiplicative Property. Most of the prior studies on
442
+ wireless adversarial attacks synthesize an adversarial example
443
+ xad for each genuine sample x using an additive perturbation r
444
+ likewise generating adversarial images in the computer vision
445
+ domain as xad = x+r. However, it is inapplicable for performing
446
+ over-the-air attacks in real-world wireless channels. In over-
447
+ the-air attacks, the attacker can change model inputs only via
448
+ multiplicative perturbations. The reason stems from the fact
449
+ that a received signal is the product of a channel response and
450
+ a transmitted signal in the frequency domain [18]. Hence, one
451
+ uplink CSI measurement has a proportional relationship with
452
+ the perturbed training signals as indicated in Eq. (1).
+ Fig. 4. Illustration of over-the-air adversarial perturbations from the attacker to the victim AP: the attacker multiplies its training symbols by the perturbation, and the AP's channel estimation produces a perturbed CSI over the wireless channel.
487
+ Specifically, as depicted in Fig. 4, when attempting to
488
+ launch over-the-air attacks, FooLoc first generates a real-valued
489
+ multiplicative perturbation set γ = [γ1, · · · , γk, · · · , γK] ∈ R1×K
490
+ for its K-element training sequence s = [s1, · · · , sk, · · · , sK] ∈
491
+ C1×K, which is known by the AP. Then, the scaled sequence
492
+ st ∈ C1×K can be obtained as
+ s_t = γ ⊙ s = [γ_1 s_1, · · · , γ_k s_k, · · · , γ_K s_K],   (3)
495
+ where ⊙ is the Hadamard product for element-wise multiplication.
496
+ Then, FooLoc transmits st to the victim AP over realistic
497
+ wireless channels. When hearing the signal, the AP with N
498
+ antennas receives a measurement ˆY ∈ CN×K and estimates their
499
+ uplink channel ˆH ∈ CN×K using Eq. (1). Therein, each entry of
500
+ ˆH can be denoted as ˆhn,k, representing the perturbed channel
501
+ response between the client and the n-th AP antenna at the k-th
502
+ subcarrier. Let us assume that the corresponding true channel
503
+ estimation is H ∈ CN×K with each entry denoted as hn,k. From
504
+ Eq. (1), we can have
+ ĥ_{n,k} = ŷ_{n,k} / s_k = (h_{n,k} γ_k s_k) / s_k = γ_k h_{n,k}.   (4)
511
+ According to Eq. (4), we can see that hn,k, as the original
512
+ channel response, is proportionally perturbed by γk, suggesting
513
+ that over-the-air perturbations have a multiplicative effect on
514
+ uplink CSI measurements.
515
+ Using such multiplicative weights, FooLoc can easily manip-
516
+ ulate uplink CSI measurements through the standard channel
517
+ estimation process as depicted in Fig. 4, which lays the
518
+ foundation for further over-the-air attacks.
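+ The multiplicative effect in Eq. (4) can be reproduced with a few lines of NumPy; the sketch below (ours, with assumed stand-in values for the LTF symbols and the channel) shows that standard channel estimation at the AP returns γ_k h_{n,k} when the attacker scales its training symbols by γ_k.
```python
import numpy as np

# Eq. (1) and Eq. (4): scaling the LTF symbol s_k by gamma_k scales the
# AP's channel estimate by gamma_k (single antenna, noise omitted).
rng = np.random.default_rng(1)
K = 56
s = np.where(rng.integers(0, 2, K) == 1, 1.0 + 0j, -1.0 + 0j)  # stand-in BPSK LTF symbols
h = rng.standard_normal(K) + 1j * rng.standard_normal(K)       # true channel responses
gamma = 1.0 + 0.1 * rng.standard_normal(K)                     # real multiplicative weights

y = h * (gamma * s)            # received perturbed training signal
h_hat = y / s                  # standard channel estimation, Eq. (1)
assert np.allclose(h_hat, gamma * h)   # perturbed estimate, Eq. (4)
```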
519
+ Repetitive Property. Given the multiplicative perturbation,
520
+ we proceed to investigate the unique pattern of our perturbation
521
+ weights received by the AP. Existing studies on adversarial
522
+ attacks create different perturbation weights for different input
523
+ elements. Yet, this is not the case for adversarial attacks over
524
+ wireless channels.
525
+ As illustrated in Fig. 4, the uplink transmission from
526
+ the attacker to the AP can be modeled as a single-input-
527
+ multiple-output (SIMO) channel, which suggests a one-to-many
528
+ relationship between the elements of one perturbation γ and
529
+ the perturbed CSI measurement ˆH. Mathematically, given the
530
+ perturbation weight γk, the k-th column of ˆH represents all
531
+ estimated channel responses for the k-th subcarrier and can be
532
+
534
+ further written as
+ ĥ_k = [ĥ_{1,k}, · · · , ĥ_{N,k}]^T = [γ_k h_{1,k}, · · · , γ_k h_{N,k}]^T = γ_k [h_{1,k}, · · · , h_{N,k}]^T.   (5)
555
+ The above equation shows that all receiving antennas share
556
+ the same perturbation weight with respect to each subcarrier.
557
+ Hence, the overall received perturbation weights Γ ∈ RN×K on
558
+ ˆH have a repetitive pattern as
+ Γ = J_{N×1} ⊗ γ = [γ_1, · · · , γ_K; · · · ; γ_1, · · · , γ_K]  (an N×K matrix whose rows all equal γ),   (6)
573
+ where JN×1 is the all-ones matrix with a size of N × 1 and ⊗
574
+ denotes the Kronecker product that helps γ expanding in the
575
+ vertical dimension in Eq. (6).
576
+ With the observations of multiplicative weights and repetitive
577
+ patterns, we can finally formulate the impact of FooLoc’s
578
+ perturbations on uplink CSIs as
+ Ĥ = (J_{N×1} ⊗ γ) ⊙ H.   (7)
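+ The following minimal sketch (assumed shapes: N = 2 antennas, K = 56 subcarriers) verifies Eq. (7) numerically: the Kronecker expansion of γ over the antenna dimension is exactly what NumPy broadcasting produces.
```python
import numpy as np

# Eq. (5)-(7): every receive antenna shares the same gamma_k per subcarrier,
# so H_hat = (J_{Nx1} kron gamma) ⊙ H, which equals plain broadcasting.
rng = np.random.default_rng(2)
N, K = 2, 56
H = rng.standard_normal((N, K)) + 1j * rng.standard_normal((N, K))
gamma = 1.0 + 0.1 * rng.standard_normal((1, K))

Gamma = np.kron(np.ones((N, 1)), gamma)   # repetitive pattern of Eq. (6)
H_hat = Gamma * H                         # Hadamard product of Eq. (7)
assert np.allclose(H_hat, gamma * H)      # broadcasting yields the same matrix
```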
581
+ Adversarial Perturbations. Next, we define the notion of
582
+ over-the-air adversarial examples in the context of indoor
583
+ localization. Let P ∈ R2 be the 2D area, where the AP
584
+ provides wireless connectivity. We denote fθ(·) : X → P as the
585
+ localization DNN used by the AP, where θ stands for the already
586
+ trained parameters using uplink CSI fingerprints X^u_A that are
588
+ collected at a set of reference spots A ⊂ P. Therein, each input
589
+ sample Xu ∈ RN×K represents the amplitudes of one uplink CSI.
590
+ Moreover, we assume that our attacker is located at a location
591
+ p ∈ P, i.e., the genuine spot, and manipulates its uplink channel
592
+ using a perturbation γp. Considering that amplitude features
593
+ are essentially the absolute values of complex-valued channel
594
+ responses, the real-valued perturbation weights in Eq. (7) will
595
+ have the same linear scaling effect on corresponding CSI
596
+ amplitudes. Using this property, we can derive our adversarial
+ example X̂^u_p as
+ X̂^u_p = (J_{N×1} ⊗ γ_p) ⊙ X^u_p,   (8)
+ where X^u_p represents the true uplink CSI amplitudes.
605
+ Based on the above notion of adversarial examples, we
606
+ further define the adversarial perturbations for targeted and
607
+ untargeted attacks, respectively, on indoor localization DNNs.
608
+ In the targeted case, one successful perturbation γ_p would mislead the location estimate f_θ(X̂^u_p) as close as possible to a targeted spot q ∈ P, where q ≠ p. That is, we seek a perturbation γ_p such that
+ D( f_θ( (J_{N×1} ⊗ γ_p) ⊙ X^u_p ), q ) ≤ d_max,   (9)
+ where D(·, ·) is the Euclidean distance and d_max represents the acceptable maximal distance error. In the untargeted case, an adversarial perturbation γ_p would push f_θ(X̂^u_p) as far away from the genuine location p as possible. Similarly, given the acceptable minimal distance error d_min, we expect a perturbation γ_p satisfying
+ D( f_θ( (J_{N×1} ⊗ γ_p) ⊙ X^u_p ), p ) ≥ d_min.   (10)
642
+ We will specify the configurations of two acceptable distance er-
643
+ rors dmin and dmax and verify the validity of such configurations
644
+ in our experiments.
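+ For concreteness, a hypothetical helper that checks the success conditions (9) and (10) for a single prediction might look as follows (the spots and thresholds are illustrative values, not the ones used in the paper).
```python
import numpy as np

# Hypothetical helper: success tests for the targeted (9) and untargeted (10) cases.
def attack_succeeds(pred, p, q=None, d_max=0.75, d_min=3.0):
    if q is not None:                                   # targeted: land within d_max of q
        return float(np.linalg.norm(pred - q)) <= d_max
    return float(np.linalg.norm(pred - p)) >= d_min     # untargeted: leave a d_min ball around p

pred = np.array([2.0, 3.1])
print(attack_succeeds(pred, p=np.array([6.0, 1.0]), q=np.array([2.2, 3.0])))  # True
print(attack_succeeds(pred, p=np.array([6.0, 1.0])))                          # True (distance ~4.5 m)
```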
645
+ Impact on Message Demodulation. One of the major
646
+ benefits of our multiplicative perturbation γ defined in Eq. (7)
647
+ is that it has no impact on message demodulation at the AP.
648
+ Specifically, in each packet transmission, FooLoc not only
649
+ applies the multiplicative perturbations on pre-defined LTF
650
+ symbols s, but also uses them accordingly on the subsequent
651
+ payload signal u = [u1, · · · , uk, · · · , uK] ∈ C1×K. After that, the
652
+ perturbed payload will go through the same real channel as
653
+ the perturbed training sequence. In this way, although the AP
654
+ obtains a fake CSI response, the original message is perturbed
655
+ in the same way. Thus, based on the perturbed response
656
+ ˆhn,k in Eq. (4), the payload signal uk still can be correctly
657
+ decoded from the received signals hn,kγkuk. This process can
658
+ be mathematically expressed as
+ h_{n,k} γ_k u_k / ĥ_{n,k} = (h_{n,k} γ_k u_k) / (γ_k h_{n,k}) = u_k.   (11)
665
+ Hence, our adversarial perturbations remain unharmful to the
666
+ message transmission from the attacker to the AP. The only
667
+ impact of such perturbations is that the AP feeds falsified CSIs
668
+ to its localization DNN.
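+ The sketch below (ours; the QPSK payload and noise-free channel are simplifying assumptions) reproduces Eq. (11): because the payload is scaled by the same γ as the LTFs, equalizing with the perturbed channel estimate recovers the payload exactly.
```python
import numpy as np

# Eq. (11): payload equalized with the perturbed channel estimate is unchanged.
rng = np.random.default_rng(3)
K = 56
h = rng.standard_normal(K) + 1j * rng.standard_normal(K)
gamma = 1.0 + 0.1 * rng.standard_normal(K)
qpsk = np.array([1 + 1j, 1 - 1j, -1 + 1j, -1 - 1j]) / np.sqrt(2)
u = rng.choice(qpsk, K)                     # assumed QPSK payload symbols

h_hat = gamma * h                           # perturbed estimate from the LTFs, Eq. (4)
y_payload = h * (gamma * u)                 # perturbed payload through the true channel
u_hat = y_payload / h_hat                   # standard equalization at the AP
assert np.allclose(u_hat, u)                # message demodulation is unharmed
```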
669
+ C. Adversarial Weight Optimization
670
+ In this subsection, we first detail our attack strategy
671
+ and formulate adversarial perturbation generation as a box-
672
+ constrained optimization problem. Then, we transform it into
673
+ an unconstrained one.
674
+ Attack Strategy. Since uplink CSI measurements are un-
675
+ known to the attacker, one possible attack strategy is to
676
+ blindly manipulate its LTF symbols in a brute-force manner.
677
+ However, such an approach is prohibitively inefficient and
678
+ time-consuming. Instead of blindly searching, FooLoc exploits
679
+ the accessible and informative downlink CSI measurements,
680
+ which can be easily obtained from the AP’s beacon or ACK
681
+ packets in Wi-Fi networks [24]. Concretely, when our attacker
682
+ stays at the genuine spot p, it first collects some downlink
683
+ CSI measurements and obtains a set of amplitude features
684
+ X^d_p, where X^d_p ∈ R^{N×K}. Then, FooLoc simulates the over-the-air attacks using Eq. (8) and optimizes the perturbation weights based on X^d_p. After that, it multiplies the optimized
690
+ weights γp with the pre-defined training sequence s and sends
691
+ their product results to the AP for attacking its localization
692
+ model fθ(·). Because uplink and downlink channel responses
693
+ are similar as aforementioned, the perturbation weights learned
694
+ from downlink CSI measurements are expected to generalize
695
+ well to uplink ones.
696
+ Problem Formulation. With the above attack strategy,
697
+ we first integrate both targeted attacks (9) and untargeted
698
+ attacks (10) in wireless localization into one objective function
+ J(γ_p, f_θ) as
+ J(γ_p, f_θ) ≜ (1 − ω) E_{X^d_p}[ [ D( f_θ( (J_{N×1} ⊗ γ_p) ⊙ X^d_p ), q ) − d_max ]^+ ]
+             + ω E_{X^d_p}[ [ d_min − D( f_θ( (J_{N×1} ⊗ γ_p) ⊙ X^d_p ), p ) ]^+ ].   (12)
733
+
734
+ 6
735
+ Feature space
736
+ Euclidean space
737
+ F
738
+ Focused
739
+ H
740
+ C
741
+ B
742
+ I
743
+ A
744
+ G
745
+ D
746
+ J
747
+ E
748
+ F
749
+ H
750
+ C
751
+ B
752
+ I
753
+ A
754
+ G
755
+ D
756
+ J
757
+ E
758
+ Attention
759
+ Mapping
760
+ Fig. 5. Illustration of FooLoc’s attention scheme for targeted attacks during
761
+ perturbation optimization.
762
+ Therein, ω indicates the attack type and takes values in the set
763
+ {0, 1}, where ω = 0 stands for targeted attacks and ω = 1 is for
764
+ untargeted attacks. E_{X^d_p}[·] is the expectation over the dataset X^d_p and [a]^+ = max(a, 0) denotes the positive part of a.
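+ A minimal PyTorch rendering of Eq. (12) could look as follows; `model`, the downlink batch `Xd` of shape (M, N, K), and the 2-D spots `p` and `q` are assumed to be defined elsewhere, and this is a sketch rather than the authors' implementation.
```python
import torch

# Sketch of J(gamma_p, f_theta) in Eq. (12); `model`, `Xd` (M x N x K),
# and the spots p, q are assumed to exist elsewhere.
def objective(gamma_p, model, Xd, p, q, d_min, d_max, omega):
    X_adv = gamma_p.view(1, 1, -1) * Xd                    # (J ⊗ gamma) ⊙ X via broadcasting
    preds = model(X_adv)                                   # (M, 2) location estimates
    dist_q = torch.norm(preds - q, dim=-1)                 # distances to the targeted spot
    dist_p = torch.norm(preds - p, dim=-1)                 # distances to the genuine spot
    targeted = torch.clamp(dist_q - d_max, min=0).mean()   # [.]^+ then expectation
    untargeted = torch.clamp(d_min - dist_p, min=0).mean()
    return (1 - omega) * targeted + omega * untargeted
```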
767
+ Using this objective function, we formulate the problem of
768
+ adversarial attacks on the localization model fθ(·) as
+ minimize_{γ_p}  J(γ_p, f_θ) + β ∥Δγ_p∥_2,   (13)
+ subject to  ∥γ_p − J_{1×K}∥_∞ < δ_max < 1.   (14)
+ Therein, Δγ_p = [γ_{p,i} − γ_{p,i−1}]_{i=2,··· ,K} is the difference vector of γ_p and β denotes a hyperparameter. In addition, ∥a∥_2 is
785
+ the l2 norm and ∥a∥∞ = max (|a1|, · · · , |an|) is the l∞ norm. In
786
+ the following, we explain the design rationale of the above
787
+ box-constrained problem.
788
+ Robustness. When ω = 0 in the objective function J (·), we
789
+ minimize the average error between the distance D( f_θ(X̂^d_p), q ) and the threshold d_max over the entire downlink CSI dataset X^d_p. This is because, owing to the random nature of environmental
797
+ noise in Wi-Fi CSI signatures, two CSI instances from one
798
+ spot are unlikely to be exactly the same. As a consequence, the
799
+ perturbation that is crafted for a specific CSI sample may have
800
+ little effect on another one with a high probability. To boost
801
+ the robustness of our adversarial perturbations, FooLoc seeks
802
+ a universal perturbation that causes all the samples in X^d_p to
804
+ be estimated at a neighboring area of the targeted location q.
805
+ The same reason holds for the untargeted attacks when ω = 1.
806
+ Imperceptibility. The second term in Eq. (13) and the
807
+ constraint in Eq. (14) together guarantee the imperceptibility
808
+ of our adversarial perturbations. Specifically, ∥∆γp∥2 quantifies
809
+ the smoothness of one perturbation γp by measuring the
810
+ difference between its consecutive weights. The smaller the
811
+ difference, the smoother the perturbation. In the extreme case
812
+ ∥∆γp∥2 = 0, γp shall be a constant. In this condition, γp
813
+ has the same linear scaling effect on each element of one
814
+ CSI measurement and can not manipulate its changing trends.
815
+ Moreover, the constraint (14) limits the perturbation strength
816
+ and makes sure that FooLoc always searches a perturbation
817
+ γp within the l∞ norm ball with a radius δmax centering at
818
+ J_{1×K} during the optimization process. The choice of the l_∞ norm in Eq. (14) ensures that each adversarial weight γ_{p,k} in γ_p satisfies 1 − δ_max < γ_{p,k} < 1 + δ_max. Together, the two designs guarantee a minimally perturbed signal X̂^d_p that looks similar to the original signal X^d_p when received by the AP.
825
+ Efficiency. At each optimization step, not all samples are
826
+ necessary for updating perturbation weights. Without loss
+ Fig. 6. Illustration of the weight transformation in problem optimization, mapping the domain of the constrained problem to that of the unconstrained problem. For simplicity, one element of γ_p is shown.
848
+ of generality, we take ω = 0, i.e., the targeted attacks, for illustration. Let R_max ≜ {X : D( f_θ(X), q ) < d_max} be the set
850
+ of amplitude features, whose location estimates are within
851
+ Bdmax(q) ⊂ P, i.e., the ball with a radius of dmax centering
852
+ at the targeted spot q in the Euclidean space. After some
853
+ optimization steps, a part of perturbed CSI samples may have
854
+ already been mapped in Bdmax(q) by fθ(·), i.e., the green circles
855
+ in the Euclidean space in Fig. 5. In this condition, these samples
856
+ are unnecessary for optimizing new perturbation weights in the
857
+ next step. Based on this observation, we devise an attention
858
+ scheme to enhance the efficiency of our optimization problem.
859
+ In particular, FooLoc uses the operator [·]^+ in J(γ_p, f_θ) to
864
+ discriminate whether location estimates are inside or outside of
865
+ Bdmax(q). Then, it strategically pays attention to outside samples
866
+ and ignores inside ones. This operation will generally decrease
867
+ the number of needed samples at each optimization step and
868
+ thus lead to a lower overall computational overhead.
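+ In code, this attention scheme is simply a positive-part (hinge) operation: samples whose estimates are already inside the target ball contribute zero loss and hence zero gradient. A tiny illustrative PyTorch snippet (with assumed distances) is shown below.
```python
import torch

# The [.]^+ operator acts as an attention mask: already-successful samples
# (distance to q below d_max) produce zero loss and thus no gradient.
dist_to_q = torch.tensor([0.4, 1.6, 0.9, 2.3])   # assumed distances of one mini-batch
d_max = 0.75
active_loss = torch.clamp(dist_to_q - d_max, min=0)
print(active_loss)   # tensor([0.0000, 0.8500, 0.1500, 1.5500])
```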
869
+ Problem Optimization. With the optimization problem (13),
870
+ we proceed to design a dedicated optimization scheme for gen-
871
+ erating our adversarial perturbations. Because our perturbations
872
+ are multiplicative rather than additive, traditional perturbation
873
+ generation algorithms, such as the well-known fast gradient
874
+ sign method (FGSM) [8], are inapplicable for our optimization
875
+ problem. Thus, we need to directly solve the problem (13)
876
+ using other general gradient based optimization methods, such
877
+ as stochastic gradient descent (SGD) and adaptive moment
878
+ estimation (Adam). However, the constraint term (14) restricts
879
+ the domain of the objective function J (·) in the space
880
+ (1 − δmax, 1 + δmax)1×K and makes the optimization problem
881
+ as a box-constrained one, which is not naively supported by
882
+ such gradient-based optimization methods.
883
+ To deal with this issue, we transform the box-constrained
884
+ problem (13) into an equivalent unconstrained one for easing its
885
+ optimization difficulty. To do this, we first make γp satisfying
886
+ the constraint (14) via the transformation as
+ γ_p = tanh(ξ) · δ_max + J_{1×K},   (15)
+ where ξ ∈ R^{1×K}. Moreover, tanh(x) = (e^x − e^{−x}) / (e^x + e^{−x}) is the hyperbolic
891
+ tangent function with the range (−1, 1). As illustrated in Fig. 6,
892
+ each element γp,k in γp is naturally confined to the interval
893
+ (1 − δmax, 1 + δmax) using the above transformation, which is
894
+ equivalent to the constraint ∥γp − J1×K∥∞ < δmax. Then, we
895
+ substitute γp with Eq. (15) in the original problem (13), which
896
+ will convert the domain of J (·) into the space R1×K. In this way,
897
+
+ Algorithm 1 Over-the-air adversarial attacks on DL localization models.
+ Input: Downlink CSI samples X^d_p, the DL localization model f_θ(·), the genuine and targeted spots {p, q}, the acceptable distance errors {d_min, d_max} and the attack type ω
+   ξ ← random(1, K) ∈ R^{1×K}   ▶ initialization
+   for the number of training iterations do
+     Sample a mini-batch of training data {X^d_{p,i}}^M_{i=1} from X^d_p
+     Generate adversarial examples:
+       γ_p ← tanh(ξ) · δ_max + J_{1×K}
+       X̂^d_{p,i} ← (J_{N×1} ⊗ γ_p) ⊙ X^d_{p,i}
+     Update parameters ξ:
+       if ω = 0 then
+         ξ ← ξ − η ∇_ξ [ (1/M) Σ^M_i [ D( f_θ(X̂^d_{p,i}), q ) − d_max ]^+ + β ∥Δγ_p∥_2 ]
+       end if
+       if ω = 1 then
+         ξ ← ξ − η ∇_ξ [ (1/M) Σ^M_i [ d_min − D( f_θ(X̂^d_{p,i}), p ) ]^+ + β ∥Δγ_p∥_2 ]
+       end if
+   end for
+   Generate and transmit perturbed LTFs and payload signals:
+     s_t ← (tanh(ξ) · δ_max + J_{1×K}) ⊙ s
+     u_t ← (tanh(ξ) · δ_max + J_{1×K}) ⊙ u
960
+ we obtain an equivalent unconstrained problem of adversarial
961
+ perturbation generation as
+ minimize_{ξ ∈ R^{1×K}}  J(γ_p, f_θ) + β ∥Δγ_p∥_2,   (16)
+ where γ_p = tanh(ξ) · δ_max + J_{1×K}.   (17)
972
+ In this condition, we can leverage traditional gradient-based
973
+ methods to solve the optimization problem (16).
974
+ At last, FooLoc can apply the well-trained adversarial
975
+ weights on pre-defined LTF symbols as well as payload signals
976
+ and transmit their product results over wireless channels to fool
977
+ the localization DNN fθ(·) at the AP. The way to launch our
978
+ over-the-air adversarial attacks is summarized in Algorithm 1.
979
+ In our experiments, we empirically set δmax = 0.15 and use
980
+ the SGD optimizer for searching optimal perturbation weights.
981
+ IV. Evaluation
982
+ A. Victim DNNs and Evaluation Metrics
983
+ Victim DNNs. To evaluate FooLoc, we build two victim
984
+ localization models, i.e., DNNA and DNNB, using mainstream
985
+ neural network architectures. In particular, both DNNA and
986
+ DNNB are set as regression models, which take raw multi-
987
+ dimensional CSI samples as inputs and output a continuous-
988
+ valued location estimate. The structures and parameters of two
989
+ DNNs are present in Table I. As the table shows, DNNA is
990
+ a fully connected neural network (FCNN). It first normalizes
991
+ each sample element into the interval [0, 1] along the antenna
992
+ dimension for effective inference [25] and flattens a normalized
993
+ sample into a one-dimensional tensor. Then, DNNA leverages
994
+ six fully connected (fc) layers to extract hidden features
995
+ and predicts the corresponding device location. DNNB is a
996
+ convolutional neural network (CNN) and consists of three
997
+ TABLE I
+ The Structures and Parameters of Victim DNNs Used in Our Experiments.
+ Layer            | DNNA                | DNNB
+ Pre-processing   | Normalize & Flatten | Normalize
+ #1               | fc1024, Linear      | conv256@1×1, ReLU
+ #2               | fc512, ReLU         | conv128@1×1, ReLU
+ #3               | fc1024, Linear      | conv128@1×1, ReLU
+ #4               | fc512, ReLU         | fc512, ReLU
+ #5               | fc1024, Linear      | fc256, ReLU
+ #6               | fc2, Sigmoid        | fc2, Sigmoid
1023
+ convolutional (conv) layers and three fully connected layers. It
1024
+ also performs data normalization before feeding CSI samples
1025
+ into its convolutional layers. In addition, we build DNNA and
1026
+ DNNB on the PyTorch framework.
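+ One plausible PyTorch reading of Table I is sketched below; the layer widths follow the table, while the exact input shaping and the placement of activations at the boundaries are our assumptions rather than the authors' released code.
```python
import torch.nn as nn

class DNN_A(nn.Module):                      # FCNN on flattened 2 x 56 CSI amplitudes
    def __init__(self, n_ant=2, n_sub=56):
        super().__init__()
        self.net = nn.Sequential(
            nn.Flatten(),
            nn.Linear(n_ant * n_sub, 1024),            # #1, linear activation
            nn.Linear(1024, 512), nn.ReLU(),           # #2
            nn.Linear(512, 1024),                      # #3
            nn.Linear(1024, 512), nn.ReLU(),           # #4
            nn.Linear(512, 1024),                      # #5
            nn.Linear(1024, 2), nn.Sigmoid())          # #6: 2-D location in [0, 1]^2
    def forward(self, x):                    # x: (batch, n_ant, n_sub), already normalized
        return self.net(x)

class DNN_B(nn.Module):                      # CNN with 1x1 convolutions (assumed input layout)
    def __init__(self, n_ant=2, n_sub=56):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(1, 256, 1), nn.ReLU(),
            nn.Conv2d(256, 128, 1), nn.ReLU(),
            nn.Conv2d(128, 128, 1), nn.ReLU())
        self.fc = nn.Sequential(
            nn.Flatten(),
            nn.Linear(128 * n_ant * n_sub, 512), nn.ReLU(),
            nn.Linear(512, 256), nn.ReLU(),
            nn.Linear(256, 2), nn.Sigmoid())
    def forward(self, x):                    # x: (batch, n_ant, n_sub), already normalized
        return self.fc(self.conv(x.unsqueeze(1)))
```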
1027
+ Evaluation Metrics. We use the following metrics to
1028
+ measure FooLoc’s performance.
1029
+ • Localization Error (LE). Given a localization model fθ(·)
1030
+ and an input sample X^u_g from the ground-truth spot g, the LE to g is computed as
+ D( f_θ(X^u_g), g ) = ∥ f_θ(X^u_g) − g ∥_2.
1040
+ • Attack Success Rate (ASR). Given a set of perturbed
1041
+ uplink CSIs X̂^u_p = {X̂^u_{p,m}}_{m=1:M} pertaining to the attacker's true spot p and an adversarial perturbation γ_p, the ASR of targeted attacks with a targeted spot q is
+ Σ_m 1( D( f_θ(X̂^u_{p,m}), q ) − d_max ≤ 0 ) / M,
+ where 1(·) denotes the indicator function and d_max is the acceptable maximal distance error. It represents the probability that a perturbed location estimate f_θ(X̂^u_{p,m}) falls inside the ball of radius d_max centered at the targeted spot q. Similarly, the ASR of untargeted attacks is given as
+ Σ_m 1( D( f_θ(X̂^u_{p,m}), p ) − d_min ≥ 0 ) / M,
1088
+ where dmin is the acceptable minimal distance error.
1089
+ It indicates the probability that a perturbed location estimate f_θ(X̂^u_{p,m}) falls outside the ball of radius d_min centered at the true spot p.
1096
+ • Perturbation-To-Signal Ratio (PSR). Given the perturbed uplink CSI X̂^u_p and the corresponding original one X^u_p at the genuine spot p, the PSR is computed as
+ PSR = 20 log_10 ( ∥X̂^u_p − X^u_p∥_2 / ∥X^u_p∥_2 ).
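+ The three metrics are straightforward to compute; the hypothetical NumPy helpers below (ours, with illustrative argument conventions) follow the definitions above.
```python
import numpy as np

# Hypothetical helpers implementing LE, ASR, and PSR as defined above.
def localization_error(pred, g):
    return float(np.linalg.norm(pred - g))

def attack_success_rate(preds, ref, threshold, targeted=True):
    dists = np.linalg.norm(preds - ref, axis=1)       # preds: (M, 2), ref: (2,)
    hits = dists <= threshold if targeted else dists >= threshold
    return float(hits.mean())

def psr_db(X_perturbed, X_original):
    return 20.0 * np.log10(np.linalg.norm(X_perturbed - X_original) / np.linalg.norm(X_original))
```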
1107
+ B. Offline Experiments
1108
+ In this subsection, we conduct our offline experiments, in
1109
+ which both uplink and downlink CSI measurements are first
1110
+ collected in real-world environments. In this setting, the attacker
1111
+ optimizes adversarial perturbations using downlink CSIs and
1112
+ then applies the learned perturbations directly on the collected
1113
+ uplink ones based on Eq. (8) to spoof localization DNNs.
1114
+ Implementation. In offline experiments, we implement
1115
+ FooLoc using two TL-WDR4310 Wi-Fi routers and one Lenovo
1116
+
+ Fig. 7. Floor plan of the experiment environment (a 12 m × 18 m room with A spots and B spots) and the experimental platform (an AP with 2 antennas and a client with 1 antenna, both Wi-Fi routers using the Atheros CSI Tool) in offline experiments.
+ Fig. 8. Illustration of our attack methodology adopted in offline experiments: B spots with 1.5 m spacing; targeted attacks pick targeted spots outside a ball of radius 90th percentile LE + 0.75 m with d_max = 0.75 m, and untargeted attacks use d_min = 90th percentile LE + 0.75 m.
1142
+ laptop. Specifically, one router with two antennas is fixed at
1143
+ one spot to act as an AP, and the other is equipped with one
1144
+ antenna to work as a mobile client to communicate with the
1145
+ AP from different spots. Moreover, we connect the laptop with
1146
+ two routers via Ethernet cables and run Atheros CSI Tool [23].
1147
+ Using this tool, each router is set to work at the 2.4 GHz Wi-Fi
1148
+ band and record channel responses of 56 subcarriers. Hence,
1149
+ one CSI sample has a size of 1 × 2 × 56.
1150
+ Data Collection. We collect CSI measurements in a 12 ×
1151
+ 18 m2 meeting room as shown in Fig. 7. The AP is placed at
1152
+ one end of the room to avoid isotropy for better localization
1153
+ performance [3], [20]. We move the client among 40 selected
1154
+ locations with a spacing distance of 1.5 m, i.e., A spots in Fig. 7,
1155
+ to collect uplink CSI measurements at the AP. Accordingly,
1156
+ we choose 40 locations around A spots, i.e., B spots in Fig. 7,
1157
+ to record uplink and downlink CSI measurements, respectively.
1158
+ At each spot, 250 CSI samples are recorded during data
1159
+ collection. Thus, we can obtain three datasets DA, DB and
1160
+ DC. In particular, DA includes 10K uplink CSI samples from
1161
+ A spots and is used for training localization DNNs at the AP.
1162
+ DB consists of 10K downlink samples from B spots and is
1163
+ used by the attacker to generate adversarial perturbations. DC
1164
+ has 10K uplink samples from B spots and is responsible for
1165
+ testing FooLoc.
1166
+ Attack Methodology. We independently train DNNA and
1167
+ DNNB on DA, and optimize adversarial perturbations using
1168
+ the samples in DB according to Algorithm 1. Then, we apply
1169
+ the optimized perturbations on DC and feed the perturbed
1170
+ samples into DNNA and DNNB, respectively, to perform both
1171
+ targeted and untargeted attacks. As depicted in Fig. 8, for each
1172
+ B spot p in targeted attacks, we choose the nearest B points
1173
+ that are outside a certain ball centering at p as targeted spots.
1174
+ In particular, the ball radius equals to the sum of the 90th
1175
+ TABLE II
1176
+ Performance of FooLoc in Offline Experiments.
1177
+ Targeted attacks
1178
+ Before
1179
+ After
1180
+ DNNA
1181
+ DNNB
1182
+ DNNA
1183
+ DNNB
1184
+ LE to p
1185
+ (Genuine spots)
1186
+ 50th
1187
+ 0.60 m
1188
+ 0.54 m
1189
+ 1.48 m
1190
+ 1.28 m
1191
+ 90th
1192
+ 1.85 m
1193
+ 1.93 m
1194
+ 2.61 m
1195
+ 2.51 m
1196
+ LE to q
1197
+ (Targeted spots)
1198
+ 50th
1199
+ 1.59 m
1200
+ 1.56 m
1201
+ 0.53 m
1202
+ 0.55 m
1203
+ 90th
1204
+ 3.08 m
1205
+ 2.93 m
1206
+ 1.42 m
1207
+ 1.38 m
1208
+ ASR
1209
+ 0.1%
1210
+ 0.1%
1211
+ 74.1%
1212
+ 71.8%
1213
+ PSR
1214
+ -
1215
+ -
1216
+ -19.6 dB
1217
+ -18.9 dB
1218
+ Untargeted attacks
1219
+ Before
1220
+ After
1221
+ DNNA
1222
+ DNNB
1223
+ DNNA
1224
+ DNNB
1225
+ LE to p
1226
+ 50th
1227
+ 0.60 m
1228
+ 0.54 m
1229
+ 3.30 m
1230
+ 3.45 m
1231
+ 90th
1232
+ 1.85 m
1233
+ 1.93 m
1234
+ 5.55 m
1235
+ 5.41 m
1236
+ ASR
1237
+ 0.1%
1238
+ 0.0%
1239
+ 94.4%
1240
+ 92.4%
1241
+ PSR
1242
+ -
1243
+ -
1244
+ -19.0 dB
1245
+ -19.5 dB
1246
+ percentile LE of localization models and half of the spacing
1247
+ distance, i.e, 0.75 m. In this way, we can have multiple targeted
1248
+ spots for one genuine spot p and finally obtain 119 and 116
1249
+ genuine-targeted spot pairs for DNNA and DNNB, respectively.
1250
+ In addition, we configure dmax = 0.75 m in targeted attacks.
1251
+ When performing untargeted attacks on p, we set dmin to be
1252
+ the sum of 90th percentile LE at p of localization models and
1253
+ half of the spacing distance.
1254
+ Experimental Results. We first show the overall attack
1255
+ performance of FooLoc on DNNA and DNNB. For this purpose,
1256
+ we report all evaluation metrics in Table II. Before attacks,
1257
+ DNNA and DNNB obtain 50th LEs of 0.60 m and 0.54 m,
1258
+ respectively, which are comparable to other localization DNNs.
1259
+ We can also observe that FooLoc has better performance in
1260
+ untargeted attacks in terms of LEs and ASRs. The reason is that
1261
+ FooLoc can search all directions pointing away from genuine
1262
+ spots in untargeted attacks, while having much fewer directions
1263
+ and more strict distance constraints to launch targeted attacks
1264
+ as shown in Fig. 8. Despite that, in targeted attacks, DNNA’s
1265
+ 90th percentile LE to genuine spots rises from 1.85 m to
1266
+ 2.61 m, while its 90th percentile LE to targeted spots decreases
1267
+ from 3.08 m to 1.42 m. Similar results can be found in DNNB.
1268
+ Moreover, FooLoc achieves ASRs of 74.1% and 71.8% on
1269
+ DNNA and DNNB, respectively. The above observations suggest
1270
+ that FooLoc can effectively render victim models’ predictions
1271
+ close to targeted spots. In untargeted attacks, FooLoc makes
1272
+ the 50th and 90th percentile LE of both models increase by
1273
+ over five and two times, respectively, implying that the two
1274
+ models’ predictions are easily misled away from genuine spots.
1275
+ In addition, FooLoc obtains high ASRs of 94.4% and 92.4%,
1276
+ respectively, on DNNA and DNNB in untargeted attacks. It
1277
+ is worth noting that due to random noise and environmental
1278
+ dynamics, some collected Wi-Fi CSI samples may have already
1279
+ been predicted in targeted areas before adversarial attacks.
1280
+ However, such samples are only a very small portion of total
1281
+ testing samples, i.e., about 0.1% as shown in Table II, which
1282
+ indicates the validity of targeted spot selection and acceptable
1283
+ distance error settings in our attack methodology. Furthermore,
1284
+ we also find that FooLoc has low PSRs of about -19 dB in both
1285
+ targeted and untargeted attacks. The result means that only
1286
+ small perturbations are introduced in original signals, which
1287
+
+ Fig. 9. Illustration of original and perturbed signals under targeted and untargeted attacks in offline experiments (normalized CSI vs. subcarrier index for two antennas): original CSI at the genuine spot, targeted perturbed CSI (ASR = 99.6%, PSR = -19.8 dB), original CSI at the targeted spot, and untargeted perturbed CSI (ASR = 100%, PSR = -18.3 dB).
1346
+ suggests the imperceptibility of our adversarial attacks. To sum
1347
+ up, the above results verify the effectiveness of FooLoc to
1348
+ deceive DL localization models.
1349
+ Next, we illustrate perturbed signals under targeted and un-
1350
+ targeted attacks. Since FooLoc has similar attack performance
1351
+ on DNNA and DNNB, we take perturbed signals of DNNA for
1352
+ illustration in Fig. 9, where each subfigure depicts 50 CSI
1353
+ samples. As shown in Fig. 9, we observe that under the same
1354
+ attack, the perturbed signals of two antennas share the same
1355
+ changing trends with respect to original ones. This is because
1356
+ our adversarial perturbations have multiplicative and repetitive
1357
+ impacts on original signals. Moreover, although the perturbed
1358
+ signals under two attacks are predicted to be far away from
1359
+ the genuine spot with high probabilities, they look very similar
1360
+ to original ones, which shows the usefulness of maximizing
1361
+ smoothness and limiting strength of adversarial weights in
1362
+ perturbation optimization. Furthermore, we can observe that
1363
+ targeted perturbed CSIs have more sudden changes and are
1364
+ less smooth when compared with untargeted perturbed CSIs.
1365
+ This is due to the fact that more changes are needed when
1366
+ FooLoc forces one sample to be estimated as coming from a
1367
+ specified spot. Interestingly, we also find that targeted attacks
1368
+ have smaller perturbations on original signals. Though targeted
1369
+ perturbed signals show a very low similarity with original
1370
+ signals at the targeted spot, the corresponding predictions are
1371
+ less than 0.75 m from the targeted spot with a probability of
1372
+ 99.6%. These observations suggest that localization DNNs are
1373
+ very vulnerable to our adversarial perturbations.
1374
+ Then, we showcase FooLoc’s targeted and untargeted attacks
1375
+ on DNNA at two B spots in the offline environment. To do
1376
+ this, we plot location predictions at two spots with and without
1377
+ adversarial attacks in the corresponding 2D Euclidean space in
1378
+ Fig. 10. In targeted attacks, the majority of CSI samples can
1379
+ be successfully perturbed into the neighboring area of targeted
1380
+ spots within a distance dmax = 0.75 m, even if these spots
1381
+ lie in different directions with respect to corresponding
1382
+ genuine spots. This observation verifies FooLoc’s ability to
1383
+ render location predictions close to given targeted spots. In
1384
+ untargeted attacks, adversarial perturbations can make model
1385
+ predictions far away from genuine locations with a distance of
1386
+ more than dmin. In addition, we can find that location predictions
1387
+ under untargeted attacks basically have a larger distance from
+ Fig. 10. Illustration of adversarial attacks at two spots in the offline environment (targeted ASRs of 75.7% and 99.6% and untargeted ASRs of 100% at the 1st and 2nd spots; balls of radius d_max and d_min are shown in the 2D plane). The red dots are location predictions without perturbations. The gray dots are location predictions under untargeted attacks.
+ Fig. 11. Performance of untargeted attacks under different conditions (white-box, black-box, and Baselines 1-3) in offline experiments, in terms of ASR, 50th percentile LE (m), and PSR (dB).
1449
+ genuine spots than those under targeted attacks. The above results
1450
+ illustrate the effectiveness of FooLoc to launch targeted and
1451
+ untargeted attacks on localization DNNs.
1452
+ Furthermore, we show the feasibility of fooling black-box
1453
+ DL models over the air. In this case, the localization model used
1454
+ by the AP is unknown to the attacker. To simulate this situation,
1455
+ we first assume that DNNA is used by the AP. Then, we train
1456
+ DNNA using uplink CSI samples in the dataset DA as a victim
1457
+ model and optimize DNNB using the dataset DB as a substitute
1458
+ model. Next, we use the substitute model to generate untargeted
1459
+ adversarial perturbations with DB according to Algorithm 1. In
1460
+ this way, we can apply locally-generated perturbations on uplink
1461
+ CSI samples in DC to deceive unknown DNNA. Similarly, we
1462
+ can attack DNNB, when it is used by the AP, by using DNNA in a black-
1463
+ box manner. In this scenario, we also set three baseline models
1464
+ that leverage multiplicative perturbation weights randomly
1465
+ sampled from the interval (1 − δmax, 1 + δmax). The baseline
1466
+ models, i.e., Baseline 1, Baseline 2 and Baseline 3, have
1467
+ different perturbation constraints δmax of 0.15, 0.3 and 0.45,
1468
+ respectively. During testing, we run each of them ten times
1469
+ and average all ASRs, 50th percentile LEs and PSRs as their
1470
+ final performance results.
1471
+ As Fig. 11 shows, FooLoc suffers performance degradation
1472
+ from white-box scenarios to black-box ones. These results are
1473
+
1474
+ 10
1475
+ 10m
1476
+ AP
1477
+ A spots
1478
+ B spots
1479
+ Office area
1480
+ AP
1481
+ with 2
1482
+ antennas
1483
+ Client
1484
+ with 1
1485
+ antenna
1486
+ WARP
1487
+ boards
1488
+ Fig. 12. Floor plan of the experiment environment and experimental platform
1489
+ in online experiments.
1490
+ expected because the substitute models for perturbation gener-
1491
+ ation in black-box attacks are different from targeted victim
1492
+ models, resulting in different adversarial weights. Moreover,
1493
+ when compared to other baseline models, Baseline 3 obtains the
1494
+ best performance in terms of ASRs and 50th LEs, while also
1495
+ having the highest PSRs. In addition, compared with Baseline
1496
+ 3, the black-box version of FooLoc achieves better performance
1497
+ on DNNA and comparable performance on DNNB with regard
1498
+ to ASRs and 50th LEs. However, it has much smaller PSRs
1499
+ on both two DNNs, suggesting that our adversarial attacks are
1500
+ more effective and stealthy than random perturbations. The
1501
+ above results indicate that FooLoc is capable of learning some
1502
+ shared adversarial weights that work well on different models
1503
+ due to the transferability of adversarial attacks [7], [8], showing
1504
+ the possibility of exploiting FooLoc to perform over-the-air
1505
+ adversarial attacks on black-box localization models.
1506
+ C. Online Experiments
1507
+ In this subsection, we further examine the performance of
1508
+ FooLoc in online experiments. In this setting, we multiply
1509
+ adversarial weights with LTF signals, transmit perturbed signals
1510
+ to the AP over real wireless channels and record corresponding
1511
+ falsified uplink CSIs to attack localization models.
1512
+ Implementation. In online experiments, we implement
1513
+ FooLoc using the WARP wireless experimental platform [19]
1514
+ as shown in Fig. 12. In particular, two WARP v3 boards are
1515
+ controlled by a Lenovo laptop via Ethernet cables to transfer
1516
+ control signals as well as their CSI measurements. One of the
1517
+ two boards is fixed at a certain location to act as an AP with
1518
+ two antennas, and the other board with one antenna works as
1519
+ a mobile client that communicates with the AP at the 5 GHz
1520
+ Wi-Fi band. Since WARP boards can provide channel estimates
1521
+ of 52 subcarriers, one CSI sample in online experiments has a
1522
+ size of 1 × 2 × 52.
1523
+ Data Collection. We collect CSI measurements in a corridor
1524
+ environment as depicted in Fig. 12. Specifically, we place the
1525
+ client at ten A spots and ten B spots in turn to record CSI
1526
+ measurements. First, we move the client among A spots, with
1527
+ a spacing distance of 0.6 m, and receive 1K uplink CSIs at
1528
+ each spot. In this way, we obtain a dataset DE containing 10K
1529
+ samples for training localization DNNs used by the AP. Then,
1530
+ by locating the client at B spots, we collect 1K downlink CSI
1531
+ samples at each location and have a dataset DF to generate
1532
+ adversarial perturbations. Note that there are stairs at one
+ [Fig. 13 plot data: ASR and PSR (dB) for targeted and untargeted attacks]
1556
+ Fig. 13. Attack performance of FooLoc in online experiments.
1557
+ end of the corridor and people go downstairs and upstairs
1558
+ frequently. Thus, the collected CSI measurements are impacted
1559
+ by environmental noise and changes.
1560
+ Attack Methodology. The attack strategy adopted in online
1561
+ experiments is similar to that in offline settings, but the only
1562
+ difference is that the attacker needs to send perturbed LTF
1563
+ signals over the air to deceive the victim AP. Specifically, we
1564
+ train DNNA and DNNB on the dataset DE and
1565
+ learn adversarial perturbations using DF. Then, we multiply
1566
+ the locally-optimized perturbations on Wi-Fi LTF signals and
1567
+ transmit the perturbed signals from the client to the AP over
1568
+ the air. After the AP receives perturbed CSI measurements, we
1569
+ immediately feed them into DNNA and DNNB, respectively, to
1570
+ perform location estimation. Moreover, we set dmax = 0.3 m,
1571
+ i.e., half of the spacing distance, and configure dmin to be
1572
+ the sum of the 90th percentile LE and dmax. For a given B
1573
+ spot, the corresponding targeted spot is selected as a location
1574
+ that has a distance of 1.8 m from it.
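+ For clarity, a minimal sketch of how we read these success criteria (not the authors' exact
+ evaluation code; the numeric values below are examples only):
+ import numpy as np
+ def targeted_success(pred_xy, target_xy, d_max=0.3):
+     # a targeted attack succeeds if the prediction lands within d_max of the targeted spot
+     return np.linalg.norm(np.asarray(pred_xy) - np.asarray(target_xy)) <= d_max
+ def untargeted_success(pred_xy, genuine_xy, d_min):
+     # an untargeted attack succeeds if the prediction is pushed at least d_min from the genuine spot
+     return np.linalg.norm(np.asarray(pred_xy) - np.asarray(genuine_xy)) >= d_min
+ le_90th = 0.5               # hypothetical 90th-percentile localization error (m)
+ d_min = le_90th + 0.3       # d_min = 90th-percentile LE + d_max, as configured above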
1575
+ Experimental Results. We first report FooLoc’s ASRs and
1576
+ PSRs in our online experiments. Since FooLoc’s adversarial
1577
+ perturbations are learned from downlink CSI measurements,
1578
+ they would generally be affected by random environmental
1579
+ noise in uplink transmissions, resulting in performance degra-
1580
+ dation in terms of ASRs at the testing phase. As shown in
1581
+ Fig. 13, FooLoc achieves targeted ASRs of 65.7% and 77.5%
1582
+ on DNNA and DNNB, respectively, which are comparable to
1583
+ that of FooLoc in offline experiments. In untargeted attacks,
1584
+ FooLoc obtains ASRs of above 99.0% on two victim models,
1585
+ suggesting that FooLoc is still effective in this online setting.
1586
+ Moreover, our adversarial attacks have small perturbations on
1587
+ original signals and obtain mean PSRs of less than -17.5 dB in
1588
+ both targeted and untargeted scenarios. The above observations
1589
+ indicate that FooLoc is robust to environmental noise and has
1590
+ comparable performance in online experiments.
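+ As a hedged reference, we interpret PSR as the perturbation-to-signal power ratio in dB between
+ the perturbed and the original samples; a small sketch of this reading (not necessarily the exact
+ definition used in the paper):
+ import numpy as np
+ def psr_db(original, perturbed):
+     pert_power = np.sum(np.abs(perturbed - original) ** 2)
+     sig_power = np.sum(np.abs(original) ** 2)
+     return 10.0 * np.log10(pert_power / sig_power)
+ # A PSR of about -18 dB corresponds to a perturbation carrying roughly 1.6% of the signal power.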
1591
+ Furthermore, different AP locations will impact FooLoc’s
1592
+ performance. In general, the displacement of AP locations
1593
+ will produce different training sets of CSI fingerprints, which
1594
+ correspondingly changes the parameters of the localization
1595
+ model, thus resulting in different attack performance of our
1596
+ system. Roughly speaking, the higher localization accuracy
1597
+ the model achieves, the lower ASR FooLoc obtains. In our
1598
+ experiments, FooLoc achieves a targeted ASR of about 73%
1599
+ and an untargeted ASR of about 93% in the offline experiment,
1600
+ while obtaining a targeted ASR of about 71% and an untargeted
1601
+ ASR of about 99% in the online experiment. The above results
1602
+
+ [Fig. 14 panels (normalized CSI vs. subcarrier index, 1st and 2nd antenna): original CSI at genuine spot; targeted perturbed CSI (ASR=100%, PSR=-18.7 dB); original CSI at targeted spot; untargeted perturbed CSI (ASR=100%, PSR=-16.4 dB)]
1654
+ Fig. 14.
1655
+ Illustration of original and perturbed signals under targeted and
1656
+ untargeted attacks in online experiments.
1657
+ show that FooLoc has similar attack performance in two
1658
+ different experimental settings.
1659
+ Next, we take a further step to show the imperceptibility of
1660
+ our adversarial perturbations. For this purpose, we record uplink
1661
+ CSI measurements at the AP with and without perturbations and
1662
+ depict corresponding signals for attacking DNNA in Fig. 14.
1663
+ As the figure shows, all perturbed CSI measurements look
1664
+ like the original ones, i.e., they keep the main trends of the
+ original signals with only slight differences. In addition, FooLoc can
1666
+ successfully generate adversarial signals with high ASRs and
1667
+ low PSRs. Although targeted perturbed CSIs are very different
1668
+ from original signals at the targeted spot, their predictions are
1669
+ less than 0.3 m from the targeted spot with a probability of
1670
+ 100%. To sum up, our adversarial perturbations can effectively
1671
+ spoof DL localization models over realistic wireless channels.
1672
+ Then, we present location prediction results with and without
1673
+ adversarial attacks at two B spots in the online environment. To
1674
+ do this, we depict location predictions under adversarial attacks
1675
+ in the 2D Euclidean space in Fig. 15. At the first spot, FooLoc
1676
+ can successfully render all location predictions in untargeted
1677
+ attacks far away from it with a distance of more than dmin. At
1678
+ the same time, FooLoc makes location predictions in targeted
1679
+ attacks close to the targeted spot within a distance of 0.3 m
1680
+ with a high probability of 92.2%. Similar observations can
1681
+ be also found in the second spot. The above results show the
1682
+ effectiveness of FooLoc to perform over-the-air targeted and
1683
+ untargeted adversarial attacks.
1684
+ V. Related Work
1685
+ Indoor Localization. Recent years have witnessed the
1686
+ emerging needs of person or device locations in indoor
1687
+ environments, such as homes and office buildings [26], [27],
1688
+ [28]. Generally, indoor localization can be realized by exploit-
1689
+ ing various sensing modalities, among which Wi-Fi signals
1690
+ are one of the most promising ones thanks to their high
1691
+ ubiquity in indoor scenarios. Moreover, due to the huge success
1692
+ in the computer vision domain, various DNNs have been
1693
+ recently exploited for accurate Wi-Fi indoor localization [29],
1694
+ [30]. The stacked restricted Boltzmann machines [20], deep
1695
+ autoencoder [31] as well as residual networks [6] are proposed
1696
+ for indoor positioning, distance estimation, and so on. With
1697
+ the increasing usage of DNNs in indoor localization, it is
+ [Fig. 15 plot data: predictions in x (m) and y (m); 1st spot: targeted ASR 92.2%, untargeted ASR 100%; 2nd spot: targeted ASR 100%, untargeted ASR 100%; legend: genuine spot, targeted spot, original prediction, targeted attack, untargeted attack]
1743
+ Fig. 15. Illustration of adversarial attacks in the online environment.
1744
+ thus of great importance to investigate the robustness of DL
1745
+ localization models to adversarial attacks.
1746
+ Adversarial Attacks. Although deep neural networks have
1747
+ proven their success in many real-world applications, they are
1748
+ shown to be susceptible to minimal perturbations [7], [8]. After
1749
+ that, various adversarial attacks are introduced in face recogni-
1750
+ tion [32], person detection [33], optical flow estimation [34],
1751
+ and so on. Recently, adversarial attacks are proposed on DNN
1752
+ based applications in wireless communications, such as radio
1753
+ signal classification [35], waveform jamming and synthesis [36].
1754
+ Moreover, the work [12] exposes the threats of adversarial
1755
+ attacks on indoor localization and floor classification. However,
1756
+ this work uses additive perturbations, which cannot tamper
+ with CSI measurements over realistic Wi-Fi channels. In our work,
1758
+ we propose multiplicative adversarial perturbations that can
1759
+ be exploited by adversary transceivers to perform adversarial
1760
+ attacks on localization DNNs over the air.
1761
+ Wireless Channel Manipulation. Perturbations on wireless
1762
+ channels have also been investigated in the tasks of device au-
1763
+ thentication and device localization. Recently, researchers [15]
1764
+ propose a CSI randomization approach to distort location
1765
+ specific signatures for dealing with users’ privacy concerns
1766
+ about locations. However, this approach lacks the capability
1767
+ of misleading location predictions close to specified spots, i.e.,
1768
+ targeted attacks. In addition, the proposed random perturbations
1769
+ are not smooth, which will produce significant differences
1770
+ between perturbed CSI measurements and original ones,
1771
+ rendering them easy to detect. In contrast, FooLoc enables
1772
+ the attacker to launch both targeted and untargeted attacks,
1773
+ and our adversarial perturbations are smooth and minimal,
1774
+ making perturbed CSI signatures similar to the original ones.
1775
+ Moreover, the authors in [13] propose analog man-in-the-
1776
+ middle attacks to mimic legitimate channel responses against
1777
+ link based device identification. The work [14] fools location
1778
+ distinction systems via creating virtual multipath signatures.
1779
+ These approaches trigger attacks via directly transforming
1780
+ genuine Wi-Fi CSI fingerprints into targeted ones, which is
+ suitable for attacking single-antenna APs but
+ physically unrealizable in widely-used multi-antenna Wi-
1783
+ Fi systems due to the one-to-many relationship between the
1784
+ elements of one perturbation and one CSI measurement. In
1787
+ contrast, our attack takes this relationship into consideration
1788
+ and generates adversarial perturbations with a repetitive pattern,
1789
+ which characterizes the impact of over-the-air attacks on multi-
1790
+ antenna APs.
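+ A small sketch of this one-to-many relationship (our illustration; shapes are assumptions): a
+ single-antenna client can only scale its transmitted LTF per subcarrier, so the same weight vector
+ necessarily multiplies the CSI observed at every AP antenna.
+ import numpy as np
+ n_ap_antennas, n_subcarriers = 2, 52        # e.g., the WARP setup in our online experiments
+ uplink_csi = np.ones((n_ap_antennas, n_subcarriers), dtype=complex)   # placeholder CSI
+ weights = 1.0 + 0.1 * np.random.default_rng(1).standard_normal(n_subcarriers)
+ perturbed_csi = uplink_csi * weights        # repetitive pattern: identical effect on each antenna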
1791
+ VI. Conclusion
1792
+ This paper presents FooLoc, a novel system that launches
1793
+ over-the-air adversarial attacks on indoor localization DNNs.
1794
+ We observe that though the uplink CSI is unknown to FooLoc,
1795
+ its corresponding downlink one is obtainable and could be
1796
+ a reasonable substitute. Instead of using traditional additive
1797
+ perturbations, FooLoc exploits multiplicative perturbations with
1798
+ repetitive patterns, which are suitable for adversarial attacks
1799
+ over realistic wireless channels. FooLoc can efficiently craft
1800
+ imperceptible yet robust perturbations for triggering targeted
1801
+ and untargeted attacks against DL localization models. We
1802
+ implement our system using both commercial Wi-Fi APs and
1803
+ WARP v3 boards and extensively evaluate it in different indoor
1804
+ environments. The experimental results show that FooLoc
1805
+ achieves overall ASRs of about 70% in targeted attacks and
1806
+ of above 90% in untargeted attacks with small PSRs of about
1807
+ -18 dB. In addition, this paper reveals the blind spots of indoor
1808
+ localization DNNs using over-the-air adversarial attacks to call
1809
+ attention to appropriate countermeasures.
1810
+ References
1811
+ [1] J. Liu, H. Liu, Y. Chen, Y. Wang, and C. Wang, “Wireless sensing for
1812
+ human activity: A survey,” IEEE Commun. Surveys Tuts., vol. 22, no. 3,
1813
+ pp. 1629–1645, 2019.
1814
+ [2] R. Liu, S. H. Marakkalage, M. Padmal, T. Shaganan, C. Yuen, Y. L.
1815
+ Guan, and U.-X. Tan, “Collaborative SLAM based on WiFi fingerprint
1816
+ similarity and motion information,” IEEE Internet Things J., vol. 7, no. 3,
1817
+ pp. 1826–1840, 2019.
1818
+ [3] X. Wang, L. Gao, S. Mao, and S. Pandey, “CSI-based fingerprinting
1819
+ for indoor localization: A deep learning approach,” IEEE Trans. Veh.
1820
+ Technol., vol. 66, no. 1, pp. 763–776, 2016.
1821
+ [4] M. Nowicki and J. Wietrzykowski, “Low-effort place recognition with
1822
+ WiFi fingerprints using deep learning,” in International Conference
1823
+ Automation.
1824
+ Springer, 2017, pp. 575–584.
1825
+ [5] M. Abbas, M. Elhamshary, H. Rizk, M. Torki, and M. Youssef, “WiDeep:
1826
+ WiFi-based accurate and robust indoor localization system using deep
1827
+ learning,” in Proc. IEEE PerCom.
1828
+ [6] X. Wang, X. Wang, and S. Mao, “Indoor fingerprinting with bimodal
1829
+ CSI tensors: A deep residual sharing learning approach,” IEEE Internet
1830
+ Things J., vol. 8, no. 6, pp. 4498–4513, 2020.
1831
+ [7] C. Szegedy, W. Zaremba, I. Sutskever, J. Bruna, D. Erhan, I. Good-
1832
+ fellow, and R. Fergus, “Intriguing properties of neural networks,”
1833
+ arXiv:1312.6199, 2013.
1834
+ [8] I. J. Goodfellow, J. Shlens, and C. Szegedy, “Explaining and harnessing
1835
+ adversarial examples,” arXiv:1412.6572, 2014.
1836
+ [9] K. Eykholt, I. Evtimov, E. Fernandes, B. Li, A. Rahmati, C. Xiao,
1837
+ A. Prakash, T. Kohno, and D. Song, “Robust physical-world attacks
1838
+ on deep learning visual classification,” in Proc. IEEE CVPR, 2018, pp.
1839
+ 1625–1634.
1840
+ [10] A. Sheth, S. Seshan, and D. Wetherall, “Geo-fencing: Confining Wi-
1841
+ Fi coverage to physical boundaries,” in International Conference on
1842
+ Pervasive Computing.
1843
+ Springer, 2009, pp. 274–290.
1844
+ [11] J. J. Pan, S. J. Pan, V. W. Zheng, and Q. Yang, “Digital wall: A power-
1845
+ efficient solution for location-based data sharing,” in Proc. IEEE PerCom,
1846
+ 2008, pp. 645–650.
1847
+ [12] M. Patil, X. Wang, X. Wang, and S. Mao, “Adversarial attacks on deep
1848
+ learning-based floor classification and indoor localization,” in Proc. ACM
1849
+ WiseML, 2021, pp. 7–12.
1850
+ [13] Y.-C. Tung, K. G. Shin, and K.-H. Kim, “Analog man-in-the-middle
1851
+ attack against link-based packet source identification,” in Proc. ACM
1852
+ MobiHoc, 2016, pp. 331–340.
1853
+ [14] S. Fang, Y. Liu, W. Shen, and H. Zhu, “Where are you from? confusing
1854
+ location distinction using virtual multipath camouflage,” in Proc. ACM
1855
+ MobiCom, 2014, pp. 225–236.
1856
+ [15] M. Cominelli, F. Kosterhon, F. Gringoli, R. L. Cigno, and A. Asadi, “IEEE
1857
+ 802.11 CSI randomization to preserve location privacy: An empirical
1858
+ evaluation in different scenarios,” Computer Networks, vol. 191, p.
1859
+ 107970, 2021.
1860
+ [16] J. Newsome, E. Shi, D. Song, and A. Perrig, “The sybil attack in sensor
1861
+ networks: analysis & defenses,” in Proc. IEEE IPSN, 2004, pp. 259–268.
1862
+ [17] Y. Huang, W. Wang, T. Jiang, and Q. Zhang, “Detecting colluding sybil
1863
+ attackers in robotic networks using backscatters,” IEEE/ACM Trans. Netw.,
1864
+ vol. 29, no. 2, pp. 793–804, 2021.
1865
+ [18] D. Tse and P. Viswanath, Fundamentals of wireless communication.
1866
+ Cambridge university press, 2005.
1867
+ [19] N. Anand, E. Aryafar, and E. W. Knightly, “WARPlab: a flexible
1868
+ framework for rapid physical layer design,” in Proc. ACM workshop
1869
+ on Wireless of the students, by the students, for the students, 2010, pp.
1870
+ 53–56.
1871
+ [20] X. Wang, L. Gao, S. Mao, and S. Pandey, “DeepFi: Deep learning for
1872
+ indoor fingerprinting using channel state information,” in Proc. IEEE
1873
+ WCNC, 2015, pp. 1666–1671.
1874
+ [21] S. Sen, B. Radunovic, R. R. Choudhury, and T. Minka, “You are facing the
1875
+ mona lisa: Spot localization using PHY layer information,” in Proc. ACM
1876
+ MobiSys, 2012, pp. 183–196.
1877
+ [22] R. Ayyalasomayajula, A. Arun, C. Wu, S. Sharma, A. R. Sethi, D. Vasisht,
1878
+ and D. Bharadia, “Deep learning based wireless localization for indoor
1879
+ navigation,” in Proc.ACM MobiCom, 2020, pp. 1–14.
1880
+ [23] Y. Xie, Z. Li, and M. Li, “Precise power delay profiling with commodity
1881
+ WiFi,” in Proc. ACM MobiCom, 2015, p. 53–64.
1882
+ [24] Y. Xiao, “IEEE 802.11n: enhancements for higher throughput in wireless
1883
+ LANs,” IEEE Wireless Commun., vol. 12, no. 6, pp. 82–91, 2005.
1884
+ [25] I. Goodfellow, Y. Bengio, and A. Courville, Deep learning.
1885
+ MIT press,
1886
+ 2016.
1887
+ [26] X. Zhang, W. Wang, X. Xiao, H. Yang, X. Zhang, and T. Jiang, “Peer-
1888
+ to-peer localization for single-antenna devices,” Proceedings of the ACM
1889
+ on Interactive, Mobile, Wearable and Ubiquitous Technologies, vol. 4,
1890
+ no. 3, pp. 1–25, 2020.
1891
+ [27] Z. Luo, Q. Zhang, W. Wang, and T. Jiang, “Single-antenna device-
1892
+ to-device localization in smart environments with backscatter,” IEEE
1893
+ Internet Things J., 2021.
1894
+ [28] G. Huang, Z. Hu, J. Wu, H. Xiao, and F. Zhang, “WiFi and vision-
1895
+ integrated fingerprint for smartphone-based self-localization in public
1896
+ indoor scenes,” IEEE Internet Things J., vol. 7, no. 8, pp. 6748–6761,
1897
+ 2020.
1898
+ [29] M. Kotaru, K. Joshi, D. Bharadia, and S. Katti, “Spotfi: Decimeter level
1899
+ localization using WiFi,” in Proc. ACM SIGCOMM, 2015, pp. 269–282.
1900
+ [30] Y. Xie, J. Xiong, M. Li, and K. Jamieson, “mD-Track: Leveraging
1901
+ multi-dimensionality for passive indoor Wi-Fi tracking,” in Proc. ACM
1902
+ MobiCom, 2019, pp. 1–16.
1903
+ [31] W. Liu, Y. Jia, G. Jiang, H. Jiang, F. Wu, and Z. Lv, “Wifi-sensing based
1904
+ person-to-person distance estimation using deep learning,” in Proc. IEEE
1905
+ ICPADS, 2018, pp. 236–243.
1906
+ [32] M. Sharif, S. Bhagavatula, L. Bauer, and M. K. Reiter, “Accessorize to
1907
+ a crime: Real and stealthy attacks on state-of-the-art face recognition,”
1908
+ in Proc. ACM CCS, 2016, pp. 1528–1540.
1909
+ [33] S. Thys, W. Van Ranst, and T. Goedemé, “Fooling automated surveillance
1910
+ cameras: adversarial patches to attack person detection,” in Proc. IEEE
1911
+ CVPR, 2019, pp. 0–0.
1912
+ [34] A. Ranjan, J. Janai, A. Geiger, and M. J. Black, “Attacking optical flow,”
1913
+ in Proc. IEEE ICCV, 2019, pp. 2404–2413.
1914
+ [35] M. Sadeghi and E. G. Larsson, “Adversarial attacks on deep-learning
1915
+ based radio signal classification,” IEEE Commun. Lett., vol. 8, no. 1, pp.
1916
+ 213–216, 2019.
1917
+ [36] F. Restuccia, S. D’Oro, A. Al-Shawabka, B. C. Rendon, K. Chowdhury,
1918
+ S. Ioannidis, and T. Melodia, “Generalized wireless adversarial deep
1919
+ learning,” in Proc. ACM WiseML, 2020, pp. 49–54.
1920
+
79E3T4oBgHgl3EQfqQqR/content/tmp_files/2301.04650v1.pdf.txt ADDED
@@ -0,0 +1,1863 @@
1
+ Geometry-biased Transformers for Novel View Synthesis
2
+ Naveen Venkat*1
3
+ Mayank Agarwal*1
4
+ Maneesh Singh
5
+ Shubham Tulsiani1
6
+ 1Carnegie Mellon University
7
+ {nvenkat, mayankag, shubhtuls}@cmu.edu, [email protected]
8
+ https://mayankgrwl97.github.io/gbt
9
+ Input views
10
+ Synthesized novel views (GBT)
11
+ Synthesized novel views (GBT w/o geometric bias)
12
+ Figure 1. Given a small set of context images with known camera viewpoints (left), our Geometry-biased transformer (GBT) synthesizes
13
+ novel views from arbitrary query viewpoints (middle). The use of global context ensures meaningful prediction despite large viewpoint
14
+ variation, while the geometric bias allows more accurate inference compared to a baseline without such bias (right).
15
+ Abstract
16
+ We tackle the task of synthesizing novel views of an
17
+ object given a few input images and associated camera
18
+ viewpoints.
19
+ Our work is inspired by recent ‘geometry-
20
+ free’ approaches where multi-view images are encoded as
21
+ a (global) set-latent representation, which is then used to
22
+ predict the color for arbitrary query rays. While this repre-
23
+ sentation yields (coarsely) accurate images corresponding
24
+ to novel viewpoints, the lack of geometric reasoning lim-
25
+ its the quality of these outputs. To overcome this limita-
26
+ tion, we propose ‘Geometry-biased Transformers’ (GBTs)
27
+ that incorporate geometric inductive biases in the set-latent
28
+ representation-based inference to encourage multi-view ge-
29
+ ometric consistency. We induce the geometric bias by aug-
30
+ menting the dot-product attention mechanism to also incor-
31
+ porate 3D distances between rays associated with tokens
32
+ as a learnable bias. We find that this, along with camera-
33
+ aware embeddings as input, allows our models to generate
34
+ significantly more accurate outputs. We validate our ap-
35
+ proach on the real-world CO3D dataset, where we train our
36
+ system over 10 categories and evaluate its view-synthesis
37
+ ability for novel objects as well as unseen categories. We
38
+ empirically validate the benefits of the proposed geometric
39
+ biases and show that our approach significantly improves
40
+ over prior works.
41
+ 1. Introduction
42
+ Given just a few images depicting an object, we humans
43
+ can easily imagine its appearance from novel viewpoints.
44
+ For instance, consider the first image of the hydrant shown
45
+ in Figure 1 and imagine rotating it slightly anti-clockwise –
46
+ we intuitively understand that this would move the small
47
+ outlet towards the front and right. We can also imagine
48
+ rotating the hydrant further and know that the (currently
49
+ occluded) central outlet will eventually become visible on
50
+ the left. These examples serve to highlight that this task
51
+ of novel-view synthesis requires both reasoning about geo-
52
+ metric transformations e.g. motion of the visible surfaces, as
53
+ well as an understanding of the global structure e.g. occlu-
54
+ sions and symmetries to allow for realistic extrapolations.
55
+ In this work, we develop an approach that incorporates both
56
+ these to synthesize accurate novel views given only a sparse
57
+ set of images of a previously unseen object.
58
+ Recent advances in Neural Radiance Fields (NeRFs)
59
+ [13] have led to numerous approaches that use these rep-
60
+ resentations (and their variants) for obtaining remarkably
61
+ detailed novel-view renderings.
62
+ However, such methods
63
+ typically optimize instance-specific representations using
64
+ densely sampled multi-view observations, and cannot be di-
65
+ rectly leveraged for 3D inference from sparse input views.
66
+ * indicates equal contribution
68
+ arXiv:2301.04650v1 [cs.CV] 11 Jan 2023
69
+
70
+ To enable generalizable inference from a few views, recent
71
+ methods seek to instead predict radiance fields using the im-
72
+ age projections of a query 3D point as conditioning. While
73
+ using such geometric reprojection constraints allows accu-
74
+ rate predictions in the close vicinity of observed views, this
75
+ purely local conditioning mechanism fails to capture any
76
+ global context e.g. symmetries or correlated patterns. As a
77
+ result, these approaches struggle to render views containing
78
+ unobserved aspects or large viewpoint variations.
79
+ Our work is motivated by an alternate approach to gen-
80
+ eralizable view synthesis, where a geometry-free (global)
81
+ scene representation is used to predict images from query
82
+ viewpoints. Specifically, these methods form a set-latent
83
+ representation from multiple input views and directly in-
84
+ fer the color for a pixel for a query view (or equivalently a
85
+ query ray) using attention-based mechanisms in the scene
86
+ encoding and ray decoding process. Not only is this direct
87
+ view synthesis more computationally efficient than volume
88
+ rendering, but the set-latent representation also allows cap-
89
+ turing global context as each ray can attend to all aspects of
90
+ other views instead of just the projections of points along
91
+ it. However, this ‘geometry-free’ design comes at the cost
92
+ of precision – these methods cannot easily capture the de-
93
+ tails in input views, and while they can robustly capture the
94
+ coarse structure, do not output high-quality renderings.
95
+ In this work, we develop mechanisms to inject geometric
96
+ biases in these set-latent representation-based approaches.
97
+ Specifically, we propose Geometry-biased Transformers
98
+ (GBTs) which consist of a ray-distance-based bias in the
99
+ attention mechanism in Transformer layers. We show that
100
+ these help guide the scene encoding and ray decoding stages
101
+ to pay attention to relevant context, thereby enabling more
102
+ accurate view synthesis. We benchmark our approach using
103
+ the Co3D dataset [18] that comprises of challenging real-
104
+ world captures across diverse categories. We show that our
105
+ approach outperforms both, projection-based radiance field
106
+ prediction and set-latent representation-based view synthe-
107
+ sis approaches, and also demonstrate our method’s ability
108
+ to generalize to unseen object categories.
109
+ 2. Related Work
110
+ Instance-specific 3D Representations.
111
+ Driven by the re-
112
+ cent emergence of neural fields [13], a growing number of
113
+ methods seek to accurately capture the details of a specific
114
+ object or scene given multiple images. Leveraging either
115
+ volumetric [1,2,5,9,13,14,16], implicit [17,27,31], mesh-
116
+ based [8,33], or hybrid [3,7] representations, these methods
117
+ learn instance-specific representations capable of synthesiz-
118
+ ing novel views. However, as these methods do not learn
119
+ generic data-driven priors, they typically require densely
120
+ sampled views to be able to infer geometrically consistent
121
+ underlying representations and are incapable of predicting
122
+ beyond what they directly observe.
123
+ Projection-guided Generalizable View Synthesis.
127
+ Closer to our goal, several methods have aimed to learn
128
+ models capable of view-synthesis across instances. While
129
+ initial attempts [22] used global-variable-conditioned
134
+ neural fields, subsequent approaches [4,24,28,32] obtained
135
+ significant improvements by instead using features ex-
136
+ tracted via projection onto the context views. Reiznestein
137
+ et al. [18] further demonstrated the benefits of learning
138
+ the aggregation mechanisms across the features along a
139
+ query ray, but the projection-guided features remained
140
+ the fundamental building blocks. While these projection-
141
+ based methods are effective at generating novel views by
142
+ transforming the visible structures, they struggle to deal
143
+ with large viewpoint changes (as the underlying geometry
144
+ maybe uncertain), and are fundamentally unable to generate
145
+ plausible visual information not directly observed in the
146
+ context views. We argue that this is because these methods
147
+ lack the mechanisms to learn and utilize contexts globally
148
+ when generating query views.
149
+ Geometry-free View Synthesis.
150
+ To allow using global
151
+ context for view synthesis, an alternate class of methods
152
+ uses ‘geometry-free’ encodings to infer novel views. The
153
+ initial learning-based methods [23,30,34] typically focused
154
+ on novel-view prediction given a single image via global
155
+ conditioning. Subsequent approaches [11,15,19] improved
156
+ performance using different architectures e.g. Transform-
157
+ ers [26], while also allowing for probabilistic view synthesis
158
+ using VQ-VAEs [25] and VQ-GANs [6]. While this leads
159
+ to detailed and realistic outputs, the renderings are not 3D-
160
+ consistent due to stochastic sampling.
161
+ Our work is inspired by the recently proposed Scene
162
+ Representation Transformer (SRT) [20], which uses a set-
163
+ latent representation that encodes both patch-level and
164
+ global scene context.
165
+ This design engenders a fast, de-
166
+ terministic rendering pipeline that, unlike projection-based
167
+ methods, furnishes plausible hallucinations in the invisible
168
+ regions. However, these benefits come at the cost of detail
169
+ – unlike the projection-based methods, this geometry-free
170
+ approach is unable to capture precise details in the visi-
171
+ ble aspects. Motivated by this need to improve the detail,
172
+ we propose mechanisms to inject geometric biases in this
173
+ framework, and find that this significantly improves the per-
174
+ formance while preserving global reasoning and efficiency.
175
+ 3. Approach
176
+ We aim to render novel viewpoints of previously unseen
177
+ objects from a few posed images. To achieve this goal, we
178
+ design a rendering pipeline that reasons along the following
179
+ two aspects: (i) appearance - what is the likely appearance
180
+ of the object from the queried viewpoint, and, (ii) geometry
181
+ - what geometrically-informed context can be derived from
182
+ the configuration of the given input and query cameras?
183
+ Prior methods address each question in isolation e.g. via
184
+ [Fig. 2 diagram labels. a) Camera-fused patch embedding: CNN, fusion, R=I | t=0, Rrel | trel. b) Geometry-biased scene encoding: Geometry-biased Transformer Encoder, Q·K^T + bias, patch-level features, global scene encoding. c) Geometry-biased ray-decoding: query ray, Plücker coordinates, harmonic embedding, Geometry-biased Transformer Decoder, MLP, RGB]
216
+ Figure 2. Learning novel view synthesis using Geometry-biased Transformers. Best viewed in color. a) Camera-fused patch embed-
217
+ ding. Each input image Ii is processed using a shared CNN backbone FC and the feature maps are fused with the corresponding input
218
+ patch-ray embeddings (obtained via pi). b) Geometry-biased scene encoding. Our proposed Geometry-biased Transformer encoder FE
219
+ converts the set of patch-level feature tokens into a scene encoding via self-attention biased with ray distances. c) Geometry-biased ray-
220
+ decoding. To decode pixels for a novel viewpoint, we construct ray queries that are decoded by a geometry-biased transformer decoder
221
+ FD by attending into the scene encoding. Finally, an MLP predicts the pixel color using the decoded query token.
222
+ global latent representations [11,20,22,29] that address (i)
223
+ by learning object semantics, or, via reprojections [18, 32]
224
+ that address (ii) by employing explicit geometric transfor-
225
+ mations.
226
+ In contrast to prior works, our method jointly
227
+ reasons along both these aspects. Concretely, we propose
228
+ geometry-biased transformers that incorporate geometric
229
+ inductive biases while learning set-latent representations
230
+ that help capture global structures with superior quality.
231
+ Fig. 2 depicts the Geometry-biased Transformer (GBT)
232
+ framework which has three components.
233
+ First, a shared
234
+ CNN backbone extracts patch-level features which are
235
+ fused with the corresponding ray embeddings to derive lo-
236
+ cal (pose-aware) features (Fig.
237
+ 2a).
238
+ Then, the flattened
239
+ patch features and the associated rays are fed as input to-
240
+ kens to the GBT encoder that constructs a global set-latent
241
+ representation via self-attention (Fig. 2b). The attention
242
+ layers are biased to prioritize both the photometric and the
243
+ geometric context. Finally, the GBT decoder converts tar-
244
+ get ray queries to pixel colors by attending to the set-latent
245
+ representation (Fig. 2c). We now review the preliminary
246
+ concepts before describing our approach in detail.
247
+ 3.1. Preliminaries
248
+ 3.1.1 Ray representations
250
+ The fundamental unit of geometric information in our ap-
251
+ proach is a ray which is used to compute the geometric sim-
252
+ ilarity between two image regions. A naive choice for ray
253
+ representation is r = (o, d), where o ∈ R3 is the origin of
254
+ the ray, and d ∈ S2 is the normalized ray direction.
255
+ In contrast, we use the 4 DoF Plücker coordinates [10,
256
+ 21], r = (d, m) ∈ R6, where m = o × d, that are invari-
257
+ ant to the choice of the origin along the ray. Intuitively, this
258
+ allows us to associate a single color (pixel RGB) to the en-
259
+ tire ray, agnostic to its origin. In practice, this simplification
260
+ mitigates overfitting to the camera origin during training.
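+ A minimal sketch of this parameterization (our illustration, not the authors' code):
+ import numpy as np
+ def plucker_ray(origin, direction):
+     d = direction / np.linalg.norm(direction)   # unit direction
+     m = np.cross(origin, d)                     # moment, invariant to sliding the origin along the ray
+     return np.concatenate([d, m])               # 6-vector with 4 degrees of freedom
+ o, d = np.array([1.0, 2.0, 3.0]), np.array([0.0, 0.0, 1.0])
+ assert np.allclose(plucker_ray(o, d), plucker_ray(o + 5.0 * d, d))   # origin-invariance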
261
+ 3.1.2 Scene Representation Transformers
263
+ The overall framework of our approach is inspired by SRT
264
+ [20] that proposes a transformer encoder-decoder network
265
+ for novel view synthesis. Given a collection of posed im-
266
+ ages {(I_i, p_i)}_{i=1}^{V}, where I_i ∈ R^{H×W×3}, p_i ∈ R^{3×4}, and a
+ query ray r, SRT computes the following:
+ {z_p}_{p=1}^{V×P} = F_E ◦ F_C({I_i, p_i})    (1)
+ C(r) = F_D(r | {z_p})    (2)
274
+ Here, the shared CNN backbone (FC) extracts P patch-
275
+ level features from each posed input image. These are ag-
276
+ gregated into a set of flat patch embeddings and fed as in-
277
+ put tokens to the transformer encoder (FE). The encoder
278
+ transforms input tokens into a set-latent scene representa-
279
+ tion {zp} via self-attention. To render a novel viewpoint,
280
+ the decoder FD queries for each ray r pertaining to the tar-
281
+ get pixels and yields an RGB color by attending to the scene
282
+ representation {zp}.
283
+ 3.2. Geometry-biased Transformer (GBT) Layer
284
+ The core reasoning module in a transformer is a multi-
285
+ head attention layer that aggregates information from the
286
+ right context for each query. In our work, we propose to
287
+ extend this module by incorporating geometric reasoning.
288
+ [Fig. 3 diagram: Geometry-biased attention = feature similarity + distance bias]
296
+ Figure 3. An illustration of attention within GBT layer. Given the query and key tokens q, kn, along with the associated rays rq, rkn,
297
+ the attention within GBT incorporates two components: (i) a dot product similarity between features, and, (ii) the geometric distance bias
298
+ computed between the rays. Refer to Eq. 6 for the exact computation. Best viewed in color.
299
+ Base transformer layer. Given the query q, key {kn},
300
+ value {vn} tokens, a typical transformer layer computes:
301
+ q′ = T(q, {(kn, vn)})
302
+ (3)
303
+ which consists of a multi-head attention module, followed
304
+ by normalization and linear projection. During the context
305
+ aggregating step, each multi-head attention layer aggregates
306
+ token values based on query-key similarity weights:
307
+ w_n = softmax_n( (W_q q · W_k k_n) / η )    (4)
312
+ Incorporating ray distance as geometric bias.
313
+ In our
314
+ use case, each query and context token pertains to some
315
+ ray. For instance, all tokens passed to the encoder are patch
316
+ embeddings that have associated patch rays (Fig. 2b). Like-
317
+ wise, we query the decoder using target pixel rays (Fig. 2c).
318
+ In such a scenario, we propose to bias the transformer’s
319
+ attention by encouraging similarity between rays that are
320
+ closer to each other in 3D space. Specifically, the GBT layer
321
+ couples the query and key tokens with the associated rays
322
+ (q, rq), {(kn, rkn)} and performs the token transformation:
323
+ q′ = GBT((q, rq), {(kn, rkn, vn)})
324
+ (5)
325
+ The attention layer is modified to account for the dis-
326
+ tance between rq = (dq, mq) and rkn = (dkn, mkn):
327
+ w_n = softmax_n( (W_q q · W_k k_n) / η − γ^2 d(r_q, r_{k_n}) )    (6)
333
+ where,
334
+ d(r_q, r_{k_n}) = |d_q · m_{k_n} + d_{k_n} · m_q| / ||d_q × d_{k_n}||_2,   if d_q × d_{k_n} ≠ 0
+ d(r_q, r_{k_n}) = ||d_q × (m_q − m_{k_n}/s)||_2 / ||d_q||_2^2,             if d_{k_n} = s·d_q, s ≠ 0    (7)
348
+ and γ is a learnable parameter controlling the relative im-
349
+ portance of geometric bias. This formulation explicitly ac-
350
+ counts for both appearance (feature similarity between q
351
+ and kn), and geometry (distance between rq and rkn). This
352
+ attention mechanism is illustrated in Fig. 3. In practice, the
353
+ distance bias results in faster convergence to the right con-
354
+ text during training. While one can fix γ to some constant
355
+ hyperparameter, we found improved results by learning γ.
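+ To make Eqs. (6)-(7) concrete, the following is a small sketch of the biased attention weights for
+ one query (our reconstruction from the equations above, not the released implementation; tensor
+ shapes and the handling of gamma are assumptions):
+ import numpy as np
+ def ray_distance(rq, rk, eps=1e-8):
+     dq, mq = rq[:3], rq[3:]
+     dk, mk = rk[:3], rk[3:]
+     cross = np.cross(dq, dk)
+     if np.linalg.norm(cross) > eps:                        # skew rays
+         return abs(dq @ mk + dk @ mq) / np.linalg.norm(cross)
+     s = (dk @ dq) / (dq @ dq)                              # parallel rays: dk = s * dq
+     return np.linalg.norm(np.cross(dq, mq - mk / s)) / (dq @ dq)
+ def biased_attention_weights(q, keys, rq, rkeys, Wq, Wk, eta, gamma):
+     scores = np.array([(Wq @ q) @ (Wk @ k) / eta - gamma ** 2 * ray_distance(rq, rk)
+                        for k, rk in zip(keys, rkeys)])
+     scores = scores - scores.max()                         # numerically stable softmax
+     w = np.exp(scores)
+     return w / w.sum()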
356
+ 3.3. Learning Novel View Synthesis with GBTs
357
+ Given multiview images {I_i ∈ R^{H×W×3}}_{i=1}^{V} with
+ paired camera poses {p_i ∈ R^{3×4}}_{i=1}^{V}, we wish to render a
+ target viewpoint described by the camera pose p_q ∈ R^{3×4}.
363
+ Our network, as illustrated in Fig. 2, first processes the
364
+ posed multiview images using a CNN FC to extract patch-
365
+ level latent features. We then use GBT encoder FE to ex-
366
+ tract a scene encoding, and GBT decoder FD to yield pixel
367
+ colors given target ray queries.
368
+ a) Camera-fused patch embedding (FC).
369
+ We process
370
+ each context image Ii through a ResNet18 backbone to
371
+ obtain patch-level image feature grid. Subsequently, each
372
+ patch feature is concatenated with the corresponding ray
373
+ embedding (Fig. 2a) as follows:
374
+ [f_c]_i^k = W( [F_C(I_i)]^k ⊕ h((d_i^k, m_i^k)) )    (8)
382
+ where h(·) denotes harmonic embedding [13], (d_i^k, m_i^k)
+ denotes the Plücker coordinates for the kth patch ray in the ith
386
+ input image, and ⊕ denotes concatenation. We define each
387
+ patch ray as the ray passing through the center of the recep-
388
+ tive field of the corresponding cell in the feature grid. The
389
+ concatenated features are projected using a linear layer W.
390
+ While SRT fuses input images with per-pixel rays before
391
+ the CNN, we fuse the CNN output feature grid with per-
392
+ patch rays (observe different inputs to FC in Eq. 1 and Eq.
393
+ 8). This late fusion enables us to leverage transfer learning
394
+ using pretrained image backbones. Furthermore, since the
395
+ patch ray embeddings implicitly capture the positional in-
396
+ formation for each patch, we do not require 2D positional
397
+ encoding or camera ID embedding after the CNN (unlike
398
+ SRT), thus simplifying the architecture significantly.
399
+ Table 1. Evaluation of novel view synthesis. Given V = 3 input views, we evaluate the reconstruction quality (PSNR ↑ and LPIPS ↓) of
404
+ each method on the CO3Dv2 [18] dataset. GBT denotes our proposed approach, and GBT-nb is an ablation. See Sec. 4.2.
405
+ 10 training cat.   Apple        Ball         Bench        Cake         Donut        Hydrant      Plant        Suitcase     Teddybear    Vase         Mean
+                    PSNR LPIPS   PSNR LPIPS   PSNR LPIPS   PSNR LPIPS   PSNR LPIPS   PSNR LPIPS   PSNR LPIPS   PSNR LPIPS   PSNR LPIPS   PSNR LPIPS   PSNR LPIPS
+ pixelNeRF [32]     20.87 0.29   20.17 0.30   18.69 0.34   19.20 0.34   20.79 0.29   20.43 0.26   20.68 0.30   22.19 0.32   19.80 0.34   20.82 0.28   20.37 0.31
+ NerFormer [18]     20.91 0.31   17.50 0.35   16.06 0.52   18.08 0.46   21.19 0.33   19.33 0.31   19.31 0.50   20.31 0.46   16.95 0.47   18.04 0.39   18.77 0.41
+ ViewFormer [11]    21.70 0.24   19.34 0.30   17.08 0.30   18.04 0.32   19.59 0.28   18.59 0.21   18.34 0.31   21.61 0.26   16.60 0.31   21.52 0.21   19.24 0.27
+ GBT-nb             22.83 0.28   20.59 0.32   19.22 0.34   20.56 0.34   21.87 0.31   21.32 0.24   21.52 0.30   23.30 0.29   19.82 0.34   22.65 0.27   21.37 0.30
+ GBT                25.08 0.23   22.96 0.26   19.93 0.31   21.51 0.30   23.05 0.27   22.76 0.22   21.88 0.27   24.15 0.27   20.89 0.30   23.36 0.25   22.56 0.27
532
+ Table 2. Evaluation of variable context views setting. We report
533
+ PSNR (↑) and LPIPS (↓) averaged over 10 categories for each V .
534
+ 10 training cat.    PSNR ↑ (V=2 / V=3 / V=6)    LPIPS ↓ (V=2 / V=3 / V=6)
+ pixelNeRF [32]      18.47 / 20.37 / 22.25        0.36 / 0.31 / 0.26
+ NerFormer [18]      17.88 / 18.77 / 20.01        0.43 / 0.41 / 0.38
+ ViewFormer [11]     18.62 / 19.24 / 20.12        0.28 / 0.27 / 0.26
+ GBT-nb              20.91 / 21.37 / 21.49        0.31 / 0.30 / 0.30
+ GBT                 21.47 / 22.56 / 23.09        0.29 / 0.27 / 0.27
578
+ b) Geometry-biased scene encoding (FE).
579
+ Given local
580
+ patch features, we employ GBT encoder layers to augment
581
+ them with the global scene context through self-attention.
582
+ Specifically, we compute f_e = F_E(f_c, {(d_i^k, m_i^k)}), where
+ F_E contains a stack of GBT encoder layers as depicted in
+ Fig. 2b. The query, key, and value tokens for the encoder
+ layers are derived from the patch features [f_c]_i^k and their
+ corresponding patch rays (d_i^k, m_i^k). For each transformer
592
+ encoder layer, we learn a separate γ parameter.
593
+ Finally, the encoder outputs a global scene encoding
594
+ {[f_e]_i^k} that characterizes the appearance and the geome-
596
+ try of the object as observed from the multiple input views.
597
+ Note, this extension of the set-latent representation [20] in-
598
+ corporates both appearance and geometric priors.
599
+ c) Geometry-biased ray decoding (FD).
600
+ To render a
601
+ novel viewpoint given camera pose pq, we construct an
602
+ H × W grid of query rays rq = (dq, mq), with one ray
603
+ per query pixel. We then employ a stack of GBT decoder
604
+ layers FD that decodes each query ray independently by ag-
605
+ gregating meaningful context via cross-attention (Fig. 2c).
606
+ Specifically, the query tokens for the multihead attention
607
+ pertain to the query ray embeddings h(rq), while the keys
608
+ and values comprise the global scene encoding tokens
+ {[f_e]_i^k} along with the patch rays. The transformed query
611
+ embeddings are processed by an MLP to predict the pixel
612
+ color. Similar to FE, we learn a separate parameter γ for
613
+ each GBT decoder layer in FD.
614
+ Architectural details.
615
+ We use a ResNet18 (ImageNet ini-
616
+ tialized) up to the first 3 blocks as FC. The images are re-
617
+ Table 3. Evaluation of novel-view synthesis on unseen cate-
618
+ gories. Given V = 3 input views, we evaluate the reconstruction
619
+ quality (PSNR ↑ and LPIPS ↓) on unseen categories.
620
+ 5 heldout cat.     Backpack     Book         Chair        Mouse        Remote       Mean
+                    PSNR LPIPS   PSNR LPIPS   PSNR LPIPS   PSNR LPIPS   PSNR LPIPS   PSNR LPIPS
+ pixelNeRF [32]     22.87 0.31   18.86 0.34   20.30 0.32   23.39 0.27   23.74 0.23   21.83 0.30
+ ViewFormer [11]    20.84 0.31   16.84 0.32   15.94 0.31   21.55 0.26   20.42 0.22   19.12 0.28
+ GBT-nb             23.55 0.33   19.38 0.35   20.50 0.32   23.72 0.27   24.00 0.22   22.23 0.30
+ GBT                24.08 0.30   20.36 0.32   21.46 0.28   24.91 0.23   24.63 0.21   23.09 0.27
679
+ sized to H ×W = 256×256 and FC outputs a 16×16 fea-
680
+ ture grid. We use 8 GBT encoder layers and 4 GBT decoder
681
+ layers, wherein each transformer contains 12 heads for
682
+ multi-head attention with gelu activation. For the harmonic
683
+ embeddings h, we use 15 frequencies {2^{-6}π, . . . , 2^{8}π}.
684
+ Since we do not have access to a consistent world coordi-
685
+ nate frame across scenes, we choose an arbitrary input view
686
+ as identity [20,32]. All other cameras are represented rela-
687
+ tive to the identity view. See Appendix C for more details.
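+ A brief sketch of this normalization (our illustration; treating the 3x4 poses as world-to-camera
+ [R | t] matrices is an assumption about the convention):
+ import numpy as np
+ def to_44(p):                                   # p: 3x4 [R | t]
+     return np.vstack([p, [0.0, 0.0, 0.0, 1.0]])
+ def relative_poses(poses, identity_idx=0):
+     ref_inv = np.linalg.inv(to_44(poses[identity_idx]))
+     return [to_44(p) @ ref_inv for p in poses]  # the chosen view becomes R = I, t = 0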
688
+ Training and Inference.
689
+ During training, we encode
690
+ V = 3 posed input views and query the decoder for Q =
691
+ 7168 randomly sampled rays for a given target pose pq. The
692
+ pixel color is supervised using an L2 reconstruction loss.
693
+ The model is trained with Adam optimizer with 10−5 learn-
694
+ ing rate until loss convergence. At inference, we encode the
695
+ context views once and decode a batch of H × W rays for
696
+ each query view in a single forward pass. This results in a
697
+ fast rendering time. See Appendix D for more details.
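+ A compact, pseudocode-level sketch of this training objective (the model interface and batching
+ are assumptions, not the released code):
+ import numpy as np
+ def l2_ray_loss(model, ctx_images, ctx_poses, query_pose, gt_image, n_rays=7168, rng=None):
+     rng = np.random.default_rng() if rng is None else rng
+     H, W, _ = gt_image.shape
+     idx = rng.choice(H * W, size=n_rays, replace=False)      # randomly sampled target rays
+     ys, xs = idx // W, idx % W
+     pred_rgb = model(ctx_images, ctx_poses, query_pose, pixels=(ys, xs))   # assumed API
+     return np.mean((pred_rgb - gt_image[ys, xs]) ** 2)       # L2 reconstruction loss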
698
+ 4. Experiments
699
+ 4.1. Setup and Training Data
700
+ Dataset.
701
+ We experiment on the Common Objects in 3D
702
+ (CO3Dv2) dataset [18] that contains multi-view images
703
+ along with camera pose annotations. This is a challenging
704
+ dataset containing real-world object captures from 51 MS-
705
+ COCO categories. Following [18], we train our network on
706
+ 10 categories (see Table 1). Further, we evaluate our method
707
+ on 5 additional heldout categories (see Table 3) to demon-
708
+ strate generalization to unseen categories (see Appendix D
709
+ for details on training and testing splits).
710
+ [Fig. 4 column labels (two example rows): Input, pixelNeRF, NerFormer, ViewFormer, GBT-nb, GBT, Ground Truth]
726
+ Figure 4. Qualitative results on heldout objects from training categories. For each object, we consider V = 3 input views and compare
727
+ the reconstruction quality of each method on 2 other query views. Best viewed in color.
728
+ Baselines.
729
+ We benchmark GBT against three state-of-the-
730
+ art methods:
731
+ - pixelNeRF [32] which is a representative of projection-
732
+ guided methods for generalizable view synthesis. Similar to
733
+ our setting, we train a single category-agnostic pixelNeRF
734
+ model on 10 categories from the CO3Dv2 dataset.
735
+ - NerFormer [18] which uses attention-based mechanisms
736
+ to aggregate projected features along a query ray. We utilize
737
+ (category-specific) models provided by the authors. 1
738
+ - ViewFormer [11] which uses a two-stage ‘geometry-free’
739
+ architecture to first encode the input images into a compact
740
+ representation, and then uses a transformer model for view
741
+ synthesis. For evaluation, we use the co3d-10cat model pro-
742
+ vided by the authors.
743
+ Additionally, we compare against another variant of our
744
+ approach, where we replace the geometry-biased trans-
745
+ former layers with regular transformer layers (equivalently,
746
+ set γ = 0 during training and inference). We refer to this
747
+ as GBT-nb (no bias) in further discussion. GBT-nb is an
748
+ extension of SRT [20], where we use Plücker coordinates
749
+ 1 While we evaluated per-category models, the NerFormer authors
750
+ conveyed this performance is similar to a cross-category model.
751
+ Table 4. Ablative analysis. We train a separate category-specific
752
+ model from scratch under each setting. The models are evaluated
753
+ on the held out objects under consistent settings.
754
+ Method     Hydrant (PSNR ↑ / LPIPS ↓)    Teddybear (PSNR ↑ / LPIPS ↓)
+ SRT*       19.63 / 0.23                  19.48 / 0.32
+ GBT-nb     21.30 / 0.20                  19.32 / 0.31
+ GBT-fb     23.93 / 0.17                  20.99 / 0.28
+ GBT        24.22 / 0.17                  21.45 / 0.26
781
+ representation of rays and perform a late camera-fusion in
782
+ the feature extractor.
783
+ Evaluation Metrics.
784
+ To evaluate reconstruction quality,
785
+ we measure the peak signal-to-noise ratio (PSNR) and per-
786
+ ceptual similarity metric (LPIPS). For each category, we se-
787
+ lect 10 scenes from the dev set for evaluation. We randomly
788
+ sample V context views and 32 query views for each scene
789
+ and report the average metrics computed over these query
790
+ views. We set appropriate seeds such that the context and
791
+ query views are consistent across all methods.
792
+ [Fig. 5 column labels: Input Views, Novel Views]
796
+ Figure 5. Qualitative results on heldout categories. On each row
797
+ we visualize the rendered views obtained from GBT (right) given
798
+ V = 3 input views (left). Note that the model has never seen these
799
+ categories of objects during training.
800
+ 4.2. Results
801
+ Novel view synthesis for unseen objects. Table 1 demon-
802
+ strates the efficacy of our method in synthesizing novel
803
+ views for previously unseen objects. GBT consistently out-
804
+ performs other methods in all categories in terms of PSNR.
805
+ With the exception of a few categories, we also achieve su-
806
+ perior LPIPS compared to other baselines.
807
+ For categories such as bench, hydrant, etc.,
808
+ we at-
809
+ tribute ViewFormer’s higher perceptual quality to their use
810
+ of a 2D-only prediction model, which comes at the cost
811
+ of multi-view consistency.
812
+ For instance, in Fig 4,
813
+ ViewFormer’s prediction for the donut is plausibly similar
814
+ to some donut, however, lacks consistency with the corre-
815
+ sponding ground truth query view. Also, in cases where
816
+ the query view is not visible in any of the input views (ball,
817
+ top-right), pixelNeRF and NerFormer - which rely solely on
818
+ projection-based features from input images - suffer from
819
+ poor results, while our method is capable of hallucinating
820
+ these unseen regions.
821
+ Table 2 analyses the performance of all methods with
822
+ variable number of context views.
823
+ While GBT is only
824
+ trained with a fixed V = 3 input views, it is capable of
825
+ generalizing across different input view settings. We ob-
826
+ serve a higher performance gain under fewer context views
827
+ (2-3). However, as the number of input views increases,
828
+ pixelNeRF becomes more competitive.
829
+ Generalization to unseen categories.
830
+ To investigate
831
+ whether our model learns generic 3D priors and can infer
832
+ global context from given multi-view images, we test its
833
+ ability to generalize to previously unseen categories. In Ta-
834
+ ble 3 we benchmark our method by evaluating over 5 held
835
+ out categories. We empirically find that GBT demonstrates
836
+ better generalizability compared to baselines, and also ob-
837
+ serve this in the qualitative predictions in Figure 5.
838
+ Figure 6. Effect of viewpoint distance in prediction accuracy.
839
+ Given 200 frames, we set the 50th, 100th, 150th frame as the in-
840
+ put views, and evaluate the performance of novel view synthesis
841
+ over all other views. While the prior methods show accurate re-
842
+ sults close to the input views, our approach (GBT) consistently
843
+ outperforms them in other views.
844
+ 4.3. Analysis
845
+ Effect of Viewpoint Distance in Prediction Accuracy.
846
+ In Fig 6, we analyze view synthesis accuracy as a function
847
+ of distance from context views. In particular, we use 80
848
+ randomly sampled sequences from across categories with
849
+ 200 frames each, and set the 50th, 100th, 150th views as
850
+ context, and evaluate the average novel view synthesis ac-
851
+ curacy across indices.
852
+ We find that all approaches peak
853
+ around the observed frames, but our set-latent representa-
854
+ tion based methods (GBT, GBT-nb) perform significantly
855
+ better for query views dissimilar from the context views.
856
+ This corroborates our intuition that a global set-latent repre-
857
+ sentation is essential for reasoning in the sparse-view setup.
858
+ Ablative analysis.
859
+ We investigate the importance of the
860
+ design choices made in GBT, by ablating individual com-
861
+ ponents and analysing performance. First, we analyze the
862
+ effect of learnable geometric bias by fixing γ = 1 (GBT-fb)
863
+ during the training process. Next, we remove the geomet-
864
+ ric bias component (GBT-nb); equivalently γ = 0. Finally,
865
+ we replace Pl¨ucker coordinates for ray representation with
866
+ r = (o, d). We term this trimmed version of GBT as SRT*
867
+ (variant of SRT with late camera fusion).
868
+ For each ablation (see Table 4), we train a category-
869
+ specific model from scratch and evaluate results on held-
870
+ out objects. From Table 4, we see that learnable γ yields
871
+ some benefit over fixed γ = 1. However, removing geome-
872
+ try altogether results in a considerable drop in performance.
873
+ Also, the choice of Pl¨ucker coordinates as ray representa-
874
+ tions improves the predictions in general.
875
+ Robustness to camera noise.
876
+ As the use of the geometric
877
+ bias requires known camera calibration, we study the effect
878
+ 7
879
+
880
[Figure 6 plot: PSNR vs. query view index (0-200) for GBT, GBT-nb, ViewFormer, NerFormer, and pixelNeRF; input views at indices 50, 100, 150.]
[Figure 7 image grid: 3 input views with camera noise σ = 0, 0.02, 0.05, 0.1 and the corresponding predictions from GBT-nb, pixelNeRF, and GBT.]
+ Figure 7. Effect of camera noise. Given the 3 input views with noisy camera poses (increasing left to right), we visualize the predictions
919
+ for a common query view across three methods (rows).
920
[Figure 8 panels: query pixel, decoder layer 1 and layer 4 attention maps, predictions, and ground truth for GBT-nb and GBT.]
+ Figure 8. Attention visualization. For the query pixel marked
929
+ in green, we visualize the attention over the input patches for the
930
+ 1st and the 4th decoder layer. We compare the attention maps
931
+ of GBT-nb (top) and GBT (bottom), wherein GBT is observed to
932
+ yield sharper results. See Sec. 4.3.
933
Table 5. Evaluation of noisy cameras. All models are trained on 10 categories and evaluated on the Hydrant category.
            σ = 0          σ = 0.02       σ = 0.05       σ = 0.1
            PSNR   LPIPS   PSNR   LPIPS   PSNR   LPIPS   PSNR   LPIPS
pixelNeRF   20.43  0.26    20.06  0.26    19.20  0.27    18.09  0.29
GBT-nb      21.32  0.24    21.26  0.24    20.85  0.24    19.88  0.25
GBT         22.76  0.22    22.40  0.22    21.43  0.23    19.84  0.25
+ of noisy cameras on novel view synthesis. Following [12,
967
+ 20], we synthetically perturb input camera poses to various
968
+ degrees and analyze the effect of noise during inference (for
969
+ models trained without any camera noise during training).
970
+ We report the results in Table 5, and see that performance
971
+ degrades across all methods with camera noise. Although
972
+ GBT-nb degrades more gracefully, the performance of GBT
973
+ is better until a large amount of noise is added (about 10cm
974
camera motion for a camera at unit distance from an ob-
975
+ ject, and 9 degree rotation). Fig. 7 demonstrates these ob-
976
+ servations visually.
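One plausible way to implement this perturbation is sketched below: translations receive additive Gaussian noise and rotations a small random axis-angle offset, both scaled by σ. The exact noise model of [12, 20] may differ, so treat this as an illustrative assumption.
import torch

def perturb_pose(R, t, sigma):
    # R: (3, 3) rotation, t: (3,) translation of an input camera
    t_noisy = t + sigma * torch.randn(3)
    w = (sigma * torch.randn(3)).tolist()  # random axis-angle perturbation
    W = torch.tensor([[0.0, -w[2], w[1]],
                      [w[2], 0.0, -w[0]],
                      [-w[1], w[0], 0.0]])
    R_noisy = torch.matrix_exp(W) @ R      # small rotation applied to the pose
    return R_noisy, t_noisy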
977
+ Visualizing attention.
978
+ In Fig 8 we visualize attention
979
+ heatmaps for a particular query ray highlighted in green. In
980
+ absence of geometric bias (GBT-nb), we observe a diffused
981
+ attention map over the relevant context, which yields blur-
982
+ rier results. On adding geometric bias (GBT), we observe
983
+ more concentrated attention toward the geometrically valid
984
+ regions, resulting in more accurate details.
985
+ 5. Discussion
986
+ Our work introduced a simple but effective mechanism
987
+ for adding geometric inductive biases in set-latent repre-
988
+ sentation based networks. In particular, we demonstrated
989
+ that for the task of novel view synthesis given few input
990
+ views, this allows Transformer-based networks to better
991
+ leverage geometric associations while preserving their abil-
992
+ ity to reason about global structure. While our approach
993
+ led to substantial improvements over prior works, there are
994
+ several unaddressed challenges.
995
+ First, unlike projection-
996
+ based methods, the set-latent representation methods (in-
997
+ cluding ours) struggle to predict precise details and it re-
998
mains an open question how one can augment such meth-
999
+ ods to overcome this. Moreover, the use of geometric infor-
1000
+ mation in our approach presumes access to (approximate)
1001
+ camera viewpoints for inference, and this may limit its ap-
1002
+ plicability to in-the-wild settings. While our work focused
1003
+ on the task of view synthesis, we believe that the geometry-
1004
+ biasing mechanisms proposed would be relevant for other
1005
+ tasks where a moving camera is observing a common scene
1006
+ (e.g. video segmentation, detection).
1007
+ Acknowledgements.
1008
+ We thank Zhizhuo Zhou, Jason
1009
+ Zhang, Yufei Ye, Ambareesh Revanur, Yehonathan Litman,
1010
+ and Anish Madan for helpful discussions and feedback. We
1011
+ also thank David Novotny and Jon´aˇs Kulh´anek for shar-
1012
+ ing outputs of their work and helpful correspondence. This
1013
+ project was supported in part by a Verisk AI Faculty Award.
1014
+ 8
1015
+
1016
+ References
1017
+ [1] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P
1018
+ Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded
1019
+ anti-aliased neural radiance fields. In CVPR, 2022. 2
1020
+ [2] Mark Boss, Andreas Engelhardt, Abhishek Kar, Yuanzhen
1021
+ Li, Deqing Sun, Jonathan T. Barron, Hendrik Lensch, and
1022
+ Varun Jampani. SAMURAI: Shape and material from uncon-
1023
+ strained real-world arbitrary image collections. In NeurIPS,
1024
+ 2022. 2
1025
+ [3] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and
1026
+ Hao Su. TensoRF: Tensorial Radiance Fields. In ECCV,
1027
+ 2022. 2
1028
+ [4] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang,
1029
+ Fanbo Xiang, Jingyi Yu, and Hao Su. MVSNeRF: Fast Gen-
1030
+ eralizable Radiance Field Reconstruction from Multi-View
1031
+ Stereo. In ICCV, 2021. 2
1032
+ [5] Kangle Deng, Andrew Liu, Jun-Yan Zhu, and Deva Ra-
1033
+ manan. Depth-supervised NeRF: Fewer Views and Faster
1034
+ Training for Free. In CVPR, 2022. 2
1035
+ [6] Patrick Esser, Robin Rombach, and Bjorn Ommer.
1036
+ Tam-
1037
+ ing Transformers for High-Resolution Image Synthesis. In
1038
+ CVPR, 2021. 2
1039
+ [7] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong
1040
+ Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels:
1041
+ Radiance Fields Without Neural Networks. In CVPR, 2022.
1042
+ 2
1043
+ [8] Shubham Goel, Georgia Gkioxari, and Jitendra Malik. Dif-
1044
+ ferentiable Stereopsis: Meshes from Multiple Views using
1045
+ Differentiable Rendering. In CVPR, 2022. 2
1046
+ [9] Ajay Jain, Matthew Tancik, and Pieter Abbeel. Putting nerf
1047
+ on a diet: Semantically consistent few-shot view synthesis.
1048
+ In ICCV, 2021. 2
1049
+ [10] Yan-Bin Jia.
1050
+ Pl¨ucker Coordinates for Lines in the Space.
1051
+ Problem Solver Techniques for Applied Computer Science,
1052
+ Com-S-477/577 Course Handout, 2020. 3
1053
+ [11] Jon´aˇs Kulh´anek, Erik Derner, Torsten Sattler, and Robert
1054
+ Babuˇska. ViewFormer: NeRF-free Neural Rendering from
1055
+ Few Images Using Transformers. In ECCV, 2022. 2, 3, 5, 6,
1056
+ 12
1057
+ [12] Chen-Hsuan Lin, Wei-Chiu Ma, Antonio Torralba, and Si-
1058
+ mon Lucey.
1059
+ BARF: Bundle-Adjusting Neural Radiance
1060
+ Fields. In ICCV, 2021. 8
1061
+ [13] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik,
1062
+ Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. NeRF:
1063
+ Representing Scenes as Neural Radiance Fields for View
1064
+ Synthesis. In ECCV, 2020. 1, 2, 4, 11
1065
+ [14] Thomas M¨uller, Alex Evans, Christoph Schied, and Alexan-
1066
+ der Keller. Instant Neural Graphics Primitives with a Mul-
1067
+ tiresolution Hash Encoding. ACM Trans. Graph., 2022. 2
1068
+ [15] Phong Nguyen-Ha, Lam Huynh, Esa Rahtu, and Janne
1069
+ Heikkila. Sequential View Synthesis with Transformer. In
1070
+ ACCV, 2020. 2
1071
+ [16] Michael Niemeyer, Jonathan T. Barron, Ben Mildenhall,
1072
+ Mehdi S. M. Sajjadi, Andreas Geiger, and Noha Radwan.
1073
+ Regnerf: Regularizing neural radiance fields for view syn-
1074
+ thesis from sparse inputs. In CVPR, 2022. 2
1075
+ [17] Michael Oechsle, Songyou Peng, and Andreas Geiger.
1076
+ UNISURF: Unifying Neural Implicit Surfaces and Radiance
1077
+ Fields for Multi-View Reconstruction. In ICCV, 2021. 2
1078
+ [18] Jeremy Reizenstein, Roman Shapovalov, Philipp Henzler,
1079
+ Luca Sbordone, Patrick Labatut, and David Novotny. Com-
1080
+ mon Objects in 3D: Large-Scale Learning and Evaluation of
1081
+ Real-Life 3D Category Reconstruction. In ICCV, 2021. 2, 3,
1082
+ 5, 6, 12, 13
1083
+ [19] Robin
1084
+ Rombach,
1085
+ Patrick
1086
+ Esser,
1087
+ and
1088
+ Bj¨orn
1089
+ Ommer.
1090
+ Geometry-Free View Synthesis: Transformers and No 3D
1091
+ Priors. In ICCV, 2021. 2
1092
+ [20] Mehdi S. M. Sajjadi, Henning Meyer, Etienne Pot, Urs
1093
+ Bergmann, Klaus Greff, Noha Radwan, Suhani Vora,
1094
+ Mario Lucic, Daniel Duckworth, Alexey Dosovitskiy, Jakob
1095
+ Uszkoreit, Thomas Funkhouser, and Andrea Tagliasacchi.
1096
+ Scene Representation Transformer: Geometry-Free Novel
1097
+ View Synthesis Through Set-Latent Scene Representations.
1098
+ In CVPR, 2022. 2, 3, 5, 6, 8
1099
+ [21] Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh
1100
+ Tenenbaum, and Fredo Durand. Light Field Networks: Neu-
1101
+ ral Scene Representations with Single-Evaluation Render-
1102
+ ing. In NeurIPS, 2021. 3
1103
+ [22] Vincent Sitzmann, Michael Zollh¨ofer, and Gordon Wet-
1104
+ zstein.
1105
+ Scene Representation Networks: Continuous 3D-
1106
+ Structure-Aware Neural Scene Representations. In NeurIPS,
1107
+ 2019. 2, 3
1108
+ [23] Maxim Tatarchenko, Alexey Dosovitskiy, and Thomas Brox.
1109
+ Single-View to Multi-View: Reconstructing Unseen Views
1110
+ with a Convolutional Network.
1111
+ CoRR abs/1511.06702,
1112
+ 1(2):2, 2015. 2
1113
+ [24] Alex Trevithick and Bo Yang. Grf: Learning a general ra-
1114
+ diance field for 3d representation and rendering. In ICCV,
1115
+ 2021. 2
1116
+ [25] Aaron Van Den Oord, Oriol Vinyals, et al. Neural Discrete
1117
+ Representation Learning. In NeurIPS, 2017. 2
1118
+ [26] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszko-
1119
+ reit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia
1120
+ Polosukhin. Attention Is All You Need. In NeurIPS, 2017. 2
1121
+ [27] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku
1122
+ Komura, and Wenping Wang. NeuS: Learning Neural Im-
1123
+ plicit Surfaces by Volume Rendering for Multi-view Recon-
1124
+ struction. In NeurIPS, 2021. 2
1125
+ [28] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul Srini-
1126
+ vasan, Howard Zhou, Jonathan T. Barron, Ricardo Martin-
1127
+ Brualla, Noah Snavely, and Thomas Funkhouser.
1128
+ Ibrnet:
1129
+ Learning multi-view image-based rendering. In CVPR, 2021.
1130
+ 2
1131
+ [29] Jiajun Wu, Chengkai Zhang, Xiuming Zhang, Zhoutong
1132
+ Zhang, William T Freeman, and Joshua B Tenenbaum.
1133
+ Learning Shape Priors for Single-View 3D Completion and
1134
+ Reconstruction. In ECCV, 2018. 3
1135
+ [30] Jimei Yang, Scott E Reed, Ming-Hsuan Yang, and Honglak
1136
+ Lee.
1137
+ Weakly-Supervised Disentangling with Recurrent
1138
+ Transformations for 3D View Synthesis. In NeurIPS, 2015.
1139
+ 2
1140
+ [31] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Vol-
1141
+ ume Rendering of Neural Implicit Surfaces.
1142
+ In NeurIPS,
1143
+ 2021. 2
1144
+ 9
1145
+
1146
+ [32] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa.
1147
+ PixelNeRF: Neural Radiance Fields from One or Few Im-
1148
+ ages. In CVPR, 2021. 2, 3, 5, 6, 12
1149
+ [33] Jason Zhang, Gengshan Yang, Shubham Tulsiani, and Deva
1150
+ Ramanan. Ners: Neural reflectance surfaces for sparse-view
1151
+ 3d reconstruction in the wild. NeurIPS, 2021. 2
1152
+ [34] Tinghui Zhou, Shubham Tulsiani, Weilun Sun, Jitendra Ma-
1153
+ lik, and Alexei A Efros.
1154
+ View Synthesis by Appearance
1155
+ Flow. In ECCV, 2016. 2
1156
+ 10
1157
+
1158
+ Appendix A. Additional Random Results
1159
+ We provide additional results on randomly selected ob-
1160
jects across each category, and provide 360-degree render-
1161
+ ing for each figure in the main text. See the project page for
1162
+ video visualizations, and Sec. E for attention map visual-
1163
+ izations on more examples across each category.
1164
+ We observe that while ViewFormer produces plausible
1165
+ images, these are not 3d consistent due to the stochastic na-
1166
+ ture of the rendering pipeline. While pixelNeRF and Ner-
1167
+ Former produce accurate results in the vicinity of the ob-
1168
+ served context views, the results are inaccurate and implau-
1169
+ sible under larger camera deviations. Our baseline, GBT-nb
1170
+ produces consistent but blurry results. Finally, GBT im-
1171
+ proves over GBT-nb by furnishing finer details while pre-
1172
+ serving consistency across all viewpoints, although there is
1173
+ clear room for improvement in the level of details modeled.
1174
+ Appendix B. Classwise metrics for Table 2
1175
+ In Table 2, we present averaged results for V = 2, 3, 6
1176
+ over 10 categories. The per-category metrics are presented
1177
+ in Table 6 (for V = 2) and Table 7 (for V = 6). Note, the
1178
per-category results for the V = 3 setting are presented in the
1179
+ paper (in Table 1).
1180
+ Appendix C. Architectural Details
1181
+ We will make our implementation publicly available
1182
+ for reproducibility. We also describe the implementation
1183
+ details of GBT here.
1184
+ Overall, GBT consists of 3 com-
1185
+ ponents - the CNN backbone FC, GBT Encoder FE and
1186
+ the GBT Decoder FD.
1187
+ The input to the model is a set
1188
+ of V posed images {(Ii, pi)}V
1189
+ i=1, and H × W ray queries
1190
+ {rj}H×W
1191
+ j=1
1192
+ generated using the target camera pose pq. The
1193
+ model outputs RGB colors for each query ray, which are
1194
+ then reshaped to generate an image of size H × W × 3.
1195
+ We use PyTorch for model development. In the discus-
1196
+ sion below, tensor shapes are annotated in monospace
1197
+ font. We omit the batch dimension for simplicity. Across
1198
+ all models, the image size used is H = W = 256.
1199
+ C.1. GBT
1200
+ a) Camera-fused patch embedding (FC).
1201
+ We use a
1202
+ ResNet18 backbone (upto Res3 block) shared across input
1203
+ images to extract patch level features. Concretely, given the
1204
+ V input images {Ii}V
1205
+ i=1 of shape (V, 3, 256, 256),
1206
+ the CNN outputs a feature grid (V, 256, 16, 16).
1207
+ Each of the 16 × 16 cells in the feature grid corresponds
1208
+ to a receptive field in the input image. We associate each re-
1209
+ ceptive field with a ray that passes through its center (called
1210
+ as ‘input patch ray’ in the paper). Each input patch ray is
1211
+ represented in the Pl¨ucker coordinates (dk
1212
+ i , mk
1213
+ i ) ∈ R6 -
1214
+ a tensor of shape (V, 16, 16, 6), where the notation
1215
+ implies ith image’s kth patch. We extract harmonic em-
1216
+ beddings [13] over the Pl¨ucker coordinates, h((dk
1217
+ i , mk
1218
+ i )),
1219
+ using 15 frequencies f = −6, . . . 8. Specifically, we get
1220
+ h(x) = [sin(2fπx), cos(2fπx)] for each coordinate. This
1221
+ results in a 6∗2∗15 = 180-d feature representation, yielding
1222
+ a ray embedding tensor of shape (V, 16, 16, 180).
1223
+ The CNN features {[FC(Ii)]k} and the ray embeddings
1224
+ h((dk
1225
+ i , mk
1226
+ i )) are concatenated along the channel dimen-
1227
+ sion {[FC(Ii)]k ⊕ h((dk
1228
+ i , mk
1229
+ i ))} that results in a tensor of
1230
+ shape (V, 16, 16, 436). Finally, these features are
1231
+ projected to a 768 dimensional feature space using a linear
1232
+ layer W (i.e. camera fusion). The output of the first stage
1233
+ is therefore camera-fused patch level features [fc]k
1234
+ i repre-
1235
+ sented by a tensor of shape (V, 16, 16, 768).
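A minimal sketch of this stage is given below. The patch-center ray origins and directions are taken as inputs (how they are derived from the camera parameters follows the description above), features are kept channels-last for convenience, and the module and variable names are ours, not the released code's.
import torch
import torch.nn as nn

def plucker(o, d):
    # o, d: (..., 3) ray origins and directions -> (..., 6) Pluecker coordinates (d, m)
    d = torch.nn.functional.normalize(d, dim=-1)
    m = torch.cross(o, d, dim=-1)  # moment vector o x d
    return torch.cat([d, m], dim=-1)

def harmonic(x, freqs=range(-6, 9)):
    # h(x) = [sin(2^f * pi * x), cos(2^f * pi * x)] over 15 frequencies: 6 * 2 * 15 = 180-d
    embs = []
    for f in freqs:
        embs.append(torch.sin(2.0 ** f * torch.pi * x))
        embs.append(torch.cos(2.0 ** f * torch.pi * x))
    return torch.cat(embs, dim=-1)

class CameraFusedPatchEmbedding(nn.Module):
    def __init__(self, feat_dim=256, out_dim=768):
        super().__init__()
        self.fuse = nn.Linear(feat_dim + 180, out_dim)  # 256 CNN channels + 180-d ray embedding = 436

    def forward(self, cnn_feats, ray_o, ray_d):
        # cnn_feats: (V, 16, 16, 256); ray_o, ray_d: (V, 16, 16, 3) patch-center rays
        ray_emb = harmonic(plucker(ray_o, ray_d))                    # (V, 16, 16, 180)
        return self.fuse(torch.cat([cnn_feats, ray_emb], dim=-1))   # (V, 16, 16, 768)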
1236
+ b) Geometry-biased scene encoding.
1237
+ We use GBT en-
1238
+ coder to embed the global scene context into the patch fea-
1239
+ tures.
1240
+ The GBT encoder consists of 8 geometry-biased
1241
+ transformer encoder layers with GELU activation, 12 MHA
1242
+ heads, and 768-d latent feature size. Each MHA module is
1243
+ biased using ray distances as done in Eq. 6.
1244
+ We construct the query, key and value tokens using flat-
1245
+ tened patch embeddings. Each query and key token is asso-
1246
+ ciated with the patch ray (Pl¨ucker coordinates). Therefore,
1247
+ the input to the GBT encoder is patch-level feature tensor of
1248
+ shape (V * 16 * 16, 768) along with the patch ray
1249
+ tensor of shape (V * 16 * 16, 6). Note, the patch ray
1250
+ tensor is the same across all 8 GBT encoder layers, while
1251
+ the learnable weight γ is different for each layer.
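A sketch of one such geometry-biased attention layer is shown below. Eq. 6 itself is in the main text; here we assume the bias enters by subtracting γ times the pairwise query-key ray distance from the attention logits before the softmax, with γ the per-layer learnable scalar mentioned above, and we take the ray-distance matrix as a precomputed input.
import torch
import torch.nn as nn

class GeometryBiasedAttention(nn.Module):
    def __init__(self, dim=768, num_heads=12):
        super().__init__()
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.qkv_proj = nn.Linear(dim, 3 * dim)
        self.out_proj = nn.Linear(dim, dim)
        self.gamma = nn.Parameter(torch.ones(1))  # learnable per-layer bias weight

    def forward(self, x, ray_dist):
        # x: (N, dim) patch tokens; ray_dist: (N, N) pairwise ray distances
        N, dim = x.shape
        q, k, v = self.qkv_proj(x).chunk(3, dim=-1)
        q = q.view(N, self.num_heads, self.head_dim).transpose(0, 1)  # (H, N, d)
        k = k.view(N, self.num_heads, self.head_dim).transpose(0, 1)
        v = v.view(N, self.num_heads, self.head_dim).transpose(0, 1)
        logits = q @ k.transpose(-1, -2) / self.head_dim ** 0.5       # (H, N, N)
        logits = logits - self.gamma * ray_dist.unsqueeze(0)          # geometric bias
        attn = logits.softmax(dim=-1)
        out = (attn @ v).transpose(0, 1).reshape(N, dim)
        return self.out_proj(out)
The decoder's cross-attention layers can be biased in the same way, using distances between the query rays and the input patch rays.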
1252
+ The output of the GBT encoder module {[fe]k
1253
+ i } is a ten-
1254
+ sor of shape (V * 16 * 16, 768) which is the set-
1255
+ latent representation of the scene. Each output token [fe]k
1256
+ i
1257
+ summarizes the appearance and the geometry of the scene
1258
+ incorporating both local and global features. These output
1259
+ tokens are used as the memory for the GBT decoder module
1260
+ to decode ray queries as described below.
1261
+ c) Geometry-biased ray decoding.
1262
+ To render an image,
1263
+ we construct Q ray queries using the query camera pose pq
1264
+ and use the GBT decoder to predict the RGB color for each
1265
+ pixel. The GBT decoder contains a stack of 4 geometry-
1266
+ biased transformer decoder layers, followed by a shallow
1267
+ MLP. Similar to encoder, each decoder layer consists of 12
1268
+ MHA heads biased with ray distances, 768-d latent dimen-
1269
+ sions and GELU activation. The MLP consists of 2 ReLU
1270
+ activated hidden layers (256-d, 64-d) and a sigmoid acti-
1271
+ vated output (0-1 normalized RGB values).
1272
+ The decoder’s query tokens consist of harmonic ray
1273
+ embeddings h((dj, mj)) and the Pl¨ucker coordinates
1274
+ (dj, mj) for each query ray. Similar to the encoder, we use
1275
+ 15 frequencies which results in a harmonic ray embedding
1276
+ tensor of shape (Q, 180). These are projected to a 768-d
1277
+ feature space (GBT decoder’s input dimension) via a linear
1278
+ 11
1279
+
1280
Table 6. Evaluation of novel view synthesis. Given V = 2 input views, we evaluate the reconstruction quality (PSNR ↑ and LPIPS ↓) of each method on the CO3Dv2 [18] dataset. GBT denotes our proposed approach, and GBT-nb is an ablation.
10 training cat.   Apple        Ball         Bench        Cake         Donut        Hydrant      Plant        Suitcase     Teddybear    Vase         Mean
                   PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS
pixelNeRF [32]     18.21 0.36   17.74 0.35   17.59 0.38   17.22 0.38   18.51 0.35   18.44 0.31   19.39 0.36   20.71 0.37   17.74 0.41   19.17 0.34   18.47 0.36
NerFormer [18]     20.11 0.34   16.63 0.37   15.09 0.55   17.23 0.48   20.07 0.36   18.11 0.35   18.37 0.53   19.69 0.46   15.73 0.51   17.79 0.39   17.88 0.43
ViewFormer [11]    20.53 0.25   18.35 0.31   16.58 0.30   17.66 0.33   18.88 0.29   17.93 0.22   18.04 0.31   21.11 0.26   15.87 0.32   21.23 0.21   18.62 0.28
GBT-nb             22.13 0.30   19.83 0.33   18.69 0.36   20.20 0.35   21.00 0.32   21.16 0.24   21.17 0.31   23.02 0.30   19.52 0.35   22.35 0.28   20.91 0.31
GBT                22.96 0.27   21.45 0.28   19.10 0.33   20.71 0.32   21.78 0.29   21.82 0.23   21.29 0.29   23.41 0.28   19.93 0.32   22.28 0.26   21.47 0.29
Table 7. Evaluation of novel view synthesis. Given V = 6 input views, we evaluate the reconstruction quality (PSNR ↑ and LPIPS ↓) of each method on the CO3Dv2 [18] dataset. GBT denotes our proposed approach, and GBT-nb is an ablation.
10 training cat.   Apple        Ball         Bench        Cake         Donut        Hydrant      Plant        Suitcase     Teddybear    Vase         Mean
                   PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS  PSNR  LPIPS
pixelNeRF [32]     23.07 0.24   22.26 0.25   19.94 0.29   21.18 0.28   23.02 0.24   22.62 0.21   21.86 0.26   23.78 0.27   21.35 0.29   23.38 0.22   22.25 0.26
NerFormer [18]     22.03 0.26   18.16 0.33   17.09 0.50   19.53 0.43   23.10 0.29   21.10 0.27   20.62 0.46   21.48 0.43   18.29 0.44   18.73 0.37   20.01 0.38
ViewFormer [11]    22.66 0.23   20.11 0.29   18.06 0.28   19.05 0.31   20.79 0.27   19.62 0.20   18.94 0.29   22.18 0.25   17.57 0.29   22.20 0.21   20.12 0.26
GBT-nb             22.53 0.28   20.59 0.32   19.50 0.34   20.77 0.34   22.15 0.30   21.24 0.23   21.83 0.30   23.43 0.29   19.85 0.34   23.00 0.26   21.49 0.30
GBT                25.50 0.23   23.35 0.26   20.64 0.30   22.34 0.30   23.55 0.27   23.18 0.21   22.46 0.27   24.65 0.26   21.22 0.30   24.06 0.25   23.10 0.26
+ layer. The keys and values tokens (i.e. memory) pertain to
1539
+ the set-latent representation output by the GBT encoder, i.e.
1540
+ a tensor of shape (V * 16 * 16, 768).
1541
+ The GBT decoder outputs a tensor of shape (Q, 768)
1542
+ that consists of decoded ray features for each target pixel.
1543
+ Finally, the MLP predicts a tensor of shape (Q, 3) con-
1544
+ taining the RGB colors for each queried pixel. During train-
1545
+ ing, we compute L2 reconstruction loss on Q = 7168 pre-
1546
+ dicted pixel colors, and at inference we predict the colors
1547
+ for Q = 256 × 256 rays which is reshaped to yield the im-
1548
+ age tensor of shape (3, 256, 256).
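As a concrete illustration of the query construction, the sketch below builds the H × W Pluecker ray queries for a target camera under a pinhole model; the conventions assumed here (camera-to-world rotation R, camera center c, intrinsics K) are ours and may differ from the actual codebase.
import torch

def make_query_rays(R, c, K, H=256, W=256):
    ys, xs = torch.meshgrid(torch.arange(H) + 0.5, torch.arange(W) + 0.5, indexing="ij")
    pix = torch.stack([xs, ys, torch.ones_like(xs)], dim=-1).reshape(-1, 3)   # (H*W, 3) pixel coords
    dirs_cam = pix @ torch.linalg.inv(K).T                                    # back-project to camera rays
    dirs = torch.nn.functional.normalize(dirs_cam @ R.T, dim=-1)              # rotate to world frame
    origins = c.expand_as(dirs)
    moments = torch.cross(origins, dirs, dim=-1)
    return torch.cat([dirs, moments], dim=-1)                                 # (H*W, 6) Pluecker coordinates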
1549
+ C.2. Ablations
1550
+ We also propose 3 ablations of GBT in the paper:
1551
+ GBT-fb (fixed bias).
1552
+ This variant employs a fixed γ = 1
1553
+ weight in all the geometry-biased transformer layers as op-
1554
+ posed to learning the weight γ. During training this model
1555
requires less memory overhead since the gradients for γ
1556
+ are no longer computed. At inference, the compute over-
1557
+ head is similar to GBT.
1558
+ GBT-nb (no bias).
1559
+ In this variant, we remove the
1560
+ geometry-biased transformer layers in the encoder and de-
1561
+ coder, and replace them with regular transformer layers (im-
1562
+ plemented in PyTorch). During training and inference, this
1563
model incurs less computational overhead than GBT since
1564
+ the ray distances are no longer computed. However, this
1565
+ comes at the cost of quality, which corroborates the need to
1566
+ account for geometry during attention.
1567
+ SRT*.
1568
+ This variant is closest to SRT, wherein we no
1569
+ longer use geometric bias, nor Pl¨ucker ray representations.
1570
+ Rays are represented using the origin o and direction d as
1571
+ r = (o, d). While the compute overhead is similar to GBT-
1572
+ nb, this model is the least performing among all the variants
1573
+ which demonstrates the benefits of our design choices.
1574
+ Appendix D. Experimental Details
1575
+ D.1. Training & Inference
1576
+ Training.
1577
+ We perform mixed-precision training with
1578
+ 2×NVIDIA A6000 (48GB) GPUs with a batch size of
1579
+ B = 6 scenes. For each scene in a batch, we randomly
1580
+ sample V = 3 input views and Q = 7168 rays from an
1581
+ arbitrary query viewpoint. The predicted pixel RGB color
1582
+ for each query ray is supervised using an L2 reconstruction
1583
+ loss with respect to the ground truth pixel in the query view-
1584
+ point. The training is performed till loss convergence which
1585
+ is about 1.6Mil iterations for GBT and about 2Mil iterations
1586
+ for GBT-nb trained on all 10 categories (about 9-10 days).
1587
+ Inference.
1588
+ At inference we are provided with V posed in-
1589
+ put images and a query camera pose pq. We generate a
1590
+ batch of H ×W = 256×256 query rays that are decoded in
1591
+ a single forward pass. The inference time for a single query
1592
+ image with V = 3 input views for GBT is 0.09s (∼ 11 FPS),
1593
+ and for GBT-nb is 0.025s (∼ 40 FPS). Compared to GBT,
1594
the prior methods exhibit longer runtimes - pixelNeRF takes
1595
+ 7.3s (∼ 0.13 FPS), NerFormer takes 2.7s (∼ 0.37 FPS), and
1596
+ ViewFormer takes 0.68s (∼ 1.5 FPS), using default param-
1597
+ eters (1×A6000 GPU).
1598
+ 12
1599
+
1600
+ D.2. Dataset Splits
1601
+ We use the CO3Dv2 dataset [18] that contains multi-
1602
+ view images along with camera pose annotations of 51
1603
+ object categories.
1604
+ We select 10 categories to train our
1605
+ models - [apple, ball, bench, cake, donut,
1606
+ hydrant, plant, suitcase, teddybear,
1607
+ vase].
1608
+ Additionally, we choose 5 heldout categories -
1609
+ [backpack, book, chair, mouse, remote],
1610
+ which are used to evaluate the generalization of methods.
1611
+ All images are cropped and resized to 256 × 256 (the
1612
+ camera parameters are modified accordingly).
1613
+ CO3Dv2
1614
+ provides
1615
+ three
1616
+ dataset
1617
+ splits
1618
+ -
1619
+ fewview train, fewview dev, and fewview test.
1620
+ Since the fewview test ground-truth has been redacted
1621
+ for online evaluation, we use fewview train for train-
1622
+ ing and fewview dev for testing. We use all available
1623
+ views in each scene in fewview train split for training.
1624
+ For computing metrics on the fewview dev split, we
1625
+ evaluate the models on 32 randomly selected views for the
1626
+ first 10 scenes in each category. We set random seed such
1627
+ that the input and query viewpoints are consistent across
1628
+ all methods.
1629
+ For the viewpoint distance experiment in
1630
+ Fig. 6, we evaluate the average PSNR over 80 sequences
1631
+ across categories for each of 200 query views, with the
1632
+ 50th, 100th, 150th views being the input views.
1633
+ Appendix E. Attention Visualization
1634
+ We plot the attention maps for GBT and GBT-nb in Fig.
1635
+ 9-13. Overall, the incorporation of geometric bias results in
1636
+ more concentrated attention towards the geometrically valid
1637
+ regions. For instance, see the attention maps for GBT and
1638
+ GBT-nb in the two hydrant examples in Fig. 9. We hypoth-
1639
+ esize that concentrated attention toward the relevant context
1640
+ improves the quality of the rendered images.
1641
+ 13
1642
+
1643
+ Figure 9. Attention maps for held out objects in teddybear, vase and hydrant categories.
1644
+ 14
1645
+
1646
[Figure 9 panels: ground truth, predictions, and decoder layer 1/4 attention maps for GBT-nb and GBT.]
Figure 10. Attention maps for held out objects in apple, cake and backpack categories.
1689
+ 15
1690
+
1691
[Figure 10 panels: ground truth, predictions, and decoder layer 1/4 attention maps for GBT-nb and GBT.]
Figure 11. Attention maps for held out objects in ball, bench and book categories.
1733
+ 16
1734
+
1735
[Figure 11 panels: ground truth, predictions, and decoder layer 1/4 attention maps for GBT-nb and GBT.]
Figure 12. Attention maps for held out objects in donut, remote and suitcase categories.
1777
+ 17
1778
+
1779
[Figure 12 panels: ground truth, predictions, and decoder layer 1/4 attention maps for GBT-nb and GBT.]
Figure 13. Attention maps for held out objects in chair, mouse and plant categories.
1821
+ 18
1822
+
1823
[Figure 13 panels: ground truth, predictions, and decoder layer 1/4 attention maps for GBT-nb and GBT.]
79E3T4oBgHgl3EQfqQqR/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
8NAyT4oBgHgl3EQfQvar/content/2301.00053v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f75cf8c21f78c78189f536a097c7b0c577ad6182b33e7c9a6c88086482d4948
3
+ size 230568
8NAyT4oBgHgl3EQfQvar/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b71307906800fb441e5cd88fb65f3d14b94722e6cb0a2ee16f3bc92317525459
3
+ size 2949165
8NAyT4oBgHgl3EQfQvar/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57edcc97c7a92208d36e01568f90dc0c8a5b5da36a77804c9ad49793d56c22d6
3
+ size 108286
8NE1T4oBgHgl3EQfTwPq/content/2301.03083v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f02667a861d367d33714e17654725fecd9091e04a840270f136a769365bb25ac
3
+ size 609684
8NE1T4oBgHgl3EQfTwPq/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d55e30fe5ddca6d96d3fead488b511f8fbf49381734dc8fd56b9b26457154bc
3
+ size 304068
9NAyT4oBgHgl3EQf3Plu/content/2301.00765v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0414254a070af0a5a9af3e2aac09b7e49cd845b301ac114a0a40105cc3fe5486
3
+ size 18561823
A9AzT4oBgHgl3EQfTPz-/content/2301.01248v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9e2a8fe460e3864239cce8386b0345020333bbb9992832fa8f55e3b8597ec5b4
3
+ size 16882433
AdFLT4oBgHgl3EQfxDCd/content/tmp_files/2301.12166v1.pdf.txt ADDED
@@ -0,0 +1,1051 @@
 
 
 
 
 
 
 
 
 
 
1
+ Heterogeneous Datasets for Federated Survival
2
+ Analysis Simulation
3
+ Alberto Archetti
4
+ DEIB, Politecnico di Milano
5
+ Milan, Italy
6
7
+ Eugenio Lomurno
8
+ DEIB, Politecnico di Milano
9
+ Milan, Italy
10
11
+ Francesco Lattari
12
+ DEIB, Politecnico di Milano
13
+ Milan, Italy
14
15
+ Andr´e Martin
16
+ Technische Universit¨at Dresden
17
+ Dresden, Germany
18
19
+ Matteo Matteucci
20
+ DEIB, Politecnico di Milano
21
+ Milan, Italy
22
23
+ Abstract—Survival analysis studies time-modeling techniques
24
+ for an event of interest occurring for a population. Survival
25
+ analysis found widespread applications in healthcare, engineer-
26
+ ing, and social sciences. However, the data needed to train
27
+ survival models are often distributed, incomplete, censored, and
28
+ confidential. In this context, federated learning can be exploited
29
+ to tremendously improve the quality of the models trained on
30
+ distributed data while preserving user privacy. However, feder-
31
+ ated survival analysis is still in its early development, and there
32
+ is no common benchmarking dataset to test federated survival
33
+ models. This work proposes a novel technique for constructing
34
+ realistic heterogeneous datasets by starting from existing non-
35
+ federated datasets in a reproducible way. Specifically, we provide
36
+ two novel dataset-splitting algorithms based on the Dirichlet
37
+ distribution to assign each data sample to a carefully chosen
38
+ client: quantity-skewed splitting and label-skewed splitting. Fur-
39
+ thermore, these algorithms allow for obtaining different levels
40
+ of heterogeneity by changing a single hyperparameter. Finally,
41
+ numerical experiments provide a quantitative evaluation of the
42
+ heterogeneity level using log-rank tests and a qualitative analysis
43
+ of the generated splits. The implementation of the proposed
44
+ methods is publicly available in favor of reproducibility and to
45
+ encourage common practices to simulate federated environments
46
+ for survival analysis.
47
+ Index Terms—datasets, federated learning, survival analysis
48
+ I. INTRODUCTION
49
+ Survival analysis [1], [2] is a subfield of statistics focused
50
+ on modeling the occurrence time of an event of interest for a
51
+ population. In particular, its goal is to exploit statistical and
52
+ machine learning techniques to provide a survival function,
53
+ i.e., a function that estimates the event occurrence probability
54
+ with respect to time for an individual. Survival analysis has
55
+ been successfully applied in many healthcare, engineering,
56
+ and social science applications. However, the data to train
57
+ survival models are often distributed, incomplete, inaccurate,
58
+ and confidential [3], [4]. On top of that, survival data often
59
+ include a considerable portion of censored observations, i.e.,
60
+ instances for which the event of interest has yet to occur. In
61
+ censored samples, the observed time is an underestimation
62
+ of the actual occurrence time of the event. As a result,
63
+ data scarcity, censorship, and confidentiality can hinder the
64
+ applicability of survival analysis when addressing real-world,
65
+ large-scale problems.
66
+ In this context, Federated Learning (FL) [5], [6] holds
67
+ tremendous potential to improve the effectiveness of survival
68
+ analysis applications. FL is a subfield of distributed machine
69
+ learning that investigates techniques to train machine learning
70
+ models while preserving user privacy. In FL, data information
71
+ never leaves the device in which it is produced, collected, and
72
+ stored. FL allows for training on large-scale data, improving
73
+ the quality, fairness, and generalizability of the resulting
74
+ models with respect to the non-distributed counterparts.
75
+ Federated survival analysis studies the relationship between
76
+ federated learning and survival analysis. In particular, survival
77
+ models present structural components that make their inclusion
78
+ into existing federated learning algorithms non-trivial [3], [7]–
79
+ [9]. Since this field is in its early development, reproducible
80
+ and standardized simulation environments are of paramount
81
+ importance for the comparability of results. Simulation en-
82
+ vironments mimic one or many aspects of real-world feder-
83
+ ations, such as client availability, communication constraints,
84
+ computation constraints, and data heterogeneity. Some existing
85
+ works provide simulation environments for standard federated
86
+ learning applications [10], [11]. However, there is no direct
87
+ support for survival analysis problems within these environ-
88
+ ments. Other works implement algorithms for single-client
89
+ survival models [12]–[15] based on centralized datasets [16].
90
To date, there is no common benchmarking dataset or practice
91
+ to test federated survival analysis algorithms.
92
+ This paper presents a novel technique for constructing
93
+ realistic federated datasets by starting from non-federated
94
+ survival datasets in a reproducible way. In this context, re-
95
+ alistic federated datasets exhibit heterogeneous distributions
96
+ among client data, i.e., data are non-independent and non-
97
+ identically distributed (non-IID). More specifically, we provide
98
+ two algorithms for assigning each data sample from a non-
99
+ federated survival dataset to a carefully chosen client. In this
100
+ way, a survival dataset can be split across a federation of
101
+ clients. The proposed data splitting algorithms are based on
102
+ arXiv:2301.12166v1 [cs.LG] 28 Jan 2023
103
+
104
+ the Dirichlet distribution [17], [18]. The first algorithm focuses
105
+ on building federated datasets with a non-uniform number of
106
+ samples. We call this algorithm quantity-skewed splitting. The
107
+ second one, instead, builds client datasets with different label
108
+ distributions. We call this algorithm label-skewed splitting.
109
+ The heterogeneity level introduced by each algorithm in the
110
+ resulting data assignments can be tuned with a parameter
111
+ α > 0, such that for α → 0 data are more skewed, while
112
+ for α → ∞ data are more uniform. The ability to tune
113
+ the heterogeneity level allows for federated simulations with
114
+ different environmental conditions. This aspect is essential to
115
+ test the resilience of federated survival models to non-IID
116
+ realistic data distributions.
117
+ The presented techniques have been tested on a collection of
118
+ datasets for survival analysis, providing visual insights about
119
+ the level of heterogeneity induced in each setting. Also, the
120
+ level of heterogeneity is numerically investigated with log-rank
121
+ tests within client distributions. The experimental evaluation
122
+ demonstrates that the proposed techniques are able to build
123
+ heterogeneous federated datasets starting from non-federated
124
+ survival data. Moreover, the numerical analysis shows how
125
+ the α parameter can effectively control the heterogeneity level
126
+ induced by each split.
127
+ The implementation of quantity-skewed and label-skewed
128
+ splitting is publicly available1 in favor of reproducibility and
129
+ to encourage the usage of common practices in the simulation
130
+ of federated survival environments.
131
+ II. BACKGROUND AND RELATED WORKS
132
+ This section summarizes the main aspects of survival anal-
133
+ ysis and federated learning and reviews the state-of-the-art on
134
+ federated survival analysis.
135
+ A. Survival Analysis
136
+ Survival analysis, also known as time-to-event analysis, is
137
+ a statistical machine learning field that models the occurrence
138
+ time of an event of interest for a population [2]. The distinctive
139
+ feature of survival models is the handling of censored data.
140
+ With censored data, we refer to samples for which the event
141
+ occurrence was not observed during the study. A survival
142
+ dataset D is a set of N triplets
143
+ (xi, δi, ti), i = 1, . . . , N s.t.
144
+ • xi ∈ Rd is a d-dimensional feature vector, also called
145
+ covariate vector, that retains all the input information for
146
+ a sample;
147
+ • δi is the event occurrence indicator. If δi = 1, then the i-th
148
+ sample experienced the event, otherwise the i-th sample
149
+ is censored and δi = 0;
150
• ti = min{t^e_i, t^c_i} is the minimum between the actual event time t^e_i and the censoring time t^c_i.
156
+ This setting refers to right-censoring [19], where the censoring
157
+ time is less than or equal to the actual event time. This is the
158
+ 1https://github.com/archettialberto/federated survival datasets
159
+ case, for instance, of disease recurrence under a certain treat-
160
+ ment [20] or patient death [21]. Indeed, right-censoring is the
161
+ most common scenario in real-world survival applications [2].
162
+ Therefore, we limit the discussion to the right censoring setting
163
+ for the rest of the paper.
164
+ The goal of survival analysis is to estimate the event
165
+ occurrence probability with respect to time. In particular, the
166
+ output of a survival model is the survival function
167
+ S(t|x) = P(T > t|x).
168
+ Survival models are classified into three types: non-
169
+ parametric, semi-parametric, and parametric [2]. In this work,
170
+ we include non-parametric models in the analysis of the pro-
171
+ posed data splitting algorithms, as these are the only models
172
+ that make no assumption about the underlying event distri-
173
+ bution over time. Moreover, non-parametric models are well-
174
+ suited for survival data visualization. Indeed, non-parametric
175
+ models encode the overall survival behavior of a population by
176
+ predicting a survival function ˆS(t) which is not conditioned
177
+ on x.
178
+ Non-parametric models are Kaplan-Meier (KM) [22],
179
+ Nelson-Aalen [23], [24], and Life-Table [25]. Among those,
180
+ the KM estimator is the most widely spread in survival
181
+ applications due to its intuitive interpretation. The KM es-
182
+ timator starts from the set of unique event occurrence times
183
+ TD = {tj : (xi, δi, tj) ∈ D}. Then, for each tj ∈ TD it
184
+ computes the number of observed events dj ≥ 1 at time tj
185
+ and the number of samples rj that did not yet experience an
186
+ event. The KM estimator is computed as
187
+ ˆS(t) =
188
+
189
+ j:tj<t
190
+
191
+ 1 − dj
192
+ rj
193
+
194
+ .
195
+ B. Federated Learning
196
+ Federated Learning (FL) [5], [6] is a learning setting in
197
+ which a set of agents jointly train a machine learning model
198
+ without sharing the data they store locally. FL algorithms
199
+ rely on a central server for message exchange and agent
200
+ coordination. A federation is composed of K clients, each
201
+ holding a private dataset Dk, k = 1, . . . , K. The goal of a
202
+ FL algorithm is to find the best parameters w that optimize a
203
+ global loss function L:
204
+ min
205
+ w L(w) = min
206
+ w
207
+ K
208
+
209
+ k=1
210
+ λkLk(w).
211
+ Lk is the local loss function computed by client k. λk is a
212
+ set of parameters weighting the contribution of each client to
213
+ the global loss. Usually, λk is proportional to the number of
214
+ samples on which each client k evaluated Lk(w) locally. This
215
+ weighting strategy favors contributions from clients holding
216
+ more private data, which are more likely to be representative
217
+ of the entire data distribution.
218
+ Federated Averaging (FedAvg) [26] is the first algorithm
219
+ developed to minimize L. It relies on iterative averaging of
220
+ model parameters trained locally on random subsets of clients.
221
+ However, FedAvg is not always suited to face system security
222
+
223
+ and confidentiality preservation challenges in real-world appli-
224
+ cations [27], [28]. Moreover, real-world applications present
225
+ multiple levels of heterogeneity. First, system heterogeneity
226
constrains FL algorithms to comply with the hardware limita-
227
+ tions of the network channel and the clients’ devices. Second,
228
+ datasets are not guaranteed to contain identically distributed
229
+ data. In fact, in most real-world scenarios data are likely to
230
+ be non-IID. In order to handle data heterogeneity in federated
231
+ environments, several non-survival federated algorithms have
232
+ been proposed [29]–[31].
233
+ C. Federated Survival Analysis
234
+ Federated learning provides key advantages for the future
235
+ of healthcare applications [4]. In particular, federated survival
236
+ analysis investigates the opportunities and challenges related
237
+ to the integration of federated learning into survival analy-
238
+ sis tasks. However, few works specifically tackle federated
239
+ survival analysis applications. Some works [3], [7] provide
240
+ solutions for the non-separability of the partial log-likelihood
241
+ loss, used to train Cox survival models [32]. Indeed, non-
242
+ separable loss functions are not suited for federated learning
243
+ algorithms, as their evaluation requires access to all the
244
+ available data in the federation. Other works [8], [9] provide
245
+ federated versions of classical survival algorithms asymptoti-
246
+ cally equivalent to their centralized counterparts. Within these
247
+ works, data federations are built with uniform data splits or
248
+ with entirely simulated datasets.
249
+ D. Federated Datasets
250
+ Concerning the available datasets for federated simulation,
251
+ LEAF [33] is the most widely spread dataset collection for
252
+ standard federated learning applications. It provides several
253
+ real-world datasets covering classification, sentiment analysis,
254
+ next-character, and next-word prediction. Secure Generative
255
+ Data Exchange (SGDE) [34] is a recent framework to build
256
+ synthetic datasets in a privacy-preserving way. SGDE provides
257
+ inherently heterogeneous datasets composed of synthetic sam-
258
+ ples provided by client-side data generators. Currently, SGDE
259
+ has been applied to classification and regression problems
260
+ only. Other studies [17], [18] investigate the taxonomy of data
261
+ heterogeneity and provide techniques to emulate non-IID data
262
+ splits starting from centralized classification datasets.
263
+ To the best of our knowledge, the existing federated dataset
264
+ collections do not contain survival datasets. Moreover, existing
265
+ data-splitting techniques are tailored for non-survival problems
266
+ only. This is the first study extending heterogeneous data-
267
+ splitting techniques for regression and classification to survival
268
+ analysis.
269
+ III. METHOD
270
+ This paper presents a set of techniques to split survival
271
+ datasets into heterogeneous federations. We start from a sur-
272
+ vival dataset D and a number of clients K. The goal is to
273
+ assign to each sample in D a client k ∈ {1, . . . , K}, such that
274
+ federated survival algorithms can leverage the set of Dks to
275
+ simulate heterogeneous learning scenarios. The work proposes
276
+ two splitting techniques: quantity-skewed and label-skewed
277
+ splitting.
278
+ A. Quantity-Skewed Splitting
279
+ Quantity-skewed splitting pertains to a scenario where the
280
+ number of samples for each client k, represented as |Dk|,
281
+ varies among clients. In such a scenario, clients with a limited
282
+ number of samples may generate gradients that are inherently
283
+ noisy, which can impede the convergence of federated learning
284
+ algorithms. This is due to the fact that clients with a smaller
285
+ number of samples tend to exhibit higher variance in their gra-
286
+ dients, leading to instability in the federated learning process
287
+ and hampering convergence rate.
288
+ Simulation of quantity-skewed scenarios is essential in
289
+ assessing the robustness of federated survival algorithms. It
290
+ enables researchers to evaluate the algorithm’s ability to handle
291
+ the imbalance in sample distribution across clients and its
292
+ impact on algorithm performance.
293
+ Similarly to [17], [18], the proportion of samples p to assign
294
+ to each client follows a Dirichlet distribution
295
+ p ∼ D(α · 1K).
296
+ Here, 1K is a vector of 1s of length K. p ∈ [0, 1]K such
297
+ that ⟨1K, p⟩ = 1. α > 0 is a similarity parameter controlling
298
+ the similarity between client dataset cardinalities |Dk|. For
299
α → 0, the numbers of samples across the Dk are heterogeneous.
300
+ Conversely, for α → ∞, the number of samples for each
301
+ Dk tends to be similar. With quantity-skewed splitting, each
302
+ sample (xi, δi, ti) is assigned to a client dataset Dk with
303
+ probability
304
+ P ((xi, δi, ti) ∈ Dk) = p[k].
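A minimal NumPy sketch of this procedure is given below; the function and variable names are ours, not necessarily those of the released implementation.
import numpy as np

def quantity_skewed_split(dataset, K, alpha, seed=0):
    # dataset: indexable collection of (x_i, delta_i, t_i) triplets
    rng = np.random.default_rng(seed)
    p = rng.dirichlet(alpha * np.ones(K))                 # client proportions p ~ D(alpha * 1_K)
    assignment = rng.choice(K, size=len(dataset), p=p)    # client index for each sample
    return [[dataset[i] for i in np.where(assignment == k)[0]] for k in range(K)]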
305
+ B. Label-Skewed Splitting
306
+ Label-skewed splitting pertains to scenarios in which the
307
+ distribution of labels differs among client datasets. This type
308
+ of distribution heterogeneity is commonly encountered in real-
309
+ world federated learning scenarios. The non-IID distribution
310
+ can be attributed to various factors, including variations in data
311
+ collection and storage processes, the use of different acquisi-
312
+ tion devices, and variations in preprocessing or labeling tech-
313
+ niques. Additionally, clients may have different label quantities
314
+ due to domain-specific factors. For instance, in a federated
315
+ healthcare scenario for treatment risk assessment, one client
316
+ may have a dataset of records from a rural hospital, while
317
+ another client may have data from an urban hospital. These
318
+ datasets from different locations may exhibit heterogeneous
319
+ label distributions due to disparities in patient demographics
320
+ and healthcare access.
321
+ To produce a label-skewed data split, first, the timeline of
322
+ the original survival dataset is divided into B bins, obtaining
323
+ a set of time instants {τ0, . . . , τB}. The bin identification
324
+ can be uniform or quantile-based, as in [35]. Then, each
325
+ sample (xi, δi, ti) is assigned a class that corresponds to
326
+ the b-th bin, such that ti ∈ (τb−1, τb]. Following [17], [18],
327
+
328
TABLE I
SURVIVAL DATASETS INVOLVED IN THE EXPERIMENTS.
Dataset          Samples   Censored   Features
GBSG [20]           686       44%         8
METABRIC [37]      1904       58%         8
AIDS [38]          2839       62%         4
FLCHAIN [39]       7874       28%        10
SUPPORT [40]       9105       68%        35
354
+ the Dirichlet distribution is used to identify heterogeneous
355
+ splitting proportions according to the sample class as
356
+ p1 ∼ D(α · 1K)
357
+ ...
358
+ pB ∼ D(α · 1K)
359
+ Finally, each sample (xi, δi, ti) assigned to label b is added to
360
+ Dk with probability
361
+ P ((xi, δi, ti) ∈ Dk) = pb[k].
362
+ The α parameter controls the level of similarity between label
363
+ distributions. For α → ∞, client label distributions are similar,
364
+ while for α → 0 label distributions differ. The numerical
365
+ dependency between α and the data heterogeneity level is
366
+ discussed in detail using log-rank tests [36] in Section IV.
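A corresponding sketch for label-skewed splitting, using quantile time bins as one of the binning options mentioned above, is given below; again, the naming is an assumption of this sketch.
import numpy as np

def label_skewed_split(times, K, alpha, B=4, seed=0):
    # times: observed times t_i used to define the B time bins
    rng = np.random.default_rng(seed)
    times = np.asarray(times)
    edges = np.quantile(times, np.linspace(0, 1, B + 1))
    labels = np.clip(np.searchsorted(edges, times, side="left") - 1, 0, B - 1)
    assignment = np.empty(len(times), dtype=int)
    for b in range(B):
        idx = np.where(labels == b)[0]
        p_b = rng.dirichlet(alpha * np.ones(K))           # per-bin proportions p_b ~ D(alpha * 1_K)
        assignment[idx] = rng.choice(K, size=len(idx), p=p_b)
    return assignment                                     # client index for each sample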
367
+ IV. EXPERIMENTS
368
+ This section presents the experiments carried out to evaluate
369
+ the proposed methods for building heterogeneous datasets for
370
+ federated survival analysis.
371
+ A. Datasets
372
+ Each of the experiments involves the following sur-
373
+ vival datasets: the German Breast Cancer Study Group 2
374
+ (GBSG2) [20], the Molecular Taxonomy of Breast Cancer
375
+ International Consortium (METABRIC) [37], the Australian
376
+ AIDS survival dataset (AIDS) [38], the assay of serum-
377
+ free light chain dataset (FLCHAIN) [39], and the Study to
378
+ Understand Prognoses Preferences Outcomes and Risks of
379
+ Treatment (SUPPORT) [40]. The dataset summary statistics
380
+ are collected in Table I.
381
+ B. Visualizing Splitting Methods
382
+ This section presents the results of the splitting methods
383
+ under different α parameters. Figure 1 shows the results of the
384
+ quantity-skewed splitting algorithm described in Section III-A.
385
+ Each split is generated for a federation of 10 clients (K = 10).
386
+ Each row corresponds to one of the example datasets. Columns
387
+ refer to different values of the similarity parameter α. Each
388
+ plot shows the client dataset cardinalities |Dk| with respect to
389
+ clients k = 1, . . . , 10. For higher values of α, the cardinalities
390
+ |Dk| tend to be similar. Conversely, for lower α values, |Dk|s
391
+ differ between clients.
392
+ Figure 2 shows the results of the label-skewed splitting
393
+ algorithm described in Section III-B. Similarly to the visual-
394
+ izations for quantity-skewed splitting, each split is generated
395
+ for a federation of 10 clients (K = 10). Instead of the dataset
396
+ cardinalities, each plot shows the Kaplan-Meier estimators
397
+ ˆSk(t) of each client dataset Dk. Indeed, the KM method is
398
+ widely used in survival tasks for survival function visualiza-
399
+ tion, as it encodes the summary information concerning the
400
+ survival labels in the dataset. From the left column to the right
401
+ column, datasets show more heterogeneous KM estimators,
402
+ as α decreases. This is expected, as for lower α values, the
403
+ Dirichlet distribution tends to assign non-uniform proportions
404
+ of samples from each time bin to the clients.
405
+ C. Numerical Analysis of Heterogeneity
406
+ This section provides the quantitative analysis carried out to
407
+ evaluate the level of heterogeneity induced by each splitting
408
+ method. A high level of data heterogeneity entails different
409
+ client data distributions, which may lead to more realistic
410
+ federations. To this end, the log-rank test [36] is exploited.
411
+ This test verifies the null hypothesis that there is no statistically
412
+ significant difference between the survival distributions of
413
+ two given populations. We use the log-rank test to determine
414
+ whether the event occurrence distribution is the same for
415
+ two clients. We consider the distribution difference between
416
+ two clients k1, k2 statistically significant if the p-value pk1,k2
417
+ resulting from the test is ≤ 0.05.
418
+ In order to summarize the results for a federation, we define
419
+ the heterogeneity score h of a federation as the fraction of
420
+ client pairs $P = \{(k_1, k_2) : k_1 < k_2,\; k_1, k_2 \in \{1, \dots, K\}\}$
421
+ for which the distribution difference is statistically significant,
422
+ i.e.,
423
+ $h = \frac{1}{|P|} \sum_{(k_1, k_2) \in P} \mathbf{1}\left(p_{k_1, k_2} \leq 0.05\right).$
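+ The score can be computed, for example, with the log-rank test from the lifelines package; the helper below is a sketch under that assumption, not the evaluation script used for the paper.

```python
from itertools import combinations
from lifelines.statistics import logrank_test

def heterogeneity_score(times_per_client, events_per_client, threshold=0.05):
    """Fraction of client pairs whose event-time distributions differ
    significantly according to the pairwise log-rank test."""
    K = len(times_per_client)
    pairs = list(combinations(range(K), 2))
    hits = sum(
        logrank_test(times_per_client[a], times_per_client[b],
                     event_observed_A=events_per_client[a],
                     event_observed_B=events_per_client[b]).p_value <= threshold
        for a, b in pairs)
    return hits / len(pairs)
```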
429
+ Table II collects the h values for quantity-skewed and label-
430
+ skewed splits under several K and α values. Each result is
431
+ averaged over 100 runs.
432
+ Concerning quantity-skewed splitting, each setting presents
433
+ an average heterogeneity score smaller than 5%. In other
434
+ words, quantity-skewed survival data does not present statisti-
435
+ cally significant label distribution differences when comparing
436
+ pairs of client datasets. This trend is true even for the smallest
437
+ α value we tested.
438
+ Conversely, label-skewed splitting presents noticeable dif-
439
+ ferences in h scores depending on the value of α. In fact, for all
440
+ the tested datasets, the h score with α = 1000.0 is almost zero.
441
+ Decreasing α increases the number of differing label distributions
442
+ among clients. For datasets with smaller total cardinalities
443
+ (GBSG2, METABRIC, and AIDS) α must decrease to 10.0 in
444
+ order to detect a noticeable increase in heterogeneity. Instead,
445
+ datasets with more total samples (FLCHAIN and SUPPORT)
446
+ present noticeable levels of heterogeneity even for α = 100.0.
447
+ For all the dataset splits in small federations (K = 5 and
448
+ K = 10), α values smaller than 1.0 result in h > 50%.
449
+ The trend does not apply to federations with more clients
450
+ (K = 50). In this case, α = 0.1 is not enough to obtain
451
+ h > 50%.
452
+
453
+ [Figure 1 shows bar plots of the client dataset cardinalities |Dk| over clients k = 1, . . . , 10, one row per dataset (GBSG2, METABRIC, AIDS, FLCHAIN, SUPPORT) and one column per similarity parameter (α = 1000.0, 100.0, 10.0, 1.0, 0.5).]
492
+ Fig. 1. Number of samples |Dk| for each client k = 1, . . . , 10. Each row refers to one of the datasets described in Section IV-A. Each column corresponds
493
+ to a quantity-skewed split (Section III-A) with a fixed similarity parameter α.
494
+ [Figure 2 plots the Kaplan-Meier estimators Ŝk(t) over time t for the 10 client datasets, one row per dataset (GBSG2, METABRIC, AIDS, FLCHAIN, SUPPORT) and one column per similarity parameter (α = 1000.0, 100.0, 10.0, 1.0, 0.5).]
609
+ Fig. 2.
610
+ Kaplan-Meier estimators ˆSk(t) for each client k = 1, . . . , 10. Each row refers to one of the datasets described in Section IV-A. Each column
611
+ corresponds to a label-skewed split (Section III-B) with a fixed similarity parameter α.
612
+
613
+ TABLE II
+ HETEROGENEITY SCORE h FOR SEVERAL K AND α. h VALUES ARE AVERAGED OVER 100 RUNS AND SCALED BY 100 FOR BETTER READABILITY.
+
+ Quantity-Skewed Split, K = 5
+ Dataset   | α = 1000.0 | α = 100.0 | α = 10.0 | α = 1.0  | α = 0.5 | α = 0.1
+ GBSG2     | 2.6±6.0    | 3.1±7.1   | 3.4±8.2  | 2.1±5.9  | 4.3±9.3 | 2.2±6.6
+ METABRIC  | 2.8±7.3    | 3.3±7.9   | 3.1±7.6  | 2.9±8.3  | 1.5±5.4 | 2.2±7.5
+ AIDS      | 1.4±5.3    | 2.8±6.5   | 2.1±5.0  | 4.6±10.5 | 4.6±9.8 | 2.3±5.8
+ FLCHAIN   | 1.9±4.6    | 3.2±6.9   | 2.3±6.0  | 3.8±8.4  | 2.9±9.8 | 2.6±6.8
+ SUPPORT   | 3.0±6.9    | 2.0±4.7   | 2.5±6.7  | 3.3±7.4  | 3.7±9.4 | 0.3±2.2
+
+ Quantity-Skewed Split, K = 10
+ Dataset   | α = 1000.0 | α = 100.0 | α = 10.0 | α = 1.0  | α = 0.5 | α = 0.1
+ GBSG2     | 4.1±4.8    | 3.9±5.0   | 3.0±4.9  | 3.0±4.3  | 3.0±4.5 | 1.9±3.3
+ METABRIC  | 3.6±5.3    | 4.7±6.0   | 4.4±5.9  | 3.5±5.6  | 3.6±4.7 | 1.7±4.0
+ AIDS      | 4.1±5.6    | 4.5±5.5   | 3.8±5.4  | 4.5±6.4  | 4.5±6.3 | 2.3±3.6
+ FLCHAIN   | 3.7±4.8    | 3.4±5.0   | 3.6±4.5  | 5.5±6.7  | 4.2±6.2 | 2.3±4.0
+ SUPPORT   | 4.1±5.8    | 3.4±4.6   | 4.0±4.8  | 3.9±5.7  | 4.2±6.5 | 1.0±2.3
+
+ Quantity-Skewed Split, K = 50
+ Dataset   | α = 1000.0 | α = 100.0 | α = 10.0 | α = 1.0  | α = 0.5 | α = 0.1
+ GBSG2     | 3.9±2.0    | 3.4±1.8   | 3.5±2.0  | 3.0±1.8  | 2.6±1.7 | 1.6±1.0
+ METABRIC  | 4.6±2.3    | 4.7±2.6   | 4.4±2.0  | 3.9±2.1  | 3.2±1.8 | 1.5±1.1
+ AIDS      | 4.5±2.2    | 4.9±2.5   | 4.4±2.1  | 4.6±2.4  | 4.2±2.4 | 2.0±1.1
+ FLCHAIN   | 4.8±2.4    | 5.0±2.4   | 4.6±2.2  | 4.7±2.6  | 4.8±2.7 | 2.1±1.5
+ SUPPORT   | 4.5±2.2    | 4.5±2.3   | 4.8±2.3  | 3.9±2.1  | 3.4±2.1 | 0.8±0.8
+
+ Label-Skewed Split, K = 5
+ Dataset   | α = 1000.0 | α = 100.0 | α = 10.0  | α = 1.0   | α = 0.5   | α = 0.1
+ GBSG2     | 0.2±2.0    | 0.1±1.0   | 5.8±9.4   | 46.7±20.9 | 58.2±17.0 | 73.8±18.2
+ METABRIC  | 0.0±0.0    | 0.5±2.2   | 20.9±17.2 | 66.1±19.0 | 76.7±14.5 | 82.3±13.3
+ AIDS      | 0.3±1.7    | 3.1±7.2   | 37.5±21.9 | 75.1±16.2 | 81.5±14.4 | 86.6±11.3
+ FLCHAIN   | 0.3±1.7    | 12.6±14.9 | 58.8±17.6 | 83.9±12.4 | 88.0±11.4 | 94.1±7.0
+ SUPPORT   | 0.5±2.2    | 29.6±20.8 | 74.3±15.7 | 91.3±9.7  | 92.5±7.4  | 94.0±6.4
+
+ Label-Skewed Split, K = 10
+ Dataset   | α = 1000.0 | α = 100.0 | α = 10.0  | α = 1.0   | α = 0.5   | α = 0.1
+ GBSG2     | 0.4±1.5    | 0.6±1.5   | 2.8±4.3   | 32.2±11.7 | 43.7±11.5 | 63.2±12.9
+ METABRIC  | 0.1±0.4    | 0.2±1.0   | 10.6±8.4  | 54.6±13.6 | 66.5±10.1 | 76.7±8.7
+ AIDS      | 0.3±1.0    | 1.4±2.7   | 24.7±12.6 | 68.1±9.0  | 74.0±9.1  | 77.7±8.4
+ FLCHAIN   | 0.4±1.2    | 4.2±5.5   | 42.8±13.0 | 78.2±8.8  | 84.9±5.6  | 89.3±5.8
+ SUPPORT   | 0.1±0.4    | 14.7±9.7  | 63.2±10.5 | 87.0±4.9  | 88.5±4.7  | 89.7±6.1
+
+ Label-Skewed Split, K = 50
+ Dataset   | α = 1000.0 | α = 100.0 | α = 10.0  | α = 1.0   | α = 0.5   | α = 0.1
+ GBSG2     | 0.5±0.6    | 0.6±0.6   | 0.5±0.6   | 5.7±2.2   | 10.8±3.3  | 23.8±4.2
+ METABRIC  | 0.2±0.3    | 0.3±0.5   | 1.3±1.2   | 21.7±4.5  | 33.1±5.2  | 48.8±5.5
+ AIDS      | 0.6±0.5    | 0.8±0.8   | 4.5±2.3   | 34.6±5.2  | 45.3±4.4  | 49.8±4.9
+ FLCHAIN   | 0.2±0.3    | 0.6±0.6   | 10.6±3.7  | 55.4±4.1  | 64.8±2.6  | 72.4±3.6
+ SUPPORT   | 0.0±0.0    | 0.5±0.6   | 29.0±5.5  | 69.9±2.7  | 75.5±2.3  | 73.4±4.2
873
+ V. CONCLUSION
874
+ This paper proposed two algorithms to simulate data hetero-
875
+ geneity in survival datasets for federated learning. Federated
876
+ simulation is an important step in survival analysis toward
877
+ the implementation and production of more accurate, fair,
878
+ and privacy-preserving survival models. The two presented
879
+ splitting techniques are based on the Dirichlet distribution.
880
+ The quantity-skewed splitting produces datasets with variable
881
+ cardinalities, while the label-skewed splitting relies on time
882
+ binning to split samples according to different label distribu-
883
+ tions. Visual insights are provided to show the behavior of the
884
+ proposed methods under hyperparameter change. Moreover,
885
+ log-rank tests are reported to provide a quantitative evaluation
886
+ of the degree of heterogeneity induced by each data split. To
887
+ encourage the adoption of common benchmarking practices
888
+ for future experiments on federated survival analysis, we make
889
+ the source code of the proposed algorithms publicly available.
890
+ To the best of our knowledge, this work represents the first
891
+ milestone toward standardized and comparable federated sur-
892
+ vival analysis simulations.
893
+
894
+ ACKNOWLEDGMENT
895
+ The European Commission has partially funded this work
896
+ under the H2020 grant N. 101016577 AI-SPRINT: AI in
897
+ Secure Privacy-pReserving computINg conTinuum.
898
+ REFERENCES
899
+ [1] J. P. Klein and M. L. Moeschberger, Survival analysis: techniques for
900
+ censored and truncated data.
901
+ Springer, 2003, vol. 1230.
902
+ [2] P. Wang, Y. Li, and C. K. Reddy, “Machine learning for survival analysis:
903
+ A survey,” ACM Computing Surveys (CSUR), vol. 51, no. 6, pp. 1–36,
904
+ 2019.
905
+ [3] M. Andreux, A. Manoel, R. Menuet, C. Saillard, and C. Simpson,
906
+ “Federated survival analysis with discrete-time cox models,” arXiv
907
+ preprint arXiv:2006.08997, 2020.
908
+ [4] N. Rieke, J. Hancox, W. Li, F. Milletari, H. R. Roth, S. Albarqouni,
909
+ S. Bakas, M. N. Galtier, B. A. Landman, K. Maier-Hein et al., “The
910
+ future of digital health with federated learning,” NPJ digital medicine,
911
+ vol. 3, no. 1, pp. 1–7, 2020.
912
+ [5] T. Li, A. K. Sahu, A. Talwalkar, and V. Smith, “Federated learning:
913
+ Challenges, methods, and future directions,” IEEE Signal Processing
914
+ Magazine, vol. 37, no. 3, pp. 50–60, 2020.
915
+ [6] P. Kairouz, H. B. McMahan, B. Avent, A. Bellet, M. Bennis, A. N.
916
+ Bhagoji, K. Bonawitz, Z. Charles, G. Cormode, R. Cummings et al.,
917
+ “Advances and open problems in federated learning,” Foundations and
918
+ Trends in Machine Learning, vol. 14, no. 1–2, pp. 1–210, 2021.
919
+ [7] X. Wang, H. G. Zhang, X. Xiong, C. Hong, G. M. Weber, G. A. Brat, C.-
920
+ L. Bonzel, Y. Luo, R. Duan, N. P. Palmer et al., “Survmaximin: robust
921
+ federated approach to transporting survival risk prediction models,”
922
+ Journal of biomedical informatics, vol. 134, p. 104176, 2022.
923
+ [8] C.-L. Lu, S. Wang, Z. Ji, Y. Wu, L. Xiong, X. Jiang, and L. Ohno-
924
+ Machado, “Webdisco: a web service for distributed cox model learning
925
+ without patient-level data sharing,” Journal of the American Medical
926
+ Informatics Association, vol. 22, no. 6, pp. 1212–1219, 2015.
927
+ [9] M. M. Rahman and S. Purushotham, “Fedpseudo: Pseudo value-based
928
+ deep learning models for federated survival analysis,” arXiv preprint
929
+ arXiv:2207.05247, 2022.
930
+ [10] T. T. F. Authors, “TensorFlow Federated,” 12 2018. [Online]. Available:
931
+ https://github.com/tensorflow/federated
932
+ [11] D. J. Beutel, T. Topal, A. Mathur, X. Qiu, T. Parcollet, P. P. de Gusmão,
933
+ and N. D. Lane, “Flower: A friendly federated learning research frame-
934
+ work,” arXiv preprint arXiv:2007.14390, 2020.
935
+ [12] S. Pölsterl, “scikit-survival: A library for time-to-event analysis built on
936
+ top of scikit-learn,” Journal of Machine Learning Research, vol. 21, no.
937
+ 212, pp. 1–6, 2020. [Online]. Available: http://jmlr.org/papers/v21/20-
938
+ 729.html
939
+ [13] H. Kvamme, Ø. Borgan, and I. Scheel, “Time-to-event prediction with
940
+ neural networks and cox regression,” arXiv preprint arXiv:1907.00825,
941
+ 2019.
942
+ [14] S. Fotso et al., “PySurvival: Open source package for survival analysis
943
+ modeling,” 2019–. [Online]. Available: https://www.pysurvival.io/
944
+ [15] C. Davidson-Pilon, “lifelines: survival analysis in python,” Journal of
945
+ Open Source Software, vol. 4, no. 40, p. 1317, 2019.
946
+ [16] E. Drysdale, “Survset: An open-source time-to-event dataset repository,”
947
+ arXiv preprint arXiv:2203.03094, 2022.
948
+ [17] T.-M. H. Hsu, H. Qi, and M. Brown, “Measuring the effects of non-
949
+ identical data distribution for federated visual classification,” arXiv
950
+ preprint arXiv:1909.06335, 2019.
951
+ [18] Q. Li, Y. Diao, Q. Chen, and B. He, “Federated learning on non-iid
952
+ data silos: An experimental study,” in 2022 IEEE 38th International
953
+ Conference on Data Engineering (ICDE).
954
+ IEEE, 2022, pp. 965–978.
955
+ [19] E. T. Lee and J. Wang, Statistical methods for survival data analysis.
956
+ John Wiley & Sons, 2003, vol. 476.
957
+ [20] M. Schumacher, G. Bastert, H. Bojar, K. Hübner, M. Olschewski,
+ W. Sauerbrei, C. Schmoor, C. Beyerle, R. Neumann, and H. Rauschecker,
+ “Randomized 2 x 2 trial evaluating hormonal treatment and the duration
+ of chemotherapy in node-positive breast cancer patients. German Breast
+ Cancer Study Group,” Journal of Clinical Oncology, vol. 12, no. 10,
+ pp. 2086–2093, 1994.
971
+ [21] A. Dispenzieri, J. A. Katzmann, R. A. Kyle, D. R. Larson, T. M.
972
+ Therneau, C. L. Colby, R. J. Clark, G. P. Mead, S. Kumar, L. J.
973
+ Melton III et al., “Use of nonclonal serum immunoglobulin free light
974
+ chains to predict overall survival in the general population,” in Mayo
975
+ Clinic Proceedings, vol. 87, no. 6.
976
+ Elsevier, 2012, pp. 517–523.
977
+ [22] E. L. Kaplan and P. Meier, “Nonparametric estimation from incomplete
978
+ observations,” Journal of the American statistical association, vol. 53,
979
+ no. 282, pp. 457–481, 1958.
980
+ [23] W. Nelson, “Theory and applications of hazard plotting for censored
981
+ failure data,” Technometrics, vol. 14, no. 4, pp. 945–966, 1972.
982
+ [24] O. Aalen, “Nonparametric inference for a family of counting processes,”
983
+ The Annals of Statistics, pp. 701–726, 1978.
984
+ [25] S. J. Cutler and F. Ederer, “Maximum utilization of the life table method
985
+ in analyzing survival,” Journal of chronic diseases, vol. 8, no. 6, pp.
986
+ 699–712, 1958.
987
+ [26] B. McMahan, E. Moore, D. Ramage, S. Hampson, and B. A. y Arcas,
988
+ “Communication-efficient learning of deep networks from decentralized
989
+ data,” in Artificial intelligence and statistics.
990
+ PMLR, 2017, pp. 1273–
991
+ 1282.
992
+ [27] V. Mothukuri, R. M. Parizi, S. Pouriyeh, Y. Huang, A. Dehghantanha,
993
+ and G. Srivastava, “A survey on security and privacy of federated
994
+ learning,” Future Generation Computer Systems, vol. 115, pp. 619–640,
995
+ 2021.
996
+ [28] S. Rahimian, R. Kerkouche, I. Kurth, and M. Fritz, “Practical challenges
997
+ in differentially-private federated survival analysis of medical data,” in
998
+ Conference on Health, Inference, and Learning.
999
+ PMLR, 2022, pp.
1000
+ 411–425.
1001
+ [29] T. Li, A. K. Sahu, M. Zaheer, M. Sanjabi, A. Talwalkar, and V. Smith,
1002
+ “Federated optimization in heterogeneous networks,” Proceedings of
1003
+ Machine Learning and Systems, vol. 2, pp. 429–450, 2020.
1004
+ [30] S. P. Karimireddy, S. Kale, M. Mohri, S. Reddi, S. Stich, and A. T.
1005
+ Suresh, “Scaffold: Stochastic controlled averaging for federated learn-
1006
+ ing,” in International Conference on Machine Learning.
1007
+ PMLR, 2020,
1008
+ pp. 5132–5143.
1009
+ [31] D. A. E. Acar, Y. Zhao, R. M. Navarro, M. Mattina, P. N. Whatmough,
1010
+ and V. Saligrama, “Federated learning based on dynamic regularization,”
1011
+ arXiv preprint arXiv:2111.04263, 2021.
1012
+ [32] D. R. Cox, “Regression models and life-tables,” Journal of the Royal
1013
+ Statistical Society. Series B (Methodological), vol. 34, no. 2, pp.
1014
+ 187–220, 1972. [Online]. Available: http://www.jstor.org/stable/2985181
1015
+ [33] S. Caldas, S. M. K. Duddu, P. Wu, T. Li, J. Konečný, H. B. McMahan,
1016
+ V. Smith, and A. Talwalkar, “Leaf: A benchmark for federated settings,”
1017
+ arXiv preprint arXiv:1812.01097, 2018.
1018
+ [34] E. Lomurno, A. Archetti, L. Cazzella, S. Samele, L. Di Perna, and
1019
+ M. Matteucci, “SGDE: Secure generative data exchange for cross-silo
1020
+ federated learning,” in AIPR 2022, International Conference on Artificial
1021
+ Intelligence and Pattern Recognition, 2022.
1022
+ [35] H. Kvamme and Ø. Borgan, “Continuous and discrete-time survival
1023
+ prediction with neural networks,” Lifetime Data Analysis, vol. 27, no. 4,
1024
+ pp. 710–736, 2021.
1025
+ [36] J. M. Bland and D. G. Altman, “The logrank test,” Bmj, vol. 328, no.
1026
+ 7447, p. 1073, 2004.
1027
+ [37] J. L. Katzman, U. Shaham, A. Cloninger, J. Bates, T. Jiang, and
1028
+ Y. Kluger, “Deepsurv: personalized treatment recommender system
1029
+ using a cox proportional hazards deep neural network,” BMC medical
1030
+ research methodology, vol. 18, no. 1, pp. 1–12, 2018.
1031
+ [38] B. Ripley, B. Venables, D. M. Bates, K. Hornik, A. Gebhardt, and
1032
+ D. Firth, “R package: Mass,” Jul. 27, 2022. [Online]. Available:
1033
+ https://stat.ethz.ch/R-manual/R-devel/library/MASS/html/00Index.html
1034
+ [39] T. Therneau, T. Lumley, E. Atkinson, and C. Crowson, “R package:
1035
+ survival,” Jan. 9, 2023. [Online]. Available: https://stat.ethz.ch/R-
1036
+ manual/R-devel/library/survival/html/00Index.html
1037
+ [40] Vanderbilt University Department of Biostatistics, “Vanderbilt biostatistics
+ datasets,” Dec. 1, 2022. [Online]. Available: http://hbiostat.org/data
1051
+
AdFLT4oBgHgl3EQfxDCd/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
B9FAT4oBgHgl3EQfsh6e/content/2301.08659v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7631686eacff0f84aa2cdee79cd38cb2de4b7495599847994699ec7bf35ddf8f
3
+ size 532686
B9FAT4oBgHgl3EQfsh6e/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b05e71aca137bbcdb9245dbe4c53643ab6167d46fd4823149039fa6a8966a12e
3
+ size 341805
BNAyT4oBgHgl3EQf4PoQ/content/2301.00781v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb558779bb53285fb6a04976ad4f0e572416f841d21d795993cbff849f942379
3
+ size 1033416
BNAyT4oBgHgl3EQf4PoQ/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8a59f16fdba4999802112ddef84080b3ab1c19d7d12ec6d67b5ed5e4367c84fd
3
+ size 15138861
BNAyT4oBgHgl3EQf4PoQ/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee092d5b4d05852c3dbbfdc780c68b064c498b8ef2426c95e164e775077def49
3
+ size 537695
C9FRT4oBgHgl3EQfwjj-/content/2301.13639v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:985dbcd0f99cf107b40477fac85fbbfdbb06b21631e36695c6a92376b468f78b
3
+ size 333219
C9FRT4oBgHgl3EQfwjj-/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67614237495e2a98f57b84eb91bbec75c76de77760987c9860f7b16fe9b25c0a
3
+ size 173677
CdE0T4oBgHgl3EQfQADn/content/2301.02188v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:239137768d963b7e0b3eeee9ea46fa7afe08380b7eeba35bb47f380e903a64e2
3
+ size 754231
CdE0T4oBgHgl3EQfQADn/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8e8f4ae6d65b77606f258fdb8d5532b72c6d57bdd5eab5bee5ca46e07dcecbc
3
+ size 1966125
CdE0T4oBgHgl3EQfQADn/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ef4013a79eeeae09e8a0c742df7a0f0de8a1a00e512abc82b86b4ae1c97e0a1
3
+ size 62010
CtE0T4oBgHgl3EQfgQFr/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9994adf93731e9f66901f999d409f9f804b75dc4542f3f2297eed774546916ab
3
+ size 5242925
DdE2T4oBgHgl3EQf9gki/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4dd98cffe4eea7ad353a8cd2a5e0189fe657e0f98397bf79957a0b3827099d9e
3
+ size 173298
ENE0T4oBgHgl3EQfgwE0/content/2301.02421v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cfaa21565c7e58eee12ab43c41b8cf4991309048e50c02ccb569b3bf5fdf7a36
3
+ size 773757
ENE0T4oBgHgl3EQfgwE0/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb43ab71f9c1f42e6e8d91d3abb6c85132910b8dd0a2d4097dc5a222de02bf4c
3
+ size 1966125
FNE1T4oBgHgl3EQfWwTK/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d50769062a74a4b0e0479d634ea06ef211aad45667714804a578a3626cdb8e7
3
+ size 4325421
FtAyT4oBgHgl3EQfSvdV/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0b8a3dd1d25d09b32b85149c0b77f945199fa9fdae749708b65648bf2bc6d636
3
+ size 4522029
FtFKT4oBgHgl3EQfbC5a/content/tmp_files/2301.11810v1.pdf.txt ADDED
@@ -0,0 +1,923 @@
1
+ BOMP-NAS: Bayesian Optimization Mixed
2
+ Precision NAS
3
+ David van Son
4
+ Eindhoven University of Technology
5
+ Eindhoven, the Netherlands
6
7
+ Floran de Putter
8
+ Eindhoven University of Technology
9
+ Eindhoven, the Netherlands
10
11
+ Sebastian Vogel
12
+ NXP Semiconductors
13
+ Eindhoven, the Netherlands
14
15
+ Henk Corporaal
16
+ Eindhoven University of Technology
17
+ Eindhoven, the Netherlands
18
19
+ Abstract—Bayesian Optimization Mixed-Precision Neural Ar-
20
+ chitecture Search (BOMP-NAS) is an approach to quantization-
21
+ aware neural architecture search (QA-NAS) that leverages both
22
+ Bayesian optimization (BO) and mixed-precision quantization
23
+ (MP) to efficiently search for compact, high performance deep
24
+ neural networks. The results show that integrating quantization-
25
+ aware fine-tuning (QAFT) into the NAS loop is a necessary step to
26
+ find networks that perform well under low-precision quantization:
27
+ integrating it allows a model size reduction of nearly 50% on the
28
+ CIFAR-10 dataset. BOMP-NAS is able to find neural networks
29
+ that achieve state of the art performance at much lower design
30
+ costs. This study shows that BOMP-NAS can find these neural
31
+ networks at a 6× shorter search time compared to the closest
32
+ related work.
33
+ I. INTRODUCTION
34
+ DEEP LEARNING models have revolutionized image pro-
36
+ cessing tasks, such as classification and semantic segmen-
37
+ tation. However, designing these deep neural networks (DNNs)
38
+ is a challenging task. It is especially challenging considering
39
+ that nowadays, DNNs have to be deployed on resource con-
40
+ strained edge devices, e.g. mobile cellphones and electronic
41
+ control units of cars. On these devices, DNNs are subject to
42
+ limited memory and computational power constraints, while
43
+ peak performance in terms of accuracy and latency is expected.
44
+ The proposed solution to the tedious task of network design
45
+ is neural architecture search (NAS), an automated method
46
+ of generating competitive DNN architectures. DNNs designed
47
+ through NAS consistently outperform human-designed net-
48
+ works in various tasks both in terms of performance and
49
+ efficiency.
50
+ Besides NAS, model compression techniques, such as archi-
51
+ tecture pruning and parameter quantization, have become an
52
+ essential part of optimizing DNN architectures for embedded
53
+ deployment [1], [2]. Using model compression, DNNs derived
54
+ by NAS can be compressed even further, increasing efficiency
55
+ while keeping performance intact.
56
+ This study introduces a sampling-based NAS methodology
57
+ that integrates mixed-precision quantization and Bayesian op-
58
+ timization into a unified NAS algorithm, named Bayesian
59
+ Optimization Mixed-Precision NAS (BOMP-NAS). Therefore,
60
+ network quantization and the requirements and challenges for
61
+ integrating quantization into the NAS optimization will be
62
+ investigated in more detail in this paper.
63
+ The contributions of this paper are:
64
+ 1) A new sampling-based NAS methodology, called BOMP-
65
+ NAS, with fine-grained mixed-precision (MP) quantiza-
66
+ tion, where low-precision parameter use is enabled by
67
+ quantization-aware fine-tuning (QAFT) during the search
68
+ (Section III).
69
+ 2) Demonstrate the feasibility of BOMP-NAS as a
76
+ quantization-aware NAS (QA-NAS) application, both in
77
+ terms of found networks and search costs (Section IV).
78
+ 3) BOMP-NAS finds better performing models with similar
79
+ memory budgets at 6× shorter search time compared to
80
+ state-of-the-art (Section V).
81
+ This paper is structured as follows: Section II summarizes
82
+ existing approaches to QA-NAS, and the differences to BOMP-
83
+ NAS. In Section III, the methodology behind BOMP-NAS and
84
+ experimental setup is described. The results obtained using
85
+ BOMP-NAS are discussed in Section IV and compared to
86
+ existing works in Section V. Section VI describes the ablation
87
+ studies conducted. Finally, Section VII concludes this paper
88
+ and gives possible directions for future research.
89
+ II. RELATED WORK
90
+ Combining NAS with model compression techniques has
91
+ proven an effective way to design compact DNNs that rival
92
+ state-of-the-art (SotA) full precision networks. In several works,
93
+ authors advocate for the joint optimization of DNN architecture
94
+ and model compression [3]–[6]. This is because although the
95
+ objectives can be pursued separately, this leads to sub optimal
96
+ networks: i.e., the best architecture in a float32 format
97
+ floating point DNN may not be the best architecture in an int8
98
+ format quantized DNN [4].
99
+ In [5], a cell-based NAS was combined with homogeneous
100
+ quantization-aware training (QAT) to generate compact, effi-
101
+ cient DNNs. The authors first searched for efficient neural net-
102
+ work building blocks, referred to as cells, using gradient-based
103
+ NAS, combined with gradient-based QAT.
104
+ arXiv:2301.11810v1 [cs.LG] 27 Jan 2023
105
+
106
+ Trial
107
+ Generate candidate
108
+ model (1)
109
+ Search space (1a)
110
+ DNN (2a)
111
+ Quantization
112
+ policy (3a)
113
+ Train model (2)
114
+ Quantize model (3)
115
+ Quantization-aware
116
+ fine-tuning (4)
117
+ Evaluate model (5)
118
+ Model score (5a)
119
+ Max trials?
120
+ Update surrogate
121
+ model (6)
122
+ Surrogate
123
+ model (1b)
124
+ Final training
125
+ Pareto front (7)
126
+ No
127
+ Yes
128
+ Fig. 1: UML activity diagram of proposed workflow of BOMP-NAS.
129
+ DNNs (2a) and Quantization policies (3a) are selected (1) from the
130
+ Search space (1a) using the Surrogate model (1b). The DNN is
131
+ early trained in full precision (2), then quantized according to the
132
+ (MP) Quantization policy. This quantized DNN is then fine-tuned
133
+ quantization-aware (4). Next, the DNN is evaluated (5). These results
134
+ are then scalarized into a score (5a) according to (1). Lastly, the score
135
+ is used to update the surrogate model (6), which is then used to sample
136
+ the next candidate model (1).
137
+ In [4], the once-for-all (OFA) [7] approach to NAS was
138
+ used to quickly generate many different trained models, which
139
+ could then be used to train their quantization-aware accuracy
140
+ predictor. In this way, the search time is extremely short
141
+ compared to other approaches, as the generated models do
142
+ not need to be trained prior to evaluation. However, the initial
143
+ investment of training the supergraph and accuracy predictor
144
+ amounts to 2400 GPU hours on a V100 GPU, which is a
145
+ significant investment barrier.
146
+ [6] extends this work by introducing QAT into the training
147
+ of the supergraph. The authors claim to have reduced the initial
148
+ investment to 1805 hours. Compared to [8], the supernetwork
149
+ required 300 fewer training epochs due to the introduction of
150
+ BatchQuant, a method to reduce the instability of QAT when
151
+ training supernetworks.
152
+ In [9], aging evolution was combined with homogeneous
153
+ PTQ to 8-bit to find networks suitable for microcontrollers.
154
+ [3] extends this by also considering MP. In their work, an
155
+ evolutionary algorithm-based NAS was combined with hetero-
156
+ geneous PTQ to 16, 8 or 4 bits. A significant limitation of this
157
+ method is that the search engine is likely to get stuck in a bad
158
+ local minimum.
159
+ In all of these works, the results are compared against
160
+ regular NAS networks, and quantized versions of existing
161
+ architectures, which they consistently outperform. This shows
162
+ that combining quantization and network architecture design
163
+ into a single algorithm is a feasible approach for designing
164
+ compact, high-performance networks.
165
+ The goal of this study is to reduce the time spent search-
166
+ ing compared to existing quantization-aware NAS (QA-NAS)
167
+ methods while integrating MP quantization to {4-8}-bit in
168
+ sampling-based NAS. This study is most similar to [3], with
169
+ two major differences. Firstly, Bayesian optimization (BO) is
170
+ used as the search strategy to traverse the search space more
171
+ efficiently. This should reduce the search time of BOMP-NAS
172
+ significantly compared to other methods, because BO converges
173
+ quickly on promising solutions. Next to that, the issue of getting
174
+ stuck on local minima is mitigated, since BO considers all
175
+ previously trained networks, rather than a small subset as the
176
+ population.
177
+ Secondly, BOMP-NAS uses QAFT, enabling DNNs to learn
178
+ to compensate for the quantization noise. This should enable
179
+ BOMP-NAS to derive DNNs that outperform SotA.
180
+ This work is also somewhat similar to QFA [6], in that
181
+ QFA and BOMP-NAS both use QAFT during NAS. However,
182
+ BOMP-NAS is a sampling-based NAS approach, instead of the
183
+ OFA-based approach of QFA, because of the investment barrier
184
+ that is inherent in the OFA-based approaches.
185
+ III. BOMP-NAS METHODOLOGY
186
+ BOMP-NAS leverages multi-objective BO to efficiently
187
+ search for MP DNNs. The proposed workflow of this approach
188
+ is shown in Fig. 1. The Search strategy (1b) selects (1)
189
+ candidate DNNs (2a) and Quantization policies (3a) from the
190
+ Search space (1a).
191
+ The search space builds upon MobileNetV2 [10], a com-
192
+ pact, high-performing architecture originally designed for the
193
+ ImageNet dataset. The MobileNetV2 architecture consists of
194
+ a series of (possibly repeating) inverted bottlenecks. For each
195
+ inverted bottleneck, the kernel size, width multiplier, expansion
196
+ factor and number of repetitions was searchable. Also, for
197
+ each layer within a bottleneck, the bitwidth is a searchable
198
+ parameter. The search space is summarized in Table I, and
199
+ contains 3.96 · 1019 architectures and 1.19 · 1016 quantization
200
+ policies. In total, the search space contains 4.73 · 1039 MP
201
+ DNNs.
202
+ With this search space, the aim is to find compact, high-
203
+ performance networks for the CIFAR-10 and CIFAR-100
204
+ datasets [11]. The same search space was used for the CIFAR-
205
+ 100 dataset, except for the width multipliers, which could be
206
+ chosen from [0.25, 0.50, 0.75, 1.00, 1.30] instead. Since the
207
+ CIFAR-10 and CIFAR-100 datasets are addressed, the image
208
+ inputs are much smaller compared to the ImageNet dataset.
209
+ Therefore, the resolution reduction occurs after bottlenecks 4
210
+ and 6 in the search space by means of a strided convolution,
211
+ as proposed in [12].
212
+
213
+ TABLE I: Search space around MobileNetV2. The degrees of freedom
+ are the kernel size k, width multiplier α, expansion factor e and
+ the number of repetitions n. The search space contains 3.96 · 10^19
+ architectures and 1.19 · 10^16 quantization policies. In total, the search
+ space contains 4.73 · 10^39 MP DNNs. Choices indicated in bold are
+ the seed values.
+ Block                   | Parameter         | Choices
+ Inverted bottleneck 1   | kernel size       | [2, 3, 4, 5, 6, 7]
+ Inverted bottleneck 1   | width multiplier  | [0.01, 0.05, 0.1, 0.2, 0.3]
+ Inverted bottleneck 1   | expansion factor  | [1]
+ Inverted bottleneck 1   | repetitions       | [1]
+ Inverted bottleneck 2-6 | kernel size       | [2, 3, 4, 5, 6, 7]
+ Inverted bottleneck 2-6 | width multiplier  | [0.01, 0.05, 0.1, 0.2, 0.3]
+ Inverted bottleneck 2-6 | expansion factor  | [1, 2, 3, 4, 5, 6]
+ Inverted bottleneck 2-6 | repetitions       | [0, 1, 2, 3, 4, 5]
+ Inverted bottleneck 7   | kernel size       | [2, 3, 4, 5, 6, 7]
+ Inverted bottleneck 7   | width multiplier  | [0.01, 0.05, 0.1, 0.2, 0.3]
+ Inverted bottleneck 7   | expansion factor  | [1, 2, 3, 4, 5, 6]
+ Inverted bottleneck 7   | repetitions       | [1]
+ Convolutional 2         | number of filters | [128, 256, 512, 1024, 1280]
+ Convolutional 2         | kernel size       | [1]
+ Convolutional 2         | repetitions       | [1]
+ Any                     | bitwidth          | [4, 5, 6, 7, 8]
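+ To make the encoding concrete, a candidate can be represented as one choice per searchable parameter plus one bitwidth per layer. The sketch below only illustrates uniform sampling from such a space; the dictionary layout and layer count are assumptions, not the AutoKeras search-space definition used by BOMP-NAS.

```python
import random

BLOCK_CHOICES = {                       # per-bottleneck degrees of freedom (Table I)
    "kernel_size": [2, 3, 4, 5, 6, 7],
    "width_multiplier": [0.01, 0.05, 0.1, 0.2, 0.3],
    "expansion_factor": [1, 2, 3, 4, 5, 6],
    "repetitions": [0, 1, 2, 3, 4, 5],
}
BITWIDTHS = [4, 5, 6, 7, 8]             # searchable parameter precision per layer

def sample_candidate(n_blocks=6, n_layers=20, seed=0):
    """Draw one (architecture, quantization policy) pair uniformly at random."""
    rng = random.Random(seed)
    arch = [{name: rng.choice(opts) for name, opts in BLOCK_CHOICES.items()}
            for _ in range(n_blocks)]
    policy = [rng.choice(BITWIDTHS) for _ in range(n_layers)]
    return arch, policy
```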
259
+ For the search strategy, BOMP-NAS uses multi-objective
260
+ Bayesian optimization (BO), because it exploits regularity in
261
+ the search space very efficiently. Using only a few random ini-
262
+ tial datapoints, BO extracts the most promising candidate DNNs
263
+ and Quantization policies, increasing the likelihood of finding
264
+ good quantized DNNs in each trial. Following [13], BOMP-
265
+ NAS uses a Gaussian process surrogate model (1b), with the
266
+ Matérn kernel function to define edit-distances between
267
+ DNN architectures. The acquisition function was chosen to be
268
+ Upper Confidence Bound (UCB), again following [13].
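+ The surrogate-plus-acquisition step can be illustrated with scikit-learn's Gaussian process and a UCB rule. This is only a sketch of the idea, not the AutoKeras tuner, and the numeric encoding of candidates is an assumption.

```python
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern

def ucb_pick(gp, encoded_candidates, beta=2.0):
    """Upper Confidence Bound: prefer candidates with high predicted score
    and high predictive uncertainty."""
    mean, std = gp.predict(encoded_candidates, return_std=True)
    return int(np.argmax(mean + beta * std))

# X: numeric encodings of already-evaluated (architecture, policy) pairs,
# y: their scalarized scores (see Eq. (1) below)
rng = np.random.default_rng(0)
X, y = rng.random((5, 12)), rng.random(5)          # a few random initial trials
gp = GaussianProcessRegressor(kernel=Matern(nu=2.5), normalize_y=True).fit(X, y)
next_idx = ucb_pick(gp, rng.random((256, 12)))     # index of the next trial to run
```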
269
+ The selected candidate DNN (2a) is trained in full precision.
270
+ This performance estimation strategy, called early training,
271
+ was also used in [12]. Early training provides a good relative
272
+ ranking of each architecture at much lower cost than fully
273
+ training each candidate DNN. Specifically, BOMP-NAS trains
274
+ each candidate DNN (2a) for 20 epochs in full-precision (2).
275
+ After the early training, the DNN is quantized (3) according
276
+ to the Quantization policy (3a). Employing MP quantization
+ enables BOMP-NAS to distribute the available
278
+ model size budget more carefully; important layers get higher
279
+ precision, while less important layers get lower precision. The
280
+ parameters of the DNNs are quantized per output channel,
281
+ while activations were quantized per-tensor, as proposed in
282
+ [14].
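+ As a hedged illustration of per-output-channel weight quantization (the QKeras internals differ), a symmetric fake-quantizer over a NumPy weight tensor could look as follows.

```python
import numpy as np

def fake_quantize_per_channel(weights, bits):
    """Symmetric per-output-channel quantization of a weight tensor whose last
    axis is the output channel; activations would instead share one scale."""
    qmax = 2 ** (bits - 1) - 1
    reduce_axes = tuple(range(weights.ndim - 1))
    scale = np.max(np.abs(weights), axis=reduce_axes, keepdims=True) / qmax
    scale = np.where(scale == 0, 1.0, scale)          # guard all-zero channels
    quantized = np.clip(np.round(weights / scale), -qmax - 1, qmax)
    return quantized * scale                          # dequantized ("fake") weights
```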
283
+ The quantized DNN resulting from (3) is fine-tuned
291
+ quantization-aware for 1 epoch (4). After the QAFT, the early-
+ training phase is complete, and the DNN can be evaluated (5a). The
293
+ evaluation criteria in BOMP-NAS are flexible, and were chosen
294
+ to be task accuracy [%] and model size on disk [kB].
295
+ However, BO requires a single number to be returned as the
296
+ score (5a) of a trial. Therefore, to enable multi-objectiveness,
297
+ BOMP-NAS uses a scalarization function to combine multiple
298
+ objectives into a single score. The notion of the scalarization
299
+ function is to push for equal score along a Pareto front.
300
+ This allows BOMP-NAS to show the trade-off between the
301
+ optimization objectives that can be achieved for the current
302
+ use-case. This is achieved by dividing the objectives into two
303
+ categories: minimization and maximization objectives.
304
+ For maximization objectives, the objective value, e.g. task
305
+ accuracy, is divided by its corresponding reference value, e.g.
306
+ ref accuracy. For minimization objectives, the corresponding
307
+ reference value, e.g. ref model size, is divided by the reference
308
+ value, e.g. disk size. In this way, a convex function is defined,
309
+ which enables the generation of a Pareto front, instead of a
310
+ single DNN as the result of NAS. The reference values can be
311
+ tuned to increase or decrease importance of the objectives. The
312
+ scalarization function BOMP-NAS uses is defined as:
313
+ $\mathrm{score} = \dfrac{\text{accuracy } [\%]}{\text{ref accuracy}} + \dfrac{\text{ref model size}}{\log_{10}(\text{model size [bits]})} \qquad (1)$
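+ A direct transcription of Eq. (1), together with the model-size bookkeeping it needs, is sketched below; the reference values and per-layer parameter counts are placeholders, not values taken from the paper's configuration files.

```python
import math

def model_size_bits(params_per_layer, bitwidths):
    """Disk size implied by a mixed-precision policy: one bitwidth per layer."""
    return sum(n * b for n, b in zip(params_per_layer, bitwidths))

def scalarized_score(accuracy, size_bits, ref_accuracy=0.8, ref_model_size=8.0):
    """Eq. (1): maximization objective divided by its reference value plus the
    reference value of the minimization objective divided by the objective."""
    return accuracy / ref_accuracy + ref_model_size / math.log10(size_bits)

# example: a 5-layer policy with mixed 4-8 bit weights
size = model_size_bits([400, 1200, 4800, 9600, 1280], [8, 6, 5, 4, 8])
print(scalarized_score(0.87, size))
```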
318
+ The resulting score (5a) is used to update (6) the Surrogate
319
+ model (1b), which is then used to sample the next candidate
320
+ model (1). This cycle continues until the maximum number
321
+ of trials has been reached. Lastly, the Pareto optimal DNNs
322
+ are finally trained (7). During final training, the DNNs are
323
+ trained for 200 epochs in full-precision, followed by 5 epochs
324
+ of QAFT.
325
+ Within the BOMP-NAS workflow, it is possible to skip the
326
+ QAFT during the search; this is shown in the ablation
327
+ studies (Section VI). In the final training, also no QAFT is
328
+ applied in this case. Both homogeneous and MP PTQ were
329
+ investigated. Specifically, homogeneous (or fixed-precision) 8-
330
+ bit quantization was compared against {4,5,6,7,8}-bit MP pa-
331
+ rameter quantization. For all experiments, the activations were
332
+ quantized to INT8, and biases were quantized to INT32.
333
+ A. Experimental setup
334
+ BOMP-NAS combines the AutoKeras [13] NAS framework
335
+ with quantization provided by the QKeras [1] framework.
336
+ The baseline approach in this study is post-NAS quantiza-
337
+ tion, also referred to as the NAS-then-quantize or sequential
338
+ approach. In this approach, first a NAS is used to find the
339
+ optimal full-precision architecture for a given problem; then,
340
+ the optimal quantization policy for this network is determined.
341
+ All searches were run for 100 iterations, from which only the
342
+ Pareto optimal solutions in terms of task accuracy and model
343
+ size were trained for 200 epochs to obtain the final Pareto front.
344
+ The best models resulting from BOMP-NAS will be compared
345
+ with quantized DNNs from related work on their task accuracy,
346
+ disk size and design time.
347
+ IV. RESULTS
348
+ This section discusses the results obtained using BOMP-
349
+ NAS. First, the Post-NAS PTQ baseline results are discussed,
350
+ followed by the results of QAFT-aware NAS.
351
+ The baseline results were obtained by running BOMP-NAS
352
+ on the search space defined in Table I without quantization
353
+ in the loop. After the NAS finished, all the networks were
354
+ quantized homogeneously to 8-bit precision.
355
+ The results of running BOMP-NAS with QAFT in the loop
356
+ (Fig. 1) on the search space are shown in Fig. 2.
357
+
358
+ [Figure 2 is a scatter plot of model size [kB] versus accuracy on the CIFAR-10 dataset for all sampled candidates, colored by generation (18 samples each, evaluated at 20 epochs), with equal-score contours and the finally trained Pareto-optimal models.]
+ Fig. 2: Results of QAFT-aware NAS with ref acc = 0.8, ref model size = 8 on CIFAR-10. The found models
+ are up to 2x smaller while achieving better accuracy than the seed
+ architecture, which is MobileNetV2 quantized to 8-bit homogeneously.
414
+ The figure shows the model size [kB] (x-axis and blob
415
+ size) and task accuracy [%] (y-axis) of the candidate networks.
416
+ The candidate networks are colored based on when they were
417
+ sampled, earlier sampled models are darker than later sampled
418
+ models. The networks sampled by BO should improve with
419
+ time, as the surrogate model gets more information with each
420
+ new sample. The finally trained Pareto optimal models are
421
+ shown in red, with a line connecting them to their respective
422
+ candidate network. The seed network shown is the one defined
423
+ in Table I. The dotted lines are equal-score lines, candidate
424
+ networks along this line are considered equally optimal for the
425
+ chosen reference values.
426
+ The figure shows that the found models are up to 2x smaller
427
+ while achieving better accuracy than the seed architecture.
428
+ The bitwidth distributions per layer of the Pareto optimal
429
+ models are shown in Fig. 3. It demonstrates that in this search,
430
+ all of the models in the final Pareto front leverage the lower
431
+ precision bitwidths available. This shows QAFT enables the
432
+ leverage of low precision parameter quantization.
433
+ The results of running BOMP-NAS on the CIFAR-100 search
434
+ space are shown in Fig. 4.
435
+ V. EVALUATION AND DISCUSSION
436
+ In this section, the results in Section IV are compared with
437
+ the baseline and works from SotA. First, the PTQ-aware NAS is
438
+ compared to the baseline. Second, the effect of applying QAFT
439
+ to networks found through PTQ-aware NAS is investigated.
440
+ Lastly, the QAFT-aware NAS results are compared to the
441
+ baseline and works from SotA in terms of performance and
442
+ design cost.
443
+ Comparing the results from QAFT-NAS with the previously
444
+ discussed Pareto fronts yields Fig. 5. It shows that by inte-
445
+ grating QAFT into the NAS, an even more optimal Pareto
446
+ front can be obtained. BOMP-NAS now also finds many more
447
+ promising models well below 10 kB disk size compared to the
448
+ other approaches.
449
+ A comparison between the results of BOMP-NAS on CIFAR-
450
+ 10 and CIFAR-100, and state of the art is shown in Table II.
451
+ [Figure 3 shows, for each Pareto-optimal model, the chosen bitwidth (4-8) per layer index.]
464
+ Fig. 3: Bitwidth distribution per layer for each of the models in the
465
+ final Pareto front of the MP QAFT-aware NAS. The figure shows that
466
+ in this search, all of the models in the final Pareto front leverage the
467
+ lower precision bitwidths available. This shows QAFT enables the
468
+ leverage of low precision parameter quantization.
469
+ [Figure 4 is the corresponding scatter plot of model size [kB] versus accuracy on the CIFAR-100 dataset, colored by generation (18 samples each, evaluated at 20 epochs), including the seed architecture and the final training results.]
+ Fig. 4: Results of QAFT-aware NAS with ref acc = 0.8, ref model size = 6 on CIFAR-100.
525
+ [Figure 5 overlays the final-training Pareto fronts of MP PTQ NAS, MP PTQ NAS with post-NAS QAFT, and MP QAFT NAS, together with the seed architecture, as accuracy on CIFAR-10 versus model size [kB].]
542
+ Fig. 5: Comparison between three Pareto fronts using 4-8-bit MP
543
+ quantization. The figure shows that fine-tuning the architectures found
544
+ with PTQ search (MP PTQ-NAS (QAFT)) improves the results,
545
+ especially on the left-hand side. However, QAFT-aware NAS yields
546
+ even better results; especially on the left-hand side, the performance
547
+ of the found models is significantly improved.
548
+
549
+ TABLE II: Pareto optimal architectures found by a single search of
550
+ BOMP-NAS compared to works from SotA. The shown networks are
551
+ the best performing networks that are smaller than or similar size as
552
+ the respective SotA network. BOMP-NAS finds, in a single search,
553
+ DNNs that outperform SotA in a broad model size range.
554
+ Dataset   | Method         | Acc. [%] | Model size [kB]
+ CIFAR-10  | JASQ (repr.)   | 65.97    | 4.47
+ CIFAR-10  | BOMP-NAS       | 67.36    | 4.57
+ CIFAR-10  | JASQ [3]       | 97.03    | 900.00
+ CIFAR-10  | BOMP-NAS       | 88.67    | 76.08
+ CIFAR-10  | µNAS [9]       | 86.49    | 11.40
+ CIFAR-10  | BOMP-NAS       | 83.96    | 16.30
+ CIFAR-100 | DFQ [15]       | 77.30    | 11200.00
+ CIFAR-100 | GZSQ [16]      | 75.95    | 5600.00
+ CIFAR-100 | BOMP-NAS       | 75.84    | 4199.00
+ CIFAR-100 | LIE [17]       | 73.34    | 1800.00
+ CIFAR-100 | BOMP-NAS       | 74.00    | 1773.00
+ CIFAR-100 | Mix&Match [18] | 71.50    | 1700.00
+ CIFAR-100 | LIE [17]       | 71.24    | 1010.00
+ CIFAR-100 | BOMP-NAS       | 72.36    | 1047.00
+ CIFAR-100 | APoT [19]      | 66.42    | 90.00
+ CIFAR-100 | BOMP-NAS       | 68.18    | 353.00
608
+ TABLE III: Search cost of various QA-NAS methods depending on
609
+ the number of deployment scenarios N.
610
+ Method     | Dataset  | Search cost (GPU hours)
+ APQ [4]    | ImageNet | 2400 + 0.5N
+ OQA [8]    | ImageNet | 1200 + 0.5N
+ QFA [6]    | ImageNet | 1805 + 0.N
+ JASQ [3]   | CIFAR10  | 72N
+ µNAS [9]   | CIFAR10  | 552N
+ BOMP-NAS   | CIFAR10  | 12N
+ BOMP-NAS   | CIFAR100 | 30N
634
+ The table shows that BOMP-NAS outperforms the reproduced
635
+ version of JASQ on the same search space by more than 1pp
636
+ with a similar model size. Compared to µNAS, BOMP-NAS
637
+ performs 2.5pp worse, however, as shown in Table III, BOMP-
638
+ NAS has a more than 40 times lower search time.
639
+ For CIFAR-100, BOMP-NAS can outperform SotA works in
640
+ the same size range in a single search. Due to the limited trials
641
+ per search, it is expected that BOMP-NAS cannot outperform
642
+ every baseline within a single search. The expectation is that,
643
+ when considering models in the same size regime, BOMP-NAS
644
+ can find better performing networks.
645
+ Table III shows a comparison between the BOMP-NAS approach
646
+ and SotA works. The table shows that, when compared to
647
+ other QA-NAS methods on the same dataset, BOMP-NAS
648
+ is significantly faster in yielding good results. An advantage
649
+ of using BO is that, given a strict time budget, BOMP-NAS
650
+ will converge faster on promising models compared to e.g.
651
+ evolutionary approaches [13]. Next to this, BOMP-NAS yields
652
+ a Pareto front of trained DNNs, rather than a single network.
653
+ This allows for better consideration of the trade-off between
654
+ task accuracy and disk size.
655
+ VI. ABLATION STUDIES
656
+ For the ablation studies, both MP PTQ-aware NAS and fixed-
657
+ precision QAFT-aware NAS were evaluated, and compared to
658
+ the results discussed in Section IV.
659
+ Using the PTQ-aware NAS configuration of BOMP-NAS
660
+ yields the results shown in Fig. 6. The found networks on the
661
+ [Figure 6 is the scatter plot of model size [kB] versus CIFAR-10 accuracy for the MP PTQ-aware search, colored by generation (17 samples each, evaluated at 20 epochs), with equal-score contours and the final training results.]
+ Fig. 6: Results of MP PTQ-aware NAS with ref acc = 0.8, ref model size = 8. The found networks on the far left-hand
+ side perform significantly worse than the other models due to the
+ extremely low bitwidths in these models. The search therefore focused
+ on larger models, showing that simply applying MP PTQ to the found
+ networks is not a good strategy to find efficient networks.
719
+ [Figure 7 is the scatter plot of model size [kB] versus CIFAR-10 accuracy for the 4-bit QAFT-aware search, colored by generation (18 samples each, evaluated at 20 epochs), with the final training results.]
+ Fig. 7: Results of 4-bit QAFT-aware NAS with ref acc = 0.8, ref model size = 8.
760
+ far left-hand side perform significantly worse than the other
761
+ models due to the extremely low bitwidths in these models.
762
+ The search therefore focused on higher bitwidths, as is shown
763
+ by the high model sizes of the found networks. This shows
764
+ that simply applying MP PTQ to the found networks is not
765
+ a good strategy to find efficient networks. Notable is that for
766
+ the smallest model, applying PTQ after final training results in
767
+ worse accuracy than applying PTQ after initial training. This
768
+ shows that not only is the optimal quantization policy heavily
769
+ dependent on the network architecture, also the current weight
770
+ values have a significant influence.
771
+ A comparison between the results of 4-bit QAFT-NAS and
772
+ the previously discussed Pareto fronts is shown in Fig. 8; it
773
+ shows that using fixed 4-bit quantization NAS was not always
774
+ able to find better models compared to the MP approach.
775
+ On the far left, the 4-bit approach can find more optimal
776
+ networks, while in the middle of the size range, the networks
777
+ generally perform worse than equally sized networks from other
778
+ approaches. This could be due to the low sampling frequency
779
+ that is observed in that range, as shown in Fig. 7.
780
+ Table IV shows a comparison between the search cost of the
781
+
782
+ [Figure 8 overlays the final-training Pareto fronts of 8-bit PTQ NAS, MP PTQ NAS with post-NAS QAFT, MP QAT NAS, and 4-bit QAT NAS, together with the seed architecture, as CIFAR-10 accuracy versus model size [kB].]
797
+ Fig. 8: Comparison between several Pareto fronts. The MP searches
798
+ used bitwidths varying between 4 and 8-bit, fixed-bitwidth searches
799
+ have the used bitwidth specified. The figure shows that using fixed
800
+ 4-bit quantization NAS cannot always find better models compared to
801
+ the MP approach, while PTQ-aware NAS even with post-NAS QAFT
802
+ performs worse compared to QAFT-aware NAS.
803
+ TABLE IV: Search cost of various QA-NAS approaches in BOMP-
804
+ NAS depending on the number of deployment scenarios N.
805
+ Method                       | Dataset  | Search cost (GPU hours)
+ 8-bit PTQ-aware NAS          | CIFAR10  | 10N
+ 8-bit PTQ-aware NAS          | CIFAR100 | 23N
+ MP PTQ-aware NAS             | CIFAR10  | 10N
+ MP PTQ-aware NAS             | CIFAR100 | 23N
+ MP QAFT-aware NAS (BOMP-NAS) | CIFAR10  | 12N
+ MP QAFT-aware NAS (BOMP-NAS) | CIFAR100 | 30N
+ 4-bit QAFT-aware NAS         | CIFAR10  | 15N
+ 4-bit QAFT-aware NAS         | CIFAR100 | 35N
830
+ discussed QA-NAS approaches. The table shows that the intro-
831
+ duction of MP into the search space does not affect the search
832
+ time in BOMP-NAS. This is because the BO can heavily exploit
833
+ its prior knowledge gained from previous samples due to the
834
+ regularity inherent in quantization, therefore convergence time
835
+ does not significantly increase. However, integrating QAFT
836
+ into the NAS does significantly impact the search time. For
837
+ example, when using MP QAFT-NAS, the search takes 25%
838
+ longer compared to the MP PTQ-aware NAS approach due to
839
+ the added QAFT.
840
+ VII. CONCLUSION
841
+ Designing deep neural networks is a challenging, but funda-
842
+ mental task in deep learning applications. To cater to the needs
843
+ of edge devices, neural network design is often paired with
844
+ model compression to design compact, high performance deep
845
+ neural networks.
846
+ Bayesian Optimization Mixed Precision (BOMP)-NAS is
847
+ an approach to quantization-aware NAS that leverages both
848
+ Bayesian optimization (BO) and mixed-precision (MP) to
849
+ efficiently search for compact, high performance networks.
850
+ BOMP-NAS is an approach that allows quantization to be integrated
+ into NAS without much effort.
853
+ This study shows that integrating QAFT into the NAS loop is a necessary step to find networks that perform well under low-precision quantization.
+ Integrating QAFT in the loop allows BOMP-NAS to achieve a model size reduction of nearly 50% on the CIFAR-10 dataset.
+ In addition, this paper shows that BOMP-NAS can design DNNs that achieve state-of-the-art performance on the CIFAR datasets.
+ For example, DNNs designed by BOMP-NAS outperform JASQ [3] by 1.4 percentage points with a memory budget of 4.5 kB.
+ Lastly, this study shows that by using BO as the search strategy, BOMP-NAS finds state-of-the-art models at a much lower design cost.
+ Compared to the closest related work, JASQ [3], BOMP-NAS can find better-performing models with similar memory budgets at a 6× shorter search time.
+ For future research, a possible improvement could be to re-use the trained weights of each trial more efficiently.
+ For example, for each trained full-precision network, multiple quantization policies could be tried.
+ In this way, more information can be extracted from each trial, thereby reducing the search time further.
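+ A minimal sketch of this suggested improvement is shown below; it amortizes one full-precision training over several candidate quantization policies. The helper callables (train_fp, apply_mp_quantization, qaft, evaluate) are hypothetical placeholders, not the paper's actual implementation.
+ # Hypothetical sketch: score many mixed-precision policies per trained network.
+ import copy
+ 
+ def evaluate_policies(arch, policies, train_fp, apply_mp_quantization, qaft, evaluate):
+     """Train the architecture once in full precision, then score every
+     candidate mixed-precision policy on a copy of the trained weights."""
+     fp_model = train_fp(arch)                        # trained only once per trial
+     results = []
+     for policy in policies:
+         q_model = apply_mp_quantization(copy.deepcopy(fp_model), policy)
+         q_model = qaft(q_model)                      # optional short fine-tuning
+         results.append((policy, evaluate(q_model)))  # quantized accuracy per policy
+     return results                                   # one trial -> many data points for BO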
+ REFERENCES
+ [1] C. N. Coelho et al., "Automatic heterogeneous quantization of deep neural networks for low-latency inference on the edge for particle detectors," Nature Machine Intelligence, vol. 3, no. 8, pp. 675–686, Aug. 2021.
+ [2] B. Wu et al., "Mixed precision quantization of convnets via differentiable neural architecture search," CoRR, vol. abs/1812.00090, 2018.
+ [3] Y. Chen et al., "Joint neural architecture search and quantization," CoRR, vol. abs/1811.09426, 2018.
+ [4] T. Wang et al., "APQ: Joint search for network architecture, pruning and quantization policy," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020.
+ [5] T. Kim et al., "FrostNet: Towards quantization-aware network architecture search," CoRR, vol. abs/2006.09679, 2020.
+ [6] H. Bai et al., "BatchQuant: Quantized-for-all architecture search with robust quantizer," CoRR, vol. abs/2105.08952, 2021.
+ [7] H. Cai et al., "Once for all: Train one network and specialize it for efficient deployment," CoRR, vol. abs/1908.09791, 2019.
+ [8] M. Shen et al., "Once quantized for all: Progressively searching for quantized efficient models," arXiv preprint arXiv:2010.04354, vol. 6, 2020.
+ [9] E. Liberis et al., "µNAS: Constrained neural architecture search for microcontrollers," CoRR, vol. abs/2010.14246, 2020.
+ [10] M. Sandler et al., "MobileNetV2: Inverted residuals and linear bottlenecks," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018.
+ [11] A. Krizhevsky, "Learning multiple layers of features from tiny images," Tech. Rep., 2009.
+ [12] T. Elsken et al., "Efficient multi-objective neural architecture search via Lamarckian evolution," CoRR, 2019.
+ [13] H. Jin et al., "Auto-Keras: An efficient neural architecture search system," in Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, ser. KDD '19. New York, NY, USA: Association for Computing Machinery, 2019, pp. 1946–1956.
+ [14] M. Nagel et al., "A white paper on neural network quantization," CoRR, vol. abs/2106.08295, 2021.
+ [15] Y. Choi et al., "Data-free network quantization with adversarial knowledge distillation," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, June 2020.
+ [16] X. He et al., "Generative zero-shot network quantization," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, June 2021, pp. 3000–3011.
+ [17] H. Liu et al., "Layer importance estimation with imprinting for neural network quantization," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, June 2021, pp. 2408–2417.
+ [18] S. Chang et al., "Mix and match: A novel FPGA-centric deep neural network quantization framework," CoRR, vol. abs/2012.04240, 2020.
+ [19] Y. Li et al., "Additive powers-of-two quantization: A non-uniform discretization for neural networks," CoRR, vol. abs/1909.13144, 2019.
+