jackkuo commited on
Commit a08e2af · verified · 1 Parent(s): bd539b3

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50)
  1. .gitattributes +56 -0
  2. 0dE1T4oBgHgl3EQf4wVz/content/tmp_files/2301.03504v1.pdf.txt +1940 -0
  3. 0dE1T4oBgHgl3EQf4wVz/content/tmp_files/load_file.txt +0 -0
  4. 39AyT4oBgHgl3EQf1_mj/vector_store/index.faiss +3 -0
  5. 39E2T4oBgHgl3EQfOAbJ/content/2301.03744v1.pdf +3 -0
  6. 39E2T4oBgHgl3EQfOAbJ/vector_store/index.faiss +3 -0
  7. 39E2T4oBgHgl3EQfOAbJ/vector_store/index.pkl +3 -0
  8. 39E2T4oBgHgl3EQfjgft/content/2301.03970v1.pdf +3 -0
  9. 39E2T4oBgHgl3EQfjgft/vector_store/index.faiss +3 -0
  10. 3NFAT4oBgHgl3EQflB25/vector_store/index.faiss +3 -0
  11. 3dFKT4oBgHgl3EQfQy0a/vector_store/index.pkl +3 -0
  12. 4NE1T4oBgHgl3EQf6AXQ/vector_store/index.pkl +3 -0
  13. 4dE1T4oBgHgl3EQf6QUq/vector_store/index.faiss +3 -0
  14. 4tAzT4oBgHgl3EQffvxD/content/2301.01456v1.pdf +3 -0
  15. 4tAzT4oBgHgl3EQffvxD/vector_store/index.pkl +3 -0
  16. 79E1T4oBgHgl3EQfBwKM/content/tmp_files/2301.02856v1.pdf.txt +1946 -0
  17. 79E1T4oBgHgl3EQfBwKM/content/tmp_files/load_file.txt +0 -0
  18. 7tE1T4oBgHgl3EQfTwM_/content/2301.03081v1.pdf +3 -0
  19. 7tE1T4oBgHgl3EQfTwM_/vector_store/index.faiss +3 -0
  20. 7tE1T4oBgHgl3EQfTwM_/vector_store/index.pkl +3 -0
  21. 8NE2T4oBgHgl3EQfPgby/content/tmp_files/2301.03761v1.pdf.txt +1718 -0
  22. 8NE2T4oBgHgl3EQfPgby/content/tmp_files/load_file.txt +0 -0
  23. 8NE5T4oBgHgl3EQfQg5R/content/tmp_files/load_file.txt +0 -0
  24. 8dFLT4oBgHgl3EQfBS4r/vector_store/index.pkl +3 -0
  25. A9AyT4oBgHgl3EQf3_rL/vector_store/index.faiss +3 -0
  26. A9AzT4oBgHgl3EQf__9t/content/2301.01956v1.pdf +3 -0
  27. A9AzT4oBgHgl3EQf__9t/vector_store/index.pkl +3 -0
  28. C9FQT4oBgHgl3EQfPDYj/content/tmp_files/2301.13277v1.pdf.txt +1080 -0
  29. C9FQT4oBgHgl3EQfPDYj/content/tmp_files/load_file.txt +0 -0
  30. CNA0T4oBgHgl3EQfAP_i/content/tmp_files/2301.01961v1.pdf.txt +1182 -0
  31. CNA0T4oBgHgl3EQfAP_i/content/tmp_files/load_file.txt +0 -0
  32. CNAzT4oBgHgl3EQfTvwq/content/2301.01253v1.pdf +3 -0
  33. CdE4T4oBgHgl3EQfFgzX/content/tmp_files/2301.04887v1.pdf.txt +1466 -0
  34. CdE4T4oBgHgl3EQfFgzX/content/tmp_files/load_file.txt +0 -0
  35. DNAzT4oBgHgl3EQfwf6z/content/tmp_files/2301.01724v1.pdf.txt +2838 -0
  36. DNAzT4oBgHgl3EQfwf6z/content/tmp_files/load_file.txt +0 -0
  37. DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf +3 -0
  38. DdE1T4oBgHgl3EQfEAP6/vector_store/index.faiss +3 -0
  39. DdE1T4oBgHgl3EQfEAP6/vector_store/index.pkl +3 -0
  40. DdE3T4oBgHgl3EQfUwqw/vector_store/index.faiss +3 -0
  41. DdE3T4oBgHgl3EQfUwqw/vector_store/index.pkl +3 -0
  42. DdFJT4oBgHgl3EQfBSxP/content/2301.11424v1.pdf +3 -0
  43. DdFJT4oBgHgl3EQfBSxP/vector_store/index.pkl +3 -0
  44. EtFJT4oBgHgl3EQfCSzh/content/tmp_files/2301.11429v1.pdf.txt +982 -0
  45. EtFJT4oBgHgl3EQfCSzh/content/tmp_files/load_file.txt +0 -0
  46. FdE1T4oBgHgl3EQfqwVD/vector_store/index.faiss +3 -0
  47. INFLT4oBgHgl3EQfIy9R/content/2301.12001v1.pdf +3 -0
  48. INFLT4oBgHgl3EQfIy9R/vector_store/index.pkl +3 -0
  49. IdAyT4oBgHgl3EQfr_me/content/tmp_files/2301.00569v1.pdf.txt +420 -0
  50. IdAyT4oBgHgl3EQfr_me/content/tmp_files/load_file.txt +519 -0
.gitattributes CHANGED
@@ -4342,3 +4342,59 @@ _dAzT4oBgHgl3EQfFvqG/content/2301.01016v1.pdf filter=lfs diff=lfs merge=lfs -tex
4342
  L9E0T4oBgHgl3EQfjAEs/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4343
  SdAyT4oBgHgl3EQf8PoD/content/2301.00851v1.pdf filter=lfs diff=lfs merge=lfs -text
4344
  lNE0T4oBgHgl3EQf7wKH/content/2301.02780v1.pdf filter=lfs diff=lfs merge=lfs -text
4345
+ iNE0T4oBgHgl3EQfpgG_/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4346
+ 39E2T4oBgHgl3EQfjgft/content/2301.03970v1.pdf filter=lfs diff=lfs merge=lfs -text
4347
+ dtE4T4oBgHgl3EQfpg2t/content/2301.05193v1.pdf filter=lfs diff=lfs merge=lfs -text
4348
+ UNE3T4oBgHgl3EQfEQnM/content/2301.04295v1.pdf filter=lfs diff=lfs merge=lfs -text
4349
+ tNAzT4oBgHgl3EQfPft0/content/2301.01184v1.pdf filter=lfs diff=lfs merge=lfs -text
4350
+ wdE2T4oBgHgl3EQfgwdJ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4351
+ 39AyT4oBgHgl3EQf1_mj/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4352
+ KtE4T4oBgHgl3EQf7w7r/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4353
+ XdFRT4oBgHgl3EQf-jgZ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4354
+ SdAyT4oBgHgl3EQf8PoD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4355
+ CNAzT4oBgHgl3EQfTvwq/content/2301.01253v1.pdf filter=lfs diff=lfs merge=lfs -text
4356
+ h9E2T4oBgHgl3EQfHwY9/content/2301.03671v1.pdf filter=lfs diff=lfs merge=lfs -text
4357
+ KNFIT4oBgHgl3EQfaCs0/content/2301.11255v1.pdf filter=lfs diff=lfs merge=lfs -text
4358
+ 4dE1T4oBgHgl3EQf6QUq/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4359
+ jtAzT4oBgHgl3EQfbfyR/content/2301.01387v1.pdf filter=lfs diff=lfs merge=lfs -text
4360
+ xtE2T4oBgHgl3EQfhQc5/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4361
+ A9AyT4oBgHgl3EQf3_rL/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4362
+ WNE5T4oBgHgl3EQfcQ-J/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4363
+ eNE4T4oBgHgl3EQfQgzc/content/2301.04983v1.pdf filter=lfs diff=lfs merge=lfs -text
4364
+ WNE5T4oBgHgl3EQfcQ-J/content/2301.05602v1.pdf filter=lfs diff=lfs merge=lfs -text
4365
+ c9E_T4oBgHgl3EQf0hwq/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4366
+ jtAzT4oBgHgl3EQfbfyR/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4367
+ K9E0T4oBgHgl3EQfSgAu/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4368
+ mtAzT4oBgHgl3EQfqP1R/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4369
+ ptAzT4oBgHgl3EQfb_y2/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4370
+ 3NFAT4oBgHgl3EQflB25/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4371
+ DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf filter=lfs diff=lfs merge=lfs -text
4372
+ v9FPT4oBgHgl3EQf-jXG/content/2301.13216v1.pdf filter=lfs diff=lfs merge=lfs -text
4373
+ O9E0T4oBgHgl3EQf0wLF/content/2301.02691v1.pdf filter=lfs diff=lfs merge=lfs -text
4374
+ 39E2T4oBgHgl3EQfjgft/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4375
+ QdFRT4oBgHgl3EQf7Dgl/content/2301.13678v1.pdf filter=lfs diff=lfs merge=lfs -text
4376
+ c9E_T4oBgHgl3EQf0hwq/content/2301.08329v1.pdf filter=lfs diff=lfs merge=lfs -text
4377
+ ctE5T4oBgHgl3EQffg_5/content/2301.05628v1.pdf filter=lfs diff=lfs merge=lfs -text
4378
+ FdE1T4oBgHgl3EQfqwVD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4379
+ mNE4T4oBgHgl3EQftw2O/content/2301.05227v1.pdf filter=lfs diff=lfs merge=lfs -text
4380
+ 4tAzT4oBgHgl3EQffvxD/content/2301.01456v1.pdf filter=lfs diff=lfs merge=lfs -text
4381
+ A9AzT4oBgHgl3EQf__9t/content/2301.01956v1.pdf filter=lfs diff=lfs merge=lfs -text
4382
+ 39E2T4oBgHgl3EQfOAbJ/content/2301.03744v1.pdf filter=lfs diff=lfs merge=lfs -text
4383
+ KNFIT4oBgHgl3EQfaCs0/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4384
+ DdE1T4oBgHgl3EQfEAP6/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4385
+ _9FJT4oBgHgl3EQfqyzS/content/2301.11606v1.pdf filter=lfs diff=lfs merge=lfs -text
4386
+ DdFJT4oBgHgl3EQfBSxP/content/2301.11424v1.pdf filter=lfs diff=lfs merge=lfs -text
4387
+ ddFJT4oBgHgl3EQfSCyU/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4388
+ lNE0T4oBgHgl3EQf7wKH/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4389
+ 39E2T4oBgHgl3EQfOAbJ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4390
+ wNE5T4oBgHgl3EQfMQ59/content/2301.05480v1.pdf filter=lfs diff=lfs merge=lfs -text
4391
+ K9E1T4oBgHgl3EQfGgPl/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4392
+ UNE3T4oBgHgl3EQfEQnM/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4393
+ DdE3T4oBgHgl3EQfUwqw/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4394
+ KtE5T4oBgHgl3EQfYA_J/content/2301.05571v1.pdf filter=lfs diff=lfs merge=lfs -text
4395
+ utFAT4oBgHgl3EQfhx3p/content/2301.08596v1.pdf filter=lfs diff=lfs merge=lfs -text
4396
+ 7tE1T4oBgHgl3EQfTwM_/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4397
+ INFLT4oBgHgl3EQfIy9R/content/2301.12001v1.pdf filter=lfs diff=lfs merge=lfs -text
4398
+ btAzT4oBgHgl3EQfZvwc/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4399
+ 7tE1T4oBgHgl3EQfTwM_/content/2301.03081v1.pdf filter=lfs diff=lfs merge=lfs -text
4400
+ ctE5T4oBgHgl3EQffg_5/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
0dE1T4oBgHgl3EQf4wVz/content/tmp_files/2301.03504v1.pdf.txt ADDED
@@ -0,0 +1,1940 @@
Radial pulsations, moment of inertia and tidal deformability of dark energy stars
Juan M. Z. Pretel¹,*
¹Centro Brasileiro de Pesquisas Físicas, Rua Dr. Xavier Sigaud, 150 URCA, Rio de Janeiro CEP 22290-180, RJ, Brazil
(Dated: January 10, 2023)
We construct dark energy stars with Chaplygin-type equation of state (EoS) in the presence of anisotropic pressure within the framework of Einstein gravity. From the classification established by Iyer et al. [Class. Quantum Grav. 2, 219 (1985)], we discuss the possible existence of isotropic dark energy stars as compact objects. However, there is the possibility of constructing ultra-compact stars for sufficiently large anisotropies. We investigate the stellar stability against radial oscillations, and we also determine the moment of inertia and tidal deformability of these stars. We find that the usual static criterion for radial stability dM/dρc > 0 still holds for dark energy stars, since the squared frequency of the fundamental pulsation mode vanishes at the critical central density corresponding to the maximum-mass configuration. The dependence of the tidal Love number on the anisotropy parameter α is also examined. We show that the surface gravitational redshift, moment of inertia and dimensionless tidal deformability undergo significant changes due to anisotropic pressure, primarily in the high-mass region. Furthermore, in light of the detection of gravitational waves GW190814, we explore the possibility of describing the secondary component of such an event as a stable dark energy star in the presence of anisotropy.
I. INTRODUCTION

Different types of observations (such as Type Ia supernovae, structure formation and CMB anisotropies) indicate that our Universe is not only expanding, it is accelerating. Within the standard ΛCDM model (which is based on cold dark matter and a cosmological constant in Einstein gravity), this cosmic acceleration is due to a smooth component with large negative pressure and repulsive gravity, the so-called dark energy. Such a model gives good agreement with the recent observational data [1], but suffers from the well-known coincidence and fine-tuning problems [2, 3]. The exact physical nature of dark energy is still a mystery and, consequently, the possibility that dark matter and dark energy could be different manifestations of a single substance has been considered [4–7]. In that regard, it was shown that the inhomogeneous Chaplygin gas offers a simple unified model of dark matter and dark energy [8]. It was also argued that if the Universe is dominated by the Chaplygin gas, a cosmological constant would be ruled out with high confidence [9].

Using the Planck 2015 CMB anisotropy, type-Ia supernovae and observed Hubble parameter data sets, the full parameter space of the modified Chaplygin gas was measured by Li et al. [10]. Based on recent observations of high-redshift quasars, Zheng and colleagues [11] investigated a series of Chaplygin gas models as candidates for dark matter-energy unification. The application of the Hamilton-Jacobi formalism to generalized Chaplygin gas models was carried out in Ref. [12]. Additionally, it is worth mentioning that Odintsov et al. [13] considered two different equations of state for dark energy (i.e., power-law and logarithmic effective corrections to the pressure). They showed that the power-law model only yielded some modest results, achieved under negative values of bulk viscosity, while the logarithmic scenario provides good fits in comparison to the ΛCDM model.

Another way to give rise to an accelerated expansion of the Universe is by modifying the geometry itself [14, 15], namely, considering higher curvature corrections to the standard Einstein-Hilbert action. Under this outlook, the cosmic acceleration can be modeled in the scope of a scalar-tensor gravity theory [16, 17]. Moreover, within the context of the so-called f(R) theories [18, 19], the quadratic term in the Ricci scalar R leads to an inflationary solution in the early Universe [20], although such a model does not provide a late-time accelerated expansion. Nevertheless, the late-time acceleration era can be realized by terms containing inverse powers of R [21], though it was shown that this is not compatible with solar system experiments [22]. For a comprehensive study on the evolution of the early and present Universe in f(R) modified gravity, we refer the reader to the review articles [23–25] and references contained therein. On the other hand, the astrophysical implications of the f(R) modified gravitational Lagrangian for compact stars have been intensively investigated in the past few years [26–33].

According to the aforementioned works, different dark energy models have been proposed in order to explain the mechanisms that lead to the cosmic acceleration. Only about 4% of the Universe is made of familiar atomic matter, 20% is dark matter, and it turns out that roughly 76% of the Universe is dark energy [34]. Within the context of General Relativity, dark energy is an exotic negative-pressure contribution that can lead to the observed accelerated expansion. In the absence of consensus regarding a theoretical description for the current accelerated expansion of the Universe, theorists have proposed using the Chaplygin gas as a useful phenomenological description [4]. If dark energy is distributed anywhere, permeating ordinary matter, then it could be present in the interior of a compact star. Therefore, the purpose of this manuscript is to investigate the possible existence of compact stars with dark energy by assuming a Chaplygin-type EoS. For such stars to exist in nature, they need to be stable under small radial perturbations.

Adopting a description of dark energy by means of a phantom (ghost) scalar field, Yazadjiev [35] constructed a general class of exact interior solutions describing mixed relativistic stars containing both ordinary matter and dark energy. The energy conditions and gravitational wave echoes of such stars were recently analyzed in Ref. [36]. Furthermore, the effect of the dynamical scalar field quintessence dark energy on neutron stars was investigated in [37]. Panotopoulos and collaborators [38] studied slowly rotating dark energy stars made of isotropic matter using the Chaplygin EoS. Bhar [39] proposed a model for a dark energy star made of dark and ordinary matter in the Tolman–Kuchowicz spacetime geometry. For further stellar models with dark energy we also refer the reader to Refs. [40–48].

In addition, anisotropy in compact stars may arise due to strong magnetic fields, pion condensation, phase transitions, mixtures of two fluids, bosonic composition, rotation, etc. Thus, regardless of the specific source of the anisotropy, it is more natural to think of anisotropic fluids when studying compact stars at densities above the nuclear saturation density. In that regard, the literature offers some physically motivated functional relations for the anisotropy; see for example Refs. [49–55]. However, we must point out that these anisotropic models are based on general assumptions (or ansatzes) that do not directly relate to exotic modifications of matter or gravity. Indeed, it has been argued that the deformation near the maximum neutron-star mass comes from the anisotropic pressure within these stars, which is caused by the distortion of the Fermi surface predicted by the equation of state of the models [56]. Becerra-Vergara et al. [57] showed that the contribution of the fourth-order correction parameter (a4) of the QCD perturbation to the radial and tangential pressure generates significant effects on the mass-radius relation and the stability of quark stars. It has also been shown that the stellar structure equations in Eddington-inspired Born-Infeld theory with isotropic matter can be recast into GR with a modified (apparent) anisotropic matter [58].

Motivated by the several works already mentioned, we aim to discuss the impact of anisotropy on the macroscopic properties of dark energy stars with Chaplygin-like EoS. We will address the following questions: Do these stars belong to families of compact or ultra-compact stars? How does anisotropy affect the compactness and radial stability of dark energy stars satisfying the causality condition? In particular, by adopting the phenomenological ansatz proposed by Horvat et al. [51], we determine the radius, mass, gravitational redshift, frequency of the fundamental oscillation mode, moment of inertia and dimensionless tidal deformability of anisotropic dark energy stars. The isotropic solutions are recovered when the anisotropy parameter vanishes, i.e. when α = 0.

The organization of this paper is as follows: In Sec. II we start with a brief overview of relativistic stellar structure, describing the basic equations for radial pulsations, moment of inertia and tidal deformability. We then introduce the Chaplygin-like EoS and discuss its relation to the cosmological context in Sec. III, where we also present the anisotropy profile. Section IV provides a discussion of the numerical results for the different physical properties of dark energy stars. Finally, our conclusions are summarized in Sec. V.
II. STELLAR STRUCTURE EQUATIONS

In order to study the basic features of compact stars with dark energy, in this section we briefly summarize the stellar structure equations in Einstein gravity. In particular, we focus on the hydrostatic equilibrium structure, radial pulsations, moment of inertia, and tidal deformability. The theory of gravity to be used in this work is general relativity, where the Einstein field equations are given by

Gμν ≡ Rμν − (1/2)gμν R = 8π Tμν,  (1)

with Gμν being the Einstein tensor, Rμν the Ricci tensor, R the scalar curvature, and Tμν the energy-momentum tensor. Since we are interested in isolated compact stars, we consider that the spacetime can be described by the spherically symmetric four-dimensional line element

ds² = −e^{2ψ}dt² + e^{2λ}dr² + r²(dθ² + sin²θ dφ²).  (2)

In addition, we model the compact-star matter by an anisotropic perfect fluid, whose energy-momentum tensor is given by

Tμν = (ρ + pt)uμuν + pt gμν − σ kμkν,  (3)

where ρ is the energy density, σ ≡ pt − pr the anisotropy factor, pr the radial pressure, pt the tangential pressure, uμ the four-velocity of the fluid, and kμ a unit four-vector. These four-vectors must satisfy uμuμ = −1, kμkμ = 1 and uμkμ = 0. Notice that the stellar fluid becomes isotropic when σ = 0.

A. TOV equations

When the stellar fluid remains in hydrostatic equilibrium, neither metric nor thermodynamic quantities depend on the time coordinate. This allows us to write uμ = e^{−ψ}δμ₀ and kμ = e^{−λ}δμ₁. Accordingly, the hydrostatic equilibrium of an anisotropic compact star is governed by the TOV equations:

dm/dr = 4πr²ρ,  (4)
dpr/dr = −(ρ + pr)[m/r² + 4πr pr](1 − 2m/r)⁻¹ + 2σ/r,  (5)
dψ/dr = −[1/(ρ + pr)] dpr/dr + 2σ/[r(ρ + pr)],  (6)

which are obtained from Eqs. (1)-(3) together with the conservation law ∇μT^μ₁ = 0. The metric function λ(r) is determined from the relation e^{−2λ} = 1 − 2m/r, where m(r) is the gravitational mass within a sphere of radius r.

By supplying an EoS for the radial pressure in the form pr = pr(ρ) and a defined anisotropy relation for σ, the system of differential equations (4)-(6) is numerically integrated from the center at r = 0 to the surface of the star r = R, which corresponds to vanishing radial pressure. Therefore, the above equations will be solved under the requirement of the following boundary conditions

ρ(0) = ρc,  m(0) = 0,  ψ(R) = (1/2) ln(1 − 2M/R),  (7)

where ρc is the central energy density, and M ≡ m(R) is the total mass of the star calculated at its surface. The numerical solution of the TOV equations describes the equilibrium background and allows us to obtain the metric components and fluid variables.
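As a concrete illustration of how Eqs. (4)-(7) can be integrated in practice, the following minimal Python sketch solves the isotropic (σ = 0) TOV system with the Chaplygin-like EoS pr = Aρ − B/ρ introduced later in Sec. III, working in geometrized units. This is not the author's code: the parameter values (model I, A = 0.3, B = 6.0×10⁻²⁰ m⁻⁴), the central density and the numerical tolerances are illustrative choices only.

```python
# Minimal sketch: integrate the isotropic TOV equations (4)-(5) with p_r = A*rho - B/rho
# in geometrized units (G = c = 1, lengths in metres).  Illustrative parameters only.
import numpy as np
from scipy.integrate import solve_ivp

A, B = 0.3, 6.0e-20          # EoS parameters (B in m^-4), "model I" of the text
G_OVER_C2 = 7.4261e-28       # m/kg, converts kg/m^3 to geometric units (m^-2)
MSUN = 1476.6                # solar mass in metres (G*Msun/c^2)

def rho_of_p(p):
    """Invert p = A*rho - B/rho for the positive density root."""
    return (p + np.sqrt(p**2 + 4.0*A*B)) / (2.0*A)

def tov_rhs(r, y):
    m, p = y
    rho = rho_of_p(p)
    dm = 4.0*np.pi*r**2*rho
    dp = -(rho + p)*(m + 4.0*np.pi*r**3*p) / (r*(r - 2.0*m))
    return [dm, dp]

def surface(r, y):            # event: radial pressure drops to (almost) zero
    return y[1] - 1e-18
surface.terminal = True

def solve_star(rho_c_si):
    rho_c = rho_c_si*G_OVER_C2
    p_c = A*rho_c - B/rho_c
    r0 = 1e-3                                     # small seed radius to avoid r = 0
    y0 = [4.0/3.0*np.pi*r0**3*rho_c, p_c]
    sol = solve_ivp(tov_rhs, (r0, 100e3), y0, events=surface, rtol=1e-8, atol=1e-30)
    R = sol.t_events[0][0]                        # stellar radius in metres
    M = sol.y_events[0][0][0]                     # gravitational mass in metres
    return R/1e3, M/MSUN                          # (km, Msun)

if __name__ == "__main__":
    R, M = solve_star(2.0e18)                     # central density in kg/m^3
    print(f"R = {R:.2f} km,  M = {M:.3f} Msun")
```

Sweeping such a solver over a range of central densities is what produces mass-radius curves of the kind discussed in Sec. IV.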
B. Radial oscillations

A rigorous analysis of the radial stability of compact stars requires the calculation of the frequencies of normal vibration modes. Such frequencies can be found by considering small deviations from the hydrostatic equilibrium state while maintaining the spherical symmetry of the star. In the linear treatment, where all quadratic (or higher-order) or mixed terms in the perturbations are discarded, one assumes that all perturbations in physical quantities are arbitrarily small. The fluid element located at r in the unperturbed configuration is displaced to radial coordinate r + ξ(t, r) in the perturbed configuration, where ξ is the Lagrangian displacement. All perturbations have a harmonic time dependence of the form ∼ e^{iνt}, where ν is the oscillation frequency to be determined. Consequently, defining ζ ≡ ξ/r, the adiabatic¹ radial pulsations of anisotropic compact stars are governed by the following differential equations [55]

dζ/dr = −(1/r)[ 3ζ + Δpr/(γpr) + 2σζ/(ρ + pr) ] + (dψ/dr)ζ,  (8)

d(Δpr)/dr = ζ[ ν²e^{2(λ−ψ)}(ρ + pr)r − 4(dpr/dr) − 8π(ρ + pr)e^{2λ}r pr + r(ρ + pr)(dψ/dr)² + 2σ(4/r + dψ/dr) + 2(dσ/dr) ] + 2σ(dζ/dr) − Δpr[ dψ/dr + 4π(ρ + pr)r e^{2λ} ] + (2/r)δσ,  (9)

where Δpr is the Lagrangian perturbation of the radial pressure and γ = (1 + ρ/pr)dpr/dρ is the adiabatic index at constant specific entropy.

¹ In the adiabatic theory, it is assumed that the fluid elements of the star neither gain nor lose heat during the oscillation.

The above first-order time-independent equations (8) and (9) require boundary conditions set at the center and surface of the star, similar to a vibrating string fixed at its ends. Since Eq. (8) has a singularity at the origin, the following condition must be required

Δpr = −[2σζ/(ρ + pr)]γpr − 3γζpr  as  r → 0,  (10)

while the Lagrangian perturbation of the radial pressure at the surface must satisfy

Δpr = 0  as  r → R.  (11)
C. Moment of inertia

Suppose a particle is dropped from rest at a great distance from a rotating star; then it would experience an ever-increasing drag in the direction of rotation as it approaches the star. Based on this description, we introduce the angular velocity acquired by an observer falling freely from infinity, denoted by ω(r, θ). Here we will calculate the moment of inertia of an anisotropic dark energy star under the slowly rotating approximation [59]. This means that when we consider rotational corrections only to first order in the angular velocity of the star Ω, the line element (2) is replaced by its slowly rotating counterpart, namely

ds² = −e^{2ψ(r)}dt² + e^{2λ(r)}dr² + r²(dθ² + sin²θ dφ²) − 2ω(r, θ)r² sin²θ dt dφ,  (12)

and following Ref. [59], it is pertinent to define the difference ϖ ≡ Ω − ω as the coordinate angular velocity of the fluid element at (r, θ) seen by the freely falling observer. Keep in mind that Ω is the angular velocity of the stellar fluid as seen by an observer at rest at some spacetime point (t, r, θ, φ), and hence the four-velocity up to linear terms in Ω can be written as uμ = (e^{−ψ}, 0, 0, Ωe^{−ψ}). To this order, the spherical symmetry is still preserved and it is possible to extend the validity of the TOV equations (4)-(6). Nonetheless, the 03-component of the field equations contributes an additional differential equation for the angular velocity. Retaining only first-order terms in Ω, such component becomes

(e^{ψ−λ}/r⁴) ∂/∂r[ e^{−(ψ+λ)}r⁴ ∂ϖ/∂r ] + [1/(r² sin³θ)] ∂/∂θ[ sin³θ ∂ϖ/∂θ ] = 16π(ρ + pt)ϖ.  (13)

As in the case of isotropic fluids, we follow the same treatment carried out by Hartle [59, 60] and assume that ϖ can be written as

ϖ(r, θ) = Σ_{l=1}^{∞} ϖl(r) (−1/sinθ)(dPl/dθ),  (14)

where Pl are Legendre polynomials. Taking this expansion into account, Eq. (13) becomes

(e^{ψ−λ}/r⁴) d/dr[ e^{−(ψ+λ)}r⁴ dϖl/dr ] − [l(l + 1) − 2]ϖl/r² = 16π(ρ + pt)ϖl.  (15)

At a distance far away from the star, where e^{−(ψ+λ)} becomes unity, the asymptotic solution of Eq. (15) takes the form ϖl(r) → a₁r^{−l−2} + a₂r^{l−1}. If spacetime is to be flat at large r, then ω → 2J/r³ (or equivalently, ϖ → Ω − 2J/r³) for r → ∞, where J is the total angular momentum of the star [59, 61]. Therefore, comparing this with the asymptotic behavior of ϖl(r), we find that l = 1. As a result, ϖ is a function only of the radial coordinate, and Eq. (15) reduces to

(e^{ψ−λ}/r⁴) d/dr[ e^{−(ψ+λ)}r⁴ dϖ/dr ] = 16π(ρ + pt)ϖ,  (16)

which can be integrated to give

[r⁴ dϖ/dr]_{r=R} = 16π ∫₀^R (ρ + pt)r⁴e^{λ−ψ}ϖ dr.  (17)

In view of Eq. (17), we can obtain the angular momentum J and hence the moment of inertia I = J/Ω of a slowly rotating anisotropic star:

I = (8π/3) ∫₀^R (ρ + pr + σ)e^{λ−ψ}r⁴(ϖ/Ω) dr,  (18)

which reduces to the expression given in Ref. [61] for isotropic compact stars when σ = 0. For an arbitrary choice of the central value ϖ(0), the appropriate boundary conditions for the differential equation (16) come from the requirements of regularity at the center of the star and asymptotic flatness at infinity, namely

dϖ/dr|_{r=0} = 0,  lim_{r→∞} ϖ = Ω.  (19)

Once the solution for ϖ(r) is found, we can then determine the moment of inertia through the integral (18). It is remarkable that the above expression for I is referred to as the "slowly rotating" approximation because it was obtained to lowest order in the angular velocity Ω [61]. This means that the stellar structure equations are still given by the TOV equations (4)-(6).
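In practice, Eq. (16) with the boundary conditions (19) translates into a short numerical routine. The sketch below is a hedged illustration (not the author's implementation): it assumes that the equilibrium profiles ρ(r), pt(r), λ(r) and ψ(r) are already available as interpolating functions from a prior TOV integration such as the one sketched above (extended with Eq. (6) for ψ); the function names are placeholders. It integrates ϖ outward with an arbitrary regular central value, matches to the exterior solution ϖ = Ω − 2J/r³ at r = R, and returns I = J/Ω.

```python
# Hedged sketch of the Hartle procedure of Sec. II C.  Background callables rho, p_t, lam, psi
# (functions of r, geometrized units) are assumed to come from a previous TOV integration.
import numpy as np
from scipy.integrate import solve_ivp

def moment_of_inertia(R, rho, p_t, lam, psi):
    """Return (I, J, Omega) for a slowly rotating star of radius R (metres), geometric units."""
    def rhs(r, y):
        w, u = y                                  # u = exp(-(psi+lam)) * r^4 * dw/dr
        dw = np.exp(psi(r) + lam(r)) * u / r**4
        du = 16.0*np.pi*(rho(r) + p_t(r)) * np.exp(lam(r) - psi(r)) * r**4 * w
        return [dw, du]

    r0 = 1e-3                                     # regular centre: w(0) = const, dw/dr(0) = 0
    sol = solve_ivp(rhs, (r0, R), [1.0, 0.0], rtol=1e-8, atol=1e-12)
    wR, uR = sol.y[0, -1], sol.y[1, -1]
    # Outside the star psi + lam = 0, so u(R) = R^4 w'(R) = 6 J  (from w = Omega - 2J/r^3).
    J = uR / 6.0
    Omega = wR + 2.0*J/R**3
    # I is returned in geometric units (m^3); multiply by c^2/G ~ 1.35e27 kg/m for kg*m^2.
    return J/Omega, J, Omega
```

Because Eq. (16) is linear in ϖ, the arbitrary central value 1.0 drops out of the ratio I = J/Ω, which is why no rescaling step is needed.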
D. Tidal deformability

It is well known that the tidal properties of neutron stars are measurable in gravitational waves emitted from the inspiral of a binary neutron-star coalescence [62, 63]. In that regard, here we also study the dimensionless tidal deformability of individual dark energy stars. To do so, we follow the procedure carried out by Hinderer et al. [64] (see also Refs. [65–70] for additional results). The basic idea is as follows: in a binary system, the deformation of a compact star due to the tidal effect created by the companion star is characterized by the tidal deformability parameter λ̄ = −Qij/Eij, where Qij is the induced quadrupole moment tensor and Eij is the tidal field tensor [68]. Namely, the latter describes the tidal field from the spacetime curvature sourced by the distant companion.

The tidal parameter is related to the tidal Love number k₂ through the relation²

λ̄ = (2/3)k₂R⁵,  (20)

but it is common in the literature to define the dimensionless tidal deformability Λ = λ̄/M⁵, so in our results we will focus on Λ. The calculation of λ̄ requires considering linear quadrupolar perturbations (due to the external tidal field) to the equilibrium configuration. Thus, the spacetime metric is given by gμν = g⁰μν + hμν, where g⁰μν describes the equilibrium configuration and hμν is a linearized metric perturbation. For static and even-parity perturbations in the Regge-Wheeler gauge [71], the perturbed metric can be written as [64]

hμν = diag[ −e^{2ψ(r)}H₀, e^{2λ(r)}H₂, r²K, r² sin²θ K ] Y2m(θ, φ),  (21)

where H₀, H₂ and K are functions of the radial coordinate, and Ylm are the spherical harmonics for l = 2.

² It should be noted that the tidal deformability parameter is denoted by λ̄ in order not to be confused with the metric component λ.

Since the perturbed energy-momentum tensor is given by δT^ν_μ = diag(−δρ, δpr, δpt, δpt), the linearized field equations imply that:

H₀ = −H₂ ≡ H  from  δG²₂ − δG³₃ = 0,
K′ = 2Hψ′ + H′  from  δG²₁ = 0,
δpt = [H/(8πr)] e^{−2λ}(λ′ + ψ′) Y2m  from  δG²₂ = 8πδpt.

In addition, from δG⁰₀ − δG¹₁ = −8π(δρ + δpt), we can obtain the following differential equation [72]

H″ + PH′ + QH = 0,  (22)

or alternatively,

ry′ = −y² + (1 − rP)y − r²Q,  (23)

where we have defined

y ≡ rH′/H,  (24)
P ≡ 2/r + e^{2λ}[ 2m/r² + 4πr(pr − ρ) ],  (25)
Q ≡ 4πe^{2λ}[ 4ρ + 8pr + (ρ + pr)(1 + v²sr)/(A v²sr) ] − 6e^{2λ}/r² − 4ψ′²,  (26)

with A ≡ dpt/dpr and vsr being the radial speed of sound.

By matching the internal solution with the external solution of the perturbed variable H at the surface of the star r = R, we obtain the tidal Love number [72]

k₂ = (8/5)C⁵(1 − 2C)²[2C(yR − 1) − yR + 2] × { 2C[ 4(yR + 1)C⁴ + (6yR − 4)C³ + (26 − 22yR)C² + 3(5yR − 8)C − 3yR + 6 ] + 3(1 − 2C)²[2C(yR − 1) − yR + 2] log(1 − 2C) }⁻¹,  (27)

where C ≡ M/R is the compactness of the star, and yR ≡ y(R) is obtained by integrating Eq. (23) from the origin up to the stellar surface.
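Once yR and C are known, Eq. (27) is purely algebraic, and the dimensionless deformability follows from combining Eq. (20) with Λ = λ̄/M⁵, i.e. Λ = (2/3)k₂/C⁵. A direct transcription in Python is given below; obtaining yR itself still requires integrating Eq. (23) on a TOV background, and the numbers in the example call are illustrative only (they do not come from the paper).

```python
# Direct transcription of Eq. (27) plus Lambda = (2/3) k2 / C^5 from Eq. (20).
import numpy as np

def love_number_k2(C, yR):
    """l = 2 tidal Love number as a function of compactness C = M/R and yR = y(R)."""
    num = (8.0/5.0) * C**5 * (1.0 - 2.0*C)**2 * (2.0*C*(yR - 1.0) - yR + 2.0)
    den = (2.0*C*(4.0*(yR + 1.0)*C**4 + (6.0*yR - 4.0)*C**3
                  + (26.0 - 22.0*yR)*C**2 + 3.0*(5.0*yR - 8.0)*C - 3.0*yR + 6.0)
           + 3.0*(1.0 - 2.0*C)**2*(2.0*C*(yR - 1.0) - yR + 2.0)*np.log(1.0 - 2.0*C))
    return num / den

def dimensionless_tidal_deformability(C, yR):
    return (2.0/3.0) * love_number_k2(C, yR) / C**5

if __name__ == "__main__":
    # Illustrative inputs only: C = 0.2, yR = 2.0
    print(love_number_k2(0.2, 2.0), dimensionless_tidal_deformability(0.2, 2.0))
```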
III. EQUATION OF STATE AND ANISOTROPY MODEL

As is well known, a possible alternative to the phantom and quintessence fields is the Chaplygin gas, where the EoS assumes the form pr = −B/ρ, with B a positive constant (given in m⁻⁴ units). In fact, it was argued that such a gas could provide a solution to unify the effects of dark matter in the early times and dark energy in late times [4, 11]. Although the literature provides a more generalized version of such an EoS in the context of the Friedmann-Lemaître-Robertson-Walker Universe [5–7, 73–77], here we will use the simplest form plus a linear term corresponding to a barotropic fluid, namely

pr = Aρ − B/ρ,  (28)

where A is a positive dimensionless constant. Our model is therefore characterized by two free parameters, A and B. Nevertheless, we must emphasize here that Li et al. [10] considered an equation of state with three degrees of freedom, specifically p = Aρ − B/ρ^α, where α is an extra parameter. They carried out a statistical treatment of astronomical data in order to constrain the parameter space. In the light of the Markov chain Monte Carlo method, they found that at the 2σ level, α = −0.0156 (+0.0982, +0.2346; −0.1380, −0.2180) and A = 0.0009 (+0.0018, +0.0030; −0.0017, −0.0030) from the CMB+JLA+CC data sets. In other words, the constants α and A are very close to zero and hence the nature of the unified dark matter-energy model is very similar to the standard cosmological ΛCDM model.

On the other hand, at the astrophysical level, compact stars obeying the EoS (28) have been investigated by several authors; see for example Refs. [38, 41, 43–45]. In this work we will adopt values of A and B for which appreciable changes in the mass-radius diagram can be visualized, in order to compare our theoretical results with observational measurements of massive pulsars.

In order to describe physically realistic compact stars, the causality condition must be respected throughout the interior region of the star. In other words, the speed of sound (defined by vs ≡ √(dp/dρ)) cannot be greater than the speed of light. Thus, in view of Eq. (28), we have

v²sr ≡ dpr/dρ = A + B/ρ²,  (29)

and since the radial pressure vanishes at the surface of the star, the density there satisfies B = Aρ². Thereby, the causality condition v²sr(R) = 2A < 1 implies that A < 0.5.

Besides, it is more realistic to consider stellar models where there exists a tangential pressure as well as a radial one, since anisotropies arise at high densities, i.e. above the nuclear saturation density as considered in this work. Although the literature offers different functional relations to model anisotropic pressures at very high densities inside compact stars [49–54], here we adopt the simplest model, which was proposed by Horvat and collaborators [51]:

σ = α(2m/r)pr = α(1 − e^{−2λ})pr,  (30)

where α is a dimensionless parameter that controls the amount of anisotropy within the stellar fluid. This parameter can assume positive or negative values of the order of unity, see Refs. [26, 32, 51, 52, 55, 78–82]. Notice that the isotropic solutions are recovered when α vanishes. Specifically, the anisotropy ansatz (30) has two important characteristics: (i) the fluid becomes isotropic at the center, generating regular solutions, and (ii) the effect of anisotropy vanishes in the hydrostatic equilibrium equation in the Newtonian limit. Unlike this profile, the effect of anisotropy does not vanish in the hydrostatic equilibrium equation in the non-relativistic regime for the Bowers-Liang model [49], which could be an unphysical trait as argued in Ref. [79]. For a broader discussion of the different ways of generating static spherically symmetric anisotropic fluid solutions, we refer the reader to the recent review article [83].

Since the Eulerian perturbation of the metric potential λ can be written as δλ = −4πr(ρ + pr)e^{2λ}ξ [55], δσ takes the form

δσ = α[ (1 − e^{−2λ})δpr − 8πpr(ρ + pr)r²ζ ],  (31)

where it should be noted that the relation between the Eulerian and Lagrangian perturbations of the radial pressure is given by Δpr = δpr + rζp′r. The above expression will be substituted into Eq. (9) when we later discuss the radial pulsations in the stellar interior for at least some values of α.
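The EoS (28), the sound speed (29), the surface relation B = Aρ² and the anisotropy ansatz (30) are simple enough to be captured in a few lines. The following small sketch (with model I parameters, purely for illustration and in geometrized units) also checks the causality bound v²sr(R) = 2A < 1 discussed above.

```python
# Small helper sketch for Sec. III: Chaplygin-like EoS (28), sound speed (29),
# Horvat anisotropy ansatz (30), and the surface causality check.  Illustrative values.
import numpy as np

A, B = 0.3, 6.0e-20                      # model I; B in m^-4 (geometrized units)

def p_radial(rho):                       # Eq. (28)
    return A*rho - B/rho

def v2_sound(rho):                       # Eq. (29): dp_r/drho
    return A + B/rho**2

def sigma_horvat(alpha, m, r, p_r):      # Eq. (30): sigma = alpha*(2m/r)*p_r
    return alpha * (2.0*m/r) * p_r

rho_surface = np.sqrt(B/A)               # p_r(rho_s) = 0  ->  B = A*rho_s^2
assert abs(v2_sound(rho_surface) - 2.0*A) < 1e-12   # causality at the surface: 2A < 1
print(f"rho_s = {rho_surface:.3e} m^-2, v_sr^2(R) = {v2_sound(rho_surface):.2f} (< 1 required)")
```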
IV. NUMERICAL RESULTS

A. Equilibrium configurations

So far we do not know exactly whether the millisecond pulsars (observed in compact binaries from optical spectroscopic and photometric measurements) are hadronic, quark or hybrid stars. In fact, it has been theorized that cold quark matter might exist at the core of heavy neutron stars [84]. Despite the precise measurements of masses [85–87] and radii [88–90], such constraints are still unable to distinguish the theoretical predictions coming from the different models for strange stars and (hybrid) neutron stars. This means that the dense-matter EoS within compact stars still remains poorly understood. Furthermore, a realistic compact star possesses high magnetic fields and rotation, which significantly alter its internal structure. For comparison purposes, it is therefore common to place the observational mass-radius measurements (in view of the detection of gravitational waves and electromagnetic signals) on the mass-radius diagrams for any type of EoS, even those of different microscopic compositions. In that perspective, our theoretical results will be compared with observational measurements.

We begin our discussion of dark energy stars by considering the isotropic case (i.e., when σ = 0 in the TOV equations). We numerically integrate Eqs. (4)-(6) from the center up to the surface of the star using the boundary conditions (7). As usual, the radius R is determined when the pressure vanishes, and the total mass M is calculated at the surface. The left panel of Fig. 1 exhibits the mass-radius relations of dark energy stars for different values of the parameters A and B in the EoS (28). Remark that we have adopted values of A less than 0.5 in order to respect the causality condition. One can observe that small values of A (see the black curve) do not provide compact stars that fit current observational data. However, higher values of the maximum mass can be obtained for larger values of A; see for example the red and green curves. For a fixed value of A, the maximum mass decreases as the parameter B increases. We perceive that the secondary component resulting from the gravitational-wave signal GW190814 [91] can be consistently described as a compact star with Chaplygin EoS (28) for A = 0.4 and B ∈ [4, 5]µ. Furthermore, the magenta curve fits very well with all observational data, but its maximum-mass value is above 3M⊙.

Another interesting feature of these stars is their compactness, defined by C ≡ M/R. According to the classification adopted by Iyer et al. [92], the configurations shown in the mass-radius diagram correspond to compact stars; see the right plot of Fig. 1. Besides, we can appreciate that the compactness of dark energy stars is of the order of the compactness of hadronic-matter stars, as is the case of the SLy EoS [93], despite the fact that the maximum mass in the magenta configuration sequence can exceed 3M⊙. Nonetheless, as we will see later, the introduction of anisotropy can turn such stars into ultra-compact objects. Of course, this will depend on the amount of anisotropy in the stellar interior.

In order to include anisotropic pressures and investigate their effects on the internal structure of dark energy stars, we will adopt two specific models with the following parameters:

⋆ Model I: A = 0.3, B = 6.0µ,
⋆ Model II: A = 0.4, B = 5.2µ,

which are favored by observational measurements according to the left panel of Fig. 1. Moreover, model II precisely corresponds to the first model considered by Panotopoulos et al. [38].

Similar to the isotropic case, we numerically solve the hydrostatic background equations (4)-(6) with boundary conditions (7), but taking into account the anisotropy profile (30). For instance, for model I and a central density ρc = 2.0×10¹⁸ kg/m³, Fig. 2 illustrates the mass density, pressures and squared speed of sound as functions of the radial coordinate for different values of the free parameter α. We can see that the internal structure of a dark energy star is affected by the presence of anisotropy. In effect, the radius of the star increases (decreases) for more positive (negative) values of α. In addition, we remark that both the radial and tangential speeds of sound respect the causality condition. This has also been verified for the other values of central density considered in the construction of Fig. 1.

Varying the central density, we obtain the mass-radius diagrams and mass-central density relations for models I and II, as shown in Fig. 3. We observe that the substantial changes introduced by anisotropy in dark energy stars occur in the high-mass branch (close to the maximum-mass point), while the effects are irrelevant at low central densities. The maximum-mass values increase as the parameter α increases (see also the data in Table I). Note that model I without anisotropic pressures is not capable of generating maximum masses above 2M⊙.

FIG. 1. Left panel: Mass-radius diagrams for dark energy stars with Chaplygin-like EoS (28) and isotropic pressure (σ = 0) for several values of the positive parameters A and B. Here the constant B is given in µ = 10⁻²⁰ m⁻⁴ units. The gray horizontal stripe at 2.0M⊙ stands for the two massive NS pulsars J1614-2230 [85] and J0348+0432 [86]. Yellow and blue regions represent the observational measurements of the masses of the highly massive NS pulsars J0740+6620 [87] and J2215+5135 [94], respectively. The filled pink band stands for the lower mass of the compact object detected by the GW190814 event [91], and the cyan area is the mass-radius constraint from the GW170817 event. Moreover, the NICER measurements for PSR J0030+0451 are displayed by black dots with their respective error bars [95, 96]. Right panel: Variation of the compactness with total gravitational mass, where the gray and orange stripes represent compact and ultra-compact objects, respectively, according to the classification given in Ref. [92]. For comparison reasons, we have included the results corresponding to the SLy EoS [93] by blue curves in both plots.
Nevertheless, the inclusion of anisotropy (see the blue curve for α = 0.4) allows a significant increase in the maximum mass and hence a more favorable description of the compact objects observed in nature. On the other hand, model II with anisotropies (see the orange curves) fits better with the observational measurements. In particular, in view of the lower mass of the compact object from the coalescence GW190814 [91], two curves are particularly outstanding; in other words, such an object can be well described as an anisotropic dark energy star when α = 0.2 and α = 0.4. Moreover, model II with negative anisotropies (such as α = −0.4) favors the description of the massive pulsar J2215+5135 [94].

The left panel of Fig. 4 describes the behavior of the compactness as a function of central density. Positive anisotropies lead to an increase in compactness, mainly in the high-central-density branch. Remarkably, for sufficiently large values of α (see the purple curve), it is possible to obtain anisotropic dark energy stars as ultra-compact objects.

The gravitational redshift, conventionally defined as the fractional change between observed and emitted wavelengths compared to the emitted wavelength, in the case of a Schwarzschild star is given by [61]

zsur = e^{λ(R)} − 1 = (1 − 2M/R)^{−1/2} − 1.  (32)

In the right plot of Fig. 4, the surface gravitational redshift is plotted as a function of the total mass for both models I and II. This plot indicates that the gravitational redshift of light emitted at the surface of a dark energy star is substantially affected by the anisotropy in the high-mass region, while the changes are negligible for sufficiently low masses. For a fixed value of central density, Table II shows that positive (negative) anisotropy increases (decreases) the value of the redshift.

TABLE I. Maximum-mass configurations with Chaplygin-like EoS (28) for models I and II. The energy density values correspond to the critical central density where the function M(ρc) is a maximum on the right plot of Fig. 3.

Model    α     ρc [10¹⁸ kg/m³]   R [km]    M [M⊙]
       −0.4        2.424          9.812     1.786
       −0.2        2.364          9.902     1.852
  I       0        2.295          9.994     1.919
        0.2        2.219         10.086     1.988
        0.4        2.135         10.180     2.059
       −0.4        1.777         11.630     2.320
       −0.2        1.721         11.738     2.402
 II       0        1.661         11.845     2.486
        0.2        1.594         11.955     2.570
        0.4        1.523         12.065     2.565
TABLE II. Radius, mass, redshift, fundamental mode frequency (f₀ = ν₀/2π), moment of inertia and dimensionless tidal deformability of dark energy stars with central energy density ρc = 1.5 × 10¹⁸ kg/m³ as predicted by models I and II for several values of the anisotropy parameter α. Remarkably, with the exception of the fundamental mode frequency and tidal deformability, these properties undergo a significant increase as α increases.

Model    α     R [km]   M [M⊙]   zsur   f₀ [kHz]   I [10³⁸ kg·m²]      Λ
       −0.4    10.062    1.713    0.418    2.414        1.695        13.278
       −0.2    10.163    1.781    0.440    2.312        1.820        10.709
  I       0    10.263    1.852    0.463    2.201        1.957         8.598
        0.2    10.361    1.926    0.489    2.081        2.105         6.868
        0.4    10.456    2.003    0.518    1.950        2.265         5.454
       −0.4    11.767    2.310    0.543    1.131        3.298         4.889
       −0.2    11.859    2.395    0.574    0.998        3.531         3.823
 II       0    11.944    2.481    0.609    0.840        3.778         2.978
        0.2    12.019    2.569    0.647    0.637        4.037         2.309
        0.4    12.083    2.656    0.688    0.315        4.303         1.782

FIG. 2. Radial behavior of the mass density (left panel), pressures (middle panel) and squared speed of sound (right panel) inside an anisotropic dark energy star with central density ρc = 2.0 × 10¹⁸ kg/m³ and several values of the parameter α. All plots correspond to model I and the black curves represent the isotropic solutions. Note that both the radial and tangential speed of sound obey the causality condition. Furthermore, one can observe that the increase in α leads to larger radii, and the anisotropy is more pronounced in the intermediate regions.
B. Oscillation spectrum

A necessary condition (the well-known M(ρc) method) for stellar stability is that stable stars must lie in the region where dM/dρc > 0. According to the right plot of Fig. 3, the full blue and orange circles on each curve indicate the onset of instability for each family of equilibrium solutions. However, a sufficient condition is to calculate the frequencies of the radial vibration modes for each central density [61]. Here we will analyze whether both methods are compatible in the case of dark energy stars including anisotropic pressure.

Once the equilibrium equations (4)-(6) are integrated from the center to the surface of the star, we proceed to solve the radial pulsation equations (8) and (9) with the corresponding boundary conditions (10) and (11) using the shooting method. Namely, we integrate from the origin (where we consider the normalized eigenfunction ζ(0) = 1) up to the stellar surface for a set of trial values of ν² satisfying the condition (10). In this way, the appropriate eigenfrequencies correspond to the values for which the boundary condition (11) is fulfilled.
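The shooting procedure just described amounts to a one-dimensional root search in ν²: for each trial value one integrates Eqs. (8)-(9) outward with the regular-centre condition (10) and ζ(0) = 1, and asks whether the surface condition (11), Δpr(R) = 0, is met. A schematic driver for that search is sketched below; the routine that actually integrates the pulsation equations on the equilibrium background is left as an assumed user-supplied callable (here called surface_residual), so only the bracketing and bisection logic is written out. A negative ν²₀ obtained for the fundamental mode signals radial instability.

```python
# Schematic shooting driver (not the author's code): find nu^2 such that Delta p_r(R) = 0.
import numpy as np

def find_mode(surface_residual, nu2_min, nu2_max, n_scan=200, tol=1e-10):
    """Scan [nu2_min, nu2_max] for a sign change of Delta p_r(R) and bisect the first one."""
    grid = np.linspace(nu2_min, nu2_max, n_scan)
    vals = [surface_residual(x) for x in grid]
    for a, b, fa, fb in zip(grid[:-1], grid[1:], vals[:-1], vals[1:]):
        if fa*fb < 0.0:                          # bracketed eigenvalue (fundamental if first)
            while b - a > tol*max(1.0, abs(b)):
                mid = 0.5*(a + b)
                fm = surface_residual(mid)
                if fa*fm <= 0.0:
                    b, fb = mid, fm
                else:
                    a, fa = mid, fm
            return 0.5*(a + b)
    return None                                  # no root in the scanned interval

if __name__ == "__main__":
    # Stand-in residual for demonstration only; a real run integrates Eqs. (8)-(9).
    print(find_mode(lambda nu2: nu2 - 4.0, 0.0, 10.0))   # trivially recovers nu^2 = 4
```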
1159
+ For instance, for a central density ρc = 1.5×1018 kg/m3,
1160
+ α = 0.4 and parameters given by model I, Fig. 5 dis-
1161
+ plays the radial behavior of the perturbation variables
1162
+ for the first five squared eigenfrequencies ν2
1163
+ n, where n
1164
+ indicates the number of nodes inside the star. This fre-
1165
+ quency spectrum forms an infinite discrete sequence, i.e.
1166
+ ν2
1167
+ 0 < ν2
1168
+ 1 < ν2
1169
+ 2 < · · · , where the eigenvalue corresponding
1170
+ to n = 0 is the lowest one (or equivalently, the longest
1171
+ period of all the allowed vibration modes) and it is known
1172
+ as the fundamental mode.
1173
+ Such mode has no nodes,
1174
+ [Page break; graphic for Fig. 3: legend (Model I, Model II; α = −0.4 … 0.4) and axis text (R [km], Log ρc [kg/m^3], M [M⊙]) from the two panels; see the caption below.]
1215
+ FIG. 3. Mass-radius diagram (left panel) and mass-central density relation (right panel) for anisotropic dark energy stars as
1216
+ predicted by model I (blue curves) and II (orange curves) with anisotropy profile (30) for several values of α. The colored
1217
+ bands in the left plot represent the same as in Fig. 1. Moreover, the full blue and orange circles on the right plot indicate the
1218
+ maximum-mass points for model I and II, respectively. Note that the maximum-mass values for model II correspond to lower
1219
+ central densities than those for model I; however, model II allows larger masses (see also Table I). The critical central density
1220
+ corresponding to the maximum point on the M(ρc) curve is modified by the presence of anisotropy for both models.
1221
+ [Graphic for Fig. 4: legend (Model I, Model II; α = −0.4 … 0.4 and α = 0.7) and axis text (Log ρc [kg/m^3], C; M [M⊙], zsur), with the bands C > 1/3 (ultra-compact objects) and 1/6 < C < 1/3 (compact objects); see the caption below.]
1268
+ FIG. 4.
1269
+ Left panel: Variation of the compactness with central density for several anisotropic dark energy star sequences.
1270
+ The gray and light-green stripes represent compact and ultra-compact objects, respectively, according to the classification
1271
+ established by Iyer et al. [92]. Positive anisotropy results in increased compactness for sufficiently high central densities, while
1272
+ the opposite occurs for negative anisotropy. Note also that dark energy stars would correspond to ultra-compact objects if
1273
+ α > 0.4 for model II, see for instance the purple curve for α = 0.7. Right panel: Surface gravitational redshift as a function
1274
+ of the total mass. In the high-redshift region it can be observed that positive (negative) anisotropy increases (decreases) the
1275
+ value of zsur. Meanwhile, the effect of anisotropy is irrelevant for sufficiently low redshifts.
1276
+ whereas the first overtone (n = 1) has one node, the
1277
+ second overtone (n = 2) has two, and so on. Stable stars
1278
+ are described by their oscillatory behavior so that ν_n^2 > 0 (i.e., ν_n is purely real). On the other hand, if any of these
1281
+ is negative for a particular star, the frequency is purely
1282
+ imaginary and hence the star is unstable.
1283
+ Since each higher-order mode has a squared eigenfre-
1284
+ quency that is larger than in the case of the preceding
1285
+ mode, it is enough to calculate the frequency of the fun-
1286
+ damental pulsation mode for the equilibrium sequences
1287
+ presented in Fig. 3.
1288
+ With this in mind, in Fig. 6 we
1289
+ plot the squared frequency of the fundamental oscilla-
1290
+ tion mode as a function of the central density (left panel)
1291
+ and gravitational mass (right panel). According to the
1292
+ left plot, the squared frequency of the fundamental mode
1293
+ is exactly zero at the critical-central-density value corre-
1294
+ sponding to the maximum-mass configuration as shown
1295
+ in the right plot of Fig. 3, see the full blue and orange cir-
+ cles for both models. Furthermore, according to the right
1299
+ plot of Fig. 6, the maximum-mass values (that is, when
1300
+ dM/dρc = 0) can be used as turning points from stability
1301
+ to dynamical instability. Therefore, we can conclude that
1302
+ the usual criterion to guarantee stability dM/dρc > 0 is
1303
+ still valid for the case of anisotropic dark energy stars.
1304
+ In other words, the conventional M(ρc) method is com-
1305
+ patible with the calculation of the eigenfrequencies of the
1306
+ normal vibration modes.
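A minimal sketch of how the turning point dM/dρc = 0 can be located on a tabulated equilibrium sequence (the M(ρc) values below are placeholders, not the models of Fig. 3):

```python
# Locate the turning point dM/drho_c = 0 along an equilibrium sequence.
# The (rho_c, M) arrays are placeholders; in practice each pair comes from
# integrating the structure equations (4)-(6) for one central density.
import numpy as np

log_rho_c = np.linspace(17.8, 18.6, 81)            # hypothetical grid, log10(rho_c [kg/m^3])
mass = 2.0 - 3.0 * (log_rho_c - 18.3) ** 2          # hypothetical M(rho_c) in solar masses

dM = np.gradient(mass, log_rho_c)
turning = np.where(np.diff(np.sign(dM)) < 0)[0]     # "+" -> "-" sign change of dM/drho_c
print("critical log10(rho_c) ~", log_rho_c[turning])
# Configurations below this central density satisfy dM/drho_c > 0 (stable branch).
```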
1307
+ If the anisotropic dark energy star has a central den-
1308
+ sity higher than the one corresponding to the maximum-mass
1309
+ configuration (indicated by full blue and orange circles
1310
+ in Figs. 3 and 6), the star will become unstable against
1311
+ radial perturbations and collapse to form a black hole.
1312
+ For further details on the dissipative gravitational col-
1313
+ lapse of compact stellar objects we also refer the reader
1314
+ to Refs. [55, 97–99].
1315
+ Nonetheless, we must point out
1316
+ that there are EoS models that allow a compact star to
1317
+ migrate to another branch of stable solutions instead of
1318
+ forming a black hole when it is subjected to a perturba-
1319
+ tion. As a matter of fact, the first-order phase transition
1320
+ between nuclear and quark matter can generate multiple
1321
+ stable branches in the mass-radius diagram for hybrid
1322
+ stars [100].
1323
+ C. Moment of inertia
1325
+ To calculate the moment of inertia of anisotropic dark
1326
+ energy stars, we first need to solve the differential equa-
1327
+ tion for the rotational drag (16) with boundary condi-
1328
+ tions (19). In particular, for model I and central density
1329
+ ρc = 1.5 × 10^18 kg/m^3, Fig. 7 illustrates the dragging angular
+ velocity inside and outside the star for several values of α. As can be
1331
+ observed in the right plot, the dragging angular velocity
1332
+ outside the star has the behavior ω(r) ∼ r^−3, so that at
1333
+ infinity (where spacetime is flat) the distant local inertial
1334
+ frames do not rotate around the star, namely, ω(r) → 0
1335
+ for r → ∞. Moreover, anisotropy significantly affects the
1336
+ angular velocity of the local inertial frames in the inte-
1337
+ rior region of the star. More specifically, the dragging
1338
+ angular velocity increases (decreases) for positive (nega-
1339
+ tive) values of the anisotropy parameter α. We can then
1340
+ determine the moment of inertia using the integral given
1341
+ in Eq. (18). For the above central density, we present the
1342
+ moment of inertia of some dark energy configurations for
1343
+ both models in Table II, where it can be noticed that I
1344
+ increases as the value of α increases.
1345
+ We can now calculate the moment of inertia for a whole
1346
+ sequence of dark energy stars by varying the central den-
1347
+ sity ρc. The left panel of Fig. 8 displays the moment of
1348
+ inertia as a function of the gravitational mass for both
1349
+ models. Remarkably, model II provides larger values for
1350
+ the moment of inertia than model I. Indeed, the maxi-
1351
+ mum value Imax depends quite sensitively on the free pa-
1352
+ rameters A and B in the EoS (28). In addition, the main
1353
+ effect of anisotropy on the moment of inertia for slow ro-
1354
+ tation occurs in the high-mass region, while its influence
1355
+ is irrelevant for sufficiently low masses. In order to bet-
1356
+ ter quantify the changes in the maximum values of the
1357
+ moment of inertia induced by the anisotropic pressure,
1358
+ we can define the following relative difference
1359
+ ∆I = (I_max,ani − I_max,iso) / I_max,iso ,          (33)
1363
+ where Imax,iso and Imax,ani are the maximum values of the
1364
+ moment of inertia for isotropic and anisotropic configura-
1365
+ tions, respectively. In the right plot of Fig. 8 we present
1366
+ the dependence ∆I against the anisotropy parameter α.
1367
+ The impact of anisotropy is getting stronger as |α| grows,
1368
+ reaching variations (with respect to the isotropic case) of
1369
+ up to ∼ 20% for α = 0.5. We can also note that such
1370
+ relative variations are almost independent of the model
1371
+ adopted.
1372
+ D. Tidal properties
1374
+ We will now investigate how the anisotropy parameter
1375
+ α affects the tidal properties of dark energy stars. Given
1376
+ a specific value of α, this requires solving the differential
1377
+ equation (23) for a range of central densities. The left
1378
+ panel of Fig. 9 is the result of calculating the tidal Love
1379
+ number (27) for a sequence of stellar configurations by
1380
+ considering different values of α, where the isotropic case
1381
+ corresponds to α = 0. Similar to the trends in strange
1382
+ quark stars, as reported in Ref. [70], the Love number of
1383
+ dark energy stars grows until it reaches a maximum value
1384
+ and then decreases as compactness increases. Note also
1385
+ that the maximum value of k2 is sensitive to the value
1386
+ of α, indicating that the Love number decreases as the
1387
+ parameter α increases for both models. Although model
1388
+ II provides larger maximum masses (as well as redshift
1389
+ and moment of inertia) than model I, we see that the
1390
+ behavior is different for the maximum values in the tidal
1391
+ Love number.
1392
+ Ultimately, in the right plot of Fig. 9, the dimensionless
1393
+ tidal deformability Λ = λ̄/M^5 is plotted as a function of
1394
+ mass, where it can be observed that smaller masses yield
1395
+ higher deformabilities.
1396
+ In each model, the presence of
1397
+ anisotropy has a negligible effect on Λ for small masses,
1398
+ while slightly more significant changes take place only in
1399
+ the high-mass region.
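For reference, Λ follows from k2 and the compactness through the standard relation λ̄ = (2/3)k2R^5, i.e. Λ = (2/3)k2C^−5, which is consistent with the definition Λ = λ̄/M^5 used above; a minimal sketch with illustrative values:

```python
# Dimensionless tidal deformability from the Love number and compactness,
# using the standard relation Lambda = (2/3) * k2 / C^5 (geometrized units).
# The sample values are illustrative, not taken from the paper's models.
def tidal_deformability(k2, compactness):
    return (2.0 / 3.0) * k2 / compactness ** 5

print(tidal_deformability(k2=0.015, compactness=0.20))   # -> 31.25
print(tidal_deformability(k2=0.015, compactness=0.30))   # -> ~4.1 (more compact => smaller Lambda)
```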
1400
+ V. CONCLUSIONS AND OUTLOOK
1402
+ In this work, we have focused on the equilibrium struc-
1403
+ ture of dark energy stars by using a Chaplygin-like equa-
1404
+ tion of state under the presence of both isotropic and
1405
+ anisotropic pressures within the context of standard GR.
1406
+ Our goal was to construct stable compact stars whose
1407
+ characteristics could be compared with the observational
1408
+ data on the mass-radius diagram.
1409
+ In this perspective,
1410
+ the global properties of a compact star such as radius,
1411
+ mass, redshift, moment of inertia, oscillation spectrum
1412
+ [Page break; graphic for Fig. 5: two panels of ζ_n(r) and ∆p_r,n(r) [10^35 Pa] versus r [km] for the n = 0 … 5 modes; see the caption below.]
1453
+ FIG. 5. Numerical solution of the radial pulsation equations (8) and (9) in the case of an anisotropic dark energy star with
1454
+ central density ρc = 1.5 × 10^18 kg/m^3, α = 0.4 and EoS parameters given by model I. The radius, mass and the fundamental
1455
+ mode frequency for such configuration are found in Table II. The lines with different colors and styles indicate different overtones
1456
+ so that the solution corresponding to the nth vibration mode contains n nodes in the internal structure of the star. Note that
1457
+ the eigenfunctions ζn(r) have been normalized assuming ζ = 1 at r = 0, and the Lagrangian perturbation of the radial pressure
1458
+ ∆pr,n(r) obeys the boundary condition (11) at the stellar surface. Since f0 is real, this configuration corresponds to a stable
1459
+ anisotropic dark energy star.
1460
+ [Graphic for Fig. 6: legend (Model I, Model II; α = −0.4 … 0.4) and axis text (Log ρc [kg/m^3], M [M⊙], ν_0^2 [10^9 s^−2]), including an inset; see the caption below.]
1502
+ FIG. 6. Left panel: Squared frequency of the fundamental pulsation mode as a function of central mass density for anisotropic
1503
+ dark energy stars predicted by Einstein gravity. The full blue and orange circles indicate the central density values where
1504
+ ν_0^2 = 0, whose values precisely correspond to the maximum-mass points on the M(ρc) curves on the right plot of Fig. 3. Right
1506
+ plot: Squared frequency of the fundamental mode versus gravitational mass, where it can be observed that the maximum-mass
1507
+ values determine the boundary between stable and unstable stars.
1508
+ and tidal deformability have been calculated. To describe
1509
+ the anisotropic pressure within the dark energy fluid we
1510
+ have adopted the anisotropy profile proposed by Horvat
1511
+ et al. [51], where a free parameter α measures the degree
1512
+ of anisotropy.
1513
+ We have discussed the possibility of observing sta-
1514
+ ble dark energy stars made of a negative pressure fluid
1515
+ “−B/ρ” plus a barotropic component “Aρ”. By way of
1516
+ comparison, the EoS parameters A and B have been cho-
1517
+ sen in such a way that they agree sufficiently with the
1518
+ observational data, e.g. the mass-radius constraint from
1519
+ the GW170817 event. For isotropic configurations, we
1520
+ have shown that various sets of values {A, B} can be
1521
+ chosen since they obey the causality condition and con-
1522
+ sistently describe compact stars observed in the Universe.
1523
+ Furthermore, we saw that the secondary component re-
1524
+ sulting from the gravitational-wave signal GW190814 [91]
1525
+ can be described as a dark energy star using A = 0.4 and
1526
+ [Page break; graphic for Fig. 7: two panels of ϖ/Ω and ω/Ω versus r [km] for α = −0.4 … 0.4; see the caption below.]
1566
+ FIG. 7. Left panel: Numerical solution of the differential equation (16) for a dark energy star described by model I and central
1567
+ density ρc = 1.5 × 10^18 kg/m^3 in the presence of anisotropy for several values of the free parameter α. The solid and dashed
1568
+ lines represent the interior and exterior solutions, respectively. Right panel: Ratio of frame-dragging angular velocity to the
1569
+ angular velocity of the star, namely ω(r)/Ω = 1 − ϖ(r)/Ω. It can be observed that the outer solution behaves asymptotically
1570
+ at large distances from the surface of the star (this is, ω → 0 for r → ∞). Furthermore, appreciable changes in the angular
1571
+ velocity due to anisotropy can be noticeable, mainly in the interior region of the star.
1572
+ [Graphic for Fig. 8: legend (Model I, Model II; α = −0.4 … 0.4) and axis text (M [M⊙], I [10^38 kg·m^2]; α, ∆I [%]); see the caption below.]
1608
+ FIG. 8. Left panel: Moment of inertia versus mass for anisotropic dark energy stars, where a higher mass results in larger
1609
+ moment of inertia for both models. It is observed that the substantial impact of anisotropy on the moment of inertia occurs
1610
+ predominantly in the high-mass branch. Right panel: Relative deviation (33) as a function of the anisotropy parameter. The
1611
+ maximum value of the moment of inertia can undergo variations with respect to its isotropic counterpart of up to ∼ 20% for
1612
+ α = 0.5.
1613
+ B ∈ [4, 5]µ.
1614
+ Based on these results, we have established two mod-
1615
+ els with different values A and B in order to explore
1616
+ the effects of anisotropy in the interior region of a dark
1617
+ energy star. In particular, the maximum-mass values in-
1618
+ crease as the parameter α increases.
1619
+ We noticed that
1620
+ model I without anisotropic pressures is not capable of
1621
+ generating maximum masses above 2M⊙. However, the
1622
+ inclusion of anisotropies (α = 0.4) allows a significant in-
1623
+ crease in the maximum mass and thus a more favorable
1624
+ description of the compact objects observed in nature.
1625
+ On the other hand, model II with anisotropies fits bet-
1626
+ ter with the observational measurements, although such
1627
+ a model can lead to the formation of ultra-compact ob-
1628
+ jects for sufficiently large values of α. We also calculated
1629
+ the surface gravitational redshift for such stars, and our
1630
+ results indicated that zsur is substantially affected by the
1631
+ anisotropy in the high-mass branch, while the changes
1632
+ are irrelevant for sufficiently low masses.
1633
+ A star exists in the Universe only if it is dynamically
1634
+ stable, so our second task was to investigate whether the
1635
+ dark energy stars are stable or unstable with respect to an
1636
+ adiabatic radial perturbation. Our results showed that
1637
+ the standard criterion for radial stability dM/dρc > 0
1638
+ still holds for dark energy stars since the squared fre-
1639
+ quency of the fundamental pulsation mode (ν2
1640
+ [Page break; graphic for Fig. 9: legend (Model I, Model II; α = −0.4 … 0.4) and axis text (C, k2; M [M⊙], Λ); see the caption below.]
1682
+ FIG. 9. Left panel: Tidal Love number plotted as a function of the compactness C ≡ M/R. Right panel: Dimensionless tidal
1683
+ deformability versus gravitational mass predicted by each model, where larger masses yield smaller deformabilities. Note also
1684
+ that the Love number is substantially modified by the anisotropy parameter α for both models, while its greatest effect on tidal
1685
+ deformability Λ occurs only in the high-mass region.
1686
+ ishes at the critical central density corresponding to the
1687
+ maximum-mass configuration. This has been examined
1688
+ in detail for both isotropic (α = 0) and anisotropic
1689
+ (α ̸= 0) stellar configurations.
1690
+ In the slowly rotating approximation, where only first-
1691
+ order terms in the angular velocity are kept, we have also
1692
+ determined the moment of inertia of anisotropic dark en-
1693
+ ergy stars. For this purpose, we first had to calculate the
1694
+ frame-dragging angular velocity for each central density.
1695
+ The presence of anisotropic pressure results in a substan-
1696
+ tial increase (decrease) of the angular velocity ω for more
1697
+ positive (negative) values of α. We found that the signif-
1698
+ icant impact of the anisotropy on the moment of inertia
1699
+ occurs mainly in the high-mass branch for both models.
1700
+ Furthermore, the maximum value of the moment of in-
1701
+ ertia can undergo variations of up to ∼ 20% for α = 0.5
1702
+ as compared with the isotropic case.
1703
+ We have analyzed the effect of anisotropic pressure on
1704
+ the tidal properties of such stars. In particular, our out-
1705
+ comes revealed that the tidal Love number is sensitive to
1706
+ moderate variations of the parameter α, indicating that
1707
+ the maximum value of k2 can increase as α decreases.
1708
+ In addition, the greatest effect of anisotropy on the di-
1709
+ mensionless tidal deformability takes place only in the
1710
+ high-mass region.
1711
+ Based on the foregoing results, the
1712
+ present work thereby serves to develop a comprehensive
1713
+ perspective on the relativistic structure of dark energy
1714
+ stars in the presence of anisotropy.
1715
+ Summarizing, we have explored the possible existence
1716
+ of stable dark energy stars whose masses and radii are
1717
+ not in disagreement with the current observational data.
1718
+ The Chaplygin-like EoS predicts maximum-mass values
1719
+ consistent with observational measurements of highly
1720
+ massive pulsars. Future research includes the adoption
1721
+ of widespread versions of Chaplygin gas that best fit
1722
+ key cosmological parameters. In future studies we will
1723
+ thereby take further steps in that direction, focusing on
1724
+ the different types of generalized Chaplygin gas models
1725
+ as discussed in Ref. [11]. In addition, as carried out in the
1726
+ case of boson stars [101], it would be interesting to em-
1727
+ ploy a Fisher matrix analysis in order to distinguish dark
1728
+ energy stars from black holes and neutron stars from tidal
1729
+ interactions in inspiraling binary systems. It is also worth
1730
+ mentioning that Romano [102] has recently discussed the
1731
+ effects of dark energy on the propagation of gravitational
1732
+ waves. In that regard, we expect that future electromag-
1733
+ netic observations of compact binaries and gravitational-
1734
+ wave astronomy will provide a better understanding of
1735
+ compact stars in the presence of dark energy, and even
1736
+ help us answer the most basic question: How did dark
1737
+ energy form in the Universe? Anyway, our results sug-
1738
+ gest that dark energy stars deserve further investigation
1739
+ by taking into account the cosmological aspects as well
1740
+ as the gravitational-wave signals from binary mergers.
1741
+ ACKNOWLEDGMENTS
1742
+ The author would like to acknowledge the anonymous
1743
+ reviewer for useful constructive feedback and valuable
1744
+ suggestions. The author would also like to thank Maria
1745
+ F. A. da Silva for giving helpful comments. This research
1746
+ work was financially supported by the PCI program of
1747
+ the Brazilian agency “Conselho Nacional de Desenvolvi-
1748
+ mento Científico e Tecnológico”–CNPq.
1749
1751
+ [1] N. Aghanim et al., A&A 641, A6 (2020).
1752
+ [2] S. Weinberg, Rev. Mod. Phys. 61, 1 (1989).
1753
+ [3] T. Padmanabhan, Physics Reports 380, 235 (2003).
1754
+ [4] A. Kamenshchik, U. Moschella,
1755
+ and V. Pasquier,
1756
+ Physics Letters B 511, 265 (2001).
1757
+ [5] M. C. Bento, O. Bertolami, and A. A. Sen, Phys. Rev.
1758
+ D 66, 043507 (2002).
1759
+ [6] R. R. R. Reis, I. Waga, M. O. Calv˜ao, and S. E. Jor´as,
1760
+ Phys. Rev. D 68, 061302 (2003).
1761
+ [7] L. Xu, J. Lu, and Y. Wang, Eur. Phys. J. C 72, 1883
1762
+ (2012).
1763
+ [8] N. Bili´c, G. Tupper, and R. Viollier, Physics Letters B
1764
+ 535, 17 (2002).
1765
+ [9] M. Makler, S. Q. de Oliveira,
1766
+ and I. Waga, Physics
1767
+ Letters B 555, 1 (2003).
1768
+ [10] H. Li, W. Yang, and L. Gai, A&A 623, A28 (2019).
1769
+ [11] J. Zheng et al., Eur. Phys. J. C 82, 582 (2022).
1770
+ [12] Y. Ignatov and M. Pieroni, arXiv:2110.10085 [astro-
1771
+ ph.CO] (2021).
1772
+ [13] S. D. Odintsov, D. S.-C. G´omez,
1773
+ and G. S. Sharov,
1774
+ Phys. Rev. D 101, 044010 (2020).
1775
+ [14] E. J. Copeland, M. Sami,
1776
+ and S. Tsujikawa, Int. J.
1777
+ Mod. Phys. D 15, 1753 (2006).
1778
+ [15] K. Koyama, Rep. Prog. Phys. 79, 046902 (2016).
1779
+ [16] B. Boisseau, G. Esposito-Far`ese, D. Polarski, and A. A.
1780
+ Starobinsky, Phys. Rev. Lett. 85, 2236 (2000).
1781
+ [17] G. Esposito-Far`ese and D. Polarski, Phys. Rev. D 63,
1782
+ 063504 (2001).
1783
+ [18] T. P. Sotiriou and V. Faraoni, Rev. Mod. Phys. 82, 451
1784
+ (2010).
1785
+ [19] A. De Felice and S. Tsujikawa, Living Rev. Relativ. 13,
1786
+ 3 (2010).
1787
+ [20] A. Starobinsky, Physics Letters B 91, 99 (1980).
1788
+ [21] S. M. Carroll et al., Phys. Rev. D 70, 043528 (2004).
1789
+ [22] T. Chiba, Physics Letters B 575, 1 (2003).
1790
+ [23] S. Nojiri and S. D. Odintsov, Physics Reports 505, 59
1791
+ (2011).
1792
+ [24] T. Clifton et al., Physics Reports 513, 1 (2012).
1793
+ [25] S. Nojiri, S. Odintsov, and V. Oikonomou, Physics Re-
1794
+ ports 692, 1 (2017).
1795
+ [26] V. Folomeev, Phys. Rev. D 97, 124009 (2018).
1796
+ [27] G. J. Olmo, D. Rubiera-Garcia, and A. Wojnar, Physics
1797
+ Reports 876, 1 (2020).
1798
+ [28] A. Astashenok et al., Physics Letters B 811, 135910
1799
+ (2020).
1800
+ [29] A. Astashenok et al., Physics Letters B 816, 136222
1801
+ (2021).
1802
+ [30] K. Numajiri, T. Katsuragawa,
1803
+ and S. Nojiri, Physics
1804
+ Letters B 826, 136929 (2022).
1805
+ [31] K. Nobleson, A. Ali, and S. Banik, Eur. Phys. J. C 82,
1806
+ 32 (2022).
1807
+ [32] J. M. Z. Pretel and S. B. Duarte, Class. Quantum Grav.
1808
+ 39, 155003 (2022).
1809
+ [33] J. M. Z. Pretel et al., JCAP 2022, 058 (2022).
1810
+ [34] J. A. Frieman, M. S. Turner,
1811
+ and D. Huterer, Annu.
1812
+ Rev. Astron. Astrophys. 46, 385 (2008).
1813
+ [35] S. S. Yazadjiev, Phys. Rev. D 83, 127501 (2011).
1814
+ [36] M. F. A. R. Sakti and A. Sulaksono, Phys. Rev. D 103,
1815
+ 084042 (2021).
1816
+ [37] S. Smerechynskyi, M. Tsizh, and B. Novosyadlyj, JCAP
1817
+ 2021, 045 (2021).
1818
+ [38] G. Panotopoulos, ´Angel Rinc´on, and I. Lopes, Physics
1819
+ of the Dark Universe 34, 100885 (2021).
1820
+ [39] P. Bhar, Physics of the Dark Universe 34, 100879
1821
+ (2021).
1822
+ [40] R. Chan, M. F. A. da Silva,
1823
+ and J. F. V. da Rocha,
1824
+ Gen. Relativ. Gravit. 41, 1835 (2009).
1825
+ [41] F. Rahaman, S. Ray, A. K. Jafry, and K. Chakraborty,
1826
+ Phys. Rev. D 82, 104055 (2010).
1827
+ [42] C. R. Ghezzi, Astrophys. Space Sci. 333, 437 (2011).
1828
+ [43] P. Bhar, M. Govender, and R. Sharma, Pramana 90, 5
1829
+ (2018).
1830
+ [44] F. Tello-Ortiz et al., Eur. Phys. J. C 80, 371 (2020).
1831
+ [45] J. Estevez-Delgado et al., Mod. Phys. Lett. A 36,
1832
+ 2150213 (2021).
1833
+ [46] L. S. M. Veneroni, A. Braz, and M. F. A. da Silva, Int.
1834
+ J. Mod. Phys. D 30, 2150039 (2021).
1835
+ [47] T. Grammenos et al., Advances in High Energy Physics
1836
+ 2021, 6966689 (2021).
1837
+ [48] Z. Haghani and T. Harko, Phys. Rev. D 105, 064059
1838
+ (2022).
1839
+ [49] R. L. Bowers and E. P. T. Liang, Astrophys. J. 188, 657
1840
+ (1974).
1841
+ [50] M. Cosenza, L. Herrera, M. Esculpi, and L. Witten, J.
1842
+ Math. Phys. 22, 118 (1981).
1843
+ [51] D. Horvat, S. Iliji´c, and A. Marunovi´c, Class. Quantum
1844
+ Grav. 28, 025009 (2010).
1845
+ [52] D. D. Doneva and S. S. Yazadjiev, Phys. Rev. D 85,
1846
+ 124023 (2012).
1847
+ [53] L. Herrera and W. Barreto, Phys. Rev. D 88, 084022
1848
+ (2013).
1849
+ [54] G. Raposo et al., Phys. Rev. D 99, 104072 (2019).
1850
+ [55] J. M. Z. Pretel, Eur. Phys. J. C 80, 726 (2020).
1851
+ [56] R. Rizaldy, A. R. Alfarasyi, A. Sulaksono, and T. Sumaryada, Phys. Rev. C 100, 055804 (2019).
1856
+ [57] E. A. Becerra-Vergara, S. Mojica, F. D. Lora-Clavijo,
1857
+ and A. Cruz-Osorio, Phys. Rev. D 100, 103006 (2019).
1858
+ [58] M. D. Danarianto and A. Sulaksono, Phys. Rev. D 100,
1859
+ 064042 (2019).
1860
+ [59] J. B. Hartle, Astrophys. J. 150, 1005 (1967).
1861
+ [60] J. B. Hartle, Astrophys. Space Sci. 24, 385 (1973).
1862
+ [61] N. K. Glendenning, Compact Stars: Nuclear Physics,
1863
+ Particle Physics, and General Relativity, 2nd ed. (As-
1864
+ tron. Astrophys. Library, Springer, New York, 2000).
1865
+ [62] E. R. Most, L. R. Weih, L. Rezzolla, and J. Schaffner-
1866
+ Bielich, Phys. Rev. Lett. 120, 261103 (2018).
1867
+ [63] K. Chatziioannou, Gen. Relativ. Gravit. 52, 109 (2020).
1868
+ [64] T. Hinderer, Astrophys. J. 677, 1216 (2008).
1869
+ [65] T. Damour and A. Nagar, Phys. Rev. D 80, 084035
1870
+ (2009).
1871
+ [66] T. Binnington and E. Poisson, Phys. Rev. D 80, 084018
1872
+ (2009).
1873
+ [67] S. Postnikov, M. Prakash,
1874
+ and J. M. Lattimer, Phys.
1875
+ Rev. D 82, 024016 (2010).
1876
+ [68] A. G. Chaves and T. Hinderer, J. Phys. G: Nucl. Part.
1877
+ Phys. 46, 123002 (2019).
1878
+ [69] T. Dietrich, T. Hinderer, and A. Samajdar, Gen. Rel-
1879
+ ativ. Gravit. 53, 27 (2021).
1880
+ [70] M. Kumari and A. Kumar, Eur. Phys. J. C 81, 791
1881
+ (2021).
1882
+ [71] T. Regge and J. A. Wheeler, Phys. Rev. 108, 1063
1883
+ (1957).
1884
1886
+ [72] B. Biswas and S. Bose, Phys. Rev. D 99, 104002 (2019).
1887
+ [73] J. V. Cunha, J. S. Alcaniz,
1888
+ and J. A. S. Lima, Phys.
1889
+ Rev. D 69, 083501 (2004).
1890
+ [74] V. Gorini et al., JCAP 2008, 016 (2008).
1891
+ [75] O. F. Piattella, JCAP 2010, 012 (2010).
1892
+ [76] S. F. Salahedin et al., J. Astrophys. Astron. 43, 14
1893
+ (2022).
1894
+ [77] R.
1895
+ von Marttens, D. Barbosa, and J. Alcaniz, arXiv:2208.06302 [astro-ph.CO] (2022).
1903
+ [78] H. O. Silva et al., Class. Quantum Grav. 32, 145008
1904
+ (2015).
1905
+ [79] K. Yagi and N. Yunes, Phys. Rev. D 91, 123008 (2015).
1906
+ [80] A. Rahmansyah et al., Eur. Phys. J. C 80, 769 (2020).
1907
+ [81] A. Rahmansyah and A. Sulaksono, Phys. Rev. C 104,
1908
+ 065805 (2021).
1909
+ [82] J. M. Z. Pretel, Mod. Phys. Lett. A 37, 2250188 (2022).
1910
+ [83] J. Kumar and P. Bharti, New Astronomy Reviews 95,
1911
+ 101662 (2022).
1912
+ [84] E. Annala et al., Nature Phys. 16, 907 (2020).
1913
+ [85] P. Demorest, T. Pennucci, S. Ransom, M. Roberts, and
1914
+ J. Hessels, Nature 467, 1081 (2010).
1915
+ [86] J. Antoniadis et al., Science 340, 6131 (2013).
1916
+ [87] H. T. Cromartie et al., Nature Astronomy 4, 72 (2019).
1917
+ [88] M. C. Miller et al., Astrophys. J. Lett. 887, L24 (2019).
1918
+ [89] T. E. Riley et al., Astrophys. J. Lett. 887, L21 (2019).
1919
+ [90] G. Raaijmakers et al., Astrophys. J. Lett. 887, L22
1920
+ (2019).
1921
+ [91] R. Abbott et al., Astrophys. J. Lett. 896, L44 (2020).
1922
+ [92] B. R. Iyer, C. V. Vishveshwara, and S. V. Dhurandhar,
1923
+ Class. Quantum Grav. 2, 219 (1985).
1924
+ [93] F. Douchin and P. Haensel, A&A 380, 151 (2001).
1925
+ [94] M. Linares, T. Shahbaz, and J. Casares, Astrophys. J.
1926
+ 859, 54 (2018).
1927
+ [95] M. C. Miller et al., Astrophys. J. Lett. 887, L24 (2019).
1928
+ [96] T. E. Riley et al., Astrophys. J. Lett. 887, L21 (2019).
1929
+ [97] J. M. Z. Pretel and M. F. A. da Silva, MNRAS 495,
1930
+ 5027 (2020).
1931
+ [98] R. S. Bogadi, M. Govender,
1932
+ and S. Moyo, Eur. Phys.
1933
+ J. C 81, 922 (2021).
1934
+ [99] R. S. Bogadi and M. Govender, Eur. Phys. J. C 82, 475
1935
+ (2022).
1936
+ [100] M. G. Alford, S. Han, and M. Prakash, Phys. Rev. D
1937
+ 88, 083013 (2013).
1938
+ [101] N. Sennett et al., Phys. Rev. D 96, 024002 (2017).
1939
+ [102] A. E. Romano, arXiv:2211.05760 [gr-qc] (2022).
1940
+
0dE1T4oBgHgl3EQf4wVz/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
39AyT4oBgHgl3EQf1_mj/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c479ba589b0f0a41e8896c9f5d9321fe57ff76085e0e24dcc095d7bbdd6c2582
3
+ size 3735597
39E2T4oBgHgl3EQfOAbJ/content/2301.03744v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7e6506c4c57c14422803fa575e15cd67efa2441b7fe6a7862a8e3d1986d6a3a
3
+ size 4513237
39E2T4oBgHgl3EQfOAbJ/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ead374b1abbe53cb6aef85b039d7b452433a08682fa237e0753efee3add6fdb
3
+ size 3735597
39E2T4oBgHgl3EQfOAbJ/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c77f7eb4f1f096593fae880df245bb5b5908be119ad461766c1fa88472cc02e9
3
+ size 130029
39E2T4oBgHgl3EQfjgft/content/2301.03970v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34da1a629eda3f5af485ea0122f90ff4eeee1ded08654b65a8255b675d45ce2c
3
+ size 365394
39E2T4oBgHgl3EQfjgft/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7ef2aed3b2076af3f9d575be8e760318027de794adeb010a14f574d61da3372
3
+ size 5505069
3NFAT4oBgHgl3EQflB25/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4cc9f1204c8fa7d0935449b65c67ebce70352db358ee5764eff198115f673c4
3
+ size 2883629
3dFKT4oBgHgl3EQfQy0a/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc581c23c6976646d7740ffdf7d61fc0661e1ec40941b14abd3378d15d998385
3
+ size 359057
4NE1T4oBgHgl3EQf6AXQ/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1402d7bdaf4e6b67794300c0927351e07820fb69030daa45b4c294e6b481a447
3
+ size 167174
4dE1T4oBgHgl3EQf6QUq/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25509116b51cf3c3293fa38cd37edd670f65e4842d3e2b4c0377b3863ce72564
3
+ size 1966125
4tAzT4oBgHgl3EQffvxD/content/2301.01456v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b128b796fc7d155961fe0081b5499ab1e1e774170403d47dfe43444f82b8f8c4
3
+ size 538501
4tAzT4oBgHgl3EQffvxD/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9930099a7b7a4ac122859ddc8d500bfd3454ef42ab2ee8723afe2ffa1c981e99
3
+ size 130607
79E1T4oBgHgl3EQfBwKM/content/tmp_files/2301.02856v1.pdf.txt ADDED
@@ -0,0 +1,1946 @@
 
1
+ 1
2
+ Neural Network-Based DOA Estimation in the
3
+ Presence of Non-Gaussian Interference
4
+ S. Feintuch, J. Tabrikian, Fellow, IEEE, I. Bilik, Senior Member, IEEE, and H. Permuter, Senior Member, IEEE
5
+ Abstract—This work addresses the problem of direction-of-
6
+ arrival (DOA) estimation in the presence of non-Gaussian,
7
+ heavy-tailed, and spatially-colored interference. Conventionally,
8
+ the interference is considered to be Gaussian-distributed and
9
+ spatially white. However, in practice, this assumption is not guar-
10
+ anteed, which results in degraded DOA estimation performance.
11
+ Maximum likelihood DOA estimation in the presence of non-
12
+ Gaussian and spatially colored interference is computationally
13
+ complex and not practical. Therefore, this work proposes a
14
+ neural network (NN) based DOA estimation approach for spa-
15
+ tial spectrum estimation in multi-source scenarios with a-priori
16
+ unknown number of sources in the presence of non-Gaussian
17
+ spatially-colored interference. The proposed approach utilizes
18
+ a single NN instance for simultaneous source enumeration and
19
+ DOA estimation. It is shown via simulations that the proposed
20
+ approach significantly outperforms conventional and NN-based
21
+ approaches in terms of probability of resolution, estimation
22
+ accuracy, and source enumeration accuracy in conditions of low
23
+ SIR, small sample support, and when the angular separation
24
+ between the source DOAs and the spatially-colored interference
25
+ is small.
26
+ Index Terms—Array Processing, DOA Estimation, Source
27
+ Enumeration, Spatially-Colored Interference, Non-Gaussian In-
28
+ terference, Neural Networks, Deep Learning, Machine Learning,
29
+ MVDR, MDL, AIC, Radar.
30
+ I. INTRODUCTION
31
+ Direction-of-arrival (DOA) estimation using a sensor array
32
+ is required in multiple applications, such as radar, sonar,
33
+ ultrasonic, wireless communications, and medical imaging [1].
34
+ In real-world applications, the signal received at the sensor
35
+ array is a superposition of signals from the sources of interest,
36
+ interference, and receiver thermal noise. In radars, the received
37
+ signal consists of a target echo, clutter, and thermal noise. In
38
+ multiple scenarios, the radar clutter has a spatially-colored,
39
+ heavy-tailed non-Gaussian distribution [2], which can signifi-
40
+ cantly degrade the performance of conventional estimators.
41
+ Minimum-variance-distortionless-response (MVDR) [3], is
42
+ a conventional adaptive beamforming approach for DOA es-
43
+ timation. MVDR estimates the spatial spectrum and obtains
44
+ the source DOAs via a one-dimensional peak search on a
45
+ predefined grid. The estimation of signal parameters using
46
+ rotational invariance techniques (ESPRIT) [4], multiple signal
47
+ classification (MUSIC) [5], and root-MUSIC (R-MUSIC) [6]
48
+ are additional widely used DOA estimation approaches. These
49
+ approaches involve received signal autocorrelation matrix
50
+ processing, which conventionally is performed via the sam-
51
+ ple autocorrelation matrix estimation [3]–[6]. However, the
52
+ Stefan Feintuch, Joseph Tabrikian, Igal Bilik, and Haim H. Permuter
53
+ are with the School of Electrical and Computer Engineering, Ben Gurion
54
+ University of the Negev, Beer Sheva, Israel. (e-mails: [email protected],
55
56
+ performance of the sample autocorrelation matrix estimator
57
+ degrades in small sample support or non-Gaussian scenarios.
58
+ Furthermore, these methods use the second-order statistics
59
+ only and omit the higher-order statistics on non-Gaussian-
60
+ distributed interference. In addition, ESPRIT, MUSIC, and R-
61
+ MUSIC approaches require a-priori knowledge of the number
62
+ of sources (or targets), which limits their practical use.
63
+ The problem of DOA estimation in the presence of non-
64
+ Gaussian interference is of great practical interest. The max-
65
+ imum likelihood estimator (MLE) for DOA estimation in the
66
+ presence of non-Gaussian interference does not have a closed-
67
+ form analytical solution [7], [8]. Multiple model-based DOA
68
+ estimation approaches have been intensively studied in the
69
+ literature [7]–[18].
70
+ Robust covariance matrix-based DOA estimation and source
71
+ enumeration methods have been studied in the literature. For
72
+ complex elliptically symmetric (CES) distributed data, the
73
+ authors in [9] showed that a scatter matrix-based beamformer
74
+ is consistent, and the semiparametric lower bound and Slepian-
75
+ Bangs formula for DOA estimation were derived in [10].
76
+ In [11], a generalized covariance-based (GC) approach for
77
+ the covariance matrix estimation in scenarios with impulsive
78
+ alpha-stable noise was proposed for MUSIC DOA estimation.
79
+ However, these methods consider a specific family of distri-
80
+ butions, such as the CES or alpha-stable, and are therefore,
81
+ limited in the case of model mismatch. In [12], a probability
82
+ measure transform (MT) based covariance matrix estimator
83
+ was proposed for MUSIC-based DOA estimation and mini-
84
+ mum descriptive length (MDL) based source enumeration. The
85
+ MT-based covariance estimator was also adopted for robust
86
+ MVDR beamformer [13]. These methods are usually based
87
+ on setting a parameter that determines the tradeoff between
88
+ the level of robustness and performance.
89
+ The problem of DOA estimation in the presence of a mix-
90
+ ture of spatially-white K-distributed and Gaussian-distributed
91
+ noise under a deterministic and unknown (conditional) source
92
+ model was studied in [7]. An iterative MLE-based approach
93
+ for the conditional and joint likelihood of interference distri-
94
+ bution’s parameters was derived in [14], [15]. This approach
95
+ was further extended in [16] to marginal likelihood function.
96
+ However, this approach is computationally complex due to
97
+ numerical integral evaluation that involves a 2M dimensional
98
+ grid search for M targets [8]. Therefore, [8] proposed a kernel
99
+ minimum error entropy-based adaptive estimator and a novel
100
+ criterion to reduce the estimator’s computational complexity.
101
+ The expectation-maximization (EM) with a partial relaxation-
102
+ based DOA estimation algorithm under the conditional model
103
+ assumption was proposed in [17]. In [18] a sparse Bayesian
104
+ learning (SBL) approach for outlier rejection of impulsive
105
+ arXiv:2301.02856v1 [eess.SP] 7 Jan 2023
106
+
107
+ 2
108
+ and spatially-white interference was proposed. This EM-based
109
+ approach does not require a-priori knowledge of the number
110
+ of sources and was shown to resolve highly-correlated and co-
111
+ herent sources. However, none of these model-based DOA es-
112
+ timation approaches considered an a-priori unknown number
113
+ of sources and spatially-colored interference and therefore are
114
+ limited for real-world applications. Although source enumera-
115
+ tion methods, such as MDL and Akaike information criterion
116
+ (AIC) [19] can be used, they assume signal Gaussianity, and
117
+ can therefore be inaccurate in non-Gaussian scenarios.
118
+ Deep learning and machine learning approaches were re-
119
+ cently adopted for radar signal processing. Three types of
120
+ NN-based DOA estimation approaches have been introduced
121
+ in literature [20]. The first approach assumes a-priori known
122
+ number of sources, and uses a NN, which is optimized to
123
+ output a vector of the estimated DOAs [21]–[27]. The second
124
+ approach does not assume a-priori known number of sources
125
+ and uses a NN for source enumeration [25]–[31]. The third
126
+ approach uses a NN to estimate source presence probability
127
+ at each DOA on a predefined angular grid and obtains the
128
+ source DOAs via a peak search [32]–[41]. However, all these
129
+ approaches have not addressed non-Gaussian and spatially-
130
+ colored interference [20]–[41].
131
+ The cases of non-Gaussian and/or spatially-colored inter-
132
+ ference have been addressed using machine learning-based
133
+ approaches. For massive MIMO cognitive radar, a reinforce-
134
+ ment learning-based approach for multi-target detection un-
135
+ der heavy-tailed spatially-colored interference was proposed
136
+ in [42]. In [43], authors addressed the MIMO radar target
137
+ detection under non-Gaussian spatially-colored interference
138
+ by using a CNN architecture that is optimized according to
139
+ a novel loss. A radial-basis-function (RBF) NN [44] and a
140
+ convolutional neural network (CNN) [45] architectures were
141
+ proposed for DOA estimation in the presence of non-Gaussian
142
+ impulsive noise. In [46], a CNN-based architecture that in-
143
+ cludes denoising NN, source enumeration NN, and DOA esti-
144
+ mation sub-NNs, was introduced. However, [44]–[46] consider
145
+ spatially-white noise and are suboptimal for scenarios with
146
+ spatially-colored interference.
147
+ This work addresses the problem of DOA estimation of a-
148
+ priori unknown number of sources in the presence of non-
149
+ Gaussian, heavy-tailed, spatially-colored interference at a low
150
+ signal-to-interference ratio (SIR) and small sample size. The
151
+ contribution of this work include:
152
+ 1) A novel NN-based processing mechanism is used for
153
+ array processing within non-Gaussian spatially-colored
154
+ interference. The proposed NN architecture utilizes the
155
+ structure of information within the set of received com-
156
+ plex snapshots.
157
+ 2) The proposed NN is optimized to output an interference-
158
+ mitigated spatial spectrum, and is used for simultaneous
159
+ source enumeration and DOA estimation of sources
160
+ within non-Gaussian spatially-colored interference.
161
+ The proposed approach outperforms conventional adaptive
162
+ beamforming and competing straightforward NN-based meth-
163
+ ods in terms of probability of resolution and estimation
164
+ accuracy in scenarios with non-Gaussian spatially-colored
165
+ interference. In addition, the proposed approach outperforms
166
+ conventional source enumeration techniques in scenarios char-
167
+ acterized by non-Gaussian spatially-colored interference.
168
+ The following notations are used throughout the paper.
169
+ Roman boldface lower-case and upper-case letters represent
170
+ vectors and matrices, respectively while Italic letters stand for
171
+ scalars. IN is the identity matrix of size N × N and 1N
172
+ is a column vector of length N whose entries are equal to
173
+ one. E(·), (·)T , and (·)H are the expectation, transpose, and
174
+ Hermitian transpose operators, respectively. Vec(·), diag(·),
175
+ and | · | stand for the vectorization, diagonalization, and
176
+ absolute value operators, respectively. [a]n and [A]n,m are the
177
+ n-th and n, m-th elements of the vector a and the matrix A,
178
+ respectively.
179
+ The remainder of this paper is organized as follows. The
180
+ addressed problem is stated in Section II. Section III intro-
181
+ duces the proposed NN-based DOA estimation approach. The
182
+ proposed approach is evaluated via simulations in Section IV.
183
+ Our conclusions are summarized in Section V.
184
+ II. PROBLEM DEFINITION
185
+ This work considers the problem of DOA estimation using
186
+ an array of L receiving elements and M distinct and unknown
187
+ sources with DOAs, Θ = {θ1, . . . , θM}. The measurements
188
+ contain K spatial snapshots, {xk}K
189
+ k=1:
190
+ xk = A (Θ) sk + σcck + nk ,
191
+ (1)
192
+ =
193
+ M
194
+
195
+ m=1
196
+ a (θm) sk,m + σcck + nk , k = 1, . . . , K ,
197
+ where A (Θ) =
198
+ �a (θ1)
199
+ · · ·
200
+ a (θM)�
201
+ , with a (θm) ∈ CL
202
+ denoting the steering vector for source at direction θm,
203
+ and sk ≜
204
+ �sk,1
205
+ · · ·
206
+ sk,M
207
+ �T is the source signal vector.
208
+ We assume an unconditional model [47], where {sk}
209
+ i.i.d.
210
+
211
+ CN
212
+
213
+ 0M, diag
214
+
215
+ σ2
216
+ 1, . . . , σ2
217
+ M
218
+ ��
219
+ , is temporally uncorrelated be-
220
+ tween pulses. The targets are assumed to be spatially distinct.
221
+ The receiver thermal noise, denoted by nk, is considered to be
222
+ complex Gaussian-distributed {nk}
223
+ i.i.d.
224
+ ∼ CN
225
+
226
+ 0L, σ2
227
+ nIL
228
+
229
+ . The
230
+ heavy-tailed non-Gaussian and spatially-colored interference is
231
+ modeled by the interference amplitude σc, and the interference
232
+ component ck ∈ CL. The considered compound-Gaussian
233
+ distributed interference, {ck}
234
+ i.i.d.
235
+ ∼ K (ν, θc) represents a non-
236
+ Gaussian interference with angular spread around an unknown
237
+ direction θc, such that c ∼ K (ν, θc) implies
238
+ c = √τz ,
239
+ (2)
240
+ τ
241
+ |=
242
+ z, τ ∼ Γ (ν, ν) , z ∼ CN (0L, Mθc) .
243
+ The compound-Gaussian statistical model is conventionally
244
+ used in the literature to model heavy-tailed non-Gaussian
245
+ interference [7], [8], [14], [16], [43], [48]. The texture com-
246
+ ponent, τ ∈ R+, determines the heavy-tailed behavior and
247
+ is characterized by, ν. The speckle component, z ∈ CL,
248
+ determines the spatial distribution of the interference and
249
+ is characterized by the covariance matrix, Mθc. The spatial
250
+ covariance matrix of the interference upholds:
251
+ E
252
+
253
+ σ2
254
+ cccH�
255
+ =σ2
256
+ cE [τ] E
257
+
258
+ zzH�
259
+ = σ2
260
+ cMθc ,
261
+ (3)
262
+
263
+ 3
264
+ where Mθc can be modeled as [14]–[16], [43], [48]:
265
+ [Mθc]m,l = ρ|m−l|ej(m−l)π sin θc .
266
+ (4)
267
+ The model in (3) and (4), represents the spatial interference,
268
+ characterized by ρ, with a spread around the interference DOA,
269
+ θc.
270
+ III. THE PROPOSED DAFC-BASED NEURAL NETWORK
271
+ The proposed approach generalizes the NN architecture that
272
+ was introduced for linear-frequency-modulated (LFM) radar
273
+ target detection in the range-Doppler domain [49]. In the
274
+ following, the data pre-processing and the proposed NN-based
275
+ processing mechanism are introduced in Subsections III-A and
276
+ III-B. The proposed NN architecture and loss function are
277
+ detailed in Subsections III-C and III-D, respectively.
278
+ A. Pre-Processing
279
+ The input matrix, X ∈ CL×K is constructed from the set
280
+ of K snapshots in (1), {xk}:
281
+ X =
282
+
283
+ x1
284
+ x2
285
+ · · ·
286
+ xK
287
+
288
+ ,
289
+ (5)
290
+ where the k-th column of X contains the k-th snapshot.
291
+ The variation between the columns of X is induced by the
292
+ statistical characteristics of the source signal sk, interference
293
+ signal ck, and thermal noise nk. Therefore, each column in
294
+ X can be interpreted as a complex “feature” vector containing
295
+ essential information for DOA estimation. The set of columns
296
+ in X can be interpreted as “realizations” of that feature.
297
+ The complex-valued matrix, X, is converted into real-valued
298
+ representation needed for the NN-based processing. To keep
299
+ consistency with [49], we apply a transpose operator to the
300
+ input matrix, such that the snapshots are stacked in rows. The
301
+ output of the pre-processing denoted by Z0 ∈ CK×2L, is:
302
+ Z0 =
303
+
304
+ Re
305
+
306
+ XT �
307
+ , Im
308
+
309
+ XT ��
310
+ .
311
+ (6)
312
+ B. Dimensional Alternating Fully-Connected
313
+ The dimensional alternating fully-connected (DAFC) block
314
+ was introduced to process measurements in a form similar to
315
+ the model in Section II [49]. Fig. 1 schematically shows the
316
+ DAFC mechanism.
317
+ For arbitrary dimensions D1, D2, D3, the formulation of a
318
+ general fully-connected (FC) layer applied to each row in a
319
+ given matrix Z ∈ RD1×D2 can be represented by the transform
320
+ F (·):
321
+ F : RD1×D2 → RD1×D3 ,
322
+ (7)
323
+ F (Z) ≜ h
324
+
325
+ ZW + 1D1bT �
326
+ .
327
+ This matrix-to-matrix transformation is characterized by the
328
+ “learnable” weight matrix, W ∈ RD2×D3, the bias vector,
329
+ b ∈ RD3, and a scalar element-wise activation function, h(·).
330
+ Let Fr (·) and Fc (·) be two separate, and not necessarily
331
+ identical instances of F (·) from (7), and Zin be an arbitrary
332
+ input matrix. The DAFC mechanism is formulated by the
333
+ following operations:
334
+ Dimensional Alternating Fully Connected
335
+ • Input: Zin ∈ RH×W
336
+ Fr : RH×W → RH×W ′
337
+ Fc : RW ′×H → RW ′×H′
338
+ 1) Apply a single FC layer to each row in Zin:
339
+ Zr = Fr (Zin)
340
+ 2) Apply a single FC layer to each column in Zr:
341
+ Zc = Fc
342
+
343
+ ZT
344
+ r
345
+
346
+ 3) Transpose to keep orientation:
347
+ Zout = ZT
348
+ c
349
+ • Output: Zout ≜ S (Z) ∈ RH′×W ′
350
+ In the following, three DAFC design principles are detailed.
351
+ 1) Structured transformation
352
+ The input to the first DAFC block is the pre-processed, Z0,
353
+ given in (6). Therefore, the first FC layer, Fr, of the first DAFC
354
+ block extracts spatial-related features from each row in Z0.
355
+ The second FC layer, Fc, of the first DAFC block, introduces
356
+ an interaction between transformed rows. This implies that
357
+ a) Fr performs “spatial-feature” extraction by transforming
358
+ the pre-processed i.i.d. snapshots (the rows of Z0) to a
359
+ high-dimensional feature space, and b) the Fc performs a
360
+ nonlinear transformation of the extracted features (the columns
361
+ of Fr (Z0)) from each snapshot. In this way, the DAFC utilizes
362
+ both spatial and statistical information. In addition, it can
363
+ exploit high-order statistics-related features. Thus, the DAFC
364
+ mechanism can contribute to estimating the source DOAs and
365
+ mitigating the interference when incorporated into a NN.
366
+ 2) Sparsity
367
+ Conventional DOA estimation considers the input data as
368
+ the collection of measurement vectors (the snapshots {xk}) in
369
+ a matrix form. One straightforward approach to processing
370
+ the input data using a NN is to reshape it and process it
371
+ via an FC-based architecture. In this way, each neuron in the
372
+ layer’s output interacts with every neuron in the input. On
373
+ the other hand, the DAFC block transforms the data using
374
+ a structured transformation, which is significantly sparser in
375
+ terms of learnable parameters compared to the straightforward
376
+ FC-based approach.
377
+ This parameter reduction can be observed in the following
378
+ typical case. Consider an input matrix Z1 ∈ RD1×D1, which
379
+ is transformed to an output matrix Z2 ∈ RD2×D2. The
380
+ number of learnable parameters in the FC- and the proposed
381
+ DAFC-based approaches is of the order of O
382
+
383
+ D2
384
+ 1D2
385
+ 2
386
+
387
+ , and
388
+ O (D1D2), respectively. Notice that the DAFC-based transfor-
389
+ mation complexity grows linearly with the number of learnable
390
+ parameters compared to the quadratic complexity growth of
391
+ the straightforward, FC-based approach.
392
+ The contribution of learnable parameters dimension reduc-
393
+ tion is twofold. First, the conventional NN optimization is
394
+ gradient-based [50]. Therefore, a significant reduction in the
395
+ learnable parameter dimension reduces the degrees of freedom
396
+ in the optimizable parameter space and improves the gradient-
397
+ based learning algorithm convergence rate. Second, reduction
+ Figure 1: The DAFC mechanism concept. Each row of dimen-
401
+ sion W in Zin, represented by the red color, is transformed by
402
+ Fr to a row of dimension W ′ in the middle matrix, represented
403
+ by the transparent red color. Next, each column of dimension
404
+ H in the middle matrix, represented by the blue color, is
405
+ transformed by Fc to a column of dimension H′ in Zout,
406
+ represented by the transparent blue color.
407
+ in the learnable parameter dimension can be interpreted as
408
+ increasing the “inductive bias” of the NN model [51], which
409
+ conventionally contributes to the NN statistical efficiency and
410
+ generalization ability, thus reducing the NN's tendency to
411
+ overfit the training data.
412
+ 3) Nonlinearity
413
+ The proposed DAFC considers an additional degree of
414
+ nonlinearity compared to the straightforward FC-based ap-
415
+ proach. A straightforward matrix-to-matrix approach includes
416
+ an interaction of every neuron in the output matrix with
417
+ every neuron in the input matrix, followed by an element-wise
418
+ nonlinear activation function. On the other hand, the proposed
419
+ DAFC consists of two degrees of nonlinearity, in Fr and Fc.
420
+ Although the weight matrices applied as part of Fr and Fc
421
+ are of lower dimension than the weight matrix used in the
422
+ straightforward approach, the extra degree of nonlinearity can
423
+ increase the NN’s capacity [50]. Therefore, a NN architecture
424
+ with the proposed DAFC is capable of learning a more abstract
425
+ and rich transformation of the input data.
426
+ C. NN Architecture
427
+ The continuous DOA space is discretized into a d-
428
+ dimensional grid: φ = [φ_1, φ_2, · · · , φ_d]^T. This implies that the entire
+ field-of-view (FOV) is partitioned into d DOAs, {φ_i}_(i=1)^d, determined by the
+ selected grid resolution, ∆φ ≜ φ_(i+1) − φ_i. The proposed NN is designed to
+ represent a mapping from the input set of snapshots, {x_k} given in (1), into the
+ probability of a source being present at the DOAs {φ_i}_(i=1)^d.
+ The proposed NN architecture is formulated as follows:
+ Z_0 = P(X),
+ z_vec = Vec( S_6( · · · S_1(Z_0) ) ),      (8)
+ ˆy = G_3( G_2( G_1(z_vec) ) ),
446
+ Operator | Output Dimension | Activation | # Parameters
+ P        | K × 2L           | -          | -
+ S1       | 64 × 256         | tanh-ReLU  | 9,536
+ S2       | 128 × 512        | tanh-ReLU  | 139,904
+ S3       | 256 × 1024       | tanh-ReLU  | 558,336
+ S4       | 64 × 512         | tanh-ReLU  | 541,248
+ S5       | 16 × 256         | tanh-ReLU  | 132,368
+ S6       | 4 × 128          | tanh-ReLU  | 32,964
+ vec      | 512              | -          | -
+ G1       | 1024             | tanh       | 525,312
+ G2       | 256              | tanh       | 262,400
+ G3       | d                | sigmoid    | 31,097
+ Table I: Specification of the proposed NN architecture for K = 16, L = 16, d = 121.
+ “tanh-ReLU” stands for tanh in Fr and ReLU in Fc of each DAFC block. The total
+ number of learnable parameters is 2,233,165.
506
+ where Z0 is the output of the pre-processing procedure,
507
+ denoted as P (·) and detailed in Section III-A, and X is the
508
+ input matrix in (5).
509
+ In the next stage, six DAFC instances, represented by
510
+ S1 (·) , . . . , S6 (·), of different dimensions with tanh activa-
511
+ tion for the row transform (Fr in Section III-B) and ReLu
512
+ activation for the column transform (Fc in Section III-B), are
513
+ used to generate the vectorized signal zvec. Our experiments
514
+ showed that this configuration of row and column activation
515
+ functions provides the best performance. At the last stage, the
516
+ signal, zvec, is processed by three FC layers, where the first
517
+ two use tanh activation, and the final (output) layer of equal
518
+ size to the DOA grid dimension, d, uses sigmoid activation
519
+ function to output ˆy ∈ [0, 1]^d. Thus, {[ˆy]_i}_(i=1)^d represent the estimated
+ probabilities of a source presence at {φ_i}_(i=1)^d. Table I and Fig. 2 summarize
+ the parameters and architecture of the proposed NN-based approach.
525
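+ As a rough illustration of how the stages in Table I compose, the following
+ PyTorch-style sketch stacks six DAFC blocks (reusing the DAFCBlock sketched in
+ Section III-B) followed by vectorization and three FC layers; the dimensions follow
+ Table I, while the class and variable names are ours.
+ import torch.nn as nn
+
+ class DAFCNet(nn.Module):
+     # Six DAFC blocks followed by vectorization and three FC layers (Table I).
+     def __init__(self, K=16, L=16, d=121):
+         super().__init__()
+         dims = [(K, 2 * L), (64, 256), (128, 512), (256, 1024),
+                 (64, 512), (16, 256), (4, 128)]
+         self.dafc = nn.ModuleList(
+             DAFCBlock(h, w, h2, w2) for (h, w), (h2, w2) in zip(dims[:-1], dims[1:]))
+         self.head = nn.Sequential(
+             nn.Linear(4 * 128, 1024), nn.Tanh(),
+             nn.Linear(1024, 256), nn.Tanh(),
+             nn.Linear(256, d), nn.Sigmoid())
+
+     def forward(self, z0):                  # z0: (batch, K, 2L) pre-processed input
+         for block in self.dafc:
+             z0 = block(z0)
+         return self.head(z0.flatten(1))     # estimated spatial spectrum in [0, 1]^d
+ With these dimensions the per-block parameter counts reproduce the values listed in
+ Table I (e.g., 9,536 for S1 and 31,097 for G3).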
+ The estimated source DOAs are extracted from the spatial
526
+ spectrum via peak search and applying 0.5 threshold:
527
+ {i_1, . . . , i_ˆN} = peak search( {[ˆy]_i}_(i=1)^d ),      (9)
+ ˆΘ = { φ_(i_n) : [ˆy]_(i_n) > 0.5 }_(n=1)^ˆN .
540
+ Namely, the set of estimated DOAs, ˆΘ, consists of the grid
541
+ points corresponding to the peaks of ˆy that exceed the 0.5
542
+ threshold. The number of peaks that exceed this threshold is
543
+ used for source enumeration, and therefore the proposed NN
544
+ can be utilized as a source enumeration method as well.
545
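+ As an illustration, the peak search and thresholding in (9) can be implemented in a
+ few lines; the function below is a sketch using SciPy's find_peaks, and the function
+ and argument names are ours rather than the paper's.
+ import numpy as np
+ from scipy.signal import find_peaks
+
+ def extract_doas(y_hat, phi_grid, threshold=0.5):
+     # y_hat: (d,) estimated spatial spectrum, phi_grid: (d,) grid DOAs in degrees.
+     peaks, _ = find_peaks(y_hat, height=threshold)   # local maxima above the threshold
+     return phi_grid[peaks], len(peaks)                # estimated DOAs and source count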
+ The dimensionality of the hidden layers in the proposed
+ Figure 2: Proposed NN architecture. The pre-processing P is described in Section III-A and appears in yellow. The purple
549
+ matrices denote the concatenation of DAFC blocks, which is detailed in Section III-B. The blue vector represents a vectorization
550
+ of the last DAFC output, and the orange vectors stands for FC layers with tanh activation function. The last green vector is
551
+ the output of the last FC layer, which consists of sigmoid activation function and yields the estimated spatial spectrum ˆy.
552
+ NN architecture expands in the first layers and then reduces.
553
+ This trend resembles the NN architecture presented in [49] and
554
+ characterizes both the DAFC-based and FC-based processing
555
+ stages. This expansion-reduction structure can be explained
556
+ by a) the early NN stages need to learn an expressive and
557
+ meaningful transformation of the input data by mapping it to
558
+ a higher dimensional representation and b) the late stages need
559
+ to extract significant features from the early mappings, and are
560
+ therefore limited in dimensionality. In addition, the late stages
561
+ are adjacent to the output vector and therefore need to be of
562
+ similar dimension.
563
+ D. Loss Function
564
+ The label used for the supervised learning process, y ∈
565
+ {0, 1}d, is defined as a sparse binary vector with the value 1,
566
+ at the grid points that correspond to the source DOAs, and
567
+ 0, otherwise. In practice, the DOAs in Θ do not precisely
568
+ correspond to the grid points. Therefore, for each DOA in
569
+ Θ, the nearest grid point in {φi}d
570
+ i=1 is selected as the
571
+ representative grid point in the label. Each training example
572
+ is determined by the input-label pair, (X, y). Using the NN
573
+ feed-forward in (8), X is used to generate the output spatial
574
+ spectrum, ˆy, which is considered as the estimated label.
575
+ The loss function, L, is a weighted mean of the binary cross
576
+ entropy (BCE) loss computed at each grid point:
577
+ L(y, ˆy, t) = (1/d) Σ_(i=1)^d w_i^(t) BCE([y]_i, [ˆy]_i),      (10)
+ BCE(y, ˆy) = −y log(ˆy) − (1 − y) log(1 − ˆy),
586
+ where w_i^(t) represents the loss weight of the i-th grid point at the t-th epoch.
+ The loss value for equally-weighted BCEs evaluated per grid point (w_i^(t) = 1 in
+ (10)) does not significantly increase in the case of a large error in the estimated
+ source/interference probability, due to the sparsity of the label y. This forces the
+ NN convergence into a sub-optimal solution that is prone to “miss” the sources.
+ Therefore, the loss weights, {w_i^(t)}_(i=1)^d, are introduced to “focus” the penalty
+ on source/interference grid points.
601
+ The loss weight of the i-th grid point, w_i^(t), is determined by the presence of
+ source or interference in the corresponding label entry [y]_i. This relation is
+ defined using the epoch- and label-dependent factors e_0^(t), e_1^(t), according to:
+ w_i^(t) = 1/e_1^(t),  if φ_i contains source or interference,
+           1/e_0^(t),  else.      (11)
620
+ For t = 0, the factor e_1^(0) is determined by the fraction of label grid points
+ that contain source or interference out of the total label grid points in the
+ training set, and e_0^(0) is the corresponding complement. For subsequent epochs,
+ the factors are updated according to a predefined schedule, similarly to a
+ predefined learning rate schedule. The loss weights are updated Nw times with
+ spacing of ∆t epochs during training. The update values are determined by updating
+ e_0^(t), e_1^(t) according to the following decaying rule:
+ e_q^(t) = (1 − β(l)) e_q^(l∆t) + β(l),   l∆t ≤ t < (l + 1)∆t,      (12)
+ q = 0, 1,   l = 1, . . . , Nw,
+ where l is the loss weight update iteration, and {β(l)}_(l=1)^Nw are the loss weight
+ update factors, which satisfy 0 ≤ β(l) ≤ 1. Note that for Nw∆t ≤ t, the weight
+ factors remain e_i^(Nw∆t) during the rest of the training stage. Notice that as
+ β(l) → 1, the loss weights tend to be equally distributed across the grid points,
+ i.e., e_1^(t) ≈ e_0^(t). In this case, an erroneously estimated probability for a
+ source/interference-containing grid point is weighted equally to that of a grid
+ point containing neither. On the other hand, as β(l) → 0, the factors satisfy
+ e_1^(t) ≪ e_0^(t), yielding a significantly larger contribution of
+ source/interference-containing grid points to the loss value.
+ The rule in (12) enables a “transition of focus” throughout the training. During the
+ early epochs, β(l) → 0, which assigns more weight to the source/interference-
+ containing areas of the estimated label ˆy (i.e., the estimated spatial spectrum),
+ focusing the NN on being correct for sources and interference. During the later
+ epochs, β(l) is incrementally increased, which relaxes this early focus and thereby
+ reduces erroneously estimated sources in areas that contain neither source nor
+ interference (i.e., “false alarms”).
674
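+ For concreteness, a NumPy sketch of the weighted BCE loss (10), the weight
+ assignment (11), and the factor update (12) is given below; the small epsilon for
+ numerical stability is our addition, and the function names are illustrative.
+ import numpy as np
+
+ def loss_weights(y, e0, e1):
+     # (11): weight 1/e1 where the label marks source/interference, 1/e0 elsewhere.
+     return np.where(y > 0, 1.0 / e1, 1.0 / e0)
+
+ def weighted_bce_loss(y, y_hat, w, eps=1e-12):
+     # (10): weighted mean binary cross entropy over the d grid points.
+     bce = -(y * np.log(y_hat + eps) + (1.0 - y) * np.log(1.0 - y_hat + eps))
+     return np.mean(w * bce)
+
+ def update_factors(e0, e1, beta):
+     # (12): decaying update of e0, e1 towards 1 as beta grows.
+     return (1.0 - beta) * e0 + beta, (1.0 - beta) * e1 + beta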
+ IV. PERFORMANCE EVALUATION
675
+ This section evaluates the performance of the proposed
676
+ DAFC-based NN approach and compares it to the conventional
677
+ approaches, summarized in Subsection IV-A1. The data for
678
+ all considered scenarios is simulated using the measurement
679
+ model from Section II.
680
+ A. Setup & Training
681
+ This work considers a uniform linear array (ULA) with
682
+ half-wavelength-spaced L elements. Each simulated example
683
+ consists of the input-label pair, (X, y), where the input X is
684
+ defined in (5), and the label y is defined in Section III-D.
685
+ The simulation configurations are detailed in Table II. The
686
+ performance of the proposed approach is evaluated using
687
+ a single NN instance. Therefore, a single NN model is
688
+ used for various signal-to-interference ratios (SIRs), signal-
689
+ to-noise ratios (SNRs), interference-to-noise ratios (INRs),
690
+ DOAs, interference distribution, and the number of sources for
691
+ joint DOA estimation and source enumeration. The following
692
+ definitions for the m-th source are used in all experiments:
693
+ INR = E[∥c∥^2] / E[∥n∥^2] = σ_c^2 / σ_n^2,      (13)
+ SNR_m = E[∥a(θ_m)s_m∥^2] / E[∥n∥^2] = σ_m^2 / σ_n^2,      (14)
+ SIR_m = E[∥a(θ_m)s_m∥^2] / E[∥c∥^2] = σ_m^2 / σ_c^2.      (15)
716
+ The NN optimization for all evaluated architectures is
717
+ performed using the loss function in (10) and Adam opti-
718
+ mizer [52] with a learning rate of 10−3, and a plateau learning
719
+ rate scheduler with a decay of 0.905. The set of loss weight up-
720
+ date factors, {β(l)}Nw
721
+ l=1, in (12) is chosen as the evenly-spaced
722
+ logarithmic scale between 10−5 and 10−2 with Nw = 6, that
723
+ is {10−5, 7.25 · 10−5, 5.25 · 10−4, 3.8 · 10−3, 2.78 · 10−2, 0.2}.
724
+ The chosen batch size is 512, the number of epochs is 500,
725
+ and early stopping is applied according to the last 200 epochs.
726
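+ A minimal sketch of this optimization setup (assuming PyTorch, with a placeholder
+ model) is shown below; the logarithmic spacing is chosen here to reproduce the β(l)
+ values listed above, and all names are illustrative.
+ import numpy as np
+ import torch
+
+ net = torch.nn.Linear(512, 121)          # placeholder for the DAFC-based NN
+ optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
+ scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.905)
+
+ # Loss-weight update factors beta(l): Nw = 6 logarithmically spaced values,
+ # chosen to reproduce the set listed in the text.
+ betas = np.logspace(np.log10(1e-5), np.log10(0.2), num=6)
+
+ batch_size, num_epochs = 512, 500        # early stopping over the last 200 epochs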
+ Notation | Description                | Value
+ Mmax     | Maximal number of sources  | 4
+ L        | Number of sensors          | 16
+ K        | Number of snapshots        | 16
+ d        | Angular grid dimension     | 121
+ ∆φ       | Angular grid resolution    | 1◦
+ FOV      | Field of view              | [−60◦, 60◦]
+ σ_n^2    | Thermal noise power        | 1
+ Table II: Simulation configurations.
755
+ 1) DOA Estimation Approaches: This subsection briefly
756
+ summarizes the conventional DOA estimation approaches. The
757
+ performance of the proposed approach is compared to the
758
+ conventional MVDR, CNN, and FC-based NN. All the NN-
759
+ based approaches were implemented using a similar number of
760
+ layers and learnable parameters. In addition, the FC-based NN
761
+ and CNN were optimized using the same learning algorithm
762
+ and configurations.
763
+ (a) Conventional Adaptive Beamforming
764
+ The MVDR [3] estimator is based on adaptive beamforming,
765
+ and it is the maximum likelihood estimator in the presence
766
+ of unknown Gaussian interference [53]. The MVDR estimates
767
+ DOAs by a peak search on the MVDR spectrum:
768
+ P_MVDR(φ) = 1 / ( a^H(φ) ˆR_x^(−1) a(φ) ),      (16)
+ where ˆR_x = (1/K) Σ_(k=1)^K x_k x_k^H is the sample covariance matrix
+ estimator. Notice that the MVDR spectrum utilizes only
781
+ second-order statistics of the received signal xk. For Gaussian-
782
+ only interference (i.e. ck = 0 in (1)), the second-order statistics
783
+ contains the entire statistical information. However, for non-
784
+ Gaussian interference, information from higher-order statistics
785
+ is needed.
786
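+ A NumPy sketch of the MVDR spectrum (16) for the half-wavelength ULA considered here
+ is given below; the steering-vector convention and function names are our
+ assumptions, and in practice diagonal loading is often added before inversion.
+ import numpy as np
+
+ def mvdr_spectrum(X, phi_grid_deg, L):
+     # X: (L, K) snapshot matrix, phi_grid_deg: candidate DOAs in degrees.
+     K = X.shape[1]
+     R = X @ X.conj().T / K                   # sample covariance matrix
+     R_inv = np.linalg.inv(R)
+     p = np.zeros(len(phi_grid_deg))
+     for i, phi in enumerate(np.deg2rad(phi_grid_deg)):
+         a = np.exp(1j * np.pi * np.arange(L) * np.sin(phi))   # ULA steering vector
+         p[i] = 1.0 / np.real(a.conj().T @ R_inv @ a)
+     return p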
+ (b) CNN Architecture
787
+ We consider a CNN-based DOA estimation approach using a
788
+ CNN architecture that is similar to the architecture provided
789
+ in [38]. The input to the CNN of dimension L × L × 3
790
+ consists of the real, imaginary, and angle parts of ˆRx. The
791
+ CNN architecture consists of 4 consecutive CNN blocks,
792
+ such that each block contains a convolutional layer, a batch
793
+ normalization layer, and a ReLu activation. The convolutional
794
+ layers consist of [128, 256, 256, 128] filters. Kernel sizes of
795
+ 3 × 3 for the first block and 2 × 2 for the following three
796
+ blocks are used. Similarly to [38], 2 × 2 strides are used
797
+ for the first block and 1 × 1 for the following three blocks.
798
+ Next, a flatten layer is used to vectorize the hidden tensor,
799
+ and 3 FC layers of dimensions 1024, 512, 256 are used
800
+ with a ReLu activation and Dropout of 30%. Finally, the
801
+ output layer is identical to the proposed DAFC-based NN
802
+ as detailed in Subsection III-C. The considered loss function
803
+ is identical to the proposed DAFC-based approach in (10).
804
+ The number of trainable parameters in the considered CNN
+ architecture accounts for 3,315,449. Notice that the CNN-
808
+ based architecture utilizes the information within the sample
809
+ covariance matrix and therefore, is limited to second-order
810
+ statistics only.
811
+ (c) FC Architecture
812
+ A straightforward implementation of an FC-based architecture,
813
+ as mentioned in Subsection III-B, was implemented. The
814
+ data matrix, X, is vectorized, and the real and imaginary
815
+ parts of the values were concatenated to obtain a 2KL-
816
+ dimension input vector. The selected hidden layers are of
817
+ sizes: [512, 512, 1024, 1024, 512, 256] where each hidden layer
818
+ is followed by a tanh activation function. The output layer
819
+ is identical to the proposed DAFC-based NN approach as
820
+ detailed in Subsection III-C. The considered loss function
821
+ is (10), and the number of trainable parameters in the FC-
822
+ based NN accounts for 2,787,449. Notice that the FC-based
823
+ NN architecture utilizes all the measurements by interacting
824
+ with all samples in the input data. However, this processing is
825
+ not specifically tailored to the structure of information within
826
+ the measurements. On the other hand, the proposed DAFC-
827
+ based NN utilizes the information structure to process the input
828
+ data. Therefore, for the considered DOA estimation problem,
829
+ the “inductive bias” [51] for this approach is improper and can
830
+ result in an under-fitted NN architecture.
831
+ 2) Performance Evaluation Metrics: This subsection dis-
832
+ cusses the criteria for the performance evaluation of the
833
+ proposed DOA estimation approach. In this work, similarly
834
+ to [38], the DOA estimation accuracy of a set of sources
835
+ is evaluated by the Hausdorff distance between sets. The
836
+ Hausdorff distance, dH between the sets, A, and B, is defined
837
+ as:
838
+ d_H(A, B) = max{ d(A, B), d(B, A) },      (17)
+ d(A, B) = sup{ inf{ |α − β| : β ∈ B } : α ∈ A }.
+ Notice that d(A, B) ̸= d(B, A). Let Θ = {θ_m}_(m=1)^M and ˆΘ = {ˆθ_m}_(m=1)^ˆM be
+ the sets of true and estimated DOAs, respectively. The estimation error is obtained
+ by evaluating the Hausdorff distance, d_H(Θ, ˆΘ). We define the root mean squared
+ distance (RMSD) for an arbitrary set of N examples (e.g., the test set),
+ {(X^(n), y^(n))}_(n=1)^N, with the corresponding true and estimated DOAs,
+ {(Θ^(n), ˆΘ^(n))}_(n=1)^N, as:
+ RMSD ≜ sqrt( (1/N) Σ_(n=1)^N d_H^2( Θ^(n), ˆΘ^(n) ) ).      (18)
872
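+ The following NumPy sketch evaluates (17) and (18) for sets of scalar DOAs; it
+ assumes both sets are non-empty, and the function names are ours.
+ import numpy as np
+
+ def hausdorff(theta_a, theta_b):
+     # (17): symmetric Hausdorff distance between two non-empty DOA sets (degrees).
+     A, B = np.atleast_1d(theta_a), np.atleast_1d(theta_b)
+     d_ab = max(np.min(np.abs(a - B)) for a in A)
+     d_ba = max(np.min(np.abs(b - A)) for b in B)
+     return max(d_ab, d_ba)
+
+ def rmsd(true_sets, est_sets):
+     # (18): root mean squared Hausdorff distance over a set of examples.
+     return np.sqrt(np.mean([hausdorff(t, e) ** 2 for t, e in zip(true_sets, est_sets)]))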
+ Angular resolution is one of the key criteria for DOA
873
+ estimation performance. The probability of resolution is com-
874
+ monly used as a performance evaluation metric for angular
875
+ resolution. In the considered problem, resolution between two
876
+ sources and between source and interference are used for
877
+ performance evaluation. For an arbitrary example with M
878
+ sources, the resolution event Ares is defined as:
879
+ A_res(Θ, ˆΘ) = 1,  if ξ_m ≤ 2◦ for all m = 1, . . . , M and |ˆΘ| ≥ M,
+               0,  else,      (19)
+ ξ_m ≜ min_(ˆθ∈ˆΘ) |θ_m − ˆθ|,   m = 1, . . . , M .
894
+ |θm − ˆθ|, m = 1, . . . , M .
895
+ For example, a scene with M sources is considered success-
896
+ fully resolved if for each true DOA a) there exists a close-
897
+ enough estimated DOA, ˆθ ∈ ˆΘ, that is at most 2◦ apart, and
898
+ b) there exists at least M DOA estimations. According to (18),
899
+ the probability of resolution, can be defined as:
900
+ Pres = 1
901
+ N
902
+ N
903
+
904
+ n=1
905
+ Ares
906
+
907
+ Θ(n), ˆΘ(n)�
908
+ .
909
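+ A corresponding NumPy sketch of (19) and (20), with the same caveats as above, is:
+ import numpy as np
+
+ def resolution_event(theta_true, theta_est, tol_deg=2.0):
+     # (19): 1 if every true DOA has an estimate within tol_deg and at least M DOAs
+     # were estimated, 0 otherwise.
+     theta_true, theta_est = np.atleast_1d(theta_true), np.atleast_1d(theta_est)
+     if theta_est.size < theta_true.size:
+         return 0
+     xi = [np.min(np.abs(t - theta_est)) for t in theta_true]
+     return int(max(xi) <= tol_deg)
+
+ def probability_of_resolution(true_sets, est_sets):
+     # (20): empirical probability of resolution over a set of examples.
+     return np.mean([resolution_event(t, e) for t, e in zip(true_sets, est_sets)])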
+ (20)
910
+ 3) Data Sets: This subsection describes the structure and
911
+ formation of Training & Test sets.
912
+ (a) Training Set
913
+ The considered training set contains Ntrain
914
+ =
915
+ 10, 000
916
+ examples re-generated at each epoch. For each exam-
917
+ ple, i.e. an input-label pair (X, y), the number of DOA
918
+ sources, M, is generated from uniform and i.i.d. distribution,
919
+ {1, . . . , Mmax}. The training set contains 10% of interference-
920
+ free examples and 90% of interference-containing. Out of the
921
+ interference-containing examples, 90% generated such that the
922
+ source DOAs, {θm}M
923
+ m=1, and the interference’s DOA, θc, are
924
+ distributed uniformly over the simulated FOV. The remaining
925
+ 10% are generated such that θc is distributed uniformly over
926
+ the FOV, and the source DOAs, {θm}M
927
+ m=1, are distributed
928
+ uniformly over the interval [θc − 8◦, θc + 8◦]. This data set
929
+ formation enables to “focus” the NN training on the chal-
930
+ lenging scenarios where the source and interference DOAs are
931
+ closely spaced. The generalization capabilities of the proposed
932
+ NN to variations in interference statistics are achieved via the
933
+ interference angular spread parameter, ρ, from the uniform dis-
934
+ tribution, U ([0.7, 0.95]), and the interference spikiness param-
935
+ eter, ν, from the uniform distribution, U ([0.1, 1.5]). The INR
936
+ for each interference-containing example and {SIRm}M
937
+ m=1 or
938
+ {SNRm}M
939
+ m=1 are drawn independently according to Table III.
940
+ (b) Test Set
941
+ The test set consists of Ntest = 20, 000 examples. The results
942
+ are obtained by averaging the evaluated performance over 50
943
+ independent test set realizations. Considering the low-snapshot
944
+ support regime, the number of snapshots is set to K = 16,
945
+ except for experiment (c) in IV-B2. Considering heavy-tailed
946
+ interference, the spikiness parameter is set to ν = 0.2. The
947
+ INR is set to INR = 5 dB, and the interference angular spread
948
+ parameter is set to ρ = 0.9. The signal amplitude was set to be
949
+ identical for all sources, σ1 = · · · = σm, except for experiment
950
+ (b) in IV-B2.
951
+ B. Experiments
952
+ 1) Single Source Within Interference: In this scenario, the
953
+ ability to resolve a single source from interference is evaluated.
954
+ Let M = 1 with θ1 = 0.55◦, and θc = θ1 + ∆θc such
955
+ that ∆θc is the angular separation between the single source
956
+ and interference. The 0.55◦ offset is considered to impose
957
+ a realistic off-grid condition. Fig. 3 shows the RMSD and
958
+ probability of resolution for all evaluated approaches.
959
+ Fig. 3a shows that the FC-based NN approach does not
960
+ manage to resolve the single source from the interference
961
+ for all evaluated angular separations. This result supports the
+ (a)
965
+ (b)
966
+ Figure 3: Scenario with a single source at θ1 = 0.55◦ and interference located at θc = θ1 + ∆θc. (a) probability of resolution
967
+ and (b) RMSD.
968
+ Notation | Description                            | Value
+ ρ        | Interference angular spread parameter  | ∼ U([0.7, 0.95])
+ ν        | Interference spikiness parameter       | ∼ U([0.1, 1.5])
+ INR      | INR                                    | ∼ U([0, 10]) [dB]
+ SIRm     | SIR of m-th source                     | ∼ U([−10, 10]) [dB]
+ SNRm     | SNR of m-th source                     | ∼ U([−10, 10]) [dB]
+ Table III: Training set parameters. The SNRm distribution applies to
+ interference-free examples.
989
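+ For illustration, one way to draw the scenario parameters of a single training
+ example according to the description above and the distributions in Table III is
+ sketched below; the measurement simulation itself (which follows the model of
+ Section II) is omitted, and all names are ours.
+ import numpy as np
+
+ rng = np.random.default_rng()
+
+ def sample_training_scenario(M_max=4, fov=(-60.0, 60.0)):
+     M = int(rng.integers(1, M_max + 1))                # number of sources
+     has_interference = rng.random() < 0.9              # 10% interference-free examples
+     theta_c = rng.uniform(*fov) if has_interference else None
+     if has_interference and rng.random() < 0.1:        # closely spaced case
+         doas = rng.uniform(theta_c - 8.0, theta_c + 8.0, size=M)
+     else:
+         doas = rng.uniform(fov[0], fov[1], size=M)
+     rho = rng.uniform(0.7, 0.95)                       # interference angular spread
+     nu = rng.uniform(0.1, 1.5)                         # interference spikiness
+     inr_db = rng.uniform(0.0, 10.0)
+     sir_or_snr_db = rng.uniform(-10.0, 10.0, size=M)   # SIR_m, or SNR_m if no interference
+     return M, doas, theta_c, rho, nu, inr_db, sir_or_snr_db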
+ under-fitting limitation of the FC-based NN approach for the
990
+ DOA estimation, which can be explained by the architecture
991
+ that processes the input data as-is, without any structured
992
+ transformation or model-based pre-processing.
993
+ The MVDR and CNN performance in terms of the resolu-
994
+ tion are similar since both rely only on second-order statistics,
995
+ which is sufficient in scenarios with widely separated sources
996
+ and interference. Fig. 3a shows that the proposed DAFC-based
997
+ NN approach outperforms all other considered approaches in
998
+ low angular separation scenarios. This can be explained by the
999
+ fact that the DAFC uses the high-order statistics needed for
1000
+ the resolution of closely spaced sources and interference.
1001
+ Fig. 3b shows the RMSD of all considered DOA estimation
1002
+ approaches. The proposed DAFC-based NN approach outper-
1003
+ forms the other tested approaches in low SIR. At high SIR
1004
+ and small angular separation, ∆θc = 5◦, the interference
1005
+ is negligible with respect to the strong source signal, and
1006
+ therefore, the DAFC-based, CNN, and MVDR approaches
1007
+ obtain similar performance. For large angular separation,
1008
+ ∆θc = 30◦, the source and the interference are sufficiently
1009
+ separated, and therefore, DOA estimation errors are mainly
1010
+ induced by the interference DOA, θc. The MVDR spectrum
1011
+ contains a peak at θc = 30.55◦, and therefore, MVDR’s
1012
+ RMSD = 30◦ is approximately constant. The NNs are trained
1013
+ to output a 0-probability for the interference, therefore, the
1014
+ NN-based approaches: FC, CNN, and DAFC achieve a smaller
1015
+ DOA estimation error. The DAFC-based NN and CNN utilize
1016
+ structured transformations, which better fit the input data, and
1017
+ therefore, they outperform the FC-based NN approach in terms
1018
+ of RMSD.
1019
+ 2) Resolving Two Sources from Interference: This subsec-
1020
+ tion evaluates the performance of the tested DOA estimation
1021
+ approaches in scenarios with two sources within AWGN and
1022
+ interference.
1023
+ (a) Resolution of Equal-Strength Sources
1024
+ In the following experiment, the resolution between two equal-
1025
+ power sources, M = 2, with θ1 = −∆θ/2 + 0.55◦ and θ2 = ∆θ/2 + 0.55◦, is
+ evaluated. The off-grid additional 0.55◦ offset to
1029
+ the ∆θ angular separation between the sources represents the
1030
+ practical scenario. The interference at θc = 0.55◦ influences
1031
+ the two sources similarly. Fig. 4 shows the probability of
1032
+ resolution of the tested approaches in scenarios with (a) the
1033
+ AWGN only and (b) spatially-colored interference.
1034
+ The FC-based NN approach does not resolve the two targets
1035
+ in both evaluated scenarios. Subplot (a) in Fig. 4 shows
1036
+ that the proposed DAFC-based NN approach outperforms the
1037
+ MVDR and the CNN at low-SNR and small angular separation
1038
+ scenarios due to its generalization ability to spatially-white
1039
+ interference. Subplot (b) in Fig. 4 shows that at low SIR
1040
+ of SIR = −5 dB, the performances of MVDR and CNN
1041
+ significantly degrade compared to the proposed DAFC-based
1042
+ NN approach. Comparing subplots in Fig. 4, notice that at
1043
+ SIR = −5 dB, the MVDR fails to resolve the sources with
1044
+ angular separation ∆θ < 20◦ due to the presence of the heavy-
1045
+ tailed spatially-colored interference in the proximity of the
1046
+ sources. However, the proposed DAFC-based NN approach
1047
+ mitigates this interference and resolves the sources, and hence,
1048
+ outperforms other tested approaches at both SIR = 0 dB and
1049
+ SIR = −5 dB.
1050
+ Subplot (b) in Fig. 4 shows the non-monotonic trend of
1051
+ CNN and MVDR performance at 4◦ < ∆θ < 18◦ and
1093
+ (a)
1094
+ (b)
1095
+ Figure 4: Probability of resolution for two sources located at θ1,2 = θc ± ∆θ/2, and interference located at θc = 0.55◦. (a)
1096
+ AWGN-only scenario and (b) interference-containing scenario.
1097
+ (a) FC
1098
+ (b) MVDR
1099
+ (c) CNN
1100
+ (d) DAFC
1101
+ Figure 5: Spatial spectrum, two sources with SIR = −5 dB
1102
+ located at θ1,2 = θc ± ∆θ/2 with ∆θ = 12◦ and θc = 0.55◦.
1103
+ The dashed blue lines represent the mean spatial spectrum,
1104
+ and the color fill represents the standard deviation around the
1105
+ mean obtained from 2, 000 i.i.d. examples. The solid vertical
1106
+ orange lines represent the true source DOAs, and the dashed
1107
+ vertical green line represents the interference DOA.
1108
+ SIR = −5 dB. For 4◦ < ∆θ < 8◦ the sources are closer
1109
+ to the peak of the interference’s lobe and are therefore less
1110
+ mitigated by it. As ∆θ initially increases, 8◦ < ∆θ < 12◦,
1111
+ the sources reach DOAs which are in the proximity of the
1112
+ interference lobe’s “nulls” which explains the reduction in
1113
+ resolution, and as ∆θ further increases, 16◦ < ∆θ, the
1114
+ sources are sufficiently separated from the interference such
1115
+ that the resolution increases. As a result, MVDR and CNN-
1116
+ based approaches that use second-order statistics only, can not
1117
+ resolve the sources in the vicinity of a stronger interference.
1118
+ Fig. 5 shows the average spatial spectrum of all tested
1119
+ approaches for ∆θ = 12◦ and SIR = −5 dB. The average
1120
+ spatial spectrum of the FC-based NN approach does not show
1121
+ two prominent peaks, which results in its poor probability of
1122
+ resolution in Fig. 4. The MVDR “bell-shaped” spatial spec-
1123
+ trum does not contain the two prominent peaks at θ1,2 since the
1124
+ interference “masks” the two sources. The CNN and proposed
1125
+ DAFC-based NN approaches show two peaks at the average
1126
+ spatial spectrum. The peaks at the CNN’s average spatial
1127
+ spectrum are lower, resulting in a low-resolution probability.
1128
+ The average spatial spectrum of the proposed DAFC-based
1129
+ NN approach contains two high peaks, resulting in a superior
1130
+ probability of resolution in Fig. 4.
1131
+ (b) Resolution of Unequal-Power Sources
1132
+ Fig. 6 shows the probability of resolution in a scenario
1133
+ with two sources, M = 2, at θ1 = −∆θ/2 + 0.55◦, and
1134
+ θ2 = +∆θ/2 + 0.55◦ with interference located between the
1135
+ sources at θc = 0.55◦. The signal strength of the second
1136
+ source is set to SIR1 = SIR2 + 10 dB. Comparing Fig. 6 to
1137
+ Fig. 4b, the competing methods show similar trends, except
1138
+ the degradation of the CNN’s probability of resolution for the
1139
+ SIR = 0 dB case. On the other hand, the proposed DAFC-
1140
+ based NN approach outperforms other tested approaches in
1141
+ terms of the probability of resolution. Therefore, Fig. 6 demon-
1142
+ strates the generalization ability of the proposed DAFC-based
1143
+ NN approach to unequal source strengths.
1144
+ (c) Effect of the Number of Snapshots on the Resolution
1145
+ This experiment investigates the influence of the number
1146
+ of snapshots, K, on the ability to resolve two proximate
1147
+ sources from heavy-tailed spatially-colored interference. The
1148
+ equal-strength resolution scenario is repeated using K =
1149
+ 4, 8, 16, 32, 64 with different instances of NN training for
1150
+ each K value. Fig. 7 shows the probability of resolution for
1151
+ two equal-strength sources at θ1,2 = θc ±∆θ/2 for ∆θ = 12◦
1152
+ and θc = 0.55◦.
1153
+ The FC-based NN approach fails to resolve the two sources.
1154
+ For SIR = 0 dB, the MVDR, CNN, and DAFC-based NN
1268
+ Φ[Deg]10
1269
+ Figure 6: Probability of resolution for two sources located at
1270
+ θ1,2 = θc ±∆θ/2, and interference located at θc = 0.55◦. The
1271
+ SIR in the legend represents the SIR of the first source, SIR1.
1272
+ The SIR of the second source is set to SIR2 = SIR1 + 10 dB.
1273
+ approaches achieve a monotonic increasing probability of
1274
+ resolution with increasing K. The proposed DAFC-based NN
1275
+ approach slightly outperforms other tested approaches. At low
1276
+ SIR of SIR = −5 dB, the proposed DAFC-based NN ap-
1277
+ proach significantly outperforms the other tested approaches.
1278
+ This can be explained by the fact that increasing K increases
1279
+ the probability for outliers to be present in the input data
1280
+ matrix, X. Therefore, the estimated autocorrelation matrix,
1281
+ ˆRx, is more likely to be biased by the interference-related
1282
+ outliers, which results in interference “masking” the sources.
1283
+ The proposed DAFC-based NN approach is immune to these
1284
+ outliers and successfully exploits the information from the
1285
+ additional snapshots to improve the probability of resolution.
1286
+ Figure 7: Probability of resolution for two sources located at
1287
+ θ1,2 = θc ± ∆θ/2 with ∆θ = 12◦, and interference located at
1288
+ θc = 0.55◦, as a function of the number of snapshots, K.
1289
+ Figs. 4, 5, 6, and 7 show the ability of the proposed
1290
+ DAFC-based NN approach to utilize the information structure
1291
+ of the input data by exploiting the higher-order statistics
1292
+ and performing the domain-fitted transformation in order to
1293
+ provide superior resolution ability in the case of proximate
1294
+ heavy-tailed spatially-colored interference, low SIR and small
1295
+ sample size.
1296
+ 3) Multiple Source Localization: The performances of the
1297
+ tested DOA estimation approaches are evaluated and compared
1298
+ in a multi-source scenario. Four sources, (M = 4) were
1299
+ simulated with angular separation, ∆θ: {θ1, θ2, θ3, θ4} =
1300
+ θc + {−2∆θ, −∆θ, ∆θ, 2∆θ}, where θc = 0.51◦ represents
1301
+ a realistic off-grid condition. The RMSD of evaluated meth-
1302
+ ods is depicted in Fig. 8. The proposed DAFC-based NN
1303
+ approach outperforms the other tested approaches at low SIR
1304
+ (SIR < 0 dB) for large and small angular separations. For
1305
+ high SIR and low angular separation, ∆θ = 5◦, the MVDR
1306
+ achieves the lowest RMSD. The reason is that for this case, the
1307
+ interference is negligible with respect to the lobe of the strong
1308
+ source in the MVDR’s spectrum. However, at high angular
1309
+ separation, ∆θ = 20◦, the proposed DAFC-based NN ap-
1310
+ proach significantly outperforms the other tested approaches.
1311
+ This is explained by Fig. 9, that shows the spectrum of the
1312
+ tested DOA estimation approaches. Notice that the proposed
1313
+ DAFC-based NN mitigates interference, while the spectra of
1314
+ other tested approaches contain high peaks at the interference
1315
+ DOA, θc. These peaks increase the Hausdorff distance in (17),
1316
+ increasing the RMSD of other tested approaches in Fig. 8.
1317
+ Figure 8: RMSD in scenarios with M = 4 sources located at
1318
+ {θ1, θ2, θ3, θ4} = θc + {−2∆θ, −∆θ, ∆θ, 2∆θ}, where θc =
1319
+ 0.51◦.
1320
+ 4) Multiple Source Enumeration: The source enumeration
1321
+ performance is evaluated in this experiment. The DOAs of
1322
+ the sources are selected from the set of following values:
1323
+ {10.51◦, −9.49◦, −19.49◦, 10.51◦} such that for M sources,
1324
+ the DOAs are selected to be the first M DOAs. The interfer-
1325
+ ence is located at θc = 0.51◦. The proposed DAFC-based NN
1326
+ approach is compared to the MDL and AIC [19]. Fig. 10 shows
1327
+ the source enumeration confusion matrices for the MDL, AIC,
1328
+ and the proposed DAFC-based NN with SIR = 0 dB.
1329
+ Figs. 10a, 10b show that in both the MDL and the AIC, the
1330
+ predicted number of sources has a constant bias for each true
1331
+ M due to the spatially-colored interference. Fig. 10c shows the
1332
+ source enumeration performance of the proposed DAFC-based
1333
+ NN approach in the presence of spatially colored interference.
1334
+ The DAFC-based NN identifies the interference and does not
1335
+ count it as one of the sources by outputting a low probability
1336
+ for angular grid points near θc, resulting in a better source
1337
+ enumeration performance.
1338
+
1339
+ 1.0
1340
+ 0.8
1341
+ MVDR, SIR=O
1342
+ DAFC, SIR=O
1343
+ 0.6
1344
+ FC, SIR=0
1345
+ res
1346
+ CNN. SIR=0
1347
+ DP
1348
+ MVDR, SIR=-5
1349
+ 0.4
1350
+ DAFC, SIR=-5
1351
+ FC, SIR=-5
1352
+ CNN, SIR=-5
1353
+ 0.2
1354
+ 0.0
1355
+ 10
1356
+ 20
1357
+ 30
1358
+ 40
1359
+ 50
1360
+ 60
1361
+ Ae [Deg]1.0
1362
+ 0.8
1363
+ 0.6
1364
+ res
1365
+ MVDR. SIR=O
1366
+ 0.4
1367
+ DAFC, SIR=0
1368
+ FC, SIR=0
1369
+ CNN, SIR=0
1370
+ 0.2
1371
+ MVDR,SIR=-5
1372
+ DAFC, SIR=-5
1373
+ FC, SIR=-5
1374
+ 0.0
1375
+ CNN, SIR=-5
1376
+ 4
1377
+ 8
1378
+ 16
1379
+ 32
1380
+ 64
1381
+ KRMSD [Deg]
1382
+ 101
1383
+ MVDR. A0=5
1384
+ DAFC, △0=5
1385
+ FC, △0=5
1386
+ CNN, △0=5
1387
+ MVDR,A0=20
1388
+ 100
1389
+ DAFC, △0=20
1390
+ FC, △0=20
1391
+ CNN, △0=20
1392
+ -10
1393
+ -5
1394
+ 0
1395
+ 5
1396
+ 10
1397
+ 15
1398
+ 20
1399
+ SIR[dB]11
1400
+ (a) FC
1401
+ (b) MVDR
1402
+ (c) CNN
1403
+ (d) DAFC
1404
+ Figure 9: Spatial spectrum, four sources with SIR = 0 dB
1405
+ located at {θ1, θ2, θ3, θ4} = θc + {−2∆θ, −∆θ, ∆θ, 2∆θ},
1406
+ where θc = 0.51◦ and ∆θ = 20◦. The dashed blue lines rep-
1407
+ resent the mean spatial spectrum, and the color fill represents
1408
+ the standard deviation around the mean obtained from 2, 000
1409
+ i.i.d. examples. The solid vertical orange lines represent the
1410
+ true source DOAs and the dashed vertical green line represents
1411
+ the interference DOA.
1412
+ 5) Loss Weights: This experiment evaluates the effect of the
1413
+ loss weight update factors, {β(l)}_(l=1)^Nw, introduced in (12), on the confidence
+ level in the spatial spectrum. Let B denote the set of {β(l)}_(l=1)^Nw values used
+ in the proposed approach. The loss weights, {w_i^(t)}_(i=1)^d, are defined by the
+ factors e_0^(t), e_1^(t) according to (11), and are introduced to provide a
+ trade-off between the penalty obtained on source/interference and the penalty
+ obtained for the rest of the output spatial spectrum.
+ For comparison, we set B0 = {10^−6, 3.98·10^−6, 1.58·10^−5, 6.31·10^−5, 2.51·10^−4,
+ 10^−3} and B1 = {10^−3, 3.98·10^−3, 0.0158, 0.063, 0.25, 0.1} as two sets of loss
+ weight update factors. For B0, the loss weight update factors are closer to 0, hence
+ the loss weights emphasize the source/interference, since e_1^(t) ≪ e_0^(t), which,
+ according to (11), translates to larger w_i^(t) for source/interference grid points.
+ For B1, the values are closer to 1, hence the loss weights are more equally
+ distributed among grid points, since e_1^(t) ≈ e_0^(t). The experiment in IV-B1
1444
+ is repeated here for the DAFC-based NN approach with the
1445
+ two additional B0, B1 values mentioned above.
1446
+ Let ˆp1 represent the probability assigned for the source-
1447
+ containing grid point in the estimated label ˆy. Let ˆp0 represent
1448
+ the maximum over probabilities assigned for non-source grid
1449
+ points in ˆy, excluding a 5-grid point guard interval around the
1450
+ source. Fig. 11 shows ˆp1 and ˆp0 for various angular separations
1451
+ between the source and interference for SIR = −5 dB. For
1452
+ B0, the source’s contribution to the loss value is substan-
1453
+ tially higher, which results in a higher probability for the
1454
+ (a) MDL
1455
+ (b) AIC
1456
+ (c) DAFC
1457
+ Figure 10: Confusion matrix for source enumeration, SIR =
1458
+ 0 dB, sources located at {10.51◦, −9.49◦, −19.49◦, 10.51◦}.
1459
+ (a) MDL, (b) AIC, (c) proposed DAFC-based NN.
1460
+ source-containing grid point. However, this results in a higher
1461
+ probability obtained for non-source grid points, since their
1462
+ contribution to the loss value is negligible compared to the
1463
+ source-containing grid point, increasing “false-alarm” peaks
1464
+ in the spatial spectrum, subsequently increasing the estimation
1465
+ error. Correspondingly, for B1 the source’s contribution to the
1466
+ loss value is less significant, which results in low probability
1467
+ assigned for the source-containing grid points, as well as low
1468
+ probability for non-source grid points.
1469
+ V. CONCLUSION
1470
+ This work addresses the problem of DOA estimation and
1471
+ source enumeration of an unknown number of sources within
1472
+ heavy-tailed, non-Gaussian, and spatially colored interference.
1473
+ A novel DAFC-based NN approach is proposed for this
1712
+ Figure 11: Loss weight update factor impact on probability
1713
+ levels obtained in the DAFC-based NN’s spatial spectrum,
1714
+ single target at θ1 = 0.55◦ with interference at θc = θ1 +∆θc,
1715
+ SIR = −5 dB. ˆp1 represents the probability obtained for
1716
+ source-containing grid points. ˆp0 represents the probability
1717
+ obtained for non-source grid points.
1718
+ problem. The DAFC mechanism applies a structured transfor-
1719
+ mation capable of exploiting the interference non-Gaussianity
1720
+ for its mitigation while retaining a low complexity of learnable
1721
+ parameters. The proposed DAFC-based NN approach is opti-
1722
+ mized to provide an interference-mitigated spatial spectrum
1723
+ using a loss weight scheduling routine, performing DOA
1724
+ estimation and source enumeration using a unified NN.
1725
+ The performance of the proposed approach is compared to
1726
+ MVDR, CNN-based, and FC-based approaches. Simulations
1727
+ showed the superiority of the proposed DAFC-based NN ap-
1728
+ proach in terms of probability of resolution and estimation ac-
1729
+ curacy, evaluated by RMSD, especially in weak signal power,
1730
+ small number of snapshots, and near-interference scenarios.
1731
+ The source enumeration performance of the proposed DAFC-
1732
+ based NN approach was compared to the MDL and AIC. It was
1733
+ shown that in the considered scenarios, the proposed approach
1734
+ outperforms the MDL and the AIC in the source enumeration
1735
+ accuracy.
1736
+ REFERENCES
1737
+ [1] H. L. Van Trees, Optimum Array Processing: Part IV of Detection,
1738
+ Estimation, and Modulation Theory.
1739
+ John Wiley & Sons, 2004.
1740
+ [2] E. Ollila, D. E. Tyler, V. Koivunen, and H. V. Poor, “Complex elliptically
1741
+ symmetric distributions: Survey, new results and applications,” IEEE
1742
+ Transactions on signal processing, vol. 60, no. 11, pp. 5597–5625, 2012.
1743
+ [3] J. Capon, “High-resolution frequency-wavenumber spectrum analysis,”
1744
+ Proceedings of the IEEE, vol. 57, no. 8, pp. 1408–1418, 1969.
1745
+ [4] R. Roy and T. Kailath, “ESPRIT-estimation of signal parameters via ro-
1746
+ tational invariance techniques,” IEEE Transactions on Acoustics, Speech,
1747
+ and Signal Processing, vol. 37, no. 7, pp. 984–995, 1989.
1748
+ [5] R. Schmidt, “Multiple emitter location and signal parameter estimation,”
1749
+ IEEE Transactions on Antennas and Propagation, vol. 34, no. 3, pp.
1750
+ 276–280, 1986.
1751
+ [6] A. Barabell, “Improving the resolution performance of eigenstructure-
1752
+ based direction-finding algorithms,” in ICASSP’83. IEEE International
1753
+ Conference on Acoustics, Speech, and Signal Processing, vol. 8. IEEE,
1754
+ 1983, pp. 336–339.
1755
+ [7] O. Besson, Y. Abramovich, and B. Johnson, “Direction-of-Arrival es-
1756
+ timation in a mixture of K-distributed and Gaussian noise,” Signal
1757
+ Processing, vol. 128, pp. 512–520, 2016.
1758
+ [8] U. K. Singh, R. Mitra, V. Bhatia, and A. K. Mishra, “Kernel minimum
1759
+ error entropy based estimator for mimo radar in non-Gaussian clutter,”
1760
+ IEEE Access, vol. 9, pp. 125 320–125 330, 2021.
1761
+ [9] E. Ollila and V. Koivunen, “Influence function and asymptotic efficiency
1762
+ of scatter matrix based array processors: Case MVDR beamformer,”
1763
+ IEEE Transactions on Signal Processing, vol. 57, no. 1, pp. 247–259,
1764
+ 2008.
1765
+ [10] S. Fortunati, F. Gini, M. S. Greco, A. M. Zoubir, and M. Rangaswamy,
1766
+ “Semiparametric CRB and Slepian-Bangs formulas for complex ellipti-
1767
+ cally symmetric distributions,” IEEE Transactions on Signal Processing,
1768
+ vol. 67, no. 20, pp. 5352–5364, 2019.
1769
+ [11] S. Luan, M. Zhao, Y. Gao, Z. Zhang, and T. Qiu, “Generalized
1770
+ covariance for non-Gaussian signal processing and GC-MUSIC under
1771
+ Alpha-stable distributed noise,” Digital Signal Processing, vol. 110, p.
1772
+ 102923, 2021.
1773
+ [12] K. Todros and A. O. Hero, “Robust multiple signal classification
1774
+ via probability measure transformation,” IEEE Transactions on Signal
1775
+ Processing, vol. 63, no. 5, pp. 1156–1170, 2015.
1776
+ [13] N. Yazdi and K. Todros, “Measure-transformed MVDR beamforming,”
1777
+ IEEE Signal Processing Letters, vol. 27, pp. 1959–1963, 2020.
1778
+ [14] X. Zhang, M. N. El Korso, and M. Pesavento, “Maximum likelihood and
1779
+ maximum a posteriori Direction-of-Arrival estimation in the presence
1780
+ of SIRP noise,” in 2016 IEEE International Conference on Acoustics,
1781
+ Speech and Signal Processing (ICASSP).
1782
+ IEEE, 2016, pp. 3081–3085.
1783
+ [15] ——, “MIMO radar target localization and performance evaluation
1784
+ under SIRP clutter,” Signal Processing, vol. 130, pp. 217–232, 2017.
1785
+ [16] B. Meriaux, X. Zhang, M. N. El Korso, and M. Pesavento, “Iterative
1786
+ marginal maximum likelihood DOD and DOA estimation for MIMO
1787
+ radar in the presence of SIRP clutter,” Signal Processing, vol. 155, pp.
1788
+ 384–390, 2019.
1789
+ [17] M. Trinh-Hoang, M. N. El Korso, and M. Pesavento, “A partially-
1790
+ relaxed robust DOA estimator under non-Gaussian low-rank interference
1791
+ and noise,” in ICASSP 2021-2021 IEEE International Conference on
1792
+ Acoustics, Speech and Signal Processing (ICASSP).
1793
+ IEEE, 2021, pp.
1794
+ 4365–4369.
1795
+ [18] J. Dai and H. C. So, “Sparse Bayesian learning approach for outlier-
1796
+ resistant direction-of-arrival estimation,” IEEE Transactions on Signal
1797
+ Processing, vol. 66, no. 3, pp. 744–756, 2017.
1798
+ [19] M. Wax and T. Kailath, “Detection of signals by information theoretic
1799
+ criteria,” IEEE Transactions on Acoustics, Speech, and Signal Process-
1800
+ ing, vol. 33, no. 2, pp. 387–392, 1985.
1801
+ [20] J. Fuchs, M. Gardill, M. L¨ubke, A. Dubey, and F. Lurz, “A machine
1802
+ learning perspective on automotive radar direction of arrival estimation,”
1803
+ IEEE Access, 2022.
1804
+ [21] J. Fuchs, R. Weigel, and M. Gardill, “Single-snapshot direction-of-arrival
1805
+ estimation of multiple targets using a multi-layer perceptron,” in 2019
1806
+ IEEE MTT-S International Conference on Microwaves for Intelligent
1807
+ Mobility (ICMIM).
1808
+ IEEE, 2019, pp. 1–4.
1809
+ [22] A. H. El Zooghby, C. G. Christodoulou, and M. Georgiopoulos,
1810
+ “Performance of radial-basis function networks for direction of arrival
1811
+ estimation with antenna arrays,” IEEE Transactions on Antennas and
1812
+ Propagation, vol. 45, no. 11, pp. 1611–1617, 1997.
1813
+ [23] B. Milovanovic, M. Agatonovic, Z. Stankovic, N. Doncov, and
1814
+ M. Sarevska, “Application of neural networks in spatial signal process-
1815
+ ing,” in 11th Symposium on Neural Network Applications in Electrical
1816
+ Engineering.
1817
+ IEEE, 2012, pp. 5–14.
1818
+ [24] G. Ofek, J. Tabrikian, and M. Aladjem, “A modular neural network for
1819
+ direction-of-arrival estimation of two sources,” Neurocomputing, vol. 74,
1820
+ no. 17, pp. 3092–3102, 2011.
1821
+ [25] A. Barthelme and W. Utschick, “A machine learning approach to DoA
1822
+ estimation and model order selection for antenna arrays with subarray
1823
+ sampling,” IEEE Transactions on Signal Processing, vol. 69, pp. 3075–
1824
+ 3087, 2021.
1825
+ [26] O. Bialer, N. Garnett, and T. Tirer, “Performance advantages of deep
1826
+ neural networks for angle of arrival estimation,” in ICASSP 2019-
1827
+ 2019 IEEE International Conference on Acoustics, Speech and Signal
1828
+ Processing (ICASSP).
1829
+ IEEE, 2019, pp. 3907–3911.
1830
+ [27] J. Cong, X. Wang, M. Huang, and L. Wan, “Robust DOA estimation
1831
+ method for MIMO radar via deep neural networks,” IEEE Sensors
1832
+ Journal, vol. 21, no. 6, pp. 7498–7507, 2020.
1833
+ [28] M. Gardill, J. Fuchs, C. Frank, and R. Weigel, “A multi-layer perceptron
1834
+ applied to number of target indication for direction-of-arrival estimation
1835
+ in automotive radar sensors,” in 2018 IEEE 28th International Workshop
1836
+ on Machine Learning for Signal Processing (MLSP).
1837
+ IEEE, 2018, pp.
1838
+ 1–6.
1839
+ [29] J. Fuchs, R. Weigel, and M. Gardill, “Model order estimation using a
1840
+ multi-layer perceptron for direction-of-arrival estimation in automotive
1859
+ radar sensors,” in 2019 IEEE Topical Conference on Wireless Sensors
1860
+ and Sensor Networks (WiSNet).
1861
+ IEEE, 2019, pp. 1–3.
1862
+ [30] J. Rogers, J. E. Ball, and A. C. Gurbuz, “Estimating the number of
1863
+ sources via deep learning,” in 2019 IEEE Radar Conference (Radar-
1864
+ Conf).
1865
+ IEEE, 2019, pp. 1–5.
1866
+ [31] ——, “Robust estimation of the number of coherent radar signal sources
1867
+ using deep learning,” IET Radar, Sonar & Navigation, vol. 15, no. 5,
1868
+ pp. 431–440, 2021.
1869
+ [32] Z.-M. Liu, C. Zhang, and S. Y. Philip, “Direction-of-arrival estimation
1870
+ based on deep neural networks with robustness to array imperfections,”
1871
+ IEEE Transactions on Antennas and Propagation, vol. 66, no. 12, pp.
1872
+ 7315–7327, 2018.
1873
+
79E1T4oBgHgl3EQfBwKM/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
7tE1T4oBgHgl3EQfTwM_/content/2301.03081v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da21566038279c42bc5fc2fc8e971fb2cfc236e570f67072d17d0a2823356813
3
+ size 1319299
7tE1T4oBgHgl3EQfTwM_/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5fd418cf89e2447be5f7c2bc821ed25b12c9f4ecba15aaf22baea471d79339af
3
+ size 3538989
7tE1T4oBgHgl3EQfTwM_/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7037249fb25803f93575f4db98dac81dac606cf1ec4debe20836856da82412f8
3
+ size 133538
8NE2T4oBgHgl3EQfPgby/content/tmp_files/2301.03761v1.pdf.txt ADDED
@@ -0,0 +1,1718 @@
1
+ 1
2
+ Tensor Denoising via Amplification and Stable
3
+ Rank Methods
4
+ Jonathan Gryak1, Kayvan Najarian2,3,4,5,6, and Harm Derksen7
5
+ Abstract—Tensors in the form of multilinear arrays are ubiq-
6
+ uitous in data science applications. Captured real-world data,
7
+ including video, hyperspectral images, and discretized physical
8
+ systems, naturally occur as tensors and often come with attendant
9
+ noise. Under the additive noise model and with the assumption
10
+ that the underlying clean tensor has low rank, many denoising
11
+ methods have been created that utilize tensor decomposition
12
+ to effect denoising through low rank tensor approximation.
13
+ However, all such decomposition methods require estimating the
14
+ tensor rank, or related measures such as the tensor spectral and
15
+ nuclear norms, all of which are NP-hard problems.
16
+ In this work we adapt the previously developed framework of
17
+ tensor amplification, which provides good approximations of the
18
+ spectral and nuclear tensor norms, to denoising synthetic tensors
19
+ of various sizes, ranks, and noise levels, along with real-world
20
+ tensors derived from physiological signals. We also introduce de-
21
+ noising methods based on two variations of rank estimates called
22
+ stable X-rank and stable slice rank. The experimental results
23
+ show that in the low rank context, tensor-based amplification
24
+ provides comparable denoising performance in high signal-to-
25
+ noise ratio (SNR) settings and superior performance in noisy
26
+ (i.e., low SNR) settings, while the stable X-rank method achieves
27
+ superior denoising performance on the physiological signal data.
28
+ Index Terms—Tensors, Denoising, Tensor Amplification, Stable
29
+ Rank Methods
30
+ I. INTRODUCTION
31
+ TENSORS in the form of multilinear arrays are ubiquitous
33
+ in data science applications. Captured real-world data,
34
+ including color and hyperspectral images (HSIs), video, and
35
+ discretized physical systems, naturally occur as tensors and
36
+ often come with attendant noise. As is common in other
37
+ signal processing applications, the captured tensor T ∈ R^{p1×p2×···×pd} is modeled as T = D + N, where D is a pure or “clean” tensor that has been corrupted by additive noise N, which is typically assumed to be Gaussian. Additionally,
42
+ the clean tensor D is assumed to be low rank.
43
+ Under this framework, tensor denoising can be achieved by
44
+ utilizing tensor decomposition methods, such as the canonical
45
+ 1Department of Computer Science, Queens College, City University of New
46
+ York, New York, NY, USA
47
+ 2Department of Computational Medicine and Bioinformatics, University of
48
+ Michigan, Ann Arbor, MI, USA
49
+ 3Department of Emergency Medicine, University of Michigan, Ann Arbor,
50
+ MI, USA
51
+ 4Electrical and Computer Engineering, College of Engineering, University
52
+ of Michigan, Ann Arbor, MI, USA
53
+ 5Michigan Institute for Data Science, University of Michigan, Ann Arbor,
54
+ MI, USA
55
+ 6Max Harry Weil Institute for Critical Care Research and Innovation,
56
+ University of Michigan, Ann Arbor, MI, USA
57
+ 7Department of Mathematics, Northeastern University, Boston, MA, USA
58
+ polyadic (CP) [1], [2] and Tucker [3], [4] decompositions, to
59
+ determine a low-rank approximation of the observed tensor.
60
+ These decomposition algorithms require a pre-specified rank
61
+ to compute an approximation, however, determining the rank
62
+ of a tensor is NP-hard [5]. Thus, tensor decomposition-based
63
+ methods utilize some estimate of the tensor rank to effect
64
+ tensor denoising.
65
+ CP decomposition has been frequently used for HSI de-
66
+ noising, such as in Liu et al., [6], which estimated the tensor
67
+ rank using covariance matrices of the n-model flattenings; in
68
+ Veganzones et al. [7], which used a non-negative variant of
69
+ CP decomposition; and in [8], in which a CP decomposition
70
+ regularized by the nuclear norm of clustered 3D patches of
71
+ the HSI was employed. Tucker decomposition based denoising
72
+ include two works by Rajwade et al. that utilized higher order
73
+ singular value decomposition [9], a tensor analog of matrix
74
+ SVD, to denoise video [10] and images [11]; as well Lee
75
+ et al. [12], which focused on denoising tensors with ordinal
76
+ values. More recently, a tensor train (matrix product state)
77
+ decomposition [13] was used for denoising of HSIs [14].
78
+ A general framework for understanding tensor denoising in
79
+ the additive model was developed in [15], that relates the
80
+ problem of denoising in the low rank context to the mini-
81
+ mization of dual norms ∥D∥X and ∥N∥Y , such as the nuclear
82
+ ∥·∥⋆ and spectral ∥·∥σ norms, respectively. The calculation
83
+ of these norms for tensors is also NP-hard [16],[5], thus in
84
+ order make use of the denoising framework in [15] the co-
85
+ authors developed the method of tensor amplification [17],
86
+ which provides good approximations of the tensor spectral
87
+ norm and its dual the nuclear norm.
88
+ In this work, we utilize the general framework of [15] and
89
+ the approximations of the spectral norm [17] to devise three
90
+ novel tensor denoising methods - amplification-based, stable
91
+ slice rank, and stable X-rank denoising, the latter two methods
92
+ based on their eponymous rank estimates. The performance of
93
+ these methods is compared to several standard decomposition-
94
+ based denoising methods on synthetic tensors of various sizes,
95
+ ranks, and noise levels, along with real-world tensors derived
96
+ from physiological signals. The experimental results show that
97
+ in the low rank context, tensor-based amplification provides
98
+ comparable denoising performance in high signal-to-noise
99
+ ratio (SNR) settings and superior performance in noisy (i.e.,
100
+ low SNR) settings, while the stable X-rank method achieves
101
+ superior denoising performance on the physiological signal
102
+ data.
103
+ arXiv:2301.03761v1 [cs.LG] 10 Jan 2023
104
+
105
+ 2
106
+ II. PRELIMINARIES AND RELATED WORK
107
+ A. Basic Notation
108
+ Let T ∈ Rp1×p2×···×pd denote a real-valued tensor of order
109
+ d. In the denoising experiments that are performed in this study
110
+ we will assume that the tensor T is the noisy version of a
111
+ pure tensor D ∈ Rp1×p2×···×pd corrupted by additive noise
112
+ N ∈ Rp1×p2×···×pd, that is,
113
+ T = D + N.
114
+ (1)
115
+ The Frobenius norm of T is denoted ∥T ∥ and defined as
116
+ $\|\mathcal{T}\| = \sqrt{\sum_{i_1=1}^{p_1}\sum_{i_2=1}^{p_2}\cdots\sum_{i_d=1}^{p_d} t_{i_1 i_2 \cdots i_d}^2},$   (2)
+ while the tensor inner product of two tensors T, S of matching order and dimension is defined as
+ $\langle \mathcal{T}, \mathcal{S}\rangle = \sum_{i_1=1}^{p_1}\sum_{i_2=1}^{p_2}\cdots\sum_{i_d=1}^{p_d} t_{i_1 i_2 \cdots i_d}\, s_{i_1 i_2 \cdots i_d}.$   (3)
+ The induced norm of the tensor inner product is the Frobenius norm defined above, with the typical relation $\langle \mathcal{T}, \mathcal{T}\rangle = \|\mathcal{T}\|^2$.
+ Given a tensor T and a permutation q = ⟨q_1, . . . , q_d⟩ of the indices 1:d, the q-transpose of T is the tensor $\mathcal{T}^{\langle q\rangle} \in \mathbb{R}^{p_{q_1}\times p_{q_2}\times\cdots\times p_{q_d}}$ with entries
+ $(\mathcal{T}^{\langle q\rangle})_{i_1 i_2 \ldots i_d} = t_{i_{q_1} i_{q_2} \ldots i_{q_d}}.$   (4)
+ At times we will need to matricize the tensors under consideration by rearranging their entries in specific ways, as well as employ various tensor-tensor, tensor-matrix, and matrix-matrix products. In the definitions below and throughout the manuscript we will primarily follow the notational conventions introduced by Kolda and Bader in [18].
+ The mode-n flattening or unfolding of the tensor T is the matrix $T_{(n)} \in \mathbb{R}^{p_n \times N/p_n}$, where $N = \prod_i p_i$, whose columns are the mode-n fibers of T.
+ The n-mode product of a tensor T and a matrix $A \in \mathbb{R}^{J\times p_n}$ is the tensor $\mathcal{T}\times_n A$ of size $p_1 \times p_2 \times\cdots\times p_{n-1}\times J \times p_{n+1}\times\cdots\times p_d$ with entries
+ $(\mathcal{T}\times_n A)_{i_1 i_2 \ldots i_{n-1}\, j\, i_{n+1}\ldots i_d} = \sum_{i_n=1}^{p_n} t_{i_1 i_2 \ldots i_d}\, a_{j i_n}.$   (5)
+ If $\mathcal{S} = \mathcal{T}\times_n A$, then the n-mode product as defined above is equivalent to $S_{(n)} = A\,T_{(n)}$.
+ The Kronecker product of two matrices $A \in \mathbb{R}^{I\times J}$ and $B \in \mathbb{R}^{K\times L}$ is the matrix $A\otimes B \in \mathbb{R}^{IK\times JL}$ defined by
+ $A \otimes B = \begin{bmatrix} a_{11}B & a_{12}B & \cdots & a_{1J}B \\ a_{21}B & a_{22}B & \cdots & a_{2J}B \\ \vdots & \vdots & \ddots & \vdots \\ a_{I1}B & a_{I2}B & \cdots & a_{IJ}B \end{bmatrix}.$   (6)
+ Finally, given two tensors $\mathcal{T} \in \mathbb{R}^{p_1\times\cdots\times p_d}$ and $\mathcal{S} \in \mathbb{R}^{q_1\times\cdots\times q_e}$, their outer product is the tensor $\mathcal{T}\circ\mathcal{S}$ of size $p_1\times\cdots\times p_d\times q_1\times\cdots\times q_e$ with entries
+ $(\mathcal{T}\circ\mathcal{S})_{i_1 i_2 \ldots i_d\, j_1 j_2 \ldots j_e} = t_{i_1 i_2 \ldots i_d}\, s_{j_1 j_2 \ldots j_e}.$   (7)
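For readers who want to experiment with these operations, the following is a minimal, illustrative NumPy sketch (not part of the original paper; the paper's experiments use the MATLAB Tensor Toolbox [21]) of the Frobenius norm, inner product, mode-n unfolding, and n-mode product defined above.

import numpy as np

def frobenius_norm(T):
    # Square root of the sum of squared entries, Eq. (2).
    return np.sqrt(np.sum(T ** 2))

def inner_product(T, S):
    # Entrywise inner product of two same-shaped tensors, Eq. (3).
    return np.sum(T * S)

def unfold(T, n):
    # Mode-n flattening T_(n): mode n becomes the rows, remaining modes the columns.
    return np.moveaxis(T, n, 0).reshape(T.shape[n], -1)

def fold(M, n, shape):
    # Inverse of unfold for a tensor of the given full shape.
    full = [shape[n]] + [s for i, s in enumerate(shape) if i != n]
    return np.moveaxis(M.reshape(full), 0, n)

def mode_n_product(T, A, n):
    # n-mode product T x_n A, Eq. (5): A applied to the mode-n unfolding, folded back.
    new_shape = list(T.shape)
    new_shape[n] = A.shape[0]
    return fold(A @ unfold(T, n), n, tuple(new_shape))

# Small usage example.
T = np.random.randn(5, 6, 12)
A = np.random.randn(4, 6)
S = mode_n_product(T, A, 1)                      # shape (5, 4, 12)
assert np.allclose(unfold(S, 1), A @ unfold(T, 1))   # S_(n) = A T_(n)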
208
+ B. Decomposition-based Denoising
209
+ Tensor decomposition methods seek to represent a given
210
+ tensor by decomposing it into factors such as simple tensors or
211
+ matrices and whose combination results in a “good” approx-
212
+ imation of the original tensor. In the context of denoising,
213
+ it is typical to assume that a noisy signal is sparse, in the
214
+ sense that its ℓ1 norm is small. In the case of matrices and
215
+ tensors, this assumption corresponds to the original tensor
216
+ having low rank, with the high rank components corresponding
217
+ to additive noise. Thus, computing a low rank approximation
218
+ of the original tensor via tensor decomposition is a means to
219
+ effect tensor denoising.
220
+ In the case of matrices (order two tensors), singular value
221
+ decomposition yields the exact rank r of the matrix and its
222
+ decomposition into r factors, with the best low rank approxi-
223
+ mation for a given rank l < r provided by choosing the factors
224
+ corresponding to the l largest singular values. For higher order
225
+ tensors, calculating the exact rank is NP hard [5]. Moreover,
226
+ unlike the matrix case, the factors used to create the best rank
227
+ r − 1 approximation need not be those used to produce the
228
+ best rank r approximation [19], and for degenerate tensors,
229
+ the best rank r approximation may not even exist [20].
230
+ Despite these theoretical limitations, in practice one can
231
+ utilize tensor decomposition methods to effect denoising by
232
+ creating decompositions for a range of rank values, then choos-
233
+ ing the best rank r decomposition D that best approximates
234
+ the original tensor T , e.g., min ∥T − D∥. This strategy for
235
+ tensor denoising was evaluated using three common tensor
236
+ decomposition methods: canonical polyadic decomposition,
237
+ higher-order orthogonal iteration, and multiway Wiener filters.
238
+ 1) CP Decomposition via Alternating Least Squares (CP-
239
+ ALS): Let U (j) = [uj,1uj,2 . . . uj,r] ∈ Rpj×r, 1 ≤ j ≤ d. CP
240
+ decomposition factorizes a d-way tensor into d factor matrices
241
+ and a vector Λ = [λ1, λ2, . . . , λr] ∈ Rr:
242
+ $\mathcal{S} = \sum_{i=1}^{r} \lambda_i\, u_{1,i} \circ u_{2,i} \circ \cdots \circ u_{d,i}.$   (8)
+ The best rank r approximation problem for a tensor $\mathcal{T} \in \mathbb{R}^{p_1\times p_2\times\cdots\times p_d}$ can be given as:
+ $\min_{\Lambda, U^{(1)}, \ldots, U^{(d)}} \|\mathcal{T} - \mathcal{S}\| \quad\text{where}\quad \mathcal{S} = [\Lambda\, ;\, U^{(1)}, U^{(2)}, \ldots, U^{(d)}].$   (9)
254
+ This can be found by employing alternating least squares
255
+ (ALS), wherein in each iteration of the algorithm an approxima-
256
+ tion of the flattening for one mode is found by fixing all other
257
+ modes of the tensors and solving a least squares problem.
258
+ This process is repeated, cycling through all modes, until
259
+ convergence or a maximum number of iterations is reached. In
260
+ this work, the implementation of CP-ALS from TensorToolbox
261
+ [21] was utilized with the default level of tolerance (10−4)
262
+ and maximum number of iterations (50). CP-ALS was run
263
+ for specified rank values r ∈ [1, min(pi)], with the rank r∗
264
+ approximation
265
+ $\mathcal{D}_{r^*} = \operatorname{argmin}_{r} \|\mathcal{T} - \mathcal{D}_r\|$   (10)
269
+ chosen as the best denoised tensor.
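A hedged sketch of this rank-sweep selection (Eq. 10) is given below; cp_approximation is a placeholder for any CP solver returning a rank-r reconstruction, standing in for the Tensor Toolbox cp_als routine used in the paper.

import numpy as np

def best_rank_denoise(T, cp_approximation, max_rank=None):
    # Sweep candidate ranks and keep the reconstruction closest to T in Frobenius norm.
    max_rank = max_rank or min(T.shape)
    best, best_err = None, np.inf
    for r in range(1, max_rank + 1):
        D_r = cp_approximation(T, r)        # rank-r CP reconstruction (placeholder solver)
        err = np.linalg.norm(T - D_r)       # Frobenius norm of the residual
        if err < best_err:
            best, best_err = D_r, err
    return best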
270
+
271
+ 3
272
+ 2) Higher-Order Orthogonal Iteration (HOOI): For matri-
273
+ ces, orthogonal iteration produces a sequence of orthonormal
274
+ bases for each subspace of the vector space. De Lathauwer
275
+ et al. [22] extended this to tensors, developing the technique
276
+ known as higher-order orthogonal iteration (HOOI). This
277
+ method uses ALS to estimate the best rank-[r1, . . . , rd] ap-
278
+ proximation for a tensor, and is achieved by iteratively solving
279
+ the optimization problem
280
+ $\operatorname{argmin}_{U^{(i)}|_{r_i}} \big\| \mathcal{T} - \mathcal{G} \times_1 U^{(1)}|_{r_1} \times_2 U^{(2)}|_{r_2} \times \cdots \times_d U^{(d)}|_{r_d} \big\|,$   (11)
+ where $\mathcal{G}$ is a core tensor of size $r_1 \times \cdots \times r_d$ and each $U^{(i)}|_{r_i}$ is a matrix comprised of the $r_i$ leftmost singular vectors of the singular value decomposition of the modal flattening $T_{(i)}$.
+ HOOI-based denoising was implemented using the tucker_als method in TensorToolbox [21] to determine the best rank $[r^*_1, \ldots, r^*_d]$ approximation, where each $r_i$ was
295
+ chosen equally and uniformly from r ∈ [1, min(pi)], with the
296
+ rank r∗ approximation (Eq. 10) chosen as the best denoised
297
+ tensor.
298
+ 3) Multiway Wiener Filter: For a discrete signal y[n] and
299
+ filter output ˆy[n], the Wiener filter h[n] is the filter that
300
+ minimizes the mean squared error between ˆy[n] and y[n]:
301
+ $\operatorname{argmin}_{h[\cdot]} \; \mathbb{E}\big[(\hat y[n] - y[n])^2\big].$   (12)
303
+ Wiener filters have been used in a variety of denoising appli-
304
+ cations, such as for images [23], [24], physiological signals
305
+ [25], [26] and speech [27], [28].
306
+ Muti et al. [29] created a multiway Wiener filter that can be
307
+ used to denoise tensors of arbitrary size. Given a noisy tensor
308
+ T , their method uses an ALS approach to learn Wiener filters
309
+ {Hn} for each mode n so that the mean squared error between
310
+ T and the denoised tensor D is minimized, where
311
+ D = T ×1 H1 ×2 H2 ×3 · · · ×d Hd.
312
+ (13)
313
+ The implementation of the multiway Wiener filter utilized in
314
+ this study and the exposition below follows [30]. The filters
315
+ Hn in each mode n are initialized to the identity matrix
316
+ of Rpn. At each stage k of the algorithm, the filter Hk
317
+ n is
318
+ computed for each mode as
319
+ $H_n^{k} = V_n \Lambda_n V_n^{\top},$   (14)
+ where $V_n$ is a matrix containing the $K_n$ orthonormal basis vectors of the signal subspace in the column space of $T_{(n)}$, the mode-n flattening of $\mathcal{T}$, and
+ $\Lambda_n = \operatorname{diag}\!\left(\frac{\lambda^{\gamma}_{1} - \hat\sigma^{\gamma 2}_{n}}{\lambda^{\Gamma}_{1}}, \ \ldots, \ \frac{\lambda^{\gamma}_{K_n} - \hat\sigma^{\gamma 2}_{n}}{\lambda^{\Gamma}_{K_n}}\right),$   (15)
+ where $\{\lambda^{\gamma}_{i}, i = 1, \ldots, K_n\}$ and $\{\lambda^{\Gamma}_{i}, i = 1, \ldots, K_n\}$ are respectively the $K_n$ largest eigenvalues of the matrices $\gamma_n$ and $\Gamma_n$, defined as
+ $\gamma_n = \mathbb{E}\!\left[T_{(n)}\, q_n\, T_{(n)}^{\top}\right]$   (16)
+ $\Gamma_n = \mathbb{E}\!\left[T_{(n)}\, Q_n\, T_{(n)}^{\top}\right]$   (17)
+ with
+ $q_n = \bigotimes_{i \neq n}^{d} H_i$   (18)
+ $Q_n = \bigotimes_{i \neq n}^{d} H_i^{\top} H_i.$   (19)
+ The values $\hat\sigma^{\gamma 2}_{n}$ in Equation 15 are estimates of the $p_n - K_n$ smallest eigenvalues of $\gamma_n$, calculated as
+ $\hat\sigma^{\gamma 2}_{n} = \frac{1}{p_n - K_n} \sum_{i = K_n + 1}^{p_n} \lambda^{\gamma}_{i}.$   (20)
387
+ Following [31], the optimal Kn for mode n is estimated using
388
+ the Akaike Information Criterion (AIC). Please refer to [30]
389
+ for further details.
390
+ III. AMPLIFICATION AND STABLE RANK DENOISING
391
+ In this section we introduce three different denoising meth-
392
+ ods – Amplification-based, Stable Slice Rank, and Stable X-
393
+ Rank denoising – that leverage the general framework for
394
+ denoising based on dual norms as introduced in Derksen [15],
395
+ to effect denoising on tensors.
396
+ A. A Framework for Denoising Using Dual Norms
397
+ The model T = D +N utilized in this work can be viewed
398
+ as an instance of the additive noise model c = a + b, where
399
+ a, b, c ∈ V are elements of a vector space V . In Derksen
400
+ [15], a general framework for understanding the denoising of
401
+ vectors under the additive model was developed that relates the
402
+ problem of denoising the vector c to the minimization of ∥a∥X
403
+ and ∥b∥Y , where ∥ · ∥X and ∥ · ∥Y are dual norms. Moreover,
404
+ the framework makes the assumptions that the original vector
405
+ (or tensor) a is sparse, e.g., that it has few non-zero values or
406
+ is of low rank, while the additive noise b is dense or of high
407
+ rank. Thus, the norms ∥ · ∥X and ∥ · ∥Y can be interpreted as
408
+ respectively measuring the sparsity and noise of the vector (or
409
+ tensor) under consideration. The prototypical ∥ · ∥X norm is
410
+ the nuclear norm, which for a matrix is the sum of its singular
411
+ values, while for a tensor the tensor nuclear norm ∥T ∥⋆, is
412
+ defined as
413
+ $\|\mathcal{T}\|_\star = \min \sum_{i=1}^{r} \|v_i\|_2,$
+ where the minimum is taken over all decompositions into rank-1 tensors $v_1, \ldots, v_r$ with $\mathcal{T} = \sum_{i=1}^{r} v_i$.
420
+ The prototypical ∥·∥Y norm and dual to ∥·∥X is the spectral
421
+ norm, which for a matrix is the absolute value of its largest
422
+ singular value, while for a tensor the tensor spectral norm
423
+ ∥T ∥σ is defined as
424
+ ∥T ∥σ = sup |T · u1 ⊗ u2 ⊗ . . . ⊗ ud|,
425
+ where uj ∈ Rpj and ∥uj∥ = 1 for 1 ≤ j ≤ d.
426
+ If V is also an inner product space we also have the induced norm $\sqrt{\langle c, c\rangle}$, which corresponds to the standard Euclidean norm
430
+ ∥c∥2 for vectors or the Frobenius norm ∥ · ∥, introduced in
431
+ Section II-A, for matrices and tensors. As shown in [15],
432
+ the denoising of a vector c via a decomposition c = a + b
433
+
434
+ 4
435
+ that simultaneously minimizes the values ∥a∥X and ∥b∥Y is
436
+ governed by the Pareto frontier, which models the competing
437
+ objectives of minimizing the two norms in terms of Pareto
438
+ efficiency, and the above XY -decomposition that achieves this
439
+ is deemed Pareto efficient. Moreover, [15] defines the related
440
+ notion of the Pareto subfrontier, which relates the three norms
441
+ ∥ · ∥X, ∥ · ∥Y , ∥ · ∥2 and their induced decompositions XY ,
442
+ X2, and 2Y , describing the conditions under which these
443
+ decompositions can achieve Pareto efficiency.
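As an illustration of the dual-norm pair in the matrix case (where both norms are computable from the SVD), consider the following sketch; for tensors of order three or higher these norms are NP-hard, which motivates the amplification approximations described next.

import numpy as np

def nuclear_norm(A):
    # Sum of singular values: the prototypical "sparsity" norm ||.||_X.
    return np.sum(np.linalg.svd(A, compute_uv=False))

def spectral_norm(A):
    # Largest singular value: the dual "noise" norm ||.||_Y.
    return np.max(np.linalg.svd(A, compute_uv=False))

A = np.random.randn(8, 5)
# Duality in action: |<A, B>| <= nuclear_norm(A) * spectral_norm(B) for any B of matching size.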
444
+ B. Amplification-based Denoising
445
+ To make use of the denoising framework introduced in
446
+ [15] requires the calculations of various norms for the vec-
447
+ tors of interest. While the Frobenius norm of a tensor is
448
+ easily obtained, computing either the nuclear norm [16] or
449
+ the spectral norm [5] for tensors is NP-hard. In order to
450
+ obtain an approximation to the tensor spectral norm, the co-
451
+ authors developed the methodology of tensor amplification
452
+ [17]. For a matrix A with singular values λ1, . . . , λr, the
453
+ function φ : A → AA⊺A produces the matrix AA⊺A whose
454
+ singular values are $\lambda_1^3, \ldots, \lambda_r^3$. Repeated applications of φ(·)
457
+ will amplify the larger singular values, which correspond to
458
+ the sparse or low rank components of the matrix, while
459
+ minimizing smaller singular values that likely correspond to
460
+ noise.
461
+ Analogously, tensor amplification utilizes degree d polyno-
462
+ mial functions on tensors to amplify the low rank structure.
463
+ Moreover, for each amplification map Φσ′ there exists a cor-
464
+ responding norm ∥·∥σ′,d that approximates the tensor spectral
465
+ norm, in the sense that limd→∞ ∥T ∥σ′,d = ∥T ∥σ. Two such
466
+ amplification maps – Φσ,4 and Φ# – were introduced for
467
+ order 3 tensors in [17], with Φ# being shown to be a better
468
+ approximation to the tensor spectral norm than Φσ,4.
469
+ The method presented in Algorithm 1 utilizes the 2Y -
470
+ decomposition framework of [15] and the tensor spectral norm
471
+ approximations Φ to denoise a given tensor T . The algorithm
472
+ allows for the choice of amplification map as well as the
473
+ number of amplifications per round. For third order tensors
474
+ the amplification map Φ# was used, while for fourth order
475
+ tensors a compatible version of Φσ,4 was employed as there
476
+ is no currently known analogue of the Φ# map for fourth
477
+ order tensors. Multiple experiments were performed with m,
478
+ the number of amplifications per round, ranging from 1 to
479
+ 10, with m = 5 being found to produce the best denoising
480
+ performance.
481
+ C. Stable Slice Rank Denoising
482
+ Slice rank was introduced in [32] in relation to the cap set
483
+ problem. Following Tao [33], the slice rank of a tensor T
484
+ is the least non-negative integer srk such that T is a sum
485
+ of tensors with slice rank 1, i.e., $\mathcal{T} = \sum_{i=1}^{r} \mathcal{T}_i$, where $\mathcal{T}_i$ is
487
+ contained in the tensor product space
488
+ V1 ◦ · · · ◦ Vi−1 ◦ s ◦ Vi+1 ◦ · · · ◦ Vd,
489
+ (21)
490
+ where Vj are vector spaces and s is a vector in some Vi. In
491
+ [34], the notion of a stable rank for matrices was introduced,
492
+ in which the matrix rank function rank(A), is replaced by the
493
+ numerical rank function $\|A\|^2 / \|A\|_\sigma^2$, or the related stable nuclear rank $\|A\|_\star^2 / \|A\|^2$. These ranks are stable in the sense that small perturbations of the values of the matrix A will not change their value. Extending this methodology to tensors, the stable slice rank is defined as
+ $\frac{\sum_{i=1}^{d} \|T_{(i)}\|_\star^2}{\|\mathcal{T}\|^2},$   (22)
+ where $T_{(i)}$ are the mode-i flattenings of $\mathcal{T}$.
+ Algorithm 1 Amplification-based tensor denoising.
+   D ← DENOISE_AMPLIFICATION(T, Φ, m)
+     ϵ ← ∥T∥
+     N ← T
+     while true do
+       A ← Φ^m(N)
+       A ← A / ∥A∥
+       N ← N − ⟨A, N⟩ A
+       break if ∥N∥ < ϵ
+     end while
+     D ← T − N
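The following Python sketch mirrors the deflation loop of Algorithm 1 under simplifying assumptions: the order-specific tensor amplification maps Φ# and Φσ,4 of [17] are not reproduced here, so the matrix map φ(A) = AA⊺A is used as a stand-in, and the stopping threshold is a tunable fraction of ∥T∥ rather than ϵ = ∥T∥.

import numpy as np

def amplify(A, m=5):
    # Matrix stand-in for the tensor amplification maps: each pass cubes the
    # singular values; rescaling avoids overflow (only the direction matters).
    for _ in range(m):
        A = A @ A.T @ A
        A = A / np.linalg.norm(A)
    return A

def denoise_amplification(T, m=5, tol=0.5, max_iter=50):
    # Deflation loop: repeatedly extract the amplified dominant component from
    # the residual N, then return D = T - N as the denoised estimate.
    N = T.copy()
    for _ in range(max_iter):
        A = amplify(N, m)
        A = A / np.linalg.norm(A)
        coeff = np.sum(A * N)                # <A, N>
        N = N - coeff * A
        if np.linalg.norm(N) < tol * np.linalg.norm(T):
            break
    return T - N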
523
+ For a given value of the parameter λ, the stable slice rank
524
+ (SliceRank) method denoises a tensor by finding a decompo-
525
+ sition T = D + N that minimizes the Frobenius norm of
526
+ D = Σ_i S_i under the constraints that the nuclear norms of the flattenings of N are all ≤ λ. The method also produces a decomposition D = Σ_i S_i that minimizes the sum of the nuclear norms of S_(i), the mode-i flattenings of S_i. Typically, the S_(i) have low rank.
+ Algorithm 2 Stable SliceRank denoising.
+   (D, {S_i}, ssrk) ← DENOISE_SLICERANK(T, λ, acc)
+     S_i ← 0 ∈ R^{p1×p2×···×pd} for i = 1:d
+     curr_acc ← 0
+     while curr_acc < acc do
+       for i ← 1:d do
+         A ← T − Σ_{j≠i} S_j
+         q ← CIRCSHIFT([1, . . . , d], −(i−1));  A ← A⟨q⟩
+         (U, D, V) ← SVD(A_(i))
+         E_i ← MAX(D − λ, 0);  e_i ← DIAG(E_i)
+         F ← U · E_i · V^⊤
+         F ← RESHAPE(F, p_i, . . . , p_d, p_1, . . . , p_{i−1})
+         q ← CIRCSHIFT([1, . . . , d], (i−1));  S_i ← F⟨q⟩
+       end for
+       curr_acc ← ⟨T − Σ_{j=1}^d S_j, Σ_{j=1}^d S_j⟩ / (λ Σ_{j=1}^d ∥A_(j)∥⋆)
+     end while
+     D ← Σ_{i=1}^d S_i
+     ssrk ← (Σ_{i=1}^d ∥A_(i)∥⋆²) / ∥D∥²
565
+ Algorithm 2 depicts the implementation of SliceRank de-
566
+ noising, which utilizes a number of auxiliary functions from
567
+
568
+ 5
569
+ MATLAB [35]: circshift performs a cyclic permutation of
570
+ an index set [1, . . . , d], with the second parameter determining
571
+ the number of forward or backwards shifts; reshape is used
572
+ to flatten a tensor into a matrix with the specified dimensions;
573
+ and diag returns a vector comprising the entries on the
574
+ main diagonal of the specified matrix. The algorithm takes
575
+ as hyperparameters λ as described above and acc ∈ (0, 1],
576
+ the specified accuracy level that once achieved the algorithm
577
+ terminates. The algorithm returns the denoised tensor D, the
578
+ decomposition factors Si, and ssrk, the stable slice rank of
579
+ D. The hyperparameters were optimized via grid search over
580
+ the ranges λ ∈ {10−2, 0.1, 1, 10} and acc ∈ {0.90, 0.95}.
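A direct NumPy computation of the stable slice rank in Eq. (22) (illustrative only) looks as follows.

import numpy as np

def unfold(T, n):
    # Mode-n flattening of T.
    return np.moveaxis(T, n, 0).reshape(T.shape[n], -1)

def stable_slice_rank(T):
    # Eq. (22): sum of squared nuclear norms of the flattenings over ||T||^2.
    num = 0.0
    for i in range(T.ndim):
        s = np.linalg.svd(unfold(T, i), compute_uv=False)
        num += np.sum(s) ** 2                # ||T_(i)||_* squared
    return num / np.sum(T ** 2)              # divide by ||T||^2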
581
+ D. Stable X-Rank Denoising
582
+ As noted in Section II-B, a degenerate tensor T of rank r
583
+ may not have a best rank k < r approximation for a given
584
+ rank k. In such cases, a tensor may be approximated to any
585
+ desired precision by rank j < k tensors. This is due to the set
586
+ of all tensors for a given rank r not being Zariski closed [20].
587
+ In Derksen [36], the G-stable rank of a tensor was introduced
588
+ that, among its other advantages, is Zariski closed. Thus, every
589
+ tensor T has a best G-stable rkG < r approximation. The G-
590
+ stable α rank of a tensor can be defined as
591
+ $\operatorname{rk}^G_\alpha(\mathcal{T}) = \sup_{g\in G}\; \min_i \; \frac{\alpha_i \, \|g\cdot \mathcal{T}\|^2}{\|(g\cdot \mathcal{T})_{(i)}\|_\sigma^2},$   (23)
600
+ where α = (α1, . . . , αd) and g is an element of a reductive
601
+ group G, i.e., g ∈ SL(Rp1) × · · · × SL(Rpd).
602
+ Using the above definition we can define the related concept
603
+ of stable X-rank, which is
604
+ $\operatorname{sXrk}^G(\mathcal{T}) = \max_\alpha \operatorname{rk}^G_\alpha(\mathcal{T}),$   (24)
+ where α is subject to the restriction that $\sum_i \alpha_i = d$. Algorithm
611
+ 4 depicts the implementation of the stable X-Rank (XRank)
612
+ denoising method. Like SliceRank, the method imposes a con-
613
+ straint on the nuclear norm of the flattenings of N. However,
614
+ in the XRank method, this cutoff is determined automatically
615
+ using Algorithm 3. The hyperparameters were optimized via
616
+ grid search over the ranges λ ∈ {10−2, 0.1, 1, 10} and acc ∈
617
+ {0.90, 0.95}.
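As an illustration, the inner objective of Eq. (23) can be evaluated at g equal to the identity, which yields a lower bound on the G-stable α rank for a fixed α; the supremum over G is not attempted in this sketch.

import numpy as np

def unfold(T, n):
    return np.moveaxis(T, n, 0).reshape(T.shape[n], -1)

def alpha_rank_at_identity(T, alpha):
    # min_i alpha_i * ||T||^2 / ||T_(i)||_sigma^2, with ||.||_sigma the largest
    # singular value of the mode-i flattening; a lower bound on rk^G_alpha(T).
    fro2 = np.sum(T ** 2)
    vals = []
    for i in range(T.ndim):
        sigma = np.linalg.svd(unfold(T, i), compute_uv=False)[0]
        vals.append(alpha[i] * fro2 / sigma ** 2)
    return min(vals)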
618
+ IV. EXPERIMENTAL RESULTS AND DISCUSSION
619
+ In order to evaluate the various denoising methods under
620
+ consideration, two sets of synthetic tensors were generated
621
+ with varying orders, ranks, and dimensions, resulting in 512
622
+ parameter combinations. For each combination, one hundred
623
+ (100) tensors were generated. For all synthetic tensors, varying
624
+ amounts of noise were added from a standard Gaussian
625
+ distribution N(0, 1), with the resulting noisy tensors having
626
+ signal-to-noise ratios (SNR) ranging from 20 dB to −20 dB.
627
+ The full range of parameters is provided in Table I.
628
+ Additionally, two sets of tensors were extracted from elec-
629
+ trocardiogram (ECG) signals to which Gaussian noise was
630
+ added prior to tensor extraction, using the same range of
631
+ resultant SNRs as those employed in the generation of the
632
+ synthetic tensors.
633
+ Algorithm 3 Determine the nuclear norm cutoff for XRank
634
+ denoising.
635
+   c ← FIND_CUTOFF(f = [λ_1, . . . , λ_r]^⊤, λ)
+     r ← |f|
+     t ← 0 ∈ R^r
+     for i ← 1:r do
+       t_i ← λ (Σ_{j=1}^{i} λ_j) / (1 + λ · i)
+     end for
+     S ← 0 ∈ R^{r×r}
+     for i ← 1:r do
+       for j ← 1:r do
+         s_ij ← MAX(f_i − t_j, 0)
+       end for
+     end for
+     v ← 0 ∈ R^r
+     for j ← 1:r do
+       v_j ← Σ_{i=1}^{r} (f_i − s_ij)² + λ Σ_{i=1}^{r} (s_ij)²
+     end for
+     k ← ARGMIN(v)
+     c ← t_k
658
+ Algorithm 4 Stable XRank denoising.
659
+   (D, {S_i}, sxrk) ← DENOISE_XRANK(T, λ, acc)
+     S_i ← 0 ∈ R^{p1×p2×···×pd} for i = 1:d
+     curr_acc ← 0
+     while curr_acc < acc do
+       for i ← 1:d do
+         A ← T − Σ_{j≠i} S_j
+         q ← CIRCSHIFT([1, . . . , d], −(i−1));  A ← A⟨q⟩
+         (U, D, V) ← SVD(A_(i))
+         c ← FIND_CUTOFF(DIAG(D), λ)
+         E_i ← MAX(D − c, 0);  e_i ← DIAG(E_i)
+         F ← U · E_i · V^⊤
+         F ← RESHAPE(F, p_i, . . . , p_d, p_1, . . . , p_{i−1})
+         q ← CIRCSHIFT([1, . . . , d], (i−1));  S_i ← F⟨q⟩
+       end for
+       S_curr ← Σ_{i=1}^d S_i
+       T_S ← T − S_curr
+       y ← 0
+       for i ← 1:d do
+         q ← CIRCSHIFT([1, . . . , d], −(i−1));  B ← T_S⟨q⟩
+         (U, D, V) ← SVD(B_(i))
+         y ← y + d_1²   (d_1 the largest diagonal entry of D)
+       end for
+       y ← √y
+       curr_acc ← ⟨T, S_curr⟩ / ( y · √(Σ_{j=1}^d ∥A_(j)∥⋆²) )
+     end while
+     D ← Σ_{i=1}^d S_i
+     sxrk ← (Σ_{j=1}^d ∥A_(j)∥⋆²) / ∥D∥²
702
+
703
+ 6
704
+ TABLE I: Parameters and their respective values used to
705
+ generate the synthetic tensor datasets.
706
+ Parameter      Range/Values
+ Distribution   Normal N(0, 1)
+ Order          3, 4
+ Rank           [1, 5], 10, 20, 25
+ Size           5, 10, 25, 50
+ SNR (dB)       20, 10, 5, 1, −1, −5, −10, −20
718
+ 1) Uniform Synthetic Tensors: In this dataset, the dimen-
719
+ sions of a given tensor are chosen uniformly across each mode.
720
+ To generate synthetic tensors from a distribution D of a given
721
+ rank r, size s, and order d, scalar values λ1, . . . , λr are chosen
722
+ from D, then for each mode j, r random vectors xj,i ∈ Rs
723
+ are chosen from D. The synthetic tensor is then $\sum_{i=1}^{r} \lambda_i\, x_{1,i} \circ x_{2,i} \circ \cdots \circ x_{d,i}$.
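A sketch of this construction in NumPy (illustrative; the actual datasets were generated as described above).

import numpy as np

def random_low_rank_tensor(rank, size, order, rng=None):
    # Draw lambda_i and mode vectors from N(0, 1) and sum r outer products.
    rng = rng or np.random.default_rng()
    T = np.zeros((size,) * order)
    for _ in range(rank):
        lam = rng.standard_normal()
        vecs = [rng.standard_normal(size) for _ in range(order)]
        component = vecs[0]
        for v in vecs[1:]:
            component = np.multiply.outer(component, v)
        T += lam * component
    return T

D = random_low_rank_tensor(rank=3, size=10, order=3)   # a 10 x 10 x 10 tensor of rank at most 3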
728
+ 2) Non-Uniform Synthetic Tensors: In this dataset, one
729
+ mode mk of a given tensor is “stretched” to a different
730
+ dimension dk by choosing a number uniformly in the range
731
+ dk = [s, min(500, sd)], i.e., the lower bound is the dimension
732
+ of the other modes while the upper bound is the product of
733
+ the dimensions of each mode or 500, whichever is lower. After
734
+ choosing the stretch mode and its dimension the tensors are
735
+ generated in the same manner as for the uniform tensors above.
736
+ 3) ECG Waveform Tensors: The PTB Diagnostic ECG
737
+ Database [37] is comprised of high resolution (1 kHz) digitized
738
+ recordings of electrocardiograms (ECGs) from patients with
739
+ various cardiovascular diseases, including myocardial infarc-
740
+ tion, heart failure, and arrhythmia, as well as healthy controls.
741
+ The database is publicly available via Physionet [38].
742
+ Tensor-based methods have been shown to be effective for
743
+ a number of ECG analytical tasks, a survey of such methods
744
+ can be found in [39]. Given the utility of tensor-based methods
745
+ in this context, and the fact that recordings of physiological signals are often corrupted by noise, these data present a natural application of the proposed denoising methods. In order to evaluate these
748
+ methods, we first must construct tensors from the ECGs. In
749
+ forming these tensors, one has to consider the amount of
750
+ signal over which to perform subsequent signal processing and
751
+ feature extraction: two methods were employed. In the first,
752
+ ninety (90) seconds of a patient’s ECG recording was sampled
753
+ across all twelve ECG leads, while in the second method three
754
+ windowed samples of thirty (30) seconds each were extracted.
755
+ Using these two sampling strategies we adapted the tensor
756
+ formation method introduced in [40] that has been shown
757
+ to be effective for subsequent applications of machine learn-
758
+ ing for prognosticating severe cardiovascular conditions [41],
759
+ [42]. In this method, each ECG signal is preprocessed using
760
+ the taut string method, which produces a piecewise linear
761
+ approximation of a given signal, parametrized by ϵ, which
762
+ controls the coarseness of the approximation. Given a discrete
763
+ signal x = (x1, . . . , xn) one can define the finite difference
764
+ D(x) = (x2 − x1, . . . , xn − xn−1). For a fixed ϵ > 0, the
765
+ taut string estimate of x is the unique function y such that
766
+ ∥x − y∥∞ ≤ ϵ and ∥D(y)∥2 is minimal. The taut string
767
+ approximation can be found efficiently using the method in
768
+ [43].
769
+ After the taut string approximation for a given signal is
770
+ found, six morphological and statistical features are extracted
771
+ following [40]. This process is repeated for five values of
772
+ epsilon: (0.0100, 0.1575, 0.3050, 0.4525, 0.6000). As each pa-
773
+ tient’s ECG recording is comprised of the standard 12 leads,
774
+ the approximation of each 90 second ECG sample via taut
775
+ string and the extraction of taut string features yields third
776
+ order tensors of size 5 × 6 × 12 for each patient. For the
777
+ windowed samples, fourth order tensors were formed of size
778
+ 5 × 6 × 12 × 3, with the fourth mode corresponding to the
779
+ features extracted in each window.
780
+ 4) Adding Noise: For every generated synthetic tensor, a
781
+ set of noisy tensors was created by adding Gaussian noise
782
+ (N(0, 1)) so that the resultant tensors had SNRs in the
783
+ range [20, 10, 5, 1, −1, −5, −10, −20]. For the ECG waveform
784
+ tensors, Gaussian noise was added to each ECG signal to
785
+ produce a set of noisy signals with the same SNR range as
786
+ for the synthetic tensors. However, this was performed prior
787
+ to tensor formation given that in practical applications ECG
788
+ signals themselves may come with some intrinsic amount of
789
+ noise, rather than noise being introduced to the tensors directly.
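A sketch of scaling Gaussian noise to hit a prescribed SNR, where SNR = 10·log10(∥D∥²/∥N∥²); this convention is assumed here for illustration and may differ in detail from the exact procedure used in the experiments.

import numpy as np

def add_noise_at_snr(D, snr_db, rng=None):
    # Scale standard Gaussian noise so that the noisy array has the target SNR in dB.
    rng = rng or np.random.default_rng()
    noise = rng.standard_normal(D.shape)
    signal_power = np.sum(D ** 2)
    noise_power = np.sum(noise ** 2)
    target_noise_power = signal_power / (10 ** (snr_db / 10))
    noise *= np.sqrt(target_noise_power / noise_power)
    return D + noise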
790
+ A. Results: Synthetic Data
791
+ The overall denoising performance for third order tensors
792
+ across various ranks and tensor sizes is presented in Table II. The performance statistics for only one order are presented due to the incomparable sizes of the non-uniform tensors across orders; please see Table IV in Appendix A for the fourth-order results. The best performing denoising algorithms for uniformly
797
+ sized tensors, as depicted in Table II (a), varied by noise level.
798
+ For cleaner tensors (20 and 10 dB), the multiway Wiener
799
+ filter performed best overall, achieving mean and standard
800
+ deviations in denoised SNRs of 20.95 (13.19) and 20.22 (10.1)
801
+ dBs, respectively. For moderately noisy tensors (5, 1, and
802
+ −1 dB), ALS was the best performing denoising method,
803
+ achieving denoised SNRs of 15.11 (8.26), 11.12 (7.98), and
804
+ 9.1 (7.88) dBs. For noisier tensors with starting SNRs of −5
805
+ and −10, tensor amplification produced the best denoised
806
+ SNRs of 3.37 (5.69) and −2.25 (4.57) dBs. Finally for tensors
807
+ with starting SNRs of −20 dB, the noisiest tensors evaluated,
808
+ XRank produced on average the highest denoised SNR of
809
+ −9.22 (4.79) dB. The results for non-uniformly sized tensors,
810
+ as depicted in Table II (b), are much clearer, with ALS
811
+ achieving the best denoised SNRs across all starting SNRs,
812
+ ranging from 30.81 (12.7) dBs for tensors with starting SNRs
813
+ of 20 dB to −6.6 (8.57) dB for the noisiest tensors (starting
814
+ SNRs of −20 dB).
815
+ The relationship between tensor size (dimension of each
816
+ mode) and achieved denoised SNRs is depicted in Figure 1.
817
+ With the exception of HOOI, all other denoising algorithms
818
+ see improvements in achieved SNR as the size of the tensor
819
+ increases. The multiway Wiener filter maintains the best de-
820
+ noising performance as size increases, followed by ALS. Both
821
+ amplification and XRank have similar denoising performances,
822
+ while slice rank and HOOI have the lowest performance
823
+ overall.
824
+
825
+ 7
826
+ TABLE II: Mean (SD) SNR, in decibels, after tensor denoising across all parameters.
827
+ Starting SNR   HOOI            ALS             Wiener          Amp             SliceRank       XRank
+ 20             19.57 (3.32)    28.67 (11.1)    29.05 (13.19)   10.0 (14.13)    18.79 (1.59)    10.89 (5.64)
+ 10             10.59 (1.12)    19.91 (8.88)    20.22 (10.1)    8.65 (11.09)    13.21 (2.21)    9.84 (4.15)
+ 5              5.81 (0.75)     15.11 (8.26)    14.94 (8.59)    7.85 (9.64)     8.18 (2.52)     8.56 (3.13)
+ 1              1.87 (0.7)      11.12 (7.98)    10.34 (7.64)    7.01 (8.56)     3.54 (2.25)     6.91 (2.49)
+ -1             -0.12 (0.7)     9.1 (7.88)      7.96 (7.01)     6.48 (8.1)      1.18 (2.03)     5.86 (2.36)
+ -5             -4.12 (0.69)    4.99 (7.77)     3.37 (5.69)     5.17 (7.45)     -3.46 (1.57)    3.24 (2.6)
+ -10            -9.12 (0.69)    -0.19 (7.65)    -2.25 (4.57)    2.85 (7.35)     -9.02 (1.12)    -0.58 (3.43)
+ -20            -19.11 (0.7)    -10.38 (7.39)   -11.17 (7.38)   -11.79 (6.98)   -19.61 (0.6)    -9.22 (4.79)
+ (a) Uniformly sized tensors.
+ Starting SNR   HOOI            ALS             Wiener          Amp             SliceRank       XRank
+ 20             23.91 (7.33)    30.81 (12.7)    29.46 (13.3)    11.68 (14.81)   18.7 (1.53)     11.85 (5.83)
+ 10             15.45 (4.71)    22.75 (10.29)   21.39 (10.79)   10.33 (11.77)   13.28 (2.11)    10.75 (4.34)
+ 5              10.99 (3.94)    18.25 (9.55)    16.28 (9.86)    9.45 (10.34)    8.26 (2.44)     9.4 (3.3)
+ 1              7.27 (3.67)     14.49 (9.23)    11.84 (9.03)    8.25 (9.36)     3.63 (2.19)     7.68 (2.6)
+ -1             5.38 (3.63)     12.57 (9.12)    9.54 (8.41)     7.32 (9.03)     1.28 (2.01)     6.6 (2.42)
+ -5             1.5 (3.64)      8.66 (8.99)     5.38 (6.92)     4.89 (9.02)     -3.37 (1.61)    3.95 (2.56)
+ -10            -3.51 (3.61)    3.65 (8.86)     0.08 (5.37)     1.65 (9.72)     -8.92 (1.29)    -0.13 (3.39)
+ -20            -13.59 (3.51)   -6.6 (8.57)     -7.94 (7.44)    -11.74 (8.56)   -19.56 (0.64)   -9.01 (4.66)
+ (b) Non-uniformly sized tensors.
955
+ (a)
956
+ (b)
957
+ Fig. 1: Denoising performance with respect to tensor size.
958
+ The relationship between tensor rank and achieved de-
959
+ noised SNRs is depicted in Figure 2. Tensor amplification
960
+ achieves the best rank-1 performance for both uniformly
961
+ and non-uniformly sized tensors, followed by the multiway
962
+ Wiener filter. The denoising performance of both methods
963
+ decreases as tensor rank increases, with the multiway Wiener
964
+ filter maintaining denoising performance for higher ranks
965
+ than amplification. ALS has lower performance at low ranks
966
+ but generally maintains its denoising performance as rank
967
+ increases, ultimately achieving the best results by rank 20.
968
+ XRank has greater performance than SliceRank for uniformly
969
+ sized tensors, but their denoising performances converge prior
970
+ to rank 20 for non-uniformly sized ones. HOOI has the
971
+ lowest denoising performance for uniformly sized tensors, but
972
+ performs slightly better than the amplification and SliceRank
973
+ methods for non-uniformly sized tensors with ranks greater
974
+ than 5.
975
+ The results depicted in Table III provide a further investiga-
976
+ tion into the denoising performance for low rank (ranks 1 and
977
+ 2) and high noise (SNRs ≤ 1) tensors. From these results one
978
+ can observe that amplification achieves the best performance
979
+ for uniformly sized tensors, with the multiway Wiener filter
980
+ achieving the second-best denoising performance. In the case
981
+ of non-uniformly sized tensors, the Wiener filter and amplifi-
982
+ cation achieve comparable results for starting SNRs of 1 and
983
+ −1 dB, while amplification achieving better performance for
984
+ starting SNRs of −5,−10, and −20 dB.
985
+ B. Results: Real Data - ECG Waveform Tensors
986
+ Figure 3 shows the denoising performance on the tensors
987
+ derived from the PTB dataset. For the tensors derived from
988
+ 90 second samples of ECG signal (Figure 3 a), only the
989
+ stable rank methods (XRank and SliceRank) were able to
990
+ achieve any effective denoising, with XRank achieving the
991
+ best denoising performance with a modest ≈ 4 dB denoised
992
+ SNR for tensors whose signals had SNR ratios of 20 dB
993
+ prior to tensor formation. This performance was maintained
994
+ for tensors from signals with starting SNRs down to 5 dB,
995
+ after which the denoising performance of XRank declines. The
996
+ SliceRank method does not yield any tensor denoising until the
997
+ starting signal SNR dropped below −5 dB, after which it too
998
+ experiences a continued decline in denoising performance. All
999
+
1000
+ [Figure 1 plot data omitted. Panels: (a) “Uniform Synthetic Tensors”, (b) “Non-uniform Synthetic Tensors”; x-axis: Size (5, 10, 25, 50); y-axis: Denoised SNR (dB); series: HOOI, ALS, Wiener, Amp, SliceRank, XRank.]
1041
+ (a)
1042
+ (b)
1043
+ Fig. 2: Denoising performance with respect to tensor rank.
1044
+ TABLE III: Mean (SD) denoised SNR, in decibels, for low rank and noisy tensors.
1045
+ Starting SNR   HOOI            ALS             Wiener          Amp             SliceRank       XRank
+ 1              1.59 (0.36)     5.97 (2.93)     12.21 (4.05)    14.98 (7.72)    5.64 (2.29)     8.89 (1.91)
+ -1             -0.42 (0.35)    3.92 (2.92)     10.31 (3.85)    13.66 (7.28)    3.33 (2.11)     7.34 (2.01)
+ -5             -4.43 (0.32)    -0.17 (2.91)    6.42 (3.78)     10.68 (6.90)    -1.52 (1.70)    3.77 (2.34)
+ -10            -9.45 (0.30)    -5.24 (2.93)    1.90 (3.84)     6.16 (7.47)     -7.49 (1.39)    -0.79 (2.88)
+ -20            -19.45 (0.30)   -15.29 (2.91)   -8.86 (5.90)    -4.36 (7.84)    -18.79 (1.03)   -10.31 (3.48)
+ (a) Uniformly sized tensors.
+ Starting SNR   HOOI            ALS             Wiener          Amp             SliceRank       XRank
+ 1              4.59 (1.82)     8.32 (4.32)     16.96 (8.56)    16.15 (8.84)    5.85 (2.30)     9.68 (1.96)
+ -1             2.58 (1.82)     6.25 (4.30)     14.99 (8.41)    14.88 (8.34)    3.51 (2.24)     8.25 (2.03)
+ -5             -1.46 (1.84)    2.12 (4.29)     10.77 (8.21)    12.06 (7.72)    -1.25 (2.06)    4.89 (2.39)
+ -10            -6.50 (1.87)    -2.98 (4.31)    4.07 (6.49)     7.84 (7.93)     -7.19 (1.87)    0.21 (3.09)
+ -20            -16.52 (1.87)   -13.05 (4.31)   -7.18 (6.61)    -1.79 (9.00)    -18.86 (0.84)   -9.05 (3.87)
+ (b) Non-uniformly sized tensors.
1131
+ other methods - HOOI, ALS, and amplification - introduced
1132
+ noise into the tensors across all starting signal SNRs. No
1133
+ appreciable difference was observed in tensors derived from
1134
+ the two patient cohorts (healthy and unhealthy). The denoising
1135
+ results for the tensors derived from windowed samples (Figure
1136
+ 3 b) are essentially the same as those derived from the
1137
+ 90 second samples, with the only exception being a slight
1138
+ increase in SliceRank’s denoising performance for tensors
1139
+ corresponding to unhealthy patients with a starting signal SNR
1140
+ of −5 dB.
1141
+
1142
+ [Figure 2 plot data omitted. Panels: (a) “Uniform Synthetic Tensors”, (b) “Non-uniform Synthetic Tensors”; x-axis: Rank; y-axis: Denoised SNR (dB); series: HOOI, ALS, Wiener, Amp, SliceRank, XRank.]
1180
+ (a)
1181
+ (b)
1182
+ Fig. 3: Denoising performance on the PTB tensors formed
1183
+ from a) 90 second samples, and b) windowed samples.
1184
+ C. Discussion
1185
+ Overall alternating least squares (ALS) was the best per-
1186
+ forming method for denoising synthetic tensors across all
1187
+ tensor orders, sizes, ranks, and starting noise levels, with the
1188
+ multiway Wiener filter (MWF) also performing well across
1189
+ all parameters. Amplification-based denoising performed well
1190
+ for low ranked tensors as well as very noisy (< 0 dB)
1191
+ tensors. The performance of amplification at low ranks and
1192
+ its decreased performance at higher ranks is to be expected,
1193
+ as the amplification maps correspond to approximations of the
1194
+ spectral norm, which only measures the highest singular value
1195
+ for a given tensor. Amplification-based denoising for higher
1196
+ rank tensors may be improved through the development of
1197
+ a decomposition method that can find successively smaller
1198
+ singular values and their corresponding rank 1 components,
1199
+ such as through a gradient-based descent optimization method.
1200
+ For the tensors derived from physiological signals, only the
1201
+ XRank method had any appreciable denoising performance.
1202
+ Such a method may find applications as a preprocessing step
1203
+ in a machine learning pipeline that utilizes tensorial data, such
1204
+ as that used for the prediction of hemodynamic decompensation
1205
+ in [40]. One limitation of the amplification-based denois-
1206
+ ing method is that amplification requires determining order-
1207
+ specific amplification maps; currently only those for orders
1208
+ three and four have been computed. Other tested methods have
1209
+ no such restriction. However, many real-world data modalities,
1210
+ such as images (order 3) and video (order 4) can potentially
1211
+ be denoised using current amplification maps.
1212
+ V. CONCLUSION
1213
+ In this work, we utilize the general framework of tensor
1214
+ denoising introduced in [15] and previously developed approxi-
1215
+ mations of the spectral norm [17] to devise three novel tensor
1216
+ denoising methods based on tensor amplification and two
1217
+ notions of tensor rank related to the G-stable rank [36] - stable
1218
+ slice rank and stable X-rank. The performance of these meth-
1219
+ ods was compared to several standard decomposition-based
1220
+ denoising methods on synthetic tensors of various sizes, ranks,
1221
+ and noise levels, along with real-world tensors derived from
1222
+ electrocardiogram (ECG) signals. The experimental results
1223
+ show that in the low rank context, tensor-based amplification
1224
+ provides comparable denoising performance in high signal-to-
1225
+ noise ratio (SNR) settings (> 0 dB) and superior performance
1226
+ in noisy (< 1 dB) settings, while the stable X-rank method
1227
+ achieves superior denoising performance on the ECG signal
1228
+ data. Future work will seek to improve the performance of
1229
+ amplification-based methods for higher rank tensors.
1230
+ ACKNOWLEDGMENT
1231
+ This work was partially supported by the National Science
1232
+ Foundation under Grant No. 1837985 and by the Department
1233
+ of Defense under Grant No. BA150235.
1234
+ APPENDIX A
1235
+ ORDER 4 DENOISING RESULTS
1236
+
1237
+ [Figure 3 plot data omitted. Each subfigure shows “Healthy Patients” and “Unhealthy Patients” panels; x-axis: Starting SNR (dB) (20, 10, 5, 1, −1, −5, −10, −20); y-axis: Denoised SNR (dB); series: HOOI, ALS, Wiener, Amp, SliceRank, XRank.]
1316
+ TABLE IV: Mean (SD) SNR, in decibels, after tensor denoising across all parameters.
1317
+ Starting SNR   HOOI            ALS             Wiener          Amp             SliceRank       XRank
+ 20             19.68 (3.70)    33.04 (12.81)   32.12 (15.50)   10.93 (15.73)   18.96 (1.43)    9.82 (4.10)
+ 10             10.82 (1.35)    24.54 (9.92)    22.99 (11.98)   9.60 (12.65)    13.31 (2.18)    9.20 (3.60)
+ 5              6.11 (0.88)     20.04 (8.73)    17.08 (10.24)   8.81 (11.16)    8.29 (2.63)     8.31 (3.10)
+ 1              2.20 (0.82)     16.21 (8.10)    11.73 (9.21)    7.92 (10.06)    3.62 (2.38)     7.05 (2.67)
+ -1             0.22 (0.81)     14.24 (7.85)    8.90 (8.47)     7.36 (9.60)     1.23 (2.12)     6.22 (2.58)
+ -5             -3.78 (0.80)    10.14 (7.64)    3.68 (6.62)     6.06 (8.98)     -3.44 (1.61)    4.10 (2.77)
+ -10            -8.79 (0.79)    4.89 (7.52)     -2.18 (4.03)    3.93 (8.78)     -9.03 (1.09)    0.69 (3.64)
+ -20            -18.78 (0.81)   -5.44 (7.21)    -9.70 (6.52)    -16.67 (3.64)   -19.64 (0.49)   -7.44 (5.15)
+ (a) Uniformly sized tensors of order 4.
+ Starting SNR   HOOI            ALS             Wiener          Amp             SliceRank       XRank
+ 20             25.83 (8.53)    35.74 (14.42)   32.02 (15.29)   14.20 (15.98)   18.82 (1.44)    11.16 (4.57)
+ 10             17.69 (5.21)    28.12 (11.06)   24.55 (12.18)   12.83 (12.86)   13.34 (2.14)    10.41 (3.85)
+ 5              13.47 (3.77)    24.00 (9.59)    18.98 (11.13)   11.83 (11.43)   8.32 (2.49)     9.31 (3.17)
+ 1              10.00 (2.87)    20.60 (8.58)    13.77 (10.31)   10.14 (10.68)   3.67 (2.21)     7.90 (2.67)
+ -1             8.21 (2.55)     18.86 (8.14)    10.99 (9.52)    8.70 (10.62)    1.29 (1.99)     6.97 (2.49)
+ -5             4.49 (2.20)     15.20 (7.47)    6.25 (7.26)     4.99 (11.29)    -3.41 (1.49)    4.63 (2.69)
+ -10            -0.49 (2.06)    10.30 (7.00)    0.78 (4.80)     0.76 (12.49)    -8.95 (1.16)    0.91 (3.77)
+ -20            -10.64 (1.95)   -0.10 (6.65)    -6.69 (6.44)    -18.46 (2.99)   -19.53 (0.67)   -7.47 (5.21)
+ (b) Non-uniformly sized tensors of order 4.
1445
+ (a)
1446
+ (b)
1447
+ Fig. 4: Denoising performance with respect to tensor rank for fourth-order tensors.
1448
+ TABLE V: Mean (SD) denoised SNR, in decibels, for low rank and noisy tensors.
1449
+ Starting SNR   HOOI            ALS             Wiener          Amp             SliceRank       XRank
+ 1              1.59 (0.36)     5.97 (2.93)     12.21 (4.05)    14.98 (7.72)    5.64 (2.29)     8.89 (1.91)
+ -1             -0.42 (0.35)    3.92 (2.92)     10.31 (3.85)    13.66 (7.28)    3.33 (2.11)     7.34 (2.01)
+ -5             -4.43 (0.32)    -0.17 (2.91)    6.42 (3.78)     10.68 (6.90)    -1.52 (1.70)    3.77 (2.34)
+ -10            -9.45 (0.30)    -5.24 (2.93)    1.90 (3.84)     6.16 (7.47)     -7.49 (1.39)    -0.79 (2.88)
+ -20            -19.45 (0.30)   -15.29 (2.91)   -8.86 (5.90)    -4.36 (7.84)    -18.79 (1.03)   -10.31 (3.48)
+ (a) Uniformly sized fourth-order tensors.
+ Starting SNR   HOOI            ALS             Wiener          Amp             SliceRank       XRank
+ 1              10.68 (2.08)    22.45 (7.05)    24.99 (11.39)   23.47 (14.61)   5.82 (2.20)     10.97 (2.55)
+ -1             8.67 (2.08)     20.36 (7.02)    21.36 (11.26)   22.36 (13.82)   3.43 (2.08)     9.71 (2.59)
+ -5             4.65 (2.07)     16.19 (6.95)    14.20 (9.11)    20.10 (12.42)   -1.59 (1.63)    6.94 (2.98)
+ -10            -0.38 (2.05)    11.03 (6.88)    4.95 (5.79)     17.07 (10.93)   -7.48 (1.42)    3.01 (4.21)
+ -20            -10.56 (1.98)   0.71 (6.97)     -6.40 (5.90)    -15.23 (4.45)   -18.66 (0.87)   -5.51 (5.53)
+ (b) Non-uniformly sized fourth-order tensors.
1535
+
1536
+ [Fig. 4 plot data: Rank (abscissa) vs. Denoised SNR in dB (ordinate) for the panels "Uniform Synthetic Tensors, Order 4" and "Non-uniform Synthetic Tensors, Order 4"; legend: HOOI, ALS, Wiener, Amp, SliceRank, XRank.]
8NE2T4oBgHgl3EQfPgby/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
8NE5T4oBgHgl3EQfQg5R/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
8dFLT4oBgHgl3EQfBS4r/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f57cba4ab03adcbbede5f87aaa2d6ada1897cdcab2d2d594e31a75e551e5e6c
3
+ size 85657
A9AyT4oBgHgl3EQf3_rL/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6862893d256697beab1db79564dd2ae417ff3c69b825c9f86e87de1920d7ebf0
3
+ size 7798829
A9AzT4oBgHgl3EQf__9t/content/2301.01956v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee310b65655c75a6074f37fcdd0bb5b4e42d720c313e5fee437900a11276858a
3
+ size 1341835
A9AzT4oBgHgl3EQf__9t/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f586d69e2574c51995d8d2a950d1673ad20d2b7e0bddea79844bd734dd7ab81
3
+ size 197933
C9FQT4oBgHgl3EQfPDYj/content/tmp_files/2301.13277v1.pdf.txt ADDED
@@ -0,0 +1,1080 @@
1
+ arXiv:2301.13277v1 [cond-mat.soft] 30 Jan 2023
2
+ Transport properties in liquids from first principles: the case of
3
+ liquid water and liquid Argon
4
+ Pier Luigi Silvestrelli
5
+ Dipartimento di Fisica e Astronomia “G. Galilei”,
6
+ Universit`a di Padova, via Marzolo 8, I-35131 Padova, Italy
7
+ (Dated: February 1, 2023)
8
+ Abstract
9
+ Shear and bulk viscosity of liquid water and Argon are evaluated from first principles in the Den-
10
+ sity Functional Theory (DFT) framework, by performing Molecular Dynamics simulations in the
11
+ NVE ensemble and using the Kubo-Greenwood equilibrium approach. The standard DFT functional is
12
+ corrected so as to allow for a reasonable description of van der Waals (vdW) effects. For
13
+ liquid Argon the thermal conductivity has also been calculated. Concerning liquid water, to our
14
+ knowledge this is the first estimate of the bulk viscosity and of the shear-viscosity/bulk-viscosity
15
+ ratio from first principles. By analyzing our results we can conclude that our first-principles sim-
16
+ ulations, performed at a nominal average temperature of 366 K to guarantee that the system is
17
+ liquid-like, actually describe the basic dynamical properties of liquid water at about 330 K. In
18
+ comparison with liquid water, the normal, monatomic liquid Ar is characterized by a much smaller
19
+ bulk-viscosity/shear-viscosity ratio (close to unity) and this feature is well reproduced by our first-
20
+ principles approach which predicts a value of the ratio in better agreement with experimental
21
+ reference data than that obtained using the empirical Lennard-Jones potential. The computed
22
+ thermal conductivity of liquid Argon is also in good agreement with the experimental value.
23
+ 1
24
+
25
+ I.
26
+ INTRODUCTION
27
+ Transport properties are among the most important and useful features of condensed-
28
+ matter systems, particularly for characterizing the dynamical behavior of liquids, since they
29
+ play an important role in many technical and natural processes. Therefore their estimate
30
+ represents one of the most relevant goals of Molecular Dynamics (MD) simulation techniques
31
+ which become particularly useful in cases where experimental data are not available or
32
+ difficult to obtain. Different theoretical approaches can be adopted with a varying degree of
33
+ accuracy (see, for instance, refs. 1–27, and further references therein).
34
+ Basically, in MD simulations transport properties can be evaluated either through a gen-
35
+ uine nonequilibrium approach by applying an explicit external perturbation (such as a shear
36
+ flow or a temperature gradient), which is clearly direct and intuitive but is affected by non-
37
+ trivial technical issues (in particular the need to generate nonequilibrium steady states in
38
+ typical systems characterized by finite-size supercells with periodic boundary conditions and
39
+ to extrapolate to the limit of zero driving force). Alternatively, the transport coefficients
40
+ can be more easily estimated from equilibrium MD simulations by using the Green-Kubo
41
+ relations28–30 of statistical mechanics (dissipation-fluctuation theorem) which allow the cal-
42
+ culation of transport coefficients by integration of suitable autocorrelation functions. This
43
+ latter approach is simpler because standard equilibrium MD simulations can be easily car-
44
+ ried out and estimated transport coefficients exhibit a weaker system-size dependence.26 An
45
+ equivalent17 equilibrium method exploits the Einstein–Helfand expressions2 to get transport
46
+ coefficients directly from the particle displacements and velocities;18 for instance, the shear
47
+ viscosity can be computed in terms of the mean-square x displacement of the center of y mo-
48
+ mentum, while the thermal conductivity is proportional to the mean square x displacement
49
+ of the center of energy.
50
+ The shear viscosity describes the resistance of a fluid to shear forces and is a measure
51
+ of the shear stress induced by an applied velocity gradient,1 while the bulk viscosity refers
52
+ to the resistance to dilatation of an infinitesimal volume element at constant shape and
53
+ measures the resistance of a fluid to compression. It is closely connected with absorption
54
+ and dispersion of ultrasonic waves in a fluid, so it can provide valuable information about
55
+ intermolecular forces. Moreover, the role of the bulk viscosity is acquiring more and more
56
+ importance, for instance in the area of surface and interface-related phenomena and for
57
+ 2
58
+
59
+ the interpretation of acoustic sensor data.31 In spite of its relevance, bulk viscosity has
60
+ received less experimental and theoretical attention, partly due to the greater difficulties in
61
+ obtaining accurate measurements and estimates. In principle it should be evaluated in the
62
+ microcanonical (NVE) ensemble where there is no need to evaluate an additional term which
63
+ would be required if, for instance, the canonical NVT ensemble were used.13,31 Moreover,
64
+ bulk viscosity is subject to much larger statistical error caused by the fact that it must
65
+ be calculated by the regression of fluctuations about a nonzero mean.3 While the shear
66
+ viscosity is associated with changes in water Hydrogen-bond network connectivity and is
67
+ mostly related to translational molecular motion, the bulk viscosity is associated with local
68
+ density fluctuations and reflects the relaxation of both rotational and vibrational modes.32,33
69
+ The thermal conductivity describes instead the capability of a substance to allow molecular
70
+ transport of energy driven by temperature gradients.
71
+ In general dynamical properties such as the transport coefficients are much more depen-
72
+ dent on the simulation size and timescale than structural properties.23 One must also point
73
+ out that shear and bulk viscosities, and thermal conductivity are even more difficult to be
74
+ evaluated accurately than, for instance, the diffusion coefficient (a single-particle property)
75
+ since they are collective transport properties involving all the particles.14 In fact, for esti-
76
+ mating the diffusion coefficient one can perform a statistical average over the particles in
77
+ addition to the average over time because every particle diffuses individually but any stress
78
+ or energy fluctuation is an event involving the system as a whole. As a consequence, in
79
+ order to obtain the same statistical accuracy, collective properties need much longer runs
80
+ than single particle properties by a factor proportional to the size of the system.12
81
+ We here estimate from first principles simulations, in the framework of the Density Func-
82
+ tional Theory (DFT), the shear and bulk viscosity of liquid water and Argon. For liquid
83
+ Argon the thermal conductivity is also calculated. By analyzing our results we can con-
84
+ clude that our first-principles simulations, performed at a nominal average temperature of
85
+ 366 K to guarantee that the systems is liquid-like, actually describe the basic dynamical
86
+ properties of liquid water at about 330 K. Our approach is also able to reproduce well the
87
+ bulk-viscosity/shear-viscosity ratio of liquid Ar which is much smaller than that of liquid
88
+ water.
89
+ 3
90
+
91
+ II.
92
+ METHOD
93
+ We have performed first principles MD simulations of liquid water using the CPMD
94
+ package,34 at constant volume, considering the experimental density of water at room tem-
95
+ perature. The computations were performed at the Γ-point only of the Brillouin zone, using
96
+ norm-conserving pseudopotentials35 and a basis set of plane waves to expand the wavefunc-
97
+ tions with an energy cutoff of 250 Ry; we have explicitly tested that this energy cutoff, much
98
+ higher than that used in standard DFT simulations of liquid water, is required to have a
99
+ good convergence also for the stress tensor components.
100
+ We have adopted the gradient-corrected BLYP36 density functional augmented by van
101
+ der Waals (vdW) corrections, hereafter referred to as DFT-D2(BLYP).37 This choice is
102
+ motivated both by the fact that BLYP has been shown38–42 to give an acceptable description
103
+ of Hydrogen bonding in water, and because it represents a good reference DFT functional to
104
+ add vdW corrections.43–46 A good description of Hydrogen bonding is essential here since, in
105
+ liquid water, the shear viscosity mostly originates from covalent interactions in the Hydrogen-
106
+ bond dynamics of water molecules.19 Moreover, vdW corrections to BLYP are important
107
+ because it was shown that BLYP significantly underestimates (by 25%) the equilibrium
108
+ density of liquid water; the experimental density can be recovered by adding the vdW
109
+ corrections proposed by Grimme,37 which have the further effect of making the oxygen-
110
+ oxygen radial distribution function in better agreement with experiment.47,48 Our system
111
+ consists of 64 water molecules contained in a supercell with simple-cubic symmetry and
112
+ periodic boundary conditions. Hydrogen nuclei have been treated as classical particles with
113
+ the mass of the deuterium isotope which allows us to use larger time steps. The effective
114
+ mass determining the time scale of the fictitious dynamics of the electrons was 700 a.u. and
115
+ the equations of motion were integrated with a time step of 3 a.u. (=0.073 fs).
116
+ Our simulation consisted of an initial equilibration phase, lasting about 0.15 ps, in which
117
+ the ionic temperature was simply controlled by velocity rescaling, followed by a much longer
118
+ (about 22 ps) canonical (NVT) MD simulation (using suitable thermostats for a Nos´e-Hoover
119
+ dynamics), followed by a final 22 ps microcanonical (NVE) production MD run. A common
120
+ drawback of most standard DFT functionals applied to liquid water at room temperature
121
+ is their tendency to ”freeze” the system which therefore exhibits an ice-like behavior. By
122
+ applying vdW corrections the problem is reduced but it is still present. In particular, since the
123
+ 4
124
+
125
+ melting temperature of water estimated by DFT-D2(BLYP) is 360 K49 (while it is 411 K with
126
+ BLYP), following a common strategy, we performed NVT simulations with an average ionic
127
+ temperature of 380 K to be sure that the system is indeed liquid-like. This use of artificially
128
+ increased temperature also serves to mimic Nuclear Quantum Effects in simulations of liquid
129
+ water.23 The average ionic temperature of the subsequent NVE MD simulation was 366
130
+ K. Several data (atomic coordinates, velocities, stress-tensor components,...) relevant for
131
+ characterizing structural and dynamical properties of the system were recorded every 20
132
+ steps in the production stage.
133
+ As far as liquid Ar is concerned, before starting MD simulations, we have performed exten-
134
+ sive preliminary calculations to choose optimal parameters and a suitable DFT functional.
135
+ Clearly in this case even an empirical Lennard-Jones potential reference could probably give
136
+ reasonable results but here we are interested in studying transport properties using DFT
137
+ functionals in a first-principle framework, which has the advantage of explicitly accounting
138
+ for the electronic structure of matter. Application to the face-centered cubic (fcc) Ar crystal
139
+ (considering a fcc supercell with 32 Ar atoms) and comparison with experimental reference
140
+ values for the equilibrium Ar-Ar distance and the cohesive energy, suggests that, among
141
+ many tested, vdW-corrected DFT functionals, DFT-D2(PBE)37,50 is the most adequate to
142
+ describe extended systems made by Ar atoms, hence we mainly use it for the MD simula-
143
+ tions of liquid Ar. In this case we have checked that a suitable energy cutoff to get a good
144
+ convergence for the stress tensor components is 110 Ry.
145
+ The liquid Ar sample was prepared starting from an initial (unfavorable) simple cubic
146
+ lattice configuration with 64 Ar atoms and considering the experimental Ar density (1.4
147
+ g/cm3) at melting point (84 K). Then the systems was heated by gradually increasing the
148
+ ionic temperature (by velocity rescaling) to 500 K (in a time of 1.3 ps) to be sure that the
149
+ system was truly melted; then the temperature was gradually decreased (in 1.0 ps) to 150
150
+ K, which is a temperature sufficiently higher than the experimental melting point that it
151
+ can be assumed that the system is indeed in a liquid phase; this has been explicitly checked
152
+ looking at the translational order parameter.1
153
+ Then a 60 ps canonical (NVT) MD simulation (with a ionic temperature of 150 K) was
154
+ performed, followed by a 60 ps microcanonical (NVE) MD production runs with an average
155
+ ionic temperature of 129 K. In this case the electronic effective mass was 700 a.u. and the
156
+ equations of motion were integrated with a time step of 5 a.u. (=0.121 fs). Data (atomic
157
+ 5
158
+
159
+ coordinates, velocities, stress-tensor components,...) relevant for structural and dynamical
160
+ properties of the system were recorded every 10 steps in the production stage.
161
+ As mentioned above, different approaches exist for the calculation of shear, ηS, and
162
+ bulk, ηB, viscosity from MD simulations.1,8–11,13 The most used technique is based on the
163
+ evaluation of the autocorrelation functions of stress-tensor components; in particular,1
164
+ \eta_S = \frac{V}{k_B T} \int_0^{\infty} dt \, \langle P_{\alpha\beta}(0) P_{\alpha\beta}(t) \rangle ,    (1)
+ \eta_B = \frac{V}{9 k_B T} \int_0^{\infty} dt \, \langle \delta P_{\alpha\alpha}(0) \, \delta P_{\beta\beta}(t) \rangle = \frac{V}{k_B T} \int_0^{\infty} dt \, \langle \delta P(0) \, \delta P(t) \rangle ,    (2)
183
+ where, in practice the upper limit of integration (∞) is replaced by a reasonably-long
184
+ simulation time, tmax, ⟨...⟩ denotes average over different time origins, V is the system
185
+ volume, T the ionic temperature, kB the Boltzmann constant, Pαβ quantities denote the
186
+ components of the stress tensor, the instantaneous pressure is given by P(t) = (1/3) \sum_{\alpha} P_{\alpha\alpha}
+ (that is the average of the diagonal elements of the stress tensor), and the fluctuations are
+ defined as:
+ \delta P_{\alpha\alpha}(t) = P_{\alpha\alpha}(t) - \langle P_{\alpha\alpha} \rangle = P_{\alpha\alpha}(t) - P , \qquad \delta P(t) = P(t) - \langle P \rangle = P(t) - P ,    (3)
192
+ where P is the system pressure obtained as the ensemble average of P(t). In isotropic
193
+ fluids (with rotational invariance) there are only 5 independent (and equivalent) components
194
+ of the traceless stress tensor: Pxy, Pyz, Pzx, (Pxx − Pyy)/2, and (Pyy − Pzz)/2, so that it is
195
+ convenient to compute the shear viscosity ηS by averaging over these 5 components to get
196
+ better statistics.
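+ As an illustration of this Green-Kubo procedure, the following Python sketch (our own post-processing
+ illustration, not part of the CPMD package; array names, shapes and SI units are assumptions) averages
+ the stress ACF over the five independent components and returns the running integral of Eq. (1):
+ import numpy as np
+ kB = 1.380649e-23  # Boltzmann constant, J/K
+ def acf(x, max_lag):
+     # time-averaged autocorrelation <x(0) x(t)> of a scalar series
+     n = len(x)
+     return np.array([np.mean(x[:n - lag] * x[lag:]) for lag in range(max_lag)])
+ def shear_viscosity_running(stress, volume, temperature, dt, max_lag):
+     # stress: (n_steps, 5) array with Pxy, Pyz, Pzx, (Pxx-Pyy)/2, (Pyy-Pzz)/2 in Pa
+     # volume in m^3, temperature in K, dt (sampling interval) in s
+     corr = np.mean([acf(stress[:, k], max_lag) for k in range(5)], axis=0)
+     integral = np.concatenate(([0.0], np.cumsum(0.5 * (corr[1:] + corr[:-1]) * dt)))
+     return volume / (kB * temperature) * integral  # running Green-Kubo estimate, Pa s
+ The bulk viscosity of Eq. (2) is obtained in the same way from the single series of pressure
+ fluctuations δP(t).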
197
+ Instead, the bulk viscosity ηB has only one component, moreover the diagonal stresses
198
+ must be evaluated carefully since a non-vanishing equilibrium average must be subtracted.
199
+ In order to get more accurate evaluations of transport properties and also reliable estimates
200
+ of the associated statistical errors, we adopt the block-average technique,51 which consists
201
+ of dividing the whole simulation into a sequence of several shorter intervals (“blocks”),
202
+ each with an equal number of samples; then block averages are calculated which allow to
203
+ estimate means and variances.15 In the case of the bulk-viscosity calculation, to reduce the
204
+ error, it is convenient to take for the system pressure the average value of the pressures
205
+ over all blocks.13 Clearly the choice of the block size must be made with care; in fact,
206
+ 6
207
+
208
+ samples become uncorrelated as the block size increases so for small block sizes, the error is
209
+ underestimated while for large block sizes the error estimate is inaccurate due to insufficient
210
+ sampling (see detailed discussion below).
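+ A minimal sketch of such a block analysis (schematic; in practice the quantity evaluated on each
+ block is the Green-Kubo viscosity itself, and the block length must exceed the correlation time) is:
+ import numpy as np
+ def block_estimate(per_block_values):
+     # per_block_values: the observable evaluated independently on each block
+     blocks = np.asarray(per_block_values, dtype=float)
+     mean = blocks.mean()
+     error = blocks.std(ddof=1) / np.sqrt(len(blocks))  # standard error of the block mean
+     return mean, error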
211
+ Typically transport coefficients are estimated from classical MD simulations based on
212
+ empirical interatomic potentials. The practical feasibility of calculating transport coefficients
213
+ in liquids using instead first principles MD simulations, was demonstrated by D. Alf´e and
214
+ M. J. Gillan,12 who used the Green-Kubo relations to compute the shear viscosity of liquid
215
+ iron and aluminum, with a statistical error of about 5%. However, the simulations of D. Alf´e
216
+ and M. J. Gillan12 were performed in the NVT ensemble, while our simulations have been
217
+ carried out using the NVE ensemble, since the NVE simulations also allow the evaluation
218
+ of the bulk viscosity without any correction term (see above).
219
+ A simpler alternative method exists (valid for temperatures that are not too low52) to
220
+ obtain an approximate estimate of the shear viscosity, by exploiting its connection with the
221
+ self-diffusion coefficient D via the Stokes-Einstein relation:4,12
222
+ \eta_S = \frac{k_B T}{2 \pi a D} ,    (4)
225
+ where a is an effective atomic diameter. Such a relation is exact for the Brownian motion
226
+ of a macroscopic particle of diameter a in a liquid of shear viscosity ηS, but it is only
227
+ approximate when applied to atoms; however if a is chosen to be the radius r1 of the first
228
+ peak in the radial distribution function, the relation usually predicts ηS to within 40%.12
229
+ Here we take for r1 the position of the first peak in the O-O and Ar-Ar radial distribution
230
+ function for liquid water and liquid Ar, respectively, while the diffusion coefficient D can
231
+ be computed1 from the mean square displacement of the oxygen atoms (for liquid water)
232
+ or Ar atoms (for liquid Ar). The validity of the Stokes–Einstein relation has been recently
233
+ discussed in detail by Herrero et al.52 who also explored the connection between structural
234
+ properties and transport coefficients.
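+ For completeness, a schematic estimate of D from the mean square displacement (assuming unwrapped
+ atomic trajectories are available as a (n_steps, n_atoms, 3) array in metres; a single time origin is
+ used for brevity, whereas production analyses average over time origins) might read:
+ import numpy as np
+ def diffusion_coefficient(positions, dt, fit_start, fit_stop):
+     # mean square displacement averaged over atoms, then a linear fit MSD = 6 D t
+     disp = positions - positions[0]
+     msd = np.mean(np.sum(disp**2, axis=2), axis=1)
+     t = np.arange(len(msd)) * dt
+     slope = np.polyfit(t[fit_start:fit_stop], msd[fit_start:fit_stop], 1)[0]
+     return slope / 6.0  # m^2/s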
235
+ For liquid Argon the thermal conductivity has also been calculated, using the formula:1
236
+ \lambda_T = \frac{V}{k_B T^2} \int_0^{\infty} dt \, \langle j^E_{\alpha}(0) \, j^E_{\alpha}(t) \rangle ,    (5)
245
+ where j^E_{\alpha} is the \alpha component of the energy current, defined as the time derivative of
+ \delta E_{\alpha} = \frac{1}{V} \sum_i r_{i\alpha} \left( E_i - \langle E_i \rangle \right) ,    (6)
255
+ and Ei is the energy of the i−th Ar atom (located at coordinates rix, riy, riz), which can
256
+ be evaluated as
257
+ E_i = p_i^2 / 2 m_i + \frac{1}{2} \sum_{j \neq i} v(r_{ij}) ,    (7)
263
+ by assuming a pairwise interatomic potential. In order to obtain a pair potential for
264
+ evaluating the thermal conductivity of liquid Ar using configurational data from our first-
265
+ principles DFT simulations, we have adopted a strategy similar to that proposed in ref. 26:
266
+ we assume for the pair potential a Lennard-Jones analytical form:
267
+ v(r) = a \left( b^2 / r^{12} - b / r^6 \right) ,    (8)
269
+ where a and b are parameters optimized by fitting the potential-energy curve of the Ar
270
+ dimer (at different interatomic distances) obtained by using our DFT approach.
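+ Since Eq. (8) is linear in the combinations A = a b^2 and B = a b, the fit can be carried out by
+ ordinary least squares; a small Python sketch (the dimer distances and energies below are
+ placeholders, not the actual DFT-D2(PBE) values) is:
+ import numpy as np
+ # hypothetical potential-energy scan of the Ar dimer: r in Angstrom, v in eV
+ r = np.array([3.2, 3.4, 3.6, 3.8, 4.0, 4.5, 5.0, 6.0])
+ v = np.array([0.010, -0.008, -0.012, -0.011, -0.009, -0.005, -0.003, -0.001])
+ # v(r) = A/r**12 - B/r**6 with A = a*b**2 and B = a*b
+ M = np.column_stack((1.0 / r**12, -1.0 / r**6))
+ (A, B), *_ = np.linalg.lstsq(M, v, rcond=None)
+ b = A / B   # recover the parameters of Eq. (8)
+ a = B**2 / A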
271
+ III.
272
+ RESULTS AND DISCUSSION
273
+ In Fig. 1 and 2 we plot the behavior of the temperature and pressure as a function of time
274
+ in the NVE simulation for liquid water and Ar, respectively. As can be seen, these quantities
275
+ turn out to be stable and exhibit only moderate oscillations around the average values, which
276
+ are, for liquid water, 0.132 GPa and 366 K for the pressure and the temperature, respectively,
277
+ while for liquid Ar the values are 0.173 GPa and 129 K.
278
+ In Fig. 3 and 4 we instead plot the auto-correlation functions (ACFs), corresponding to
279
+ the integrands (considering the average over the components for the shear viscosity) of eqs.
280
+ (1) and (2). Differently from what observed in monatomic systems (such as liquid Ar) or
281
+ in classical MD simulations where waters are modeled by rigid molecules, in first-principles
282
+ simulations of liquid water, high-frequency intermolecular vibrations lead to corresponding
283
+ high-frequency oscillations in the pressure and in related ACFs. In order to better appreciate
284
+ the global decay behavior of ACFs, in the case of liquid water, high-frequency components
285
+ have been cut by Fourier-transforming the ACFs.
286
+ A quantitative estimate of the ACFs
287
+ relaxation times can be obtained assuming a global exponential decay (≃ e−t/τ) of the
288
+
289
+ [Fig. 1 plot data: time (ps) on the abscissa; T (K) and P (10 MPa) on the ordinate.]
307
+ FIG. 1: Temperature and pressure of liquid water plotted as a function of the simulation time.
308
+ integrands and computing:
309
+ \tau_S = \int_0^{\infty} dt \, \frac{\langle P_{\alpha\beta}(0) P_{\alpha\beta}(t) \rangle}{\langle P_{\alpha\beta}(0) P_{\alpha\beta}(0) \rangle} ,    (9)
+ and
+ \tau_B = \int_0^{\infty} dt \, \frac{\langle \delta P_{\alpha\alpha}(0) \delta P_{\beta\beta}(t) \rangle}{\langle \delta P_{\alpha\alpha}(0) \delta P_{\beta\beta}(0) \rangle} .    (10)
322
+ For liquid water we find τS ≃ 6 fs and τB ≃ 4 fs, while for liquid Ar τS ≃ 340 fs and
323
+ τB ≃ 410 fs.
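+ In practice such relaxation times can be obtained by integrating the normalized ACF up to a finite
+ cutoff; a minimal sketch (the cutoff choice is left to the user) is:
+ import numpy as np
+ def relaxation_time(corr, dt, cutoff):
+     # corr: autocorrelation function sampled every dt; Eqs. (9)-(10) with a finite upper limit
+     c = np.asarray(corr[:cutoff], dtype=float) / corr[0]
+     return dt * (c.sum() - 0.5 * (c[0] + c[-1]))  # trapezoidal rule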
324
+
325
+ [Fig. 2 plot data: time (ps) on the abscissa; T (K) and P (10 MPa) on the ordinate.]
343
+ FIG. 2: Temperature and pressure of liquid Ar plotted as a function of the simulation time.
344
+ The shear and bulk viscosity, computed using eqs. (1) and (2), are plotted as a function
345
+ of the upper limit of the integrals in Fig. 5 and 6, while the thermal conductivity of liquid
346
+ Ar is reported in Fig. 7. From these curves an approximate estimate of the shear and
347
+ bulk viscosity can be obtained considering the values of the quantities corresponding to the
348
+ position of the first pronounced maximum-plateau; in fact this indicates that the running
349
+ integral starts becoming nearly independent of time implying that the corresponding ACF
350
+ has decayed to zero and is fluctuating along the horizontal time axis. Clearly, considering
351
+
352
+ [Fig. 3 plot data: time (ps) vs. ACF (Pa^2); curves for the shear- and bulk-viscosity ACFs.]
369
+ FIG. 3: Auto-correlation functions (ACFs) used for the evaluation of the shear and bulk viscosities
370
+ of liquid water (see text) plotted as a function of the simulation time.
371
+ longer times only introduces additional noise to the signal and the beginning of a plateau
372
+ represents the desired value of the viscosity with the smallest uncertainty. As can be seen,
373
+ the maximum-plateau is reached at about t = 0.8 ps for both the shear and bulk viscosity of
374
+ liquid water, while the corresponding values for liquid Ar are 3.0, 5.0 ps, and 0.5 ps for the
375
+ shear viscosity, the bulk viscosity, and the thermal conductivity, respectively. As expected,
376
+ these times are much larger than the corresponding relaxation times τS and τB estimated
377
+
378
+ [Fig. 4 plot data: time (ps) vs. ACF (Pa^2); curves for the shear- and bulk-viscosity ACFs.]
394
+ FIG. 4: Auto-correlation functions (ACFs) used for the evaluation of the shear and bulk viscosities
395
+ of liquid Ar (see text) plotted as a function of the simulation time.
396
+ above.
397
+ As already discussed, a more accurate evaluation, together with a reliable estimate of the
398
+ associated statistical error, can be obtained by adopting a block-average technique.
399
+ In
400
+ this case a proper choice of the block size is crucial: with many, small-size blocks, the
401
+ statistical error is small but the blocks are probably correlated and the viscosity is typically
402
+ underestimated (not yet converged); on the contrary, with just a few, large-size blocks, these
403
+
404
+ [Fig. 5 plot data: time (ps) vs. viscosity (10^-4 Pa s); shear- and bulk-viscosity curves.]
417
+ FIG. 5: Shear and bulk viscosity of liquid water plotted as a function of the upper limit of the
418
+ integrals of the ACFs.
419
+ are probably uncorrelated and the viscosity is converged but the statistical error is large.
420
+ In Figs. 8, 9, 10, and 11 we plot the values of the shear and bulk viscosity of liquid
421
+ water and Ar evaluated by using different numbers of blocks (keeping constant the total
422
+ number of configurations) with the relative statistical errors. The dashed horizontal lines
423
+ indicate the corresponding values inferred by considering the maxima-plateaus of the curves
424
+ in Figs. 5 and 6. As can be seen, in the case of liquid water, the maxima of the shear and
425
+
426
+ [Fig. 6 plot data: time (ps) vs. viscosity (10^-4 Pa s); shear- and bulk-viscosity curves.]
443
+ FIG. 6: Shear and bulk viscosity of liquid Ar plotted as a function of the upper limit of the integrals
444
+ of the ACFs.
445
+ bulk viscosities are obtained considering 16 blocks, each equivalent to a simulation time
446
+ of about 1.4 ps. Interestingly, taking statistical uncertainties into account, these maxima
447
+ are compatible with the rough estimates obtained before and, for the shear viscosity, also
448
+ with the values obtained using the Stokes-Einstein formula (Eq. (4)). As already described
449
+ above, in the Stokes-Einstein estimate the shear viscosity is obtained in terms of the diffusion
450
+ coefficient D and the radius of the first peak in the radial distribution function (see Eq. (4),
451
+
452
+ [Fig. 7 plot data: time (ps) vs. thermal conductivity (W/m K).]
462
+ FIG. 7: Thermal conductivity of liquid Ar plotted as a function of the upper limit of the integral
463
+ of the ACF.
464
+ for liquid water we have considered the first peak in the oxygen-oxygen radial distribution
465
+ function, see below). Actually our reported Stokes-Einstein estimated values are corrected
466
+ for finite-size effects: in fact D can be extrapolated to infinite size of the simulation box (see,
467
+ for instance, ref. 52) by just considering the shear-viscosity value:
468
+
469
+ D_{\infty} = D + 2.837 \, \frac{k_B T}{6 \pi \eta_S L} ,    (11)
472
+ where L is the size of the cubic simulation box. Therefore, by simultaneously taking
473
+ into account Eqs. (4) and (11), one can get a “self-consistent”, finite-size corrected Stokes-
474
+ Einstein estimate for ηS :
475
+ \eta^{*}_{S} = \frac{k_B T}{2 \pi a D} - 2.837 \, \frac{k_B T}{6 \pi L D} .    (12)
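+ A direct implementation of this corrected estimate (our own sketch; all quantities in SI units,
+ argument names are ours) could be:
+ import numpy as np
+ kB = 1.380649e-23  # J/K
+ def eta_s_star(D, a, L, T):
+     # Eq. (12): D diffusion coefficient (m^2/s), a first-peak radius of g(r) (m),
+     # L cubic box edge (m), T temperature (K); returns the corrected shear viscosity in Pa s
+     return kB * T / (2.0 * np.pi * a * D) - 2.837 * kB * T / (6.0 * np.pi * L * D)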
480
+ Quantitative data are collected in Table I where they are also compared with some the-
481
+ oretical and experimental reference values.
482
+ As far as the shear viscosity is concerned, for liquid water our estimated value, obtained
483
+ from the NVE simulation at an average temperature of 366 K, agrees with the experimental
484
+ reference data at a lower temperature of about 330 K. This is in line with the performances
485
+ of other DFT functionals; for instance (see Table I), in recent simulations52 of liquid water
486
+ based on the SCAN functional,59 the shear viscosity estimate is close to that obtained from
487
+ a force-field approach (that, for this quantity, well reproduces the experimental behavior)
488
+ only between 330 and 360 K, while it is severely overestimated at 300 K. With the OPTB88-
489
+ vdW functional60 reasonable agreement with experimental data at room temperature is only
490
+ found52 at 360 K.
491
+ For liquid Ar the behavior is qualitatively similar (see Figs. 10, 11, and 12 for the shear
492
+ viscosity, the bulk viscosity, and the thermal conductivity, respectively). In this case both the
493
+ maxima of the shear and bulk viscosities are obtained considering 5 blocks, each equivalent
494
+ to a simulation time of about 12.0 ps. Even in this case, taking statistical uncertainties into
495
+ account, these maxima are compatible with the plateau positions and, for the shear viscosity,
496
+ also with the estimate from the Stokes-Einstein formula. The maximum of the thermal
497
+ conductivity is instead reached with 25 blocks, each equivalent to a simulation time of about
498
+ 2.4 ps, and its value (0.11±0.02 W/m K) is again compatible with that estimated considering
499
+ the maximum-plateau position and in good agreement with the literature reference value
500
+ (0.12 W/m K) at 90 K61 and that obtained by classical MD simulations based on the
501
+ Lennard-Jones potential (0.119 W/m K).62
502
+ An interesting physical quantity is represented by the ratio between bulk and shear
503
+ viscosity, which can be related to the ratio of observed to classical absorption coefficients in
504
+
505
+ [Fig. 8 plot data: # of blocks vs. shear viscosity (10^-4 Pa s); asterisk: Stokes-Einstein estimate; triangles: expt. at 303, 323 and 333 K.]
525
+ FIG. 8: Shear viscosity of liquid water evaluated by using different numbers of blocks (the smaller
526
+ is the block number the larger is the number of configurations of each block) with the relative sta-
527
+ tistical errors. The dashed horizontal line indicates the position of the first-pronounced maximum-
528
+ plateau of the corresponding curve of Fig.
529
+ 5.
530
+ The asterisk denotes the value obtained by the
531
+ Stokes-Einstein formula (Eq.12), while the triangles indicate experimental estimates at different
532
+ temperatures.
533
+
534
+ TABLE I: Shear and bulk viscosity of liquid water and Ar, in 10^-4 Pa s, compared with theoretical
+ and experimental reference data. Statistical errors are in parentheses. η∗_S indicates the shear
+ viscosity estimate obtained by the Stokes-Einstein relation (see text).
+ system                         | ηS       | η∗_S | ηB        | ηB/ηS    | 3/4 ηB/ηS + 1
+ water (366 K)                  | 4.8(0.7) | 5.7  | 11.3(2.9) | 2.4(0.8) | 2.8(0.6)
+ water DFT SCAN^a (300 K)       | 23       | —    | —         | —        | —
+ water DFT SCAN^a (330 K)       | 6        | —    | —         | —        | —
+ water DFT SCAN^a (360 K)       | 5        | —    | —         | —        | —
+ water DFT OPTB88-vdW^a (300 K) | 30       | —    | —         | —        | —
+ water DFT OPTB88-vdW^a (330 K) | 15       | —    | —         | —        | —
+ water DFT OPTB88-vdW^a (360 K) | 8        | —    | —         | —        | —
+ water force field^a (300 K)    | 8        | —    | —         | —        | —
+ water force field^a (330 K)    | 5        | —    | —         | —        | —
+ water force field^a (360 K)    | 3.5      | —    | —         | —        | —
+ water force field^b (303 K)    | 6.5(0.4) | —    | 15.5(1.6) | 2.4(0.3) | 2.8(0.2)
+ water expt.^c (298 K)          | 8.90     | —    | —         | —        | —
+ water expt.^b,d,e (303 K)      | 7.97     | —    | 21.5      | 2.7      | 3.0
+ water expt.^f (323 K)          | 5.47     | —    | 14.8      | 2.7      | 3.0
+ water expt.^c (333 K)          | 4.67     | —    | —         | —        | —
+ Ar (129 K)                     | 3.7(1.6) | 2.0  | 4.0(2.2)  | 1.1(0.8) | 1.8(0.6)
+ Ar expt.^g (90 K)              | 2.33     | —    | 1.82      | 0.8      | 1.6
+ Ar expt.^h (90 K)              | 2.57     | —    | —         | —        | —
+ a: ref. 52.  b: ref. 13.  c: ref. 55.  d: ref. 53.  e: ref. 54.  f: ref. 56.  g: ref. 57.  h: ref. 58.
652
+
653
+ [Fig. 9 plot data: # of blocks vs. bulk viscosity (10^-4 Pa s).]
668
+ FIG. 9: Bulk viscosity of liquid water evaluated by using different numbers of blocks (the smaller
669
+ is the block number the larger is the number of configurations of each block) with the relative sta-
670
+ tistical errors. The dashed horizontal line indicates the position of the first-pronounced maximum-
671
+ plateau of the corresponding curve of Fig. 5.
672
+ ultrasonic absorption experiments.13 In fact, under the condition that the heat conductivity
673
+ contribution to the ultrasonic absorption may be neglected,
674
+
675
+ [Fig. 10 plot data: # of blocks vs. shear viscosity (10^-4 Pa s); asterisk: Stokes-Einstein estimate; triangle: expt. at 90 K.]
693
+ FIG. 10: Shear viscosity of liquid Ar evaluated by using different numbers of blocks (the smaller is
694
+ the block number the larger is the number of configurations of each block) with the relative statis-
695
+ tical errors. The dashed horizontal line indicates the position of the first-pronounced maximum-
696
+ plateau of the corresponding curve of Fig.
697
+ 6.
698
+ The asterisk denotes the value obtained by the
699
+ Stokes-Einstein formula (Eq.4), while the triangle indicates the experimental estimate at 90 K.
700
+ \frac{\alpha}{\alpha_{\mathrm{class}}} = \frac{3}{4} \, \frac{\eta_B}{\eta_S} + 1 ,    (13)
706
+ and water belongs to the group of the so-called ”associated liquids”, characterized by a
707
+ ratio from 1 to 3, where structural relaxation is dominant.
708
+ Classical MD simulations based on the SPC/E semiempirical potential predict13 a ηB/ηS
+ ratio of 2.4, leading to an α/αclass ratio of 2.79, in reasonable agreement with the experimental
713
+
714
+ [Fig. 11 plot data: # of blocks vs. bulk viscosity (10^-4 Pa s).]
731
+ FIG. 11: Bulk viscosity of liquid Ar evaluated by using different numbers of blocks (the smaller is
732
+ the block number the larger is the number of configurations of each block) with the relative statis-
733
+ tical errors. The dashed horizontal line indicates the position of the first-pronounced maximum-
734
+ plateau of the corresponding curve of Fig. 6.
735
+ value of 3.0.53 Instead normal liquids, such as monatomic liquids (for instance liquid Ar)
736
+ are characterized by a ratio no greater than 1.2. Although in general the ratio varies with
737
+ temperature and pressure, in liquid water it is found to remain constant within 20% in
738
+ the temperature range 0-90 C (273-363 K).63 By taking statistical errors into account, our
739
+ estimated value of the α/αclass ratio (2.8 ± 0.6) is compatible with the available experimental
742
+
743
+ [Fig. 12 plot data: # of blocks vs. thermal conductivity (W/m K).]
757
+ FIG. 12: Thermal conductivity of liquid Ar evaluated by using different numbers of blocks (the
758
+ smaller is the block number the larger is the number of configurations of each block) with the relative
759
+ statistical errors. The dashed horizontal line indicates the position of the maximum-plateau of the
760
+ corresponding curve in Fig. 7.
761
+ data at ambient temperature (3.0). This is a remarkable result, considering that most of the
762
+ reported classical MD simulations13 predict a bulk viscosity lower than the the experimental
763
+ one, leading to an underestimated value of the
764
+ α
765
+ αclass ratio.
766
+ One should also point out that a proper comparison with experimental data requires a
767
+ careful analysis taking into account the pronounced temperature dependence of shear and
768
+
769
+ bulk viscosity. In fact, according to a common empirical model,56,64 the viscosity strongly de-
770
+ creases with increasing temperature following an exponential decay. By fitting experimental
771
+ data56 with an exponential function and taking statistical errors into account, our estimated
772
+ values of the shear and bulk viscosity of liquid water are compatible with experimental
773
+ data in the temperature range of 323-344 K. One should also consider that the bulk-
+ viscosity/shear-viscosity ratio for liquid water tends to decrease slightly with temperature,56
+ suggesting an even better agreement between our estimated value and the experimental
+ data.56 We recall that our simulations have been carried out at temperatures higher than
+ ambient temperature to guarantee that the system is liquid-like. By considering that our
+ estimate (after finite-size correction) for the diffusion coefficient, D = 5.02 × 10−5 cm2/s,
+ corresponds to the experimental value measured at about 336 K,65 we can conclude that
780
+ our DFT simulations based on the DFT-D2(BLYP) functional and performed at a nominal
781
+ average temperature of 366 K, actually describe the basic dynamical properties of liquid
782
+ water at about 330 K. One should also mention that bulk-viscosity measurements are in-
783
+ direct and affected by considerable errors.13,27,33,56,66,67 In summary, we can conclude that
784
+ our adopted BLYP-D2 functional is able to describe reasonably well the density fluctuations
785
+ of liquid water; the discrepancy with respect to experimental data at ambient conditions
786
+ can be to a large extent explained in terms of the pronounced temperature dependence of
787
+ both shear and bulk viscosity and the need to perform first-principles MD simulations at
788
+ temperatures higher than ambient temperature.
789
+ As far as liquid Ar is concerned, our shear and bulk viscosities, computed by first-
790
+ principles at a nominal average simulation temperature of 129 K, turn out to be somewhat
+ overestimated with respect to the reference experimental values at 90 K, although
792
+ they are compatible with them if statistical errors are taken into account. Moreover our
793
+ bulk-viscosity/shear-viscosity ratio (close to unity) agrees well with the reference estimate,
794
+ while interestingly this is not the case if a standard Lennard-Jones empirical potential is
795
+ adopted using classical MD simulations that predict instead a very low value13,62 of the ratio
796
+ (0.17-0.35 at high densities), thus showing that this popular potential cannot properly re-
797
+ produce all the dynamical properties of liquid Ar and underlining once again the superiority
798
+ of first-principles approaches.
799
+ We conclude our study by reporting some basic structural properties of our investigated
800
+ systems. In particular, in Fig. 13, for liquid water we plot our computed O-O pair correlation
801
+
802
+ [Fig. 13 plot data: r (Å) vs. g(r).]
817
+ FIG. 13: O-O pair correlation function, gOO(r), compared with that obtained experimentally from
818
+ X-ray diffraction measurements at ambient conditions.68–70
819
+ function, gOO(r), compared with that obtained experimentally from X-ray diffraction measure-
820
+ ments at ambient conditions.68–70 The main features of the gOO(r) curves are reported in
821
+ Table II. As can be seen, there is a good agreement between the two curves; the fact that the
822
+ oscillations of our computed curve are slightly reduced with respect to the experimental one
823
+ can again be related to the higher effective temperature of our simulation.
824
+
825
+ [Fig. 14 plot data: r (Å) vs. g(r).]
841
+ FIG. 14: Ar-Ar pair correlation function, g(r), compared with that obtained experimentally from
842
+ neutron-scattering measurements at 85K.73
843
+ In Fig. 14, for liquid Ar our computed Ar-Ar pair correlation function, g(r), is compared
844
+ with that obtained experimentally from neutron-scattering measurements at 85 K,73 while
845
+ again the main features of the g(r) curves are reported in Table II. Even in this case there
846
+ is a reasonable agreement between the simulation and experimental curve, by considering
847
+ that simulations for liquid Ar have been performed at significantly higher temperature (129
848
+ K) than experiments (85 K) (note that the experimental melting and boiling points of Ar
849
+ are at 84 and 87 K, respectively). After applying the same finite-size correction adopted
850
+
851
+ TABLE II: Main features of the O-O pair correlation function, gOO(r), of liquid water and of
852
+ the Ar-Ar pair correlation function, g(r) of liquid Ar compared with experimental reference data,
853
+ obtained from X-ray diffraction measurements at ambient conditions for liquid water and neutron-
854
+ scattering measurements for liquid Ar. rmax and rmin indicate the position of the first maximum
855
+ (the main peak) and the first minimum of gOO(r) and g(r), respectively, and gmax and gmin the
856
+ corresponding values of the gOO(r) and g(r) functions.
857
+ system                | rmax (Å) | gmax    | rmin (Å) | gmin
+ water (366 K)         | 2.79     | 2.42    | 3.66     | 0.88
+ water expt.^a (293 K) | 2.80(1)  | 2.55(5) | 3.41(4)  | 0.85(2)
+ Ar (129 K)            | 3.70     | 2.80    | 5.29     | 0.64
+ Ar expt.^b (85 K)     | 3.68     | 3.05    | 5.18     | 0.56
+ a: ref. 68–70.  b: ref. 73.
880
+ above for liquid water, our estimated diffusion coefficient for liquid Ar, D = 3.82 × 10−5
881
+ cm2/s, evaluated at a nominal simulation temperature of 129 K is significantly higher than
882
+ the reference value (1.6 × 10−5 cm2/s) reported at 84 K.71 Again this discrepancy can be
883
+ explained in terms of the higher temperature of the liquid Ar simulation.
884
+ IV.
885
+ CONCLUSIONS
886
+ Shear and bulk viscosity of liquid water and Argon have been evaluated, together with
887
+ other structural and dynamical properties, from first principles by adopting a vdW-corrected
888
+ DFT approach, by performing Molecular Dynamics simulations in the NVE ensemble and
889
+ using the Kubo-Greenwood equilibrium approach. For liquid Argon the thermal conductivity
890
+ has also been calculated. Concerning liquid water, to our knowledge this is the first estimate
891
+ of the bulk viscosity and of the shear-viscosity/bulk-viscosity ratio from first principles. By
892
+ analyzing our results and comparing them with reference experimental data, we can conclude
893
+ that our first-principles simulations, performed at a nominal average temperature of 366
894
+ K to guarantee that the systems is liquid-like, actually describe well the basic dynamical
895
+
896
+ properties of liquid water at about 330 K. In comparison with liquid water, the normal,
897
+ monatomic liquid Ar is characterized by a much smaller bulk-viscosity/shear-viscosity ratio
898
+ (close to unity) and this feature is well reproduced by our first-principles approach which
899
+ predicts a value of the ratio in better agreement with experimental reference data than that
900
+ obtained using the empirical Lennard-Jones potential. The computed thermal conductivity
901
+ of liquid Argon is also in good agreement with the experimental value.
902
+ V.
903
+ ACKNOWLEDGEMENTS
904
+ We acknowledge funding from Fondazione Cariparo, Progetti di Eccellenza 2017, relative
905
+ to the project: ”Engineering van der Waals Interactions: Innovative paradigm for the control
906
+ of Nanoscale Phenomena”.
907
+ VI.
908
+ DATA AVAILABILITY
909
+ The data that support the findings of this study are available from the corresponding
910
+ author upon reasonable request.
911
+ 1 M. P. Allen and D. J. Tildesley, Computer Simulations of Liquids (Oxford Science Publications,
912
+ Clarendon Press, Oxford 1987).
913
+ 2 E. Helfand, ”Transport Coefficients from Dissipation in a Canonical Ensemble”, Phys. Rev.
914
+ 119, 1 (1960).
915
+ 3 B. J. Alder, D. M. Gass, T. E. Wainwright, ”Studies in Molecular Dynamics. VIII. The Trans-
916
+ port Coefficients for a Hard-Sphere Fluid”, J. Chem. Phys. 53, 3813 (1970).
917
+ 4 E. M. Gosling, I. R. McDonald, K. Singer, ”On the calculation by molecular dynamics of the
918
+ shear viscosity of a simple fluid”, Mol. Phys. 26, 1475 (1973).
919
+ 5 G. Ciccotti, G. Jacucci, I. R. McDonald, ”Transport properties of molten alkali halides”, Phys.
920
+ Rev. A 13, 426 (1976).
921
+ 6 G. Ciccotti, G. Jacucci, K. R. McDonald, ”Thermal response to a weak external field”, J. Phys.
922
+ C: Solid State Phys. 11, L509 (1978).
923
+
924
+ 7 G. Ciccotti, G. Jacucci, I. R. McDonald, ”Thought-experiments by molecular dynamics”, J.
925
+ Stat. Phys. 21, 1 (1979).
926
+ 8 M. Schoen, C. Hoheisel, ”The shear viscosity of a Lennard-Jones fluid calculated by equilibrium
927
+ molecular dynamics”, Mol. Phys. 56, 653 (1985).
928
+ 9 C. Hoheisel, ”Bulk viscosity of model fluids. A comparison of equilibrium and nonequilibrium
929
+ molecular dynamics results”, J. Chem. Phys. 86, 2328 (1987).
930
+ 10 J. J. Erpenbeck, ”Einstein-Kubo-Helfand and McQuarrie relations for transport coefficients”,
931
+ Phys. Rev. E 51, 4296 (1995).
932
+ 11 S. Balasubramanian, C. J. Mundy, M. L. Klein, ”Shear viscosity of polar fluids: Molecular
933
+ dynamics calculations of water”, J. Chem. Phys. 105, 11190 (1996).
934
+ 12 D. Alf´e, M. J. Gillan, ”First-principles calculation of transport coefficients”, Phys. Rev. Lett.
935
+ 81, 5161 (1998).
936
+ 13 G.-J. Guo, Y. Zhang, ”Equilibrium molecular dynamics calculation of the bulk viscosity of
937
+ liquid water”, Mol. Phys. 99, 283 (2001).
938
+ 14 S. Viscardy, J. Servantie and P. Gaspard, ”Transport and Helfand moments in the Lennard-
939
+ Jones fluid. I. Shear viscosity”, J. Chem. Phys. 126, 184512 (2007).
940
+ 15 R. E. Jones, K. K. Mandadapu, ”Adaptive Green-Kubo estimates of transport coefficients from
941
+ molecular dynamics based on robust error analysis”, J. Chem. Phys. 136, 154102 (2012).
942
+ 16 S. V. Lishchuk, ”Role of three-body interactions in formation of bulk viscosity in liquid argon”,
943
+ preprint (2012): arXiv:1204.1235 [cond-mat.soft]
944
+ 17 C. Kim, O. Borodin, G. E. Karniadakis, "Quantification of sampling uncertainty for molecular
945
+ dynamics simulation: Time-dependent diffusion coefficient in simple fluids”, J. Comput. Phys.
946
+ 302, 485 (2015).
947
+ 18 E. M. Kirova, G. E. Norman, ”Viscosity calculations at molecular dynamics simulations”, Jour-
948
+ nal of Physics: Conference Series 653, 012106 (2015).
949
+ 19 Y. Shi, H. Scheiber, R. Z. Khaliullin, ”Contribution of the Covalent Component of the Hydrogen-
950
+ Bond Network to the Properties of Liquid Water”, J. Phys. Chem. A 122, 7482 (2018).
951
+ 20 F. Z. Chen, N. A. Mauro, S. M. Bertrand, P. McGrath, L. Zimmer, K. F. Kelton, ”Breakdown of
952
+ the Stokes-Einstein relationship and rapid structural ordering in CuZrAl metallic glass-forming
953
+ liquids”, J. Chem. Phys. 155, 104501 (2021).
954
+ 21 R. Rabani, M. H. Saidi, L. Joly, S. Merabia, A. Rajabpour, ”Enhanced local viscosity around
955
+
956
+ colloidal nanoparticles probed by equilibrium molecular dynamics simulations”, J. Chem. Phys.
957
+ 155, 174701 (2021).
958
+ 22 H. Kusudo, T. Omori, Y. Yamaguchi, ”Local stress tensor calculation by the method-of-plane
959
+ in microscopic systems with macroscopic flow: A formulation based on the velocity distribution
960
+ function”, J. Chem. Phys. 155, 184103 (2021).
961
+ 23 A. Torres, L. S. Pedroza, M. Fernandez-Serra, A. R. Rocha, ”Using Neural Network Force
962
+ Fields to Ascertain the Quality of Ab Initio Simulations of Liquid Water”, J. Phys. Chem. B
963
+ 125, 10772 (2021).
964
+ 24 R. Vogelsang, C. Hoheisel, G. Ciccotti, ”Thermal conductivity of the Lennard-Jones liquid by
965
+ molecular dynamics calculations”, J. Chem. Phys. 86, 6371 (1987).
966
+ 25 Z. Fan, L. F. C. Pereira, H.-Q. Wang, J.-C. Zheng, D. Donadio, A. Harju, ”Force and heat
967
+ current formulas for many-body potentials in molecular dynamics simulations with applications
968
+ to thermal conductivity calculations”, Phys. Rev. B 92, 094301 (2015).
969
+ 26 J. Kang, L.-W. Wang, ”First-principles Green-Kubo method for thermal conductivity calcula-
970
+ tions”, Phys. Rev. B 96, 020302(R) (2017).
971
+ 27 G. A. Fernandez, J. Vrabec, and H. Hasse, ”A molecular simulation study of shear and bulk
972
+ viscosity and thermal conductivity of simple real fluids”, Fluid Phase Equilibria 221, 157 (2004).
973
+ 28 M. S. Green, ”Markoff Random Processes and the Statistical Mechanics of Time-Dependent
974
+ Phenomena. II. Irreversible Processes in Fluids”, J. Chem. Phys. 22, 398 (1954).
975
+ 29 R. Kubo, M. Yokota, and S. Nakajima, ”Statistical-Mechanical Theory of Irreversible Processes.
976
+ II. Response to Thermal Disturbance”, J. Phys. Soc. Jpn 12, 1203 (1957).
977
+ 30 D. McQuarrie, "Statistical Mechanics", (University Science Books,
978
+ Sausalito, 2000).
979
+ 31 R. Hafner, G. Guevara-Carrion, J. Vrabec, P. Klein, ”Sampling the Bulk Viscosity of Water
980
+ with Molecular Dynamics Simulation in the Canonical Ensemble”, J. Phys. Chem. B 126, 10172
981
+ (2022).
982
+ 32 A. Yahya, L. Tan, S. Perticaroli, E. Mamontov, D. Pajerowski, J. Neuefeind, G. Ehlersd, J. D.
983
+ Nickels, ”Molecular origins of bulk viscosity in liquid water”, Phys. Chem. Chem. Phys. 22,
984
+ 9494 (2020).
985
+ 33 A. S. Dukhin and P. J. Goetz, ”Bulk viscosity and compressibility measurement using acoustic
986
+ spectroscopy”, J. Chem. Phys. 130, 124519 (2009).
987
+
988
+ 34 R. Car and M. Parrinello, "Unified Approach for Molecular Dynamics and Density-Functional Theory", Phys. Rev. Lett. 55, 2471
989
+ (1985). We have used the code CPMD: http://www.cpmd.org/, Copyright IBM Corp 1990-2022,
990
+ Copyright MPI f¨ur Festk¨orperforschung Stuttgart 1997-2001.
991
+ 35 N. Troullier and J. L. Martins, ”Efficient pseudopotentials for plane-wave calculations”, Phys.
992
+ Rev. B 43, 1993 (1991).
993
+ 36 A. D. Becke, ”Density-functional exchange energy approximation with correct asymptotic be-
994
+ havior", Phys. Rev. A 38, 3098 (1988); C. Lee, W. Yang, and R. G. Parr, "Development of the
995
+ Colle-Salvetti correlation energy formula into a functional of the electron density”, Phys. Rev.
996
+ B 37, 785 (1988).
997
+ 37 S. Grimme, ”Semiempirical GGA-type density functional constructed with a long-range disper-
998
+ sion correction”, J. Comp. Chem. 27, 1787 (2006).
999
+ 38 M. Sprik, J. Hutter, and M. Parrinello, ”Ab initio MD simulation of liquid water: Comparison
1000
+ of 3 gradient-corrected density functionals”, J. Chem. Phys. 105, 1142 (1996).
1001
+ 39 P. L. Silvestrelli and M. Parrinello, ”Water Molecule Dipole in the Gas and in the Liquid Phase”,
1002
+ Phys. Rev. Lett. 82, 3308 (1999).
1003
+ 40 P. L. Silvestrelli and M. Parrinello, ”Structural, electronic, and bonding properties of liquid
1004
+ water from first principles”, J. Chem. Phys. 111, 3572 (1999).
1005
+ 41 M. Boero, K. Terakura, T. Ikeshoji, C. C. Liew, and M. Parrinello, ”Hydrogen Bonding and
1006
+ Dipole Moment of Water at Supercritical Conditions: A First-Principles Molecular Dynamics
1007
+ Study”, Phys. Rev. Lett. 85, 3245 (2000).
1008
+ 42 M. Boero, K. Terakura, T. Ikeshoji, C. C. Liew, and M. Parrinello, ”Water at Supercritical
1009
+ Conditions: A First-Principles Study”, J. Chem. Phys. 115, 2219 (2001).
1010
+ 43 P. L. Silvestrelli, ”Van der Waals Interactions in DFT Made Easy by Wannier Functions”, Phys.
1011
+ Rev. Lett. 100, 053002 (2008).
1012
+ 44 P. L. Silvestrelli, K. Benyahia, S. Grubisic, F. Ancilotto, and F. Toigo, ”Van der Waals interac-
1013
+ tions at surfaces by density functional theory using Wannier functions”, J. Chem. Phys. 130,
1014
+ 074702 (2009).
1015
+ 45 P. L. Silvestrelli, ”Van der Waals Interactions in DFT using Wannier Functions”, J. Phys. Chem.
1016
+ A 113, 5224 (2009).
1017
+ 46 F. O. Kannemann and A. D. Becke, ”Van der Waals Interactions in Density-Functional Theory:
1018
+ Rare-Gas Diatomics”, J. Chem. Theory Comput. 5, 719 (2009).
1019
+
1020
+ 47 J. Schmidt, J. VandeVondele, I.-F. W. Kuo, D. Sebastiani, J. I. Siepmann, J. Hutter, C.
1021
+ J. Mundy, ”Isobaric-Isothermal Molecular Dynamics Simulations Utilizing Density Functional
1022
+ Theory: An Assessment of the Structure and Density of Water at Near-Ambient Conditions”,
1023
+ J. Phys. Chem. B 113, 11959 (2009).
1024
+ 48 J. Wang, G. Rom´an-P´erez, J. M. Soler, E. Artacho, M.-V. Fern´andez-Serra, ”Density, structure,
1025
+ and dynamics of water: The effect of van der Waals interactions”, J. Chem. Phys. 134, 024516
1026
+ (2011).
1027
+ 49 S. Yoo and S. S. Xantheas, ”Communication: The effect of dispersion corrections on the melting
1028
+ temperature of liquid water”, J. Chem. Phys. 134, 121105 (2011).
1029
+ 50 J. P. Perdew, K. Burke, and M. Ernzerhof, ”Generalized Gradient approximation made simple”,
1030
+ Phys. Rev. Lett. 77, 3865 (1996).
1031
+ 51 D. Frenkel and B. Smit, Understanding Molecular Simulation (Academic Press, San Diego,
1032
+ 1996).
1033
+ 52 C. Herrero, M. Pauletti, G. Tocci, M. Iannuzzi, L. Joly, ”Connection between water’s dynamical
1034
+ and structural properties: Insights from ab initio simulations”, PNAS 119, e2121641119 (2022).
1035
+ 53 T. A. Litovitz, E. H. Carnevale, ”Effect of Pressure on Sound Propagation in Water”, J. Appl.
1036
+ Phys. 26, 816 (1955).
1037
+ 54 J. V. Sengers, J. T. R. Watson, ”Improved International Formulations for the Viscosity and
1038
+ Thermal Conductivity of Water Substance”, J. Phys. Chem. Ref. Data 15, 1291 (1986).
1039
+ 55 K. R. Harris and L. A. Woolf, ”Temperature and Volume Dependence of the Viscosity of Water
1040
+ and Heavy Water at Low Temperatures”, J. Chem. Eng. Data 49, 1064 (2004).
1041
+ 56 M. J. Holmes, N. G. Parker, M. J. W. Povey, ”Temperature dependence of bulk viscosity in
1042
+ water using acoustic spectroscopy”, J. Phys.: Conf. Ser. 269, 012011 (2011).
1043
+ 57 J. A. Cowan, R. N. Ball, ”Temperature dependence of bulk viscosity in liquid argon”, Can. J.
1044
+ Phys. 50, 1881 (1972).
1045
+ 58 P. J. Linstrom, G. W. Mallard eds., NIST Chemistry WebBook, NIST Standard Reference
1046
+ Database Number 69
1047
+ 59 J. Sun, A. Ruzsinszky, J. P. Perdew, ”Strongly constrained and appropriately normed semilocal
1048
+ density functional”, Phys. Rev. Lett. 115, 036402 (2015).
1049
+ 60 J. Klimeˆs, D. R. Bowler, A. Michaelides, ”Chemical accuracy for the van der Waals density
1050
+ functional”, J. Phys. Condens.Matter 22, 022201 (2010); ”Van der Waals density functionals
1051
+
1052
+ applied to solids”, Phys. Rev. B 83, 195131 (2011).
1053
+ 61 B. A. Younglove and H. J. M. Hanley, ”The Viscosity and Thermal Conductivity Coefficients of
1054
+ Gaseous and Liquid Argon”, Journal of Physical and Chemical Reference Data 15, 1323 (1986).
1055
+ 62 C. Hoheisel, R. Vogelsang and M. Schoen, ”Bulk viscosity of the Lennard-Jones fluid for a wide
1056
+ range of states computed by equilibrium molecular dynamics”, J. Chem. Phys. 87, 7195 (1987).
1057
+ 63 C. M. Davis, J. Jarzynski, in "Water: A Comprehensive Treatise", Vol. 1, edited by F. Franks
1058
+ (1972) p. 443.
1059
+ 64 O. Reynolds, ”IV. On the theory of lubrication and its application to Mr. Beauchamp tower’s
1060
+ experiments, including an experimental determination of the viscosity of olive oil”, Phil. Trans.
1061
+ R. Soc. Lond. 177, 157 (1886).
1062
+ 65 See, for instance, values tabulated in https://dtrx.de/od/diff/
1063
+ 66 T. A. Litovitz and C. M. Davis, ”Physical Acoustics”, Vol. 2, edited by W. P. Mason, New
1064
+ York: Academic, Chap. 5.
1065
+ 67 J. Xu, X. Ren, W. Gong, R. Dai, D. Liu, ”Measurement of the bulk viscosity of liquid by
1066
+ Brillouin scattering”, Appl. Opt. 42, 6704 (2003).
1067
+ 68 L. B. Skinner, C. Huang, D. Schlesinger, L. G. M. Pettersson, A. Nilsson, C. J. Benmore,
1068
+ ”Benchmark oxygen-oxygen pair-distribution function of ambient water from x-ray diffraction
1069
+ measurements with a wide Q-range”, J. Chem. Phys. 138, 074506 (2013).
1070
+ 69 L. B. Skinner, C. J. Benmore, J. C. Neuefeind, J. B. Parise, ”The structure of water around
1071
+ the compressibility minimum”, J. Chem. Phys. 141, 214507 (2014).
1072
+ 70 J. Daru, H. Forbert, J. Behler, D. Marx, ”Coupled Cluster Molecular Dynamics of Condensed
1073
+ Phase Systems Enabled by Machine Learning Potentials: Liquid Water Benchmark”, Phys.
1074
+ Rev. Lett. 129, 226001 (2022).
1075
+ 71 J.-P. Hansen, I. R. McDonald, ”Theory of simple liquids”, Academic Press (1990).
1076
+ 72 E. H. Hardy, A. Zygar, M. D. Zeidler, M. Holz, F. D. Sacher, ”Isotope effect on the translational
1077
+ and rotational motion in liquid water and ammonia”, J. Chem. Phys. 114, 3174 (2001).
1078
+ 73 J. L. Yarnell, M. J. Katz, and R. G. Wenzel, ”Structure Factor and Radial Distribution Function
1079
+ for Liquid Argon at 85 K”, Phys. Rev. A 7, 2130 (1973).
1080
+
C9FQT4oBgHgl3EQfPDYj/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
CNA0T4oBgHgl3EQfAP_i/content/tmp_files/2301.01961v1.pdf.txt ADDED
@@ -0,0 +1,1182 @@
1
+ arXiv:2301.01961v1 [math.AG] 5 Jan 2023
2
+ SOME MORE FANO THREEFOLDS WITH A MULTIPLICATIVE
3
+ CHOW–KÜNNETH DECOMPOSITION
4
+ ROBERT LATERVEER
5
+ ABSTRACT. We exhibit several families of Fano threefolds with a multiplicative Chow–Künneth
6
+ decomposition, in the sense of Shen–Vial. As a consequence, a certain tautological subring of the
7
+ Chow ring of powers of these threefolds injects into cohomology. As a by-product of the argu-
8
+ ment, we observe that double covers of projective spaces admit a multiplicative Chow–K¨unneth
9
+ decomposition.
10
+ 1. INTRODUCTION
11
+ Given a smooth projective variety Y over C, let Ai(Y ) := CHi(Y )Q denote the Chow groups
12
+ of Y (i.e. the groups of codimension i algebraic cycles on Y with Q-coefficients, modulo rational
13
+ equivalence). The intersection product defines a ring structure on A∗(Y ) = �
14
+ i Ai(Y ), the Chow
15
+ ring of Y [14].
16
+ In the special case of K3 surfaces, this ring structure has remarkable properties:
17
+ Theorem 1.1 (Beauville–Voisin [3]). Let S be a projective K3 surface. The Q-subalgebra
+ ⟨A1(S), cj(S)⟩ ⊂ A∗(S)
+ injects into cohomology under the cycle class map.
23
+ Theorem 1.2 (Voisin [54], Yin [57]). Let S be a projective K3 surface, and m ∈ N. The
+ Q-subalgebra
+ R∗(Sm) := ⟨A1(S), ∆S⟩ ⊂ A∗(Sm)
+ (generated by pullbacks of divisors and pullbacks of the diagonal ∆S ⊂ S × S) injects into
+ cohomology under the cycle class map for all m ≤ 2 dim H2_tr(S, Q) + 1 (where H2_tr(S, Q)
+ denotes the transcendental part of cohomology). Moreover, R∗(Sm) injects into cohomology for
+ all m ∈ N if and only if S is Kimura finite-dimensional.
36
+ The Chow ring of abelian varieties also has an interesting property: there is a multiplicative
37
+ splitting, defined by the Fourier transform [1].
38
+ Motivated by the particular behaviour of K3 surfaces and abelian varieties, Beauville [2] has
39
+ conjectured that for certain special varieties, the Chow ring should admit a multiplicative split-
40
+ ting. In the wake of Beauville’s “splitting property conjecture”, Shen–Vial [47] have introduced
41
+ the concept of multiplicative Chow–Künneth decomposition (we will abbreviate this to "MCK
42
+ Key words and phrases. Algebraic cycles, Chow group, motive, Beauville’s “splitting property” conjecture, mul-
43
+ tiplicative Chow–Künneth decomposition, Fano threefolds, tautological ring.
44
+ 2020 Mathematics Subject Classification: 14C15, 14C25, 14C30.
45
+ Supported by ANR grant ANR-20-CE40-0023.
46
+ 1
47
+
48
+ 2
49
+ ROBERT LATERVEER
50
+ decomposition”). With the concept of MCK decomposition, it is possible to make concrete sense
51
+ of this elusive “splitting property conjecture” of Beauville.
52
+ It is hard to understand precisely which varieties admit an MCK decomposition. To give an
53
+ idea of what is known: hyperelliptic curves have an MCK decomposition [47, Example 8.16],
54
+ but the very general curve of genus ≥ 3 does not have an MCK decomposition [12, Example
55
+ 2.3]; K3 surfaces have an MCK decomposition, but certain high degree surfaces in P3 do not
56
+ have an MCK decomposition (cf. the examples given in [43], cf. also section 2 below).
57
+ In this note, we will focus on Fano threefolds and ask the following question:
58
+ Question 1.3. Let X be a Fano threefold with Picard number 1. Does X admit an MCK decom-
59
+ position ?
60
+ The restriction on the Picard number is necessary to rule out a counterexample of Beauville
61
+ [2, Examples 9.1.5]. The answer to Question 1.3 is affirmative for cubic threefolds [8], [12], for
62
+ intersections of 2 quadrics [32], for intersections of a quadric and a cubic [34], and for prime
63
+ Fano threefolds of genus 8 [37] and of genus 10 [38].
64
+ The main result of this paper answers Question 1.3 for several more families of Fano three-
65
+ folds:
66
+ Theorem (=Theorem 4.1). The following smooth Fano threefolds have a multiplicative Chow–
67
+ Künneth decomposition:
68
+ • hypersurfaces of weighted degree 6 in weighted projective space P(13, 2, 3);
69
+ • quartic double solids;
70
+ • sextic double solids;
71
+ • double covers of a quadric in P4 branched along the intersection with a quartic;
72
+ • special Gushel–Mukai threefolds.
73
+ In Table 1 (at the end of this paper), we have listed all Fano threefolds of Picard number 1 and
74
+ what is known about MCK for them.
75
+ To prove Theorem 4.1, we provide a general criterion (Proposition 3.3), that may be useful in
76
+ other situations. For example, using this criterion we also prove the following:
77
+ Proposition (=Proposition 3.6). Let X be a smooth projective variety such that X → Pn is a
78
+ double cover ramified along a smooth divisor D ⊂ Pn of degree d > n. Then X admits an MCK
79
+ decomposition.
80
+ As a consequence of Theorem 4.1, we obtain an injectivity result similar to Theorem 1.2:
81
+ Corollary (cf. Theorem 5.1). Let Y be a Fano threefold as in Theorem 4.1, and m ∈ N. Let
+ R∗(Y m) := ⟨h, ∆Y ⟩ ⊂ A∗(Y m)
+ be the Q-subalgebra generated by pullbacks of the polarization h ∈ A1(Y ) and pullbacks of the
+ diagonal ∆Y ∈ A3(Y × Y ). The cycle class map induces injections
+ R∗(Y m) ֒→ H∗(Y m, Q) for all m ∈ N .
91
+
92
+ SOME MORE FANO THREEFOLDS WITH AN MCK DECOMPOSITION
93
+ 3
94
+ Conventions. In this paper, the word variety will refer to a reduced irreducible scheme of finite
95
+ type over C. A subvariety is a (possibly reducible) reduced subscheme which is equidimensional.
96
+ All Chow groups will be with rational coefficients: we will denote by Aj(Y ) the Chow group
97
+ of j-dimensional cycles on Y with Q-coefficients; for Y smooth of dimension n the notations
98
+ Aj(Y ) and An−j(Y ) are used interchangeably. The notation Aj
99
+ hom(Y ) will be used to indicate
100
+ the subgroup of homologically trivial cycles. For a morphism f : X → Y , we will write Γf ∈
101
+ A∗(X × Y ) for the graph of f.
102
+ The contravariant category of Chow motives (i.e., pure motives with respect to rational equiv-
103
+ alence as in [46], [41]) will be denoted Mrat.
104
+ 2. MCK DECOMPOSITION
105
+ Definition 2.1 (Murre [40]). Let X be a smooth projective variety of dimension n. We say that
106
+ X has a CK decomposition if there exists a decomposition of the diagonal
+ ∆X = π0_X + π1_X + · · · + π2n_X in An(X × X) ,
+ such that the πi_X are mutually orthogonal idempotents and (πi_X)∗H∗(X, Q) = Hi(X, Q).
115
+ (NB: "CK decomposition" is shorthand for "Chow–Künneth decomposition".)
116
+ Remark 2.2. Murre has conjectured that any smooth projective variety should have a CK de-
117
+ composition [40], [20].
118
+ Definition 2.3 (Shen–Vial [47]). Let X be a smooth projective variety of dimension n, and let
+ ∆sm_X ∈ A2n(X × X × X) denote the class of the small diagonal
+ ∆sm_X := { (x, x, x) | x ∈ X } ⊂ X × X × X .
127
+ An MCK decomposition is defined as a CK decomposition {πi_X} of X that is multiplicative, i.e.
+ it satisfies
+ πk_X ◦ ∆sm_X ◦ (πi_X × πj_X) = 0 in A2n(X × X × X) for all i + j ̸= k .
135
+ (NB: "MCK decomposition" is shorthand for "multiplicative Chow–Künneth decomposition".)
136
+ Remark 2.4. The small diagonal (when considered as a correspondence from X × X to X)
137
+ induces the multiplication morphism
138
+ ∆sm
139
+ X :
140
+ h(X) ⊗ h(X) → h(X) in Mrat .
141
+ Let us assume X has a CK decomposition
142
+ h(X) =
143
+ 2n
144
+
145
+ i=0
146
+ hi(X) in Mrat .
147
+ By definition, this decomposition is multiplicative if for any i, j the composition
148
+ hi(X) ⊗ hj(X) → h(X) ⊗ h(X)
149
+ ∆sm
150
+ X
151
+ −−→ h(X) in Mrat
152
+ factors through hi+j(X).
153
+
154
+ 4
155
+ ROBERT LATERVEER
156
+ If X has an MCK decomposition, then setting
157
+ Ai
158
+ (j)(X) := (π2i−j
159
+ X
160
+ )∗Ai(X) ,
161
+ one obtains a bigraded ring structure on the Chow ring: that is, the intersection product sends
162
+ Ai
163
+ (j)(X) ⊗ Ai′
164
+ (j′)(X) to Ai+i′
165
+ (j+j′)(X).
166
+ It is conjectured that for any X with an MCK decomposition, one has
167
+ Ai
168
+ (j)(X)
169
+ ??= 0 for j < 0 ,
170
+ Ai
171
+ (0)(X) ∩ Ai
172
+ hom(X)
173
+ ??= 0 ;
174
+ this is related to Murre’s conjectures B and D, that have been formulated for any CK decompo-
175
+ sition [40].
176
+ For more background on the concept of MCK, and for examples of varieties with an MCK
177
+ decomposition, we refer to [47, Section 8], as well as [53], [48], [13], [28], [39], [29], [30], [31],
178
+ [12], [33], [34], [36], [42].
179
+ 3. A GENERAL CRITERION
180
+ We develop a general criterion for having an MCK. The criterion hinges on the Franchetta
181
+ property for families of varieties, which is defined as follows:
182
+ Definition 3.1. Let X → B be a smooth projective morphism, where X , B are smooth quasi-
183
+ projective varieties, and let us write Xb for the fiber over b ∈ B. We say that X → B has the
184
+ Franchetta property in codimension j if the following holds: for every Γ ∈ Aj(X ) such that the
185
+ restriction Γ|Xb is homologically trivial for the very general b ∈ B, the restriction Γ|b is zero in
186
+ Aj(Xb) for all b ∈ B.
187
+ We say that X → B has the Franchetta property if X → B has the Franchetta property in
188
+ codimension j for all j.
189
+ This property is studied in [45], [5], [10], [11].
190
+ Definition 3.2. Given a family X → B as in Definition 3.1, we use the shorthand
191
+ GDAj
192
+ B(Xb) := Im
193
+
194
+ Aj(X ) → Aj(Xb)
195
+
196
+ ⊂ Aj(Xb)
197
+ (GDA∗() stands for the “generically defined cycles”).
198
+ The Franchetta property for X → B means that the generically defined cycles inject into
199
+ cohomology.
200
+ Proposition 3.3. Let X → B be a family of smooth projective varieties of relative dimension n,
201
+ with fiber Xb. Assume the following:
202
+ (i) the family X ×B X → B has the Franchetta property;
203
+ (ii) there exists a projective quotient variety P (i.e. P = P ′/G where P ′ is smooth projective
204
+ and G ⊂ Aut(P ′) is a finite cyclic group) with trivial Chow groups (i.e. A∗
205
+ hom(P) = 0), such
206
+ that Xb → P is a double cover with branch locus a smooth ample divisor, for all b ∈ B.
207
+ Then Xb admits an MCK decomposition, for all b ∈ B.
208
+ Proof. We have the following Lefschetz-type result in cohomology:
209
+
210
+ SOME MORE FANO THREEFOLDS WITH AN MCK DECOMPOSITION
211
+ 5
212
+ Lemma 3.4. Let Xb → P be as in the proposition. Then pullback
213
+ Hi(P, Q) → Hi(Xb, Q)
214
+ is an isomorphism for i < n, and injective for i = n.
215
+ Proof. In case P is smooth, this is a result of Cornalba [6]. The general case is readily deduced
216
+ from this: assume P = P ′/G where P ′ is smooth projective and G ⊂ Aut(P ′) is a finite cyclic
217
+ group, and consider the fiber square
218
+ X′
219
+ b
220
+
221
+ Xb
222
+
223
+
224
+ P ′
225
+
226
+ P .
227
+ Cornalba’s result applies to the double cover of the left-hand vertical arrow, and so pullback
228
+ Hi(P ′, Q) → Hi(X′
229
+ b, Q)
230
+ is an isomorphism for i < n, and injective for i = n. The G-action on P ′ lifts to X′
231
+ b, and taking
232
+ G-invariants we find that
233
+ Hi(P, Q) = Hi(P ′, Q)G → Hi(X′
234
+ b, Q)G = Hi(Xb, Q)
235
+ is an isomorphism for i < n, and injective for i = n.
236
+
237
+ Since H∗(P, Q) is algebraic (this is a general fact for any variety with trivial Chow groups, cf.
238
+ [23]), this implies that also Hi(Xb, Q) is algebraic, for all i ̸= n. More precisely, for i ̸= n odd,
239
+ one has Hi(Xb, Q) = 0 while for i < n even, one has isomorphisms
240
+ Ai/2(P) ∼= Hi(Xb, Q) ,
241
+ induced by pullback. This implies that for i < n the K¨unneth components πi
242
+ Xb are algebraic, and
243
+ generically defined. To define the K¨unneth components πi
244
+ Xb explicitly, let p: Xb → P denote
245
+ the projection morphism, and let πi
246
+ P denote the (unique) CK decomposition of P. One can then
247
+ define
248
+ πi
249
+ Xb := 1/2 tΓp ◦ πi
250
+ P ◦ Γp if i < n ,
251
+ πi
252
+ Xb := π2n−i
253
+ Xb
254
+ if i > n ,
255
+ πn,fix
256
+ Xb
257
+ := 1/2 tΓp ◦ πn
258
+ P ◦ Γp ,
259
+ πn,var
260
+ Xb
261
+ := ∆Xb −
262
+
263
+ j̸=n
264
+ πj
265
+ Xb − πn,fix
266
+ Xb
267
+ ,
268
+ πn
269
+ Xb := πn,fix
270
+ Xb
271
+ + πn,var
272
+ Xb
273
+ ∈ An(Xb × Xb) .
274
+ (Note that πn
275
+ Xb = 0 in case n is odd.) The notation is meant to remind the reader that πn,fix
276
+ Xb
277
+ and
278
+ πn,var
279
+ Xb
280
+ are projectors on the fixed part resp. the variable part of cohomology in degree n.
281
+
282
+ 6
283
+ ROBERT LATERVEER
284
+ These projectors define a generically defined CK decomposition for each Xb, i.e. all projectors
285
+ are in GDAn
286
+ B(Xb × Xb). This CK decomposition has the property that
287
+ hj(Xb) := (Xb, πj
288
+ Xb, 0) = ⊕1(∗) ∀j ̸= n ,
289
+ hn,fix(Xb) := (Xb, πn,fix
290
+ Xb
291
+ , 0) = ⊕1(∗) in Mrat .
292
+ (1)
293
+ Let us now proceed to verify that this CK decomposition is MCK. What we need to check is
294
+ the vanishing
295
+ πk
296
+ Xb ◦ ∆sm
297
+ Xb ◦ (πi
298
+ Xb × πj
299
+ Xb) = 0 in A2n(Xb × Xb × Xb) for all i + j ̸= k .
300
+ First, let us assume that at least one of the 3 integers (i, j, k) is different from n, and i+j ̸= k.
301
+ In this case, we have that
302
+ πk
303
+ Xb ◦ ∆sm
304
+ Xb ◦ (πi
305
+ Xb × πj
306
+ Xb) = (tπi
307
+ Xb × tπj
308
+ Xb × πk
309
+ Xb)∗∆sm
310
+ Xb
311
+ = (π2n−i
312
+ Xb
313
+ × π2n−j
314
+ Xb
315
+ × πk
316
+ Xb)∗∆sm
317
+ Xb
318
+ ֒→
319
+
320
+ A∗(Xb × Xb) .
321
+ Here the first equality is an application of Lieberman’s lemma [41, Lemma 2.1.3], and the in-
322
+ clusion follows from property (1). The resulting cycle in � A∗(Xb × Xb) is generically defined
323
+ (since the π∗
324
+ Xb and ∆sm
325
+ Xb are) and homologically trivial (since i + j ̸= k). By assumption (i), the
326
+ resulting cycle in � A∗(Xb × Xb) is rationally trivial, and so
327
+ πk
328
+ Xb ◦ ∆sm
329
+ Xb ◦ (πi
330
+ Xb × πj
331
+ Xb) = 0 in A2n(Xb × Xb × Xb) ,
332
+ as desired.
333
+ It remains to treat the case i = j = k = n. The decomposition πn
334
+ Xb := πn,fix
335
+ Xb
336
+ + πn,var
337
+ Xb
338
+ induces
339
+ a decomposition
340
+ πn
341
+ Xb ◦ ∆sm
342
+ Xb ◦ (πn
343
+ Xb × πn
344
+ Xb) =πn,fix
345
+ Xb
346
+ ◦ ∆sm
347
+ Xb ◦ (πn,fix
348
+ Xb
349
+ × πn,fix
350
+ Xb
351
+ )
352
+ + πn,fix
353
+ Xb
354
+ ◦ ∆sm
355
+ Xb ◦ (πn,fix
356
+ Xb
357
+ × πn,var
358
+ Xb
359
+ )
360
+ + · · · · · ·
361
+ + πn,var
362
+ Xb
363
+ ◦ ∆sm
364
+ Xb ◦ (πn,var
365
+ Xb
366
+ × πn,var
367
+ Xb
368
+ ) in A2n(Xb × Xb × Xb) .
369
+ Using property (1) and the Franchetta property for Xb × Xb, all summands containing πn,fix
370
+ Xb
371
+ vanish. One is left with the last term. To deal with the last term, we observe that the covering
372
+ involution ι ∈ Aut(Xb) of the double cover p: Xb → P induces a splitting of the motive
373
+ h(Xb) =h(Xb)+ ⊕ h(Xb)−
374
+ :=(Xb, 1/2 (∆Xb + Γι), 0) ⊕ (Xb, 1/2 (∆Xb − Γι), 0) in Mrat ,
375
+ where Γι denotes the graph of the involution ι. Moreover, there is equality
376
+ hn,var(Xb) = h(Xb)− in Mrat .
377
+ But the intersection product map
378
+ h(Xb)− ⊗ h(Xb)−
379
+ ∆sm
380
+ Xb
381
+ −−→ h(Xb)
382
+
383
+ SOME MORE FANO THREEFOLDS WITH AN MCK DECOMPOSITION
384
+ 7
385
+ factors over h(Xb)+, as is readily seen (cf. Lemma 3.5 below), which is saying exactly that
386
+ πn,var
387
+ Xb
388
+ ◦ ∆sm
389
+ Xb ◦ (πn,var
390
+ Xb
391
+ × πn,var
392
+ Xb
393
+ ) = 0 in A2n(Xb × Xb × Xb) .
394
+ This closes the proof, modulo the following lemma (which is probably well-known, but we
395
+ include a proof for completeness):
396
+ Lemma 3.5. Let X → P be a double cover, where X and P are quotient varieties, and let
397
+ ι ∈ Aut(X) be the covering involution. Let
398
+ h(X)+ := (X, 1/2 (∆X + Γι), 0) ,
399
+ h(X)− := (X, 1/2 (∆X − Γι), 0) in Mrat .
400
+ The map of motives
401
+ h(X)− ⊗ h(X)−
402
+ ∆sm
403
+ X
404
+ −−→ h(X)
405
+ factors over h(X)+.
406
+ To prove the lemma, let ι ∈ Aut(X) denote the covering involution. The motive h(X)− is
407
+ defined by the projector
408
+ ∆−
409
+ X := 1/2 (∆X − Γι)
410
+ ∈ An(X × X) .
411
+ Plugging this in and developing, it follows that
+ ∆−_X ◦ ∆sm_X ◦ (∆−_X × ∆−_X)
+ = 1/8 (∆X − Γι) ◦ ∆sm_X ◦ (∆X×X − ∆X × Γι − Γι × ∆X + Γι × Γι)
+ = 1/8 [ ∆X ◦ ∆sm_X ◦ (∆X × ∆X) + · · · − Γι ◦ ∆sm_X ◦ (Γι × Γι) ]
+ = 1/8 [ ∆sm_X − (id × id × ι)∗(∆sm_X) − (id × ι × id)∗(∆sm_X) − (ι × id × id)∗(∆sm_X)
+       + (id × ι × ι)∗(∆sm_X) + (ι × id × ι)∗(∆sm_X) + (ι × ι × id)∗(∆sm_X)
+       − (ι × ι × ι)∗(∆sm_X) ]
+ in A2n(X × X × X) .
440
+ Here the last equality is by virtue of Lieberman’s lemma [41, Lemma 2.1.3]. However, we have
441
+ equality
442
+ ∆sm
443
+ X = {(x, x, x) | x ∈ X} = (ι × ι × ι)∗(∆sm
444
+ X ) in A2n(X × X × X) ,
445
+ and so the sum of the first and last summand vanish. Likewise, we have equality
446
+ (id ×ι×ι)∗(∆sm
447
+ X ) = (id ×ι×ι)∗(ι×ι×ι)∗(∆sm
448
+ X ) = (ι×id × id)∗(∆sm
449
+ X ) in A2n(X ×X ×X) ,
450
+ and so the other summands cancel each other pairwise. This proves the lemma.
451
+
452
+ As a first application of our general criterion, we now proceed to show the following:
453
+ Proposition 3.6. Let X be a smooth projective variety such that X → Pn is a double cover
454
+ ramified along a smooth divisor D ⊂ Pn, and assume either dim Hn(X, Q) > 1, or D has
455
+ degree d > n. Then X admits an MCK decomposition.
456
+
457
459
+ Proof. Double covers X as in the proposition are exactly the smooth hypersurfaces of degree 2d
460
+ in the weighted projective space P := P(1n+1, d), where 2d := deg D. Let
461
+ B ⊂ ¯B := PH0(P, OP(2d))
462
+ denote the Zariski open parametrizing smooth hypersurfaces, and let
463
+ B × P ⊃ X → B
464
+ denote the universal family. In view of Proposition 3.3, it suffices to check that the family
465
+ X ×B X → B has the Franchetta property.
466
+ To this end, we remark that the line bundle OP(2d) is very ample (cf. Lemma 3.7 below),
467
+ which means that the set-up verifies condition (∗2) of [12, Definition 2.5]. An application of the
468
+ stratified projective bundle argument [12, Proposition 2.6] then implies that
469
+ (2)
470
+ GDA∗
471
+ B(Xb × Xb) =
472
+
473
+ (pi)∗(h), ∆Xb
474
+
475
+ ,
476
+ where we write h ∈ A1(Xb) for the hyperplane class. The excess intersection formula [14,
477
+ Theorem 6.3] gives an equality
478
+ ∆Xb · (pi)∗(h) = 2d
479
+
480
+ j
481
+ (p1)∗(hj) · (p2)∗(hn+1−j) in An+1(Xb × Xb) ,
482
+ and so equality (2) reduces to the equality
483
+ GDA∗
484
+ B(Xb × Xb) =
485
+
486
+ (p1)∗(h), (p2)∗(h)
487
+
488
+ ⊕ Q[∆Xb] .
489
+ The “decomposable part” ⟨(p1)∗(h), (p2)∗(h)⟩ injects into cohomology, because of the K¨unneth
490
+ formula for H∗(Xb × Xb, Q). The class of the diagonal in cohomology is linearly independent
491
+ from the decomposable part: indeed, if the diagonal were decomposable it would act as zero on
492
+ the primitive cohomology
493
+ Hn
494
+ prim(Xb, Q) := Coker
495
+
496
+ Hn(Pn, Q) → Hn(Xb, Q)
497
+
498
+ .
499
+ But the assumption dim Hn(Xb, Q) > 1 is equivalent to having Hn
500
+ prim(Xb, Q) ̸= 0. This proves
501
+ the Franchetta property for X ×B X → B, and closes the proof.
502
+ The case d > n is a special case where Hn
503
+ prim(Xb, Q) ̸= 0, because it is known that the
504
+ geometric genus of Xb is
505
+ pg(Xb) =
506
+ �d − 1
507
+ n
508
+
509
+ [9, Section 3.5.4].
510
+ It remains to prove the following, which we have used above:
511
+ Lemma 3.7. Let P := P(1n+1, d). The sheaf OP(d) is locally free and very ample.
512
+ The assertion about the sheaf being locally free is just because d is a multiple of the weights
513
+ of P (cf. [7, Remarques 1.8]). As for the very ampleness, we apply Delorme’s criterion [7,
514
+ Proposition 2.3(iii)] (cf. also [4, Theorem 4.B.7]). To prove very ampleness of OP(d), we need
515
+ to prove that the integer E as defined in [7] and [4] is equal to 0.
516
+ Let us write x0, . . . , xn, y for the weighted homogeneous coefficients of P, where xj and y
517
+ have weight 1 resp. d. It is readily seen that every monomial in xj, y of (weighted) degree
518
+
519
521
+ m + dk (where m is a positive multiple of d, and k is any positive integer) is divisible by a
522
+ monomial of (weighted) degree dk. This means that the integer E defined in loc. cit. is 0, and so
523
+ [7, Proposition 2.3(iii)] implies the very ampleness of OP(d).
524
+ This proves the lemma, and ends the proof of the proposition.
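+ The divisibility statement used here is a finite check for any fixed d and k; the following
+ brute-force sketch (not part of the original argument, with illustrative function names) confirms
+ it in small cases, representing a monomial x^alpha y^b only through a = |alpha| since all xj have
+ weight 1.
+ def has_divisor(a, b, d, target):
+     # does x^alpha * y^b (|alpha| = a, wt(x_j) = 1, wt(y) = d) have a sub-monomial of weighted degree target?
+     return any(0 <= target - d * bp <= a for bp in range(b + 1))
+
+ def check_O_d(d, k_max=4, t_max=4):
+     # every monomial of weighted degree m + d*k (m = d*t, t >= 1) should admit
+     # a divisor of weighted degree d*k, i.e. the integer E of Delorme vanishes
+     for k in range(1, k_max + 1):
+         for t in range(1, t_max + 1):
+             deg = d * t + d * k
+             for b in range(deg // d + 1):
+                 assert has_divisor(deg - d * b, b, d, d * k)
+     return True
+
+ print(check_O_d(2), check_O_d(3), check_O_d(5))   # True True True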
525
+
526
+ Here is another sample application of our general criterion:
527
+ Proposition 3.8. Let X ⊂ P(1n, 2, 3) be a smooth hypersurface of (weighted) degree 6. Assume
528
+ dim Hn(X, Q) > 1. Then X has an MCK decomposition.
529
+ Proof. The varieties X as in the proposition are exactly the smooth double covers of P :=
530
+ P(1n, 2) branched along a (weighted) degree 6 divisor (cf. [26, Remark 2.3] and for n = 3
531
+ also [17, Theorem 4.2]). Let X → B denote the family of such double covers. We are going to
532
+ check that the family X ×B X → B has the Franchetta property. Proposition 3.8 is then a special
533
+ case of our general criterion Proposition 3.3.
534
+ Let ¯
535
+ X → ¯B ∼= Pr denote the universal family of all (possibly singular) hypersurfaces of
536
+ weighted degree 6 in P. The line bundle OP(6) is very ample (cf. Lemma 3.9 below), and so the
537
+ projection
538
+ ¯
539
+ X × ¯B ¯
540
+ X → P × P
541
+ has the structure of a stratified projective bundle (with strata the diagonal ∆P and its comple-
542
+ ment). One can thus use the stratified projective bundle argument [12, Proposition 2.6] to deduce
543
+ the identity
544
+ GDA∗
545
+ B(X × X) =
546
+
547
+ (pi)∗GDA∗
548
+ B(X), ∆X
549
+
550
+ =
551
+
552
+ (pi)∗(h), ∆X
553
+
554
+ (here, h ∈ A1(X) denotes the restriction to X of an ample generator of A1(P) ∼= Q).
555
+ Since X ⊂ P is a hypersurface, the excess intersection formula gives
556
+ ∆X · (pi)∗(h) = ∆P|X
557
+
558
+
559
+ (pi)∗(h)
560
+
561
+ .
562
+ The above identification thus simplifies to
563
+ GDA∗
564
+ B(X × X) =
565
+
566
+ (pi)∗(h)
567
+
568
+ ⊕ Q[∆X] .
569
+ The assumption that dim Hn(X, Q) > 1 implies that the diagonal ∆X is linearly independent
570
+ in cohomology from the decomposable classes
571
+
572
+ (pi)∗(h)
573
+
574
+ (indeed, the decomposable classes act
575
+ as zero on the primitive cohomology of X, while the diagonal acts as the identity). This shows
576
+ that GDA∗
577
+ B(X × X) injects into cohomology, as requested.
578
+ Lemma 3.9. Let P := P(1n, 2, 3). The sheaf OP(6) is (locally free and) very ample.
579
+ The assertion about the sheaf being locally free is just because 6 is a multiple of all the weights
580
+ (cf. [7, Remarques 1.8]). As for the very ampleness, we apply Delorme’s criterion [7, Proposition
581
+ 2.3(iii)] (cf. also [4, Theorem 4.B.7]). To prove very ampleness of OP(6), we need to prove that
582
+ the integer E defined in [7] and [4] is equal to 0.
583
+ Let us write x1, . . . , y, z for the weighted homogeneous coefficients of P, where y and z have
584
+ weight 2 resp. 3. We need to check that every monomial in xj, y, z of (weighted) degree 6 + 6k
585
+ is divisible by a monomial of (weighted) degree 6k (if this is the case, then E = 0 and [7,
586
+
587
+ 10
588
+ ROBERT LATERVEER
589
+ Proposition 2.3(iii)] implies the very ampleness of OP(6)). In case the monomial contains z2, it
590
+ is divisible by z2 and so the condition is satisfied. Assume now the monomial contains only one
591
+ z. In case the monomial contains y3 it is divisible by y3. Next, if the monomial contains y (or
592
+ y2) it is divisible by zyxj (for some j) and so the condition is satisfied. A monomial in z and xj
593
+ obviously satisfies the condition. Finally, monomials in xj satisfy the condition.
594
+ This proves the lemma, and ends the proof of the proposition.
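+ The monomial case analysis above can likewise be confirmed by an exhaustive check in low
+ weighted degree (again only an illustrative sketch, not part of the proof):
+ from itertools import product
+
+ def splits_off(a, b, c, target):
+     # x^alpha y^b z^c with |alpha| = a and weights (1, 2, 3): is there a sub-monomial of weighted degree target?
+     return any(ap + 2 * bp + 3 * cp == target
+                for ap, bp, cp in product(range(a + 1), range(b + 1), range(c + 1)))
+
+ def check_O_6(k_max=3):
+     # every monomial of weighted degree 6 + 6k should have a divisor of weighted degree 6k
+     for k in range(1, k_max + 1):
+         deg, target = 6 + 6 * k, 6 * k
+         for c in range(deg // 3 + 1):
+             for b in range((deg - 3 * c) // 2 + 1):
+                 assert splits_off(deg - 3 * c - 2 * b, b, c, target)
+     return True
+
+ print(check_O_6())   # True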
595
+
596
+ 4. MAIN RESULT
597
+ Theorem 4.1. The following Fano threefolds admit an MCK decomposition:
598
+ (i) hypersurfaces of weighted degree 6 in weighted projective space P(13, 2, 3);
599
+ (ii) quartic double solids;
600
+ (iii) sextic double solids;
601
+ (iv) double covers of a quadric in P4 branched along the intersection with a quartic;
602
+ (v) special Gushel–Mukai threefolds.
603
+ Proof. The cases (ii) and (iii) are immediate applications of Proposition 3.6. The case (i) is a
604
+ special case of Proposition 3.8.
605
+ Before proving case (iv), let us first state a preparatory lemma:
606
+ Lemma 4.2. Let Z ⊂ P := P(15, 2) be a smooth weighted hypersurface of degree 2. Then
607
+ ∆Z = 1
608
+ 2
609
+ 4
610
+
611
+ j=0
612
+ hj × h4−j
613
+ in A4(Z × Z) .
614
+ Proof. Z is a quotient of a non-singular quadric in P5 and so Z has trivial Chow groups (i.e.
615
+ A∗
616
+ hom(Z) = 0). Using [9, 4.4.2], one can compute the Betti numbers of Z and one finds that
617
+ they are the same as those of projective space P4. This means that there is a cohomological
618
+ decomposition of the diagonal
619
+ ∆Z = 1
620
+ 2
621
+ 4
622
+
623
+ j=0
624
+ hj × h4−j
625
+ in H8(Z × Z, Q) .
626
+ Since Z (and hence also Z × Z) has trivial Chow groups, the same decomposition holds modulo
627
+ rational equivalence, proving the lemma.
628
+
629
+ Now, to prove case (iv) of Theorem 4.1, we apply our general criterion Proposition 3.3. Let
630
+ P := P(15, 2), and let Y → B be the universal family of smooth dimensionally transverse
631
+ complete intersections of OP(2) ⊕ OP(4), where the base B is a Zariski open
632
+ B ⊂ ¯B := PH0(P, OP(2) ⊕ OP(4)) .
633
+ It follows from Lemma 3.7 that OP(2) and OP(4) are very ample line bundles on P, and so
634
+ ¯Y × ¯B ¯Y → P × P is a stratified projective bundle with strata ∆P and its complement. The usual
635
+ stratified projective bundle argument [12, Proposition 2.6] applies, and we find that
636
+ GDA∗
637
+ B(Y × Y ) =
638
+
639
+ (pi)∗GDA∗
640
+ B(Y ), ∆Y
641
+
642
+ =
643
+
644
+ (pi)∗(h), ∆Y
645
+
646
+
647
+ SOME MORE FANO THREEFOLDS WITH AN MCK DECOMPOSITION
648
+ 11
649
+ (here, h ∈ A1(Y ) denotes the restriction to Y of an ample generator of A1(P) ∼= Q). Let
650
+ Y = Z ∩ Z′, where Z and Z′ ⊂ P are hypersurfaces of (weighted) degree 2 and 4. Up to
651
+ shrinking B, we may assume the hypersurface Z is smooth. Since Y ⊂ Z is a divisor, the excess
652
+ intersection formula gives
653
+ ∆Y · (pi)∗(h) = ∆Z|Y
654
+ in A4(Y × Y ) .
655
+ Using Lemma 4.2, it follows that
656
+ ∆Y · (pi)∗(h) ∈
657
+
658
+ (pi)∗(h)
659
+
660
+ .
661
+ The above identification thus simplifies to
662
+ GDA∗
663
+ B(Y × Y ) =
664
+
665
+ (pi)∗(h)
666
+
667
+ ⊕ Q[∆Y ] .
668
+ As before, the fact that the diagonal ∆Y is linearly independent from the decomposable corre-
669
+ spondences in cohomology now shows that
670
+ GDA∗
671
+ B(Y × Y ) → H∗(Y × Y, Q)
672
+ is injective, and so Y verifies the hypotheses of Proposition 3.3.
673
+ The argument for case (v) is similar to that of (iv). First, in view of the spread argument
674
+ [55, Lemma 3.2], it suffices to establish an MCK decomposition for the generic special Gushel–
675
+ Mukai threefold Y . Thus we may assume that there exists P ⊂ Gr(2, 5), a smooth complete
676
+ intersection of Pl¨ucker hyperplanes, and a double cover p: Y → P branched along a smooth
677
+ Gushel–Mukai surface. We now consider the family Y → B of all double covers of P branched
678
+ along smooth Gushel–Mukai surfaces (so B ⊂ ¯B is a Zariski open in the projectivized space of
679
+ quadratic sections of the cone over P), and we apply our general criterion Proposition 3.3 to this
680
+ family.
681
+ Lemma 4.3. Let Y → B be the family of double covers of P branched along smooth Gushel–
682
+ Mukai surfaces. The family Y → B has the Franchetta property.
683
+ Proof. We consider the family ¯Y → ¯B with the projection to the cone C over P. This is a
684
+ projective bundle, and so for any fiber Y = Yb with b ∈ B we have
685
+ GDA∗
686
+ B(Y ) = Im
687
+
688
+ A∗(C) → A∗(Y )
689
+
690
+ .
691
+ The condition b ∈ B means exactly that Y avoids the summit of the cone C, and so (writing
692
+ C◦ ⊂ C for the complement of the summit of the cone) we have
693
+ (3)
694
+ GDA∗
695
+ B(Y ) = Im
696
+
697
+ A∗(C◦) → A∗(Y )
698
+
699
+ .
700
+ But C◦ → P is an affine bundle, and
701
+ A∗(P) = Im
702
+
703
+ A∗(Gr(2, 5)) → A∗(P)
704
+
705
+ =
706
+
707
+ h
708
+
709
+ ,
710
+ where h denotes the restriction to P of a Pl¨ucker hyperplane (this follows from [35, Theorem
711
+ 3.17], or alternatively from the fact that the derived category of P has a full exceptional collection
712
+ of length 4 [44]). Thus, (3) reduces to
713
+ GDA∗
714
+ B(Y ) =
715
+
716
+ h
717
+
718
+ .
719
+ This proves the Franchetta property for Y .
720
+
721
+
722
+ 12
723
+ ROBERT LATERVEER
724
+ Lemma 4.4. Let Y → B be as in Lemma 4.3. The family Y ×B Y → B has the Franchetta
725
+ property.
726
+ Proof. Let us consider the family ¯Y × ¯B ¯Y → ¯B with the projection to C × C. This is a stratified
727
+ projective bundle, with strata ∆C and its complement. Thus, the stratified projective bundle
728
+ argument [12, Proposition 2.6] implies that
729
+ GDA∗
730
+ B(Y × Y ) =
731
+
732
+ Im
733
+
734
+ A∗(C◦ × C◦) → A∗(Y × Y )
735
+
736
+ , ∆Y
737
+
738
+ .
739
+ Since A∗(C◦) = Im
740
+
741
+ A∗(Gr(2, 5)) → A∗(C◦), we find that
742
+ GDA∗
743
+ B(Y × Y ) =
744
+
745
+ Im
746
+
747
+ A∗(Gr(2, 5) × Gr(2, 5)) → A∗(Y × Y )
748
+
749
+ , ∆Y
750
+
751
+ .
752
+ But A∗(Gr(2, 5) × Gr(2, 5)) = A∗(Gr(2, 5)) ⊗ A∗(Gr(2, 5)) since the Grassmannian has trivial
753
+ Chow groups, and so
754
+ GDA∗
755
+ B(Y × Y ) =
756
+
757
+ GDB(Y ), ∆Y
758
+
759
+ =
760
+
761
+ h, ∆Y
762
+
763
+ (where the last equality follows from Lemma 4.3).
764
+ To finish the proof of the lemma, we now claim that for any (ordinary or special) Gushel–
765
+ Mukai threefold Y we have
766
+ (4)
767
+ ∆Y · h ∈
768
+
769
+ Im
770
+
771
+ A∗(Gr(2, 5)) → A∗(Y )
772
+ ��
773
+ .
774
+ Combined with Lemma 4.3, this means that for a special Gushel–Mukai threefold Y (and Y → B
775
+ as above) there is equality
776
+ GDA∗
777
+ B(Y × Y ) =
778
+
779
+ h
780
+
781
+ ⊕ Q[∆Y ] .
782
+ Then, since the diagonal is linearly independent in cohomology of
783
+
784
+ h
785
+
786
+ (since h1,2(Y ) ̸= 0), this
787
+ proves the lemma.
788
+ It remains to prove the claim (4). Using the spread argument [55, Lemma 3.2], it suffices to
789
+ prove equality (4) for the very general Gushel–Mukai threefold. Thus, we may assume that Y is
790
+ ordinary, and moreover that
791
+ Y = Y ′ ∩ Q ,
792
+ where Q is a quadric and Y ′ = Gr(2, 5) ∩ H1 ∩ H2 is a smooth fourfold (where H1, H2 are
793
+ Pl¨ucker hyperplanes) and Y ′ is such that
794
+ A∗(Y ′) = Im
795
+
796
+ A∗(Gr(2, 5)) → A∗(Y ′)
797
+
798
+ .
799
+ (Indeed, the smooth fourfold Y ′ has trivial Chow groups [35, Corollary 4.6], and the very general
800
+ Y ′ has no primitive cohomology, as follows from [35, Lemma 3.15]). The excess intersection
801
+ formula then implies that
802
+ ∆Y · h = 1
803
+ 2 ∆Y ′|Y ×Y ,
804
+ and the claim (4) follows.
805
+
806
+ Lemma 4.4 being proven, all conditions of Proposition 3.3 are met with, and so fibers Y of the
807
+ family Y → B have an MCK decomposition; this settles (v).
808
+
809
+
810
+ SOME MORE FANO THREEFOLDS WITH AN MCK DECOMPOSITION
811
+ 13
812
+ 5. THE TAUTOLOGICAL RING
813
+ Theorem 5.1. Let Y be a Fano threefold of Picard number 1. Assume that Y has an MCK
814
+ decomposition, and Y is member of a family Y → B such that Y ×B Y → B has the Franchetta
815
+ property. For m ∈ N, let
816
+ R∗(Y m) :=
817
+
818
+ (pi)∗(h), (pij)∗(∆Y )
819
+
820
+
821
+ A∗(Y m)
822
+ be the Q-subalgebra generated by pullbacks of the polarization h ∈ A1(Y ) and pullbacks of the
823
+ diagonal ∆Y ∈ A3(Y × Y ). (Here pi and pij denote the various projections from Y m to Y resp.
824
+ to Y × Y ). The cycle class map induces injections
825
+ R∗(Y m) ֒→ H∗(Y m, Q) for all m ∈ N .
826
+ Proof. This is inspired by the analogous result for cubic hypersurfaces [11, Section 2.3]. In its
827
+ turn, the result of [11] was inspired by analogous results for hyperelliptic curves [49], [50] (cf.
828
+ Remark 5.2 below) and for K3 surfaces [54], [57].
829
+ Let d denote the degree of Y , and let 2b := dim H3(Y, Q). As in [11, Section 2.3], let us write
830
+ o := 1
831
+ dh3 ∈ A3(Y ) (the “distinguished zero-cycle”) and
832
+ τ := ∆Y − 1
833
+ d
834
+ 3
835
+
836
+ j=0
837
+ hj × h3−j
838
+ ∈ A3(Y × Y )
839
+ (this cycle τ is nothing but the projector on the motive h3(Y ) considered above). Moreover, let
840
+ us write
841
+ hi := (pi)∗(h) ∈ A1(Y m) ,
842
+ oi := (pi)∗(o) ∈ A3(Y m) ,
843
+ τi,j := (pij)∗(τ) ∈ A3(Y m) .
844
+ We define the Q-subalgebra
845
+ ¯R∗(Y m) := ⟨oi, hi, τi,j⟩
846
+ ⊂ H∗(Y m, Q)
847
+ (where i ranges over 1 ≤ i ≤ m, and 1 ≤ i < j ≤ m). One can prove (just as [11, Lemma 2.11]
848
+ and [57, Lemma 2.3]) that the Q-algebra ¯R∗(Y m) is isomorphic to the free graded Q-algebra
849
+ generated by oi, hi, τij, modulo the following relations:
850
+ (5)
851
+ oi · oi = 0,
852
+ hi · oi = 0,
853
+ h3
854
+ i = d oi ;
855
+ (6)
856
+ τi,j · oi = 0,
857
+ τi,j · hi = 0,
858
+ τi,j · τi,j = 2b oi · oj ;
859
+ (7)
860
+ τi,j · τi,k = τj,k · oi ;
861
+ (8)
862
+
863
+ σ∈S2b+2
864
+ b+1
865
+
866
+ i=1
867
+ τσ(2i−1),σ(2i) = 0 .
868
+ To prove Theorem 5.1, we need to check that these relations are also verified modulo ratio-
869
+ nal equivalence. The relations (5) take place in R∗(Y ) and so they follow from the Franchetta
870
+
871
+ 14
872
+ ROBERT LATERVEER
873
+ property for Y . The relations (6) take place in R∗(Y 2). The first and the last relations are triv-
874
+ ially verified, because Y being Fano one has A6(Y 2) = Q. As for the second relation of (6),
875
+ this follows from the Franchetta property for Y × Y . (Alternatively, it is possible to deduce the
876
+ second relation from the MCK decomposition: indeed, the product τ · hi lies in A4
877
+ (0)(Y 2), and it
878
+ is readily checked that A4
879
+ (0)(Y 2) injects into H8(Y 2, Q).)
880
+ Relation (7) takes place in R∗(Y 3) and follows from the MCK relation. Indeed, we have
881
+ ∆sm
882
+ Y
883
+ ◦ (π3
884
+ Y × π3
885
+ Y ) = π6
886
+ Y ◦ ∆sm
887
+ Y
888
+ ◦ (π3
889
+ Y × π3
890
+ Y ) in A6(Y 3) ,
891
+ which (using Lieberman’s lemma) translates into
892
+ (π3
893
+ Y × π3
894
+ Y × ∆Y )∗∆sm
895
+ Y
896
+ = (π3
897
+ Y × π3
898
+ Y × π6
899
+ Y )∗∆sm
900
+ Y
901
+ in A6(Y 3) ,
902
+ which means that
903
+ τ1,3 · τ2,3 = τ1,2 · o3 in A6(Y 3) .
904
+ It is left to consider relation (8), which takes place in R∗(Y 2b+2). To check that this relation is
905
+ also verified modulo rational equivalence, we observe that relation (8) involves a cycle contained
906
+ in
907
+ A∗�
908
+ Sym2b+2(h3(Y )
909
+
910
+ .
911
+ But we have vanishing of the Chow motive
912
+ Sym2b+2 h3(Y ) = 0 in Mrat ,
913
+ because dim H3(Y, Q) = 2b and h3(Y ) is oddly finite-dimensional in the sense of Kimura [22]
914
+ (all Fano threefolds are known to have Kimura finite-dimensional motive [51, Theorem 4]). This
915
+ establishes relation (8), modulo rational equivalence, and ends the proof.
916
+
917
+ Remark 5.2. Given a curve C and an integer m ∈ N, one can define the tautological ring
918
+ R∗(Cm) :=
919
+
920
+ (pi)∗(KC), (pij)∗(∆C)
921
+
922
+ ⊂ A∗(Cm)
923
+ (where pi, pij denote the various projections from Cm to C resp. C × C). Tavakol has proven
924
+ [50, Corollary 6.4] that if C is a hyperelliptic curve, the cycle class map induces injections
925
+ R∗(Cm) ֒→ H∗(Cm, Q) for all m ∈ N .
926
+ On the other hand, there are many (non hyperelliptic) curves for which the tautological ring
927
+ R∗(C3) does not inject into cohomology (this is related to the non-vanishing of the Ceresa cycle,
928
+ cf. [50, Remark 4.2] and also [12, Example 2.3 and Remark 2.4]).
929
+
930
+ SOME MORE FANO THREEFOLDS WITH AN MCK DECOMPOSITION
931
+ 15
932
+ 6. A TABLE
933
+ Table 1 below lists all Fano threefolds with Picard number 1 (the classification of Fano three-
934
+ folds is contained in [18]). The last column indicates the existence of an MCK decomposition.
935
+ Note that a Fano threefold X with h1,2(X) = 0 has trivial Chow groups (i.e. A∗
936
+ hom(X) = 0), and
937
+ so these Fano threefolds have an MCK decomposition for trivial reasons. The asterisks indicate
938
+ new cases settled in this paper. Question marks indicate cases I am not able to settle.
939
+ Label  | Index | Degree | h1,2 | Description                            | MCK
+ 4      | 4     | 1      | 0    | P3                                     | trivial
+ 3      | 3     | 2      | 0    | X2 ⊂ P4                                | trivial
+ 2.1    | 2     | 1      | 21   | X6 ⊂ P(13, 2, 3)                       | ✓∗
+ 2.2    | 2     | 2      | 10   | X4 ⊂ P(14, 2)                          | ✓∗
+ 2.3    | 2     | 3      | 5    | X3 ⊂ P4                                | [8], [12]
+ 2.4    | 2     | 4      | 2    | X(2,2) ⊂ P5                            | [32]
+ 2.5    | 2     | 5      | 0    | Gr(2, 5) ∩ L ⊂ P9                      | trivial
+ 1.2    | 1     | 2      | 52   | X6 ⊂ P(14, 3)                          | ✓∗
+ 1.4.a  | 1     | 4      | 30   | X4 ⊂ P4                                | ?
+ 1.4.b  | 1     | 4      | 30   | X → Q (2:1) with quartic branch locus  | ✓∗
+ 1.6    | 1     | 6      | 20   | X(2,3) ⊂ P5                            | [34]
+ 1.8    | 1     | 8      | 14   | X(2,2,2) ⊂ P6                          | ?
+ 1.10.a | 1     | 10     | 10   | ordinary Gushel–Mukai 3fold            | ?
+ 1.10.b | 1     | 10     | 10   | special Gushel–Mukai 3fold             | ✓∗
+ 1.12   | 1     | 12     | 7    | OGr+(5, 10) ∩ L ⊂ P15                  | ?
+ 1.14   | 1     | 14     | 5    | Gr(2, 6) ∩ L ⊂ P14                     | [37]
+ 1.16   | 1     | 16     | 3    | LGr(3, 6) ∩ L ⊂ P13                    | ?
+ 1.18   | 1     | 18     | 2    | G2/P ∩ L ⊂ P13                         | [38]
+ 1.22   | 1     | 22     | 0    | V (s) ⊂ Gr(3, 7)                       | trivial
1061
+ TABLE 1. All Fano threefolds with Picard number 1. Here, X(d1,...,dr) denotes
1062
+ a complete intersection of multidegree (d1, . . . , dr), Q is a quadric, and L ⊂ Pr
1063
+ is a linear subspace of the appropriate dimension. The notations LGr(3, 6) and
1064
+ OGr+(5, 10) indicate the Lagrangian Grassmannian, resp. a connected compo-
1065
+ nent of the orthogonal Grassmannian. In 1.22, V (s) denotes the zero locus of a
1066
+ section of some vector bundle.
1067
+ Acknowledgements. Thanks to Mr. Kai Laterveer of the Lego University of Schiltigheim who
1068
+ provided inspiration for this work.
1069
+
1070
+ 16
1071
+ ROBERT LATERVEER
1072
+ REFERENCES
1073
+ [1] A. Beauville, Sur l'anneau de Chow d'une variété abélienne. Math. Ann. 273 (1986), 647—651,
1074
+ [2] A. Beauville, On the splitting of the Bloch–Beilinson filtration, in: Algebraic cycles and motives (J. Nagel
1075
+ and C. Peters, editors), London Math. Soc. Lecture Notes 344, Cambridge University Press 2007,
1076
+ [3] A. Beauville and C. Voisin, On the Chow ring of a K3 surface, J. Alg. Geom. 13 (2004), 417—426,
1077
+ [4] M. Beltrametti and L. Robbiano, Introduction to the theory of weighted projective spaces, Exposition.
1078
+ Math. 4 (1986), no. 2, 111—162,
1079
+ [5] N. Bergeron and Z. Li, Tautological classes on moduli space of hyperk¨ahler manifolds, Duke Math. J.,
1080
+ arXiv:1703.04733,
1081
+ [6] M. Cornalba, Una osservazione sulla topologia dei rivestimenti ciclici di variet`a algebriche, Bolletino
1082
+ U.M.I. (5) 18-A (1981), 323—328,
1083
+ [7] C. Delorme, Espaces projectifs anisotropes, Bull. Soc. Math. France 103 (1975), 203—223,
1084
+ [8] H. Diaz, The Chow ring of a cubic hypersurface, International Math. Research Notices 2021 no. 22
1085
+ (2021), 17071—17090,
1086
+ [9] I. Dolgachev, Weighted projective varieties, in: Group Actions and Vector Fields. Springer Berlin Hei-
1087
+ delberg, 1982, pp. 34—71,
1088
+ [10] L. Fu, R. Laterveer and Ch. Vial, The generalized Franchetta conjecture for some hyper-K¨ahler varieties
1089
+ (with an appendix joint with M. Shen), Journal Math. Pures et Appliqu´ees (9) 130 (2019), 1—35,
1090
+ [11] L. Fu, R. Laterveer and Ch. Vial, The generalized Franchetta conjecture for some hyper-K¨ahler varieties,
1091
+ II, Journal de l’Ecole Polytechnique–Math´ematiques 8 (2021), 1065—1097,
1092
+ [12] L. Fu, R. Laterveer and Ch. Vial, Multiplicative Chow–K¨unneth decompositions and varieties of coho-
1093
+ mological K3 type, Annali Mat. Pura ed Applicata 200 no. 5 (2021), 2085—2126,
1094
+ [13] L. Fu, Z. Tian and Ch. Vial, Motivic hyperk¨ahler resolution conjecture: I. Generalized Kummer varieties,
1095
+ Geometry & Topology 23-1 (2019), 427—492,
1096
+ [14] W. Fulton, Intersection theory, Springer–Verlag Ergebnisse der Mathematik, Berlin Heidelberg New York
1097
+ Tokyo 1984,
1098
+ [15] M. Green and P. Griffiths, An interesting 0-cycle, Duke Math. J. 119 no. 2 (2003), 261—313,
1099
+ [16] A. Iliev and L. Manivel, Prime Fano threefolds and integrable systems, Math. Ann. 339 no. 4 (2007),
1100
+ 937—955,
1101
+ [17] V. Iskovskikh, Fano 3-folds. I, Izv. Akad. Nauk SSSR Ser. Mat. Tom. 41 no. 3 (1977), 485—527,
1102
+ [18] V. Iskovskih and Yu. Prokhorov, Algebraic Geometry V: Fano varieties, Encyclopaedia of Math. Sciences
1103
+ 47, Springer-Verlag, Berlin 1999,
1104
+ [19] U. Jannsen, Motivic sheaves and filtrations on Chow groups, in: Motives (U. Jannsen et alii, eds.), Pro-
1105
+ ceedings of Symposia in Pure Mathematics Vol. 55 (1994), Part 1,
1106
+ [20] U. Jannsen, On finite-dimensional motives and Murre’s conjecture, in: Algebraic cycles and motives (J.
1107
+ Nagel and C. Peters, editors), Cambridge University Press, Cambridge 2007,
1108
+ [21] S.-I. Kimura, On the characterization of Alexander schemes, Comp. Math. 92 no. 3 (1994), 273—284,
1109
+ [22] S.-I. Kimura, Chow groups are finite dimensional, in some sense, Math. Ann. 331 no. 1 (2005), 173—201,
1110
+ [23] S.-I. Kimura, Surjectivity of the cycle map for Chow motives, in: Motives and algebraic cycles, Fields
1111
+ Inst. Commun., vol. 56, Amer. Math. Soc., Providence, RI, 2009, pp. 157—165,
1112
+ [24] A. Kuznetsov, Hyperplane sections and derived categories, Izv. Math. 70 (2006), 447—547,
1113
+ [25] A. Kuznetsov, Derived categories of Fano threefolds, Proc. V. A. Steklov Inst. Math 264 (2009), 110—
1114
+ 122,
1115
+ [26] A. Kuznetsov and Y. Prokhorov, On higher-dimensional del Pezzo varieties, arXiv:2206.01549,
1116
+ [27] R. Laterveer, A family of cubic fourfolds with finite-dimensional motive, Journal of the Mathematical
1117
+ Society of Japan 70 no. 4 (2018), 1453—1473,
1118
+ [28] R. Laterveer, A remark on the Chow ring of K¨uchle fourfolds of type d3, Bulletin Australian Math. Soc.
1119
+ 100 no. 3 (2019), 410—418,
1120
+ [29] R. Laterveer, Algebraic cycles and Verra fourfolds, Tohoku Math. J. 72 no. 3 (2020), 451—485,
1121
+
1122
+ SOME MORE FANO THREEFOLDS WITH AN MCK DECOMPOSITION
1123
+ 17
1124
+ [30] R. Laterveer, On the Chow ring of certain Fano fourfolds, Ann. Univ. Paedagog. Crac. Stud. Math. 19
1125
+ (2020), 39—52,
1126
+ [31] R. Laterveer, On the Chow ring of Fano varieties of type S2, Abh. Math. Semin. Univ. Hambg. 90 (2020),
1127
+ 17—28,
1128
+ [32] R. Laterveer, Algebraic cycles and intersections of 2 quadrics, Mediterranean Journal of Mathematics 18
1129
+ no. 4 (2021),
1130
+ [33] R. Laterveer, Algebraic cycles and intersections of three quadrics, Mathematical Proceedings of the Cam-
1131
+ bridge Philosophical Society 173 no. 2 (2022), 349—367,
1132
+ [34] R. Laterveer, Algebraic cycles and intersections of a quadric and a cubic, Forum Mathematicum 33 no. 3
1133
+ (2021), 845—855,
1134
+ [35] R. Laterveer, Motives and the Pfaffian–Grassmannian equivalence, Journal of the London Math. Soc. 104
1135
+ no. 4 (2021), 1738—1764,
1136
+ [36] R. Laterveer, On the Chow ring of Fano varieties on the Fatighenti-Mongardi list, Communications in
1137
+ Algebra 50 no. 1 (2022), 131—145,
1138
+ [37] R. Laterveer, Algebraic cycles and Fano threefolds of genus 8, Portugal. Math. (N.S.) Vol. 78, Fasc. 3-4
1139
+ (2021), 255—280,
1140
+ [38] R. Laterveer, Algebraic cycles and Fano threefolds of genus 10, preprint,
1141
+ [39] R. Laterveer and Ch. Vial, On the Chow ring of Cynk–Hulek Calabi–Yau varieties and Schreieder vari-
1142
+ eties, Canadian Journal of Math. 72 no. 2 (2020), 505—536,
1143
+ [40] J. Murre, On a conjectural filtration on the Chow groups of an algebraic variety, parts I and II, Indag.
1144
+ Math. 4 (1993), 177—201,
1145
+ [41] J. Murre, J. Nagel and C. Peters, Lectures on the theory of pure motives, Amer. Math. Soc. University
1146
+ Lecture Series 61, Providence 2013,
1147
+ [42] A. Negut, G. Oberdieck and Q. Yin, Motivic decompositions for the Hilbert scheme of points of a K3
1148
+ surface, arXiv:1912.09320v1,
1149
+ [43] K. O’Grady, Decomposable cycles and Noether-Lefschetz loci, Doc. Math. 21 (2016), 661—687,
1150
+ [44] D. Orlov, An Exceptional Collection of Vector Bundles on the Threefold V5, Vestn. Mosk. Univ., Ser. 1:
1151
+ Mat., Mekh., No. 5, 69–71 (1991) [Moscow Univ. Math. Bull. 46 (5), 48–50 (1991)],
1152
+ [45] N. Pavic, J. Shen and Q. Yin, On O’Grady’s generalized Franchetta conjecture, Int. Math. Res. Notices
1153
+ (2016), 1—13,
1154
+ [46] T. Scholl, Classical motives, in: Motives (U. Jannsen et alii, eds.), Proceedings of Symposia in Pure
1155
+ Mathematics Vol. 55 (1994), Part 1,
1156
+ [47] M. Shen and Ch. Vial, The Fourier transform for certain hyperK¨ahler fourfolds, Memoirs of the AMS
1157
+ 240 (2016), no. 1139,
1158
+ [48] M. Shen and Ch. Vial, The motive of the Hilbert cube X[3], Forum Math. Sigma 4 (2016), 55 pp.,
1159
+ [49] M. Tavakol, The tautological ring of the moduli space M rt
1160
+ 2 , International Math. Research Notices 2014
1161
+ no. 24 (2014), 6661—6683,
1162
+ [50] M. Tavakol, Tautological classes on the moduli space of hyperelliptic curves with rational tails, J. Pure
1163
+ Applied Algebra 222 no. 8 (2018), 2040—2062,
1164
+ [51] Ch. Vial, Projectors on the intermediate algebraic Jacobians, New York J. Math. 19 (2013), 793—822,
1165
+ [52] Ch. Vial, Niveau and coniveau filtrations on cohomology groups and Chow groups, Proceedings of the
1166
+ LMS 106 no. 2 (2013), 410—444,
1167
+ [53] Ch. Vial, On the motive of some hyperk¨ahler varieties, J. Reine Angew. Math. 725 (2017), 235—247,
1168
+ [54] C. Voisin, On the Chow ring of certain algebraic hyperk¨ahler manifolds, Pure Appl. Math. Q. 4 no. 3 part
1169
+ 2 (2008), 613—649,
1170
+ [55] C. Voisin, Chow Rings, Decomposition of the Diagonal, and the Topology of Families, Princeton Univer-
1171
+ sity Press, Princeton and Oxford, 2014,
1172
+ [56] Q. Yin, The generic nontriviality of the Faber–Pandharipande cycle, International Math. Res. Notices
1173
+ 2015 no. 5 (2015), 1263—1277,
1174
+
1175
+ 18
1176
+ ROBERT LATERVEER
1177
+ [57] Q. Yin, Finite-dimensionality and cycles on powers of K3 surfaces, Comment. Math. Helv. 90 (2015),
1178
+ 503–511.
1179
+ INSTITUT DE RECHERCHE MATH´EMATIQUE AVANC´EE, CNRS – UNIVERSIT´E DE STRASBOURG, 7 RUE
1180
+ REN´E DESCARTES, 67084 STRASBOURG CEDEX, FRANCE.
1181
+ Email address: [email protected]
1182
+
CNA0T4oBgHgl3EQfAP_i/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
CNAzT4oBgHgl3EQfTvwq/content/2301.01253v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc2afdd2090143536748eeb9b7768b7a4db132711abb4f702191ae6b42a0882c
3
+ size 8730594
CdE4T4oBgHgl3EQfFgzX/content/tmp_files/2301.04887v1.pdf.txt ADDED
@@ -0,0 +1,1466 @@
 
1
+ Learning Partial Differential Equations by Spectral Approximates of General
2
+ Sobolev Spaces
3
+ Juan Esteban Suarez Cardona 1 Michael Hecht 1
4
+ Abstract
5
+ We introduce a novel spectral, finite-dimensional approximation of general Sobolev spaces in terms of Chebyshev
6
+ polynomials. Based on this polynomial surrogate model (PSM), we realise a variational formulation, solving
7
+ a vast class of linear and non-linear partial differential equations (PDEs). The PSMs are as flexible as the
8
+ physics-informed neural nets (PINNs) and provide an alternative for addressing inverse PDE problems, such as
9
+ PDE-parameter inference. In contrast to PINNs, the PSMs result in a convex optimisation problem for a vast
10
+ class of PDEs, including all linear ones, in which case the PSM-approximate is efficiently computable due to the
11
+ exponential convergence rate of the underlying variational gradient descent.
12
+ As a practical consequence prominent PDE problems were resolved by the PSMs without High Performance
13
+ Computing (HPC) on a local machine. This gain in efficiency is complemented by an increase of approximation
14
+ power, outperforming PINN alternatives in both accuracy and runtime.
15
+ Beyond the empirical evidence we give here, the translation of classic PDE theory in terms of the Sobolev
16
+ space approximates suggests the PSMs to be universally applicable to well-posed, regular forward and inverse
17
+ PDE problems.
18
+ 1. Introduction
19
+ Partial differential equations (PDEs) are omnipresent mathematical models governing the dynamics and (physical)
20
+ laws of complex systems (Jost, 2002; Brezis, 2011). However, analytic PDE solutions are rarely known for most of the
21
+ systems being the centre of current research. Therefore, there is a strong demand on efficient and accurate numerical solvers
22
+ and simulations.
23
+ Main classic numerical solvers divide into: Finite Elements (Ern & Guermond, 2004); Finite Differences (LeVeque, 2007);
24
+ Finite Volumes(Eymard et al., 2000); Spectral Methods (Bernardi & Maday, 1997; Canuto et al., 2007) and Particle Methods
25
+ (Li & Liu, 2007).
26
+ Machine learning methods such as: Physics-Informed GAN (Arjovsky et al., 2017), Deep Galerkin Method (Sirignano
27
+ & Spiliopoulos, 2018), and Physics Informed Neural Networks (PINNs) (Raissi et al., 2019), gain big traction in the
28
+ scientific computing community. In contrast to classic solvers, PINNs provide a neural net (NN) surrogate model e.g.,
29
+ ˆu : (−1, 1)m −→ R, m ∈ N, parametrising the solution space of the PDEs and enabling to solve inverse problems like
30
+ inference of PDE parameters or initial condition detection. PINN-learning is given by minimising a variational problem,
31
+ which is typically formulated in L2-loss terms
32
+ ∫_Ω |û(x) − u(x)|² dΩ  ≈  (1/|P|) Σ_{p∈P} |û(p) − u(p)|²        (1)
44
+ The applications of PINNs range from fluid mechanics (Jin et al., 2020) to biology (Lagergren et al., 2020) or medicine
45
+ (Sahli Costabal et al., 2020), physics (Ellis et al., 2021) and beyond.
46
+ 1CASUS - Center for Advanced System Understanding, Helmholtz-Zentrum Dresden-Rossendorf e.V. (HZDR), G¨orlitz, Germany.
47
+ Correspondence to: Juan Esteban Suarez Cardona <[email protected]>, Michael Hecht <[email protected]>.
48
+ This work was partially funded by the Center of Advanced Systems Understanding (CASUS), financed by Germany’s Federal Ministry of
49
+ Education and Research (BMBF) and by the Saxon Ministry for Science, Culture and Tourism (SMWK) with tax funds on the basis of the
50
+ budget approved by the Saxon State Parliament.
51
+ arXiv:2301.04887v1 [math.NA] 12 Jan 2023
52
+
53
+ Learning Partial Differential Equations by Spectral Approximates of General Sobolev Spaces
54
+ 1.1. Related work – Physics Informed Neural Nets (PINNs)
55
+ We identify the essential approaches addressing stability and accuracy of PINNs below.
56
+ 1.1.1. VARIATIONAL PINNS (VPINNS)
57
+ VPINNs were introduced in (Kharazmi et al., 2019; 2020) resting on variational Sobolev losses for PINN-training.
58
+ The approach exploits analytic integration and differentiation formulas of shallow neural networks with specified activation
59
+ functions. The method is extended by using quadrature rules and automatic differentiation for computing the losses and is
60
+ complemented by a domain decomposition approach. The drawback of VPINNs, which we identify and demonstrate here, is their
+ high runtime cost, which prevents the approach from being applied to multi-dimensional PDE problems.
62
+ 1.1.2. INVERSE DIRICHLET LOSS BALANCING
63
+ The Inverse Dirichlet method (Maddu et al., 2021) was shown to increase the numerical stability of PINNS by
64
+ dynamically balancing the occurring variational gradient amplitudes, which if unbalanced cause numerical stiffness
65
+ phenomena (Wang et al., 2021). However, the PINN formulation rests on classic MSE losses, limiting the approach to
66
+ consider only strong PDE problem formulations.
67
+ 1.1.3. SOBOLEV CUBATURES PINNS (SC-PINN)
68
+ In our prior work (Cardona & Hecht, 2022) we gave a PINN formulation, by replacing the MSE loss by Sobolev
69
+ Cubatures. In contrast to ID-PINNs, approximating Sobolev losses enables the approach to treat PDE problems in both the
+ weak and the strong sense. As a consequence, automatic differentiation (A.D.) is replaced by polynomial differentiation
+ implicitly realised in the Sobolev cubatures. As we demonstrated, this results in an increase of accuracy and runtime
72
+ efficiency by several orders of magnitude compared to PINNs relying on A.D.
73
+ 1.2. Related Work - Classic spectral methods
74
+ Spectral methods are well-established techniques for solving PDEs and ODEs. Here, one aims to approximate the PDE
+ solution by an expansion u = Σ_{α∈A} c_α ϕ_α, A ⊆ N^m, with respect to a specific finite-dimensional space Π = span{ϕ_α}_{α∈A}
+ generated by a chosen basis, e.g., a Fourier basis for periodic PDEs or Jacobi-Chebyshev polynomials for general, non-periodic
78
+ problems. The coefficients of the expansion are constrained by the PDE and its corresponding boundary conditions. For
79
+ example: Consider a (non-linear) differential operator L and the equation
80
+ Lu = f
81
+ in Ω,
82
+ with homogeneous Dirichlet boundary conditions. By sampling the function f = f(pα)α∈A ∈ R|A|, A ⊆ Nm in some
83
+ node set P = {pα}α∈A determination of the coefficients C := (cα)α∈A ⊆ R|A| demands solving the truncated (non-linear)
84
+ system:
85
+ L[C] − f
86
+ != 0 ,
87
+ where L = L|Π denotes the truncated operator. This system of equations is typically formulated as the solution of the
88
+ weighted residual:
89
+ ⟨ϕi, L[C] − f⟩
90
+ != 0 ,
91
+ ∀α ∈ A.
92
+ Depending on the choice of the test functions ϕi we obtain pseudo-spectral methods or Galerkin spectral methods (Kang &
93
+ Suh, 2008; Canuto et al., 2007; Bernardi & Maday, 1997). If the operator L is linear, the problem is reduced to solving a
94
+ linear system. In the non-linear case, least square methods with Newton-Raphson minimiser are commonly used (Hessari
95
+ & Shin, 2013; Kim & Shin, 2006). Extending this formulation to inverse problems (inferring parameters) with general
96
+ boundary conditions and/or additional constraints without causing ill-conditioned problems is an unresolved challenge for
+ classic spectral methods. Our contribution provides the required extensions, enabling us to address general
98
+ forward and inverse PDE problems in a numerically stable, efficient and accurate fashion.
99
+ 1.3. Contribution
100
+ We present a generalised soft-constrained spectral method that results in a λ-convex variational optimisation problem
101
+ for linear and a class of non-linear PDEs. We theoretically guarantee exponentially fast convergence of the resulting
102
+ variational gradient descent. While established PINN alternatives result in non-convex variational problems, already for
103
+ linear PDEs, the spectral polynomial surrogate models (PSMs) provide approximates of the PDE solutions outperforming
104
+ PINNs in runtime and accuracy, as demonstrated in Section 4.
105
+
106
+ Learning Partial Differential Equations by Spectral Approximates of General Sobolev Spaces
107
+ Our approach rests on using Chebyshev Polynomial Surrogate Models (PSMs):
+ û(x, Θ) = Σ_{α∈A_{m,n}} θ_α T_α(x) ,    Θ = (θ_α)_{α∈A_{m,n}} ∈ R^{|A_{m,n}|} ,  x ∈ R^m ,        (2)
114
+ where A_{m,n} denotes a multi-index set, see Section 2.1, and T_α denotes the Chebyshev polynomial basis of the first kind given
+ by the relation:
+ T_α(cos(x)) = T_α(cos(x_1), . . . , cos(x_m)) = ∏_{i=1}^{m} cos(α_i x_i) = cos(αx)        (3)
122
+ for all α ∈ Am,n. The Chebyshev polynomials are widely used due to their excellent approximation properties extensively
123
+ discussed in (Trefethen, 2019). In our recent work (Cardona & Hecht, 2022), we already formulated (weak) PDE losses by
124
+ generalising classic Gauss-Legendre cubature rules, we termed Sobolev cubatures. As aforementioned, for linear and a
125
+ class of non-linear PDEs the induced variational λ-convex gradient flows possess an exponential rate of convergence. The
126
+ resulting PSMs deliver an increase of accuracy up to 10 orders of magnitude, by reducing the runtime costs up to 3 orders of
127
+ magnitude compared to PINN alternatives. Moreover, we demonstrate the PSMs to be as flexible as PINNs for addressing
128
+ inverse PDE problems, such as PDE-parameter inference.
129
+ In contrast to PINNs, the prominent PDE problems considered in Section 4 were solved by our PSM-method without
130
+ High Performance Computing (HPC) on a local machine. We consequently expect the approach to deeply impact current
131
+ methodology addressing computational challenges arising across all scientific disciplines and believe that even currently
132
+ non-reachable (high-dimensional, strongly varying) PDE problems can be successfully resolved due to our contribution.
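+ To make Eq. (2) and Eq. (3) concrete, the following minimal sketch (our own NumPy illustration, not the reference
+ implementation of the package used in Section 4) evaluates a tensor-product Chebyshev PSM û(x, Θ) through the
+ trigonometric identity T_k(cos t) = cos(k t); the function name and the toy coefficients are hypothetical.
+ import itertools
+ import numpy as np
+
+ def chebyshev_surrogate(x, theta, m, n):
+     """Evaluate u_hat(x) = sum_alpha theta[alpha] * prod_i T_{alpha_i}(x_i) on [-1, 1]^m."""
+     x = np.atleast_2d(x)                                  # shape (N, m)
+     alphas = list(itertools.product(range(n + 1), repeat=m))
+     assert len(theta) == (n + 1) ** m == len(alphas)
+     t = np.arccos(np.clip(x, -1.0, 1.0))                  # T_k(x_i) = cos(k * arccos(x_i))
+     u = np.zeros(x.shape[0])
+     for coeff, alpha in zip(theta, alphas):
+         u += coeff * np.prod(np.cos(np.array(alpha) * t), axis=1)
+     return u
+
+ rng = np.random.default_rng(0)                            # toy usage for m = 2, n = 2
+ theta = rng.normal(size=(2 + 1) ** 2)
+ print(chebyshev_surrogate([[0.3, -0.7]], theta, m=2, n=2))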
133
+ 2. PDE theory
134
+ In this section we introduce the mathematical concepts on which our approach rest. This includes the formulation of
135
+ Sobolev cubatures (Cardona & Hecht, 2022), approximating general Sobolev norms. To start with we fix the notation used
136
+ throughout this article.
137
+ 2.1. Notation and basic concepts
138
+ We denote with Ω = (−1, 1)^m the open m-dimensional standard hypercube, with Ω̄ = [−1, 1]^m its closure, and with
+ ∂Ω its boundary. ∥x∥_p = (Σ_{i=1}^{m} |x_i|^p)^{1/p}, x = (x_1, . . . , x_m) ∈ R^m, 1 ≤ p < ∞, and ∥x∥_∞ = max_{1≤i≤m} |x_i| denote the
141
+ lp-norm, and ⟨x, y⟩, ∥x∥, x, y ∈ Rm the standard Euclidean inner product and norm on Rm.
142
+ Moreover, Πm,n = span{xα}∥α∥∞≤n denotes the R-vector space of all real polynomials in m variables spanned by
143
+ all monomials x^α = ∏_{i=1}^{m} x_i^{α_i} of maximum degree n ∈ N, whereas Π_{m,n}(Ω) = {Q|_Ω : Q ∈ Π_{m,n}} denotes the space of
+ restricted polynomials with support Ω.
148
+ We consider the multi-index set Am,n = {α ∈ Nm : ∥α∥∞ ≤ n} with |Am,n| = (n + 1)m and order Am,n with
149
+ respect to the lexicographic order ⪯ on Nm starting from last entry to the 1st, e.g., (5, 3, 1) ⪯ (1, 0, 3) ⪯ (1, 1, 3). Let
150
+ D ∈ R|Am,n|×|Am,n| be a matrix we slightly abuse notation by writing
151
+ D = (dα,β)α,β∈Am,n ,
152
+ (4)
153
+ where dα,β ∈ R is the α-th, β-th entry of D.
154
+ 2.2. Sobolev space theory
155
+ We recommend (Adams & Fournier, 2003; Neuberger, 2008; Brezis, 2011) for an excellent overview on functional
156
+ analysis and Sobolev space theory including the concepts we shortly summarise: We denote with Ck(Ω, R), k ∈ N∪{∞} the
157
+ Banach spaces of all k-times continuously differentiable functions with norm ∥f∥_{C^k(Ω)} = Σ_{i=0}^{k} sup_{x∈Ω, ∥α∥_1=i} |D^α f(x)|.
+ The Sobolev spaces
+ H^k(Ω, R) = { f ∈ L²(Ω, R) : D^α f ∈ L²(Ω, R) } ,    ∥α∥_1 = Σ_{i=1}^{m} α_i ≤ k ,
+ k ∈ N, are given by all L²-integrable functions f : Ω −→ R with existing L²-integrable weak
167
+ derivatives D^α f = ∂^{α_1}_{x_1} · · · ∂^{α_m}_{x_m} f up to order k. In fact, H^k(Ω, R) is a Hilbert space with inner product
+ ⟨f, g⟩_{H^k(Ω)} = Σ_{0≤∥α∥_1≤k} ⟨D^α f, D^α g⟩_{L²(Ω)}
174
+
175
+ Learning Partial Differential Equations by Spectral Approximates of General Sobolev Spaces
176
+ and norm ∥f∥²_{H^k(Ω)} = ⟨f, f⟩_{H^k(Ω)}. Thus, the embeddings j : H^k(Ω, R) ↪ H^{k′}(Ω, R) are well defined and continuous for
+ all k′ ≤ k due to ∥ · ∥_{H^{k′}(Ω)} ≤ ∥ · ∥_{H^k(Ω)}, whereas H^0(Ω, R) = L²(Ω, R), with ⟨f, g⟩_{L²(Ω)} = ∫_Ω f · g dΩ.
182
+ For k ≥ 1 the trace operator
183
+ tr : Hk(Ω, R) −→ L2(∂Ω, R)
184
+ (5)
185
+ is defined as usual as the Hk-extension of the classic continuous trace tr(u) = u|∂Ω with domain dom(tr) = C0(¯Ω, R).
186
+ The Sobolev spaces with zero trace are denoted as usual with H^k_0(Ω, R) = {u ∈ H^k(Ω, R) : tr(u) = 0}, k ≥ 1, and can be
+ alternatively defined as the completion of the space of smooth functions that vanish on the boundary ∂Ω of Ω, i.e.,
+ H^k_0(Ω, R) = completion of C^∞_0(Ω, R) with respect to ∥·∥_{H^k(Ω)} ,    C^∞_0(Ω, R) = {f ∈ C^∞(Ω, R) : f|_{∂Ω} = 0} .
195
+ We further consider the space of all distributions D′(Ω) = {F : C^∞_0(Ω̄) −→ R}, also known as generalised functions
+ (being the dual space of all test functions C^∞_0(Ω̄) = {f ∈ C^∞(Ω) : f|_{∂Ω} = 0} with respect to the canonical LF topology).
+ We associate the negative order Sobolev space with the completion of D′(Ω) with respect to the following norm
+ H^{−k}(Ω, R) := completion of D′(Ω) w.r.t. ∥·∥_{H^{−k}(Ω)} ,    ∥F∥_{H^{−k}(Ω,R)} = sup_{u∈H^k(Ω,R)} |Fu| / ∥u∥_{H^k(Ω,R)} ,        (6)
209
+ yielding a separable, reflexive Hilbert space (Lax, 1955).
210
+ The weak PDE formulations and their underlying Hilbert space choice we will propose later on require the notion of
211
+ adjoint (differential) operators. We recall the definition.
212
+ Definition 1 (Adjoint operators). Let (K, ∥ · ∥K), (H, ∥ · ∥H) be Hilbert spaces and T : dom(T) ⊆ K −→ H, T ∗ :
213
+ dom(T ∗) ⊆ H −→ K be linear operators with dense domains. Then T ∗ is called an adjoint operator of T if and only if
214
+ ⟨Tx, y⟩H = ⟨x, T ∗y⟩K
215
+ for all x ∈ dom(T) and y ∈ dom(T ∗).
216
+ Example 2. Consider ∂xi : L2(Ω, R) −→ L2(Ω, R) as the differential operator in the weak sense. Then its domain is given
217
+ by dom(∂xi) = H1(Ω, R) ⊆ L2(Ω, R), which is a dense subset. Following Definition 1, and applying integration by parts,
218
+ an adjoint operator ∂∗
219
+ xi : L2(Ω, R) −→ L2(Ω, R), with domain dom(∂∗
220
+ xi) = H1
221
+ 0(Ω, R) is given by ∂∗
222
+ xi = −∂xi.
223
+ We link the spaces H−k(Ω, R) and Hk(Ω, R) due to the following fact.
224
+ Proposition 3. Let j : Hk(Ω, R) �→ L2(Ω, R), k ∈ N be the embedding with adjoint operator j∗ : L2(Ω, R) −→
225
+ Hk(Ω, R). Let f, g ∈ L2(Ω, R) and the distributions F = ⟨f, ·⟩L2(Ω,R), G = ⟨g, ·⟩L2(Ω,R) ∈ H−k(Ω, R), with f ∈
226
+ L2(Ω, R). Then
227
+ ∥F∥H−k(Ω,R) = ∥j∗f∥Hk(Ω) ,
228
+ ⟨F, G⟩H−k(Ω) = ⟨j∗f, j∗g⟩Hk(Ω) .
229
+ Proof. The proof is derived directly from the definition of the H−k(Ω, R)-norm in Eq. (6):
230
+ ∥j∗f∥Hk(Ω) =
231
+ ∥j∗f∥2
232
+ Hk(Ω)
233
+ ∥j∗f∥Hk(Ω)
234
+ = |⟨jf, j∗f⟩L2(Ω)|
235
+ ∥j∗f∥Hk(Ω)
236
+ = |⟨f, j∗f⟩L2(Ω)|
237
+ ∥j∗f∥Hk(Ω)
238
+
239
+ sup
240
+ u∈Hk(Ω,R)
241
+ |⟨f, u⟩L2(Ω)|
242
+ ∥u∥Hk(Ω)
243
+ = ∥F∥H−k(Ω) .
244
+ Vice versa, applying the Cauchy-Schwarz inequality yields
245
+ ∥F∥H−k(Ω,R) =
246
+ sup
247
+ u∈Hk(Ω,R)
248
+ |⟨f, ju⟩L2(Ω)|
249
+ ∥u∥Hk(Ω)
250
+ =
251
+ sup
252
+ u∈Hk(Ω,R)
253
+ |⟨j∗f, u⟩Hk(Ω)|
254
+ ∥u∥Hk(Ω)
255
+
256
+ sup
257
+ u∈Hk(Ω,R)
258
+ ∥j∗f∥Hk(Ω)∥u∥Hk(Ω)
259
+ ∥u∥Hk(Ω)
260
+ = ∥j∗f∥Hk(Ω) ,
261
+ implying the claimed equality. The statement for the inner product follows analogously.
262
+ A main ingredient of all further considerations are the truncated L2- or Hk-inner products that rest on adaptions of
263
+ classic Gauss-Legendre cubatures, which we provide next.
264
+
265
+ Learning Partial Differential Equations by Spectral Approximates of General Sobolev Spaces
266
+ 2.3. Orthogonal polynomials and Gauss-Legendre cubatures
267
+ Here, we recapture the underlying concept of orthogonal polynomials: Let m, n ∈ N and P_{m,n} = ⊕_{i=1}^{m} Leg_n ⊆ Ω be
+ the m-dimensional Legendre grid, where Leg_n = {p_0, . . . , p_n} are the n + 1 Legendre nodes given by the roots of
+ the Legendre polynomial of degree n + 1. We denote p_α = (p_{α_1}, . . . , p_{α_m}) ∈ P_{m,n}, α ∈ A_{m,n}. It is a classic fact (Stroud,
271
+ 1971; 2011; Trefethen, 2017; 2019), that the Lagrange polynomials Lα ∈ Πm,n, α ∈ Am,n given by
272
+ L_α = ∏_{i=1}^{m} l_{α_i,i} ,    l_{j,i}(x_i) = ∏_{k=0, k≠j}^{n} (x_i − p_k)/(p_j − p_k) ,        (7)
285
+ satisfy Lα(pβ) = δα,β, ∀ α, β ∈ Am,n and form an orthogonal L2-basis of Πm,n, i.e.,
286
+ ⟨L_α, L_β⟩_{L²(Ω)} = ∫_Ω L_α(x) L_β(x) dΩ = w_α δ_{α,β} ,
+ ∀ α, β ∈ A_{m,n}, where δ_{·,·} denotes the Kronecker delta and
+ w_α = ∥L_α∥²_{L²(Ω)}        (8)
294
+ the efficiently computable Gauss-Legendre cubature weight (Stroud, 1971; 2011; Trefethen, 2017; 2019). Consequently, for
295
+ any polynomial Q ∈ Πm,2n+1 of degree 2n + 1 the following cubature rule applies:
296
+ ∫_Ω Q(x) dΩ = Σ_{α∈A_{m,n}} w_α Q(p_α) .        (9)
303
+ Summarising: Polynomials of degree 2n + 1 can be (numerically) integrated exactly when sampled on the Legendre grid
304
+ Pm,n of order n + 1. Thanks to |Pm,n| = (n + 1)m ≪ (2n + 1)m this makes Gauss-Legendre integration a very powerful
305
+ scheme yielding
306
+ ⟨Q_1, Q_2⟩_{L²(Ω)} = ∫_Ω Q_1(x) Q_2(x) dΩ = Σ_{α∈A_{m,n}} Q_1(p_α) Q_2(p_α) w_α ,        (10)
314
+ for all Q1, Q2 ∈ Πm,n. In light of this fact, we propose the following definition.
315
+ Definition 4 (Legendre interpolation and L2-projection ). Let m, n ∈ N, Pm,n be the Legendre grid and Lα, α ∈ Am,n be
316
+ the corresponding Lagrange polynomials from Eq.(7). For continuous functions f : ¯Ω −→ R we denote with
317
+ I_{m,n} : C^0(Ω, R) −→ Π_{m,n} ,    I_{m,n}(f) = Σ_{α∈A_{m,n}} f(p_α) L_α ∈ Π_{m,n}        (11)
+ the interpolation operator. Moreover, we denote with
+ π_{m,n} : L²(Ω, R) −→ Π_{m,n} ,    π_{m,n}(f) = Σ_{α∈A_{m,n}} (1/w_α) ⟨f, L_α⟩_{L²(Ω)} L_α ∈ Π_{m,n}        (12)
+ the L²-projection.
333
+ Remark 5. It is important to note that Im,n(f) ̸= πm,n(f) in general. However, both operators are projections that due to
334
+ Eq. (10) satisfy
335
+ πm,n(πm,n(f)) = πm,n(f) ,
336
+ Im,n(Im,n(f)) = Im,n(f) ,
337
+ Im,n(πm,n(f)) = πm,n(f) ,
338
+ πm,n(Im,n(f)) = Im,n(f) .
339
+ In fact, both concepts can deliver exponential fast approximation rates (truncation errors) in case the considered function f
340
+ is analytic (Trefethen, 2019).
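+ As a minimal numerical sketch (our own, in plain NumPy; not the authors' code), the Legendre grid P_{m,n} and the weights
+ w_α of Eq. (8) can be assembled from the 1D Gauss-Legendre rule, and the exactness of Eq. (10) can be checked on a
+ low-degree example:
+ import numpy as np
+
+ def legendre_grid(m, n):
+     """Tensor-product Gauss-Legendre nodes and weights on (-1, 1)^m."""
+     p1d, w1d = np.polynomial.legendre.leggauss(n + 1)          # n + 1 nodes per axis
+     grids = np.meshgrid(*([p1d] * m), indexing="ij")
+     nodes = np.stack([g.ravel() for g in grids], axis=1)       # shape ((n+1)^m, m)
+     wgrids = np.meshgrid(*([w1d] * m), indexing="ij")
+     weights = np.prod(np.stack(wgrids, axis=-1), axis=-1).ravel()
+     return nodes, weights
+
+ nodes, weights = legendre_grid(m=2, n=3)
+ Q1 = nodes[:, 0] ** 2 * nodes[:, 1]                            # multi-degree (2, 1) <= n
+ Q2 = nodes[:, 1]                                               # multi-degree (0, 1) <= n
+ print(np.sum(weights * Q1 * Q2), "vs exact", (2 / 3) * (2 / 3))  # both equal 4/9, cf. Eq. (10)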
341
+ How differential operators acting on polynomial spaces can be understood due to these concepts is proposed in the
342
+ next section.
343
+
344
+ Learning Partial Differential Equations by Spectral Approximates of General Sobolev Spaces
345
+ 2.4. Truncated differential and adjoint operators
346
+ Based on Eq. (7) we derive exact matrix representations of differential operators acting on the polynomial spaces
347
+ Πm,n. This allows to extend Eq. (10) and deliver approximates of the Sobolev norms for general functions f ∈ Hk(Ω, R),
348
+ k ∈ N.
349
+ For Lα ∈ Πm,n from Eq. (7) and 1 ≤ i ≤ m the computation of the values dα,β = ∂xiLα(pβ), pβ ∈ Pm,n,
350
+ ∀ β ∈ Am,n yield the Lagrange expansion
351
+ ∂xiLα(x) =
352
+
353
+ β∈Am,n
354
+ dα,βLβ(x) .
355
+ (13)
356
+ Consequently, the matrix
357
+ Di = (dα,β)α,β∈Am,n ∈ R|Am,n|×|Am,n| ,
358
+ (14)
359
+ represents the finite dimensional truncation of the differential operator ∂xi : C1(Ω, R) −→ C0(Ω, R) to the polynomial
360
+ space Πm,n and for β ∈ Nm we set
361
+ Dβ =
362
+ m
363
+
364
+ j=1
365
+ Dβi ,
366
+ with D0 = I ,
367
+ (15)
368
+ to be the approximation of the differential operator ∂β := ∂β1
369
+ x1 . . . ∂βm
370
+ xm .
371
+ For representing the truncation of general adjoint operators we we consider the Legendre grid Pm,n = {pα : α ∈
372
+ Am,n}, m, n, ∈ N the positive, symmetric Gauss-Legendre cubature weight matrix Wm,n = diag(wα)α∈Am,n, and the
373
+ evaluation vector f = (f(Pα))α∈Am,n ∈ R|Am,n| for a given function f : Ω −→ R. With these ingredients we state:
374
+ Proposition 6. Let Dβ : L2(Ω, R) −→ L2(Ω, R), β ∈ Nm be a differential operator and Dβ : Πm,n(Ω) −→ Πm,n(Ω) be
375
+ its truncation to the polynomial space. Then the matrix representation of the truncated adjoint operator
+ D*_β : Π_{m,n}(Ω) −→ Π_{m,n}(Ω) is given by:
+ D*_β = W_{m,n}^{−1} D_β^⊤ W_{m,n} .        (16)
383
+ Proof. We derive Eq. (16) due to the Gauss-cubature in terms of Eq. (10). Let Q1, Q2 ∈ Πm,n, and denote with q1 =
384
+ (Q1(pα))α∈Am,n, q2 = (Q2(pα))α∈Am,n ∈ R|Am,n| the corresponding evaluation vectors. Then we compute
385
+ ⟨DβQ1, Q2⟩L2(Ω,R) = ⟨Dq1, Wm,nq2⟩ = q⊤
386
+ 1 D⊤
387
+ β Wm,nq2 = q⊤
388
+ 1 Wm,nW−1
389
+ m,nD⊤
390
+ β Wm,nq2
391
+ = ⟨W⊤
392
+ m,nq1, D∗
393
+ βq2⟩ = ⟨q1, Wm,nD∗
394
+ βq2⟩ = ⟨Q1, D∗
395
+ βQ2⟩L2(Ω,R) ,
396
+ proving the statement.
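+ A small self-contained sketch (our own construction via barycentric weights, not the authors' implementation) of the 1D
+ differentiation matrix of Eq. (13)-(14) on the Legendre nodes, together with its cubature adjoint from Eq. (16); here the
+ matrix D acts on node values, (D f)_i ≈ f′(p_i), which is exact for polynomials of degree ≤ n:
+ import numpy as np
+
+ def diff_matrix_1d(n):
+     p, w = np.polynomial.legendre.leggauss(n + 1)
+     bw = np.array([1.0 / np.prod(p[j] - np.delete(p, j)) for j in range(n + 1)])  # barycentric weights
+     D = np.zeros((n + 1, n + 1))
+     for i in range(n + 1):
+         for j in range(n + 1):
+             if i != j:
+                 D[i, j] = (bw[j] / bw[i]) / (p[i] - p[j])
+         D[i, i] = -np.sum(D[i, :])       # rows sum to zero: the derivative of constants vanishes
+     return p, w, D
+
+ p, w, D = diff_matrix_1d(5)
+ print(np.max(np.abs(D @ p ** 3 - 3 * p ** 2)))      # ~1e-13: exact differentiation on Pi_{1,5}
+ D_adj = np.diag(1.0 / w) @ D.T @ np.diag(w)         # D* = W^{-1} D^T W, cf. Eq. (16)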
397
+ We provide a matrix representation of the truncation of the adjoint operator j* : L²(Ω, R) −→ H^k(Ω, R) of the
+ embedding j : H^k(Ω, R) −→ L²(Ω, R).
399
+ Theorem 7. Let j∗ : L2(Ω, R) −→ Hk(Ω, R) be the adjoint operator of the embedding j : Hk(Ω, R) −→ L2(Ω, R).
400
+ Denote with Dβ the representations of the derivatives from Eq. (15) then its truncation J∗ : Πm,n(Ω) ⊆ L2(Ω, R) −→
401
+ Πm,n(Ω) ⊆ Hk(Ω, R) can be represented by the matrix J∗ ∈ R|Am,n|×|Am,n| given by
402
+ J* = ( Σ_{|β|≤k} D*_β D_β )^{−1} .        (17)
410
+ Proof. Let Q1, Q2 ∈ Πm,n, Pm,n the Legendre grid and q1 = (Q1(pα))α∈Am,n, q2 = (Q2(pα))α∈Am,n ∈ R|Am,n| the
411
+ evaluation vectors,respectively. Then we compute
412
+ ⟨Q1, Q2⟩Hk(Ω) =
413
+
414
+ |β|≤k
415
+ ⟨DβQ1, DβQ2⟩L2(Ω,R) =
416
+
417
+ |β|≤k
418
+ ⟨D∗
419
+ βDβQ1, Q2⟩L2(Ω,R)
420
+ = ⟨
421
+ � �
422
+ |β|≤k
423
+ D∗
424
+ βDβ
425
+
426
+ Q1, Q2⟩L2(Ω,R) .
427
+
428
+ Learning Partial Differential Equations by Spectral Approximates of General Sobolev Spaces
429
+ Thus, setting J∗−1 := �
430
+ |β|≤k D∗
431
+ βDβ yields that due to the identity above J∗−1 is a symmetric and positive definite linear
432
+ operator on a finite dimensional space implying its invertibility. Due to
433
+
434
+ � �
435
+ |β|≤k
436
+ D∗
437
+ βDβ
438
+
439
+ Q1, Q2⟩L2(Ω,R) = ⟨
440
+ � �
441
+ |β|≤k
442
+ D∗
443
+ βDβ
444
+
445
+ q1, q2⟩
446
+ we realise that J∗−1 := �
447
+ |β|≤k D∗
448
+ βDβ represents J∗−1.
449
+ As introduced, the PSMs rely on the Chebyshev polynomials {Tα}α∈Am,n, m, n ∈ N, Eq. (3). For later purpose we
450
+ provide the basis transformation between the Tα and the Lagrange basis Lα in the Legendre grid Pm,n. That is to consider
451
+ the matrix
452
+ T = (Tβ(pα))α,β∈Am,n ∈ R|Am,n|×|Am,n|
453
+ and its inverse
454
+ T−1 ∈ R|Am,n|×|Am,n| .
455
+ (18)
456
+ Given Lagrange coefficients C = (cα)α∈Am,n of a polynomial Q = �
457
+ α∈Am,n cαLα, Θ = (θα)α∈Am,n = T−1C yields the
458
+ coefficients of its Chebyshev representation Q = �
459
+ α∈Am,n θαTα. Vice versa D = (dα)α∈Am,n = TΘ yields the Lagrange
460
+ coefficients of its Chebyshev expansion. We close this section, by deriving a matrix representation of the trace operator,
461
+ Eq. (5):
462
+ Definition 8 (Truncated trace operator). Let tr : H^k(Ω, R) −→ L²(∂Ω, R) be the trace operator, Eq. (5). Denote with
+ P^±_{m−1,n,j} ⊆ ∂Ω^±_j the (m−1)-dimensional Legendre grids for each of the faces ∂Ω^±_j = {x ∈ Ω̄ : x_j = ±1} of the hypercube
+ Ω. Then the matrix S^±_{m,n,j} ∈ R^{|A_{m−1,n}|×|A_{m,n}|} with
+ S^±_{m,n,j} = (T_α(p_γ))_{(γ,α)∈A_{m−1,n}×A_{m,n}} ,    p_γ ∈ P^±_{m−1,n,j} ,  j = 1, . . . , m ,        (19)
+ represents the truncated trace operator tr : Π_{m,n} −→ Π_{m−1,n}(∂Ω^±_j) for each of the faces ∂Ω^±_j.
477
+ The derived representations of the truncated differential and adjoint operators enable us to derive cubature rules for the
478
+ truncated Sobolev spaces.
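+ As an illustration of Definition 8 (our own sketch for m = 2 in plain NumPy; not the authors' code), the trace matrix
+ S^±_{m,n,j} of Eq. (19) can be assembled by evaluating the Chebyshev basis on the boundary Legendre grid of one face:
+ import itertools
+ import numpy as np
+
+ def trace_matrix_2d(n, j=0, sign=1.0):
+     """Chebyshev basis T_alpha evaluated on the Legendre grid of the face x_j = sign (m = 2)."""
+     p1d, _ = np.polynomial.legendre.leggauss(n + 1)            # (m-1)-dimensional boundary grid
+     alphas = list(itertools.product(range(n + 1), repeat=2))
+     S = np.empty((n + 1, (n + 1) ** 2))
+     for row, y in enumerate(p1d):
+         x = np.array([sign, y]) if j == 0 else np.array([y, sign])
+         t = np.arccos(np.clip(x, -1.0, 1.0))
+         for col, a in enumerate(alphas):
+             S[row, col] = np.cos(a[0] * t[0]) * np.cos(a[1] * t[1])
+     return S
+
+ S_plus = trace_matrix_2d(n=4, j=0, sign=1.0)                   # shape (5, 25), cf. Eq. (19)
+ print(S_plus.shape)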
479
+ 2.5. Sobolev cubatures
480
+ Based on the classic Gauss-Legendre cubature Eq. (10) we, here, derive general Sobolev cubatures. We start by
481
+ defining:
482
+ Definition 9 (Truncated (dual) inner product and norm). For β ∈ Nm, ∥β∥1 ≤ k, m, n ∈ N we consider the truncated
483
+ differential operator Dβ and its adjoint Dβ : Πm,n(Ω) −→ Πm,n(Ω), D∗
484
+ β : Πm,n(Ω) −→ Πm,n(Ω) satisfying
485
+ ⟨DβQ1, Q2⟩L2(Ω) = ⟨Q1, D∗
486
+ βQ2⟩L2(Ω) ,
487
+ ∀Q1, Q2 ∈ Πm,n
488
+ Given the matrix representations Dβ, D∗
489
+ β = W −1
490
+ m,nDT
491
+ β Wm,n from Proposition 6, J∗ from Eq. (17) and its formal dual
492
+ J∗ =
493
+ � �
494
+ |β|≤k
495
+ D∗
496
+ βDβ
497
+ �−1
498
+ ,
499
+ J∗ =
500
+ � �
501
+ |β|≤k
502
+ DβD∗
503
+ β
504
+ �−1
505
+ ,
506
+ we introduce
507
+ Wm,n,k = Wm,nJ∗−1 , Wm,n,−k = Wm,nJ∗ ,
508
+ Wm,n,k = Wm,nJ∗−1 , Wm,n,−k = Wm,nJ∗ ,
509
+ and for f, g ∈ Πm,n and their dual distributions F = ⟨f, ·⟩L2(Ω), G = ⟨g, ·⟩L2(Ω) we set
510
+ ⟨f, g⟩Hk(Ω)
511
+ =
512
+
513
+ β∈Nm,∥β∥1≤k
514
+ ⟨Dβf, Dβg⟩L2(Ω)
515
+ =⟨f, Wm,n,kg⟩
516
+ ⟨f, g⟩Hk(Ω),∗
517
+ =
518
+
519
+ β∈Nm,∥β∥1≤k
520
+ ⟨D∗
521
+ βf, D∗
522
+ βg⟩L2(Ω)
523
+ =⟨f, Wm,n,kg⟩
524
+ ⟨F, G⟩H−k(Ω) =
525
+
526
+ β∈Nm,∥β∥1≤k
527
+ ⟨DβJ∗f, DβJ∗g⟩L2(Ω)=⟨f, Wm,n,−kg⟩
528
+ ⟨F, G⟩H−k(Ω),∗=
529
+
530
+ β∈Nm,∥β∥1≤k
531
+ ⟨D∗
532
+ βJ∗f, D∗
533
+ βJ∗g⟩L2(Ω)=⟨f, Wm,n,−kg⟩ ,
534
+ (20)
535
+
536
+ Learning Partial Differential Equations by Spectral Approximates of General Sobolev Spaces
537
+ where f = (f(pα))α∈Am,n ∈ R|Am,n|, g = (g(pα))α∈Am,n ∈ R|Am,n| are the evaluation vectors of f, g in the Legendre
538
+ nodes pα ∈ Pm,n, respectively. The corresponding norms are given by
539
+ ∥f∥Hk(Ω) = ⟨f, f⟩1/2
540
+ Hk(Ω) ,
541
+ ∥f∥Hk(Ω),∗ = ⟨f, f⟩1/2
542
+ Hk(Ω),∗
543
+ ∥F∥H−k(Ω) = ⟨F, F⟩1/2
544
+ H−k(Ω) ,
545
+ ∥F∥H−k(Ω),∗ = ⟨F, F⟩1/2
546
+ H−k(Ω),∗ .
547
+ (21)
548
+ In fact, while including the L2-inner product for β = 0, the expressions above define inner products and norms. We
549
+ deduce the exactness of the equations.
550
+ Theorem 10 (Sobolev cubatures). Let f, g ∈ Hk(Ω, R) and F = ⟨f, ·⟩, G = ⟨g, ·⟩ ∈ H−k(Ω, R). Then the approximations
551
+ given by Definition 9, Eq. (20), are exact for all f, g ∈ Πm,n.
552
+ Proof. By combining Proposition 3, Theorem 7 and Im,n(πm,n(f)) = πm,n(f) the proof follows.
553
+ The following observation is helpful for computing the Sobolev cubatures.
554
+ Corollary 11. Let f ∈ Πm,n and the assumptions of Definition 9 be fulfilled. Then the following identities hold:
555
+ ⟨Dβf, Dβf⟩L2(Ω,R) =
556
+
557
+ α∈Am,n
558
+ 1
559
+
560
+ ⟨Dβf, Lβ⟩2
561
+ L2(Ω,R)
562
+ ⟨D∗
563
+ βf, D∗
564
+ βf⟩L2(Ω,R) =
565
+
566
+ α∈Am,n
567
+ 1
568
+
569
+ ⟨f, DβLα⟩2
570
+ L2(Ω,R)
571
+ (22)
572
+ Proof. We use Proposition 6 in terms of D∗
573
+ β = W−1
574
+ m,nDT
575
+ β Wm,n and due to Theorem 10 compute
576
+ ⟨D∗
577
+ βf, D∗
578
+ βf⟩L2(Ω,R) = ⟨D∗
579
+ βf, Wm,nD∗
580
+ βf⟩ = ⟨W−1
581
+ m,nD⊤
582
+ β Wm,nf, D⊤
583
+ β Wm,nf⟩
584
+ =
585
+
586
+ α∈Am,n
587
+ 1
588
+
589
+ ⟨f, D⊤
590
+ β Wm,neα⟩2 =
591
+
592
+ α∈Am,n
593
+ 1
594
+
595
+ ⟨f, DβLα⟩2
596
+ L2(Ω,R) ,
597
+ where eα is the α-th standard basis vector of R|Am,n|. The analog computation applies for Dβ.
598
+ In fact, when considering the truncated (dual) norms (∥·∥H−k(Ω),∗, ∥·∥Hk(Ω),∗), ∥·∥H−k(Ω), ∥·∥Hk(Ω), computations
599
+ based on Eq. (22) are straightforwardly achieved and documented in (ABC, 2021). We provide the formal setup next.
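+ A minimal 1D sketch (our own, for k = 1; it reuses the helper diff_matrix_1d from the sketch in Section 2.4 and is not the
+ authors' implementation) of the Sobolev cubature of Definition 9: the matrix W + D^T W D realises
+ ⟨f, g⟩_{H^1(Ω)} ≈ f^T (W + D^T W D) g on node vectors, exactly so for polynomials by Theorem 10:
+ import numpy as np
+
+ n = 10
+ p, w, D = diff_matrix_1d(n)            # Legendre nodes, weights, differentiation matrix (Section 2.4 sketch)
+ W = np.diag(w)
+ W_h1 = W + D.T @ W @ D                 # beta = 0 and beta = 1 contributions of Definition 9
+ f = np.sin(np.pi * p)                  # sampled on the Legendre grid
+ print(f @ W_h1 @ f, "vs exact", 1.0 + np.pi ** 2)   # ||sin(pi x)||_{H^1(-1,1)}^2 = 1 + pi^2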
600
+ 3. PDE formulations
601
+ In light of the provided perspectives, we follow (Jost, 2002; Brezis, 2011) to propose the following formalization of
602
+ classic PDE problems. For the sake of simplicity, we focus on classic Poisson type equations. Extensions to more general
603
+ PDE problems can be derived once the notion is given, see Section 4.
604
+ 3.1. Poisson equation
605
+ Let us consider the Poisson equation, for f ∈ C0(Ω, R). The strong Poisson problem with Dirichlet boundary
606
+ condition g ∈ C^0(∂Ω, R) seeks solutions u ∈ C²(Ω, R) fulfilling:
+ −∆u(x) − f(x) = 0 ,    ∀x ∈ Ω ,
+ u(x) − g(x) = 0 ,    ∀x ∈ ∂Ω .        (23)
614
+ By using the notion of weak derivatives we can formulate a weaker version of the Poisson equation. That is, finding
615
+ u ∈ H²(Ω, R) ⊆ C^0(Ω, R) fulfilling
+ ∫_Ω (−∆u − f) φ dx = 0 ,    ∀φ ∈ C^∞(Ω, R),        (24)
620
+ subjected to the same Dirichlet boundary conditions as in equation (23). The notions give rise to the following optimisation
621
+ problems.
622
+
623
+ Learning Partial Differential Equations by Spectral Approximates of General Sobolev Spaces
624
+ 3.2. PDE loss
625
+ We use the Sobolev space setting Hk(Ω, R), Hl(∂Ω, R), k, l ∈ Z for introducing soft-constrained PDE-losses that
626
+ impose the Poisson-PDE-solution with general boundary condition as one global variational optimisation problem.
627
+ Definition 12. Given the setup of Eq. (23) the strong PDE-loss Lstrong : Hk+2(Ω, R) ∩ Hl(∂Ω, R) −→ R, k, l ∈ N is
628
+ defined by
629
+ L_strong(u) = r_strong(u) + s_strong(u) = ∥−∆u − f∥²_{H^k(Ω)} + ∥u|_{∂Ω} − g∥²_{L²(Ω)} .        (25)
633
+ The weak PDE-loss Lweak : Hk+2(Ω, R) ∩ Hl(∂Ω, R) −→ R, reflecting the weak formulation in Eq. (24), is given by
634
+ L_weak(u) = r_weak(u) + s_weak(u) = sup_{φ∈C^∞(Ω,R)} ⟨−∆u − f, φ⟩²_{H^k(Ω)} + sup_{φ∈C^∞(∂Ω,R)} ⟨u − g, φ⟩²_{L²(Ω)} .        (26)
645
+ Truncations of the strong loss Lstrong : Πm,n −→ R+ can be derived by applying the Sobolev cubatures from
646
+ Definition 9. A truncation Lweak : Πm,n −→ R+ of the weak PDE-loss, Eq. (26) is given by requiring Eq. (24) to be
647
+ fulfilled only for all polynomial test functions ϕ ∈ Πm,n = span(Lα)α∈Am,n spanned by the Lagrange polynomials. Hence,
648
+ we consider
649
+ r_weak(u) ≈ Σ_{α∈A_{m,n}} ⟨−∆u − f, L_α⟩²_{H^k(Ω)} ,    s_weak(u) ≈ Σ_{α∈A_{m,n}} ⟨u − g, L_α⟩²_{H^l(Ω)} .        (27)
660
+ While Definition 12 includes the case k, l < 0 the corresponding losses occur when replacing ∥ · ∥Hk(Ω), ∥ · ∥H−k(Ω)
661
+ with ∥·∥Hk(Ω), ∥·∥H−k(Ω),∗, yielding well-defined notions due to Proposition 3. Next, we derive the corresponding gradient
662
+ flows of the given losses.
663
+ 3.3. Variational gradient flows
664
+ Given a polynomial Q_{C_0} = Σ_{α∈A_{m,n}} c_α L_α in Lagrange expansion with respect to the Legendre grid P_{m,n} ⊆ Ω
+ with coefficients C_0 = (c_α)_{α∈A_{m,n}} ∈ R^{|A_{m,n}|}, we consider the truncated loss
+ L : R^{|A_{m,n}|} −→ R^+, L = L[C], acting on the coefficients and the gradient flow ODE
+ ∂_t C(t) = −∇L(Q_{C(t)}) ,    C(0) = C_0 .        (28)
680
+ Combining the identity Q_C(p_α) = c_α with Definition 9, for the evaluation vector f = (f(p_α))_{α∈A_{m,n}} we derive the
+ following expression for the L²-gradient in the case of the strong loss L = L_strong from Eq. (25), i.e.,
+ ∇_C(r_strong) = ∇_C ⟨ (D²_{x_1} + · · · + D²_{x_m}) C + f , W_{m,n} ( (D²_{x_1} + · · · + D²_{x_m}) C + f ) ⟩ ,
+ where, according to Eq. (15), D²_{x_i} = D_{2e_i} with e_i ∈ R^m being the standard basis, i = 1, . . . , m. Thus,
+ ∇_C(r_strong) = 2 (D²_{x_1} + · · · + D²_{x_m})^⊤ W_{m,n} ( (D²_{x_1} + · · · + D²_{x_m}) C + f ) ,        (29)
+ ∇_C(s_strong)^±_j = 2 (S^±_{m,n,j})^⊤ W_{m−1,n} ( S^±_{m,n,j} C − g^±_j ) ,    j = 1, . . . , m ,
+ where g^±_j is the evaluation vector of g in the (m−1)-dimensional Legendre grid P^±_{m−1,n,j} ⊆ ∂Ω^±_j contained in each face
+ ∂Ω^±_j of Ω, and S^±_{m,n,j} denotes the truncated trace operator, Definition 8.
+ Analogously, in case of the weak loss L = L_weak from Eq. (26) we derive
+ ∇_C(r_weak) = 2 (D²_{x_1} + · · · + D²_{x_m})^⊤ W²_{m,n} ( (D²_{x_1} + · · · + D²_{x_m}) C + f ) ,        (30)
+ ∇_C(s_weak)^±_j = 2 (S^±_{m,n,j})^⊤ W²_{m−1,n} ( S^±_{m,n,j} C − g^±_j ) .
735
+ Formulas for choosing truncated dual norms ∥ · ∥Hk(Ω), ∥ · ∥Hk(Ω),∗, 0 < k < ∞ as in Definition 9 result when replacing
736
+ Wm,n with the corresponding cubature matrix, e.g. Wm,nJ∗−1, from Definition 9 in Eq. (29), while in Eq. (30) W2
737
+ m,nJ∗−1
738
+ occurs.
739
+ For all cases, Corollary 11 provides the baseline for numerical stable implementations, which are realised and
740
+ documented in (ABC, 2021).
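+ The following 1D sketch (our own toy check, reusing diff_matrix_1d from Section 2.4; not the authors' code) assembles the
+ discrete strong residual of the Poisson problem −u″ = f and verifies the gradient formula of Eq. (29) against a central finite
+ difference:
+ import numpy as np
+
+ n = 8
+ p, w, D1 = diff_matrix_1d(n)
+ W, D2 = np.diag(w), D1 @ D1                       # truncated 1D Laplacian
+ f = np.pi ** 2 * np.sin(np.pi * p)                # right-hand side of -u'' = f
+
+ def r_strong(C):
+     res = D2 @ C + f                              # ||-D2 C - f||^2 = ||D2 C + f||^2
+     return res @ W @ res
+
+ def grad_r(C):
+     return 2.0 * D2.T @ W @ (D2 @ C + f)          # cf. Eq. (29)
+
+ C = np.random.default_rng(1).normal(size=n + 1)
+ eps, e0 = 1e-6, np.eye(n + 1)[0]
+ fd = (r_strong(C + eps * e0) - r_strong(C - eps * e0)) / (2 * eps)
+ print(fd, grad_r(C)[0])                           # the two values agree up to finite-difference error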
741
+
742
+ Learning Partial Differential Equations by Spectral Approximates of General Sobolev Spaces
743
+ 3.3.1. ANALYTIC VARIATION OF LINEAR PDES
744
+ Given the analytic expressions of the variational gradients in Eq. (29),(30) we derive the analytic solution of the
745
+ gradient descent, Eq. (28): To do so, we shorten D := (D²_{x_1} + · · · + D²_{x_m}), D* := D^T W_{m,n}, S := Σ_{j=1}^{m} S^±_{m,n,j},
+ S*g := W_{m−1,n} Σ_{j=1}^{m} g^±_j, and realise that Eq. (28) becomes:
754
+ (d/dt) C(t) = −2(D*D + S*S) C(t) + 2(S*g − D*f) .
756
+ By applying the variation of parameters we derive the solution of the ODE as:
757
+ C(t) = exp(−t · K∗K)C0 + 2(I − exp(−t · K∗K))(K∗K)+(S∗g − D∗f) ,
758
+ where K∗K := 2(D∗D+S∗S), and (K∗K)+ denotes the Moore–Penrose pseudo-left-inverse, see e.g., (Ben-Israel & Greville,
759
+ 2003; Trefethen & Bau III, 1997). In the case where K*K is a positive definite matrix, this implies
760
+ C_∞ := lim_{t→∞} C(t) = (K*K)^{−1}(S*g − D*f) .        (31)
763
+ While we expect that K∗K is positive definite, and thus invertible, whenever the underlying PDE problem is well posed and
764
+ possesses a unique solution, a formal proof of this implication requires a deeper theoretical study that is beyond the scope of this
765
+ article. Empirical demonstrations in Section 4, however, suggest this expectation to be genuine.
766
+ In any case, non-linear PDEs or inverse PDE problems cannot be solved via Eq. (31) and require gradient descent
767
+ methods, realising Eq. (28). A deeper investigation of such approaches is given in the next section.
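+ For a linear toy problem the limit (31) can be evaluated directly. The sketch below (our own 1D analogue with −u″ = π² sin(πx)
+ on (−1, 1), u(±1) = 0, reusing diff_matrix_1d from Section 2.4 and using unit boundary weights; not the authors' code)
+ assembles the normal equations and recovers u(x) = sin(πx) at the Legendre nodes:
+ import numpy as np
+
+ n = 16
+ p, w, D1 = diff_matrix_1d(n)
+ W, D2 = np.diag(w), D1 @ D1
+ f = np.pi ** 2 * np.sin(np.pi * p)                 # -u'' = f with solution u = sin(pi x)
+ bw = np.array([1.0 / np.prod(p[j] - np.delete(p, j)) for j in range(n + 1)])
+
+ def lagrange_eval(xs):                             # barycentric evaluation of the degree-n interpolant
+     M = np.empty((len(xs), n + 1))
+     for i, x in enumerate(xs):
+         num = bw / (x - p)
+         M[i] = num / num.sum()
+     return M
+
+ S = lagrange_eval([-1.0, 1.0])                     # trace to the two boundary points
+ g = np.zeros(2)
+ Dstar = D2.T @ W                                   # D* with D := D2 (shorthand of Section 3.3.1)
+ KtK = Dstar @ D2 + S.T @ S
+ C_inf = np.linalg.solve(KtK, S.T @ g - Dstar @ f)  # cf. Eq. (31), up to the chosen scaling of K*K
+ print(np.max(np.abs(C_inf - np.sin(np.pi * p))))   # small: the PSM matches sin(pi x) at the nodes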
768
+ 3.4. Exponential convergence of λ-convex gradient flows
769
+ In practice more general problems than linear (forward) PDE problems occur. We motivate this section by considering
770
+ an inverse problem for the Poisson equation (23). That is to consider a function f : Ω −→ R and an unknown parameter
771
+ µ ∈ R and pose the PDE problem
+ −∆u(x) − µ f(x) = 0 ,    ∀x ∈ Ω ,
+ u(x) − g(x) = 0 ,    ∀x ∈ Ω ,        (32)
779
+ where g is one specific Poisson solution, i.e., ∆g = µf on Ω. For inferring the parameter µ ∈ R and the PDE solutions
780
+ simultaneously we assume that g can be sampled at the Legendre grid Pm,n and formulate the truncated (polynomial) loss
781
+ by:
782
+ L[C, µ] = ∥−∆Q_C − µ f∥²_{H^k(Ω)} + ∥Q_C − g∥²_{H^l(Ω)} ,    k, l ∈ N .        (33)
787
+ While the PDE solution depends on µ itself, we cannot compute the analytic solution directly. Instead, we apply an iterative
788
+ gradient descent for deriving the solution based on Eq. (33). We prove that the proposed approach converges exponentially
789
+ fast for even more general problems.
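+ A 1D analogue of the inverse loss (33) with k = l = 0 can be minimised jointly in (C, µ) with an off-the-shelf optimiser. The
+ sketch below (our own toy with ground truth µ_gt = 1 and data g(x) = sin(πx), reusing p, w, D2, W from the previous sketch
+ and SciPy's L-BFGS-B optimiser as in Section 4; not the authors' code) recovers µ:
+ import numpy as np
+ from scipy.optimize import minimize
+
+ f0 = np.pi ** 2 * np.sin(np.pi * p)                # then -g'' = mu_gt * f0 with mu_gt = 1
+ g = np.sin(np.pi * p)                              # data sampled on the Legendre grid
+
+ def loss(z):                                       # z = (C, mu), cf. Eq. (33) with k = l = 0
+     C, mu = z[:-1], z[-1]
+     res = D2 @ C + mu * f0                         # ||-Delta Q_C - mu f0||^2 = ||D2 C + mu f0||^2
+     return res @ W @ res + (C - g) @ W @ (C - g)
+
+ opt = minimize(loss, np.zeros(n + 2), method="L-BFGS-B")
+ print(opt.x[-1])                                   # inferred mu, close to the ground truth 1.0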
790
+ Definition 13. A differentiable functional F : R|Am,n| → R is called λ-convex if there is a λ > 0 such that:
791
+ F[x] ≥ F[y] + ∇F[y]^T (x − y) + (λ/2) ∥x − y∥² ,    ∀x, y ∈ R^{|A_{m,n}|}        (34)
794
+ Theorem 14. Given a truncated loss L : R|Am,n| −→ R+, m, n ∈ N, as in Section 3.2, that is λ-convex and differentiable
795
+ and assume that the optimal solution C∞ := argminC∈R|Am,n|L[C] minimizing the variational problem exists and is unique.
796
+ Then both the loss and the gradient descent
797
+ ∂_t C(t) = −∇L(Q_{C(t)}) ,    C(0) = C_0 ,
+ converge exponentially fast as t → ∞:
+ (λ/2) ∥C(t) − C_∞∥² ≤ L[C(t)] − L[C_∞] ≤ e^{−2λt} (L[C_0] − L[C_∞]) .        (35)
803
+ Proof. The proof of the statement is given in the appendix.
804
+ We give some insights to assert in which situations Theorem 14 applies:
805
+
806
+ Learning Partial Differential Equations by Spectral Approximates of General Sobolev Spaces
807
+ Proposition 15. Let A ∈ Rr×s, r ≥ s ∈ N be a positive definite matrix, λ > 0 be the smallest eigenvalue of A then the
808
+ affine loss
809
+ L(C) = ∥AC + b∥² ,    b ∈ R^r        (36)
812
+ is λ-convex.
813
+ Proof. We start by observing that any norm is 1−convex, in particular it holds:
814
+ ∥x∥2 = ∥y∥2 + (∇∥y∥2)T (x − y) + ∥x − y∥2 ,
815
+ (37)
816
+ where (∇∥y∥2)T (x − y) = 2⟨y, x − y⟩.
817
+ By replacing the roles of x, y with Ax + b, Ay + b, respectively, we compute:
818
+ ∥Ax + b∥2 = ∥Ay + b∥2 + 2⟨Ay + b, A(x − y)⟩ + ∥A(x − y)∥2
819
+ = ∥Ay + b∥2 + 2⟨AT (Ay + b), x − y⟩ + ∥A(x − y)∥2
820
+ ≥ ∥Ay + b∥2 + 2(∇(∥Ay + b∥2), x − y) + λ∥x − y∥2 ,
821
+ where ∇(∥Ay + b∥2) = 2(AT (Ay + b)).
822
+ We want to note that the assumption on A in Proposition 15 can be relaxed:
823
+ Remark 16 (Exponential convergence of non-unique solutions). Given that ker A ̸= 0, but b ∈ Rr in Eq. (36) satisfies
824
+ b ∈ cokerAT = {x ∈ Rs : AT x ̸= 0} we observe that solving AC = b is equivalent to minimising
825
+ L(C) = ∥AT AC + AT b∥2 = ∥A′C + b′∥2 ,
826
+ (38)
827
+ with b′ = AT b, A′ = AT A. Let λ > 0 be the smallest non-vanishing eigenvalue of A′ = AT A. While cokerAT ∼= imA,
828
+ L is λ-convex on (ker A)⊥. Due to Theorem 14 and Proposition 15 this implies that the gradient descent of well-posed
829
+ problems, Eq. (38), converges exponentially fast to a solution as long as the initial coefficients C0 = C(0) ̸∈ ker A were
830
+ properly chosen.
831
+ The practical relevance of the observation above is part of the empirical demonstrations of our proposed concepts
832
+ given in the next section.
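+ The exponential decay predicted by Theorem 14 is easy to observe numerically. The sketch below (our own illustration on a
+ small λ-convex quadratic loss with a symmetric positive definite matrix A; not the authors' code) discretises the gradient
+ flow (28) with explicit Euler steps and prints the shrinking loss gap:
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ M = rng.normal(size=(5, 5))
+ A, b = M @ M.T + np.eye(5), rng.normal(size=5)     # A symmetric positive definite
+ C_inf = np.linalg.solve(A, -b)                     # unique minimiser of L(C) = ||A C + b||^2
+ loss = lambda C: (A @ C + b) @ (A @ C + b)
+
+ C, dt = np.zeros(5), 1e-4
+ for k in range(1, 30001):
+     C = C - dt * 2.0 * A.T @ (A @ C + b)           # explicit Euler step of Eq. (28)
+     if k % 10000 == 0:
+         print(k * dt, loss(C) - loss(C_inf))       # the gap decays geometrically in t = k * dt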
833
+ 4. Numerical experiments
834
+ We designed several numerical experiments for validating our theoretical results. The computations of the PSMs were
835
+ executed on a standard Linux laptop (Intel(R) Core(TM) i7-1065G7 CPU @ 1.30GHz, 32 GB RAM). Precomputation of the
836
+ Sobolev cubature matrices is realised as a feature of the open source package (Hernandez Acosta et al., 2021). The PSMs are
837
+ realised by Chebyshev polynomials, Eq. (3), constrained on Legendre grids as asserted in Eq. (18). All PINN experiments
838
+ were executed on the NVIDIA V100 cluster at HZDR. Complete code and benchmark sets are available at (ABC, 2021). We
839
+ intensively compared several PINN approaches in our previous work (Cardona & Hecht, 2022). That is why, apart from
840
+ classic PINNs, here, we focus on comparing our approach with the PINN-methods that turned out to be most reliable:
841
+ i) Classic PINNs with the strong L2-MSE loss based on (Raissi et al., 2019), as described in the introduction.
842
+ ii) Inverse Dirichlet Balancing (ID-PINNs) with the L2-MSE loss (Maddu et al., 2021), as described in the introduction.
843
+ iii) Sobolev Cubature PINNs (SC-PINNs) (Cardona & Hecht, 2022), with the weak L2-loss for all the experiments unless
844
+ specified otherwise.
845
+ iv) Gradient flow optimised PSMs (GF-PSM), using the LBFGS-optimiser (Byrd et al., 1995) for the forward problem
+ with the H^{−1}_⋆-norm for the PDE loss and the strong L²-loss for the other terms (unless further specified). Poisson and
+ QHO inverse problems are solved by an Implicit-Euler time integration (Butcher, 2001) with the strong L² loss, and
+ Newton-Raphson (Chong & Zak, 1996) is used for the Navier-Stokes inverse problem, with the H^{−1}_⋆ loss.
+ v) Analytic Descent (AD-PSM), deriving the PSM by the analytic descent given in Eq. (31) by choosing the dual H^{−1}_⋆-loss,
+ Eq. (20), for the PDE-loss and the strong L²-loss for the remaining terms.
855
+
856
+ Learning Partial Differential Equations by Spectral Approximates of General Sobolev Spaces
857
+ For measuring the approximation errors of a ground truth function g : Ω −→ R by a surrogate model u we evaluate
858
+ both on equidistant grids g = (g(p_i))_{i=1,...,N} ∈ R^N, u = (u(p_i))_{i=1,...,N} ∈ R^N of size N and compute the l_1, l_∞-errors
+ ϵ_1 := ∥g − u∥_1/N, ϵ_∞ := ∥g − u∥_∞. We used N = 100² points for the 2D problems and N = 20⁴ points for the 4D
860
+ problem. The parameter inference error is denoted with ϵµ := |µ − µgt|.
861
+ All models are trained with the same number of training points T. For the PINN and ID-PINN methods, the training
862
+ points are given by randomly sampling from an equidistant grid G of size |G| ≫ N. For the SC-PINN and the PSM methods
863
+ the training points are given by the Legendre grids. CPU-training-runtimes are reported in seconds.
864
+ 4.1. 2D and 4D Poisson equations
865
+ We start by considering the Poisson problem in dimension m = 2 in the strong formulation with Dirichlet boundary
866
+ conditions, Eq. (23).
867
+ Figure 1. Solution for 2D Poisson problem
868
+ dim = 2      ϵ1             ϵ∞             Runtime (s)
+ PINN         4.43 · 10−3    5.2 · 10−2     t = 886
+ ID-PINN      5.23 · 10−3    1.9 · 10−2     t = 1356
+ SC-PINN      2.52 · 10−3    3.33 · 10−2    t = 79.2
+ GF-PSM       5.37 · 10−5    2.94 · 10−3    t = 12.84
+ AD-PSM       8.79 · 10−10   1.25 · 10−8    t = 1.21
+
+ dim = 4      ϵ1             ϵ∞             Runtime (s)
+ GF-PSM       1.33 · 10−6    1.0 · 10−3     t = 173.59
+ AD-PSM       5.42 · 10−8    6.37 · 10−7    t = 7.66
906
+ Table 1. Errors for 2D and 4D Poisson forward problem
907
+ Experiment 4.1 (Non-periodic 2D-Poisson forward problem with hard transitions). We consider the Poisson equation with
908
+ right hand side function f given by
909
+ f(x, y) =C(A sin(ωy) + tanh(βy))(−Aω2 sin(ωx) − 2β2 tanh(βx)sech2(βx))
910
+ + C(A sin(ωx) + tanh(βx))(−Aω2 sin(ωy) − 2β2 tanh(βy)sech2(βy)),
911
+ with C = 0.1, A = 0.1, β = 5, ω = 10π. All the experiments were conducted with the same number of training points, as
912
+ required for the Sobolev cubatures of degree n = 50 in the domain and n = 100 for the boundary. For the SC-PINN the
913
+ weak L2 -loss was used for the PDE loss and for the boundary.
914
+ Table 1 (top) reports the results and shows that the PSM methods outperform all PINN approaches, both, in accuracy
915
+ and runtime. AD-PSM reaches seven orders of magnitude smaller ϵ1-error and requires up to three orders of magnitude
916
+ less runtime. The GF-PSM performance does not match that of AD-PSM, but is still far better than the PINN alternatives. The
917
+ results clearly demonstrate the PSM method to be capable of finding solutions to non-trivial linear PDEs with general
918
+ non-periodic boundary conditions.
919
+ The following experiment indicates that this observation remains true even for higher-dimensional problems.
920
+ Experiment 4.2 (4D Poisson equation forward problem). We seek for a solution of a Poisson problem in dimension m = 4.
921
+ We choose
922
+ f(x) := −4ω2g(x),
923
+ with ω = 1 and periodic boundary condition g(x) := sin(ωx1) cos(ωx2) sin(ωx3) cos(ωx4) yielding u(x) = g(x) to be
924
+ the analytic solution. We choose Sobolev cubatures of degree n = 8 for both, the domain and the boundary loss.
925
+ In Table 1 (bottom) the approximation errors are reported. Since all PINN approaches failed to provide any reasonable
+ solution, the PINN results are omitted. In contrast, the PSMs recover the solution accurately. We want to stress that
+ the PSM runtimes are still smaller than the training runtimes of ID-PINN or the standard PINNs for the analogous
+ 2D Poisson problem, validating again their superior efficiency.
929
+
930
945
+ Figure 2. Solution for 2D inverse Poisson
946
+ problem with ωgt = π.
947
+              ϵµ             ϵ1             ϵ∞             Runtime (s)
+ PINN         4.63 · 10−1    1.13 · 10−2    1.24 · 10−1    t ≈ 1592
+ ID-PINN      2.14 · 10−2    8.09 · 10−4    1.52 · 10−2    t ≈ 2184
+ SC-PINN      3.0 · 10−4     5.49 · 10−4    1.01 · 10−2    t ≈ 103
+ GF-PSM       5.8 · 10−8     6.0 · 10−10    3.47 · 10−9    t ≈ 0.49
972
+ Table 2. Errors for 2D Poisson inverse problem
973
+ Figure 4. Solution for 2D QHO with µ = 31 on Ω′ = 5.3Ω due to AD-PSM.
974
+ Experiment 4.3 (2D Poisson inverse problem). We consider the inverse 2D-Poisson problem, as introduced in Section 3.4,
975
+ Eq. (32): We seek to infer the parameter µ in the right-hand side f(x, y) = µ cos(ωx) sin(ωy), for the unknown
976
+ ground truth µgt = 2ω2
977
+ gt, ωgt = π and the corresponding PDE solution simultaneously, with the L2-loss (k = l = 0) given
978
+ in equation (33). The GF-PSM is applied for a Sobolev cubature with degree n = 100 for the boundary and n = 30 for the
979
+ PDE loss. Benchmarks for the standard PINN and the ID-PINN are executed with the same number of training points.
980
+ Table 2 reports the reached accuracy and the required runtimes. The GF-PSM outperforms all other methods by
981
+ several orders of magnitude in accuracy for both the solution of the PDE, as well as the inferred parameter µ. As discussed in
982
+ Section 3.4 the analytic variation, Eq. (31), does not directly apply for this task and is, thus, omitted here. The exponentially
983
+ fast convergence of the GF-PSM, Section 3.4, is reflected in the required runtime being 4 orders of magnitude less than the
984
+ PINN alternatives.
985
+ 4.2. Quantum Harmonic Oscillator in 2D
986
+ We consider the eigenvalue problem for the time-independent Quantum Harmonic Oscillator in dimension m = 2, which
987
+ is a special case of the Schr¨odinger equation with linear potential V (u(x)) := (x2
988
+ 1 + x2
989
+ 2)u(x), u ∈ C2(Ω, R), see e.g.,
990
+ (Liboff, 1980; Griffiths & Schroeter, 2018):
991
+ −∆u(x) + V(u(x)) = µ u(x) ,    ∀x ∈ Ω ,
+ u(x) − g(x) = 0 ,    ∀x ∈ ∂Ω ,
998
+ It is a classic fact that the eigenvalues are given by µ = n_1 + n_2 + 1, n_1, n_2 ∈ N, with corresponding eigenfunctions
+ g(x_1, x_2) = (π^{−1/4} / √(2^{n_1+n_2} n_1! n_2!)) · e^{−(x_1²+x_2²)/2} H_{n_1}(x_1) H_{n_2}(x_2) ,
1007
+ whereas Hn denotes the n-th Hermite polynomial.
1008
+ Experiment 4.4 (QHO forward problem). For solving the QHO forward problem with eigenvalue µ = 21 and extended
1009
+ domain Ω′ = [−5.3, 5.3], GF-PSM and the AD-PSM use Sobolev cubatures of degree n = 100 for the boundary and
1010
+
1011
+ Figure 4 panels: Ground Truth, Prediction, Point-wise Error (scale 1e−8).
1079
+ Figure 3. Solution of 2D QHO
1080
+ forward problem with µ = 21.
1081
+ µ = 21       ϵ1             ϵ∞             Runtime (s)
+ PINN         6.97 · 10−2    1. · 10−3      t ≈ 776
+ ID-PINN      4.29 · 10−2    1.30 · 10−1    t ≈ 948
+ SC-PINN      8.16 · 10−4    7.27 · 10−3    t ≈ 167
+ GF-PSM       1.6 · 10−8     5.4 · 10−8     t ≈ 0.16
+ AD-PSM       7.61 · 10−13   2.37 · 10−12   t ≈ 0.07
+
+ µ = 31       ϵ1             ϵ∞             Runtime (s)
+ GF-PSM       1.09 · 10−9    1.45 · 10−8    t ≈ 2.39
+ AD-PSM       2.25 · 10−9    9.82 · 10−9    t ≈ 1.07
1117
+ Table 3. Errors for 2D QHO forward problem with µ = 21, 31.
1118
+ Figure 5. Solution for 2D QHO with µgt = 9 on Ω′ = 5.3Ω.
1120
+                 Approximation error                                Runtime (s)
+                 ϵµ              ϵ1              ϵ∞
+ PINN            6.01            7.32 · 10−2     4.37 · 10−1        t ≈ 1414
+ ID-PINN         6.21 · 10−2     7.51 · 10−3     9.40 · 10−2        t ≈ 1346
+ SC-PINN         2.18 · 10−4     5.68 · 10−4     1.39 · 10−2        t ≈ 192
+ GF-PSM          9.50 · 10−11    1.49 · 10−12    5.13 · 10−10       t ≈ 5
+ Table 4. Errors for 2D QHO inverse problem with µgt = 9.
1146
+ n = 30 for the PDE loss, whereas we choose n = 200 and n = 50, respectively, for eigenvalue µ = 31 on the standard
+ hypercube Ω. The AD-PSM uses the default ∥ · ∥H−1(Ω),∗ norm, while the GF-PSM was applied with the weak L2-loss,
+ as in Eq. (26).
+ Results are reported in Table 3. SC-PINN was the only PINN method that achieves reasonable results for µ = 31
+ and Ω = [−1, 1]2. However, as in Section 4.1, the PSM methods outperform SC-PINN in both runtime and accuracy.
+ In the second scenario, µ = 21, Ω′ = 5.3Ω, none of the PINN approaches was able to reach close approximations,
+ while AD-PSM and GF-PSM do. AD-PSM performs best and its solution is visualised in Fig. 4.
1153
+ Experiment 4.5 (QHO inverse problem). Similar to Exp. 4.3, we seek to infer the unknown eigenvalue µ, set to µgt = 9,
+ and the corresponding continuous approximation of the PDE solution simultaneously, with given data u ∈ R|Am,n| sampled
+ on the Legendre grid, by optimising the loss:
+ 
+     L[C, µ] = ∥∆QC + V(QC) − µQC∥²_L2 + ∥QC − u∥²_L2                                   (39)
+ 
+ We choose a degree n = 50 Sobolev cubature for the domain and n = 200 on the boundary, and compare with the PINN
+ and the ID-PINN for the same number of training points.
1162
+ As shown in Table 4, the GF-PSM outperforms the ID-PINN by several orders of magnitude in both accuracy and
+ runtime. This reflects the strength and flexibility of the method when addressing linear inverse problems. While naïve,
+ unconditioned Implicit-Euler implementations are inherently unstable, the insights of Section 3.4 enable us to exploit the
+ structure of the gradient flow to realize stable numerical integrators. Applying the PSM method to non-linear forward
+ problems is our next demonstration task.
1167
1204
+ 4.3. 2D Incompressible Navier-Stokes equation
+ We consider the incompressible 2D Navier-Stokes equation as an example of a non-linear PDE problem: Let
+ u = (u1, u2), u ∈ C2(Ω, R2) be the vector velocity field and p ∈ C1(Ω; R) the scalar pressure field; the equations become:
+ 
+     −ν∆u(x, y) + (u(x, y) · ∇)u(x, y) + ∇p(x, y) = f(x, y) ,   ∀(x, y) ∈ Ω
+     ∇ · u(x, y) = 0 ,                                          ∀(x, y) ∈ Ω
+     u(x, y) − g(x, y) = 0 ,                                    ∀(x, y) ∈ ∂Ω ,
+ 
+ where
+ 
+     f(x, y) = 2νπ²(u1(x, y), u2(x, y)) + π cos(πx) cos(πy)(−u1(x, y), u2(x, y))
+               + π sin(πx) sin(πy)(u2, −u1) + exp(πy)(1, πx) ,
+     g(x, y) = [− sin(πx) cos(πy), cos(πx) sin(πy)]^T .
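+ A minimal sketch (our own transcription of the two formulas above; the viscosity default ν = 0.05 matches the
+ experiment below) for evaluating the Dirichlet data g and the forcing f:
+ 
+     import numpy as np
+ 
+     def velocity(x, y):
+         # Ground-truth velocity field g(x, y) used as Dirichlet data.
+         return np.array([-np.sin(np.pi * x) * np.cos(np.pi * y),
+                           np.cos(np.pi * x) * np.sin(np.pi * y)])
+ 
+     def forcing(x, y, nu=0.05):
+         # Right-hand side f(x, y), assembled term by term as printed above.
+         u1, u2 = velocity(x, y)
+         term1 = 2.0 * nu * np.pi ** 2 * np.array([u1, u2])
+         term2 = np.pi * np.cos(np.pi * x) * np.cos(np.pi * y) * np.array([-u1, u2])
+         term3 = np.pi * np.sin(np.pi * x) * np.sin(np.pi * y) * np.array([u2, -u1])
+         term4 = np.exp(np.pi * y) * np.array([1.0, np.pi * x])
+         return term1 + term2 + term3 + term4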
1223
+ Experiment 4.6 (Navier-Stokes Forward and Inverse Problem). We solve the Navier-Stokes forward problem by applying
+ the GF-PSM with Sobolev cubatures of degree n = 100 for the boundary and n = 30 for the domain, respectively. We set the
+ viscosity to ν = 0.05 and use the analytic pressure field p = x exp(πy) with Dirichlet boundary conditions.
+ The inverse problem seeks to infer ν and the scalar pressure field p for the ground-truth viscosity νgt = 0.05
+ and u1 = − sin(πx) cos(πy), u2 = cos(πx) sin(πy). The errors ϵ1 and ϵ∞ reported for this experiment compare the
+ predicted pressure against the ground-truth one.
1229
+ Figure 6. Solution u1.
1230
+                     Approximation error                Runtime (s)
+ Forward Problem     ϵ1              ϵ∞
+ GF-PSM  u1          3.31 · 10−10    2.35 · 10−9        t ≈ 405.22
+ GF-PSM  u2          3.28 · 10−10    2.35 · 10−9        t ≈ 405.22
+ Table 5. Approximation errors of the forward problem.
1246
+                     Approximation error                                Runtime (s)
+ Inverse Problem     ϵν              ϵ1              ϵ∞
+ GF-PSM              2.91 · 10−16    2.63 · 10−14    1.21 · 10−11       t ≈ 0.79
+ Table 6. Approximation errors of the inverse problem.
1258
+ While none of the PINN approaches was able to address the problem reasonably, the PSM methods reach a similar
+ accuracy as in the prior (linear) experiments, as reported in Tables 5 and 6.
1260
+ We summarise the experimental and theoretical findings in the concluding thoughts below.
1261
+ 5. Conclusion
1262
+ We introduced a novel variational spectral method for solving linear, non-linear, forward and inverse PDE problems.
+ In contrast to neural-network-based PINN approaches, Chebyshev polynomials serve as the polynomial surrogate model (PSM),
+ maintaining the same flexibility as PINNs.
1265
+ Based on our prior work (Cardona & Hecht, 2022), we gave weak PDE formulations resting on the novel Sobolev
+ cubatures that approximate general Sobolev norms. This allowed us to formulate and compute the resulting finite-dimensional
+ gradient flow for finding the optimal coefficients of the PSMs; in the case of linear PDEs, we could even derive the analytical
+ solution of the gradient flow. In particular, the resulting efficient computation of the negative order dual Sobolev norm
1269
1285
+ ∥ · ∥H−k(Ω),∗ was demonstrated to perform best compared to the alternative formulations. We have meanwhile deepened
+ the theoretical insights presented here in order to determine the optimal choice of the Sobolev norm beforehand; these
+ subjects are part of a follow-up study. This includes a relaxation of the Sobolev cubatures, resisting the curse of dimensionality
+ when addressing higher-dimensional problems.
1289
+ In summary, the PSM methods outperformed all other benchmark methods by far, showing the superiority in runtime
+ and accuracy of the PSM formulation on the whole spectrum of the considered problems. Since the PSMs
+ offer the same flexibility and capabilities as PINNs, we propose to extend the presented approach in order to learn PDE
+ solutions for ranges of boundary conditions, parameters (like diffusion constants) or dynamic time ranges. Because the
+ gain in efficiency allowed us to compute the presented benchmarks without High Performance Computing (HPC) on a local
+ machine, we expect so-far unreachable, high-dimensional (dim ≥ 3), strongly varying PDE problems, appearing for instance
+ in dynamic phase space simulations, to become solvable when addressed by a parallelised HPC version of the current
+ implementation (ABC, 2021).
1297
+ References
1298
+ ABC. Repository with documentation and implementations under construction. https://github.com/XYZ, 2021.
1299
+ Adams, R. A. and Fournier, J. J. Sobolev spaces, volume 140. Academic press, 2003.
1300
+ Arjovsky, M., Chintala, S., and Bottou, L. Wasserstein generative adversarial networks. In Precup, D. and Teh, Y. W.
1301
+ (eds.), Proceedings of the 34th International Conference on Machine Learning, volume 70 of Proceedings of Machine
1302
+ Learning Research, pp. 214–223. PMLR, 06–11 Aug 2017. URL https://proceedings.mlr.press/v70/
1303
+ arjovsky17a.html.
1304
+ Ben-Israel, A. and Greville, T. N. Generalized inverses: theory and applications, volume 15. Springer Science & Business
1305
+ Media, 2003.
1306
+ Bernardi, C. and Maday, Y. Spectral methods. Handbook of numerical analysis, 5:209–485, 1997.
1307
+ Brezis, H. Functional analysis, Sobolev spaces and partial differential equations, volume 2. Springer, 2011.
1308
+ Butcher, J. Numerical methods for ordinary differential equations in the 20th century. 12 2001. ISBN 9780444506177. doi:
1309
+ 10.1016/B978-0-444-50617-7.50018-5.
1310
+ Byrd, R. H., Lu, P., Nocedal, J., and Zhu, C. A limited memory algorithm for bound constrained optimization. SIAM
1311
+ Journal on Scientific Computing, 16(5):1190–1208, 1995. doi: 10.1137/0916069. URL https://doi.org/10.
1312
+ 1137/0916069.
1313
+ Canuto, C., Hussaini, M. Y., Quarteroni, A., and Zang, T. A. Spectral methods: fundamentals in single domains. Springer
1314
+ Science & Business Media, 2007.
1315
+ Cardona, J. E. S. and Hecht, M. Replacing automatic differentiation by sobolev cubatures fastens physics informed neural
1316
+ nets and strengthens their approximation power. arXiv preprint arXiv:2211.15443, 2022.
1317
+ Chong, E. and Zak, S. An introduction to optimization. Antennas and Propagation Magazine, IEEE, 38:60, 05 1996. doi:
1318
+ 10.1109/MAP.1996.500234.
1319
+ Ellis, J. A., Fiedler, L., Popoola, G. A., Modine, N. A., Stephens, J. A., Thompson, A. P., Cangi, A., and Rajamanickam, S.
1320
+ Accelerating finite-temperature kohn-sham density functional theory with deep neural networks. Physical Review B, 104
1321
+ (3):035120, 2021.
1322
+ Ern, A. and Guermond, J.-L. Theory and practice of finite elements, volume 159. Springer, 2004.
1323
+ Eymard, R., Gallouët, T., and Herbin, R. Finite volume methods. Handbook of numerical analysis, 7:713–1018, 2000.
1324
+ Griffiths, D. J. and Schroeter, D. F. Introduction to quantum mechanics. Cambridge University Press, 2018.
1325
+ Hernandez Acosta, U., Krishnan Thekke Veettil, S., Wicaksono, D., and Hecht, M. MINTERPY - multivariate interpolation
1326
+ in python. https://github.com/casus/minterpy/, 2021.
1327
+
1328
1329
+ Hessari, P. and Shin, B.-C. The least-squares pseudo-spectral method for navier–stokes equations. Computers & Mathematics
1330
+ with Applications, 66(3):318–329, 2013. ISSN 0898-1221. doi: https://doi.org/10.1016/j.camwa.2013.05.009. URL
1331
+ https://www.sciencedirect.com/science/article/pii/S0898122113003118.
1332
+ Jin, X., Cai, S., Li, H., and Karniadakis, G. E. NSFnets (Navier-Stokes Flow nets): Physics-informed neural networks for
1333
+ the incompressible Navier-Stokes equations. arXiv:2003.06496 [physics], March 2020. URL http://arxiv.org/
1334
+ abs/2003.06496. arXiv: 2003.06496.
1335
+ Jost, J. Partial Differential Equations. New York: Springer-Verlag, 2002.
1336
+ Kang, S. and Suh, Y. K. Spectral Methods, pp. 1875–1881. Springer US, Boston, MA, 2008. ISBN 978-0-387-48998-8.
1337
+ URL https://doi.org/10.1007/978-0-387-48998-8_1442.
1338
+ Karimi, H., Nutini, J., and Schmidt, M. Linear convergence of gradient and proximal-gradient methods under the polyak-
1339
+ łojasiewicz condition. In Frasconi, P., Landwehr, N., Manco, G., and Vreeken, J. (eds.), Machine Learning and Knowledge
1340
+ Discovery in Databases, pp. 795–811, Cham, 2016. Springer International Publishing. ISBN 978-3-319-46128-1.
1341
+ Kharazmi, E., Zhang, Z., and Karniadakis, G. E. Variational physics-informed neural networks for solving partial differential
1342
+ equations. arXiv preprint arXiv:1912.00873, 2019.
1343
+ Kharazmi, E., Zhang, Z., and Karniadakis, G. E. hp-vpinns: Variational physics-informed neural networks with domain
1344
+ decomposition. ArXiv, abs/2003.05385, 2020.
1345
+ Kim, S. D. and Shin, B. C. Chebyshev weighted norm least-squares spectral methods for the elliptic problem. Journal of
1346
+ Computational Mathematics, pp. 451–462, 2006.
1347
+ Lagergren, J. H., Nardini, J. T., Baker, R. E., Simpson, M. J., and Flores, K. B. Biologically-informed neural networks
1348
+ guide mechanistic modeling from sparse experimental data. arXiv:2005.13073 [math, q-bio], May 2020. URL http:
1349
+ //arxiv.org/abs/2005.13073. arXiv: 2005.13073.
1350
+ Lax, P. D. On Cauchy’s problem for hyperbolic equations and the differentiability of solutions of elliptic equations. Comm.
+ Pure Appl. Math., 8:615–633, 1955.
1352
+ LeVeque, R. J. Finite difference methods for ordinary and partial differential equations: steady-state and time-dependent
1353
+ problems. SIAM, 2007.
1354
+ Li, S. and Liu, W. K. Meshfree particle methods. Springer Science & Business Media, 2007.
1355
+ Liboff, R. L. Introductory Quantum Mechanics. Addison-Wesley Publishing Company, Canadá, 1980.
1356
+ Long, Z., Lu, Y., Ma, X., and Dong, B. Pde-net: Learning pdes from data. ArXiv, abs/1710.09668, 2018.
1357
+ Maddu, S., Sturm, D., M¨uller, C. L., and Sbalzarini, I. F. Inverse dirichlet weighting enables reliable training of physics
1358
+ informed neural networks. Machine Learning: Science and Technology, 2021. URL http://iopscience.iop.
1359
+ org/article/10.1088/2632-2153/ac3712.
1360
+ Neuberger, P. K. J. Potential theory and applications in a constructive method for finding critical points of ginzburg–landau
1361
+ type equations. Nonlinear Analysis: Theory, Methods & Applications vol. 69 iss. 3, 69, aug 2008. doi: 10.1016/j.na.2008.
1362
+ 02.074. URL libgen.li/file.php?md5=871f710130ca8f46f6cc6df7e25eb611.
1363
+ Raissi, M., Perdikaris, P., and Karniadakis, G. Physics-informed neural networks: A deep learning framework for
1364
+ solving forward and inverse problems involving nonlinear partial differential equations. Journal of Computational
1365
+ Physics, 378:686–707, 2019. ISSN 0021-9991. doi: https://doi.org/10.1016/j.jcp.2018.10.045. URL https://www.
1366
+ sciencedirect.com/science/article/pii/S0021999118307125.
1367
+ Sahli Costabal, F., Yang, Y., Perdikaris, P., Hurtado, D. E., and Kuhl, E. Physics-Informed Neural Networks for Cardiac
1368
+ Activation Mapping. Frontiers in Physics, 8:42, February 2020. ISSN 2296-424X. doi: 10.3389/fphy.2020.00042. URL
1369
+ https://www.frontiersin.org/article/10.3389/fphy.2020.00042/full.
1370
+ Sirignano, J. A. and Spiliopoulos, K. Dgm: A deep learning algorithm for solving partial differential equations. Journal of
1371
+ Computational Physics, 2018.
1372
+
1373
1374
+ Stroud, A. Approximate calculation of multiple integrals: Prentice-Hall series in automatic computation. Prentice-Hall
1375
+ (Englewood Cliffs, NJ), 1971.
1376
+ Stroud, A. and Secrest, D. Gaussian quadrature formulas, 1966.
1377
+ Trefethen, L. N. Cubature, approximation, and isotropy in the hypercube. SIAM Review, 59(3):469–491, 2017.
1378
+ Trefethen, L. N. Approximation theory and approximation practice, volume 164. SIAM, 2019.
1379
+ Trefethen, L. N. and Bau III, D. Numerical linear algebra, volume 50. SIAM, 1997.
1380
+ Wang, S., Teng, Y., and Perdikaris, P. Understanding and mitigating gradient flow pathologies in physics-informed neural
1381
+ networks. SIAM Journal on Scientific Computing, 43(5):A3055–A3081, 2021.
1382
+ Yang, L., Zhang, D., and Karniadakis, G. E. Physics-informed generative adversarial networks for stochastic differential
1383
+ equations. ArXiv, abs/1811.02033, 2020.
1384
+ Appendix
1385
+ The result provided in Theorem 14 is a known fact and can also be found, for example, in (Karimi et al., 2016) in
+ a more general setting. We prove it by combining the following lemmas. Consider a differentiable λ-convex truncated loss
+ L : R|Am,n| −→ R+, m, n ∈ N, as in Theorem 14, inducing the gradient descent ODE
+ 
+     ∂tC(t) = −∇L(QC(t)) ,   C(0) = C0 ,
+ 
+ where C0 ∈ R|Am,n| is some initial guess of the coefficients. The Implicit Euler discretisation of the ODE is given by
+ 
+     Cn+1 = Cn − τ∇L[Cn+1] ,                                                             (40)
+ 
+ where τ ∈ R is the learning rate. We will use the following two definitions:
1394
+ Definition 17. A functional F : R|Am,n| → R is convex if:
1395
+ F[tx + (1 − t)y] ≤ tF[x] + (1 − t)F[y],
1396
+ (41)
1397
+ it is called strictly convex, if the inequality is strict.
1398
+ Definition 18. A functional F : R|Am,n| → R is coercive if:
1399
+ lim
1400
+ ||u||→∞ F[u] = ∞
1401
+ (42)
1402
+ Lemma 19. Let the assumptions of Theorem 14 be fulfilled, then the following estimate applies:
+ 
+     (λ/2) ∥Cn − C∞∥² ≤ L[Cn] − L[C∞] ≤ (1/(2λ)) ∥∇L[Cn]∥² .
1406
+ Proof. We prove the first inequality by rephrasing the λ-convexity property, Eq. (34). Let γt := tx + (1 − t)y, then
+ L = L(x) is λ-convex if
+ 
+     L[γt] ≤ tL[x] + (1 − t)L[y] − (λ/2) t(1 − t)∥x − y∥² .
+ 
+ By replacing x and y with Cn and C∞, respectively, and re-arranging, we obtain:
+ 
+     (λ/2) t(1 − t)∥Cn − C∞∥² ≤ t(L[Cn] − L[C∞]) + L[C∞] − L[γt] ≤ t(L[Cn] − L[C∞]) ,
+ 
+ where we used the minimality of C∞ for the last inequality. Dividing by t and taking the limit for t → 0 yields the first
+ inequality of Lemma 19. The second inequality follows directly from the λ-convexity, Eq. (34), implying
+ 
+     L[Cn] − L[C∞] ≤ −∇L[Cn]⊤(C∞ − Cn) − (λ/2) ∥C∞ − Cn∥² .
1417
+
1418
1419
+ We set F[C∞] := ∇L[Cn]⊤(C∞ − Cn) + (λ/2) ∥C∞ − Cn∥₂² and realise that F is a coercive, strictly convex functional with
+ respect to C∞. Hence, the uniquely determined minimum C⋆∞ is given by:
+ 
+     ∇F = 0  ⇐⇒  (C⋆∞ − Cn) = −(1/λ) ∇L[Cn] .
+ 
+ In light of this fact, we can bound −F by
+ 
+     L[Cn] − L[C∞] ≤ (1/λ − 1/(2λ)) ∥∇L[Cn]∥² ,
+ 
+ yielding the desired result.
1433
+ The following lemma provides the monotonicity property of the gradient flow, being a necessary ingredient for proving
1434
+ the exponential convergence.
1435
+ Lemma 20. Let the assumptions of Theorem 14 be fulfilled, then the following estimate holds:
+ 
+     L[Cn−1] − L[C∞] ≥ (1 + λτ)² (L[Cn] − L[C∞]) .
+ 
+ Proof. Due to the λ-convexity and the Implicit Euler update, Eq. (40), we realise that:
+ 
+     L[Cn−1] ≥ L[Cn] + ∇L[Cn](Cn−1 − Cn) + (λ/2) ∥Cn−1 − Cn∥²
+             = L[Cn] + τ(τλ/2 + 1) ∥∇L[Cn]∥² .
+ 
+ Due to Lemma 19 we further conclude
+ 
+     L[Cn−1] ≥ L[Cn] + 2λτ(τλ/2 + 1)(L[Cn] − L[C∞]) .                                    (43)
+ 
+ Adding −L[C∞] on both sides provides the claim.
1447
+ Lemma 21. Let the assumptions of Theorem 14 be fulfilled and define ˆλ := (1/τ) log(1 + λτ). Then the sequence
+ 
+     ∆nL := L[Cn] − L[C∞]
+ 
+ decreases monotonically with an exponential rate of e^(−2ˆλτn), i.e.
+ 
+     ∆nL ≤ e^(−2ˆλτn) (L[C0] − L[C∞]) .                                                  (44)
1453
+ Proof. Due to Lemma 20 we compute
+ 
+     e^(2ˆλτn) (L[Cn] − L[C∞]) = (1 + λτ)^(2n) (L[Cn] − L[C∞])
+                               ≤ (1 + λτ)^(2(n−1)) (L[Cn−1] − L[C∞])
+                               ≤ · · · ≤ L[C0] − L[C∞] .
1458
+ Proof of Theorem 14. Theorem 14 now follows by combining Lemmas 19 and 21, yielding:
+ 
+     (1/λ) ∥Cn − C∞∥₂² ≤ L[Cn] − L[C∞] ≤ e^(−2ˆλτn) (L[C0] − L[C∞]) .                    (45)
+ 
+ Thus, for τ → 0, it follows by the definition of ˆλ that ˆλ → λ and Cn → C(t), with t = nτ, due to the continuity of
+ C = C(t) inherited from the differentiability of F. Hence, the continuity of the norm implies the statement.
+ Remark 22. Lemma 20 implies that the Implicit Euler discretised gradient flow also converges exponentially fast.
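+ The statement of Remark 22 can be checked numerically. A minimal sketch (a toy quadratic, λ-convex loss of our own
+ choosing, not the paper's PDE losses): for L[C] = ½ C⊤AC with A symmetric positive definite, the implicit step
+ Cn+1 = Cn − τ∇L[Cn+1] is linear and can be solved exactly, and the bound (44) holds along the iterates.
+ 
+     import numpy as np
+ 
+     A = np.diag(np.linspace(0.5, 3.0, 5))        # eigenvalues >= lam = 0.5, so L is 0.5-convex
+     C0 = np.ones(5)
+     tau, lam = 0.1, 0.5
+     lam_hat = np.log(1.0 + lam * tau) / tau      # rate from Lemma 21
+     loss = lambda c: 0.5 * c @ A @ c             # minimiser C_inf = 0, L[C_inf] = 0
+     C, L0 = C0.copy(), loss(C0)
+     for n in range(1, 51):
+         C = np.linalg.solve(np.eye(5) + tau * A, C)                        # implicit Euler step (40)
+         assert loss(C) <= np.exp(-2.0 * lam_hat * tau * n) * L0 + 1e-12    # bound (44)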
1466
+
CdE4T4oBgHgl3EQfFgzX/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
DNAzT4oBgHgl3EQfwf6z/content/tmp_files/2301.01724v1.pdf.txt ADDED
@@ -0,0 +1,2838 @@
1
2
+ Super-resolution with Binary Priors: Theory and
3
+ Algorithms
4
+ Pulak Sarangi, Ryoma Hattori, Takaki Komiyama and Piya Pal
5
+ Abstract—The problem of super-resolution is concerned with
6
+ the reconstruction of temporally/spatially localized events (or
7
+ spikes) from samples of their convolution with a low-pass filter.
8
+ Distinct from prior works which exploit sparsity in appropriate
9
+ domains in order to solve the resulting ill-posed problem, this
10
+ paper explores the role of binary priors in super-resolution,
11
+ where the spike (or source) amplitudes are assumed to be
12
+ binary-valued. Our study is inspired by the problem of neural
13
+ spike deconvolution, but also applies to other applications such
14
+ as symbol detection in hybrid millimeter wave communication
15
+ systems. This paper makes several theoretical and algorithmic
16
+ contributions to enable binary super-resolution with very few
17
+ measurements. Our results show that binary constraints offer
18
+ much stronger identifiability guarantees than sparsity, allowing
19
+ us to operate in “extreme compression" regimes, where the num-
20
+ ber of measurements can be significantly smaller than the sparsity
21
+ level of the spikes. To ensure exact recovery in this "extreme
22
+ compression" regime, it becomes necessary to design algorithms
23
+ that exactly enforce binary constraints without relaxation. In
24
+ order to overcome the ensuing computational challenges, we
25
+ consider a first order auto-regressive filter (which appears in
26
+ neural spike deconvolution), and exploit its special structure. This
27
+ results in a novel formulation of the super-resolution binary spike
28
+ recovery in terms of binary search in one dimension. We perform
29
+ numerical experiments that validate our theory and also show
30
+ the benefits of binary constraints in neural spike deconvolution
31
+ from real calcium imaging datasets.
32
+ Index Terms—Binary compressed sensing, super-resolution,
33
+ spike deconvolution, sparsity, binary search, beta-expansions
34
+ I. INTRODUCTION
35
+ The problem of recovering localized events (spikes) from
36
+ their convolution with a blurring kernel, arises in a wide range
37
+ of scientific and engineering applications such as fluorescence
38
+ microscopy [1], neural spike deconvolution [2]–[4], hybrid
39
+ millimeter wave (mmWave) communication [5], to name a few.
40
+ Consider K temporal spikes, which can be represented as:
41
+     xhi(t) = Σ_{k=1}^{K} ck δ(t − nk Thi)
46
+ Here, the high-rate spikes are supported on a fine temporal grid
47
+ with spacing Thi, nk ∈ Z is an integer corresponding to the
48
+ time index of the kth spike and ck denotes its amplitude. The
49
+ convolution of spikes with a filter h(t) is typically uniformly
50
+ (down)sampled at a (low) rate Tlo = DThi (D > 1), yielding
51
+ measurements:
52
+     y[n] = xhi(t) ⋆ h(t)|_{t=nTlo} = Σ_{k=1}^{K} ck h(nTlo − nk Thi)                    (1)
58
+ The goal of super-resolution is to recover the spike locations nk
59
+ and amplitudes ck, k = 1, 2, · · · , K from a limited number (M)
60
+ of low-rate samples {y[n]}M−1
61
+ n=0 . The problem is typically ill-
62
+ posed due to systematic attenuation of high-frequency contents
63
+ of the spikes by the low-pass filter h(t). In order to make the
64
+ problem well-posed, it becomes necessary to exploit priors such
65
+ as sparsity [6]–[9] and/or non-negativity [10], [11]. In recent
66
+ times, there has been a substantial progress towards developing
67
+ efficient algorithms for provably solving the super-resolution
68
+ problem [7]–[19].
69
+ In this paper, we investigate the problem of binary super-
70
+ resolution, where the amplitudes of the spikes are known
71
+ apriori to be ck = A, but their number (K) and locations
72
+ (nk) are unknown. Motivated by the problem of neural spike
73
+ deconvolution in two-photon calcium imaging [2], [20], we
74
+ will focus on a blurring kernel that can be represented as a
75
+ stable first order auto-regressive (AR(1)) filter. Each neural
76
+ spike results in a sharp rise in Ca2+ concentration followed by
77
+ a slow exponential decay (modeled as the impulse response of
78
+ an AR(1) filter), which results in an overlap of the responses
79
+ from nearby spiking events, leading to poor temporal resolution
80
+ [2], [21].
81
+ A. Related Works
82
+ Early works on super-resolution date back to algebraic/subspace-based techniques such as Prony’s method, MUSIC [12], [22], ESPRIT [8], [23] and matrix
95
+ pencil [9], [24]. Following the seminal work in [6], substantial
96
+ progress has been made in understanding the role of sparsity
97
+ as a prior for super-resolution [7], [25], [26]. In recent times,
98
+ convex optimization-based techniques have been developed
99
+ that employ Total Variational (TV) norm and atomic norm
100
+ regularizers, in order to promote sparsity [7], [18], [19], [25],
101
+ [26] and/or non-negativity [10], [11], [27]. These techniques
102
+ primarily employ sampling in the Fourier/frequency domain by
103
+ assuming the kernel h(t) to be (approximately) bandlimited.
104
+ However, selecting the appropriate cut-off frequency is crucial
105
+ for super-resolution and needs careful consideration [25],
106
+ [28]. Unlike subspace-based methods, theoretical guarantees
107
+ for these convex algorithms rely on a minimum separation
108
+ between the spikes, which is also shown to be necessary even
109
+ in absence of noise [29]. The finite rate of innovation (FRI)
110
+ framework [30]–[34] also considers the recovery of spikes
111
+ from measurements acquired using an exponentially decaying
112
+ kernel, which includes the AR(1) filter considered in this
113
+ paper. In the absence of noise, FRI enables the exact recovery
114
+ of K spikes with arbitrary amplitudes from M = Ω(K)1
115
+ measurements, without any separation condition [32]. It is
116
+ to be noted that all of the above methods require M > K
117
+ measurements for resolving K spikes. In contrast, we will
118
+ show that it is possible to recover K spikes from M ≪ K
119
+ 1This notation essentially means that there exists a positive constant c such
120
+ that M ≥ cK.
121
+ arXiv:2301.01724v1 [eess.SP] 4 Jan 2023
122
+
123
124
+ measurements by exploiting the binary nature of the spiking
125
+ signal. The above algorithms are designed to handle arbitrary
126
+ real-valued amplitudes and as such, they are oblivious to
127
+ binary priors. Therefore, they cannot successfully recover
128
+ spikes in the regime M < K, which is henceforth referred to
129
+ as the extreme compression regime.
130
+ The problem of recovering binary signals from underde-
131
+ termined linear measurements (with more unknowns than
132
+ equations/measurements) has been recently studied under the
133
+ parlance of Binary Compressed Sensing (BCS) [35]–[42].
134
+ In BCS, the undersampling operation employs random (and
135
+ typically dense) sampling matrices, whereas we consider a
136
+ deterministic and structured measurement matrix derived from
137
+ a filter, followed by uniform downsampling. Moreover, existing
138
+ theoretical guarantees for BCS crucially rely on sparsity
139
+ assumptions that will be shown to be inadequate for our
140
+ problem (discussed in Section II-C). Most importantly, in order
141
+ to achieve computational tractability, BCS relaxes the binary
142
+ constraints and solves continuous-valued optimization problems.
143
+ Consequently, their theoretical guarantees do not apply in the
144
+ extreme compression regime M < K.
145
+ As mentioned earlier, our study is motivated by the problem
146
+ of neural spike deconvolution arising in calcium imaging [3],
147
+ [4], [20], [32], [43]–[45]. A majority of the existing spike
148
+ deconvolution techniques [4], [43], [44] infer the spiking
149
+ activity at the same (low) rate at which the fluorescence signal
150
+ is sampled, and a single estimate such as spike counts or
151
+ rates are obtained over a temporal bin equal to the resolution
152
+ of the imaging rate. Although sequential Monte-Carlo based
153
+ techniques have been proposed that generate spikes at a rate
154
+ higher than the calcium frame rate [3], no theoretical guarantees
155
+ are available that prove that these methods can indeed uniquely
156
+ identify the high-rate spiking activity. Algorithms that rely
157
+ on sparsity and non-negativity [43], [44] alone are ineffective
158
+ for inferring the neural spiking activity that occurs at a much
159
+ higher rate than the calcium sampling rate. On the other hand,
160
+ at the high-rate, the spiking activity is often assumed to be
161
+ binary since the probability of two or more spikes occurring
162
+ within two time instants on the fine temporal grid is negligible
163
+ [2], [46]. Therefore, we propose to exploit the inherent binary
164
+ nature of the neural spikes and provide the first theoretical
165
+ guarantees that it is indeed possible to resolve the high-rate
166
+ binary neural spikes from calcium fluorescence signal acquired
167
+ at a much lower rate.
168
+ B. Our Contributions
169
+ We make both theoretical and algorithmic contributions to
170
+ the problem of binary super-resolution in the setting when
171
+ the spikes lie on a fine grid. We theoretically establish that
172
+ at very low sampling rates, sparsity and non-negativity are
173
+ inadequate for the exact reconstruction of binary spikes (Lemma
174
+ 2). However, by exploiting the binary nature of the spiking
175
+ activity, much stronger identifiability results can be obtained
176
+ compared to classical sparsity-based results (Theorem 1). In the
177
+ absence of noise, we show that it is possible to uniquely recover
178
+ K binary spikes from only M = Ω(1) low-rate measurements.
179
+ The analysis also provides interesting insights into the interplay
180
+ between binary priors and the “infinite memory" of the AR(1)
181
+ filter.
182
+ Although it is possible to uniquely identify binary spikes in
183
+ the extreme compression regime (M ≪ K), the combinatorial
184
+ nature of binary constraints introduce computational hurdles in
185
+ exactly enforcing them. Our second contribution is to leverage
186
+ the special structure of the AR(1) measurements to overcome
187
+ this computational challenge in the extreme compression
188
+ regime M < K (Section III-A). Our formulation reveals
189
+ an interesting and novel connection between binary super-
190
+ resolution, and finding the generalized radix representation of
191
+ real numbers, known as β-expansion [47]–[49] (Section III). In
192
+ order to circumvent the problem of exhaustive search, we pre-
193
+ construct and store (in memory) a binary tree that is completely
194
+ determined by the model parameters (filter and undersampling
195
+ factor). When the low-rate measurements are acquired, we can
196
+ efficiently perform a binary search to traverse the tree and find
197
+ the desired binary solution. This ability to trade-off memory
198
+ for computational efficiency is made possible by the unique
199
+ structure of the measurement model governed by the AR(1)
200
+ filter. The algorithm guarantees exact super-resolution even
201
+ when the measurements are corrupted by a small bounded
202
+ (adversarial) noise, the strength of which depends on the
203
+ AR filter parameter and the undersampling factor. When the
204
+ measurements are corrupted by additive Gaussian noise, we
205
+ characterize the probability of erroneous decoding (Theorem
206
+ 3) in the extreme compression regime M < K and indicate
207
+ the trade-off among the filter parameter, SNR and the extent
208
+ of compression. Finally, we also demonstrate how binary
209
+ priors can improve the performance of a popularly used spike
210
+ deconvolution algorithm (OASIS [43]) on real calcium imaging
211
+ datasets.
212
+ II. FUNDAMENTAL SAMPLE COMPLEXITY OF BINARY
213
+ SUPER-RESOLUTION
214
+ Let yhi[n] be the output of a stable first-order Autoregressive
215
+ AR(1) filter with parameter α, 0 < α < 1, driven by an
216
+ unknown binary-valued input signal xhi[n] ∈ {0, A}, A > 0:
217
+ yhi[n] = αyhi[n − 1] + xhi[n]
218
+ (2)
219
+ In this paper, we consider a super-resolution setting where
220
+ we do not directly observe yhi[n], and instead acquire M
221
+ measurements {ylo[n]}M−1
222
+ n=0
223
+ at a lower-rate by uniformly
224
+ subsampling yhi[n] by a factor of D:
225
+ ylo[n] = yhi[Dn],
226
+ n = 0, 1, · · · , M − 1,
227
+ (3)
228
+ The signal ylo[n] corresponds to a filtered and downsampled
229
+ version of the signal xhi[n] where the filter is an infinite impulse
230
+ response (IIR) filter with a single pole at α. Let ylo ∈ RM
231
+ be a vector obtained by stacking the low-rate measurements
232
+ {ylo[n]}M−1
233
+ n=0 :
234
+ ylo = [ylo[0], ylo[1], · · · , ylo[M − 1]]⊤
235
+ Since (2) represents a causal filtering operation, the low rate
236
+ signal ylo only depends on the present and past high-rate
237
+ binary signal. Denote L := (M − 1)D + 1. The M low-rate
238
+ measurements in ylo are a function of L samples of the high
239
+
240
241
+ rate binary input signal {xhi[n]}L−1
242
+ n=0. These L samples are
243
+ given by the following vector xhi ∈ {0, A}L:
244
+ xhi := [xhi[0], xhi[1], · · · , xhi[L − 1]]⊤.
245
+ Assuming the system to be initially at rest, i.e., yhi[n] = 0, n <
246
+ 0, we can represent the M samples from (3) in a compact
247
+ matrix-vector form as:
248
+ ylo := SDyhi = SDGαxhi
249
+ (4)
250
+ where Gα ∈ RL×L is a Toeplitz matrix given by:
251
+     Gα = [ 1          0          · · ·   0
+            α          1          · · ·   0
+            ...        ...        ...     ...
+            α^(L−1)    α^(L−2)    · · ·   1 ]                                            (5)
273
+ and SD ∈ RM×L is defined as:
274
+     [SD]i,j = 1  if j = (i − 1)D + 1,  and 0 otherwise.
280
+ The matrix SD represents the D−fold downsampling operation.
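+ A minimal sketch (parameter values are our own, chosen only for illustration) of generating measurements
+ according to (2)-(4):
+ 
+     import numpy as np
+ 
+     def simulate_measurements(alpha=0.9, D=8, M=20, A=1.0, p_spike=0.1, seed=0):
+         rng = np.random.default_rng(seed)
+         L = (M - 1) * D + 1
+         x_hi = A * (rng.random(L) < p_spike).astype(float)    # binary spikes on the fine grid
+         G = np.tril(alpha ** np.clip(np.subtract.outer(np.arange(L), np.arange(L)), 0, None))
+         y_hi = G @ x_hi                                       # AR(1)-filtered signal, G as in (5)
+         y_lo = y_hi[::D]                                      # D-fold downsampling, M samples as in (3)
+         return x_hi, y_lo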
281
+ Our goal is to infer the unknown high-rate binary input signal
282
+ xhi[n] from the low-rate measurements ylo[n]. This is essentially
283
+ a “super-resolution" problem because the AR(1) filter first
284
+ attenuates the high-frequency components of xhi[n], and
285
+ the uniform downsampling operation systematically discards
286
+ measurements. As a result, it may seem that the spiking activity
287
+ {xhi[(n − 1)D + k]}D
288
+ k=1 occurring “in-between" two low-rate
289
+ measurements ylo[n − 1] and ylo[n] is apparently lost. One can
290
+ potentially interpolate arbitrarily, making the problem hopeless.
291
+ In the next section, we will show that surprisingly, xhi still
292
+ remains identifiable from ylo in the absence of noise, due to
293
+ the binary nature of xhi and “infinite memory" of the AR(1)
294
+ filter.
295
+ A. Identifiability Conditions for Binary super-resolution
296
+ Consider the following partition of xhi into M disjoint blocks,
297
+ where the first block is a scalar and the remaining M −1 blocks
298
+ are of length D, xhi = [xhi(0), xhi(1)⊤, . . . , xhi(M−1)⊤]⊤. Here,
299
+ xhi(0) = xhi[0] and xhi(n) ∈ {0, A}D is given by:
300
+     [xhi(n)]k = xhi[(n − 1)D + k] ,    1 ≤ n ≤ M − 1                                    (6)
304
+ The sub-vectors xhi(n), and xhi(n−1) (n ≥ 1) represent consec-
305
+ utive and disjoint blocks (of length D) of the high-rate binary
306
+ spike signal. In order to study the identifiability of xhi from ylo,
307
+ we first introduce an alternative (but equivalent) representation for (4), by constructing a sequence c[n] as follows:
+ 
+     c[0] = ylo[0] ,    c[n] = ylo[n] − α^D ylo[n − 1] ,    1 ≤ n ≤ M − 1                (7)
311
+ Given the high rate AR(1) model defined in (2), it is possible
312
+ to recursively represent yhi[Dn] in terms of yhi[Dn − 1], which
313
+ in turn, can be represented in terms of yhi[Dn − 2], and so
314
+ on. By this recursive relation, we can represent yhi[Dn − 1] in
315
+ terms of yhi[Dn−D] and {xhi[Dn−i]}D−1
316
+ i=0 and re-write ylo[n]
317
+ as
318
+     ylo[n] = yhi[Dn] = α yhi[Dn − 1] + xhi[Dn]
+            = α^D yhi[Dn − D] + α^(D−1) xhi[D(n − 1) + 1] + · · · + α xhi[Dn − 1] + xhi[Dn] ,
+     ylo[n] − α^D ylo[n − 1] = α^(D−1) xhi[D(n − 1) + 1] + · · · + α xhi[Dn − 1] + xhi[Dn]        (8)
324
+ The last equality holds due to the fact that ylo[n−1] = yhi[Dn−
325
+ D]. Combining (7) and (8), the sequence c[n] can be re-written
326
+ as c[0] = ylo[0] = xhi(0), and for 1 ≤ n ≤ M − 1
327
+     c[n] = Σ_{i=1}^{D} α^(D−i) xhi[(n − 1)D + i] = hα⊤ xhi(n)                           (9)
335
+ where hα = [αD−1, αD−2, . . . , α, 1]T ∈ RD. This implies
336
+ that c[n] depends only on the block xhi(n). Denote c :=
337
+ [c[0], c[1], . . . , c[M − 1]]⊤ ∈ RM. For any D, (9) can be
338
+ compactly represented as:
339
+ c = HD(α)xhi
340
+ (10)
341
+ where HD(α) ∈ RM×L is given by:
342
+ HD(α) =
343
+
344
+ ������
345
+ 1
346
+ 0⊤
347
+ 0⊤
348
+ · · ·
349
+ 0⊤
350
+ 0
351
+ h⊤
352
+ α
353
+ 0⊤
354
+ · · ·
355
+ 0⊤
356
+ 0
357
+ 0⊤
358
+ h⊤
359
+ α
360
+ · · ·
361
+ 0⊤
362
+ ...
363
+ ...
364
+ ...
365
+ ...
366
+ ...
367
+ 0
368
+ 0⊤
369
+ 0⊤
370
+ · · ·
371
+ h⊤
372
+ α
373
+
374
+ ������
375
+ The following Lemma establishes the equivalence between (4)
376
+ and (10).
377
+ Lemma 1. Given ylo, construct c following (7). Then, there
378
+ is a unique binary xhi ∈ {0, A}L satisfying (4) if and only if
379
+ xhi is a unique binary vector satisfying (10).
380
+ Proof. First suppose that there is a unique binary xhi ∈ {0, A}L
381
+ satisfying (4) but (10) has a non-unique binary solution, i.e.,
382
+ there exists xhi′ ∈ {0, A}L, xhi′ ̸= xhi, such that
383
+     c = HD(α)xhi = HD(α)xhi′                                                            (11)
386
+ Define yhi′ := Gαxhi′ whose entries are given by:
387
+     yhi′[n] = Σ_{k=0}^{n} α^(n−k) xhi′[k] ,    0 ≤ n ≤ L − 1                            (12)
396
+ Notice that (7) can be re-written as
397
+ ylo[0] = c[0] = xhi[0], ylo[1] = c[1] + αDylo[0] = c[1] + αDc[0]
398
+ ylo[2] = c[2] + αDylo[1] = c[2] + αDc[1] + α2Dc[0]
399
+ ...
400
+ Following this recursive relation, and using (9) and (11), we
401
+ can further re-write ylo[n] as:
402
+     ylo[n] = Σ_{i=0}^{n} α^((n−i)D) c[i] = α^(nD) xhi′(0) + Σ_{i=1}^{n} α^((n−i)D) hα⊤ xhi′(i)
+            = α^(nD) xhi′(0) + Σ_{i=1}^{n} Σ_{j=1}^{D} α^(nD−(i−1)D−j) xhi′[(i − 1)D + j]
+        (a) = Σ_{k=0}^{nD} α^(nD−k) xhi′[k]
+        (b) = yhi′[nD]                                                                   (13)
437
+
438
439
+ The equality (a) follows by a re-indexing of the summation
440
+ into a single sum, and (b) follows from (12). By arranging
441
+ (13) in a matrix form we obtain the following relation:
442
+     ylo = SDGαxhi′
444
+ However from (4), we have ylo = SDGαxhi. This contradicts
445
+ the supposition that (4) has a unique binary solution.
446
+ Next, suppose that (10) has a unique binary solution but the
447
+ binary solution to (4) is non-unique, i.e., there exists xhi′ ∈
448
+ {0, A}L, xhi′ ̸= xhi such that
449
+     ylo = SDGαxhi′ = SDGαxhi
451
+ By following (7) and (10), we also have c = HD(α)xhi′ =
452
+ HD(α)xhi which contradicts the assumption that solution of
453
+ (10) is unique.
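+ The construction in (7)-(10) can be transcribed directly. A minimal sketch (our own helper functions, using the
+ notation above):
+ 
+     import numpy as np
+ 
+     def preprocess(y_lo, alpha, D):
+         # c[0] = y_lo[0], c[n] = y_lo[n] - alpha^D * y_lo[n-1] as in (7)
+         c = np.empty_like(y_lo)
+         c[0] = y_lo[0]
+         c[1:] = y_lo[1:] - alpha ** D * y_lo[:-1]
+         return c
+ 
+     def build_H(alpha, D, M):
+         # Block matrix H_D(alpha) of (10); row n acts only on the block x_hi^(n)
+         h = alpha ** np.arange(D - 1, -1, -1)        # h_alpha = [alpha^(D-1), ..., alpha, 1]
+         L = (M - 1) * D + 1
+         H = np.zeros((M, L))
+         H[0, 0] = 1.0
+         for n in range(1, M):
+             H[n, (n - 1) * D + 1 : n * D + 1] = h
+         return H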
454
+ Lemma 1 assures that a binary xhi is uniquely identifiable
455
+ from measurements ylo if and only if there is a unique binary
456
+ solution xhi ∈ {0, A}L to (10). From (9), it can be seen that
457
+ c[n] and c[n − 1] have contributions from only disjoint blocks
458
+ of high rate spikes xhi(n), and xhi(n−1). Hence effectively,
459
+ we only have a single scalar measurement c[n] to decode an
460
+ entire block xhi(n) of length D, regardless of how sparse it
461
+ is. The task of decoding xhi(n) from a single measurement
462
+ seems like a hopelessly “ill-posed" problem, caused by the
463
+ uniform downsampling operation. But this is precisely where
464
+ the binary nature of xhi can be used as a powerful prior to
465
+ make the problem well-posed. Theorem 1 specifies conditions
466
+ under which it is possible to do so.
467
+ Theorem 1. (Identifiability) For any α ∈ (0, 1), with the
468
+ possible exception of α belonging to a set of Lebesgue measure
469
+ zero, there is a unique xhi ∈ {0, A}L that satisfies (10) for
470
+ every D ≥ 1.
471
+ Proof. In Appendix A.
472
+ Using Lemma 1 and Theorem 1, we can conclude that xhi
473
+ is uniquely identifiable from ylo for almost all α ∈ (0, 1). It
474
+ can be verified that for α = 1 the mapping is non-injective.
475
+ Theorem 1 establishes that it is fundamentally possible to
476
+ decode each block xhi(n) of length D, from effectively a single
477
+ measurement c[n]. Since xhi(n) can take 2D possible values, in
478
+ principle, one can always perform an exhaustive search over
479
+ these 2D possible binary sequences and by Theorem 1, only
480
+ one of them will satisfy c[n] = h⊤
481
+ α xhi(n). Since exhaustive
482
+ search is computationally prohibitive, this leads to the natural
483
+ question regarding alternative solutions. In Section III, we will
484
+ develop an alternative algorithm that leverages the trade-off
485
+ between memory and computation to achieve a significantly
486
+ lower run-time decoding complexity.
487
+ B. Comparison with Finite Rate of Innovation Approach
488
+ In a related line of work [30]–[32], [34], the FRI framework
489
+ has been developed to reconstruct spikes from the measurement
490
+ model considered here. However, in the general FRI framework,
491
+ there is no assumption on the amplitude of the spikes, and there
492
+ are a total of 2D real valued unknowns corresponding to the
493
+ locations and amplitudes of D spikes. In [32], it was shown that
494
+ by leveraging the property of exponentially reproducing kernels,
495
+ it is possible to recover arbitrary amplitudes and spike locations
496
+ using Prony-type algorithms, provided at least 2D+1(> D) low-
497
+ rate measurements are available. However, since we exploit
498
+ the binary nature of spiking activity, we can operate at a
499
+ much smaller sample complexity than FRI. In fact, Theorem
500
+ 1 shows that when we exploit the fact that the spikes occur
501
+ on a high-resolution grid with binary amplitudes, M = Ω(1)
502
+ measurements suffice to identify D spikes regardless of how
503
+ large D is. A direct application of the FRI approach cannot
504
+ succeed in this regime, since the number of spikes is larger than
505
+ the number of measurements. That being said, with enough
506
+ measurements, FRI techniques are powerful, and they can also
507
+ identify off-grid spikes. In future, it would be interesting to
508
+ combine the two approaches by incorporating binary priors to
509
+ FRI based techniques and remove the grid assumptions.
510
+ C. Curse of Uniform Downsampling: Inadequacy of sparsity
511
+ and non-negativity
512
+ By virtue of being a binary signal, xhi is naturally sparse and
513
+ non-negative. Therefore, one may ask if sparsity and/or non-
514
+ negativity are sufficient to uniquely identify xhi from c, without
515
+ the need for imposing any binary constraints. In particular, we
516
+ would like to understand if the solution to the following problem
517
+ that seeks the sparsest non-negative vector in RL satisfying
518
+ (10) indeed coincides with the true xhi ∈ {0, A}L
519
+ min
520
+ x∈RL
521
+ ∥x∥0
522
+ subject to c = HD(α)x,
523
+ x ≥ 0
524
+ (P0)
525
+ Lemma 2. For every xhi ∈ {0, A}L (except xhi = Ae1),
526
+ and c ∈ RM satisfying (10), the following are true
527
+ (i) There exists a solution x⋆ ̸= xhi to (P0) satisfying
528
+ ∥x⋆∥0 ≤ ∥xhi∥0
529
+ (14)
530
+ (ii) The inequality in (14) is strict as long as there exists an
531
+ integer n0 ≥ 1 such that the block x(n0)
532
+ hi
533
+ of xhi (defined
534
+ in (6)) satisfies ∥x(n0)
535
+ hi
536
+ ∥0 ≥ 2.
537
+ Proof. The proof is in Appendix B.
538
+ Lemma 2 shows there exist other non-binary solution(s) to
539
+ (10) (different from xhi) that have the same or smaller sparsity
540
+ as the binary signal xhi ∈ {0, A}L. Furthermore, there exist
541
+ problem instances where the sparsest solution to (P0) is strictly
542
+ sparser than xhi. Hence, sparsity and/or non-negativity are
543
+ inadequate to identify the ground truth xhi uniquely.
544
+ Implicit Bias of Relaxation: The optimization problem (P0)
545
+ is non-convex and the binary constraints are not enforced. In
546
+ binary compressed sensing [35], [36], it is common to relax the
547
+ binary constraints using box-constraint and l0 norm is relaxed
548
+ to l1 norm in the following manner:
549
+ min
550
+ x∈RL ∥x∥1
551
+ subject to c = HD(α)x, 0 ≤ x ≤ A1 (P1-B)
552
+ In the following Lemma, we show that there is an implicit bias
553
+ introduced to the solution of (P1-B).
554
+ Lemma 3. For every xhi ∈ {0, A}L, and c ∈ RM satisfying
555
+ (10). There exists a solution x⋆ to (P1-B) satisfying
556
+ ∥x⋆∥1 ≤ ∥xhi∥1.
557
+ (15)
558
+
559
+ 5
560
+ Moreover, for all n ≥ 1, the blocks x(n)⋆ ∈RD of x⋆ satisfy:
561
+ supp(x(n)⋆) = {D, D − 1, · · · , D − jn}, if c[n] ̸= 0
562
+ (16)
563
+ for some 0 ≤ jn ≤ D − 1 and x(n)⋆ = 0 if c[n] = 0,
564
+ irrespective of the support of xhi.
565
+ Proof. The proof is in Appendix B.
566
+ Lemma 3 shows that even in the noiseless setting, introducing
567
+ the box-constraint as a means of relaxing the binary constraint
568
+ introduces a bias in the support of the recovered spikes.
569
+ The optimal solution always results in spikes with support
570
+ clustered towards the end of each block of length D, irrespective
571
+ of the ground truth spiking pattern xhi that generated the
572
+ measurements. This bias is a consequence of the nature of
573
+ relaxation, as well as the specific structure of the measurement
574
+ matrix HD(α) arising in the problem.
575
+ D. Role of Memory in Super-resolution: IIR vs. FIR filters
576
+ The ability to identify the high-rate binary signal xhi ∈
577
+ {0, A}L from D−fold undersampled measurements ylo (for
578
+ arbitrarily large D) in the absence of noise, is in parts also due to
579
+ the “infinite memory" or infinite impulse response of the AR(1)
580
+ filter. Indeed, for an Finite Impulse Response (FIR) filter, there
581
+ is a limit to downsampling without losing identifiability. This
582
+ was recently studied in our earlier work [40] where we showed
583
+ that the undersampling limit is determined by the length of
584
+ the FIR filter. To see this, consider the convolution of a binary
585
+ valued signal xhi with a FIR filter u = [u[0], u[1], · · · , u[r −
586
+ 1]]T ∈ Rr of length r: zf[n] = �r−1
587
+ i=0 u[r − 1 − i]xhi[n + i].
588
+ These samples are represented in the vector form as zf :=
589
+ u⋆xhi ∈ RL (by suitable zero padding). Suppose, as before, we
590
+ only observe a D−fold downsampling of the output zD[n] =
591
+ zf[Dn]. Two consecutive samples zD[p], zD[p + 1] of the low-
592
+ rate observation are given by:
593
+ zD[p] =
594
+ r−1
595
+
596
+ i=0
597
+ u[r − 1 − i]xhi[Dp + i],
598
+ zD[p + 1] =
599
+ r−1
600
+
601
+ i=0
602
+ u[r − 1 − i]xhi[D(p + 1) + i]
603
+ If D > r, notice that none of the measurements is a function of
604
+ the samples xhi[Dp+r], xhi[Dp+r +1], · · · , xhi[D(p+1)−1].
605
+ Hence, it is possible to assign them arbitrary binary values and
606
+ yet be consistent with the low-rate measurements zD[n]. This
607
+ makes it impossible to exactly recover xhi (even if it is known
608
+ to be binary valued) if the decimation is larger than the filter
609
+ length (D > r). The following lemma summarizes this result.
610
+ Lemma 4. For every FIR filter u ∈ Rr, if the undersampling
611
+ factor exceeds the filter length, i.e. D > r, there exist x0, x1 ∈
612
+ {0, A}L, x0 ̸= x1 such that SD(u ⋆ x0) = SD(u ⋆ x1).
613
+ This shows that the identifiability result presented in Theorem
614
+ 1 is not merely a consequence of binary priors but the infinite
615
+ memory of the autoregressive process is also critical in allowing
616
+ arbitrary undersampling D > 1 in absence of noise. For such
617
+ IIR filters, the memory of all past (binary) spiking activity
618
+ is encoded (with suitable weighting) into every measurement
619
+ captured after the spike, which would not be the case for a
620
+ finite impulse response filter.
621
+ III. EFFICIENT BINARY SUPER-RESOLUTION USING
622
+ BINARY SEARCH WITH STRUCTURED MEASUREMENTS
623
+ By Theorem 1, we already know that it is possible to uniquely
624
+ identify xhi from c (or equivalently, each block xhi(n) from
625
+ a single measurement c[n]) by exhaustive search. We now
626
+ demonstrate how this exhaustive search can be avoided by
627
+ formulating the decoding problem in terms of “binary search"
628
+ over an appropriate set, and thereby attaining computational
629
+ efficiency. We begin by introducing some notations and
630
+ definitions. Given a non-negative integer k, 0 ≤ k ≤ 2D − 1,
631
+ let (b1(k), b2(k), · · · , bD(k)) be the unique D-bit binary repre-
632
+ sentation of k: k = �D
633
+ d=1 2D−dbd(k),
634
+ bd(k) ∈ {0, 1} ∀ 1 ≤
635
+ d ≤ D. Here b1(k) is the most significant bit and bD(k) is
636
+ the least significant bit. Using this notation, we define the
637
+ following set:
638
+ Sall := {v0, v1, v2, · · · , v2D−1},
639
+ (17)
640
+ where each vk ∈ {0, A}D is a binary vector given by
641
+ [vk]d = Abd(k).
642
+ 1 ≤ d ≤ D
643
+ (18)
644
+ In other words, the binary vector
645
+ 1
646
+ Avk is the D-bit binary
647
+ representation of its index k. Using this convention, v0 = 0
648
+ (i.e., a binary sequence of all 0′s) and v2D−1 = A1 (i.e., a
649
+ binary sequence of all A′s). Recall the partition of xhi defined
650
+ in (6), where each block xhi(n) (n ≥ 1) is a binary vector of
651
+ length D and xhi(0) ∈ {0, A} is a scalar. It is easy to see that
652
+ (17) comprises of all possible values that each block xhi(n) can
653
+ assume. According to (9) each scalar measurement c[n] can be
654
+ written as: c[0] = x(0),
655
+ c[n] = hα⊤xhi(n), 1 ≤ n ≤ M − 1.
656
+ For every α, we define the following set:
657
+ Θα := {θ0, θ1, · · · , θ2D−1}, where θk := h⊤
658
+ α vk
659
+ (19)
660
+ Observe that every measurement c[n] = �D
661
+ i=1 αD−ixhi[(n −
662
+ 1)D+i] takes values from this set Θα, depending on the value
663
+ taken by the underlying block of spiking pattern from Sall. Our
664
+ goal is to recover the spikes {xhi[(n − 1)D + i]}D
665
+ i=1 from c[n].
666
+ In the following, we show that this problem is equivalent to
667
+ finding the representation of a real number over an arbitrary
668
+ radix, which is known as “β-expansion" [49]. Given a real
669
+ (potentially non-integer) number β > 1, the representation of
670
+ another real number p ≥ 0 of the form:
+ p = Σ_{n=1}^{∞} a_n β^{−n}, where 0 ≤ a_n < ⌊β⌋,        (20)
677
+ is referred to as a β-expansion of p. The coefficients 0 ≤ an <
678
+ ⌊β⌋ are integers. This is a generalization of the representation
679
+ of numbers beyond integer-radix to a system where the radix
680
+ can be chosen as an arbitrary real number. This notion of
681
+ representation over arbitrary radix was first introduced by Renyi
682
+ in [49], and since then has been extensively studied [47], [48],
683
+ [50]. There is a direct connection between β-expansion and
684
+ the binary super-resolution problem considered here. In the
685
+ problem at hand, any element θk ∈ Θα can be written as:
+ θk = hα⊤ vk = Σ_{i=1}^{D} α^{D−i} [vk]_i.
692
+ When 1/2 < α < 1, by letting β = 1/α, we see that the
693
+ coefficients in (20) must satisfy 0 ≤ an < ⌊1/α⌋ < 2, i.e.,
694
696
+ they are restricted to be binary valued an ∈ {0, 1}. Therefore,
697
+ decoding the spikes vk from the observation θk is equivalent
698
+ to finding a D−bit representation for the number θk/A over
699
+ the non-integer radix β = 1/α. Questions regarding the
700
+ existence of a β-expansion, and finding the coefficients of a finite
+ β-expansion (whenever it exists), have been an active topic of
+ research [47], [48], [50], [51]. When β ≥ 2 (equivalently,
703
+ 0 < α ≤ 1/2), it is possible to find the coefficients using
704
+ a greedy algorithm which proceeds in a fashion similar to
705
+ finding the D-bit binary representation of an integer [47], [51].
706
+ However, the regime β ∈ (1, 2) (equivalently 1/2 < α < 1),
707
+ is significantly more complicated and is of continued research
708
+ interest [47], [48], [50]. To the best of our knowledge, there
709
+ are no known computationally efficient ways to find the finite
710
+ β-expansion when 1/2 < α < 1 (if it exists) [N. Sidorov,
711
+ personal communication, May 24, 2022]. In practice, we
712
+ encounter filter values α (= 1/β) that are much closer to
713
+ 1, and hence, we need an alternative approach to find this
714
+ finite β-radix representation for θk. In the next section, we
715
+ show that by performing a suitable preprocessing, finite β-radix
716
+ representation can be formulated as a binary search problem
717
+ which is guaranteed to succeed for all values of β that permit
718
+ unique finite β−expansions.
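+ For the easy regime β ≥ 2 (i.e., 0 < α ≤ 1/2) mentioned above, a possible
+ greedy digit-by-digit decoder is sketched below for illustration (assuming
+ A = 1); it relies on the fact that in this regime each weight α^j exceeds
+ the sum of all smaller weights, and it is not the general procedure of
+ [47], [51].
+
+     def greedy_decode(c, alpha, D, A=1.0):
+         # Peel off weights from the largest (alpha^0, last sample of the block)
+         # down to the smallest (alpha^(D-1), first sample of the block).
+         bits = [0.0] * D
+         residual = c
+         for j in range(D):
+             w = A * alpha ** j
+             if residual >= w - 1e-12:          # tolerance for floating point
+                 bits[D - 1 - j] = A
+                 residual -= w
+         return bits                            # bits[i-1] is x_hi[(n-1)D + i]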
719
+ A. Formulation as a Binary Search Problem
720
+ Before describing the algorithm, we first introduce the notion
721
+ of a collision-free set.
722
+ Definition 1 (Collision Free set). Given an undersampling
723
+ factor D, define a class of “collision free" AR(1) filters as:
724
+ GD = {α ∈ (0, 1) s.t. hα⊤ vi ̸= hα⊤ vj ∀ i ̸= j, vi, vj ∈ Sall}
727
+ The set GD denotes permissible values of the AR(1) filter
728
+ parameter α such that each of the 2D binary sequences in
729
+ Sall maps to a unique element in the set Θα. In other words,
730
+ every θk ∈ Θα has a unique D−bit expansion for all α ∈ GD.
731
+ This naturally raises the question “How large is the set GD?".
732
+ Theorem 1 already provided the answer to this question, where
733
+ the identifiability result implies that for every D, almost all
734
+ α ∈ (0, 1) belong to this set GD (with the possible exception
735
+ of a measure zero set). Hence, Theorem 1 ensures that there
736
+ are infinite choices for collision-free filter parameters.
737
+ Lemma 5. For every α ∈ GD, the mapping Φα(.) : Sall → Θα,
738
+ Φα(v) = h⊤
739
+ α v forms a bijection between Sall and Θα.
740
+ Proof. Since α ∈ GD, from the definition of the set GD, it is
741
+ clear that for any vi, vj ∈ Sall, vi ̸= vj we have hα⊤vi ̸=
742
+ hα⊤vj. Therefore, the mapping is injective. Furthermore, from
743
+ (19) we also have |Θα| ≤ |Sall| = 2D. Since Φα(·) is injective,
744
+ we must also have |Θα| = 2D and hence the mapping Φα(.)
745
+ forms a bijection between Sall and Θα.
746
+ When α ∈ GD, Lemma 5 states that the finite beta expansion
747
+ for every θk ∈ Θα is unique. Lemma 5 provides a way to avoid
748
+ exhaustive search over Sall, and yet identify xhi(n) from c[n] in
749
+ a computationally efficient way. From Lemma 5, we know that
750
+ each of the 2D spiking patterns in Sall maps to a unique element
751
+ in Θα, and each element in Θα has a corresponding spiking
752
+ pattern. Hence instead of searching Sall, we can equivalently
753
+ search the set Θα in order to determine the unknown spiking
754
+ pattern. Since Θα permits “ordering", searching Θα has a
755
+ distinct computational advantage over searching Sall. This
756
+ ordering enables us to employ binary search over (an ordered)
757
+ Θα and find the desired element in a computationally efficient
758
+ manner. To do this, we first sort the set Θα (in ascending order)
759
+ and arrange the corresponding elements of Sall in the same
760
+ order. Given Θα as an input, the function SORT(·) returns
+ a sorted list Θα^sort, and an index set I = {i0, i1, · · · , i_{2^D−1}}
+ containing the indices of the sorted elements in the list Θα:
+ Θα^sort, I ← SORT(Θα).
+ Let us denote the elements of the sorted lists as Θα^sort =
+ {θ̃0, · · · , θ̃_{2^D−1}} and Sall^sort = {ṽ0, · · · , ṽ_{2^D−1}}, where
+ θ̃0 < θ̃1 < · · · < θ̃_{2^D−1},  θ̃j = θ_{ij},  ṽj = v_{ij}  ∀j.
775
+ It is important to note that this sorting step does not depend
776
+ on the measurements c, and can therefore be part of a pre-
777
+ processing pipeline that can be performed offline. However,
778
+ it does require memory to store the sorted lists. In the
779
+ Algorithm 1 Noiseless Spike Recovery
+ 1: Input: Measurement c[n], sorted list Θα^sort and the corresponding (ordered) spike patterns Sall^sort
+ 2: Output: Decoded spike block x̂hi(n)
+ 3: i⋆ ← BINSEARCH(Θα^sort, c[n])
+ 4: Return x̂hi(n) ← ṽ_{i⋆}
789
+ noiseless setting, we know that every scalar measurement
+ c[n] = hα⊤ xhi(n) belongs to the set Θα^sort. Therefore, if we
+ identify its index, say i⋆, then we can successfully recover
+ xhi(n) by returning the corresponding binary vector ṽ_{i⋆} from
+ Sall^sort. Therefore, we can formulate the decoding problem as
+ searching for the input c[n] in the sorted list Θα^sort. This can be
+ efficiently done by using “Binary Search". The noiseless spike
+ decoding procedure is summarized as Algorithm 1. Since the
+ complexity of performing a binary search over an ordered list
+ of N elements is O(log N), the complexity of Algorithm 1
+ is logarithmic in the cardinality of Θα^sort, which results in a
+ complexity of O(log(2^D)) = O(D). We summarize this result
806
+ in the following Lemma.
807
+ Lemma 6. Assume α ∈ GD. Given the ordered set Θα^sort and
+ an input c[n] = hα⊤ xhi(n), Algorithm 1 terminates in O(D)
+ steps and its output x̂hi(n) satisfies x̂hi(n) = xhi(n).
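+ A minimal sketch of the offline sorting step and of Algorithm 1 (exact
+ binary search over the sorted list), assuming A = 1 and illustrative
+ values α = 0.9, D = 5, is given below.
+
+     import bisect
+     import numpy as np
+
+     D, alpha = 5, 0.9
+     h = alpha ** np.arange(D - 1, -1, -1)
+     S_all = np.array([[(k >> (D - 1 - d)) & 1 for d in range(D)]
+                       for k in range(2 ** D)], dtype=float)
+     order = np.argsort(S_all @ h)              # offline preprocessing (SORT)
+     Theta_sort, S_sort = (S_all @ h)[order], S_all[order]
+
+     def decode_noiseless(c_n):
+         i_star = bisect.bisect_left(Theta_sort, c_n)   # BINSEARCH over Theta_sort
+         return S_sort[i_star]                          # decoded block
+
+     k = 13                                     # sanity check on one codeword
+     assert np.array_equal(decode_noiseless(Theta_sort[k]), S_sort[k])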
813
+ B. Noisy Measurements and 1-D Nearest Neighbor Search
+ We demonstrate how binary search can still be useful in
+ the presence of noise by formulating noisy spike detection as a
+ one-dimensional nearest neighbor search problem. Suppose
+ {zlo[n]}_{n=0}^{M−1} denotes the noisy D-fold decimated filter output:
+ zlo[n] = ylo[n] + w[n],   0 ≤ n ≤ M − 1.        (21)
824
+ Here w[n] represents the additive noise term that corrupts the
825
+ (noiseless) low-rate measurements ylo[n]. Similar to (7), we
826
+ compute ce[n] from zlo[n] as follows:
+ ce[n] = zlo[n] − α^D zlo[n − 1]        (22)
+       = Σ_{i=1}^{D} α^{D−i} xhi[(n − 1)D + i] + e[n] = c[n] + e[n],        (23)
+ where c[n] = hα⊤ xhi(n) ∈ Θα^sort, and e[n] = w[n] − α^D w[n − 1].
+ We can interpret ce[n] as a noisy/perturbed version of an
+ element c[n] ∈ Θα^sort, with e[n] representing the noise. This
+ perturbed signal may no longer belong to Θα^sort (i.e., ce[n] ̸∈ Θα^sort)
+ and hence, we cannot find an exact match in the set
+ Θα^sort. Instead, we aim to find the closest element in Θα^sort (the
+ nearest neighbor of ce[n]) by solving the following problem:
+ x̂hi(n) = arg min_{v ∈ Sall^sort} |ce[n] − hα⊤ v|        (24)
857
+ Solving (24) is equivalent to finding the spike sequence
+ ṽ ∈ Sall^sort that maps to the nearest neighbor of ce[n] in the
+ set Θα^sort. By leveraging the sorted list Θα^sort, it is no longer
+ necessary to parse the list sequentially (which would incur
+ O(2^D) complexity); instead we can perform a modified binary
+ search, summarized in Algorithm 2, that keeps track of
+ additional indices compared to the vanilla binary search. Finally,
+ we return the unique spiking pattern from Sall^sort that gets
+ mapped to the nearest neighbor of the noisy measurement
+ ce[n]. It is well known that the nearest neighbor of any query
+ can be found in O(log(2^D)) = O(D) steps, instead of the
+ linear complexity of O(2^D). This guarantees computationally
+ efficient decoding of spikes by solving (24).
876
+ Next, we characterize the error events that lead to erroneous
+ detection of a block of spikes. Recall that the set Θα^sort is sorted,
+ and its elements satisfy the ordering:
+ 0 = θ̃0 < θ̃1 < · · · < θ̃_{lD} = 1 + α + · · · + α^{D−1},
+ where lD := 2^D − 1. We also have θ̃k = hα⊤ ṽk, where ṽk ∈ Sall^sort
+ is a binary spiking sequence of length D.
886
+ For each ṽk and each n, we will determine the error event
+ x̂hi(n) ̸= xhi(n) when xhi(n) = ṽk. First, consider the scenario
+ when xhi(n) = ṽk for some 0 < k < lD (excluding ṽ0, ṽ_{lD}).
+ The corresponding noiseless measurement is c[n] = θ̃k = hα⊤ ṽk,
+ which satisfies θ̃k−1 < c[n] = θ̃k < θ̃k+1. Since Θα^sort is
+ sorted, it can be easily verified that the nearest neighbor of
+ ce[n] will be θ̃k if and only if ce[n] satisfies the following
+ condition:
+ (θ̃k−1 + θ̃k)/2 ≤ ce[n] ≤ (θ̃k+1 + θ̃k)/2.        (25)
+ Since θ̃k = hα⊤ ṽk, the solution to (24) is attained at ṽk ∈ Sall^sort,
+ and the decoding is successful. Therefore, Algorithm 2 produces
+ an erroneous estimate of ṽk if and only if ce[n] violates (25).
+ The event ce[n] ̸∈ [(θ̃k−1 + θ̃k)/2, (θ̃k+1 + θ̃k)/2] is equivalent to e[n] ∈ Ek
+ (e[n] is defined earlier in (23)), where
+ Ek = {e[n] < −(θ̃k − θ̃k−1)/2, or e[n] > (θ̃k+1 − θ̃k)/2}.        (26)
+ Finally, we characterize the error events for k = 0, lD. The
+ error events for c[n] = θ̃0 = 0 or c[n] = θ̃_{lD} are given by:
+ E0 = {e[n] ≥ θ̃1/2},  E_{lD} = {e[n] ≤ −(θ̃_{lD} − θ̃_{lD−1})/2}.        (27)
923
+ Define the “minimum distance" between points in Θα^sort:
+ ∆θmin(α, D) = min_{1≤k≤lD} |θ̃k − θ̃k−1|.
+ This minimum distance depends on A, α and D. From (26) and
+ (27), it can be verified that if 2|w[n]| < ∆θmin(α, D)/2 (which
+ would imply |e[n]| < ∆θmin(α, D)/2) for all n, then x̂hi(n) =
+ xhi(n). As summarized in Theorem 2, Algorithm 2 can exactly
+ recover the ground truth spikes from measurements corrupted
+ by bounded adversarial noise; the extent of the robustness is
+ determined by the parameters A, α, and D.
935
+ Algorithm 2 Noisy Spike Recovery
+ 1: Input: Measurement ce[n], sorted list Θα^sort and the corresponding (ordered) spike patterns Sall^sort
+ 2: Output: Decoded spike block x̂hi(n)
+ 3: Set l ← 0, u ← 2^D − 1
+ 4: while u − l > 1
+ 5:     Set m ← l + ⌊(u − l)/2⌋
+ 6:     if θ̃m > ce[n] then
+ 7:         u ← m
+ 8:     else
+ 9:         l ← m
+ 10:    end if
+ 11: end while
+ 12: Find the nearest neighbor i⋆ = arg min_{i∈{l,u}} (ce[n] − θ̃i)²
+ 13: Return x̂hi(n) ← ṽ_{i⋆}
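+ A minimal sketch of the nearest neighbor step of Algorithm 2 is given
+ below; Theta_sort and S_sort denote the pre-sorted lists (for example,
+ as constructed in the sketch following Lemma 6).
+
+     import bisect
+
+     def decode_noisy(c_e, Theta_sort, S_sort):
+         u = bisect.bisect_left(Theta_sort, c_e)        # first element >= c_e
+         l = max(u - 1, 0)
+         u = min(u, len(Theta_sort) - 1)
+         # return the block whose theta value is closest to the noisy measurement
+         i_star = l if abs(c_e - Theta_sort[l]) <= abs(c_e - Theta_sort[u]) else u
+         return S_sort[i_star]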
961
+ Theorem 2. Assume α ∈ GD. Given the ordered set Θα^sort, the
+ output of Algorithm 2 with input ce[n] exactly coincides with
+ the solution of the optimization problem (24) in at most O(D)
+ steps. Furthermore, if |w[n]| < ∆θmin(α, D)/4 for all n, then
+ the output of Algorithm 2 satisfies x̂hi(n) = xhi(n).
967
+ From Theorem 2, it is evident that ∆θmin(α, D) plays an
968
+ important role in characterizing the upper bound on noise.
969
+ We attempt to gain insight into how ∆θmin(α, D) varies as a
970
+ function of α when D is held fixed.
971
+ Lemma 7. Given D, ∆θmin(α, D) = αD−1 for α ∈ (0, 0.5].
972
+ Proof. The proof is in Appendix C.
973
+ When α ∈ (0, 0.5], ∆θmin(α, D) is monotonically increasing
974
+ with α. However, for α > 0.5 the trend fluctuates with α
975
+ differently for different D, and becomes quite challenging to
976
+ predict. This is also confirmed by the empirical plot in Fig. 1.
977
+ A refined analysis of ∆θmin(α, D) to gain insight into desirable
978
+ filter parameters α is an interesting direction for future work.
979
+ C. Trade-off between memory and computational complexity
980
+ A crucial aspect of Algorithms 1 and 2 is that they
+ achieve efficient run-time complexity by leveraging the offline
+ construction of the sorted lists Θα^sort and Sall^sort. These lists,
+ each with 2^D elements, need to be stored in memory and
+ made available during run-time. Since there is no free lunch,
+ the resulting computational efficiency of O(D) at run-time
+ is attained at the expense of the additional memory that is
+ required to store the sorted lists Θα^sort, Sall^sort.
995
+ D. Parallelizable Implementation
996
+ Algorithm 2 (also Algo. 1) only takes ce[n](c[n]) as input
997
+ and returns �xhi(n), and is completely de-coupled from any
998
+ other �xhi(n′), n′ ̸= n. Recall that in reality, we are provided
999
+ with measurements zlo[n](ylo[n]), and ce[n](respectively c[n])
1000
+ needs to be computed. Due to this de-coupling, we can compute
1001
+ ce[n]′s in parallel using two consecutive low-rate samples
1002
+ zlo[n], zlo[n−1] and perform a nearest neighbor search without
1003
+ waiting for any previously decoded spikes. Therefore, the total
1004
+ decoding complexity can be further improved depending on
1005
+ the available parallel computing resources.
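+ A sketch of this decoupled decoding pipeline is shown below (assuming
+ A = 1 and reusing the decode_noisy helper sketched after Algorithm 2);
+ the loop over blocks could be replaced by a parallel map.
+
+     import numpy as np
+
+     def decode_all(z_lo, alpha, D, Theta_sort, S_sort):
+         c_e = z_lo[1:] - (alpha ** D) * z_lo[:-1]      # differencing step, eq. (22)
+         blocks = [decode_noisy(c, Theta_sort, S_sort) for c in c_e]
+         x0 = np.array([np.clip(round(float(z_lo[0])), 0, 1)])   # first scalar block
+         return np.concatenate([x0] + blocks)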
1006
+ IV. ERROR ANALYSIS FOR GAUSSIAN NOISE
1007
+ Algorithm 2 solves (24) without requiring any knowledge
1008
+ of the noise statistics. However, in order to analyze its per-
1009
+ formance, we will make the following (standard) assumptions
1010
+ on the statistics of the high-rate spiking signal xhi and the
1011
+ measurement noise w[n] as follows:
1012
+ • (A1) The entries of the binary vector xhi ∈ {0, A}L are
1013
+ i.i.d random variables distributed as xhi[n] ∼ ABern(p).
1014
+ • (A2) The additive noise w[n], 0 ≤ n ≤ M − 1 is
1015
+ independent of xhi[n], and distributed as w[n] ∼ N(0, σ2)
1016
+ A. Probability of Erroneous Decoding
1017
+ Under assumption (A2), the ML estimate of xhi is given by
1018
+ the solution to the following problem:
1019
+ x̂ML = arg min_{v∈{0,A}^L} ∥zlo − SD Gα v∥2        (PNN)
1023
+ The proposed Algorithm 2 does not attempt to solve
1024
+ (PNN), which is computationally intractable. Instead, it solves
1025
+ a set of M − 1 one dimensional nearest neighbor search
1026
+ problems, by finding the nearest neighbor of ce[n] for each
1027
+ n = 1, 2, · · · , M − 1. This scalar nearest neighbor search is
1028
+ implemented in a computationally efficient manner by using
1029
+ parallel binary search on a pre-sorted list. Notice that by the
1030
+ operation (22), the variance of the equivalent noise term e[n]
1031
+ gets amplified by a factor of at most (1+α2D) < 2. This can be
1032
+ thought of as a price paid to achieve computational efficiency
1033
+ and parallelizability. The following theorem characterizes how
+ certain key quantities of interest, such as the
+ signal-to-noise ratio (SNR), the undersampling factor D, and the filter's
+ frequency response (controlled by α), affect the performance of
+ Algorithm 2.
1038
+ Theorem 3. Suppose α ∈ GD and assumptions (A1-A2) hold.
1039
+ Given δ > 0, if the following condition is satisfied:
1040
+ ∆θmin(α, D)²/σ² ≥ 4 ln(2M/δ)        (28)
1043
+ then Algorithm 2 can exactly recover the binary signal xhi
1044
+ with probability at least 1 − δ.
1045
+ Proof. The proof follows standard arguments for computing
1046
+ the probability of error for symbol detection in Gaussian noise,
1047
+ followed by certain simplifications and is included in Appendix
1048
+ D for completeness.
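+ The sufficient condition (28) can be checked numerically for candidate
+ design parameters, as in the following sketch (assuming A = 1 and
+ illustrative values of α, D, M, σ and δ).
+
+     import numpy as np
+
+     def condition_28_holds(alpha, D, M, sigma, delta):
+         h = alpha ** np.arange(D - 1, -1, -1)
+         S_all = np.array([[(k >> (D - 1 - d)) & 1 for d in range(D)]
+                           for k in range(2 ** D)])
+         theta = np.sort(S_all @ h)
+         d_min = np.min(np.diff(theta))          # Delta theta_min(alpha, D)
+         return d_min ** 2 / sigma ** 2 >= 4 * np.log(2 * M / delta)
+
+     print(condition_28_holds(alpha=0.9, D=5, M=200, sigma=0.01, delta=0.05))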
1049
+ In Fig. 1, we plot ∆θmin(α, D) as a function of D for
1050
+ different values of α. As expected, ∆θmin(α, D) decays as
+ D increases. Understandably, for a fixed α, as D increases,
1052
+ [Figure 1 plots: (top) minimum distance and cluster minimum distance vs. α for D = 4 and D = 5; (bottom) minimum distance vs. undersampling factor D for α = 0.2, 0.5, 0.9.]
+ Fig. 1: Variation of ∆θmin(α, D) as a function of the undersampling factor
+ D and α. The cluster-distance ∆min^c(α, D) vs. α is also overlaid. Each
1098
+ dotted line denotes the start of the interval FD.
1099
+ it becomes harder to recover the spikes exactly, and higher
1100
+ SNR is needed to compensate for the lower sampling rate.
1101
+ This can be interpreted as the price paid for super-resolution
1102
+ in presence of noise. This phenomenon is also reminiscent of
1103
+ the noise amplification effect in super-resolution, where the
1104
+ ability to super-resolve point sources becomes more severely
1105
+ hindered by noise as the target resolution grid becomes finer
1106
+ [6]. In Fig. 1, we plot ∆θmin(α, D) as a function of α and as
1107
+ predicted by Lemma 7, it monotonically increases upto 0.5,
1108
+ but for α > 0.5, the behavior becomes much more erratic
1109
+ and a precise characterization becomes challenging. It is to
1110
+ be noted that in Theorem 3, we aim to exactly recover xhi.
1111
+ The SNR requirement can be relaxed if our goal is to recover
1112
+ only spike counts instead of the true spikes as discussed in the
1113
+ next subsection. One can define other notions of approximate
1114
+ recovery, the analysis of which will be a topic of future research.
1115
+ B. Relaxed Spike reconstruction: Count Estimation
1116
+ As shown in Theorem 2, exact recovery of spikes is possible
1117
+ under somewhat restrictive condition on the noise in terms
1118
+ of ∆θmin(α, D), which becomes quite small as D increases.
1119
+ This naturally calls for other relaxed notions of recovery
1120
+ which can handle larger noise levels. In neuroscience, it is
1121
+ believed that information is encoded as either the spike timing
1122
+ (temporal code) or the firing rates (rate coding) of individual
1123
+ neurons in the brain. Therefore, the spike counts over an
1124
+ interval can be informative to understand neural functions, even
1125
+ when it is impossible to temporally localize the neural spikes.
1126
+ For example, neurons in the visual cortex encode stimulus
1127
+ orientations as their firing rates [52]. We will therefore focus
1128
+ on spike count as an approximate recovery metric, which
1129
+ concerns estimating the number of spikes occurring between
1130
+ two consecutive low-rate measurements instead of resolving
1131
+ the individual spiking activity at a higher resolution.
1132
+ Let γ[n] denote the total number of spikes occurring between
1133
+ two consecutive low-rate samples zlo[n] and zlo[n − 1]. Since
1134
+ xhi and its estimate �xhi are both binary valued (amplitude A),
1135
+ the true spike count (γ[n]) and estimated count (γ̂[n]) are given
+ by: γ[n] = ∥xhi(n)∥0,  γ̂[n] = ∥x̂hi(n)∥0,  n = 1, · · · , M − 1,
+ γ[0] = xhi[0]/A and γ̂[0] = x̂hi[0]/A, since the first block is of
+ size 1 as described in (6). Define a set C_k^D as:
+ C_k^D := {v ∈ {0, A}^D : ∥v∥0 = k},   0 ≤ k ≤ D.
1147
+ It is a collection of all binary vectors (of length D) with spike
1148
+ count k. The ground truth spike block belongs to C_{γ[n]}^D. Any
+ element from C_{γ[n]}^D will give the true spike count. Hence, exact
1152
+ recovery of count can be possible even when spikes cannot be
1153
+ recovered.
1154
+ For a fixed D, we define a set of α denoted by FD:
1155
+ FD := {α ∈ (0, 1) | α^D − α^{D−k0−1} − α^{k0} + 1 < 0}        (29)
1157
+ where k0 = ⌊D/2⌋. We will obtain a sufficient condition for
1158
+ robust spike count estimation when α ∈ FD. It can be shown
1159
+ that for any D, FD will always be non-empty. Define
1160
+ θ_min^k := min_{u∈C_k^D} hα⊤ u,   θ_max^k := max_{u∈C_k^D} hα⊤ u.        (30)
1173
+ Observe that if
+ θ_min^{k+1} > θ_max^k,   k = 0, 1, · · · , D − 1,        (31)
1178
+ then all spike patterns ui ∈ C_k^D (with the same spike count k)
+ are clustered together when mapped onto the real line by the
+ transformation hα⊤ u, as shown in Figure 2. When (31) holds,
1183
+ we can define a “cluster-restricted minimum distance" as:
+ ∆min^c(α, D) := min_{0≤k≤D−1} (θ_min^{k+1} − θ_max^k).        (32)
1191
+ Given a noisy observation ce[n] = hα⊤ xhi(n) + e[n], the solution
+ to the nearest neighbor problem (24) may return an incorrect
+ neighbor θj ̸= hα⊤ xhi(n). However, when (31) holds and if
+ the noisy observation satisfies the following condition:
+ (θ_min^{γ[n]} + θ_max^{γ[n]−1})/2 < ce[n] < (θ_min^{γ[n]+1} + θ_max^{γ[n]})/2,        (33)
1205
+ then the nearest-neighbor decision rule in Algorithm 2 will still
+ ensure that the decoded block lies in C_{γ[n]}^D. This has also been visualized in Fig. 2,
1208
+ where each colored band represents the “safe-zone" for each
1209
+ count and the black dotted-line denotes the boundary. This will
1210
+ result in correct identification of the spike count but will incur
1211
+ error in terms of spiking pattern. We formally summarize this
1212
+ in the following Theorem that provides robustness guarantee
1213
+ for exact count recovery from measurements corrupted by
1214
+ adversarial noise (similar to Theorem 2 for spike recovery).
1215
+ Theorem 4. Assume α ∈ FD. Given the ordered set Θα^sort, let
+ γ̂[n] be the estimated spike count obtained from Algorithm 2
+ with input ce[n]. If |w[n]| < ∆min^c(α, D)/4 for all n, then the
+ count can be exactly recovered, i.e., γ̂[n] = γ[n].
1221
+ Proof. The proof is in Appendix E.
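+ The two distances can be compared numerically, as in the sketch below
+ (assuming A = 1); for α ∈ FD the cluster-restricted distance of (32)
+ exceeds ∆θmin(α, D).
+
+     import numpy as np
+
+     def distances(alpha, D):
+         h = alpha ** np.arange(D - 1, -1, -1)
+         S_all = np.array([[(k >> (D - 1 - d)) & 1 for d in range(D)]
+                           for k in range(2 ** D)])
+         theta = S_all @ h
+         counts = S_all.sum(axis=1)
+         d_theta_min = np.min(np.diff(np.sort(theta)))
+         t_min = np.array([theta[counts == k].min() for k in range(D + 1)])
+         t_max = np.array([theta[counts == k].max() for k in range(D + 1)])
+         clustered = bool(np.all(t_min[1:] > t_max[:-1]))    # condition (31)
+         d_cluster = float(np.min(t_min[1:] - t_max[:-1])) if clustered else None
+         return d_theta_min, d_cluster
+
+     print(distances(0.9, 5))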
1222
+ It is clear that when (31) holds, ∆min^c(α, D) is no smaller
+ than ∆θmin(α, D), since the former is computed only across
+ neighboring clusters, whereas ∆θmin(α, D) computes
+ the minimum distance over all consecutive elements (both
+ inter-cluster as well as intra-cluster) in Θα^sort. This essentially
+ suggests that estimation of counts (for this range of α and
+ D) can be more robust compared to inferring the individual
+ spiking patterns. We also illustrate this numerically in Figure
+ 1 (top), where we plot both ∆min^c and ∆θmin as a function of
1234
+ α and the start of the interval FD (computed numerically) is
1235
+ [Figure 2: the patterns 000 | 100, 010, 001 | 110, 101, 011 | 111 grouped into the clusters C0, C1, C2, C3 on the real line.]
+ Fig. 2: Visualization of the sets C_k^D for D = 3. In this scenario, the
1249
+ spiking patterns corresponding to the same count are clustered together
1250
+ and hence, are favorable for robust count estimation.
1251
+ denoted using dotted lines. For both values of D, we can see
1252
+ that ∆min^c > ∆θmin and the gap grows as α increases.
1254
+ V. NUMERICAL EXPERIMENTS
1255
+ We conduct numerical experiments to evaluate the per-
1256
+ formance of the proposed super-resolution spike decoding
1257
+ algorithm on both synthetic and real calcium imaging datasets.
1258
+ [Figure 3 plots: (top) F-score vs. undersampling factor D for Algorithm 2 and box-constrained l1 minimization with α = 0.5 and α = 0.9 (p = 0.35, s = 350); (bottom) F-score vs. D for an AR(1) filter (α = 0.5) and FIR filters of length r = 3, 5, 7 (p = 0.5, s = 500).]
1306
+ Fig. 3: (Top) Quantitative comparison of Algorithm 2 against box-
1307
+ constrained l1 minimization method with noiseless measurements
1308
+ (with tolerance t0 = 0). (Bottom) (Role of Filter Memory): Average
1309
+ F-score vs. D for FIR and IIR (AR(1)) filters. Each dotted line indicates
1310
+ the corresponding theoretical transition point (D = r).
1311
+ A. Synthetic Data Generation and Evaluation Metrics
1312
+ We create a synthetic dataset by generating high-rate binary
1313
+ spike sequence xhi ∈ {0, 1}L (A = 1 and L = 1000) that
1314
+ satisfies assumption (A1). The spiking probability p controls
1315
+ the average sparsity level given by s := E[∥xhi∥0] = Lp. We
1316
+ aim to reconstruct xhi from M ≈ L/D low-rate measurements
1317
+ zlo[n] defined in (21). Notice that we operate in a regime where
1318
+ the expected sparsity is greater than the total number of low-
1319
+ rate measurements, i.e., s > M. We employ the widely-used
1320
+ F-score metric to evaluate the accuracy of spike detection [4],
1321
+ [10]. The F-score is computed by first matching the estimated
1322
+ and ground truth spikes. An estimated spike is considered a
1323
+ “match" to a ground truth spike if it is within a distance of t0
1324
+ of the ground truth (many-to-one matching is not allowed) [4],
1325
+ [10]. Let K and K′ be the total number of ground truth and
1326
+ estimated spikes, respectively. The number of spikes declared as
1327
+ true positives is denoted by Tp. After the matching procedure,
1328
+ we compute the recall R = Tp/K, which is defined as the
+ ratio of the number of true positives (Tp) to the total number of ground
+ truth spikes (K). Precision P = Tp/K′ measures the fraction
+ of the total detected spikes which were correct. Finally, the
+ F-score is given by the harmonic mean of recall and precision:
+ F-score = 2PR/(P + R).
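+ One simple way to compute this metric, using a greedy one-to-one matching
+ of estimated spike times to ground truth spike times within the tolerance
+ t0, is sketched below; the exact matching procedure of [4], [10] may differ.
+
+     def f_score(true_times, est_times, t0):
+         true_times, est_times = sorted(true_times), sorted(est_times)
+         used = [False] * len(true_times)
+         tp = 0
+         for t in est_times:
+             # match to the closest unused ground-truth spike within distance t0
+             cands = [(abs(t - g), i) for i, g in enumerate(true_times)
+                      if not used[i] and abs(t - g) <= t0]
+             if cands:
+                 used[min(cands)[1]] = True
+                 tp += 1
+         if tp == 0:
+             return 0.0
+         recall, precision = tp / len(true_times), tp / len(est_times)
+         return 2 * precision * recall / (precision + recall)
+
+     print(f_score([3, 10, 27], [4, 11, 20], t0=2))   # 2 of 3 spikes matched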
1337
+ [Figure 4 panels: for D = 5 (left) and D = 10 (right), the high-rate waveform yhi[n], low-rate samples ylo[n], ground truth spikes xhi[n], and the estimates from Algorithm 2 and from l1 minimization, under small noise (Top) and larger noise (Bottom).]
1585
+ xhi[n]: Ground Truth Spikes, �xhi[n]: Output of Algorithm 2, �xl1[n]: Output of l1 minimization,
1586
+ yhi[n]: High rate waveform, ylo[n]: Low rate samples
1587
+ Fig. 4: Qualitative comparison of Algorithm 2 and box-constrained l1 minimization on simulated data. For each simulation noisy measurements
1588
+ are generated with α = 0.9 such that the noise realization (Top) obeys the bound |w[n]| ≤ ∆θmin (from Theorem 2) and (Bottom) violates
1589
+ the bound. For larger noise (Bottom), the spike recovery is imperfect but the spike count can still be exactly recovered using Algorithm 2.
1590
+ B. Noiseless Recovery: Role of Binary priors and memory
1591
+ We first consider the noiseless setting (w[n] = 0 in (21)).
1592
+ We compare the performance of Algorithm 2 against box-
1593
+ constrained l1 minimization method [35], [36], where we solve:
1594
+ min_{x∈R^L} ∥x∥1  s.t.  ∥ylo − SD Gα x∥2 ≤ ϵ,  0 ≤ x ≤ A1        (P1)
1597
+ For synthetic data, ϵ is chosen using the norm of the noise term
1598
+ ∥w∥2. This oracle choice ensures most favorable parameter
1599
+ tuning for (P1), although a more realistic choice would
+ be to set ϵ = √M σ according to the noise power (σ). In the
1603
+ noiseless setting, we choose ϵ = 0. The problem (P1) is a
1604
+ standard convex relaxation of (P0) which promotes sparsity
1605
+ as well as tries to impose the binary constraint via the box-
1606
+ relaxation (introduced in Section II-C). In Fig. 3 (Top), we plot
1607
+ the F-score (t0 = 0) as a function of D. As can be observed,
1608
+ Algorithm 2 consistently achieves an F-score of 1, whereas the
1609
+ F-score of l1 minimization shows a decay as D increases. This
1610
+ confirms Lemma 3 that for D > 1, using box-constraints with l1
1611
+ norm minimization is not enough to enable exact recovery from
1612
+ low rate measurements. In absence of noise, the performance
1613
+ of Algorithm 2 is not affected by the filter parameter α as
1614
+ shown in Fig. 3 (Top).
1615
+ Next, we compare the reconstruction from the decimated
1616
+ output of (i) an AR(1) filter and (ii) an FIR filter of length
1617
+ r driven by the same input xhi ∈ {0, 1}1000. We choose the
1618
+ FIR filter h = [1, α, · · · , αr−1]⊤ (truncation of the IIR filter)
1619
+ with α = 0.5. Algorithm 2 is applied to the low-rate AR(1)
1620
+ measurements, whereas the algorithm proposed in [40] is used
1621
+ for the FIR case. The algorithm applied for the FIR case can
1622
+ provably operate with the optimal number of measurements
1623
+ when α = 0.5 and hence, we chose this specific value for
1624
+ the filter parameter. In Figure 3 (Bottom), we again compare
1625
+ the average F-score as a function of D, averaged over 10000
1626
+ Monte Carlo runs, for p = 0.5. As predicted by Lemma 4,
1627
+ despite utilizing binary priors, the error for the FIR filter shows
1628
+ a phase transition when D > r. This demonstrates the critical
1629
+ role played by the infinite memory of the AR(1) filter in
1630
+ achieving exact recovery with arbitrary D.
1631
+ C. Performance of noisy spike decoding
1632
+ We generate noisy measurements of the form (21), where
1633
+ w[n] and xhi[n] satisfy assumptions (A1-A2). We illustrate
1634
+ some representative examples of recovered spikes on synthetic
1635
+ data. In Fig. (4), we display the recovered super-resolution
1636
+ estimates on synthetically generated measurements for two
1637
+ undersampling factors D = 5 (left), 10 (right). For each D, the
1638
+ top plots show the spikes recovered using Algorithm 2 and l1
1639
+ minimization with box-constraint where the noise realization
1640
+ obeys the bound in Theorem 2, while the bottom plots show
1641
+ the same for noise realization violating the bound. The output
1642
+ of l1 minimization with box-constraint is inaccurate, and the
1643
+ spikes are clustered towards the end of each block of length
1644
+ D. This bias is consistent with the prediction made by our
1645
+ theoretical results in Lemma 3. When the noise is small enough
1646
+ (top), Algorithm 2 exactly decodes the spikes, including the
1647
+ ones occurring between two consecutive low-rate samples as
1648
+ predicted by Theorem 2. In presence of larger noise (violating
1649
+ the bound), the spikes estimated using l1 minimization continue
1650
+ to be biased to be clustered towards the end of the block.
1651
+ Although the spikes recovered using Algorithm 2 are not exact,
1652
+ most of the detected spikes are within a tolerance window of
1653
+ ground truth spikes. In fact, the spike count estimation is perfect
1654
+ as predicted by Theorem 4. We next quantitatively evaluate
1655
+ the performance in presence of noise, where the metrics are
1656
+ computed with t0 = 2. In Fig. 5 (Top), we plot the F-score
1657
+ as a function of D for different values of α. For a fixed α,
1658
+ the F-score of both methods decays with increasing D, but
1659
+ Algorithm 2 consistently attains a higher F-score compared to
1660
+
1661
+ [Figure 5 plot: F-score vs. undersampling factor D for Algorithm 2 and box-constrained l1 minimization with α = 0.5 and α = 0.9 (p = 0.35, s = 350 > M).]
1683
+ Fig. 5: Spike detection performance with noisy measurements. (Top)
1684
+ F-score vs. D for different filter parameters α (σ = 0.01). Here,
1685
+ L = 1000 and expected sparsity s = 350 where we operate in the
1686
+ regime s > M. The F-score is computed with a tolerance of t0 = 2.
1687
+ l1 minimization. We observe that α = 0.5 leads to a higher F-
1688
+ score potentially due to having a larger ∆θmin(α, D) compared
1689
+ to α = 0.9. Next, in Fig. 7, we study the behavior of spike
1690
+ detection as a function of the spiking probability p, while
1691
+ keeping D fixed at D = 5. When σ is fixed, the performance
1692
+ trend is not significantly affected by the spiking probability.
1693
+ At first, this may seem surprising as the expected sparsity
1694
+ is growing while the number of measurements is unchanged.
1695
+ However, since our algorithm exploits the binary nature of
1696
+ the spikes (and not just sparsity), it can handle larger sparsity
1697
+ levels. The spikes reconstructed using l1 minimization achieve
1698
+ a much lower F-score than Algorithm 2 since the former fails
1699
+ to succeed when the sparsity is large. As expected, smaller σ
1700
+ leads to higher F-scores.
1701
+ In Fig. 8, we study the probability of erroneous spike
1702
+ detection as a function of D and validate the upper bound
1703
+ derived in Theorem 3. Recall that the decoding is considered
1704
+ successful if “every" spike is detected correctly. Therefore, it
1705
+ becomes more challenging to “exactly super-resolve" all the
1706
+ spikes in presence of noise as the desired resolution becomes
1707
+ finer. We calculate the empirical probability of error and overlay
1708
+ the corresponding theoretical bound. As shown in Fig. 8, the
1709
+ empirical probability of error is indeed upper bounded by the
1710
+ bound computed by our analysis. The empirical probability of
1711
+ error increases as a function of undersampling factor D.
1712
+ [Figure 6 plots: (top) F-score vs. noise level and (bottom) count estimation error vs. noise level, for Algorithm 2 and box-constrained l1 minimization with α = 0.5 and α = 0.9 (D = 5, M = 200, s/M > 1).]
1747
+ Fig. 6: Spike detection performance with noisy measurements for
1748
+ different filter parameters α. (Top) F-score vs. noise level (σ) (Bottom)
1749
+ Count estimation error vs. noise level. Here, L = 1000 and expected
1750
+ sparsity is fixed at s = 350 where we operate in the regime s > M.
1751
+ The F-score is computed with a tolerance of t0 = 2.
1752
+ Finally, we evaluate the noise tolerance of the proposed
1753
+ methodology by comparing the average F-score as a function
1754
+ of the noise level σ, while keeping the spiking rate and
1755
+ undersampling factor fixed at p = 0.35 and D = 5, respectively.
1756
+ As seen in Fig. 6 (Top), the performance of both algorithms
1757
+ degrades with increasing noise level and this is also consistent
1758
+ with the intuition that it becomes harder to super-resolve spikes
1759
+ with more noise. However, for both filter parameters considered
1760
+ in this experiment Algorithm 2 has a higher F-score compared
1761
+ to box-constrained l1 minimization. For large noise levels
1762
+ (comparable to spike amplitude A = 1), the performance gap
1763
+ decreases for α = 0.9 but Algorithm 2 achieves a much higher
1764
+ F-score for α = 0.5 at all noise levels.
1765
+ As discussed in Section IV-B, we next study a relaxed
1766
+ notion of spike recovery which focuses on the spike counts
1767
+ occurring between two consecutive low-rate samples. Let Γ =
1768
+ [γ[0], · · · , γ[M − 1]]⊤ be the vector of counts and �Γ be its
1769
+ estimate. In Fig. 6 (Bottom) we plot the average l1 distance
1770
+ ∥Γ − �Γ∥1 as a function of the noise level. We observe that for
1771
+ α = 0.9 (it can be verified from Fig. 1 (Top) that 0.9 ∈ F5), it
1772
+ is possible to exactly recover the spike counts at higher noise
1773
+ even though the F-score (for timing recovery) has dropped
1774
+ below 1. However, this is not the case for α = 0.5, since
1775
+ 0.5 ̸∈ F5. This is consistent with the conclusion of Theorem 4
1776
+ which states that when α ∈ FD, the noise tolerance for exact
1777
+ count recovery can be much larger than exact spike recovery
1778
+ since ∆min^c(α, D) > ∆θmin(α, D).
1780
+ [Figure 7 plot: F-score vs. spiking probability p for Algorithm 2 and box-constrained l1 minimization with σ = 0.001 and σ = 0.01 (D = 5, M = 200, s/M > 1).]
1799
+ Fig. 7: Spike detection performance with noisy measurements. F-score
1800
+ vs. spiking probability (p) for different noise levels σ (fix α = 0.9,
1801
+ D = 5, L = 1000) in the extreme compression regime s > M.
1802
+ [Figure 8 plot: empirical probability of error vs. undersampling factor D for Algorithm 2 with α = 0.9 and α = 0.95, together with the corresponding theoretical bounds (s = 30, L = 100).]
1825
+ Fig. 8: Probability of erroneous detection of high-rate spikes xhi ∈
1826
+ {0, 1}L as a function of the undersampling factor D. Theoretical
1827
+ upper bounds are overlaid using dotted lines. Here, L = 100.
1828
+ D. Spike Deconvolution from Real Calcium Imaging Datasets
1829
+ We now discuss how the mathematical framework developed
1830
+ in this paper can be used for super-resolution spike deconvo-
1831
+ lution in calcium imaging. Two-photon calcium imaging is a
1832
+ widely used imaging technique for large scale recording of
1833
+ neural activity with high spatial but poor temporal resolution. In
1834
+ calcium imaging, the signal xhi corresponds to the underlying
1835
+ neural spikes which is modeled to be binary valued on a finer
1836
+ temporal scale [2], [46]. Each neural spike results in a sharp
1837
+ rise in Ca2+ concentration followed by a slow exponential
1838
+ decay, leading to superposition of the responses from nearby
1839
1841
+ spiking events [2]–[4]. This calcium transient can be modeled
1842
+ by the first order autoregressive model introduced in Section
1843
+ II. The decay time constant depends on the calcium indicator
1844
+ and essentially determines the filter parameter α. The signal
1845
+ yhi[n] is an unobserved signal corresponding to sampling the
1846
+ calcium fluorescence at a high sampling rate (at the same rate
1847
+ as the underlying spikes). The observed calcium signal ylo[n]
1848
+ corresponds to downsampling yhi[n] at an interval determined
1849
+ by the frame rate of the microscope. The frame rate of a
1850
+ typical scanning microscopy system (that captures the changes
1851
+ in the calcium fluorescence) is determined by the amount of
1852
+ time required to spatially scan the desired field of view, which
1853
+ makes it significantly slower compared to the temporal scale
1854
+ of the neural spiking activity. We model this discrepancy by
1855
+ the downsampling operation (by a factor D). Therefore, the
1856
+ mathematical framework developed in this paper can be directly
1857
+ applied to reconstruct the underlying spiking activity at a
1858
+ temporal scale finer than the sampling rate of the calcium signal.
1859
+ Using real calcium imaging data, we demonstrate a way to fuse
1860
+ our algorithm with a popular spike deconvolution algorithm
1861
+ called OASIS [43]. OASIS solves an l1 minimization problem
1862
+ similar to (P1) with only the non-negativity constraint, in order
1863
+ to exploit the sparse nature of the spiking activity. Unlike our
1864
+ approach where we wish to obtain spikes representation on a
1865
+ finer temporal scale, OASIS returns the spike estimates on the
1866
+ low-resolution grid. This is typically used to infer the spiking
1867
+ rate over a temporal bin equal to the sampling interval. We
1868
+ demonstrate that our proposed framework can be integrated with
1869
+ OASIS and improve its performance. As we saw in the synthetic
1870
+ experiments, the noise level is an important consideration. By
1871
+ augmenting Algorithm 2 with OASIS, referred to as “B-OASIS",
+ the denoising power of l1 minimization can be leveraged. Let
+ x̂l1 ∈ RM be the estimate obtained on a low-resolution grid
1874
+ by solving the l1 minimization problem such as the one
1875
+ implemented in OASIS. We can obtain an estimate of the
1876
+ denoised calcium signal as ŷlo[n] = α^D ŷlo[n − 1] + x̂l1[n], n ≥ 1,
+ and ŷlo[0] = x̂l1[0]. We can now utilize the denoised calcium
1878
+ signal �ylo[n] generated by OASIS to obtain the estimate ce[n]
1879
+ indirectly. Due to the non-linear processing done by OASIS, it
1880
+ is difficult to obtain the resulting noise statistics. An important
1881
+ advantage of Algorithm 2 is that it does not rely on the
1882
+ knowledge of the noise statistics. Hence, we can directly apply
1883
+ Algorithm 2 on �ce[n] = �ylo[n]−αD�ylo[n−1] (instead of ce[n])
1884
+ to obtain a binary “fused super-resolution spike estimate".
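+ A minimal sketch of this fusion step is given below (assuming A = 1 and
+ reusing the decode_noisy helper and the pre-sorted lists from the earlier
+ sketches); note that, by construction, the differenced signal reduces to
+ the OASIS estimate itself in this noiseless reconstruction.
+
+     import numpy as np
+
+     def b_oasis(x_l1, alpha, D, Theta_sort, S_sort):
+         y_hat = np.zeros(len(x_l1))
+         y_hat[0] = x_l1[0]
+         for n in range(1, len(x_l1)):
+             y_hat[n] = (alpha ** D) * y_hat[n - 1] + x_l1[n]   # denoised calcium
+         c_e = y_hat[1:] - (alpha ** D) * y_hat[:-1]            # equals x_l1[1:]
+         return np.concatenate([decode_noisy(c, Theta_sort, S_sort) for c in c_e])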
1885
+ [Figure 9 bar plots: recall and F-score of OASIS and B-OASIS at 60 Hz (left) and 30 Hz (right).]
1913
+ Fig. 9: Spike detection performance of OASIS and B-OASIS on
1914
+ GCaMP6f dataset sampled at (Left) 60 Hz and (Right) 30 Hz. We
1915
+ compare the average F-score of data points where the F-score of
1916
+ OASIS is < 0.5. Standard deviation is depicted using the error bars.
1917
+ [Figure 10 traces over a ~2 s window (t = 20 s to 22 s): low-rate calcium signal ylo[n], ground truth spikes xhi[n], B-OASIS estimate, and OASIS estimate.]
1967
+ Fig. 10: Example of spike reconstruction on GENIE dataset (GCaMP6f
1968
+ indicator) using OASIS and B-OASIS (binary augmented) with
1969
+ calcium signal sampled at 30Hz.
1970
+ E. Results
1971
+ We evaluate the algorithms on the publicly available GENIE
1972
+ dataset [53], [54] which consists of simultaneous calcium imag-
1973
+ ing and in vivo cell-attached recording from the mouse visual
1974
+ cortex using the genetically encoded calcium indicator
+ GCaMP6f [53], [54]. The calcium images were acquired at a
1976
+ frame rate of 60 Hz and the ground truth electrophysiology
1977
+ signal was digitized at 10 KHz and synchronized with the
1978
+ calcium frames. In addition to using the original data, we also
1979
+ synthetically downsample it to emulate the effect of a lower
1980
+ frame rate of 30 Hz, and evaluate how the performance changes
1981
+ by this downsampling operation.
1982
+ In Fig. 10, we extract an interval of ∼ 2 sec (from neuron
+ 1 of the GCaMP6f indicator dataset) and qualitatively compare
+ the detected spikes with the ground truth. We downsample
+ the data by a factor of 2 to emulate a frame rate of 30 Hz,
+ so the low-rate grid becomes coarser. As a result, we
+ observe an offset between the ground truth spikes and the estimate
+ produced by OASIS. However, with the help of binary priors
1989
+ (B-OASIS), we can output spikes that are not restricted to be
1990
+ on the coarser scale, and this mitigates the offset observed in
1991
+ the raw estimates obtained by OASIS.
1992
+ We quantify the improvement in the performance by com-
1993
+ paring the F-scores of OASIS and B-OASIS at both sampling
1994
+ rates (60 and 30 Hz). Since the output of OASIS is non-
1995
+ binary, the estimated spikes are binarized by thresholding.
1996
+ To ensure a fair comparison, we select the threshold by a
1997
+ 80 − 20 cross-validation scheme that maximizes the average
1998
+ F-score on a held-out validation set (averaged over 3-random
1999
+ selections of the validation set). The tolerance for the F-score
2000
+ was set at 100 ms. The dataset consisted of 34 traces of
2001
+ length ∼ 234 s. The OASIS algorithm has an automated
2002
+ routine to estimate the parameter α, which we utilize for
2003
+ our experiments. The amplitude A is estimated using the
2004
+ procedure described in Appendix F. We use D = 12 to obtain
2005
+ the spike representation for B-OASIS. In order to quantify
2006
+ the performance boost achieved by augmentation, we isolate
2007
+ the traces where the F−score of OASIS drops below 0.5
2008
+ and compare the average F-score and recall for these data
2009
+ points. As shown in Fig. 9, at both sampling rates, we see a
2010
+ significant improvement in the average F-score of B-OASIS
2011
+ over OASIS, attributed to an increase in recall while keeping the
2012
+ precision unchanged. Additionally, despite downsampling, the
2013
+ spike detection performance is not significantly degraded with
2014
+ binary priors, although the detection criteria were unchanged.
2015
+
2016
+ 13
2017
+ VI. CONCLUSION
2018
+ We theoretically established the benefits of binary priors in
2019
+ super-resolution, and showed that it is possible to achieve
2020
+ significant reduction in sample complexity over sparsity-
2021
+ based techniques. Using an AR(1) model, we developed
2022
+ and analyzed an efficient algorithm that can operate in the
2023
+ extreme compression regime ( M ≪ K) by exploiting the
2024
+ special structure of measurements and trading memory for
2025
+ computational efficiency at run-time. We also demonstrated that
2026
+ binary priors can be used to boost the performance of existing
2027
+ neural spike deconvolution algorithms. In the future, we will
2028
+ develop algorithmic frameworks for incorporating binary priors
2029
+ into different neural spike deconvolution pipelines and evaluate
2030
+ the performance gain on diverse datasets. The extension of
2031
+ this binary framework for higher-order AR filters is another
2032
+ exciting future direction.
2033
+ APPENDIX
2034
+ APPENDIX A: PROOF OF THEOREM 1
2035
+ Proof. We show that for any α in 0 < α < 1, except possibly
2036
+ for a set consisting of only a finite number of points, (10)
2037
+ always has a unique binary solution. Consider all possible
2038
+ D−dimensional ternary vectors with their entries chosen from
2039
+ {−1, 0, 1}, and denote them as v(i) = [v(i)
2040
+ 1 , v(i)
2041
+ 2 , · · · , v(i)
2042
+ D ]T ∈
2043
+ {−1, 0, 1}D, 0 ≤ i ≤ 3D − 1. We use the convention that
2044
+ v(0) = 0. For every i > 0, we define a set Zv(i) determined
2045
+ by v(i) as Zv(i) :=
2046
+
2047
+ x ∈ (0, 1)
2048
+ �� �D
2049
+ k=1 v(i)
2050
+ k xD−k = 0
2051
+
2052
+ . Notice
2053
+ that pi(x) := �D
2054
+ k=1 v(i)
2055
+ k xD−k denotes a polynomial (in x) of
2056
+ degree at most D−1, whose coefficients are given by the ternary
2057
+ vector v(i). The set Zv(i) denotes the set of zeros of pi(x) that
2058
+ are contained in (0, 1). Since the degree of pi(x) is at most
2059
+ D−1, Zv(i) is a finite set with cardinality at most D−1.
2060
+ Now suppose that the binary solution of (10) is non-unique,
2061
+ i.e., there exist u, w ∈ {0, A}L, u ̸= w, such that
2062
+ HD(α)u = HD(α)w ⇒ HD(α)u − HD(α)w = 0
2063
+ (34)
2064
+ By partitioning u, w into blocks u(n), w(n) in the same way
2065
+ as in (6), we can re-write (34) as u(0) = w(0) and
+ Σ_{i=1}^{D} (1/A)([u(j)]i − [w(j)]i) α^{D−i} = 0,   1 ≤ j ≤ M − 1.        (35)
+ Since u ̸= w, they differ in at least one block, i.e., there exists
+ some j0, 1 ≤ j0 ≤ M − 1, such that u(j0) ̸= w(j0). Define
+ b := (1/A)(u(j0) − w(j0)). Then, b is a non-zero ternary vector,
+ i.e., b ∈ {−1, 0, 1}^D. Now from (35), we have
+ Σ_{i=1}^{D} [b]i α^{D−i} = 0,        (36)
+ which implies that α ∈ Zb. Since b can be any one of the 3^D − 1
+ ternary vectors {v(i)}_{i=1}^{3^D−1}, (36) holds if and only if α ∈ S :=
+ ∪_{i=1}^{3^D−1} Zv(i), i.e., α is a root of at least one of the polynomials
2088
+ pi(x) defined by the vectors v(i) as their coefficients. For each
2089
+ v(i), since the cardinality of Zv(i) is at most D−1, S is a finite
2090
+ set (of cardinality at most (D − 1)(3D − 1)), and therefore its
2091
+ Lebesgue measure is 0. This implies that (10) has a non-unique
2092
+ binary solution only if α belongs to the measure zero set S,
2093
+ thereby proving the theorem.
2094
+ APPENDIX B: PROOF OF LEMMA 2 AND LEMMA 3
2095
+ Proof. (i) Let sn denote the sparsity (number of non-zero
2096
+ elements) of the nth block xhi(n) of xhi. Then, the total
2097
+ sparsity is ∥xhi∥0 = Σ_{n=0}^{M−1} sn. We will construct a vec-
2099
+ tor v ∈ RL, v ̸= xhi that satisfies c = HD(α)v and
2100
+ ∥xhi∥0 ≥ ∥v∥0. Following (6), consider the partition of v
2101
+ v = [v(0), v(1)⊤, · · · , v(M−1)⊤]⊤. Firstly, we assign v(0) =
2102
+ c[0] = xhi(0). We construct v(n) as follows. For each n ≥ 1,
2103
+ there are three cases:
2104
+ Case I: sn = 0. In this case, xhi(n) = 0 and hence c[n] = 0.
2105
+ Therefore, we assign v(n) = xhi(n) = 0.
2106
+ Case II: sn = 1. First suppose that [xhi(n)]D = 0. We
2107
+ construct v(n) as follows:
2108
+ [v(n)]k =
2109
+
2110
+ c[n],
2111
+ if k = D
2112
+ 0,
2113
+ else
2114
+ .
2115
+ (37)
2116
+ Next suppose that [xhi(n)]D ̸= 0. Since sn = 1, this implies
2117
+ that [xhi(n)]k = 0, k = 1, · · · , D−1. In this case, we construct
2118
+ v(n) as follows:
2119
+ [v(n)]k =
2120
+
2121
+ c[n]/α,
2122
+ if k = D − 1
2123
+ 0,
2124
+ else
2125
+ .
2126
+ (38)
2127
+ Notice that both (37) and (38) ensure that v(n) ̸= xhi(n) and
2128
+ c[n] = hT
2129
+ αv(n). Moreover, ∥v(n)∥0 = sn.
2130
+ Case III: sn ≥ 2. In this case, we follow the same
2131
+ construction as (37). As before v(n) satisfies c[n] = h⊤
2132
+ α v(n).
2133
+ Since ∥xhi(n)∥0 ≥ 2 and ∥v(n)∥0 = 1, we automatically have
2134
+ v(n) ̸= xhi(n), and ∥v(n)∥0 < sn. Therefore, combining the
2135
+ three cases, we can construct the desired vector v that satisfies
2136
+ v ̸= xhi, c = HD(α)v, and ∥v∥0 ≤ �M−1
2137
+ n=0 sn = ∥xhi(n)∥0.
2138
+ Therefore, the solution x⋆ to (P0) satisfies ∥x⋆∥0 ≤ ∥v∥0 ≤
2139
+ ∥xhi(n)∥0.
2140
+ (ii) In this case, we construct v(n0) according to Case III.
2141
+ Since ∥v(n0)∥0 < sn0, and ∥v(n)∥0 ≤ sn, n ̸= n0, we have
2142
+ ∥v∥0 < ∥xhi∥0, implying ∥x⋆∥0 ≤ ∥v∥0 < ∥xhi∥0.
2143
+ A. Proof of Lemma 3
2144
+ Proof. We will construct a vector v ∈ RL whose support is of
2145
+ the form (16), that is feasible for (P1-B), and we will prove
2146
+ that it has the smallest l1 norm. Using the block structure given
2147
+ by (6), we choose v(0) = c[0]. For each n ≥ 1, we construct
2148
+ v(n) based on the following two cases:
2149
+ Case I: c[n] ≥ A. Let kn be the largest integer such that the
2150
+ following holds: µ[n] := A(1 + α + · · · + αkn−1) ≤ c[n],
2151
+ where 1 ≤ kn ≤ D. Note that kn = 1 always produces a valid
2152
+ lower bound. However, we are interested in the largest lower
2153
+ bound on c[n] of the above form. We choose
2154
+ [v(n)]k =
2155
+
2156
+
2157
+
2158
+
2159
+
2160
+ A,
2161
+ if D − kn + 1 ≤ k ≤ D
2162
+ (c[n] − µ[n])/αkn, if k = D − kn
2163
+ 0, else
2164
+ It is easy to verify that h⊤
2165
+ α v(n) = c[n]. From the definition
2166
+ of kn, it follows that µ[n] ≤ c[n] < µ[n] + Aαkn and hence,
2167
+ 0 ≤ (c[n] − µ[n])/αkn < A, which ensures that v obeys the
2168
+ box-constraints in (P1-B). Now, let vf ∈ RL be any feasible
2169
+ point of (P1-B), which must be of the form vf(0) = c[0], vf(n) =
+ v(n) + r(n), where r(n) ∈ N(hα⊤) is a vector in the null-space
+ of hα⊤. It can be verified that the following vectors {wt}_{t=1}^{D−1}
+ form a basis for N(hα⊤):
+ [wt]k = { 1, if k = t;  −α, if k = t + 1;  0, else },
2195
+ ,
2196
+ Therefore, ∃ {β(n)
2197
+ t
2198
+ }D−1
2199
+ t=1 such that r(n) = �D−1
2200
+ t=1 β(n)
2201
+ t
2202
+ wt. We
2203
+ further consider two scenarios: (i) 1 ≤ kn ≤ D − 2. In this
2204
+ case [v(n)]1 = 0, and for k = 1, 2, · · · D, [v(n)
2205
+ f ]k satisfies 2
2206
+ [v(n)
2207
+ f ]k =
2208
+
2209
+
2210
+
2211
+
2212
+
2213
+
2214
+
2215
+
2216
+
2217
+
2218
+
2219
+
2220
+
2221
+
2222
+
2223
+ β(n)
2224
+ k , if k = 1
2225
+ β(n)
2226
+ k
2227
+ − αβ(n)
2228
+ k−1, if 2 ≤ k ≤ D − kn − 1
2229
+ [v(n)]k + β(n)
2230
+ k
2231
+ − αβ(n)
2232
+ k−1, if k = D − kn
2233
+ A + β(n)
2234
+ k
2235
+ − αβ(n)
2236
+ k−1, if D − kn + 1 ≤ k ≤ D − 1
2237
+ A − αβ(n)
2238
+ k−1, if k = D
2239
+ To ensure v(n)
2240
+ f
2241
+ is a feasible point for (P1-B), the following must
2242
+ hold: 0 ≤ β(n)
2243
+ D−1 ≤ A/α and 0 ≤ β(n)
2244
+ 1
2245
+ ≤ A. For 2 ≤ k ≤ D −
2246
+ kn−1, the constraint [v(n)
2247
+ f ]k ≥ 0 implies β(n)
2248
+ k
2249
+ ≥ αβ(n)
2250
+ k−1. Since
2251
+ β(n)
2252
+ 1
2253
+ ≥ 0, it follows that β(n)
2254
+ k
2255
+ ≥ 0 for all 2 ≤ k ≤ D − kn − 1.
2256
+ For D−kn+1 ≤ k ≤ D−1, the constraint [v(n)
2257
+ f ]k ≤ A implies
2258
+ β(n)
2259
+ k−1 ≥ β(n)
2260
+ k /α. Since β(n)
2261
+ D−1 ≥ 0, it follows that β(n)
2262
+ k
2263
+ ≥ 0 for
2264
+ all D − kn ≤ k ≤ D − 1. (ii) kn ∈ {D − 1, D}. In this case,
2265
+ for k = 1, 2, · · · , D, [v(n)
2266
+ f ]k satisfies
2267
+ [v(n)
2268
+ f ]k =
2269
+
2270
+
2271
+
2272
+
2273
+
2274
+ [v(n)]1 + β(n)
2275
+ 1
2276
+ , if k = 1
2277
+ A + β(n)
2278
+ k
2279
+ − αβ(n)
2280
+ k−1, if 2 ≤ k ≤ D − 1
2281
+ A − αβ(n)
2282
+ k−1, if k = D
2283
+ For 2 ≤ k ≤ D − 1, the box-constraint [v(n)
2284
+ f ]k ≤ A implies
2285
+ β(n)
2286
+ k−1 ≥ β(n)
2287
+ k /α. Since β(n)
2288
+ D−1 ≥ 0, it follows that β(n)
2289
+ k
2290
+ ≥ 0 for
2291
+ all 1 ≤ k ≤ D − 1. Summarizing, we have established that
2292
+ β(n)
2293
+ i
2294
+ ≥ 0, ∀i.
2295
+ Case II: c[n] < A. In this case, v(n) is constructed following
2296
+ (37), and hence v(n)
2297
+ f
2298
+ has the following structure:
2299
+ [v(n)
2300
+ f ]k =
2301
+
2302
+
2303
+
2304
+
2305
+
2306
+ β(n)
2307
+ k , if k = 1
2308
+ −αβ(n)
2309
+ k−1 + β(n)
2310
+ k , if 2 ≤ k ≤ D − 1
2311
+ c[n] − αβ(n)
2312
+ k−1, if k = D
2313
+ To ensure v(n)
2314
+ f
2315
+ is a feasible point, it must hold that β(n)
2316
+ 1
2317
+
2318
+ 0, β(n)
2319
+ k
2320
+ ≥ αβ(n)
2321
+ k−1 ≥ 0 for 2 ≤ k ≤ D − 1. Hence, in both
2322
+ Cases I and II, we established that β(n)
2323
+ k
2324
+ ≥ 0. For each case,
2325
+ since v(n)
2326
+ f
2327
+ is a non-negative vector ∀n, it can be verified that
2328
+ ∥vf∥1 =
2329
+ M−1
2330
+
2331
+ n=0
2332
+ ∥v(n)
2333
+ f ∥1 = v(0)
2334
+ f
2335
+ +
2336
+ M−1
2337
+
2338
+ n=1
2339
+ D
2340
+
2341
+ k=1
2342
+ [v(n)
2343
+ f ]k
2344
+ = c[0] +
2345
+ M−1
2346
+
2347
+ n=1
2348
+ D
2349
+
2350
+ k=1
2351
+ [v(n)]k
2352
+
2353
+ ��
2354
+
2355
+ ∥v∥1
2356
+ +
2357
+ M−1
2358
+
2359
+ n=1
2360
+ D−1
2361
+
2362
+ k=1
2363
+ (1 − α)β(n)
2364
+ k
2365
+ 2In the definition of v(n)
2366
+ f
2367
+ , an assignment will be ignored if the specified
2368
+ interval for k is empty.
2369
+ We used the fact that �D
2370
+ k=1
2371
+ �D−1
2372
+ t=1 β(n)
2373
+ t
2374
+ [wt]k = �D−1
2375
+ t=1 (1 −
2376
+ α)β(n)
2377
+ t
2378
+ . If vf ̸= v, we must have β(n)
2379
+ k
2380
+ ̸= 0 for some k and
2381
+ n > 0. This implies that ∥vf∥1 > ∥v∥1. It is easy to see
2382
+ that the support of the constructed vector is of the form (16).
2383
+ Moreover, based on the above argument, v is the only vector
2384
+ that has the minimum l1 norm among all possible feasible
2385
+ points of (P1-B).
2386
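As a concrete illustration of the block-wise construction used in Cases I and II above, the following Python sketch builds v^(n) from a single measurement c[n]. It is illustrative only and not part of the original proof; the names build_block and h_alpha, and the ordering h_α = [α^{D−1}, ..., α, 1]^T, are our assumptions consistent with the notation used here.

import numpy as np

def h_alpha(alpha, D):
    # h_alpha = [alpha^(D-1), ..., alpha, 1]^T, so that h_alpha^T v = sum_k alpha^(D-k) v_k
    return alpha ** np.arange(D - 1, -1, -1)

def build_block(c_n, alpha, A, D):
    # Construct v^(n) as in Cases I and II of the proof of Lemma 3 (sketch).
    v = np.zeros(D)
    if c_n < A:                      # Case II: single entry c[n] at k = D
        v[D - 1] = c_n
        return v
    # Case I: largest k_n with mu = A*(1 + alpha + ... + alpha^(k_n - 1)) <= c[n]
    k_n, mu = 1, A
    while k_n + 1 <= D and mu + A * alpha ** k_n <= c_n:
        mu += A * alpha ** k_n
        k_n += 1
    v[D - k_n:] = A                                   # entries D - k_n + 1, ..., D set to A
    if D - k_n - 1 >= 0:
        v[D - k_n - 1] = (c_n - mu) / alpha ** k_n    # fractional entry at k = D - k_n
    return v

# quick consistency check: h_alpha^T v^(n) reproduces c[n]
alpha, A, D, c_n = 0.8, 1.0, 6, 2.3
v = build_block(c_n, alpha, A, D)
assert abs(h_alpha(alpha, D) @ v - c_n) < 1e-9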
APPENDIX C: PROOF OF LEMMA 7
Proof. For any 0 < α ≤ 0.5, we begin by showing that for an integer p ≥ 1 the following inequality holds:

    Σ_{k=1}^{p} α^{D−k} = α^{D−p−1} (1 − α^p)/(1/α − 1) < α^{D−p−1},        (39)

since 1/α − 1 ≥ 1 and 1 − α^p < 1 in the regime 0 < α ≤ 0.5. Let S_1 = {0, α^{D−1}, α^{D−2}, α^{D−1} + α^{D−2}}. Notice that the elements of S_1 are sorted in ascending order for any α and D. Now, we recursively define the sets S_i as follows:

    S_i := {S_{i−1}, S_{i−1} + α^{D−1−i}},  2 ≤ i ≤ D − 1.        (40)

Our hypothesis is that for every 2 ≤ i ≤ D − 1, α ∈ (0, 0.5] and D, the set S_i as defined in (40) is automatically sorted in ascending order. We prove this via induction. For i = 2, the sets S_1 and S_1 + α^{D−3} are individually sorted. Moreover, from (39) we can show that max_{a∈S_1} a = α^{D−1} + α^{D−2} < α^{D−3} = min_{b∈S_1+α^{D−3}} b. This shows that S_2 is ordered, establishing the base case of our induction. Now, assume S_i is ordered for some 2 ≤ i ≤ D − 2. We need to show that S_{i+1} is also ordered. As a result of the induction hypothesis, both S_i and S_i + α^{D−2−i} are ordered. Using the ordering of S_i, we have max_{a∈S_i} a = Σ_{j=1}^{i+1} α^{D−j} and min_{b∈S_i+α^{D−2−i}} b = α^{D−(i+1)−1}. From (39), we can conclude that max_{a∈S_i} a < min_{b∈S_i+α^{D−2−i}} b and hence S_{i+1} is also ordered. This completes the induction proof. Also, note that for α ∈ (0, 0.5], we have Θ^{sort}_α = S_{D−1}.
Let ∆_min(S_i) be the minimum distance between the elements of the set S_i. It is easy to see that ∆_min(S_i) = ∆_min(S_i + α^{D−2−i}). Since S_i is sorted for α ∈ (0, 0.5], ∆_min(S_i) is given by:

    ∆_min(S_i) = min( ∆_min(S_{i−1}),  min_{x∈S_{i−1}+α^{D−1−i}} x − max_{y∈S_{i−1}} y )
               = min{ ∆_min(S_{i−1}),  α^{D−i−1} − Σ_{j=1}^{i} α^{D−j} }.        (41)

Now, we use induction to establish the following conjecture:

    ∆_min(S_i) = α^{D−1},  1 ≤ i ≤ D − 1.        (42)

For the base case i = 1, ∆_min(S_1) = min(α^{D−1}, α^{D−2} − α^{D−1}) = α^{D−1}, where the last equality holds since α ∈ (0, 0.5] ⇒ α^{D−1}(1/α − 1) ≥ α^{D−1}. Suppose (42) holds for some 1 ≤ i ≤ D − 2. From the definition of ∆_min(S_{i+1}) and the induction hypothesis that ∆_min(S_i) = α^{D−1}, it follows that ∆_min(S_{i+1}) = min{α^{D−1}, α^{D−(i+1)−1} − Σ_{j=1}^{i+1} α^{D−j}}. Again, from the definition of ∆_min(S_i) in (41) and the induction hypothesis, we also have α^{D−i−1} − Σ_{j=1}^{i} α^{D−j} ≥ ∆_min(S_i) = α^{D−1}. Using this and the fact that α ≤ 0.5, we can show:

    α^{D−i−2} − α^{D−i−1} − Σ_{j=1}^{i} α^{D−j} ≥ α^{D−i−2} − 2α^{D−i−1} + α^{D−1} ≥ α^{D−1} + α^{D−i−1}(1/α − 2) ≥ α^{D−1}.

Therefore ∆_min(S_{i+1}) = min{α^{D−1}, α^{D−i−2} − Σ_{j=1}^{i+1} α^{D−j}} = α^{D−1}. Thus, we can conclude that ∆_min(α, D) = ∆_min(S_{D−1}) = α^{D−1}.
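As a quick numerical sanity check of the recursion (40) and of the claim ∆_min(α, D) = α^{D−1}, one can enumerate S_{D−1} directly, since it coincides with the set of noiseless block measurements h_α^T b over binary patterns b ∈ {0, 1}^D. The snippet below is purely illustrative and not part of the proof.

import numpy as np
from itertools import product

def sorted_levels(alpha, D):
    # All values h_alpha^T b for b in {0,1}^D, i.e., the set S_{D-1} = Theta^sort_alpha
    weights = alpha ** np.arange(D - 1, -1, -1)
    return sorted(float(weights @ np.array(b)) for b in product([0, 1], repeat=D))

alpha, D = 0.4, 6                                  # any alpha in (0, 0.5]
gaps = np.diff(sorted_levels(alpha, D))
assert np.isclose(gaps.min(), alpha ** (D - 1))    # minimum spacing equals alpha^(D-1)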
APPENDIX D: PROOF OF THEOREM 3
Proof. The probability of incorrectly identifying xhi(n) from a single measurement c_e[n] is given by

    p_e := P(x̂hi(n) ≠ xhi(n)) = Σ_{k=0}^{l_D} P(x̂hi(n) ≠ xhi(n) | xhi(n) = ṽ_k) P(xhi(n) = ṽ_k).

Given a binary vector z ∈ {0, 1}^D, define the function ψ(z) := Σ_{k=1}^{D} z_k, which denotes the count of ones in z. Since the noisy observations are given by c_e[n] = c[n] + e[n], where e[n] = w[n] − α^D w[n − 1], it follows from assumption (A2) that e[n] ∼ N(0, σ_1^2), where σ_1^2 = (1 + α^{2D})σ^2. From (27), we obtain P(x̂hi(n) ≠ xhi(n) | xhi(n) = ṽ_0) = P(e[n] ∈ E_0) = Q(α^{D−1}/(2σ_1)). Similarly, P(x̂hi(n) ≠ xhi(n) | xhi(n) = ṽ_{l_D}) = P(e[n] ∈ E_{l_D}) = Q((θ̃_{l_D} − θ̃_{l_D−1})/(2σ_1)) = Q(α^{D−1}/(2σ_1)). The last equality follows from the fact that θ̃_{l_D} − θ̃_{l_D−1} = α^{D−1}. Finally, when conditioned on xhi(n) = ṽ_k for 0 < k < l_D, from (26) we obtain P(x̂(n) ≠ xhi(n) | xhi(n) = ṽ_k) = P(e[n] ∈ E_k) = Q((θ̃_k − θ̃_{k−1})/(2σ_1)) + Q((θ̃_{k+1} − θ̃_k)/(2σ_1)). Due to Assumption (A1) on xhi, we have P(xhi(n) = ṽ_k) = p^{ψ(ṽ_k)} (1 − p)^{D−ψ(ṽ_k)}. Therefore, p_e is given by

    p_e = Q(α^{D−1}/(2σ_1)) (1 − p)^D + Q(α^{D−1}/(2σ_1)) p^D
          + Σ_{k=1}^{l_D−1} [ Q((θ̃_k − θ̃_{k−1})/(2σ_1)) + Q((θ̃_{k+1} − θ̃_k)/(2σ_1)) ] p^{ψ(ṽ_k)} (1 − p)^{D−ψ(ṽ_k)}.        (43)

The spike train xhi is incorrectly decoded if at least one of the blocks is decoded incorrectly; hence, the total probability of error is given by:

    P( ∪_{n=0}^{M−1} {x̂(n) ≠ xhi(n)} ) ≤ Σ_{n=0}^{M−1} P(x̂(n) ≠ xhi(n)) = M p_e
        (a)≤ 2M Q(∆θ_min(α, D)/(2σ_1)) Σ_{j=0}^{D} p^j (1 − p)^{D−j} C(D, j)
        (b)≤ 2M exp(−∆θ_min^2(α, D)/(4σ_1^2)),        (44)

where the first inequality follows from the union bound and the equality is a consequence of (43). The inequality (a) follows from the monotonically decreasing property of the Q(.) function, and the sum can be re-written by grouping all terms with the same count, i.e., ψ(ṽ_k) = j. The inequality (b) follows from the inequality Q(x) ≤ exp(−x^2/2) for x > 0. If the SNR condition (28) holds, then from (44) the total probability of error is bounded by δ.
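For readers who want to evaluate the union-bound step (a) of (44) numerically, a minimal sketch follows; the numbers plugged in are arbitrary illustrative values, not taken from the paper.

from math import erfc, sqrt

def Q(x):
    # Gaussian tail probability Q(x) = P(N(0,1) > x)
    return 0.5 * erfc(x / sqrt(2))

def block_error_union_bound(M, dtheta_min, sigma1):
    # Step (a) of (44): total decoding error probability <= 2*M*Q(dtheta_min / (2*sigma1))
    return 2 * M * Q(dtheta_min / (2 * sigma1))

print(block_error_union_bound(M=1000, dtheta_min=0.01, sigma1=1e-3))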
APPENDIX E: PROOF OF THEOREM 4
Proof. We first begin by showing that α ∈ F_D implies that (31) holds and hence the mappings of spike patterns with the same count are clustered. Notice that for k = 0, θ^k_max = θ^k_min = 0. For k ≥ 1, it is easy to verify that θ^k_max and θ^k_min are attained by the spiking patterns 00...1111 (with k consecutive spikes at the indices D − k + 1 to D) and 111...000 (with consecutive spikes at the indices 1 to k), which allows us to simplify (31) as α^{D−1} > 0 for k = 0, and Σ_{i=1}^{k+1} α^{D−i} > Σ_{j=0}^{k−1} α^j for k = 1, · · · , D − 1. The values of α that satisfy each of these relations can be described by the following sets:

    G_0 = {α ∈ (0, 1) | α^{D−1} > 0},    G_k = {α ∈ (0, 1) | r_k(α) < 0},

where r_k(α) = α^D − α^{D−k−1} − α^k + 1 for 1 ≤ k ≤ D − 1. It is easy to see that F_D = G_{k_0}. Observe that the relations are symmetric, i.e., G_k = G_{D−k−1}. Furthermore, for 1 ≤ k ≤ D/2, we show that G_k ⊆ G_{k−1} as follows. Trivially, G_1 ⊂ G_0. For 2 ≤ k ≤ D/2, observe that r_k(α) − r_{k−1}(α) = α^{D−k}(1 − 1/α) − α^k(1 − 1/α) = (1/α − 1)(α^k − α^{D−k}) ≥ 0. Therefore, α ∈ G_k ⇒ α ∈ G_{k−1} for k = 1, 2, · · · , k_0. Moreover, since G_k = G_{D−k−1}, it follows that F_D = G_{k_0} = ∩_{k=0}^{D−1} G_k. Hence, α ∈ F_D ⇒ α ∈ G_i for all 0 ≤ i ≤ D − 1, which implies that (31) holds. If the noise perturbation satisfies |w[n]| < ∆^c_min(α, D)/4, it implies |e[n]| < ∆^c_min(α, D)/2. For any block xhi(n) ∈ C^D_k, we have θ^k_min ≤ h_α^T xhi(n) ≤ θ^k_max. If |e[n]| < ∆^c_min(α, D)/2, we have

    h_α^T xhi(n) + e[n] < θ^k_max + ∆^c_min(α, D)/2 < θ^k_max + (θ^{k+1}_min − θ^k_max)/2,
    h_α^T xhi(n) + e[n] > θ^k_min − ∆^c_min(α, D)/2 > θ^k_min − (θ^k_min − θ^{k−1}_max)/2.

This shows that whenever α ∈ F_D, the condition |e[n]| < ∆^c_min(α, D)/2 is sufficient for (33) to hold ∀ γ[n], and hence the spike count can be exactly recovered.
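Membership in F_D can be checked directly from the polynomials r_k(α); the short sketch below is illustrative only (the proof shows that the single index k_0 suffices, but for simplicity all k are tested here).

def in_F_D(alpha, D):
    # alpha in F_D iff r_k(alpha) = alpha^D - alpha^(D-k-1) - alpha^k + 1 < 0 for k = 1, ..., D-1
    return all(alpha ** D - alpha ** (D - k - 1) - alpha ** k + 1 < 0 for k in range(1, D))

# e.g. [a for a in (0.5, 0.7, 0.8, 0.9, 0.99) if in_F_D(a, 4)] -> [0.8, 0.9, 0.99]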
APPENDIX F: AMPLITUDE ESTIMATION
We suggest a procedure to estimate the binary amplitude A, if it is unknown. We first evaluate the signal c[n] at different time instants n = 1, 2, · · · , M − 1. For some 1 ≤ n_0 ≤ M − 1, we estimate a set A = {A_k} of candidate amplitudes: A_k := c[n_0]/(h_α^T v_k), where v_k ∈ S_all. Only certain amplitudes can generate c[n_0] from a valid binary spiking pattern v_k ∈ S_all. Our goal is to prune A by sequentially eliminating candidate amplitudes from the set based on a consistency test across the remaining measurements c[n]. At the t-th stage (t = 2, 3, · · · ), for every remaining candidate amplitude A_k ∈ A, we perform the following consistency test with c[n] to identify whether the candidate amplitude can potentially generate the corresponding measurement c[n]. Suppose there exists a spiking pattern v_l ∈ S_all such that

    c[n] = A_k h_α^T v_l;        (45)

then A_k remains a valid candidate. If we cannot find a corresponding v_l ∈ S_all for an amplitude A_k, we remove it: A = A \ {A_k}. In the presence of noise, (45) can be modified to allow a tolerance γ, as we may not find an exact match. The tolerance γ is chosen to be 0.5 in the experiments on the GENIE dataset. This procedure prunes out possible values for the amplitude by leveraging the shared amplitude across multiple measurements c[n].
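A minimal sketch of this pruning procedure is given below. It is illustrative only; the helper name prune_amplitudes is ours, S_all is taken to be all binary patterns of length D, and noisy matching simply uses the tolerance γ.

import numpy as np
from itertools import product

def prune_amplitudes(c, alpha, D, gamma=0.5):
    # Prune candidate amplitudes using the consistency test (45) across the measurements c[n].
    weights = alpha ** np.arange(D - 1, -1, -1)                      # h_alpha
    responses = np.array([weights @ np.array(b) for b in product([0, 1], repeat=D)])
    n0 = 1
    candidates = {c[n0] / r for r in responses if r > 0}             # initial candidates from c[n0]
    for n in range(1, len(c)):
        if n == n0:
            continue
        # keep A_k only if some spiking pattern reproduces c[n] within the tolerance gamma
        candidates = {A for A in candidates if np.min(np.abs(A * responses - c[n])) <= gamma}
    return candidates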
ACKNOWLEDGEMENT
The authors would like to thank Prof. Nikita Sidorov, Department of Mathematics at the University of Manchester, for helpful discussions regarding computational challenges in finding finite β-expansions in the range β ∈ (1, 2). This work was supported by Grants ONR N00014-19-1-2256, DE-SC0022165, NSF 2124929, and NSF CAREER ECCS 1700506.
DNAzT4oBgHgl3EQfwf6z/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
DdE1T4oBgHgl3EQfEAP6/content/2301.02886v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdc31ec3a0bada040aa1098e317565147bf8b9a8e83938767dbc9708caaf49d6
3
+ size 408975
DdE1T4oBgHgl3EQfEAP6/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b358752aa3314e0496e862d49c925310ec05f0fdf0734c1adbb09bc1ddee3e20
3
+ size 1245229
DdE1T4oBgHgl3EQfEAP6/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2383999abd4787a467c171d66463323162ac53301e34e9ebfa52e9fbe0e1501a
3
+ size 56968
DdE3T4oBgHgl3EQfUwqw/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:47a32bae090c3a2c3c6d74bb096cb67fbe01bd7f7ba22faee055e4fcc1c1d6c1
3
+ size 1966125
DdE3T4oBgHgl3EQfUwqw/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94e6a0bae7e287da2603bc75f9f564d826e7f5425dc37d6d83005773596fc57c
3
+ size 76558
DdFJT4oBgHgl3EQfBSxP/content/2301.11424v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:20c52a50e8a492d8a8bd35df915ec3435b10a8d1ace85d3703a54deb4fda22fa
3
+ size 514705
DdFJT4oBgHgl3EQfBSxP/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d5013ea6328bbec096d654e957e709e63ef415015ac6fe0a6688e751a808bc6
3
+ size 279975
EtFJT4oBgHgl3EQfCSzh/content/tmp_files/2301.11429v1.pdf.txt ADDED
@@ -0,0 +1,982 @@
 
1
+ Just Another Day on Twitter: A Complete 24 Hours of Twitter Data
2
+ J¨urgen Pfeffer1, Daniel Matter1, Kokil Jaidka2, Onur Varol3, Afra Mashhadi4, Jana Lasser5, 15,
3
+ Dennis Assenmacher6, Siqi Wu7, Diyi Yang8, Cornelia Brantner9, Daniel M. Romero7, Jahna
4
+ Otterbacher10, Carsten Schwemmer11, Kenneth Joseph12, David Garcia13, Fred Morstatter14
5
+ 1School of Social Sciences and Technology, Technical University of Munich, 2Centre for Trusted Internet and Community,
6
+ National University of Singapore, 3Sabanci University, 4University of Washington (Bothell), 5Graz University of Technology,
7
+ 6GESIS - Leibniz Institute for the Social Sciences, 7University of Michigan, 8Stanford University, 9Karlstad University,
8
+ 10Open University of Cyprus & CYENS CoE, 11Ludwig Maximilian University of Munich, 12University at Buffalo,
9
+ 13University of Konstanz, 14Information Sciences Institute, University of Southern California 15Complexity Science Hub
10
+ Vienna
11
+ Abstract
12
+ At the end of October 2022, Elon Musk concluded his acqui-
13
+ sition of Twitter. In the weeks and months before that, sev-
14
+ eral questions were publicly discussed that were not only of
15
+ interest to the platform’s future buyers, but also of high rele-
16
+ vance to the Computational Social Science research commu-
17
+ nity. For example, how many active users does the platform
18
+ have? What percentage of accounts on the site are bots? And,
19
+ what are the dominating topics and sub-topical spheres on the
20
+ platform? In a globally coordinated effort of 80 scholars to
21
+ shed light on these questions, and to offer a dataset that will
22
+ equip other researchers to do the same, we have collected all
23
+ 375 million tweets published within a 24-hour time period
24
+ starting on September 21, 2022. To the best of our knowl-
25
+ edge, this is the first complete 24-hour Twitter dataset that
26
+ is available for the research community. With it, the present
27
+ work aims to accomplish two goals. First, we seek to an-
28
+ swer the aforementioned questions and provide descriptive
29
+ metrics about Twitter that can serve as references for other
30
+ researchers. Second, we create a baseline dataset for future
31
+ research that can be used to study the potential impact of the
32
+ platform’s ownership change.
33
+ Introduction
34
+ On March 21, 2006, Twitter’s first CEO Jack Dorsey sent
35
+ the first message on the platform. In the subsequent 16 years,
36
+ close to 3 trillion tweets have been sent.1 Roughly two-thirds
37
+ of these have been either removed from the platform be-
38
+ cause the senders deleted them or because the accounts (and
39
+ all their tweets) have been banned from the platform, have
40
+ been made private by the users, or are otherwise inaccessi-
41
+ ble via the historic search with the v2 API endpoints. We
42
+ estimate that about 900 billion public tweets were on the
43
+ platform when Elon Musk acquired Twitter in October 2022
44
+ for $44B., i.e., he paid about 5 cents per tweet.
45
+ Besides its possible economic value, Twitter has been
46
+ instrumental in studying human behavior with social me-
47
+ dia data and the entire field of Computational Social Sci-
48
+ ence (CSS) has heavily relied on data from Twitter. At the
49
+ Copyright © 2022, Association for the Advancement of Artificial
50
+ Intelligence (www.aaai.org). All rights reserved.
51
+ 1While we do not have an official source for this number, it rep-
52
+ resents an educated guess from a collaboration of dozens of schol-
53
+ ars of Twitter.
54
+ AAAI International Conference on Web and Social Media
55
+ (ICWSM), in the past two years alone (2021-2022), over
56
+ 30 scientific papers analyzed a subset of Twitter for a wide
57
+ range of topics ranging from public and mental health anal-
58
+ yses to politics and partisanship. Indeed, since its emer-
59
+ gence, Twitter has been described as a digital socioscope
60
+ (i.e., social telescope) by researchers in fields of social sci-
61
+ ence (Mejova, Weber, and Macy 2015), “a massive antenna
62
+ for social science that makes visible both the very large (e.g.,
63
+ global patterns of communications) and the very small (e.g.,
64
+ hourly changes in emotions)”. Beyond CSS, there is increas-
65
+ ing use of Twitter data for training large pre-trained language
66
+ models in the field of natural language processing and ma-
67
+ chine learning, such as Bernice (DeLucia et al. 2022), where
68
+ 2.5 billion tweets are used to develop representations for
69
+ Twitter-specific languages, and TwHIN-BERT (Zhang et al.
70
+ 2022) that leverages 7 billion tweets covering over 100 dis-
71
+ tinct languages to model short, noisy, and user-generated
72
+ text.
73
+ Although Twitter data has fostered interdisciplinary re-
74
+ search across many fields and has become a “model organ-
75
+ ism” of big data, scholarship using Twitter data has also
76
+ been criticized for various forms of bias that can emerge
77
+ during analyses (Tufekci 2014). One major challenge giv-
78
+ ing rise to these biases is getting access to data and knowing
79
+ about data quality and possible data biases (Ruths and Pfef-
80
+ fer 2014; Gonz´alez-Bail´on et al. 2014; Olteanu et al. 2019).
81
+ While Twitter has long served as one of the most collabora-
82
+ tive big social media platforms in the context of data-sharing
83
+ with academic researchers, there nonetheless exists a lack
84
+ of transparency in sampling procedures and possible biases
85
+ created from technical artifacts (Morstatter et al. 2013; Pf-
86
+ effer, Mayer, and Morstatter 2018). These unknown biases
87
+ may jeopardize research quality. At the same time, access to
88
+ unfiltered/unsampled Twitter data is nearly impossible to ac-
89
+ cess, and thus the above mentioned studies, as well as thou-
90
+ sands of others, still retain unknown and potentially signifi-
91
+ cant biases in their use of sampled data.
92
+ Contributions.
93
+ The data collection efforts presented in
94
+ this paper were driven by a desire to address these concerns
95
+ about sampling bias that exist because of the lack of a com-
96
+ plete sample of Twitter data. Consequently, the main contri-
97
+ arXiv:2301.11429v1 [cs.SI] 26 Jan 2023
98
+
99
+ bution of this article is to create the first complete dataset of
100
+ 24 hours on Twitter and make these Tweets available via fu-
101
+ ture collaborations with the authors and contributors of this
102
+ article. The dataset collected and described here can be used
103
+ by the research community to:
104
+ • Promote a better understanding of the communication
105
+ dynamics on the platform. For example, it can be used
106
+ to answer questions like, how many active (posting) ac-
107
+ counts are on the platform? And, what are the dominating
108
+ languages and topics?
109
+ • Create a set of descriptive metrics that can serve as refer-
110
+ ences for the research community and provide context to
111
+ past and present research papers on Twitter.
112
+ • Provide a baseline for the situation before the recent
113
+ sale of Twitter. With the new ownership of Twitter, plat-
114
+ form policies as well as the company structures are un-
115
+ der significant change, which will create questions about
116
+ whether previous Twitter studies will be still valuable ref-
117
+ erences for future studies.
118
+ In the following sections, we describe the data collection
119
+ process and provide some descriptive analyses of the dataset.
120
+ We also discuss ethical considerations and data availability.
121
+ Data
122
+ Data Collection.
123
+ We have collected 24 hours of Twitter
124
+ data from September 20, 15:00:00 UTC to September 21
125
+ 14:59:59 UTC. The data collection was accomplished by
126
+ utilizing the Academic API (Pfeffer et al. 2022) that is free
127
+ and openly available for researchers. The technical setup of
128
+ the data collection pipeline was dominated by two major
129
+ challenges: First, how can we avoid—at least to a satisfying
130
+ extent—a temporal bias in data collection? Second, how can
131
+ we get a good representation of Twitter? In the following,
132
+ these two aspects are discussed in more detail.
133
+ What is a complete dataset?
134
+ What does complete mean
135
+ when we want to collect a day’s worth of Twitter data? It has
136
+ been shown previously that the availability of tweets fluctu-
137
+ ates, especially in the first couple of minutes (Pfeffer et al.
138
+ 2022)—people might delete their tweets because of typos,
139
+ tweets might be removed because of violations of terms of
140
+ service, etc. To reduce this initial uncertainty, we have de-
141
+ cided to collect the data 10 minutes after the tweets were
142
+ sent. Consequently, this dataset does not include all tweets
143
+ that were sent on the collection day but instead tries to create
144
+ a somewhat stable representation of Twitter.
145
+ Avoiding temporal collection bias.
146
+ We wanted to collect
147
+ a set of tweets close to the time when they were created.
148
+ However, collecting data takes time, which can introduce
149
+ possible temporal bias, e.g., if we want to collect data from
150
+ the previous hour and the data collection job takes three
151
+ hours, then the data that is collected at the end of the col-
152
+ lection job will be much older (with potentially more tweet
153
+ removals) than the data that is collected at the beginning. To
154
+ tackle this challenge, we have split the day into 86,400 col-
155
+ lection tasks, each consisting of 1 second of Twitter activ-
156
+ ity. The collection of every second of data started exactly 10 minutes after the data creation
+ time. Because the data collection of a second took more than a minute during peak times, we
+ have distributed the workload to 80 collection processes, i.e., Academic API tokens, in order
+ to avoid backlogs.
+ [Figure 1: Tweets per minute over the 24-hour collection period, time in UTC. Line plot of
+ tweets per minute (roughly 200,000 to 400,000) against time of day in UTC.]
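A rough sketch of the per-second scheduling described above is shown below. It is illustrative only, not the authors' actual pipeline; collect_second is a placeholder for a full-archive search request issued with one of the 80 Academic API tokens.

import datetime as dt
import time
from concurrent.futures import ThreadPoolExecutor

START = dt.datetime(2022, 9, 20, 15, 0, 0, tzinfo=dt.timezone.utc)
DELAY = dt.timedelta(minutes=10)        # each second of data is fetched 10 minutes after it happened
N_WORKERS = 80                          # one worker per Academic API token

def collect_second(window_start, window_end, token):
    # Placeholder: request all tweets created in [window_start, window_end)
    # using `token` and write the response to disk.
    ...

def run_day(tokens):
    with ThreadPoolExecutor(max_workers=N_WORKERS) as pool:
        for i in range(86_400):                                   # 86,400 one-second windows
            s = START + dt.timedelta(seconds=i)
            e = s + dt.timedelta(seconds=1)
            wait = (e + DELAY - dt.datetime.now(dt.timezone.utc)).total_seconds()
            if wait > 0:
                time.sleep(wait)                                  # enforce the 10-minute delay
            pool.submit(collect_second, s, e, tokens[i % N_WORKERS])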
176
+ Number of tweets.
177
+ With the above-described process, we
178
+ have collected 374,937,971 tweets within the 24 hours time
179
+ span. On average, this amounts to 4,340 [2,989 – 8,955]
180
+ tweets per second. Fig. 1 plots the number of tweets per
181
+ minute (avg=260,374, min=192,322, max=435,721). The
182
+ data collection started at 15:00 UTC, when almost the en-
183
+ tire Twitter world is awake. Then, we can see from Japan to
184
+ Europe time zone after time zone getting off the platform.
185
+ While Europe and the Americas are sleeping, Asia keeps
186
+ the number of tweets at around 200,000. Starting at 7:00
187
+ UTC, Europe is getting active again, followed by the Amer-
188
+ icas from East to West. Another astonishing observation of
189
+ this time series is that the first minute of every hour has on
190
+ average 15.5% more tweets than the minute before—most
191
+ likely due to bot activities and other timed tweet releases,
192
+ e.g., news.
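The hourly spike can be quantified directly from the tweet timestamps; the following sketch assumes a pandas DataFrame with one row per tweet and a datetime column created_at (our own illustration, not the authors' code).

import pandas as pd

def first_minute_uplift(df: pd.DataFrame) -> float:
    # Average relative increase of minute :00 of each hour over the minute :59 preceding it.
    per_minute = df.set_index("created_at").resample("1min").size()
    first = per_minute[per_minute.index.minute == 0]
    prev = per_minute.shift(1)[per_minute.index.minute == 0]
    return float((first / prev - 1).mean())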
193
+ Descriptive Analyses
194
+ Active Users
195
+ The 375 million tweets in our dataset were sent by
196
+ 40,199,195 accounts. While the publicly communicated
197
+ numbers of users of a platform are often based on the num-
198
+ ber of active and passive visitors, we can state that Twitter
199
+ has (or at least had on our observed day) 40 million active
200
+ contributors who have sent at least one tweet. Less than 100
201
+ accounts have created about 1% (=3.5M) tweets. ∼175, 000
202
+ accounts (0.44%) created 50% of all tweets.
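Concentration figures such as the ones above can be reproduced from a per-account tweet count; the sketch below assumes a pandas Series tweet_counts with one entry per posting account (our own illustration).

import pandas as pd

def share_of_accounts(tweet_counts: pd.Series, tweet_share: float) -> float:
    # Smallest fraction of accounts whose tweets cover `tweet_share` of all tweets.
    counts = tweet_counts.sort_values(ascending=False)
    covered = counts.cumsum() / counts.sum()
    n_needed = int((covered < tweet_share).sum()) + 1
    return n_needed / len(counts)

# e.g. share_of_accounts(tweet_counts, 0.50) should come out near 0.0044 for this dataset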
203
+ These numbers are not surprising when we consider that
204
+ > 95% of active accounts have sent one or two tweets. How-
205
+ ever, these numbers lend more nuance to recent reports from
206
+ the Pew Research Center, which reported that while the ma-
207
+ jority of Americans use social media, approximately 97% of
208
+ all tweets were posted by 25% of the users (McClain 2021).
209
+
210
+ [Figure 2: All languages occurring in at least 1% of the tweets. Bar chart of the proportion of
+ tweets per language code; approximate shares read from the figure: en 0.31, ja 0.165, es 0.073,
+ tr 0.053, ar 0.05, und 0.049, pt 0.044, th 0.04, ko 0.03, fa 0.024, zxx 0.023, in 0.022,
+ qme 0.017, fr 0.015, hi 0.01.]
250
+ In fact, our dataset suggests that worldwide, the numbers
251
+ may be more skewed than previously suggested.
252
+ User metrics
253
+ Followers.
254
+ The active accounts on our day of Twitter
255
+ data have a mean of 2,123 followers (median=99). We can
256
+ find six accounts with more than 100 million followers
257
+ (max=133,301,854), and 427/8,635 accounts with more than
258
+ 10/1 million followers. Exactly 50% of accounts that were
259
+ active on our collection day have less than 100 followers.
260
+ Following.
261
+ These accounts follow much fewer other ac-
262
+ counts: mean=547, median=197, range: 0–4,103,801. Inter-
263
+ estingly, there are 2,377 accounts that follow more than
264
+ 100,000 other accounts. One-third of accounts follow less
265
+ than 100 accounts, but only 1.7% of accounts follow zero
266
+ other accounts.
267
+ Listed.
268
+ Lists are a Twitter feature for users to organize
269
+ accounts around topics and filter tweets. While there is lit-
270
+ tle evidence that lists are used widely on the platform, this
271
+ feature might be useful for getting an impression about the
272
+ number of interesting content creators on the platform. The
273
+ 40 million active accounts in our dataset are listed (i.e.,
274
+ number of lists that include a user) in 0 to 3,086,443 lists
275
+ (mean=10.1, median=0). 1,692/46,139 accounts are in lists
276
+ of at least 10,000/1,000 accounts.
277
+ Tweets sent.
278
+ The user information of the tweet metadata
279
+ also includes the number of tweets that a user has sent—or
280
+ at least how many of those tweets are still available on Twit-
281
+ ter. The sum of the sent tweets variable of all 40 million ac-
282
+ Table 1: Distribution of user activity
+ % Total Tweets   % Total Users   Min. no. of Tweets
+ 1%               0.00023%        2,267
+ 10%              0.01199%        465
+ 25%              0.07284%        152
+ 50%              0.43526%        39
+ 75%              1.70955%        11
+ 90%              4.18836%        3
304
+ counts is ∼404 billion (mean=9,704, median=1,522). If we
305
+ assume that our initial estimate of having 900 billion tweets
306
+ on the platform at the time of data collection is somewhat
307
+ correct, the accounts active in our dataset have contributed
308
+ ∼45% of all of the available tweets over the entire lifetime
309
+ of Twitter.
310
+ Verified accounts.
311
+ At the time of our data collection, we
312
+ can identify 221,246 verified accounts among the 40 million
313
+ active users.
314
+ Tweets and retweets
315
+ 79.2% of all tweets refer to other tweets, i.e. they are
316
+ retweets or quotes of or replies to other tweets. Conse-
317
+ quently, 20.8% of the tweets in our dataset are original
318
+ tweets. The tweets with references are of the following
319
+ types: 50.7% retweets, 4.3% quotes, 24.2% replies, i.e. half
320
+ of all tweets are retweets and a fourth are replies.
321
+ Retweeted and liked.
322
+ Studying the retweet and like num-
323
+ bers from the tweets’ metadata has created little insight since
324
+ the top retweeted tweets are very old tweets that have been
325
+ retweeted by chance on our collection day. Furthermore, we
326
+ can see the number of likes only for tweets that have been
327
+ tweeted and retweeted. In any case, the retweeted number
328
+ is interesting—the 374 million tweets have been retweeted
329
+ 401 billion times. In other words, significant parts of historic
330
+ Twitter get retweeted on a daily basis.
331
+ Languages
332
+ Twitter annotates a language variable for every tweet. Fig. 2
333
+ shows those languages that were annotated on at least 1% of
334
+ our dataset. Together, these 15 languages make up 92.5% of
335
+ all tweets. Besides the most common languages on Twitter,
336
+ we can also find interesting language codes in this list: und
337
+ stands for undefined and represents tweets for which Twitter
338
+ was not able to identify a language; qme and zxx seem to
339
+ be used by Twitter for tweets consisting of only media or a
340
+ Twitter card.
341
+ Media
342
+ There are 112,779,266 media attachments in our data collec-
343
+ tion (76.9% photos, 20.7% videos, 2.4% animated GIFs), of
344
+ which 37,803,473 have unique media keys (83.8% photos,
345
+ 10.0% videos, 6.2% animated GIFs).
346
+ Geo-tags
347
+ We found only 0.5% of tweets to be geo-tagged. This is
348
+ not surprising as previous works have shown that the per-
349
+ centage of geo-tagging in Twitter has been declining (Ajao,
350
+ Hong, and Liu 2015). Fig. 3 shows the distribution of the
351
+ geo-tagged tweets across the world, with USA (20%), Brazil
352
+ (11%), Japan (8%), Saudi Arabia (6%) and India (4%) being
353
+ the top five countries.
354
+ Estimating prevalence of bot accounts
355
+ Twitter has a pivotal role in public discourse and entities
356
+ that are after power and influence often utilize this platform
357
+
358
+ Figure 3: Choropleth map of the geo-tagged tweets across
359
+ the world.
360
+ through social bots and other means of automated activi-
361
+ ties. Since the early days of Twitter, researchers have been
362
+ studying bot behavior, and it has become an active research
363
+ area (Ferrara et al. 2016; Cresci 2020). The first estimation
364
+ of bot prevalence on Twitter indicates that 9-15% of Twit-
365
+ ter accounts exhibit automated behavior (Varol et al. 2017),
366
+ while others have observed significantly higher percentages
367
+ of tweets produced by bot-likely accounts on specific dis-
368
+ courses (Uyheng and Carley 2021; Antenore, Camacho Ro-
369
+ driguez, and Panizzi 2022). One major challenge in estimat-
370
+ ing bot prevalence is the variety of definitions, datasets, and
371
+ models used for detection (Varol 2022).
372
+ In this study, we employed BotometerLite (Yang et al.
373
+ 2020), a scalable and light-weight version of the Botome-
374
+ ter (Sayyadiharikandeh et al. 2020), for computing bot
375
+ scores for unique accounts in our collection. In Fig. 4a, we
376
+ present the distribution of bot scores and nearly 20% of the
377
+ 40 million active accounts have scores above 0.5 suggesting
378
+ bot-likely behavior.
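Summary statistics of this kind can be derived from precomputed BotometerLite scores; the sketch below assumes a pandas Series of scores in [0, 1], one per active account (the function name and threshold handling are our assumptions).

import pandas as pd

def bot_score_summary(scores: pd.Series, threshold: float = 0.5) -> dict:
    # Share of accounts above the threshold plus score deciles, in the spirit of Fig. 4a.
    return {
        "n_accounts": int(scores.size),
        "share_above_threshold": float((scores >= threshold).mean()),
        "deciles": scores.quantile([i / 10 for i in range(11)]).to_dict(),
    }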
379
+ While identification of bots is a complex and possi-
380
+ bly controversial challenge, plotting the distributions of
381
+ BotometerLite scores grouped by account age in Fig. 4b sug-
382
+ gests the proportions of accounts that show bot-like behavior
383
+ has increased dramatically in recent years. This result may
384
+ also suggest that the longevity of simpler bot accounts is
385
+ limited and they are no longer active on the platform. In Fig.
386
+ 4c, we also present the distribution of bot scores for differ-
387
+ ent rates of activities in our dataset. Accounts that have over
388
+ 1,000 posts exhibit higher rates of bot-like behaviors.
389
+ It is important to mention that accounts studied in this pa-
390
+ per were identified due to their content creation activities.
391
+ Our collection cannot capture passive accounts that are sim-
392
+ ply used to boost follower counts without visible activity on
393
+ tweet streams. Fair assessment of bot prevalence is only pos-
394
+ sible with complete access to Twitter’s internal database;
395
+ since activity streams, network data, and historical tweet
396
+ archives can capture different sets of accounts (Varol 2022).
397
+ Content on Twitter
398
+ The top 500 hashtags occurred 81,468,508 times in the
399
+ tweets. Via manual inspection, we were able to identify the
400
+ meaning of 95% of these top hashtags. They can be aggre-
401
+ gated into ten categories.
402
+ Table 2 suggests that a large proportion of tweets referred
403
+ to entertainment, which together comprised about 30% of
404
+ tweets. These included mentions of celebrities (25.5%) and
405
+ other entertainment-related tweets (5.4%) such as mentions
406
+ of South Korean boy band members, and other references
407
+ to music, movies, and TV shows. Our data collection time
408
+ window occurred during Fall/Winter 2022, when the world
409
+ was discussing the protests in Iran after the death of Mahsa
410
+ Amini. Therefore, the Iranian protests also comprised a large
411
+ proportion of the hashtag volume at 16.6%.
412
+ Finally, and perhaps surprisingly, the category sex com-
413
+ prised over a quarter of all content covered by the top hash-
414
+ tags, and was almost completely related to escorts. “Other”
415
+ topics reflect that on “regular” Twitter days, sports, tech, and
416
+ art may take up only about 3.3% of Twitter volume.
417
+ Fig. 5 is a hashtag visualization that attempts to provide an
418
+ overview of the entire content on Twitter. We first removed
419
+ all tweets from accounts with more than 240 tweets to re-
420
+ duce the noise from bots using random trending hashtags.
421
+ From the remaining tweets, we extracted the 10,000 most
422
+ often used hashtags in our dataset and created a hashtag sim-
423
+ ilarity matrix with the number of accounts that have used a
424
+ pair of two hashtags on the day of data collection. Every el-
425
+ ement in Fig. 5 represents a hashtag. The position is the re-
426
+ sult of Multidimensional Scaling (MDS) and the color shows
427
+ the dominant language that was used in the tweets with the
428
+ particular hashtag. In this figure, we can see how languages
429
+ separate the Twitter universe but that there are also topical
430
+ sub-communities within languages.
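A compact version of this embedding step can be written with scikit-learn; the sketch below is illustrative only (the exact similarity-to-dissimilarity transform used for the figure is not specified here, so a simple normalisation is assumed).

import numpy as np
from sklearn.manifold import MDS

def hashtag_mds(cousage: np.ndarray, random_state: int = 0) -> np.ndarray:
    # 2-D embedding of hashtags from a symmetric co-usage count matrix (n_tags x n_tags).
    sim = cousage / cousage.max()                 # normalise co-usage counts to [0, 1]
    np.fill_diagonal(sim, 1.0)
    dissim = 1.0 - sim                            # simple dissimilarity transform (assumption)
    mds = MDS(n_components=2, dissimilarity="precomputed", random_state=random_state)
    return mds.fit_transform(dissim)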
431
+ Discussion and Potential Applications
432
+ Twitter is a social media platform with a worldwide user-
433
+ base. Open access to its data also makes it attractive to a
434
+ large community of researchers, journalists, technologists,
435
+ and policymakers who are interested in examining social
436
+ and civic behavior online. Early studies of Twitter explored
437
+ who says what to whom on Twitter (Wu et al. 2011), char-
438
+ acterizing its primary use as a communication tool. Other
439
+ early work mapped follower communities through ego net-
440
+ works (Gruzd, Wellman, and Takhteyev 2011). However,
441
+ Twitter has since expanded into its own universe, with a
442
+ plethora of users, uses, modalities, communities, and real-
443
+ life implications. Twitter is increasingly the source of break-
444
+ Table 2: The categories of the top 500 hashtags in the dataset
+ Category           Hashtags   Occurrence    Share
+ Celebrities        159        20,809,742    25.5%
+ Sex                104        20,529,196    25.2%
+ Iranian Protests   15         13,488,295    16.6%
+ Entertainment      45         4,392,227     5.4%
+ Advertisement      32         4,644,540     5.7%
+ Politics           38         3,858,550     4.7%
+ Finance            30         3,549,107     4.4%
+ Games              21         3,348,128     4.1%
+ Other              31         2,672,291     3.3%
+ Unknown            25         4,176,432     5.1%
+ Sum                500        81,468,508    100.0%
492
+ [Figure 3 colour bar: number of geo-tagged tweets per country, roughly 100k to 500k.]
+ [Figure 4: BotometerLite scores distribution: (a) histogram and cumulative distribution, (b) by
+ account age, (c) by tweet counts in our dataset. All panels show the BotometerLite score (0 to 1)
+ on the x-axis; panel (b) groups densities by account creation year (2007 to 2022), panel (c) by
+ tweet-count bins (Nt < 10^1, 10^1 <= Nt < 10^2, 10^2 <= Nt < 10^3, 10^3 <= Nt).]
585
+ ing news, and many studies from the U.S. and Europe have
586
+ reported that Twitter is one of the primary sources of news
587
+ for their citizens. Twitter has been used for political engage-
588
+ ment and citizen activism worldwide. During the COVID-
589
+ 19 pandemic, Twitter even assumed the role of the official
590
+ mouthpiece and crisis communication tool for many gov-
591
+ ernments to contact their citizens, and from which citizens
592
+ could seek help and information.
593
+ Fig. 3 confirms prior reports that geotagging practices are
594
+ limited in many low- and middle-income countries (Malik
595
+ et al. 2015); however, this should not deter scholars from ex-
596
+ ploring alternative methods of triangulating the location of
597
+ users (Schwartz et al. 2013), and creating post-stratified es-
598
+ timates of regional language use (Jaidka et al. 2020; Giorgi
599
+ et al. 2022). In prior studies, the difficulties in widespread
600
+ data collection and analyses have so far implied that most
601
+ answers are based on smaller samples (usually constrained
602
+ by geography, for convenience) of a burgeoning Twitter pop-
603
+ ulation. Fig. 5 and Table 2 also impressively illustrate that
604
+ Twitter is about so much more than US politics.
605
+ We hope that our dataset is the first step in creating al-
606
+ ternatives for conducting a representative and truly inclusive
607
+ analysis of the Twitterverse. Temporal snapshots are invalu-
608
+ able to map the national and international migration patterns
609
+ that increasingly blur geopolitical boundaries (Zagheni et al.
610
+ 2014).
611
+ The increasing popularity of Twitter has led it into issues
612
+ of scale, where its moderation can no longer check the large
613
+ proportion of bots on the platform. Our findings in Fig. 4
614
+ indicate that the infestation of bots may be more pernicious
615
+ than previously imagined. We are especially concerned that
616
+ the escalation of the war on Ukraine by Russia may reflect a
617
+ spike (in our dataset) in the online activity of bots from Rus-
618
+ sia operated either by the Russian government or its allied
619
+ intelligence agencies (Badawy, Ferrara, and Lerman 2018).
620
+ These and other bots serve to amplify trending topics and
621
+ facilitate the spread of misinformation (though, perhaps, at
622
+ a rate less than humans do (Vosoughi, Roy, and Aral 2018)).
623
+ They may also misuse hashtags to divert attention away from
624
+ social or political topics (Earl, Maher, and Pan 2022; Broni-
625
+ atowski et al. 2018) or strategically target influential users
626
+ (Shao et al. 2018; Varol and Uluturk 2020). We hope that
627
+ our work will spur more studies on these topics, and we wel-
628
+ come researchers to explore our data.
629
+ By observing bursts of discussions around politically
630
+ charged events and characterizing the temporal spikes in
631
+ Twitter topics, we can better rationalize how our experience
632
+ of Twitter as a political hotbed differs from the simplified
633
+ understanding of the American Twitter landscape reported
634
+ in Mukerjee, Jaidka, and Lelkes (2022), which suggested
635
+ that politics is largely a sideshow on Twitter. It is worth con-
636
+ sidering that these politically active users may not be rep-
637
+ resentative of social media users at large (McClain 2021;
638
+ Wojcieszak et al. 2022).
639
+ Twitter is also under scrutiny for how its platform gover-
640
+ nance may conflict with users’ interests and rights (Van Di-
641
+ jck, Poell, and De Waal 2018). Concerns have been raised
642
+ about alleged biases in the algorithmic amplification (and
643
+ deamplification) of content, with evidence from France,
644
+ Germany, Turkey, and the United States, among other coun-
645
+ tries (Maj´o-V´azquez et al. 2021; Tanash et al. 2015; Jaidka,
646
+ Mukerjee, and Lelkes 2023). Other scholars have also criti-
647
+ cized Twitter’s use as a censorship weapon by governments
648
+ and political propagandists worldwide (Varol 2016; Elmas,
649
+ Overdorf, and Aberer 2021; Jakesch et al. 2021). They, and
650
+ others, may be interested in examining the trends in the en-
651
+ forcement of content moderation policies by Twitter.
652
+ Besides answering questions of data, representativeness,
653
+ access, and censorship, we anticipate that our dataset
654
+ is suited to explore the temporal dynamics of online
655
+ (mis)information in the following directions:
656
+ • Content characteristics: We have provided a high-level
657
+ exploration of the topics on Twitter (see Figure 5, an MDS map of the top 10,000 hashtags
+ based on co-usage by the same accounts, with colors representing the dominant language of
+ tweets using a hashtag). However, more can
661
+ be done with regard to understanding users’ concerns and
662
+ priorities. While hashtags act as signposts for the broader
663
+ Twitter community to find and engage in topics of mu-
664
+ tual interest (Cunha et al. 2011), tweets without hashtags
665
+ may offer a different understanding of Twitter discourse,
666
+ where users may engage in more interpersonal discus-
667
+ sions of news, politics, and sports than the numbers sug-
668
+ gest (Rajadesingan, Budak, and Resnick 2021).
669
+ • Patterns of information dissemination: Informational
670
+ exchanges occurring on Twitter can overcome spatio-
671
+ temporal limitations as they essentially reconfigure user
672
+ connections to create newly emergent communities.
673
+ However, these communities may vanish as quickly as
674
+ they are created, as the lifecycle of a tweet determines
675
+ how long it continues to circulate on Twitter timelines.
676
+ To the best of our knowledge, no prior research has re-
677
+ ported on the average “age” of a tweet, and we hope that
678
+ a 24-hour snapshot will enable us to answer this question
679
+ empirically.
680
+ • Content moderation and fake news: Prior research
681
+ suggests that 0.1% of Twitter users accounted for 80%
682
+ of all fake news sources shared in the lead-up to a
683
+ US election (Grinberg et al. 2019). However, we ex-
684
+ pect there to be cross-lingual differences in this distri-
685
+ bution, especially for low- or under-resourced languages
686
+ with fewer open tools for fact-checking. Similarly, we
687
+ expect that the quality of moderation and hate speech
688
+ will vary by geography and language, and recommend
689
+ the use of multilingual large language models to explore
690
+ these trends (with attention to persisting representative-
691
+
692
+ fa
693
+ hi
694
+ ko
695
+ th
696
+ tr
697
+ un
698
+ ar
699
+ en
700
+ it
701
+ de
702
+ es
703
+ ja
704
+ pt
705
+ und
706
+ zh
707
+ frness caveats (Wu and Dredze 2020)).
708
+ • Mass mobilization: Twitter is increasingly a hotbed of
709
+ protest, which has led to some activists donning the role
710
+ of “movement spilloverers” (Zhou and Yang 2021) or se-
711
+ rial activists (Bastos and Mercea 2016) who broker infor-
712
+ mation across different online movements, thereby acting
713
+ as key coordinators, itinerants, or gatekeepers in the ex-
714
+ change of information. Such users, as well as the constant
715
+ communities in which they presumably reside (Chowd-
716
+ hury et al. 2022), may be easier to study through tempo-
717
+ ral snapshots, as facilitated by this dataset.
718
+ • Echo chambers and filter bubbles: On Twitter, algo-
719
+ rithms can affect the information diets of users in over
720
+ 200 countries, with an estimated 396.5 million monthly
721
+ users (Kemp 2022). Recent surveys of the literature have
722
+ considered the evidence on how platforms’ designs and
723
+ affordances influence users behaviors, attitudes, and be-
724
+ liefs (Gonz´alez-Bail´on and Lelkes 2022). Studies of the
725
+ structural and informational networks based on snapshots
726
+ of Twitter can offer clues to solving these puzzles with-
727
+ out the constraints of data selection.
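+ As referenced in the content-moderation item above, here is a minimal multilingual screening
+ sketch (an illustration only, not the authors' setup; the model identifier is a placeholder for any
+ multilingual hate-speech or toxicity classifier):
+ from transformers import pipeline
+
+ # Placeholder checkpoint -- substitute a multilingual toxicity/hate-speech model of your choice.
+ clf = pipeline("text-classification", model="PLACEHOLDER/multilingual-toxicity-model")
+
+ tweets = [
+     "Example tweet in English ...",
+     "Ejemplo de tuit en español ...",
+     "Beispiel-Tweet auf Deutsch ...",
+ ]
+ for text, pred in zip(tweets, clf(tweets, truncation=True)):
+     print(pred["label"], round(pred["score"], 3), "|", text)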
728
+ Ethics Statement and Data Availability
729
+ Ethics statement.
730
+ We acknowledge that privacy and ethi-
731
+ cal concerns are associated with collecting and using social
732
+ media data for research. However, we took several steps to
733
+ avoid risks to human subjects, since participants do not
+ opt into being part of our study in the traditional sense (Zim-
+ mer 2020). In our analysis, we only studied and reported
+ population-level, aggregated observations of our dataset.
737
+ We share publicly only the tweet IDs with the research com-
738
+ munity, in keeping with privacy considerations and Twitter’s TOS. For
739
+ this purpose, we use a data sharing and long-term archiving
740
+ service provided by GESIS - Leibniz Institute for the Social
741
+ Sciences, a German infrastructure institute for the social sci-
742
+ ences 2.
743
+ With regards to data availability, this repository adheres
744
+ to the FAIR principles (Wilkinson et al. 2016) as follows:
745
+ • Findability: In compliance with Twitter’s terms of ser-
746
+ vice, only tweet IDs are made publicly available at DOI:
747
+ https://doi.org/10.7802/2516. A unique Digital Ob-
+ ject Identifier (DOI) is associated with the dataset. Its
749
+ metadata and licenses are also readily available.
750
+ • Accessibility: The dataset can be downloaded using stan-
+ dard APIs and communication protocols (the REST API
+ and OAI-PMH); a minimal hydration sketch is given after this list.
753
+ • Interoperability: The data is provided in raw text for-
754
+ mat.
755
+ • Reusability: The CC BY 4.0 license implies that re-
756
+ searchers are free to use the data with proper attribution.
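+ As noted under Accessibility, the shared IDs can be "hydrated" back into full tweets. A minimal
+ sketch with the Twitter API v2 tweet-lookup endpoint follows (an illustration, not an official
+ client; the bearer token and the input file of IDs, one per line, are assumptions, and rate-limit
+ handling is omitted):
+ import requests
+
+ BEARER_TOKEN = "..."  # assumed: a Twitter API v2 bearer token with suitable access
+ LOOKUP_URL = "https://api.twitter.com/2/tweets"
+
+ def hydrate(batch):
+     """Look up at most 100 tweet IDs per request (the v2 lookup limit)."""
+     params = {"ids": ",".join(batch), "tweet.fields": "created_at,lang,public_metrics"}
+     resp = requests.get(LOOKUP_URL, params=params,
+                         headers={"Authorization": f"Bearer {BEARER_TOKEN}"})
+     resp.raise_for_status()
+     return resp.json().get("data", [])
+
+ with open("tweet_ids.txt") as fh:  # assumed format: one tweet ID per line
+     ids = [line.strip() for line in fh if line.strip()]
+
+ tweets = []
+ for start in range(0, len(ids), 100):
+     tweets.extend(hydrate(ids[start:start + 100]))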
757
+ Furthermore, we want to invite the broader research com-
758
+ munity to approach one or more of the authors and collab-
759
+ orators (see Acknowledgments) of this paper with research
760
+ ideas about what can be done with this dataset. We will be
761
+ very happy to collaborate with you on your ideas!
762
+ 2https://www.gesis.org/en/data-services/share-data
763
+ Acknowledgments
764
+ The data collection effort described in this paper would
+ not have been possible without the great collaboration of
+ a large number of scholars; here are some of them (in
767
+ random order): Chris Schoenherr, Leonard Husmann, Diyi
768
+ Liu, Benedict Witzenberger, Joan Rodriguez-Amat, Flo-
769
+ rian Angermeir, Stefanie Walter, Laura Mahrenbach, Isaac
770
+ Bravo, Anahit Sargsyan, Luca Maria Aiello, Sophie Brandt,
771
+ Wienke Strathern, Bilal Çakir, David Schoch, Yuliia Holu-
772
+ bosh, Savvas Zannettou, Kyriaki Kalimeri.
773
+ References
774
+ Ajao, O.; Hong, J.; and Liu, W. 2015. A survey of loca-
775
+ tion inference techniques on Twitter. Journal of Information
776
+ Science, 41(6): 855–864.
777
+ Antenore, M.; Camacho Rodriguez, J. M.; and Panizzi, E.
778
+ 2022. A Comparative Study of Bot Detection Techniques
779
+ With an Application in Twitter Covid-19 Discourse. Social
780
+ Science Computer Review, 08944393211073733.
781
+ Badawy, A.; Ferrara, E.; and Lerman, K. 2018. Analyzing
782
+ the digital traces of political manipulation: The 2016 Rus-
783
+ sian interference Twitter campaign. In 2018 IEEE/ACM in-
784
+ ternational conference on advances in social networks anal-
785
+ ysis and mining (ASONAM), 258–265. IEEE.
786
+ Bastos, M. T.; and Mercea, D. 2016. Serial activists: Polit-
787
+ ical Twitter beyond influentials and the twittertariat. New
788
+ Media & Society, 18(10): 2359–2378.
789
+ Broniatowski, D. A.; Jamison, A. M.; Qi, S.; AlKulaib, L.;
790
+ Chen, T.; Benton, A.; Quinn, S. C.; and Dredze, M. 2018.
791
+ Weaponized health communication: Twitter bots and Rus-
792
+ sian trolls amplify the vaccine debate. American journal of
793
+ public health, 108(10): 1378–1384.
794
+ Chowdhury, A.; Srinivasan, S.; Bhowmick, S.; Mukherjee,
795
+ A.; and Ghosh, K. 2022. Constant community identifica-
796
+ tion in million-scale networks. Social Network Analysis and
797
+ Mining, 12(1): 1–17.
798
+ Cresci, S. 2020. A decade of social bot detection. Commu-
799
+ nications of the ACM, 63(10): 72–83.
800
+ Cunha, E.; Magno, G.; Comarela, G.; Almeida, V.;
801
+ Gonçalves, M. A.; and Benevenuto, F. 2011. Analyzing the
802
+ dynamic evolution of hashtags on twitter: a language-based
803
+ approach. In Proceedings of the workshop on language in
804
+ social media (LSM 2011), 58–65.
805
+ DeLucia, A.; Wu, S.; Mueller, A.; Aguirre, C.; Dredze, M.;
806
+ and Resnik, P. 2022. Bernice: A Multilingual Pre-trained
807
+ Encoder for Twitter.
808
+ Earl, J.; Maher, T. V.; and Pan, J. 2022. The digital repres-
809
+ sion of social movements, protest, and activism: A synthetic
810
+ review. Science Advances, 8(10): eabl8198.
811
+ Elmas, T.; Overdorf, R.; and Aberer, K. 2021. A Dataset of
812
+ State-Censored Tweets. In ICWSM, 1009–1015.
813
+ Ferrara, E.; Varol, O.; Davis, C.; Menczer, F.; and Flammini,
814
+ A. 2016. The rise of social bots. Communications of the
815
+ ACM, 59(7): 96–104.
816
+
817
+ Giorgi, S.; Lynn, V. E.; Gupta, K.; Ahmed, F.; Matz, S.; Un-
818
+ gar, L. H.; and Schwartz, H. A. 2022. Correcting Sociode-
819
+ mographic Selection Biases for Population Prediction from
820
+ Social Media.
821
+ In Proceedings of the International AAAI
822
+ Conference on Web and Social Media, volume 16, 228–240.
823
+ González-Bailón, S.; and Lelkes, Y. 2022. Do social media
824
+ undermine social cohesion? A critical review. Social Issues
825
+ and Policy Review.
826
+ González-Bailón, S.; Wang, N.; Rivero, A.; Borge-
+ Holthoefer, J.; and Moreno, Y. 2014. Assessing the bias in
+ samples of large online networks. Social Networks, 38: 16–
+ 27.
836
+ Grinberg, N.; Joseph, K.; Friedland, L.; Swire-Thompson,
837
+ B.; and Lazer, D. 2019. Fake news on Twitter during the
838
+ 2016 US presidential election.
839
+ Science, 363(6425): 374–
840
+ 378.
841
+ Gruzd, A.; Wellman, B.; and Takhteyev, Y. 2011. Imagining
842
+ Twitter as an imagined community. American Behavioral
843
+ Scientist, 55(10): 1294–1318.
844
+ Jaidka, K.; Giorgi, S.; Schwartz, H. A.; Kern, M. L.; Ungar,
845
+ L. H.; and Eichstaedt, J. C. 2020. Estimating geographic
846
+ subjective well-being from Twitter: A comparison of dictio-
847
+ nary and data-driven language methods. Proceedings of the
848
+ National Academy of Sciences, 117(19): 10165–10171.
849
+ Jaidka, K.; Mukerjee, S.; and Lelkes, Y. 2023. Silenced on
850
+ social media: the gatekeeping functions of shadowbans in
851
+ the American Twitterverse. Journal of Communication.
852
+ Jakesch, M.; Garimella, K.; Eckles, D.; and Naaman, M.
853
+ 2021.
854
+ Trend alert: A cross-platform organization manip-
855
+ ulated Twitter trends in the Indian general election. Pro-
856
+ ceedings of the ACM on Human-Computer Interaction,
857
+ 5(CSCW2): 1–19.
858
+ Kemp, S. 2022. Digital 2022: Global overview report. Tech-
859
+ nical report, DataReportal.
860
+ Majó-Vázquez, S.; Congosto, M.; Nicholls, T.; and Nielsen,
861
+ R. K. 2021. The Role of Suspended Accounts in Political
862
+ Discussion on Social Media: Analysis of the 2017 French,
863
+ UK and German Elections. Social Media+ Society, 7(3):
864
+ 20563051211027202.
865
+ Malik, M.; Lamba, H.; Nakos, C.; and Pfeffer, J. 2015. Pop-
866
+ ulation bias in geotagged tweets. In proceedings of the in-
867
+ ternational AAAI conference on web and social media, vol-
868
+ ume 9, 18–27.
869
+ McClain, C. 2021. 70% of U.S. social media users never or
870
+ rarely post or share about political, social issues. Technical
871
+ report, Pew Research Center.
872
+ Mejova, Y.; Weber, I.; and Macy, M. W. 2015. Twitter: a
873
+ digital socioscope. Cambridge University Press.
874
+ Morstatter, F.; Pfeffer, J.; Liu, H.; and Carley, K. M. 2013. Is
875
+ the Sample Good Enough? Comparing Data from Twitter’s
876
+ Streaming API with Twitter’s Firehose. In Seventh Inter-
877
+ national AAAI Conference on Weblogs and Social Media,
878
+ 400–408.
879
+ Mukerjee, S.; Jaidka, K.; and Lelkes, Y. 2022. The Political
880
+ Landscape of the US Twitterverse. Political Communica-
881
+ tion, 1–31.
882
+ Olteanu, A.; Castillo, C.; Diaz, F.; and Kıcıman, E. 2019.
883
+ Social data: Biases, methodological pitfalls, and ethical
884
+ boundaries. Frontiers in Big Data, 2: 13.
885
+ Pfeffer, J.; Mayer, K.; and Morstatter, F. 2018. Tampering
886
+ with Twitter’s Sample API. EPJ Data Science, 7(50).
887
+ Pfeffer, J.; Mooseder, A.; Lasser, J.; Hammer, L.; Stritzel,
888
+ O.; and Garcia, D. 2022. This Sample seems to be good
889
+ enough! Assessing Coverage and Temporal Reliability of
890
+ Twitter’s Academic API.
891
+ Rajadesingan, A.; Budak, C.; and Resnick, P. 2021. Political
892
+ discussion is abundant in non-political subreddits (and less
893
+ toxic). In Proceedings of the Fifteenth International AAAI
894
+ Conference on Web and Social Media, volume 15.
895
+ Ruths, D.; and Pfeffer, J. 2014. Social Media for Large Stud-
896
+ ies of Behavior. Science, 346(6213): 1063–1064.
897
+ Sayyadiharikandeh, M.; Varol, O.; Yang, K.-C.; Flammini,
898
+ A.; and Menczer, F. 2020. Detection of novel social bots
899
+ by ensembles of specialized classifiers. In Proceedings of
900
+ the 29th ACM international conference on information &
901
+ knowledge management, 2725–2732.
902
+ Schwartz, H.; Eichstaedt, J.; Kern, M.; Dziurzynski, L.; Lu-
903
+ cas, R.; Agrawal, M.; Park, G.; Lakshmikanth, S.; Jha, S.;
904
+ Seligman, M.; et al. 2013. Characterizing geographic varia-
905
+ tion in well-being using tweets. In Proceedings of the Inter-
906
+ national AAAI Conference on Web and Social Media, vol-
907
+ ume 7, 583–591.
908
+ Shao, C.; Ciampaglia, G. L.; Varol, O.; Yang, K.-C.; Flam-
909
+ mini, A.; and Menczer, F. 2018.
910
+ The spread of low-
911
+ credibility content by social bots. Nature communications,
912
+ 9(1): 1–9.
913
+ Tanash, R. S.; Chen, Z.; Thakur, T.; Wallach, D. S.; and Sub-
914
+ ramanian, D. 2015. Known unknowns: An analysis of Twit-
915
+ ter censorship in Turkey. In Proceedings of the 14th ACM
916
+ Workshop on Privacy in the Electronic Society, 11–20.
917
+ Tufekci, Z. 2014. Big questions for social media big data:
918
+ Representativeness, validity and other methodological pit-
919
+ falls. In Eighth international AAAI conference on weblogs
920
+ and social media.
921
+ Uyheng, J.; and Carley, K. M. 2021. Computational Analy-
922
+ sis of Bot Activity in the Asia-Pacific: A Comparative Study
923
+ of Four National Elections.
924
+ In Proceedings of the Inter-
925
+ national AAAI Conference on Web and Social Media, vol-
926
+ ume 15, 727–738.
927
+ Van Dijck, J.; Poell, T.; and De Waal, M. 2018. The plat-
928
+ form society: Public values in a connective world. Oxford
929
+ University Press.
930
+ Varol, O. 2016. Spatiotemporal analysis of censored content
931
+ on twitter. In Proceedings of the 8th ACM Conference on
932
+ Web Science, 372–373.
933
+ Varol, O. 2022. Should we agree to disagree about Twitter’s
934
+ bot problem? arXiv preprint arXiv:2209.10006.
935
+ Varol, O.; Ferrara, E.; Davis, C.; Menczer, F.; and Flam-
936
+ mini, A. 2017. Online human-bot interactions: Detection,
937
+ estimation, and characterization. In Proceedings of the in-
938
+ ternational AAAI conference on web and social media, vol-
939
+ ume 11, 280–289.
940
+
941
+ Varol, O.; and Uluturk, I. 2020. Journalists on Twitter: self-
942
+ branding, audiences, and involvement of bots. Journal of
943
+ Computational Social Science, 3(1): 83–101.
944
+ Vosoughi, S.; Roy, D.; and Aral, S. 2018. The spread of true
945
+ and false news online. science, 359(6380): 1146–1151.
946
+ Wilkinson, M. D.; Dumontier, M.; Aalbersberg, I. J.; Apple-
947
+ ton, G.; Axton, M.; Baak, A.; Blomberg, N.; Boiten, J.-W.;
948
+ da Silva Santos, L. B.; Bourne, P. E.; et al. 2016. The FAIR
949
+ Guiding Principles for scientific data management and stew-
950
+ ardship. Scientific data, 3(1): 1–9.
951
+ Wojcieszak, M.; Casas, A.; Yu, X.; Nagler, J.; and Tucker,
952
+ J. A. 2022. Most users do not follow political elites on Twit-
953
+ ter; those who do show overwhelming preferences for ideo-
954
+ logical congruity. Science advances, 8(39): eabn9418.
955
+ Wu, S.; and Dredze, M. 2020. Are All Languages Created
956
+ Equal in Multilingual BERT?
957
+ In Proceedings of the 5th
958
+ Workshop on Representation Learning for NLP, 120–130.
959
+ Wu, S.; Hofman, J. M.; Mason, W. A.; and Watts, D. J. 2011.
960
+ Who says what to whom on twitter. In Proceedings of the
961
+ 20th international conference on World wide web, 705–714.
962
+ Yang, K.-C.; Varol, O.; Hui, P.-M.; and Menczer, F. 2020.
963
+ Scalable and generalizable social bot detection through data
964
+ selection. In Proceedings of the AAAI conference on artifi-
965
+ cial intelligence, volume 34, 1096–1103.
966
+ Zagheni, E.; Garimella, V. R. K.; Weber, I.; and State, B.
967
+ 2014. Inferring international and internal migration patterns
968
+ from twitter data. In Proceedings of the 23rd international
969
+ conference on world wide web, 439–444.
970
+ Zhang, X.; Malkov, Y.; Florez, O.; Park, S.; McWilliams, B.;
971
+ Han, J.; and El-Kishky, A. 2022. TwHIN-BERT: A Socially-
972
+ Enriched Pre-trained Language Model for Multilingual
973
+ Tweet Representations. arXiv preprint arXiv:2209.07562.
974
+ Zhou, A.; and Yang, A. 2021. The Longitudinal Dimension
975
+ of Social-Mediated Movements: Hidden Brokerage and the
976
+ Unsung Tales of Movement Spilloverers.
977
+ Social Media+
978
+ Society, 7(3): 20563051211047545.
979
+ Zimmer, M. 2020. “But the data is already public”: on the
980
+ ethics of research in Facebook. In The Ethics of Information
981
+ Technologies, 229–241. Routledge.
982
+
EtFJT4oBgHgl3EQfCSzh/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
FdE1T4oBgHgl3EQfqwVD/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d604dc22a48a403a0d4f47e9a3f58cf6ac38c97a628338f8c07334aa470c1dc
3
+ size 6553645
INFLT4oBgHgl3EQfIy9R/content/2301.12001v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:304f5c1c69a663779b55c81bafa9f84ec2033c041339f998169d050035f126b7
3
+ size 1378689
INFLT4oBgHgl3EQfIy9R/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3aacccb9a57f502227a34dc65859db9c43457853e6ece7493aa62e393b419a28
3
+ size 177226
IdAyT4oBgHgl3EQfr_me/content/tmp_files/2301.00569v1.pdf.txt ADDED
@@ -0,0 +1,420 @@
1
+ arXiv:2301.00569v1 [math.AC] 2 Jan 2023
2
+ ELIAS IDEALS
3
+ HAILONG DAO
4
+ Abstract. Let (R, m) be a one dimensional local Cohen-Macaulay ring. An m-primary
5
+ ideal I of R is Elias if the types of I and of R/I are equal. Canonical and principal ideals
6
+ are Elias, and Elias ideals are closed under inclusion. We give multiple characterizations
7
+ of Elias ideals and concrete criteria to identify them. We connect Elias ideals to other
8
+ well-studied definitions: Ulrich, m-full, integrally closed, trace ideals, etc. Applications are
9
+ given regarding canonical ideals, conductors and the Auslander index.
10
+ Introduction
11
+ Let (R, m) be a local Cohen-Macaulay ring of dimension one and I be an m-primary ideal
12
+ of R. We say that I is Elias if the Cohen-Macaulay types of I and R/I coincide. From
13
+ standard facts, principal ideals or canonical ideals are Elias, and we will soon see that this
14
+ property begets a rather rich and interesting theory.
15
+ Our work is heavily influenced by a nice result in [7], where Elias proves that any ideal
16
+ ω that lies inside a high enough power of m and such that R/ω is Gorenstein must be a
17
+ canonical ideal. Although not stated explicitly there, the proof showed that any ideal that
18
+ lies in a high enough power of m is Elias, in our sense. Another inspiration for the present
19
+ work is [2], where De Stefani studies, in our language, powers of m that are Elias in a
20
+ Gorenstein local ring, and gives a counter-example to a conjecture by Ding (see Section 4
21
+ for the precise connection).
22
+ In this note, we study Elias ideals in depth. They admit many different characterizations,
23
+ and enjoy rather useful properties. For instance, they are closed under inclusion, and
24
+ principal or canonical ideals are Elias. On the other hand, conductor ideals or regular trace
25
+ ideals are not Elias. When R is Gorenstein, they are precisely ideals such that the Auslander
26
+ index δ(R/I) is 1.
27
+ We are able to obtain many criteria to check whether an ideal is Elias, using very accessible
28
+ information such as the minimal number or valuations of generators.
29
+ Combining them
30
+ immediately gives sharp bounds and information on conductor or canonical ideals, which
31
+ can be tricky to obtain otherwise.
32
+ There are several obvious ways to extend the present definitions and results to higher
33
+ dimension rings or to modules. However, we choose to focus on the ideals in dimension one
34
+ case here as they are already interesting enough, and also to keep the paper short. We hope
35
+ to address the more general theory in future works.
36
+ We now describe briefly the structure and key results of the paper.
37
+ • In section 1 we give the formal definition of Elias ideals and prove several key results.
38
+ Theorem 1.2 contains several equivalent characterizations of Elias ideals. Corollary
39
+ 1.3 collects important consequences, for instance that Elias ideals are closed under
40
+ ideal containment. Also, criteria for Elias ideals using colon ideals are given. Next,
41
+ 2020 Mathematics Subject Classification. Primary: 13D02, 13H10. Secondary: 14B99.
42
+ 1
43
+
44
+ Proposition 1.4 establishes the fundamental change-of-rings result that is used
+ frequently in the sequel.
46
+ • Section 2 connects Elias ideals to several well-studied classes of ideals: Ulrich ideals,
47
+ m-full ideals, full ideals, integrally closed ideals, etc. After some basic observations,
48
+ (2.3, 2.4, 2.5), we give Theorems 2.7 and Proposition 2.14, which contain concrete
49
+ ways to recognize Elias ideals using basic information such as number of generators
50
+ or valuations. We also derive that conductor ideals or regular trace ideals are not
51
+ Elias (Corollary 2.13).
52
+ This indicates one useful application: if we know,
53
+ for instance, that m2 is Elias, then the conductor or any regular trace ideal must
54
+ contain an element of m-adic order 1.
55
+ • Given the previous section, it is natural to study the Elias index eli(R), namely
56
+ the first power of m that is Elias, and we do so in Section 3. The first main result
57
+ here is Theorem 3.2, connecting this index to the generalized Loewy length and the
58
+ regularity of the associated graded ring. Next, in Theorem 3.3, we characterize rings
59
+ with small indexes: eli(R) = 1 if and only if R is regular, and eli(R) = 2 plus R is
60
+ Gorenstein is equivalent to e(R) = 2. We give a large class of non-Gorenstein rings
61
+ with Elias index 2 (3.4).
62
+ • Lastly, in Section 4 we focus on the special case of Gorenstein rings. In such situation,
63
+ we observe that Elias ideals are precisely ones whose quotient has Auslander δ-
64
+ invariant one. This immediately allows us to apply what we have to recover old
65
+ results about the Auslander invariant and Auslander index in 4.1 and 4.3. We give
66
+ a counter-example to a Theorem by Ding and also revisit a counter-example to a
67
+ conjecture by Ding given in [2] (Examples 4.4 and 4.5).
68
+ Acknowledgements: It is a pleasure to thank Juan Elias and Alessandro Di Stefani for
69
+ helpful comments and encouragements. The author is partially supported by the Simons
70
+ Collaboration Grant FND0077558.
71
+ 1. Elias ideals: definitions and basic results
72
+ Throughout the paper, let (R, m, k) be Cohen-Macaulay local ring of dimension one. For
73
+ a module M, set typeR(M) = dimk Ext^{dim M}_R(k, M). Set Q = Q(R) to be the total ring of
76
+ fractions of R. Set e = e(R), the Hilbert-Samuel multiplicity of R. For an element x ∈ R,
77
+ the m-adic order of x, denoted ord(x) is the smallest a such that x ∈ ma. The order of an
78
+ ideal I, denoted ord(I), is the minimum order of its elements.
79
+ Definition 1.1. We say that an m-primary ideal I is an Elias ideal if it satisfies type(I) =
80
+ type(R/I).
81
+ Theorem 1.2. We always have type(I) ≥ type(R/I). The following are equivalent.
82
+ (1) type(I) = type(R/I).
83
+ (2) For any NZD x ∈ m, xI : m ⊆ (x).
84
+ (3) For any NZD x ∈ m, xI : m = x(I : m).
85
+ (4) For some NZD x ∈ m, xI : m ⊆ (x).
86
+ (5) For some NZD x ∈ m, xI : m = x(I : m).
87
+ (6) I :Q m ⊆ R.
88
+ (7) K ⊆ m(K :Q I) (assuming R admits a canonical ideal K).
89
+ Proof. Let x be a NZD. Then
90
+ type(I) = type(I/xI) = dimk (xI : m)/xI ≥ dimk (x(I : m))/xI = dimk (I : m)/I = type(R/I).
102
+ Thus, type(I) = type(R/I) if and only if xI : m = x(I : m). Now, xI : m ⊆ (x) is equivalent
103
+ to xI : m = xJ for some ideal J, as x is a NZD. Rewriting it as xJm ⊆ xI, which is
104
+ equivalent to Jm ⊆ I, we get J ⊆ I : m. On the other hand x(I : m) ⊆ xI : m, thus
105
+ J = I : m. That establishes the equivalence of first five items.
106
+ Note that for any NZD x ∈ m, xI : m = x(I :Q m). Thus, (6) is equivalent to (3).
107
+ Let K be a canonical ideal.
108
+ Apply HomR(−, K) to the sequence 0 → I → R →
109
+ R/I → 0, and identifying HomR(I, K) with K :Q I, we get 0 → K → K :Q I →
+ Ext^1_R(R/I, K) = ωR/I → 0.
112
+ Since type(I) = µ(K :Q I) and type(R/I) = µ(ωR/I), the
113
+ equivalence of (7) and (1) follows.
114
+
115
+ Corollary 1.3. We have:
116
+ (1) If I is isomorphic to R or the canonical module of R (assuming its existence), then
117
+ I is Elias.
118
+ (2) If I is Elias, then so is J for any ideal J ⊆ I. (being Elias is closed under inclusion)
119
+ (3) Let K be a canonical ideal of R and I be an ideal containing K. Then I is Elias if
120
+ and only if K ⊆ m(K :R I).
121
+ (4) Let K be a canonical ideal of R and I be an ideal such that K ⊆ I. Then K : I is
122
+ Elias if and only if K ⊆ mI.
123
+ (5) Suppose that I contains a canonical ideal K such that ord(K) = 1. Then I is Elias
124
+ if and only if I = K.
125
+ Proof. For the first claim, I :Q m ⊂ I :Q I = R. For the second claim, we have J :Q m ⊂
126
+ I :Q m. For (3), first note that K :Q I ⊂ K :Q K = R, so K :Q I = K :R I, and we can use
127
+ part (7) of Theorem 1.2.
128
+ For part (4), note that K : (K : I) = I hence we can apply part (3).
129
+ For part (5), we again apply part (3): if K ⊊ I, then m(K :R I) ⊆ m2, contradicting
130
+ ord(K) = 1.
131
+
132
+ The following change of rings result would be used frequently in what follows.
133
+ Proposition 1.4. Let (R, m) → (S, n) be a local, flat rings extension such that dim S = 1
134
+ and S is Noetherian. Then I is an Elias ideal of R if and only if IS is an Elias ideal of S.
135
+ Proof. Under the assumption we have typeR(M) · typeS/mS(S/mS) = typeS(M ⊗R S) for any
136
+ finitely generated R-module M (see for instance [11]), thus the result follows.
137
+
138
+ 2. Elias ideals and other special ideals
139
+ Definition 2.1. Let I be an m-primary ideal.
140
+ • I is called Ulrich (as an R-module) if µ(I) = e(R). Assuming k is infinite, then I is
141
+ Ulrich if and only if xI = mI for some x ∈ m (equivalently, for any x ∈ m such that
142
+ ℓ(R/xR) = e(R)).
143
+ • I is called m-full if Im : x = I for some x ∈ m.
144
+ • I is called full (or basically full) if Im : m = I.
145
+ Remark 2.2. When the definition of special ideals such as Ulrich or m-full ones involves
146
+ an element x, we say that the property is witnessed by x. Note that being such x is a
147
+ Zariski-open condition (for the image of x in the vector space m/m2). For more on these
148
+ ideals, see [3, 10, 9, 12].
149
+ 3
150
+
151
+ Proposition 2.3. Let I be an m-primary ideal. Let e be the Hilbert-Samuel multiplicity of
152
+ R. The following are equivalent.
153
+ (1) I is Ulrich.
154
+ (2) type(I) = e.
155
+ Proof. We can assume k is infinite by making the flat extension R → R[t](m,t). Let x ∈ m be
156
+ such that ℓ(R/xR) = e. Then ℓ(I/xI) = e. Note that type(I) = ℓ(soc(I/xI)) ≤ ℓ(I/xI) =
157
+ e, and equality happens precisely when m(I/xI) = 0, in other words, I is Ulrich.
158
+
159
+ Proposition 2.4. Let I be an m-primary ideal.
160
+ (1) Suppose k is infinite. If I is Ulrich, then it is m-full.
161
+ (2) Suppose k is infinite. If I is integrally closed, then it is m-full.
162
+ (3) If I is m-full, then it is full.
163
+ Proof. (1): We can find a NZD x such that Ix = Im, so Im : x = Ix : x = I.
164
+ (2): see [8, Theorem 2.4].
165
+ (3): We have I ⊆ Im : m ⊆ Im : x, from which the assertion is clear.
166
+
167
+ Proposition 2.5. Suppose I is m-full, witnessed by a NZD x ∈ m. The following are equivalent:
168
+ (1) I is Elias.
169
+ (2) I = xJ for some Ulrich ideal J.
170
+ Proof. Assume I is Elias; since I is m-full, witnessed by a NZD x, we have Im : x = I.
171
+ We will show that
172
+ I ⊆ (x). If not, then I contains an element s whose image in R/(x) is in the socle. Thus
173
+ sm ⊂ Im ∩ (x) = x(Im : x) = xI, so s ∈ xI : m ⊂ (x), a contradiction.
174
+ Since I ⊆ (x) we must have I = xJ for some J. We have Jx = I = Im : x = Jxm : x =
175
+ Jm, so J is Ulrich.
176
+ Assume (2). Then I is Ulrich and also full by 2.4, so xI : m = mI : m = I = xJ ⊂ (x),
177
+ thus I is Elias.
178
+
179
+ Corollary 2.6. If e = 2 and k is infinite, then I is Elias if and only if I ⊆ (x) for some
180
+ NZD x ∈ m.
181
+ Proof. Since e = 2, any ideal is either principal or Ulrich, and 2.4 together with 2.5 give
182
+ what we want.
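+ For instance (a standard illustration): if R = k[[t^2, t^3]] and k is infinite, then e(R) = 2, the
+ maximal ideal m = (t^2, t^3) lies in no proper principal ideal (otherwise R would be regular), so
+ m is not Elias, while m^2 = (t^4, t^5) ⊆ (t^2) is Elias.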
183
+
184
+ Theorem 2.7. The following hold for an m primary ideal I.
185
+ (1) If µ(I) < e and type(R/I) ≥ e − 1, then I is Elias.
186
+ (2) Assume µ(mI) ≤ µ(I) = e − 1. Then Im is Elias and Im : m = I.
187
+ (3) Furthermore, assume R = S/(f) is a hypersurface, here S is a regular local ring of
188
+ dimension 2. Let J be an S ideal minimally generated by e elements, one of them is
189
+ f. Then JR is Elias.
190
+ Proof. By the inequality type(I) ≥ type(R/I), type(I) must be e or e − 1. But if
191
+ type(I) = e, then µ(I) = e by 2.3, contradiction.
192
+ Next, we have:
193
+ type(R/Im) = dimk (Im : m)/Im ≥ dimk I/Im = µ(I) ≥ e − 1
199
+ and Im is not Ulrich by assumption. So Im is Elias and type(Im) = e − 1, which by the
200
+ chain above implies that Im : m = I.
201
+ 4
202
+
203
+ For the last part, let I = JR. Then µR(I) = e − 1 and type(R/I) = type(S/J) = e − 1,
204
+ and we can apply the first part.
205
+
206
+ Example 2.8. Let R = k[[t4, t5, t11]] ∼= k[[a, b, c]]/(a4 − bc, b3 − ac, c2 − a3b2). Then m2 is
207
+ Elias: one can check directly or note that µ(m) = µ(m2) = 3 = e(R) − 1 and use 2.7. But
208
+ m2 is not contained in (x) for any (x).
209
+ Example 2.9. Let R = k[[t6, t7, t15]] ∼= k[[a, b, c]]/(a5 − c2, b3 − ac).
210
+ Then the Hilbert
211
+ function is {1, 3, 4, 5, 5, 6, . . .}, thus m4 is Elias. In this case, m4 ⊆ (a), so m4 is trivially
212
+ Elias.
213
+ Let R ⊂ S be a finite birational extension. We recall that the conductor of S in R,
214
+ denoted cR(S), is R :Q(R) S.
215
+ Proposition 2.10. Let R ⊂ S be a finite birational extension. If IS = I (i.e, I is an
216
+ S-module) and I is Elias, then I : m ⊆ cR(S).
217
+ Proof. Let Q = Q(R). We have R ⊃ I :Q m = IS :Q mS ⊃ (I : m)S, so I : m ⊆ R :Q S =
218
+ cR(S) as desired.
219
+
220
+ Note that if IS = I, then trace(I) ⊆ cR(S). So naturally, one can ask to extend 2.10 as
221
+ follows:
222
+ Question 2.11. If I is Elias, do we have I : m ⊆ trace(I)?
223
+ The answer is no. In Example 2.8 above, Let R = k[[t4, t5, t11]] ∼= k[[a, b, c]]/(a4 − bc, b3 −
224
+ ac, c2 − a3b2). One can check that trace(m2) = (a2, ab, b2, c) while m2 : m = m.
225
+ Corollary 2.12. Suppose m2 is Elias (e.g., if R has minimal multiplicity) and is integrally
226
+ closed. If m2 ⊆ cR(R) then m ⊆ cR(R).
227
+ Proof. Apply 2.10 to I = m2.
228
+
229
+ Corollary 2.13. Assume that the integral closure R is finite. Then the conductor of R in
230
+ R is not Elias. A regular trace ideal is not Elias.
231
+ Proof. Let c = cR(R). Then c is a R-module, so if it is Elias we would have c : m ⊆ c,
232
+ absurd! Any regular trace ideal must contain c, see for instance [3], so it can not be Elias
233
+ either by 1.3.
234
+
235
+ The following is simple but quite useful for constructing Elias ideals from minimal gener-
236
+ ators of Ulrich ideals. See the examples that follow.
237
+ Proposition 2.14. Let I ⊂ J be regular ideals with J Ulrich. Let x ∈ m be a minimal
238
+ reduction of m. Assume that my ̸⊆ xI for any minimal generator of J. Then I is Elias.
239
+ Proof. The assumption implies that xI : m ⊆ mJ = xJ ⊂ (x).
240
+
241
+ Example 2.15. Let R = k[[a1, . . . , an]]/(aiaj)_{1≤i<j≤n}. Apply 2.14 with J = m, x = a1 +
+ a2 + · · · + an. Note that each element f ∈ m has the form f = Σ αi ai^{si}, where the αi are units
+ or 0. Then ai f = αi ai^{si+1} and xf = Σ αi ai^{si+1}. It follows easily that the condition
+ my ̸⊆ xI for any minimal generator y of m is equivalent to ai^2 /∈ xI for each i, which is
+ equivalent to ai /∈ I for each i.
252
+ For instance, if R = Q[[a, b, c]]/(ab, bc, ca), I = (a − b, b − c) is Elias.
253
+ Since R/I =
254
+ Q[[a]]/(a2) is Gorenstein, I is a canonical ideal.
255
+ 5
256
+
257
+ One can use valuations to construct Elias ideals from part of a minimal generating set of
258
+ some Ulrich ideal.
259
+ Example 2.16. Let R = k[[t^n, t^{n+1}, . . . , t^{2n−1}]]. Let I = (t^n, . . . , t^{2n−2}). Apply 2.14 with
+ J = m, x = t^n. Let ν be the t-adic valuation on R. Note that for any minimal generator
+ y of J = m, 3n − 2 ∈ ν(ym). On the other hand 3n − 2 /∈ ν(xI), so ym ̸⊆ xI. It follows
262
+ that I, and any ideal contained in I, is Elias. Note that again, since R/I is Gorenstein, I is
263
+ actually a canonical ideal.
264
+ 3. Elias index
265
+ Definition 3.1. One defines the following:
266
+ • Let the Elias index of R, denoted by eli(R) be the smallest s such that ms is Elias.
267
+ • Let the generalized Loewy length of R, denoted by gll(R), be the infimum of s such
268
+ that ms ⊆ (x) for some x ∈ m.
269
+ • Let the Ulrich index of R, denoted by ulr(R) be the smallest s such that ms is Ulrich,
270
+ that is µ(ms) = e.
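+ For a quick illustration of these indices, consider the standard example R = k[[t^3, t^4, t^5]]
+ with k infinite: µ(m) = 3 = e(R), so m is Ulrich and ulr(R) = 1; moreover m^2 = t^3·m ⊆ (t^3),
+ so gll(R) ≤ 2, and since R is not regular, 3.3(1) and 3.2(1) below give eli(R) = gll(R) = 2 =
+ ulr(R) + 1.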
271
+ Theorem 3.2. We have:
272
+ (1) eli(R) ≤ gll(R).
273
+ (2) gll(R) ≤ ulr(R) + 1, if the residue field k is infinite.
274
+ (3) Suppose that the associated graded ring grm(R) is Cohen-Macaulay and the residue
275
+ field k is infinite. Then eli(R) = gll(R) = ulr(R) + 1.
276
+ Proof. If ms ⊆ (x) then x must be a NZD. Thus ms is Elias by 1.3. The second statement
277
+ follows from definition. The condition that grm(R) is Cohen-Macaulay implies that ms is
278
+ m-full for all s > 0, so the last assertion follows from 2.5.
279
+
280
+ Theorem 3.3. We have:
281
+ (1) eli(R) = 1 if and only if R is regular.
282
+ (2) Assume R is Gorenstein, then eli(R) = 2 if and only if e(R) = 2.
283
+ (3) Let (A, n) be a Gorenstein local ring of dimension one. Suppose that R = n :Q(A) n
284
+ is local. Then eli(R) ≤ 2.
285
+ Proof. (1): Assume m is Elias.
286
+ To show that R is regular, we can make the extension
287
+ R → R[t](m,t) and assume k is infinite. Choose a NZD x ∈ m − m2; then m2 : x = m,
288
+ that is m is m-full witnessed by x. Then 2.5 shows that m ⊂ (x), thus m is principal.
289
+ (2): We can assume again by 1.4 that k is infinite. If e = 2, then m2 ⊂ (x) for a minimal
290
+ reduction x of m, thus m2 is Elias. Now, suppose m2 is Elias and e ≥ 3, and we need a
291
+ contradiction. We first claim that any Ulrich ideal I of R must lie in m2. Take any minimal
292
+ reduction x of m. Then Im = xI ⊆ (x), so I ⊂ (x) : m ⊆ (x) + m2 (otherwise the socle of
293
+ R′ = R/xR has order 1, impossible as R′ is Gorenstein of length at least 3). As x is general,
294
+ working inside the vector space m/m2, we see that I ⊆ m2.
295
+ The set of m-primary Ulrich ideals in R is not empty, as it contains high enough powers
296
+ of m. Thus, we can pick an element I in this set maximal with respect to inclusion. By the
297
+ last claim, I ⊆ m2, and hence I is also Elias by 1.3. Now 2.4 and 2.5 imply that I = xJ for
298
+ some NZD x ∈ m, so J is an Ulrich ideal strictly containing I, and that’s the contradiction
299
+ we need.
300
+ (3): If R = A, then n is Elias by 1.2, hence A is regular by part (1). Thus R is also
301
+ regular, and eli(R) = 1. If R strictly contains A, then cA(R) = A :Q(A) R = n, hence
302
+ 6
303
+
304
+ n ∼= HomA(R, A) ∼= ωR. So n is a canonical ideal of R. On the other hand, as A is not
305
+ regular, µA(R) = 2 (dualize the exact sequence 0 → n → A → A/n → 0 and identify R with
306
+ n∗ = HomA(n, A)). Thus ℓA(R/n) = 2, so ℓR(R/n) ≤ 2, which forces m2 ⊂ n, and since n is
307
+ Elias, so is m2 by 1.3.
308
+
309
+ Example 3.4. We give some examples of item (3) in the previous Theorem.
310
+ First let
311
+ A = R[[t, it]] with i2 = −1. Then R = C[[t]].
312
+ Next, let H = ⟨a1, . . . , an⟩ be any symmetric semigroup and b be the Frobenius number
313
+ of H. Let A = k[[H]] be the complete Gorenstein numerical semigroup ring of H. Then
314
+ R = k[[⟨a1, . . . , an, b⟩]] has Elias index 2, unless if H = ⟨2, 3⟩, in which case eli(R) = 1.
315
+ Examples are R = k[[te, te+1, te2−e−1]] for e ≥ 3. For such ring we have type(R) = 2,
316
+ e(R) = e, gll(R) = e − 1, ulr(R) = e − 1, yet eli(R) = 2. These examples show that one can
317
+ not hope to get upper bounds for gll(R) or ulr(R) just using eli(R).
318
+ 4. Elias ideals in Gorenstein rings and Auslander index
319
+ In this section we focus on Gorenstein rings. Throughout this section, let (R, m, k) be
320
+ a local Gorenstein ring of dimension one and I ⊂ R an m-primary ideal. Recall that for
321
+ a finitely generated module M, the Auslander δ-invariant of M, δ(M), is defined to be the
+ smallest number s such that there is a surjection R^s ⊕ N → M, where N is a maximal
+ Cohen-Macaulay module with no nonzero free direct summands. The first s such that
323
+ δ(R/ms) = 1 is called the Auslander index of R, denoted index(R).
324
+ It turns out that Elias ideals are precisely those whose quotient has Auslander invariant
325
+ one. We collect here this fact and a few others. They are mostly known or can be deduced
326
+ easily from results in previous sections, or both.
327
+ Proposition 4.1. Let (R, m, k) be a local Gorenstein ring of dimension one and I ⊂ R an
328
+ m-primary ideal. We have:
329
+ (1) δ(R/I) = 1 if and only if I is Elias.
330
+ (2) Suppose R is Gorenstein.
331
+ Then I is Elias if and only if for each NZD x ∈ I,
332
+ x ∈ m(x : I).
333
+ (3) Suppose R is Gorenstein. For a NZD x ∈ I, x : I is Elias if and only if x ∈ mI. In
334
+ particular, if x ∈ m2, then x : m is Elias.
335
+ (4) I is Elias if and only if 1 ∈ mI−1, where I−1 = R :Q I. If I is Elias, then I ⊆
336
+ m trace(I).
337
+ Proof. Part (1) is a special case of a result by Ding, [6, Proposition 1.2] and our definition
338
+ of Elias ideal. Part (2) and (3) are special cases of (3) and (4) of 1.3, as in that case (x) is
339
+ isomorphic to the canonical module.
340
+ Part (4) is [6, 2.4, 2.5], and also follows easily from results above: the first assertion is
341
+ just a rewriting of (2). For the second assertion, it follows from the first that I ⊆ mII−1 =
342
+ m trace(I).
343
+
344
+ There have been considerable interest in the following question:
345
+ Question 4.2. Given an ideal I with δ(R/I) = 1, when can one say that I ⊂ (x) for some
346
+ NZD x ∈ m?
347
+ For instance, a conjecture of Ding asks whether index(R) = gll(R) always. From our
348
+ point of view, this is of course just a question about Elias ideals and Elias index. Thus, one
349
+ immediately obtains the following.
350
+ 7
351
+
352
+ Corollary 4.3. Let (R, m, k) be a local Gorenstein ring of dimension one and I ⊂ R an
353
+ m-primary ideal.
354
+ (1) If I contains a NZD x of order 1, then I is Elias if and only if I = (x).
355
+ (2) index(R) = eli(R).
356
+ (3) index(R) = gll(R) = ulr(R) + 1 if k is infinite and grm(R) is Cohen-Macaulay (this
357
+ happens for instance if R is standard graded or if R is a hypersurface).
358
+ Proof. For part (1), we apply (5) of Corollary 1.3. Part (2) is trivial from part (1) of 4.1.
359
+ Part (3) is [5, Theorem 2.1], [2, Corollary 2.11], and is also a consequence of 3.2.
360
+
361
+ Example 4.4. (Counter-examples to a result by Ding) In this example, we construct ex-
362
+ amples of homogeneous Elias ideals that are not inside principal ideals.
+ Let S = k[[x1, . . . , xn]], and J be a homogeneous ideal such that R = S/J is Gorenstein.
364
+ Let f ∈ S be an irreducible element of degree at least 2 but lower than the initial degree of
365
+ J, and such that the image of f in R is a NZD. Then I = fR : m is Elias by 4.1 but I is
366
+ not inside any principal ideal. For by the irreducibility of f, we must have fR : m = (f),
367
+ absurd.
368
+ This class of examples contradicts Theorem 3.1 in [6], which claims that for I homogeneous
369
+ in a graded Gorenstein R, δ(R/I) = 1 (equivalently, I is Elias) if and only if I ⊆ (x) for
370
+ some x ∈ m.
371
+ For concrete examples, one can take S = Q[[a, b]], J = (a3 − b3), and f = a2 + b2. If one
372
+ wants algebraically closed field, one can take S = C[[a, b, c]], J is a complete intersection of
373
+ two general cubics, and f = a2 + b2 + c2.
374
+ The mistake in [6, Theorem 3.1] is as follows. First, one derives that 1 = Σ zi yi/xi with
+ zi ∈ m and yi/xi ∈ I−1, and hence there is i such that deg(ziyi) = deg(xi), which is correct.
379
+ Then Ding claimed that there is u ∈ k such that ziyi = uxi. But this is not true. In the
380
+ first example above we have z1 = y1 = a, z2 = y2 = b, x1 = x2 = a2 + b2.
381
+ Example 4.5. (De Stefani’s counter-example to a conjecture of Ding, revisited) As men-
382
+ tioned above, Ding conjectured that index(R) = gll(R) always when R is Gorenstein. De Ste-
383
+ fani gives a clever counter-example in [2]. Let S = k[x, y, z]_(x,y,z), R = S/I with I = (x^2 − y^5, xy^2 + yz^3 − z^5).
384
+ Then index(R) = 5 but gll(R) = 6. We now show how some parts of the proof in [2], which
385
+ is quite involved, can be shortened using our results.
386
+ We note that since the Hilbert function of R is (1, 3, 5, 6, 7, 7, 8, 8, . . .) and e(R) = 8, we
387
+ get that m5 is Elias by Theorem 2.7. To conclude we need to show that m5 is not contained
388
+ in (y) for any NZD y ∈ m. Note that m6 is Ulrich by Hilbert functions. We first show one
389
+ can assume ord(y) = 1. Assume m5 ⊂ (y), m5 = yI, then m5 ∼= I. If ord(y) ≥ 2, then
390
+ ym3 ⊂ m5 = yI, so m3 ⊂ I. But as mI ∼= m6 is Ulrich, we get m2I ⊂ (x) for some minimal
391
+ reduction of m, thus m5 ⊂ m2I ⊂ (x). For the rest, one can follow [2].
392
+ References
393
+ [1] W. Bruns and J. Herzog, Cohen-Macaulay Rings, Cambridge Studies in Advanced Mathematics, 39,
394
+ Cambridge, Cambridge University Press, 1993.
395
+ [2] A. De Stefani, A counterexample to a conjecture of Ding, J. Algebra, 452, pp. 324–337, 2016.
396
+ [3] H. Dao, S. Maitra, P. Sridhar, On reflexive and I-Ulrich modules over curves, arXiv:2101.02641, Trans.
397
+ of Amer. Math. Soc., to appear.
398
+ [4] H. Dao, T. Kobayashi and R. Takahashi, Burch ideals and Burch rings, Algebra Number
+ Theory 14 (2020), no. 8, 2121–2150.
400
+ 8
401
+
402
+ [5] S. Ding, The associated graded ring and the index of a Gorenstein local ring, Proc. Amer. Math. Soc.,
403
+ 120 (4) (1994),1029–1033.
404
+ [6] S. Ding, Auslander’s δ-invariants of Gorenstein local rings, Proc. Amer. Math. Soc., 122 (3) (1994),
405
+ 649–656.
406
+ [7] J. Elias, On the canonical ideals of one-dimensional Cohen-Macaulay local rings, Proc. Edinb. Math.
407
+ Soc. (2) 59 (2016), no. 1, 77–90.
408
+ [8] S. Goto, Integral closedness of complete-intersection ideals, J. Algebra 108 (1987), no. 1, 151–160.
409
+ [9] W. Heinzer, L.J Ratliff and D.E. Rush, Basically full ideals in local rings, Journal of Algebra 250
410
+ (2002), 371–396.
411
+ [10] C. Huneke and I. Swanson, Integral closures of ideals, rings and modules, London Math. Society Lecture
412
+ Note Series 336, Cambridge University Press, 2006.
413
+ [11] H-B. Foxby and A. Thorup, Minimal injective resolutions under flat base change, Proc. Amer. Math.
414
+ Soc., 67 (1): 27–31, 1977.
415
+ [12] J. Watanabe, m-full ideals, Nagoya Math. J. 106 (1987), 101–111.
416
+ Hailong Dao, Department of Mathematics, University of Kansas, 405 Snow Hall, 1460
417
+ Jayhawk Blvd., Lawrence, KS 66045
418
+ Email address: [email protected]
419
+ 9
420
+
IdAyT4oBgHgl3EQfr_me/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,519 @@
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf,len=518
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
3
+ page_content='00569v1 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
4
+ page_content='AC] 2 Jan 2023 ELIAS IDEALS HAILONG DAO Abstract.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
5
+ page_content=' Let (R, m) be a one dimensional local Cohen-Macaulay ring.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
6
+ page_content=' An m-primary ideal I of R is Elias if the types of I and of R/I are equal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
7
+ page_content=' Canonical and principal ideals are Elias, and Elias ideals are closed under inclusion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
8
+ page_content=' We give multiple characterizations of Elias ideals and concrete criteria to identify them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
9
+ page_content=' We connect Elias ideals to other well-studied definitions: Ulrich, m-full, integrally closed, trace ideals, etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
10
+ page_content=' Applications are given regarding canonical ideals, conductors and the Auslander index.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
11
+ page_content=' Introduction Let (R, m) be a local Cohen-Macaulay ring of dimension one and I be an m-primary ideal of R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
12
+ page_content=' We say that I is Elias if the Cohen-Macaulay types of I and R/I coincide.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
13
+ page_content=' From standard facts, principal ideals or canonical ideals are Elias, and we will soon see that this property begets a rather rich and interesting theory.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
14
+ page_content=' Our work is heavily influenced by a nice result in [7], where Elias proves that any ideal ω that lies inside a high enough power of m and such that R/ω is Gorenstein must be a canonical ideal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
15
+ page_content=' Although not stated explicitly there, the proof showed that any ideal that lies in a high enough power of m is Elias, in our sense.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
16
+ page_content=' Another inspiration for the present work is [2], where De Stefani studies, in our language, powers of m that are Elias in a Gorenstein local ring, and gives a counter-example to a conjecture by Ding (see Section 4 for the precise connection).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
17
+ page_content=' In this note, we study Elias ideals in depth.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
18
+ page_content=' They admit many different characterizations, and enjoy rather useful properties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
19
+ page_content=' For instance, they are closed under inclusion, and principal or canonical ideals are Elias.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
20
+ page_content=' On the other hand, conductor ideals or regular trace ideals are not Elias.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
21
+ page_content=' When R is Gorenstein, they are precisely ideals such that the Auslander index δ(R/I) is 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
22
+ page_content=' We are able to obtain many criteria to check whether an ideal is Elias, using very accessible information such as the minimal number or valuations of generators.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
23
+ page_content=' Combining them immediately gives sharp bounds and information on conductor or canonical ideals, which can be tricky to obtain otherwise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
24
+ page_content=' There are several obvious ways to extend the present definitions and results to higher dimension rings or to modules.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
25
+ page_content=' However, we choose to focus on the ideals in dimension one case here as they are already interesting enough, and also to keep the paper short.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
26
+ page_content=' We hope to address the more general theory in future works.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
27
+ page_content=' We now describe briefly the structure and key results of the paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
28
+ page_content=' In section 1 we give the formal definition of Elias ideals and prove several key results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
29
+ page_content=' Theorem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
30
+ page_content='2 contains several equivalent characterizations of Elias ideals.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
31
+ page_content=' Corollary 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
32
+ page_content='3 collects important consequences, for instance that Elias ideals are closed under ideal containment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
33
Also, criteria for Elias ideals using colon ideals are given. Next, Proposition 1.4 establishes the fundamental change of rings result that is used frequently in the sequel.

Section 2 connects Elias ideals to several well-studied classes of ideals: Ulrich ideals, m-full ideals, full ideals, integrally closed ideals, etc. After some basic observations (2.3, 2.4, 2.5), we give Theorem 2.7 and Proposition 2.14, which contain concrete ways to recognize Elias ideals using basic information such as the number of generators or valuations. We also derive that conductor ideals or regular trace ideals are not Elias (Corollary 2.13). This indicates one useful application: if we know, for instance, that m^2 is Elias, then the conductor or any regular trace ideal must contain an element of m-adic order 1.

Given the previous section, it is natural to study the Elias index eli(R), namely the first power of m that is Elias, and we do so in Section 3. The first main result here is Theorem 3.2, connecting this index to the generalized Loewy length and the regularity of the associated graded ring. Next, in Theorem 3.3, we characterize rings with small indexes: eli(R) = 1 if and only if R is regular, while eli(R) = 2 together with R being Gorenstein is equivalent to e(R) = 2. We give a large class of non-Gorenstein rings with Elias index 2 (3.4).

Lastly, in Section 4 we focus on the special case of Gorenstein rings. In such a situation, we observe that Elias ideals are precisely the ones whose quotient has Auslander δ-invariant one. This immediately allows us to apply what we have to recover old results about the Auslander invariant and Auslander index in 4.1 and 4.3. We give a counter-example to a theorem by Ding and also revisit a counter-example to a conjecture by Ding given in [2] (Examples 4.4 and 4.5).

Acknowledgements: It is a pleasure to thank Juan Elias and Alessandro Di Stefani for helpful comments and encouragements. The author is partially supported by the Simons Collaboration Grant FND0077558.

2020 Mathematics Subject Classification. Primary: 13D02, 13H10. Secondary: 14B99.
1. Elias ideals: definitions and basic results

Throughout the paper, let (R, m, k) be a Cohen-Macaulay local ring of dimension one. For a module M, set type_R(M) = dim_k Ext_R^{dim M}(k, M). Set Q = Q(R) to be the total ring of fractions of R. Set e = e(R), the Hilbert-Samuel multiplicity of R. For an element x ∈ R, the m-adic order of x, denoted ord(x), is the smallest a such that x ∈ m^a. The order of an ideal I, denoted ord(I), is the minimum order of its elements.

Definition 1.1. We say that an m-primary ideal I is an Elias ideal if it satisfies type(I) = type(R/I).

Theorem 1.2. We always have type(I) ≥ type(R/I). The following are equivalent.
(1) type(I) = type(R/I).
(2) For any NZD x ∈ m, xI : m ⊆ (x).
(3) For any NZD x ∈ m, xI : m = x(I : m).
(4) For some NZD x ∈ m, xI : m ⊆ (x).
(5) For some NZD x ∈ m, xI : m = x(I : m).
(6) I :_Q m ⊆ R.
(7) K ⊆ m(K :_Q I) (assuming R admits a canonical ideal K).

Proof. Let x be a NZD. Then

type(I) = type(I/xI) = dim_k (xI : m)/xI ≥ dim_k x(I : m)/xI = dim_k (I : m)/I = type(R/I).

Thus, type(I) = type(R/I) if and only if xI : m = x(I : m). Now, xI : m ⊆ (x) is equivalent to xI : m = xJ for some ideal J, as x is a NZD. Rewriting this as xJm ⊆ xI, which is equivalent to Jm ⊆ I, we get J ⊆ I : m. On the other hand x(I : m) ⊆ xI : m, thus J = I : m. That establishes the equivalence of the first five items. Note that for any NZD x ∈ m, xI : m = x(I :_Q m). Thus, (6) is equivalent to (3). Let K be a canonical ideal. Apply Hom_R(−, K) to the sequence 0 → I → R → R/I → 0; identifying Hom_R(I, K) with K :_Q I, we get 0 → K → K :_Q I → Ext^1_R(R/I, K) = ω_{R/I} → 0. Since type(I) = µ(K :_Q I) and type(R/I) = µ(ω_{R/I}), the equivalence of (7) and (1) follows. □
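As a quick illustration of criterion (6), added here for the reader and not part of the original argument: in the cusp R = k[[t^2, t^3]] with m = (t^2, t^3), the fraction t ∈ Q satisfies t · t^2 = t^3 ∈ m and t · t^3 = t^4 ∈ m, so t ∈ m :_Q m; but t ∉ R, hence m :_Q m ⊈ R and m is not Elias. This is consistent with Theorem 3.3(1) below, by which m is Elias only when R is regular.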
Corollary 1.3. We have:
(1) If I is isomorphic to R or the canonical module of R (assuming its existence), then I is Elias.
(2) If I is Elias, then so is J for any ideal J ⊆ I (being Elias is closed under inclusion).
(3) Let K be a canonical ideal of R and I be an ideal containing K. Then I is Elias if and only if K ⊆ m(K :_R I).
(4) Let K be a canonical ideal of R and I be an ideal such that K ⊆ I. Then K : I is Elias if and only if K ⊆ mI.
(5) Suppose that I contains a canonical ideal K such that ord(K) = 1. Then I is Elias if and only if I = K.

Proof. For the first claim, I :_Q m ⊂ I :_Q I = R. For the second claim, we have J :_Q m ⊂ I :_Q m. For (3), first note that K :_Q I ⊂ K :_Q K = R, so K :_Q I = K :_R I, and we can use part (7) of Theorem 1.2. For part (4), note that K : (K : I) = I, hence we can apply part (3). For part (5), we again apply part (3): if K ⊊ I, then m(K :_R I) ⊆ m^2, contradicting ord(K) = 1. □

The following change of rings result will be used frequently in what follows.

Proposition 1.4. Let (R, m) → (S, n) be a local flat ring extension such that dim S = 1 and S is Noetherian. Then I is an Elias ideal of R if and only if IS is an Elias ideal of S.

Proof. Under the assumption we have type_R(M) · type_{S/mS}(S/mS) = type_S(M ⊗_R S) for any finitely generated R-module M (see for instance [11]), thus the result follows. □
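A remark added for orientation (not in the original text): Proposition 1.4 is what licenses the reductions "we can assume k is infinite" used in the proofs of 2.3 and 3.3. One passes to a flat local extension R → S of the same dimension whose closed fibre S/mS is a field with infinitely many elements; then type_{S/mS}(S/mS) = 1, so the formula in the proof gives type_R(M) = type_S(M ⊗_R S), and being Elias is preserved in both directions.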
2. Elias ideals and other special ideals

Definition 2.1. Let I be an m-primary ideal.
I is called Ulrich (as an R-module) if µ(I) = e(R). Assuming k is infinite, I is Ulrich if and only if xI = mI for some x ∈ m (equivalently, for any x ∈ m such that ℓ(R/xR) = e(R)).
I is called m-full if Im : x = I for some x ∈ m.
I is called full (or basically full) if Im : m = I.

Remark 2.2. When the definition of special ideals such as Ulrich or m-full ones involves an element x, we say that the property is witnessed by x. Note that being such an x is a Zariski-open condition (for the image of x in the vector space m/m^2). For more on these ideals, see [3, 10, 9, 12].

Proposition 2.3. Let I be an m-primary ideal. Let e be the Hilbert-Samuel multiplicity of R. The following are equivalent.
(1) I is Ulrich.
(2) type(I) = e.

Proof. We can assume k is infinite by making the flat extension R → R[t]_{(m,t)}. Let x ∈ m be such that ℓ(R/xR) = e. Then ℓ(I/xI) = e. Note that type(I) = ℓ(soc(I/xI)) ≤ ℓ(I/xI) = e, and equality happens precisely when m(I/xI) = 0, in other words, when I is Ulrich. □
Proposition 2.4. Let I be an m-primary ideal.
(1) Suppose k is infinite. If I is Ulrich, then it is m-full.
(2) Suppose k is infinite. If I is integrally closed, then it is m-full.
(3) If I is m-full, then it is full.

Proof. (1): We can find a NZD x such that Ix = Im, so Im : x = Ix : x = I. (2): see [8, Theorem 2.4]. (3): We have I ⊆ Im : m ⊆ Im : x, from which the assertion is clear. □

Proposition 2.5. Suppose I is m-full, witnessed by a NZD x ∈ m. The following are equivalent:
(1) I is Elias.
(2) I = xJ for some Ulrich ideal J.

Proof. Assume I is Elias. Since I is m-full witnessed by the NZD x, we have Im : x = I. We will show that I ⊆ (x). If not, then I contains an element s whose image in R/(x) is a nonzero element of the socle. Thus sm ⊂ Im ∩ (x) = x(Im : x) = xI, so s ∈ xI : m ⊂ (x), a contradiction. Since I ⊆ (x) we must have I = xJ for some J. We have Jx = I = Im : x = Jxm : x = Jm, so J is Ulrich.

Assume (2). Then I is Ulrich and also full by 2.4, so xI : m = mI : m = I = xJ ⊂ (x), thus I is Elias. □

Corollary 2.6. If e = 2 and k is infinite, then I is Elias if and only if I ⊆ (x) for some NZD x ∈ m.

Proof. Since e = 2, any ideal is either principal or Ulrich, and 2.4 together with 2.5 give what we want. □
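To make Corollary 2.6 concrete, here is a small example added for illustration: in R = k[[t^2, t^3]] (so e = 2), the maximal ideal m = (t^2, t^3) is not Elias, since m ⊆ (x) with x ∈ m would force m = (x) to be principal. On the other hand m^2 = (t^4, t^5) = t^2 · m ⊆ (t^2) is Elias, and it has exactly the shape I = xJ of Proposition 2.5, with x = t^2 and J = m Ulrich (µ(m) = 2 = e).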
+ page_content=' □ Theorem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
184
+ page_content='7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
185
+ page_content=' The following hold for an m primary ideal I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
186
+ page_content=' (1) If µ(I) < e and type(R/I) ≥ e − 1, then I is Elias.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
187
+ page_content=' (2) Assume µ(mI) ≤ µ(I) = e − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
188
+ page_content=' Then Im is Elias and Im : m = I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
189
+ page_content=' (3) Furthermore, assume R = S/(f) is a hypersurface, here S is a regular local ring of dimension 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
190
+ page_content=' Let J be an S ideal minimally generated by e elements, one of them is f.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
191
+ page_content=' Then JR is Elias.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
192
+ page_content=' Proof.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
193
+ page_content=' By the inequality type(I) ≥ type(R/I), we must have type(I) is e or e − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
194
+ page_content=' But if type(I) = e, then µ(I) = e by 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
195
+ page_content='3, contradiction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
196
+ page_content=' Next, we have: type(R/Im) = dimk Im : m Im ≥ dimk I Im = µ(I) ≥ e − 1 and Im is not Ulrich by assumption.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
197
+ page_content=' So Im is Elias and type(Im) = e − 1, which by the chain above implies that Im : m = I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
198
+ page_content=' 4 For the last part, let I = JR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
199
+ page_content=' Then µR(I) = e − 1 and type(R/I) = type(S/J) = e − 1, and we can apply the first part.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
200
+ page_content=' □ Example 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
201
+ page_content='8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
202
+ page_content=' Let R = k[[t4, t5, t11]] ∼= k[[a, b, c]]/(a4 − bc, b3 − ac, c2 − a3b2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
203
+ page_content=' Then m2 is Elias: one can check directly or note that µ(m) = µ(m2) = 3 = e(R) − 1 and use 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
204
+ page_content='7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
205
+ page_content=' But m2 is not contained in (x) for any (x).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
206
+ page_content=' Example 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
207
+ page_content='9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
208
+ page_content=' Let R = k[[t6, t7, t15]] ∼= k[[a, b, c]]/(a5 − c2, b3 − ac).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
209
+ page_content=' Then the Hilbert function is {1, 3, 4, 5, 5, 6, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
210
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
211
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
212
+ page_content=' }, thus m4 is Elias.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
213
+ page_content=' In this case, m4 ⊆ (a), so m4 is trivially Elias.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
214
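The numerical claims in Examples 2.8 and 2.9 are easy to check by machine. Below is a minimal Python sketch added for illustration (the helper names are ours): for a numerical semigroup ring R = k[[t^a : a ∈ gens]] the powers m^n are monomial ideals, so dim_k m^n/m^{n+1} can be read off from valuation sets.

from itertools import combinations_with_replacement

def semigroup(gens, bound):
    # elements of the numerical semigroup generated by gens, up to bound
    reachable = [False] * (bound + 1)
    reachable[0] = True
    for v in range(bound + 1):
        if reachable[v]:
            for g in gens:
                if v + g <= bound:
                    reachable[v + g] = True
    return {v for v, ok in enumerate(reachable) if ok}

def power_values(gens, n, bound):
    # valuation set of m^n: (sums of n generators) + (semigroup elements)
    S = semigroup(gens, bound)
    if n == 0:
        return S
    starts = {sum(c) for c in combinations_with_replacement(gens, n)}
    return {a + s for a in starts for s in S if a + s <= bound}

def hilbert_function(gens, n, bound=300):
    # dim_k m^n / m^(n+1), which equals mu(m^n) for n >= 1
    return len(power_values(gens, n, bound) - power_values(gens, n + 1, bound))

print([hilbert_function((4, 5, 11), n) for n in range(6)])   # Example 2.8: prints [1, 3, 3, 4, 4, 4], so mu(m) = mu(m^2) = 3 = e(R) - 1
print([hilbert_function((6, 7, 15), n) for n in range(7)])   # Example 2.9: prints [1, 3, 4, 5, 5, 6, 6]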
Let R ⊂ S be a finite birational extension. We recall that the conductor of S in R, denoted c_R(S), is R :_{Q(R)} S.

Proposition 2.10. Let R ⊂ S be a finite birational extension. If IS = I (i.e., I is an S-module) and I is Elias, then I : m ⊆ c_R(S).

Proof. Let Q = Q(R). We have R ⊃ I :_Q m = IS :_Q mS ⊃ (I : m)S, so I : m ⊆ R :_Q S = c_R(S), as desired. □

Note that if IS = I, then trace(I) ⊆ c_R(S). So naturally, one can ask to extend 2.10 as follows:

Question 2.11. If I is Elias, do we have I : m ⊆ trace(I)?

The answer is no. In Example 2.8 above, with R = k[[t^4, t^5, t^11]] ≅ k[[a, b, c]]/(a^4 − bc, b^3 − ac, c^2 − a^3 b^2), one can check that trace(m^2) = (a^2, ab, b^2, c) while m^2 : m = m.

Corollary 2.12. Suppose m^2 is Elias (e.g., if R has minimal multiplicity) and is integrally closed. If m^2 ⊆ c_R(R̄) then m ⊆ c_R(R̄).

Proof. Apply 2.10 to I = m^2. □

Corollary 2.13. Assume that the integral closure R̄ of R is finite. Then the conductor of R̄ in R is not Elias. A regular trace ideal is not Elias.

Proof. Let c = c_R(R̄). Then c is an R̄-module, so if it were Elias we would have c : m ⊆ c, absurd! Any regular trace ideal must contain c, see for instance [3], so it cannot be Elias either, by 1.3. □
The following is simple but quite useful for constructing Elias ideals from minimal generators of Ulrich ideals. See the examples that follow.

Proposition 2.14. Let I ⊂ J be regular ideals with J Ulrich. Let x ∈ m be a minimal reduction of m. Assume that my ⊈ xI for any minimal generator y of J. Then I is Elias.

Proof. The assumption implies that xI : m ⊆ mJ = xJ ⊂ (x). □

Example 2.15. Let R = k[[a_1, ..., a_n]]/(a_i a_j)_{1 ≤ i < j ≤ n}. Apply 2.14 with J = m and x = a_1 + a_2 + · · · + a_n. Note that each element f ∈ m has the form f = Σ α_i a_i^{s_i}, where the α_i are units or 0. Then a_i f = α_i a_i^{s_i + 1} and xf = Σ α_i a_i^{s_i + 1}. It follows easily that the condition my ⊈ xI for any minimal generator y of m is equivalent to a_i^2 ∉ xI for each i, which is equivalent to a_i ∉ I for each i. For instance, if R = ℚ[[a, b, c]]/(ab, bc, ca), then I = (a − b, b − c) is Elias. Since R/I = ℚ[[a]]/(a^2) is Gorenstein, I is a canonical ideal.
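An added sanity check of the concrete instance just given: in ℚ[[a, b, c]]/(ab, bc, ca), modding out by I = (a − b, b − c) identifies the images of a, b and c, and the relation ab = 0 then forces a^2 = 0; hence R/I ≅ ℚ[[a]]/(a^2), a Gorenstein ring of length 2. In particular a, b, c ∉ I, which is exactly the criterion a_i ∉ I obtained above.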
One can use valuations to construct Elias ideals from part of a minimal generating set of some Ulrich ideal.

Example 2.16. Let R = k[[t^n, t^{n+1}, ..., t^{2n−1}]]. Let I = (t^n, ..., t^{2n−2}). Apply 2.14 with J = m and x = t^n. Let ν be the t-adic valuation on R. Note that for any minimal generator y ∈ J = m, 3n − 1 ∈ ν(ym). On the other hand 3n − 1 ∉ ν(xI), so ym ⊈ xI. It follows that I, and any ideal contained in I, is Elias. Note that again, since R/I is Gorenstein, I is actually a canonical ideal.
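A mechanical check of the valuation claim in Example 2.16, added for illustration; we take n = 5, so R = k[[t^5, ..., t^9]], x = t^5, I = (t^5, ..., t^8), and the relevant value is 3n − 1 = 14:

def semigroup(gens, bound):
    # same helper as in the sketch after Example 2.9
    reachable = [False] * (bound + 1)
    reachable[0] = True
    for v in range(bound + 1):
        if reachable[v]:
            for g in gens:
                if v + g <= bound:
                    reachable[v + g] = True
    return {v for v, ok in enumerate(reachable) if ok}

def ideal_values(ideal_gens, S, bound):
    # valuation set of the monomial ideal generated by t^a, a in ideal_gens
    return {a + s for a in ideal_gens for s in S if a + s <= bound}

n, bound = 5, 60
S = semigroup(range(n, 2 * n), bound)                                     # the semigroup <n, ..., 2n-1>
m_vals = ideal_values(range(n, 2 * n), S, bound)                          # nu(m)
xI_vals = ideal_values([n + i for i in range(n, 2 * n - 1)], S, bound)    # nu(xI) with x = t^n, I = (t^n, ..., t^(2n-2))
target = 3 * n - 1
print(target in xI_vals)                                   # False: 3n-1 is not the order of any element of xI
print(all(target - j in m_vals for j in range(n, 2 * n)))  # True: 3n-1 lies in nu(y*m) for every minimal generator y = t^j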
3. Elias index

Definition 3.1. One defines the following:
Let the Elias index of R, denoted by eli(R), be the smallest s such that m^s is Elias.
Let the generalized Loewy length of R, denoted by gll(R), be the infimum of s such that m^s ⊆ (x) for some x ∈ m.
Let the Ulrich index of R, denoted by ulr(R), be the smallest s such that m^s is Ulrich, that is, µ(m^s) = e.

Theorem 3.2. We have:
(1) eli(R) ≤ gll(R).
(2) gll(R) ≤ ulr(R) + 1, if the residue field k is infinite.
(3) Suppose that the associated graded ring gr_m(R) is Cohen-Macaulay and the residue field k is infinite. Then eli(R) = gll(R) = ulr(R) + 1.

Proof. If m^s ⊆ (x) then x must be a NZD, thus m^s is Elias by 1.3. The second statement follows from the definition. The condition that gr_m(R) is Cohen-Macaulay implies that m^s is m-full for all s > 0, so the last assertion follows from 2.5. □
Theorem 3.3. We have:
(1) eli(R) = 1 if and only if R is regular.
(2) Assume R is Gorenstein; then eli(R) = 2 if and only if e(R) = 2.
(3) Let (A, n) be a Gorenstein local ring of dimension one. Suppose that R = n :_{Q(A)} n is local. Then eli(R) ≤ 2.

Proof. (1): Assume m is Elias. To show that R is regular, we can make the extension R → R[t]_{(m,t)} and assume k is infinite. Choose a NZD x ∈ m − m^2; we have m^2 : x = m, that is, m is m-full witnessed by x. Then 2.5 shows that m ⊂ (x), thus m is principal.

(2): We can assume again by 1.4 that k is infinite. If e = 2, then m^2 ⊂ (x) for a minimal reduction x of m, thus m^2 is Elias. Now, suppose m^2 is Elias and e ≥ 3, and we need a contradiction. We first claim that any Ulrich ideal I of R must lie in m^2. Take any minimal reduction x of m. Then Im = xI ⊆ (x), so I ⊂ (x) : m ⊆ (x) + m^2 (otherwise the socle of R′ = R/xR would contain an element of order 1, impossible as R′ is Gorenstein of length at least 3). As x is general, working inside the vector space m/m^2, we see that I ⊆ m^2. The set of m-primary Ulrich ideals in R is not empty, as it contains high enough powers of m. Thus, we can pick an element I in this set maximal with respect to inclusion. By the last claim, I ⊆ m^2, and hence I is also Elias by 1.3. Now 2.4 and 2.5 imply that I = xJ for some NZD x ∈ m, so J is an Ulrich ideal strictly containing I, and that is the contradiction we need.

(3): If R = A, then n is Elias by 1.2, hence A is regular by part (1). Thus R is also regular, and eli(R) = 1. If R strictly contains A, then c_A(R) = A :_{Q(A)} R = n, hence n ≅ Hom_A(R, A) ≅ ω_R. So n is a canonical ideal of R. On the other hand, as A is not regular, µ_A(R) = 2 (dualize the exact sequence 0 → n → A → A/n → 0 and identify R with n^* = Hom_A(n, A)). Thus ℓ_A(R/n) = 2, so ℓ_R(R/n) ≤ 2, which forces m^2 ⊂ n, and since n is Elias, so is m^2 by 1.3. □
+ page_content=' □ Example 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
349
+ page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
350
+ page_content=' We give some examples of item (3) in the previous Theorem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
351
+ page_content=' First let A = R[[t, it]] with i2 = −1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
352
+ page_content=' Then R = C[[t]].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
353
+ page_content=' Next, let H = ⟨a1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
354
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
355
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
356
+ page_content=' , an⟩ be any symmetric semigroup and b be the Frobenius number of H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
357
+ page_content=' Let A = k[[H]] be the complete Gorenstein numerical semigroup ring of H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
358
+ page_content=' Then R = k[[⟨a1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
359
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
360
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
361
+ page_content=' , an, b⟩]] has Elias index 2, unless if H = ⟨2, 3⟩, in which case eli(R) = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
362
+ page_content=' Examples are R = k[[te, te+1, te2−e−1]] for e ≥ 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
363
+ page_content=' For such ring we have type(R) = 2, e(R) = e, gll(R) = e − 1, ulr(R) = e − 1, yet eli(R) = 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
364
+ page_content=' These examples show that one can not hope to get upper bounds for gll(R) or ulr(R) just using eli(R).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdAyT4oBgHgl3EQfr_me/content/2301.00569v1.pdf'}
4. Elias ideals in Gorenstein rings and Auslander index

In this section we focus on Gorenstein rings. Throughout this section, let (R, m, k) be a local Gorenstein ring of dimension one and I ⊂ R an m-primary ideal. Recall that for a finitely generated module M, the Auslander δ-invariant δ(M) is defined to be the smallest number s such that there is a surjection R^s ⊕ N → M with N a maximal Cohen-Macaulay module having no nonzero free direct summand. The first s such that δ(R/m^s) = 1 is called the Auslander index of R, denoted index(R).
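Recall in this connection the classical fact that δ(R/m) = δ(k) = 1 if and only if R is regular; consequently index(R) ≥ 2, and by Proposition 4.1(1) below also eli(R) ≥ 2, whenever R is singular.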
It turns out that Elias ideals are precisely those whose quotient has Auslander invariant one. We collect here this fact and a few others. They are mostly known or can be deduced easily from results in previous sections, or both.
Proposition 4.1. Let (R, m, k) be a local Gorenstein ring of dimension one and I ⊂ R an m-primary ideal. We have:
(1) δ(R/I) = 1 if and only if I is Elias.
(2) I is Elias if and only if x ∈ m(x : I) for each NZD x ∈ I.
(3) For a NZD x ∈ I, the ideal x : I is Elias if and only if x ∈ mI. In particular, if x ∈ m^2, then x : m is Elias.
(4) I is Elias if and only if 1 ∈ mI^{-1}, where I^{-1} = R :_Q I. If I is Elias, then I ⊆ m trace(I).
Proof. Part (1) is a special case of a result by Ding [6, Proposition 1.2] and our definition of Elias ideal. Parts (2) and (3) are special cases of (3) and (4) of 1.3, as in that case (x) is isomorphic to the canonical module. Part (4) is [6, 2.4, 2.5], and also follows easily from results above: the first assertion is just a rewriting of (2). For the second assertion, it follows from the first that I ⊆ mII^{-1} = m trace(I). □
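To illustrate criterion (4) on the simplest singular example (this computation is a routine check, included only for orientation): let R = k[[t^2, t^3]], a one-dimensional Gorenstein hypersurface, and let V = k[[t]], Q = k((t)). Then m = t^2 V, so m^{-1} = R :_Q m = V and mm^{-1} = t^2 V, which does not contain 1; hence m is not Elias, as expected, since δ(k) = 1 would force R to be regular. On the other hand, m^2 = t^4 V, so (m^2)^{-1} = t^{-2} V and m(m^2)^{-1} = V ∋ 1; hence m^2 is Elias. Thus eli(R) = 2, and index(R) = 2 by Corollary 4.3(2) below.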
There has been considerable interest in the following question:

Question 4.2. Given an ideal I with δ(R/I) = 1, when can one say that I ⊂ (x) for some NZD x ∈ m? For instance, a conjecture of Ding asks whether index(R) = gll(R) always.

From our point of view, this is of course just a question about Elias ideals and the Elias index. Thus, one immediately obtains the following.
Corollary 4.3. Let (R, m, k) be a local Gorenstein ring of dimension one and I ⊂ R an m-primary ideal.
(1) If I contains a NZD x of order 1, then I is Elias if and only if I = (x).
(2) index(R) = eli(R).
(3) index(R) = gll(R) = ulr(R) + 1 if k is infinite and gr_m(R) is Cohen-Macaulay (this happens, for instance, if R is standard graded or if R is a hypersurface).
Proof. For part (1), we apply (5) of Corollary 1.3. Part (2) is trivial from part (1) of 4.1. Part (3) is [5, Theorem 2.1], [2, Corollary 2.11], and is also a consequence of 3.2. □
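Returning to the illustration R = k[[t^2, t^3]] ≅ k[[x, y]]/(y^2 − x^3) used after Proposition 4.1: the associated graded ring gr_m(R) ≅ k[X, Y]/(Y^2) is Cohen-Macaulay (the initial form of y^2 − x^3 is Y^2), so when k is infinite part (3) applies and gives index(R) = gll(R) = 2 and ulr(R) = 1; indeed m^2 = t^4 k[[t]] ⊆ (t^2).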
Example 4.4. (Counter-examples to a result by Ding) In this example, we construct examples of homogeneous Elias ideals that are not contained in any principal ideal. Let S = k[[x_1, ..., x_n]] and let J be a homogeneous ideal such that R = S/J is Gorenstein. Let f ∈ S be an irreducible element of degree at least 2 but lower than the initial degree of J, and such that the image of f in R is a NZD. Then I = fR : m is Elias by 4.1, but I is not contained in any principal ideal: otherwise, by the irreducibility of f, we would have fR : m = (f), which is absurd.
This class of examples contradicts Theorem 3.1 in [6], which claims that for a homogeneous ideal I in a graded Gorenstein ring R, δ(R/I) = 1 (equivalently, I is Elias) if and only if I ⊆ (x) for some x ∈ m. For concrete examples, one can take S = ℚ[[a, b]], J = (a^3 − b^3), and f = a^2 + b^2. If one wants an algebraically closed field, one can take S = ℂ[[a, b, c]], with J a complete intersection of two general cubics and f = a^2 + b^2 + c^2.
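For the reader's convenience, we note why the first concrete example satisfies the hypotheses (a routine verification): f = a^2 + b^2 is irreducible in S, since it is not a product of linear forms over ℚ; its degree 2 is at least 2 and smaller than the initial degree 3 of J; and since a^3 − b^3 = (a − b)(a^2 + ab + b^2) with both factors irreducible, the minimal primes of R are (a − b) and (a^2 + ab + b^2), neither of which contains f, so the image of f in R is a NZD.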
The mistake in [6, Theorem 3.1] is as follows. First, one derives that 1 = Σ_i z_i · (y_i/x_i) with z_i ∈ m and y_i/x_i ∈ I^{-1}, and hence that there is an i with deg(z_i y_i) = deg(x_i); this step is correct. Ding then claimed that there is u ∈ k such that z_i y_i = u x_i. But this is not true: in the first example above we have z_1 = y_1 = a, z_2 = y_2 = b, and x_1 = x_2 = a^2 + b^2.
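Written out (using only the data just listed), the decomposition in that example is 1 = a · (a/(a^2 + b^2)) + b · (b/(a^2 + b^2)), so deg(z_i y_i) = 2 = deg(x_i) for both i, yet z_1 y_1 = a^2 and z_2 y_2 = b^2 are not scalar multiples of x_1 = x_2 = a^2 + b^2.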
Example 4.5. (De Stefani's counter-example to a conjecture of Ding, revisited) As mentioned above, Ding conjectured that index(R) = gll(R) always when R is Gorenstein. De Stefani gives a clever counter-example in [2]. Let S = k[x, y, z]_(x,y,z), let I = (x^2 − y^5, xy^2 + yz^3 − z^5), and set R = S/I. Then index(R) = 5 but gll(R) = 6. We now show how some parts of the proof in [2], which is quite involved, can be shortened using our results. Since the Hilbert function of R is (1, 3, 5, 6, 7, 7, 8, 8, ...) and e(R) = 8, the ideal m^5 is Elias by Theorem 2.7. To conclude, we need to show that m^5 is not contained in (w) for any NZD w ∈ m. Note that m^6 is Ulrich, again by the Hilbert function. We first show that one can assume ord(w) = 1. Suppose m^5 ⊂ (w) and write m^5 = wJ, so that m^5 ≅ J. If ord(w) ≥ 2, then wm^3 ⊂ m^5 = wJ, so m^3 ⊂ J. But as mJ ≅ m^6 is Ulrich, we get m^2J ⊂ (u) for some minimal reduction (u) of m, and thus m^5 ⊂ m^2J ⊂ (u); since u has order 1, we may therefore assume ord(w) = 1 from the start. For the rest, one can follow [2].
References

[1] W. Bruns and J. Herzog, Cohen-Macaulay rings, Cambridge Studies in Advanced Mathematics 39, Cambridge University Press, Cambridge, 1993.
[2] A. De Stefani, A counterexample to a conjecture of Ding, J. Algebra 452 (2016), 324–337.
[3] H. Dao, S. Maitra and P. Sridhar, On reflexive and I-Ulrich modules over curves, arXiv:2101.02641, Trans. Amer. Math. Soc., to appear.
[4] H. Dao, T. Kobayashi and R. Takahashi, Burch ideals and Burch rings, Algebra Number Theory 14 (2020), no. 8, 2121–2150.
[5] S. Ding, The associated graded ring and the index of a Gorenstein local ring, Proc. Amer. Math. Soc. 120 (1994), no. 4, 1029–1033.
[6] S. Ding, Auslander's δ-invariants of Gorenstein local rings, Proc. Amer. Math. Soc. 122 (1994), no. 3, 649–656.
[7] J. Elias, On the canonical ideals of one-dimensional Cohen-Macaulay local rings, Proc. Edinb. Math. Soc. (2) 59 (2016), no. 1, 77–90.
[8] S. Goto, Integral closedness of complete-intersection ideals, J. Algebra 108 (1987), no. 1, 151–160.
[9] W. Heinzer, L. J. Ratliff and D. E. Rush, Basically full ideals in local rings, J. Algebra 250 (2002), 371–396.
[10] C. Huneke and I. Swanson, Integral closure of ideals, rings, and modules, London Math. Soc. Lecture Note Series 336, Cambridge University Press, 2006.
[11] H.-B. Foxby and A. Thorup, Minimal injective resolutions under flat base change, Proc. Amer. Math. Soc. 67 (1977), no. 1, 27–31.
[12] J. Watanabe, m-full ideals, Nagoya Math. J. 106 (1987), 101–111.
Hailong Dao, Department of Mathematics, University of Kansas, 405 Snow Hall, 1460 Jayhawk Blvd., Lawrence, KS 66045

Email address: hdao@ku.edu