diff --git "a/data_all_eng_slimpj/shuffled/split2/finalzzacug" "b/data_all_eng_slimpj/shuffled/split2/finalzzacug" new file mode 100644--- /dev/null +++ "b/data_all_eng_slimpj/shuffled/split2/finalzzacug" @@ -0,0 +1,5 @@ +{"text":"\\section{UNet and Attention UNet:}\n\\begin{table}[htbp]\n\\centering\n\\caption{Architecture used: UNet. Patch size: 128x128 pixels (best fold is reported in bold font).}\n\\label{tab:my-table1}\n\\resizebox{\\textwidth}{!}{%\n\\begin{tabular}{|c|c|c|c|c|c|c|c|c|}\n\\hline\n{\\color[HTML]{24292F} \\textbf{fold\\_name}} & {\\color[HTML]{24292F} \\textbf{dev\\_dice}} & {\\color[HTML]{24292F} \\textbf{dev\\_f1}} & {\\color[HTML]{24292F} \\textbf{dev\\_recall}} & {\\color[HTML]{24292F} \\textbf{dev\\_precision}} & {\\color[HTML]{24292F} \\textbf{test\\_dice}} & {\\color[HTML]{24292F} \\textbf{test\\_f1}} & {\\color[HTML]{24292F} \\textbf{test\\_recall}} & {\\color[HTML]{24292F} \\textbf{test\\_precision}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_00} & {\\color[HTML]{24292F} 0.7151} & {\\color[HTML]{24292F} 0.7165} & {\\color[HTML]{24292F} 0.6674} & {\\color[HTML]{24292F} 0.8017} & {\\color[HTML]{24292F} 0.6753} & {\\color[HTML]{24292F} 0.6707} & {\\color[HTML]{24292F} 0.668} & {\\color[HTML]{24292F} 0.784} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_01} & {\\color[HTML]{24292F} 0.7034} & {\\color[HTML]{24292F} 0.6933} & {\\color[HTML]{24292F} 0.699} & {\\color[HTML]{24292F} 0.718} & {\\color[HTML]{24292F} 0.7046} & {\\color[HTML]{24292F} 0.7035} & {\\color[HTML]{24292F} 0.7495} & {\\color[HTML]{24292F} 0.7475} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_02} & {\\color[HTML]{24292F} 0.6963} & {\\color[HTML]{24292F} 0.6932} & {\\color[HTML]{24292F} 0.6873} & {\\color[HTML]{24292F} 0.7339} & {\\color[HTML]{24292F} 0.6781} & {\\color[HTML]{24292F} 0.6765} & {\\color[HTML]{24292F} 0.7423} & {\\color[HTML]{24292F} 0.7094} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_00} & {\\color[HTML]{24292F} 0.7011} & 
{\\color[HTML]{24292F} 0.7037} & {\\color[HTML]{24292F} 0.714} & {\\color[HTML]{24292F} 0.7239} & {\\color[HTML]{24292F} 0.7032} & {\\color[HTML]{24292F} 0.6962} & {\\color[HTML]{24292F} 0.6684} & {\\color[HTML]{24292F} 0.8052} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{test\\_01\\_cv\\_01}} & {\\color[HTML]{24292F} \\textbf{0.7231}} & {\\color[HTML]{24292F} \\textbf{0.7118}} & {\\color[HTML]{24292F} \\textbf{0.6763}} & {\\color[HTML]{24292F} \\textbf{0.7801}} & {\\color[HTML]{24292F} \\textbf{0.7248}} & {\\color[HTML]{24292F} \\textbf{0.7192}} & {\\color[HTML]{24292F} \\textbf{0.7105}} & {\\color[HTML]{24292F} \\textbf{0.8067}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_02} & {\\color[HTML]{24292F} 0.72} & {\\color[HTML]{24292F} 0.7217} & {\\color[HTML]{24292F} 0.7519} & {\\color[HTML]{24292F} 0.7185} & {\\color[HTML]{24292F} 0.7141} & {\\color[HTML]{24292F} 0.7068} & {\\color[HTML]{24292F} 0.6811} & {\\color[HTML]{24292F} 0.8166} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_00} & {\\color[HTML]{24292F} 0.709} & {\\color[HTML]{24292F} 0.7156} & {\\color[HTML]{24292F} 0.7195} & {\\color[HTML]{24292F} 0.7423} & {\\color[HTML]{24292F} 0.7027} & {\\color[HTML]{24292F} 0.7004} & {\\color[HTML]{24292F} 0.7043} & {\\color[HTML]{24292F} 0.7855} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_01} & {\\color[HTML]{24292F} 0.7127} & {\\color[HTML]{24292F} 0.7195} & {\\color[HTML]{24292F} 0.7012} & {\\color[HTML]{24292F} 0.7618} & {\\color[HTML]{24292F} 0.6643} & {\\color[HTML]{24292F} 0.6608} & {\\color[HTML]{24292F} 0.6444} & {\\color[HTML]{24292F} 0.7996} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_02} & {\\color[HTML]{24292F} 0.6807} & {\\color[HTML]{24292F} 0.6838} & {\\color[HTML]{24292F} 0.6862} & {\\color[HTML]{24292F} 0.7167} & {\\color[HTML]{24292F} 0.6306} & {\\color[HTML]{24292F} 0.6296} & {\\color[HTML]{24292F} 0.6634} & {\\color[HTML]{24292F} 0.7316} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_00} & 
{\\color[HTML]{24292F} 0.6765} & {\\color[HTML]{24292F} 0.6813} & {\\color[HTML]{24292F} 0.6788} & {\\color[HTML]{24292F} 0.7183} & {\\color[HTML]{24292F} 0.6845} & {\\color[HTML]{24292F} 0.6855} & {\\color[HTML]{24292F} 0.8061} & {\\color[HTML]{24292F} 0.666} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_01} & {\\color[HTML]{24292F} 0.6959} & {\\color[HTML]{24292F} 0.6967} & {\\color[HTML]{24292F} 0.6244} & {\\color[HTML]{24292F} 0.8167} & {\\color[HTML]{24292F} 0.6883} & {\\color[HTML]{24292F} 0.879} & {\\color[HTML]{24292F} 0.7981} & {\\color[HTML]{24292F} 0.6777} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_02} & {\\color[HTML]{24292F} 0.6112} & {\\color[HTML]{24292F} 0.6008} & {\\color[HTML]{24292F} 0.61} & {\\color[HTML]{24292F} 0.6325} & {\\color[HTML]{24292F} 0.6521} & {\\color[HTML]{24292F} 0.6545} & {\\color[HTML]{24292F} 0.8018} & {\\color[HTML]{24292F} 0.6245} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{mean}} & {\\color[HTML]{24292F} 0.6954} & {\\color[HTML]{24292F} 0.6948} & {\\color[HTML]{24292F} 0.6847} & {\\color[HTML]{24292F} 0.7387} & {\\color[HTML]{24292F} 0.6852} & {\\color[HTML]{24292F} 0.6986} & {\\color[HTML]{24292F} 0.7198} & {\\color[HTML]{24292F} 0.7462} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{std}} & {\\color[HTML]{24292F} 0.0289} & {\\color[HTML]{24292F} 0.0313} & {\\color[HTML]{24292F} 0.0373} & {\\color[HTML]{24292F} 0.0462} & {\\color[HTML]{24292F} 0.0260} & {\\color[HTML]{24292F} 0.0596} & {\\color[HTML]{24292F} 0.0561} & {\\color[HTML]{24292F} 0.0615} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{max}} & {\\color[HTML]{24292F} 0.7231} & {\\color[HTML]{24292F} 0.7217} & {\\color[HTML]{24292F} 0.7519} & {\\color[HTML]{24292F} 0.8167} & {\\color[HTML]{24292F} 0.7248} & {\\color[HTML]{24292F} 0.879} & {\\color[HTML]{24292F} 0.8061} & {\\color[HTML]{24292F} 0.8166} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{min}} & {\\color[HTML]{24292F} 0.6112} & {\\color[HTML]{24292F} 0.6008} & {\\color[HTML]{24292F} 0.61} 
& {\\color[HTML]{24292F} 0.6325} & {\\color[HTML]{24292F} 0.6306} & {\\color[HTML]{24292F} 0.6296} & {\\color[HTML]{24292F} 0.6444} & {\\color[HTML]{24292F} 0.6245} \\\\ \\hline\n\\end{tabular}%\n}\n\\end{table}\n\n\\begin{table}[htbp]\n\\centering\n\\caption{Architecture used: Attention UNet. Patch size: 128x128 pixels (best fold is reported in bold font).}\n\\label{tab:my-table2}\n\\resizebox{\\textwidth}{!}{%\n\\begin{tabular}{|c|c|c|c|c|c|c|c|c|}\n\\hline\n{\\color[HTML]{24292F} \\textbf{fold\\_name}} & {\\color[HTML]{24292F} \\textbf{dev\\_dice}} & {\\color[HTML]{24292F} \\textbf{dev\\_f1}} & {\\color[HTML]{24292F} \\textbf{dev\\_recall}} & {\\color[HTML]{24292F} \\textbf{dev\\_precision}} & {\\color[HTML]{24292F} \\textbf{test\\_dice}} & {\\color[HTML]{24292F} \\textbf{test\\_f1}} & {\\color[HTML]{24292F} \\textbf{test\\_recall}} & {\\color[HTML]{24292F} \\textbf{test\\_precision}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_00} & {\\color[HTML]{24292F} 0.7654} & {\\color[HTML]{24292F} 0.7654} & {\\color[HTML]{24292F} 0.7833} & {\\color[HTML]{24292F} 0.8000} & {\\color[HTML]{24292F} 0.6405} & {\\color[HTML]{24292F} 0.6405} & {\\color[HTML]{24292F} 0.5959} & {\\color[HTML]{24292F} 0.8177} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_01} & {\\color[HTML]{24292F} 0.7216} & {\\color[HTML]{24292F} 0.7216} & {\\color[HTML]{24292F} 0.7293} & {\\color[HTML]{24292F} 0.7890} & {\\color[HTML]{24292F} 0.6734} & {\\color[HTML]{24292F} 0.6734} & {\\color[HTML]{24292F} 0.6387} & {\\color[HTML]{24292F} 0.8258} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_02} & {\\color[HTML]{24292F} 0.7428} & {\\color[HTML]{24292F} 0.7422} & {\\color[HTML]{24292F} 0.7425} & {\\color[HTML]{24292F} 0.8108} & {\\color[HTML]{24292F} 0.6499} & {\\color[HTML]{24292F} 0.6499} & {\\color[HTML]{24292F} 0.6320} & {\\color[HTML]{24292F} 0.7883} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_00} & {\\color[HTML]{24292F} 0.7646} & {\\color[HTML]{24292F} 0.7646} & 
{\\color[HTML]{24292F} 0.7876} & {\\color[HTML]{24292F} 0.7934} & {\\color[HTML]{24292F} 0.6851} & {\\color[HTML]{24292F} 0.6851} & {\\color[HTML]{24292F} 0.6582} & {\\color[HTML]{24292F} 0.7979} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_01} & {\\color[HTML]{24292F} 0.7272} & {\\color[HTML]{24292F} 0.7272} & {\\color[HTML]{24292F} 0.7916} & {\\color[HTML]{24292F} 0.7299} & {\\color[HTML]{24292F} 0.7128} & {\\color[HTML]{24292F} 0.7122} & {\\color[HTML]{24292F} 0.7454} & {\\color[HTML]{24292F} 0.7522} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_02} & {\\color[HTML]{24292F} 0.7335} & {\\color[HTML]{24292F} 0.7335} & {\\color[HTML]{24292F} 0.7593} & {\\color[HTML]{24292F} 0.7772} & {\\color[HTML]{24292F} 0.6909} & {\\color[HTML]{24292F} 0.6909} & {\\color[HTML]{24292F} 0.6836} & {\\color[HTML]{24292F} 0.7890} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_00} & {\\color[HTML]{24292F} 0.7658} & {\\color[HTML]{24292F} 0.7658} & {\\color[HTML]{24292F} 0.7946} & {\\color[HTML]{24292F} 0.7900} & {\\color[HTML]{24292F} 0.7184} & {\\color[HTML]{24292F} 0.7184} & {\\color[HTML]{24292F} 0.7109} & {\\color[HTML]{24292F} 0.8060} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{test\\_02\\_cv\\_01}} & {\\color[HTML]{24292F} \\textbf{0.7159}} & {\\color[HTML]{24292F} \\textbf{0.7153}} & {\\color[HTML]{24292F} \\textbf{0.7565}} & {\\color[HTML]{24292F} \\textbf{0.7429}} & {\\color[HTML]{24292F} \\textbf{0.7263}} & {\\color[HTML]{24292F} \\textbf{0.7263}} & {\\color[HTML]{24292F} \\textbf{0.7950}} & {\\color[HTML]{24292F} \\textbf{0.7254}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_02} & {\\color[HTML]{24292F} 0.7809} & {\\color[HTML]{24292F} 0.7809} & {\\color[HTML]{24292F} 0.8232} & {\\color[HTML]{24292F} 0.7855} & {\\color[HTML]{24292F} 0.6948} & {\\color[HTML]{24292F} 0.6948} & {\\color[HTML]{24292F} 0.7241} & {\\color[HTML]{24292F} 0.7488} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_00} & {\\color[HTML]{24292F} 0.8193} & 
{\\color[HTML]{24292F} 0.8193} & {\\color[HTML]{24292F} 0.8180} & {\\color[HTML]{24292F} 0.8559} & {\\color[HTML]{24292F} 0.6959} & {\\color[HTML]{24292F} 0.6959} & {\\color[HTML]{24292F} 0.6839} & {\\color[HTML]{24292F} 0.8008} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_01} & {\\color[HTML]{24292F} 0.6962} & {\\color[HTML]{24292F} 0.6962} & {\\color[HTML]{24292F} 0.6843} & {\\color[HTML]{24292F} 0.7908} & {\\color[HTML]{24292F} 0.7140} & {\\color[HTML]{24292F} 0.7140} & {\\color[HTML]{24292F} 0.7781} & {\\color[HTML]{24292F} 0.7281} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_02} & {\\color[HTML]{24292F} 0.7857} & {\\color[HTML]{24292F} 0.7857} & {\\color[HTML]{24292F} 0.8297} & {\\color[HTML]{24292F} 0.7896} & {\\color[HTML]{24292F} 0.7024} & {\\color[HTML]{24292F} 0.7024} & {\\color[HTML]{24292F} 0.8435} & {\\color[HTML]{24292F} 0.6571} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{mean}} & {\\color[HTML]{24292F} 0.7516} & {\\color[HTML]{24292F} 0.7515} & {\\color[HTML]{24292F} 0.7750} & {\\color[HTML]{24292F} 0.7879} & {\\color[HTML]{24292F} 0.6920} & {\\color[HTML]{24292F} 0.6920} & {\\color[HTML]{24292F} 0.7074} & {\\color[HTML]{24292F} 0.7698} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{std}} & {\\color[HTML]{24292F} 0.0334} & {\\color[HTML]{24292F} 0.0335} & {\\color[HTML]{24292F} 0.0408} & {\\color[HTML]{24292F} 0.0301} & {\\color[HTML]{24292F} 0.0254} & {\\color[HTML]{24292F} 0.0254} & {\\color[HTML]{24292F} 0.0703} & {\\color[HTML]{24292F} 0.0469} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{max}} & {\\color[HTML]{24292F} 0.8193} & {\\color[HTML]{24292F} 0.8193} & {\\color[HTML]{24292F} 0.8297} & {\\color[HTML]{24292F} 0.8559} & {\\color[HTML]{24292F} 0.7263} & {\\color[HTML]{24292F} 0.7263} & {\\color[HTML]{24292F} 0.8435} & {\\color[HTML]{24292F} 0.8258} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{min}} & {\\color[HTML]{24292F} 0.6962} & {\\color[HTML]{24292F} 0.6962} & {\\color[HTML]{24292F} 0.6843} & {\\color[HTML]{24292F} 
0.7299} & {\\color[HTML]{24292F} 0.6405} & {\\color[HTML]{24292F} 0.6405} & {\\color[HTML]{24292F} 0.5959} & {\\color[HTML]{24292F} 0.6571} \\\\ \\hline\n\\end{tabular}%\n}\n\\end{table}\n\n\\begin{table}[htbp]\n\\centering\n\\caption{Architecture used: UNet. Patch size: 256x256 pixels (best fold is reported in bold font).}\n\\label{tab:my-table3}\n\\resizebox{\\textwidth}{!}{%\n\\begin{tabular}{|c|c|c|c|c|c|c|c|c|}\n\\hline\n{\\color[HTML]{24292F} \\textbf{fold\\_name}} & {\\color[HTML]{24292F} \\textbf{dev\\_dice}} & {\\color[HTML]{24292F} \\textbf{dev\\_f1}} & {\\color[HTML]{24292F} \\textbf{dev\\_recall}} & {\\color[HTML]{24292F} \\textbf{dev\\_precision}} & {\\color[HTML]{24292F} \\textbf{test\\_dice}} & {\\color[HTML]{24292F} \\textbf{test\\_f1}} & {\\color[HTML]{24292F} \\textbf{test\\_recall}} & {\\color[HTML]{24292F} \\textbf{test\\_precision}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_00} & {\\color[HTML]{24292F} 0.707} & {\\color[HTML]{24292F} 0.7095} & {\\color[HTML]{24292F} 0.6557} & {\\color[HTML]{24292F} 0.7962} & {\\color[HTML]{24292F} 0.6423} & {\\color[HTML]{24292F} 0.6363} & {\\color[HTML]{24292F} 0.5839} & {\\color[HTML]{24292F} 0.8071} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_01} & {\\color[HTML]{24292F} 0.6509} & {\\color[HTML]{24292F} 0.6563} & {\\color[HTML]{24292F} 0.6744} & {\\color[HTML]{24292F} 0.6604} & {\\color[HTML]{24292F} 0.6783} & {\\color[HTML]{24292F} 0.6743} & {\\color[HTML]{24292F} 0.6563} & {\\color[HTML]{24292F} 0.7767} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_02} & {\\color[HTML]{24292F} 0.6663} & {\\color[HTML]{24292F} 0.6657} & {\\color[HTML]{24292F} 0.6725} & {\\color[HTML]{24292F} 0.6826} & {\\color[HTML]{24292F} 0.6368} & {\\color[HTML]{24292F} 0.6322} & {\\color[HTML]{24292F} 0.6356} & {\\color[HTML]{24292F} 0.7269} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{test\\_01\\_cv\\_00}} & {\\color[HTML]{24292F} \\textbf{0.6939}} & {\\color[HTML]{24292F} \\textbf{0.7099}} & 
{\\color[HTML]{24292F} \\textbf{0.7041}} & {\\color[HTML]{24292F} \\textbf{0.7319}} & {\\color[HTML]{24292F} \\textbf{0.6963}} & {\\color[HTML]{24292F} \\textbf{0.6893}} & {\\color[HTML]{24292F} \\textbf{0.6632}} & {\\color[HTML]{24292F} \\textbf{0.7998}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_01} & {\\color[HTML]{24292F} 0.6411} & {\\color[HTML]{24292F} 0.642} & {\\color[HTML]{24292F} 0.6018} & {\\color[HTML]{24292F} 0.7157} & {\\color[HTML]{24292F} 0.6856} & {\\color[HTML]{24292F} 0.6785} & {\\color[HTML]{24292F} 0.6468} & {\\color[HTML]{24292F} 0.8008} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_02} & {\\color[HTML]{24292F} 0.6793} & {\\color[HTML]{24292F} 0.6829} & {\\color[HTML]{24292F} 0.6892} & {\\color[HTML]{24292F} 0.6979} & {\\color[HTML]{24292F} 0.6718} & {\\color[HTML]{24292F} 0.6654} & {\\color[HTML]{24292F} 0.6487} & {\\color[HTML]{24292F} 0.7732} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_00} & {\\color[HTML]{24292F} 0.6893} & {\\color[HTML]{24292F} 0.7077} & {\\color[HTML]{24292F} 0.716} & {\\color[HTML]{24292F} 0.716} & {\\color[HTML]{24292F} 0.6395} & {\\color[HTML]{24292F} 0.6352} & {\\color[HTML]{24292F} 0.6104} & {\\color[HTML]{24292F} 0.766} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_01} & {\\color[HTML]{24292F} 0.6911} & {\\color[HTML]{24292F} 0.698} & {\\color[HTML]{24292F} 0.6344} & {\\color[HTML]{24292F} 0.8027} & {\\color[HTML]{24292F} 0.621} & {\\color[HTML]{24292F} 0.6155} & {\\color[HTML]{24292F} 0.5748} & {\\color[HTML]{24292F} 0.7816} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_02} & {\\color[HTML]{24292F} 0.6462} & {\\color[HTML]{24292F} 0.649} & {\\color[HTML]{24292F} 0.6593} & {\\color[HTML]{24292F} 0.6638} & {\\color[HTML]{24292F} 0.5949} & {\\color[HTML]{24292F} 0.595} & {\\color[HTML]{24292F} 0.6676} & {\\color[HTML]{24292F} 0.6185} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_00} & {\\color[HTML]{24292F} 0.6432} & {\\color[HTML]{24292F} 0.6529} & 
{\\color[HTML]{24292F} 0.6323} & {\\color[HTML]{24292F} 0.7029} & {\\color[HTML]{24292F} 0.6507} & {\\color[HTML]{24292F} 0.65} & {\\color[HTML]{24292F} 0.7633} & {\\color[HTML]{24292F} 0.6242} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_01} & {\\color[HTML]{24292F} 0.6747} & {\\color[HTML]{24292F} 0.6877} & {\\color[HTML]{24292F} 0.6581} & {\\color[HTML]{24292F} 0.7453} & {\\color[HTML]{24292F} 0.6507} & {\\color[HTML]{24292F} 0.6521} & {\\color[HTML]{24292F} 0.8146} & {\\color[HTML]{24292F} 0.5945} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_02} & {\\color[HTML]{24292F} 0.5391} & {\\color[HTML]{24292F} 0.5355} & {\\color[HTML]{24292F} 0.5558} & {\\color[HTML]{24292F} 0.5535} & {\\color[HTML]{24292F} 0.5834} & {\\color[HTML]{24292F} 0.5856} & {\\color[HTML]{24292F} 0.8118} & {\\color[HTML]{24292F} 0.5149} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{mean}} & {\\color[HTML]{24292F} 0.660} & {\\color[HTML]{24292F} 0.666} & {\\color[HTML]{24292F} 0.654} & {\\color[HTML]{24292F} 0.706} & {\\color[HTML]{24292F} 0.646} & {\\color[HTML]{24292F} 0.642} & {\\color[HTML]{24292F} 0.673} & {\\color[HTML]{24292F} 0.715} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{std}} & {\\color[HTML]{24292F} 0.042} & {\\color[HTML]{24292F} 0.046} & {\\color[HTML]{24292F} 0.042} & {\\color[HTML]{24292F} 0.063} & {\\color[HTML]{24292F} 0.033} & {\\color[HTML]{24292F} 0.031} & {\\color[HTML]{24292F} 0.077} & {\\color[HTML]{24292F} 0.096} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{max}} & {\\color[HTML]{24292F} 0.707} & {\\color[HTML]{24292F} 0.710} & {\\color[HTML]{24292F} 0.716} & {\\color[HTML]{24292F} 0.803} & {\\color[HTML]{24292F} 0.696} & {\\color[HTML]{24292F} 0.689} & {\\color[HTML]{24292F} 0.815} & {\\color[HTML]{24292F} 0.807} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{min}} & {\\color[HTML]{24292F} 0.5391} & {\\color[HTML]{24292F} 0.5355} & {\\color[HTML]{24292F} 0.5558} & {\\color[HTML]{24292F} 0.5535} & {\\color[HTML]{24292F} 0.5834} & 
{\\color[HTML]{24292F} 0.5856} & {\\color[HTML]{24292F} 0.5748} & {\\color[HTML]{24292F} 0.5149} \\\\ \\hline\n\\end{tabular}%\n}\n\\end{table}\n\n\\begin{table}[htbp]\n\\centering\n\\caption{Architecture used: Attention UNet. Patch size: 256x256 pixels (best fold is reported in bold font).}\n\\label{tab:my-table4}\n\\resizebox{\\textwidth}{!}{%\n\\begin{tabular}{|c|c|c|c|c|c|c|c|c|}\n\\hline\n{\\color[HTML]{24292F} \\textbf{fold\\_name}} & {\\color[HTML]{24292F} \\textbf{dev\\_dice}} & {\\color[HTML]{24292F} \\textbf{dev\\_f1}} & {\\color[HTML]{24292F} \\textbf{dev\\_recall}} & {\\color[HTML]{24292F} \\textbf{dev\\_precision}} & {\\color[HTML]{24292F} \\textbf{test\\_dice}} & {\\color[HTML]{24292F} \\textbf{test\\_f1}} & {\\color[HTML]{24292F} \\textbf{test\\_recall}} & {\\color[HTML]{24292F} \\textbf{test\\_precision}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_00} & {\\color[HTML]{24292F} 0.7288} & {\\color[HTML]{24292F} 0.7288} & {\\color[HTML]{24292F} 0.8318} & {\\color[HTML]{24292F} 0.6819} & {\\color[HTML]{24292F} 0.6439} & {\\color[HTML]{24292F} 0.6439} & {\\color[HTML]{24292F} 0.6312} & {\\color[HTML]{24292F} 0.7429} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_01} & {\\color[HTML]{24292F} 0.6698} & {\\color[HTML]{24292F} 0.6698} & {\\color[HTML]{24292F} 0.7152} & {\\color[HTML]{24292F} 0.6855} & {\\color[HTML]{24292F} 0.6038} & {\\color[HTML]{24292F} 0.6038} & {\\color[HTML]{24292F} 0.5860} & {\\color[HTML]{24292F} 0.7462} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_02} & {\\color[HTML]{24292F} 0.6644} & {\\color[HTML]{24292F} 0.6644} & {\\color[HTML]{24292F} 0.6396} & {\\color[HTML]{24292F} 0.7920} & {\\color[HTML]{24292F} 0.5638} & {\\color[HTML]{24292F} 0.5638} & {\\color[HTML]{24292F} 0.5132} & {\\color[HTML]{24292F} 0.7701} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_00} & {\\color[HTML]{24292F} 0.7349} & {\\color[HTML]{24292F} 0.7349} & {\\color[HTML]{24292F} 0.8246} & {\\color[HTML]{24292F} 0.6971} & 
{\\color[HTML]{24292F} 0.6560} & {\\color[HTML]{24292F} 0.6560} & {\\color[HTML]{24292F} 0.6724} & {\\color[HTML]{24292F} 0.7162} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{test\\_01\\_cv\\_01}} & {\\color[HTML]{24292F} \\textbf{0.6471}} & {\\color[HTML]{24292F} \\textbf{0.6471}} & {\\color[HTML]{24292F} \\textbf{0.6618}} & {\\color[HTML]{24292F} \\textbf{0.7102}} & {\\color[HTML]{24292F} \\textbf{0.6796}} & {\\color[HTML]{24292F} \\textbf{0.6790}} & {\\color[HTML]{24292F} \\textbf{0.6746}} & {\\color[HTML]{24292F} \\textbf{0.7599}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_02} & {\\color[HTML]{24292F} 0.6774} & {\\color[HTML]{24292F} 0.6774} & {\\color[HTML]{24292F} 0.6539} & {\\color[HTML]{24292F} 0.7842} & {\\color[HTML]{24292F} 0.6307} & {\\color[HTML]{24292F} 0.6307} & {\\color[HTML]{24292F} 0.5998} & {\\color[HTML]{24292F} 0.7847} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_00} & {\\color[HTML]{24292F} 0.7463} & {\\color[HTML]{24292F} 0.7463} & {\\color[HTML]{24292F} 0.8221} & {\\color[HTML]{24292F} 0.7172} & {\\color[HTML]{24292F} 0.6323} & {\\color[HTML]{24292F} 0.6323} & {\\color[HTML]{24292F} 0.6289} & {\\color[HTML]{24292F} 0.7087} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_01} & {\\color[HTML]{24292F} 0.6767} & {\\color[HTML]{24292F} 0.6761} & {\\color[HTML]{24292F} 0.7026} & {\\color[HTML]{24292F} 0.7199} & {\\color[HTML]{24292F} 0.6497} & {\\color[HTML]{24292F} 0.6497} & {\\color[HTML]{24292F} 0.6715} & {\\color[HTML]{24292F} 0.6983} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_02} & {\\color[HTML]{24292F} 0.6883} & {\\color[HTML]{24292F} 0.6883} & {\\color[HTML]{24292F} 0.7973} & {\\color[HTML]{24292F} 0.6619} & {\\color[HTML]{24292F} 0.6194} & {\\color[HTML]{24292F} 0.6194} & {\\color[HTML]{24292F} 0.6652} & {\\color[HTML]{24292F} 0.6538} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_00} & {\\color[HTML]{24292F} 0.7855} & {\\color[HTML]{24292F} 0.7855} & {\\color[HTML]{24292F} 0.7662} & 
{\\color[HTML]{24292F} 0.8413} & {\\color[HTML]{24292F} 0.6413} & {\\color[HTML]{24292F} 0.6413} & {\\color[HTML]{24292F} 0.6032} & {\\color[HTML]{24292F} 0.7821} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_01} & {\\color[HTML]{24292F} 0.6199} & {\\color[HTML]{24292F} 0.6199} & {\\color[HTML]{24292F} 0.5635} & {\\color[HTML]{24292F} 0.8123} & {\\color[HTML]{24292F} 0.6731} & {\\color[HTML]{24292F} 0.6731} & {\\color[HTML]{24292F} 0.6997} & {\\color[HTML]{24292F} 0.7158} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_02} & {\\color[HTML]{24292F} 0.6783} & {\\color[HTML]{24292F} 0.6783} & {\\color[HTML]{24292F} 0.8009} & {\\color[HTML]{24292F} 0.6438} & {\\color[HTML]{24292F} 0.6169} & {\\color[HTML]{24292F} 0.6169} & {\\color[HTML]{24292F} 0.8428} & {\\color[HTML]{24292F} 0.5326} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{mean}} & {\\color[HTML]{24292F} 0.6931} & {\\color[HTML]{24292F} 0.6931} & {\\color[HTML]{24292F} 0.7316} & {\\color[HTML]{24292F} 0.7289} & {\\color[HTML]{24292F} 0.6342} & {\\color[HTML]{24292F} 0.6342} & {\\color[HTML]{24292F} 0.6490} & {\\color[HTML]{24292F} 0.7176} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{std}} & {\\color[HTML]{24292F} 0.0447} & {\\color[HTML]{24292F} 0.0447} & {\\color[HTML]{24292F} 0.0846} & {\\color[HTML]{24292F} 0.0606} & {\\color[HTML]{24292F} 0.0301} & {\\color[HTML]{24292F} 0.0300} & {\\color[HTML]{24292F} 0.0762} & {\\color[HTML]{24292F} 0.0667} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{max}} & {\\color[HTML]{24292F} 0.7855} & {\\color[HTML]{24292F} 0.7855} & {\\color[HTML]{24292F} 0.8318} & {\\color[HTML]{24292F} 0.8413} & {\\color[HTML]{24292F} 0.6796} & {\\color[HTML]{24292F} 0.6790} & {\\color[HTML]{24292F} 0.8428} & {\\color[HTML]{24292F} 0.7847} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{min}} & {\\color[HTML]{24292F} 0.6199} & {\\color[HTML]{24292F} 0.6199} & {\\color[HTML]{24292F} 0.5635} & {\\color[HTML]{24292F} 0.6438} & {\\color[HTML]{24292F} 0.5638} & {\\color[HTML]{24292F} 
0.5638} & {\\color[HTML]{24292F} 0.5132} & {\\color[HTML]{24292F} 0.5326} \\\\ \\hline\n\\end{tabular}%\n}\n\\end{table}\n\n\\section{Scanner differences:}\n\n\\begin{table}[htbp]\n\\centering\n\\caption{Architecture used: UNet. Patch size: 128x128 pixels (best fold is reported in bold font). Scanner: Hamamatsu NanoZoomer 2.0-RS.}\n\\label{tab:my-table5}\n\\resizebox{0.8\\textwidth}{!}{%\n\\begin{tabular}{|c|c|c|c|c|}\n\\hline\n{\\color[HTML]{24292F} \\textbf{fold\\_name}} & {\\color[HTML]{24292F} \\textbf{dev\\_dice}} & {\\color[HTML]{24292F} \\textbf{dev\\_f1}} & {\\color[HTML]{24292F} \\textbf{dev\\_recall}} & {\\color[HTML]{24292F} \\textbf{dev\\_precision}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_00} & {\\color[HTML]{24292F} 0.7361} & {\\color[HTML]{24292F} 0.7354} & {\\color[HTML]{24292F} 0.693} & {\\color[HTML]{24292F} 0.8106} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_01} & {\\color[HTML]{24292F} 0.7234} & {\\color[HTML]{24292F} 0.7284} & {\\color[HTML]{24292F} 0.7451} & {\\color[HTML]{24292F} 0.734} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{test\\_00\\_cv\\_02}} & {\\color[HTML]{24292F} \\textbf{0.7388}} & {\\color[HTML]{24292F} \\textbf{0.7338}} & {\\color[HTML]{24292F} \\textbf{0.7155}} & {\\color[HTML]{24292F} \\textbf{0.7744}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_03} & {\\color[HTML]{24292F} 0.7384} & {\\color[HTML]{24292F} 0.732} & {\\color[HTML]{24292F} 0.7063} & {\\color[HTML]{24292F} 0.7818} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{mean}} & {\\color[HTML]{24292F} 0.7342} & {\\color[HTML]{24292F} 0.7324} & {\\color[HTML]{24292F} 0.7150} & {\\color[HTML]{24292F} 0.7752} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{std}} & {\\color[HTML]{24292F} 0.0063} & {\\color[HTML]{24292F} 0.0026} & {\\color[HTML]{24292F} 0.0191} & {\\color[HTML]{24292F} 0.0274} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{max}} & {\\color[HTML]{24292F} 0.7388} & {\\color[HTML]{24292F} 0.7354} & {\\color[HTML]{24292F} 0.7451} & 
{\\color[HTML]{24292F} 0.8106} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{min}} & {\\color[HTML]{24292F} 0.7234} & {\\color[HTML]{24292F} 0.7284} & {\\color[HTML]{24292F} 0.693} & {\\color[HTML]{24292F} 0.734} \\\\ \\hline\n\\end{tabular}%\n}\n\\end{table}\n\n\\begin{table}[htbp]\n\\centering\n\\caption{Architecture used: UNet. Patch size: 128x128 pixels (best fold is reported in bold font). Scanner: Hamamatsu NanoZoomer S60.}\n\\label{tab:my-table6}\n\\resizebox{0.8\\textwidth}{!}{%\n\\begin{tabular}{|c|c|c|c|c|}\n\\hline\n{\\color[HTML]{24292F} \\textbf{fold\\_name}} & {\\color[HTML]{24292F} \\textbf{dev\\_dice}} & {\\color[HTML]{24292F} \\textbf{dev\\_f1}} & {\\color[HTML]{24292F} \\textbf{dev\\_recall}} & {\\color[HTML]{24292F} \\textbf{dev\\_precision}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_00} & {\\color[HTML]{24292F} 0.6286} & {\\color[HTML]{24292F} 0.6429} & {\\color[HTML]{24292F} 0.6121} & {\\color[HTML]{24292F} 0.7143} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{test\\_00\\_cv\\_01}} & {\\color[HTML]{24292F} \\textbf{0.6757}} & {\\color[HTML]{24292F} \\textbf{0.6695}} & {\\color[HTML]{24292F} \\textbf{0.6931}} & {\\color[HTML]{24292F} \\textbf{0.6855}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_02} & {\\color[HTML]{24292F} 0.617} & {\\color[HTML]{24292F} 0.6448} & {\\color[HTML]{24292F} 0.6668} & {\\color[HTML]{24292F} 0.6443} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_03} & {\\color[HTML]{24292F} 0.6167} & {\\color[HTML]{24292F} 0.6445} & {\\color[HTML]{24292F} 0.6653} & {\\color[HTML]{24292F} 0.6452} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{mean}} & {\\color[HTML]{24292F} 0.6345} & {\\color[HTML]{24292F} 0.6504} & {\\color[HTML]{24292F} 0.6593} & {\\color[HTML]{24292F} 0.6723} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{std}} & {\\color[HTML]{24292F} 0.0243} & {\\color[HTML]{24292F} 0.0110} & {\\color[HTML]{24292F} 0.0294} & {\\color[HTML]{24292F} 0.0294} \\\\ \\hline\n{\\color[HTML]{24292F} 
\\textbf{max}} & {\\color[HTML]{24292F} 0.6757} & {\\color[HTML]{24292F} 0.6695} & {\\color[HTML]{24292F} 0.6931} & {\\color[HTML]{24292F} 0.7143} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{min}} & {\\color[HTML]{24292F} 0.6167} & {\\color[HTML]{24292F} 0.6429} & {\\color[HTML]{24292F} 0.6121} & {\\color[HTML]{24292F} 0.6443} \\\\ \\hline\n\\end{tabular}%\n}\n\\end{table}\n\n\\end{document}\n\\section{Introduction}\nAccumulations of Amyloid-$\\beta$ and tau protein aggregates, such as plaques in the brain gray matter, are well-known biomarkers of the neurodegenerative Alzheimer's disease (AD) \\cite{ben1}. Quantitative estimation of plaques is typically done by pathologists manually or semi-automatically, using proprietary black-box software from histopathological images of the brain -- a time and effort-intensive process prone to human observation variability and errors. \nIn recent times, deep learning (DL) based methods have shown promising results in digital pathology \\cite{jano1} and incredibly high accuracy segmentation of digital whole slide images \\cite{anant}. In \\cite{wurtz}, three different DL models were used to segment tau aggregates (tangles) and nuclei in postmortem brain Whole Slide Images (WSIs). The three models included a fully convolutional neural network (FCN), UNet, and Segnet, the latter achieving the highest accuracy in terms of IoU. In \\cite{signaevsky}, an FCN was trained on a dataset of 22 WSIs for semantic segmentation of tangle objects from postmortem brain WSIs. Their model can segment tangles of varying morphologies with high accuracy under diverse staining intensities. An FCN model was also used in \\cite{Vega2021} to classify morphologies of tau protein aggregates in the gray and white matter regions from 37 WSIs representing multiple degenerative diseases. 
In \\cite{manouskova2022}, tau aggregate analysis was done on a dataset of 6 WSIs with a combined classification-segmentation framework which achieved an F1 score of 81.3\\% and 75.8\\% on detection and segmentation tasks, respectively. \nSeveral domains in DL-based histopathological analysis of AD tauopathy remain unexplored. Firstly, most existing studies have used DL to segment tangles rather than plaques, which are harder to identify against the background gray matter due to their diffuse\/sparse appearance. Secondly, annotations of whole slide images are frequently affected by errors by human annotators. In such cases, a DL preliminary model may be trained using weakly annotated data and used to assist the expert in refining annotations. Thirdly, contemporary tau segmentation studies do not consider context information, which is essential in segmenting plaques from brain WSIs as these emerge as sparse objects against an extended background of gray matter. Finally, DL models with explainability features have not yet been applied in tau segmentation from WSIs. This is a critical requirement for DL models used in clinical applications \\cite{explain1} \\cite{Yamamoto2019}. The DL models should not only be able to identify regions of interest precisely but also give clinicians and general users the knowledge about which image features the model found necessary that influenced its decision. \nBased on the above, a DL pipeline for the segmentation of plaque regions in brain WSIs is presented in our study. This pipeline uses context and explainability features with a UNet-based semantic segmentation model to identify plaque features from WSIs.\n\n\\section{Methodology}\n\\label{sec:methodology}\n\n\\subsection{Dataset characteristics}\n\\label{sec:data_characteristics}\nIn this work, we analyzed eight whole slide images containing histological sections from the frontal cortices of patients with AD, which were provided by the French national brain biobank Neuro-CEB. 
Signed informed consent for autopsy and histologic analysis was obtained in all cases from the patients or their family members. The present cohort represents a common heterogeneity of AD cases, including slides with variable tau pathology (e.g., different object densities), variable staining quality, and variable tissue preservation. Sections of the frontal lobe were stained with AT8 antibody to reveal phosphorylated tau pathology, using a standardized immunohistochemistry protocol. Obtained slides were scanned using two Hamamatsu slide scanners (NanoZoomer 2.0-RS and NanoZoomer s60 with 227 nm\/pixel and 221 nm\/pixel resolution, respectively) at 40x initial magnification. The slides were used for human-CNN iterative object annotation resulting in about 4000 annotated and expert-validated Neuritic plaques. The labels, extracted in an XML format, constitute the segmentation ground truth.\n\n\\subsection{Data preparation}\nFrom the WSIs, at 20x magnification, patches with two levels of context information were generated using an ROI-guided sampling method. The larger patches (256x256 pixels) capture a broader context containing object neighborhood and background pixels, whereas the smaller (128x 128 pixels) mainly focus on the plaque region without much context information. The amount of context present in each patch is quantified using a ratio of the area of annotated ROI to the total area of the patch. The plaque example in different patch sizes is shown in Fig~\\ref{fig:context} (note that the bigger patch has additional objects-plaques). In addition, two different normalizations are used and compared: Macenko~\\cite{macenko} and Vahadane~\\cite{vahadane2015normalisation} methods.\n\nA new scheme for data augmentation was implemented based on ROI-shifting to prevent the networks' bias from focussing on the center location of plaques in the patches. 
Accordingly, the annotated plaque ROIs are shifted to four corners of a patch, producing a four-fold augmentation of each patch containing an object. This augmentation aims to train the UNet models robustly in the presence of variable neighborhood context information, especially when closely-spaced plaque objects are present. An example of this augmentation is shown in Fig~\\ref{fig:ROI_aug}.\n\n\\begin{figure}[!ht]\n \\centering\n \\includegraphics[scale=0.6] {images\/patchsize2}\n \\caption{Example of plaque image for different levels of context.}\n \\label{fig:context}\n\\end{figure}\n\n\\begin{figure}[!ht]\n \\centering\n \\includegraphics[scale=0.5] {images\/augmentations1}\n \\caption{Example of ROI shifting augmentation.}\n \\label{fig:ROI_aug}\n\\end{figure}\n\n\\subsection{Deep learning architecture for segmentation}\n\nIn order to segment the neuritic plaques, a UNet model adapted from \\cite{Ronnenbunet} is used with modifications for accommodating context information within the WSI patches during training and testing. The model architecture is modified to work with the two datasets containing different patch sizes -- i.e., $128\\times128$ (having low context information) and $256\\times256$ pixels (having more information about the plaque neighborhood). For the first dataset, the UNet architecture consists of 3 downsampling and 3 upsampling convolutional blocks, in addition to the convolutional middle block. For the $256\\times256$-patch-size dataset, we added a downsampling and upsampling convolutional block to the previous UNet model. For the downsampling block, we used a leaky ReLU activation function and ReLU for the upsampling block. In both blocks, we used batch-normalization following the suggestions in \\cite{Ioffe2015BatchNA} and \\cite{manouskova2022}. 
Dropout was used in each convolutional block with a probability of 0.5.\n\n\\subsection{Deep learning architecture for visual interpretation} \n\nIn addition to the segmentation, we focus on deriving locations within the patches where the DL model found significant features from the plaque objects. Therefore, we used an attention UNet described in \\cite{oktay2018attention}, which allows us to visualize the activated features at each iteration and evaluate qualitatively where the network focuses during training. The attention UNet architecture was also modified for the two different patch-size datasets following a configuration similar to the one described for the UNet.\n\n\\section{Experiments and results}\nData preparation and UNet experiments were executed on a 12-core Intel(R) Core i9-9920X @ 3.5GHz CPU with 128 GB RAM and two 12 GB RAM Nvidia GeForce RTX 2080 Ti GPUs. The attention UNet experiments were run on a cluster (1 GPU Tesla V100S-PCIe-32GB, 12 CPU cores Intel(R) Xeon(R) Gold 6126 CPU @ 2.60GHz, and 80 GB of RAM). The average training and evaluation time of the UNet per epoch is approximately 2 minutes for the $128\\times 128$ patch-size database and 5 minutes for the $256\\times 256$ patch-size database. Meanwhile, for the attention UNet, approximately half the time is needed. On the other hand, data preprocessing takes 2 to 5 hours using parallel computation. Regarding memory consumption, we used at most 6 GB of GPU RAM for the larger patch dataset. In order to increase the performance, we cache the data and annotations first in CPU RAM and then move them to the GPU.\n\nWe randomly divided the 8 WSIs into 4 folds for the DL experiments. Then, we tested the network using a 4-fold cross-testing scheme, and with the remaining data from each test fold, we also performed a 3-fold cross-validation. In addition, we ran a series of tests (using these folds) to select the loss function and the best optimizer for the UNet and attention UNet. 
We tested 4 loss functions (i.e., Focal loss, BCEwithLogits, Dice, and BCE-Dice loss) and 4 different optimizers (i.e., SGD, Adam, RMSProp, and Adadelta). After the hyperparameter tuning, we obtained the best performance using the BCE-Dice loss with a 50\\% balance between Dice and BCE (Binary Cross Entropy) and the Adadelta optimizer with $\\rho = 0.9$ and a varying learning rate based on the evolution of the validation loss. Also, we implemented early stopping for training with a patience value of 15 epochs.\n\n\\subsection{Results from UNet architecture}\nThe segmentation evaluation metric used for all of the experiments regarding the UNet is the Dice score, which is equivalent to the F1 score for binary segmentation problems. In the first experiment, the UNet model was trained with two datasets having different patch sizes: $128\\times 128$ and $256\\times 256$ pixels. The means and standard deviations of the Dice coefficient for cross-validation and cross-testing are reported in Table~\\ref{tab:dice_results1}. The patches were previously normalized using the Macenko method and then separated into their corresponding folds for training, validation, and testing following the scheme described above. We observe a decrease in the Dice score for larger patches having additional environmental context from the neuritic plaque.\n\n\\begin{table}[ht]\n\\centering\n\\caption{UNet results (Dice score) for 4-fold cross testing and 3-fold cross validation for different patch sizes.}\n\\begin{tabular}[t]{|c|c|c|c|}\n\\hline\nPatch size & Normalization & Cross validation & Cross testing\\\\\n\\hline\n$128\\times128$ & Macenko & $ 0.6954 \\pm 0.0289 $ & $0.6852 \\pm 0.0260$\\\\\n$256\\times256$ & Macenko & $0.6600 \\pm 0.0420 $ & $0.6460 \\pm 0.0330$ \\\\\n\\hline\n\\end{tabular}\n\\label{tab:dice_results1}\n\\end{table}%\n\nAs described, the WSIs were acquired using two different scanners. 
Therefore, to study the impact of the scanner properties, we divided the entire cohort into two independent datasets: 4 WSIs belonging to the NanoZoomer 2.0-RS and 4 WSIs scanned with the NanoZoomer s60. For both datasets, we only evaluate the performance of the DL architecture using 4-fold cross-validation and patches of size $128\\times 128$ pixels. Additionally, we normalize each dataset independently (i.e., using two reference patches: one for the NanoZoomer 2.0-RS and one for the NanoZoomer s60) using the Macenko method. The Dice score obtained using the images from the higher-resolution Hamamatsu NanoZoomer s60 scanner was $0.6345 \\pm 0.0243$, whereas that from the NanoZoomer 2.0-RS was $0.7342 \\pm 0.0063$.\n\nWe also study the effect of normalization on the entire dataset (8 WSIs). We normalized the patches from the $128\\times 128$ dataset using Macenko and Vahadane methods, and we selected the best fold (i.e., highest Dice score in testing for the first experiment) to train, validate and test the UNet under different input color properties. Contrary to the results reported in~\\cite{manouskova2022}, the Dice score obtained was higher using the Macenko method (0.7248 in testing) than the Vahadane method (0.7098 in testing), even in validation (0.7231 for Macenko and 0.6864 for Vahadane). For a full list of results, see supplementary material.\n\n\\subsection{Visual deep learning interpretation}\nThe attention UNet model was trained using the $128\\times 128$ and the $256\\times 256$ patch size dataset, and the results are summarized in Table~\\ref{tab:attunet_results1}. 
All images were normalized using the Macenko method, and we observed a similar trend as the UNet: better performance using patches containing less background information.\n\n\\begin{table}[ht]\n\\centering\n\\caption{Attention UNet results (Dice score) for 4-fold cross testing and 3-fold cross validation for different patch sizes.}\n\\begin{tabular}[t]{|c|c|c|c|}\n\\hline\nPatch size & Normalization & Cross validation & Cross testing\\\\\n\\hline\n$128\\times128$ & Macenko & $ 0.7516 \\pm 0.0334 $ & $0.6920 \\pm 0.0254$\\\\\n$256\\times256$ & Macenko & $0.6931 \\pm 0.0447 $ & $0.6342 \\pm 0.0301$ \\\\\n\\hline\n\\end{tabular}\n\\label{tab:attunet_results1}\n\\end{table}%\n\nAn example segmentation result from the attention UNet model in a $128\\times128$ patch containing a plaque object and its corresponding ground-truth mask is shown in Fig~\\ref{fig:attunet1}. We observe that the attention UNet model finds significant activation features around the plaque object initially annotated by experts (see ground truth mask in Fig~\\ref{fig:attunet1}). We also notice that the loss at iteration 100 increases over iteration 1; however, we clearly distinguish the region of the object (dark red color). After 1000 iterations, the loss decreases 50\\% due to the fact that the Dice part of the BCE-Dice loss function influences the network into detecting a pattern very similar to the given ground truth.\n\n\\begin{figure}[!ht]\n \\centering\n \\includegraphics[scale=0.25] {images\/att_unet_res1}\n \\caption{Global coherence of attention-UNet result with human annotation.}\n \\label{fig:attunet1}\n\\end{figure}\n\nAnother result from attention UNet is in Fig~\\ref{fig:attunet2}. Here, the attention UNet focuses on 2 plaques initially annotated by a human expert. It also identifies strong activation features in regions with no ground truth annotations, which could indicate missed ROIs by human experts during the annotation process. 
Thus with the attention UNet, it is not only possible to segment the plaque objects but also to improve or refine the manual annotations by experts.\n\nWeak and imprecise annotations are frequently observed in histopathology arising from human or software errors. In such cases, deep learning attention maps could be useful to provide pathologists and biologists with refined annotations (e.g., precision on the boundaries of ROIs). An example is shown in Fig~\\ref{fig: attunet_vsexpert} where DL attention maps are closer to the shape of actual ROIs compared to human-made annotations.\n\n\\begin{figure}[!ht]\n \\centering\n \\includegraphics[scale=0.25] {images\/att_unet_res2}\n \\caption{Focus progression using successive activation layers of attention-UNet.}\n \\label{fig:attunet2}\n\\end{figure}\n\n\\begin{figure}[!ht]\n \\centering\n \\includegraphics[scale=0.7] {images\/attunet_compare}\n \\caption{Improving human annotations using attention-based DL models.}\n \\label{fig: attunet_vsexpert}\n\\end{figure}\n\n\\section{Discussion and conclusion}\nIn the presented work, we studied\/evaluated a number of factors that contribute to the segmentation of plaques from whole slide images using DL models. The key observations are the following: \n\\begin{enumerate}\n \\item Use of biomarkers: the study in~\\cite{manouskova2022} uses the ALZ50 (used to discover compacted structures) biomarker, while our study uses the AT8 (majorly used in clinics, helps to discover all structures). We focus on AT8 in order to stay close to clinical protocols. The drawback is that this biomarker creates less compact structures meaning a slightly more difficult segmentation of the plaques, as our results support.\n \\item Use of different modalities: using the AT8 biomarker, we analyzed 2 types of WSI scanners (see Section~\\ref{sec:data_characteristics}) with different resolutions. High-resolution scanners amplify the annotation errors (human-software). 
Accordingly, some results concerning the high-resolution scanners have been affected, generating lower Dice scores. \n \\item Context effect on results of DL models: We noticed that increasing the background information in the patches negatively affects the segmentation results, which can be explained by the imbalance between the foreground and background pixels. In future works, this will be addressed using adaptive loss functions to take advantage of context information around the ROIs.\n \\item Attention maps: We observed that using the attention UNet model helps us see the weaknesses in the human-made annotations (see Fig~\\ref{fig: attunet_vsexpert}), generating valuable insights about the segmentation DL protocol, which can be used to refine the annotations by improving the border of the detected objects. These refined patterns can be used for a morphology and topology pipeline toward robust AD patient stratification. In addition, quantitative results show better performance of the same UNet architecture with attention blocks.\n \\item Comparison with state-of-the-art commercial software: We compared our WSI segmentation results with those generated by a commercial software package. This software uses a UNet architecture with a VGG encoder, which is different from our model. Our system outperforms this software (Dice score 0.63 for test), using the same WSIs as the ones used in this paper. Besides, in this software, neither information about how patches are generated nor the type of normalization or pre-processing performed on the dataset is available.\n \n\\end{enumerate}\nWhole slide histopathology images, whose sizes are in the giga-pixel range, often contain thousands of objects per image. As seen for plaques in this study, it becomes more challenging when the objects being annotated do not have clear boundaries separating them from their surrounding environments, which may give rise to errors in human-made annotations. 
We saw an example of how DL models with visual explanation properties can help pathologists refine the ROI identification process. Our future challenge is to create deep learning assistive tools that can improve scarce and weak human-made annotations, a generic problem in a wide range of biomedical applications. \n\n\\section*{Acknowledgements}\nThis research was supported by Mr Jean-Paul Baudecroux and The Big Brain Theory Program -- Paris Brain Institute (ICM). The human samples were obtained from the Neuro-CEB brain bank (\\url{https:\/\/www.neuroceb.org\/en\/}) (BRIF Number 0033-00011), partly funded by the patients' associations ARSEP, ARSLA, ``Conna\u00eetre les Syndromes C\u00e9r\u00e9belleux'', France-DFT, France Parkinson and by Vaincre Alzheimer Fondation, to which we express our gratitude. We are also grateful to the patients and their families.\n\n\\bibliographystyle{splncs04}\n\n\\section{UNet and Attention UNet:}\n\\begin{table}[htbp]\n\\centering\n\\caption{Architecture used: UNet. 
Patch size: 128x128 pixels (best fold is reported in bold font).}\n\\label{tab:my-table1}\n\\resizebox{\\textwidth}{!}{%\n\\begin{tabular}{|c|c|c|c|c|c|c|c|c|}\n\\hline\n{\\color[HTML]{24292F} \\textbf{fold\\_name}} & {\\color[HTML]{24292F} \\textbf{dev\\_dice}} & {\\color[HTML]{24292F} \\textbf{dev\\_f1}} & {\\color[HTML]{24292F} \\textbf{dev\\_recall}} & {\\color[HTML]{24292F} \\textbf{dev\\_precision}} & {\\color[HTML]{24292F} \\textbf{test\\_dice}} & {\\color[HTML]{24292F} \\textbf{test\\_f1}} & {\\color[HTML]{24292F} \\textbf{test\\_recall}} & {\\color[HTML]{24292F} \\textbf{test\\_precision}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_00} & {\\color[HTML]{24292F} 0.7151} & {\\color[HTML]{24292F} 0.7165} & {\\color[HTML]{24292F} 0.6674} & {\\color[HTML]{24292F} 0.8017} & {\\color[HTML]{24292F} 0.6753} & {\\color[HTML]{24292F} 0.6707} & {\\color[HTML]{24292F} 0.668} & {\\color[HTML]{24292F} 0.784} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_01} & {\\color[HTML]{24292F} 0.7034} & {\\color[HTML]{24292F} 0.6933} & {\\color[HTML]{24292F} 0.699} & {\\color[HTML]{24292F} 0.718} & {\\color[HTML]{24292F} 0.7046} & {\\color[HTML]{24292F} 0.7035} & {\\color[HTML]{24292F} 0.7495} & {\\color[HTML]{24292F} 0.7475} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_02} & {\\color[HTML]{24292F} 0.6963} & {\\color[HTML]{24292F} 0.6932} & {\\color[HTML]{24292F} 0.6873} & {\\color[HTML]{24292F} 0.7339} & {\\color[HTML]{24292F} 0.6781} & {\\color[HTML]{24292F} 0.6765} & {\\color[HTML]{24292F} 0.7423} & {\\color[HTML]{24292F} 0.7094} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_00} & {\\color[HTML]{24292F} 0.7011} & {\\color[HTML]{24292F} 0.7037} & {\\color[HTML]{24292F} 0.714} & {\\color[HTML]{24292F} 0.7239} & {\\color[HTML]{24292F} 0.7032} & {\\color[HTML]{24292F} 0.6962} & {\\color[HTML]{24292F} 0.6684} & {\\color[HTML]{24292F} 0.8052} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{test\\_01\\_cv\\_01}} & {\\color[HTML]{24292F} \\textbf{0.7231}} 
& {\\color[HTML]{24292F} \\textbf{0.7118}} & {\\color[HTML]{24292F} \\textbf{0.6763}} & {\\color[HTML]{24292F} \\textbf{0.7801}} & {\\color[HTML]{24292F} \\textbf{0.7248}} & {\\color[HTML]{24292F} \\textbf{0.7192}} & {\\color[HTML]{24292F} \\textbf{0.7105}} & {\\color[HTML]{24292F} \\textbf{0.8067}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_02} & {\\color[HTML]{24292F} 0.72} & {\\color[HTML]{24292F} 0.7217} & {\\color[HTML]{24292F} 0.7519} & {\\color[HTML]{24292F} 0.7185} & {\\color[HTML]{24292F} 0.7141} & {\\color[HTML]{24292F} 0.7068} & {\\color[HTML]{24292F} 0.6811} & {\\color[HTML]{24292F} 0.8166} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_00} & {\\color[HTML]{24292F} 0.709} & {\\color[HTML]{24292F} 0.7156} & {\\color[HTML]{24292F} 0.7195} & {\\color[HTML]{24292F} 0.7423} & {\\color[HTML]{24292F} 0.7027} & {\\color[HTML]{24292F} 0.7004} & {\\color[HTML]{24292F} 0.7043} & {\\color[HTML]{24292F} 0.7855} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_01} & {\\color[HTML]{24292F} 0.7127} & {\\color[HTML]{24292F} 0.7195} & {\\color[HTML]{24292F} 0.7012} & {\\color[HTML]{24292F} 0.7618} & {\\color[HTML]{24292F} 0.6643} & {\\color[HTML]{24292F} 0.6608} & {\\color[HTML]{24292F} 0.6444} & {\\color[HTML]{24292F} 0.7996} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_02} & {\\color[HTML]{24292F} 0.6807} & {\\color[HTML]{24292F} 0.6838} & {\\color[HTML]{24292F} 0.6862} & {\\color[HTML]{24292F} 0.7167} & {\\color[HTML]{24292F} 0.6306} & {\\color[HTML]{24292F} 0.6296} & {\\color[HTML]{24292F} 0.6634} & {\\color[HTML]{24292F} 0.7316} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_00} & {\\color[HTML]{24292F} 0.6765} & {\\color[HTML]{24292F} 0.6813} & {\\color[HTML]{24292F} 0.6788} & {\\color[HTML]{24292F} 0.7183} & {\\color[HTML]{24292F} 0.6845} & {\\color[HTML]{24292F} 0.6855} & {\\color[HTML]{24292F} 0.8061} & {\\color[HTML]{24292F} 0.666} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_01} & {\\color[HTML]{24292F} 0.6959} & 
{\\color[HTML]{24292F} 0.6967} & {\\color[HTML]{24292F} 0.6244} & {\\color[HTML]{24292F} 0.8167} & {\\color[HTML]{24292F} 0.6883} & {\\color[HTML]{24292F} 0.879} & {\\color[HTML]{24292F} 0.7981} & {\\color[HTML]{24292F} 0.6777} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_02} & {\\color[HTML]{24292F} 0.6112} & {\\color[HTML]{24292F} 0.6008} & {\\color[HTML]{24292F} 0.61} & {\\color[HTML]{24292F} 0.6325} & {\\color[HTML]{24292F} 0.6521} & {\\color[HTML]{24292F} 0.6545} & {\\color[HTML]{24292F} 0.8018} & {\\color[HTML]{24292F} 0.6245} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{mean}} & {\\color[HTML]{24292F} 0.6954} & {\\color[HTML]{24292F} 0.6948} & {\\color[HTML]{24292F} 0.6847} & {\\color[HTML]{24292F} 0.7387} & {\\color[HTML]{24292F} 0.6852} & {\\color[HTML]{24292F} 0.6986} & {\\color[HTML]{24292F} 0.7198} & {\\color[HTML]{24292F} 0.7462} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{std}} & {\\color[HTML]{24292F} 0.0289} & {\\color[HTML]{24292F} 0.0313} & {\\color[HTML]{24292F} 0.0373} & {\\color[HTML]{24292F} 0.0462} & {\\color[HTML]{24292F} 0.0260} & {\\color[HTML]{24292F} 0.0596} & {\\color[HTML]{24292F} 0.0561} & {\\color[HTML]{24292F} 0.0615} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{max}} & {\\color[HTML]{24292F} 0.7231} & {\\color[HTML]{24292F} 0.7217} & {\\color[HTML]{24292F} 0.7519} & {\\color[HTML]{24292F} 0.8167} & {\\color[HTML]{24292F} 0.7248} & {\\color[HTML]{24292F} 0.879} & {\\color[HTML]{24292F} 0.8061} & {\\color[HTML]{24292F} 0.8166} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{min}} & {\\color[HTML]{24292F} 0.6112} & {\\color[HTML]{24292F} 0.6008} & {\\color[HTML]{24292F} 0.61} & {\\color[HTML]{24292F} 0.6325} & {\\color[HTML]{24292F} 0.6306} & {\\color[HTML]{24292F} 0.6296} & {\\color[HTML]{24292F} 0.6444} & {\\color[HTML]{24292F} 0.6245} \\\\ \\hline\n\\end{tabular}%\n}\n\\end{table}\n\n\\begin{table}[htbp]\n\\centering\n\\caption{Architecture used: Attention UNet. 
Patch size: 128x128 pixels (best fold is reported in bold font).}\n\\label{tab:my-table2}\n\\resizebox{\\textwidth}{!}{%\n\\begin{tabular}{|c|c|c|c|c|c|c|c|c|}\n\\hline\n{\\color[HTML]{24292F} \\textbf{fold\\_name}} & {\\color[HTML]{24292F} \\textbf{dev\\_dice}} & {\\color[HTML]{24292F} \\textbf{dev\\_f1}} & {\\color[HTML]{24292F} \\textbf{dev\\_recall}} & {\\color[HTML]{24292F} \\textbf{dev\\_precision}} & {\\color[HTML]{24292F} \\textbf{test\\_dice}} & {\\color[HTML]{24292F} \\textbf{test\\_f1}} & {\\color[HTML]{24292F} \\textbf{test\\_recall}} & {\\color[HTML]{24292F} \\textbf{test\\_precision}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_00} & {\\color[HTML]{24292F} 0.7654} & {\\color[HTML]{24292F} 0.7654} & {\\color[HTML]{24292F} 0.7833} & {\\color[HTML]{24292F} 0.8000} & {\\color[HTML]{24292F} 0.6405} & {\\color[HTML]{24292F} 0.6405} & {\\color[HTML]{24292F} 0.5959} & {\\color[HTML]{24292F} 0.8177} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_01} & {\\color[HTML]{24292F} 0.7216} & {\\color[HTML]{24292F} 0.7216} & {\\color[HTML]{24292F} 0.7293} & {\\color[HTML]{24292F} 0.7890} & {\\color[HTML]{24292F} 0.6734} & {\\color[HTML]{24292F} 0.6734} & {\\color[HTML]{24292F} 0.6387} & {\\color[HTML]{24292F} 0.8258} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_02} & {\\color[HTML]{24292F} 0.7428} & {\\color[HTML]{24292F} 0.7422} & {\\color[HTML]{24292F} 0.7425} & {\\color[HTML]{24292F} 0.8108} & {\\color[HTML]{24292F} 0.6499} & {\\color[HTML]{24292F} 0.6499} & {\\color[HTML]{24292F} 0.6320} & {\\color[HTML]{24292F} 0.7883} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_00} & {\\color[HTML]{24292F} 0.7646} & {\\color[HTML]{24292F} 0.7646} & {\\color[HTML]{24292F} 0.7876} & {\\color[HTML]{24292F} 0.7934} & {\\color[HTML]{24292F} 0.6851} & {\\color[HTML]{24292F} 0.6851} & {\\color[HTML]{24292F} 0.6582} & {\\color[HTML]{24292F} 0.7979} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_01} & {\\color[HTML]{24292F} 0.7272} & 
{\\color[HTML]{24292F} 0.7272} & {\\color[HTML]{24292F} 0.7916} & {\\color[HTML]{24292F} 0.7299} & {\\color[HTML]{24292F} 0.7128} & {\\color[HTML]{24292F} 0.7122} & {\\color[HTML]{24292F} 0.7454} & {\\color[HTML]{24292F} 0.7522} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_02} & {\\color[HTML]{24292F} 0.7335} & {\\color[HTML]{24292F} 0.7335} & {\\color[HTML]{24292F} 0.7593} & {\\color[HTML]{24292F} 0.7772} & {\\color[HTML]{24292F} 0.6909} & {\\color[HTML]{24292F} 0.6909} & {\\color[HTML]{24292F} 0.6836} & {\\color[HTML]{24292F} 0.7890} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_00} & {\\color[HTML]{24292F} 0.7658} & {\\color[HTML]{24292F} 0.7658} & {\\color[HTML]{24292F} 0.7946} & {\\color[HTML]{24292F} 0.7900} & {\\color[HTML]{24292F} 0.7184} & {\\color[HTML]{24292F} 0.7184} & {\\color[HTML]{24292F} 0.7109} & {\\color[HTML]{24292F} 0.8060} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{test\\_02\\_cv\\_01}} & {\\color[HTML]{24292F} \\textbf{0.7159}} & {\\color[HTML]{24292F} \\textbf{0.7153}} & {\\color[HTML]{24292F} \\textbf{0.7565}} & {\\color[HTML]{24292F} \\textbf{0.7429}} & {\\color[HTML]{24292F} \\textbf{0.7263}} & {\\color[HTML]{24292F} \\textbf{0.7263}} & {\\color[HTML]{24292F} \\textbf{0.7950}} & {\\color[HTML]{24292F} \\textbf{0.7254}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_02} & {\\color[HTML]{24292F} 0.7809} & {\\color[HTML]{24292F} 0.7809} & {\\color[HTML]{24292F} 0.8232} & {\\color[HTML]{24292F} 0.7855} & {\\color[HTML]{24292F} 0.6948} & {\\color[HTML]{24292F} 0.6948} & {\\color[HTML]{24292F} 0.7241} & {\\color[HTML]{24292F} 0.7488} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_00} & {\\color[HTML]{24292F} 0.8193} & {\\color[HTML]{24292F} 0.8193} & {\\color[HTML]{24292F} 0.8180} & {\\color[HTML]{24292F} 0.8559} & {\\color[HTML]{24292F} 0.6959} & {\\color[HTML]{24292F} 0.6959} & {\\color[HTML]{24292F} 0.6839} & {\\color[HTML]{24292F} 0.8008} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_01} & 
{\\color[HTML]{24292F} 0.6962} & {\\color[HTML]{24292F} 0.6962} & {\\color[HTML]{24292F} 0.6843} & {\\color[HTML]{24292F} 0.7908} & {\\color[HTML]{24292F} 0.7140} & {\\color[HTML]{24292F} 0.7140} & {\\color[HTML]{24292F} 0.7781} & {\\color[HTML]{24292F} 0.7281} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_02} & {\\color[HTML]{24292F} 0.7857} & {\\color[HTML]{24292F} 0.7857} & {\\color[HTML]{24292F} 0.8297} & {\\color[HTML]{24292F} 0.7896} & {\\color[HTML]{24292F} 0.7024} & {\\color[HTML]{24292F} 0.7024} & {\\color[HTML]{24292F} 0.8435} & {\\color[HTML]{24292F} 0.6571} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{mean}} & {\\color[HTML]{24292F} 0.7516} & {\\color[HTML]{24292F} 0.7515} & {\\color[HTML]{24292F} 0.7750} & {\\color[HTML]{24292F} 0.7879} & {\\color[HTML]{24292F} 0.6920} & {\\color[HTML]{24292F} 0.6920} & {\\color[HTML]{24292F} 0.7074} & {\\color[HTML]{24292F} 0.7698} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{std}} & {\\color[HTML]{24292F} 0.0334} & {\\color[HTML]{24292F} 0.0335} & {\\color[HTML]{24292F} 0.0408} & {\\color[HTML]{24292F} 0.0301} & {\\color[HTML]{24292F} 0.0254} & {\\color[HTML]{24292F} 0.0254} & {\\color[HTML]{24292F} 0.0703} & {\\color[HTML]{24292F} 0.0469} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{max}} & {\\color[HTML]{24292F} 0.8193} & {\\color[HTML]{24292F} 0.8193} & {\\color[HTML]{24292F} 0.8297} & {\\color[HTML]{24292F} 0.8559} & {\\color[HTML]{24292F} 0.7263} & {\\color[HTML]{24292F} 0.7263} & {\\color[HTML]{24292F} 0.8435} & {\\color[HTML]{24292F} 0.8258} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{min}} & {\\color[HTML]{24292F} 0.6962} & {\\color[HTML]{24292F} 0.6962} & {\\color[HTML]{24292F} 0.6843} & {\\color[HTML]{24292F} 0.7299} & {\\color[HTML]{24292F} 0.6405} & {\\color[HTML]{24292F} 0.6405} & {\\color[HTML]{24292F} 0.5959} & {\\color[HTML]{24292F} 0.6571} \\\\ \\hline\n\\end{tabular}%\n}\n\\end{table}\n\n\\begin{table}[htbp]\n\\centering\n\\caption{Architecture used: UNet. 
Patch size: 256x256 pixels (best fold is reported in bold font).}\n\\label{tab:my-table3}\n\\resizebox{\\textwidth}{!}{%\n\\begin{tabular}{|c|c|c|c|c|c|c|c|c|}\n\\hline\n{\\color[HTML]{24292F} \\textbf{fold\\_name}} & {\\color[HTML]{24292F} \\textbf{dev\\_dice}} & {\\color[HTML]{24292F} \\textbf{dev\\_f1}} & {\\color[HTML]{24292F} \\textbf{dev\\_recall}} & {\\color[HTML]{24292F} \\textbf{dev\\_precision}} & {\\color[HTML]{24292F} \\textbf{test\\_dice}} & {\\color[HTML]{24292F} \\textbf{test\\_f1}} & {\\color[HTML]{24292F} \\textbf{test\\_recall}} & {\\color[HTML]{24292F} \\textbf{test\\_precision}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_00} & {\\color[HTML]{24292F} 0.707} & {\\color[HTML]{24292F} 0.7095} & {\\color[HTML]{24292F} 0.6557} & {\\color[HTML]{24292F} 0.7962} & {\\color[HTML]{24292F} 0.6423} & {\\color[HTML]{24292F} 0.6363} & {\\color[HTML]{24292F} 0.5839} & {\\color[HTML]{24292F} 0.8071} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_01} & {\\color[HTML]{24292F} 0.6509} & {\\color[HTML]{24292F} 0.6563} & {\\color[HTML]{24292F} 0.6744} & {\\color[HTML]{24292F} 0.6604} & {\\color[HTML]{24292F} 0.6783} & {\\color[HTML]{24292F} 0.6743} & {\\color[HTML]{24292F} 0.6563} & {\\color[HTML]{24292F} 0.7767} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_02} & {\\color[HTML]{24292F} 0.6663} & {\\color[HTML]{24292F} 0.6657} & {\\color[HTML]{24292F} 0.6725} & {\\color[HTML]{24292F} 0.6826} & {\\color[HTML]{24292F} 0.6368} & {\\color[HTML]{24292F} 0.6322} & {\\color[HTML]{24292F} 0.6356} & {\\color[HTML]{24292F} 0.7269} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{test\\_01\\_cv\\_00}} & {\\color[HTML]{24292F} \\textbf{0.6939}} & {\\color[HTML]{24292F} \\textbf{0.7099}} & {\\color[HTML]{24292F} \\textbf{0.7041}} & {\\color[HTML]{24292F} \\textbf{0.7319}} & {\\color[HTML]{24292F} \\textbf{0.6963}} & {\\color[HTML]{24292F} \\textbf{0.6893}} & {\\color[HTML]{24292F} \\textbf{0.6632}} & {\\color[HTML]{24292F} \\textbf{0.7998}} \\\\ 
\\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_01} & {\\color[HTML]{24292F} 0.6411} & {\\color[HTML]{24292F} 0.642} & {\\color[HTML]{24292F} 0.6018} & {\\color[HTML]{24292F} 0.7157} & {\\color[HTML]{24292F} 0.6856} & {\\color[HTML]{24292F} 0.6785} & {\\color[HTML]{24292F} 0.6468} & {\\color[HTML]{24292F} 0.8008} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_02} & {\\color[HTML]{24292F} 0.6793} & {\\color[HTML]{24292F} 0.6829} & {\\color[HTML]{24292F} 0.6892} & {\\color[HTML]{24292F} 0.6979} & {\\color[HTML]{24292F} 0.6718} & {\\color[HTML]{24292F} 0.6654} & {\\color[HTML]{24292F} 0.6487} & {\\color[HTML]{24292F} 0.7732} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_00} & {\\color[HTML]{24292F} 0.6893} & {\\color[HTML]{24292F} 0.7077} & {\\color[HTML]{24292F} 0.716} & {\\color[HTML]{24292F} 0.716} & {\\color[HTML]{24292F} 0.6395} & {\\color[HTML]{24292F} 0.6352} & {\\color[HTML]{24292F} 0.6104} & {\\color[HTML]{24292F} 0.766} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_01} & {\\color[HTML]{24292F} 0.6911} & {\\color[HTML]{24292F} 0.698} & {\\color[HTML]{24292F} 0.6344} & {\\color[HTML]{24292F} 0.8027} & {\\color[HTML]{24292F} 0.621} & {\\color[HTML]{24292F} 0.6155} & {\\color[HTML]{24292F} 0.5748} & {\\color[HTML]{24292F} 0.7816} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_02} & {\\color[HTML]{24292F} 0.6462} & {\\color[HTML]{24292F} 0.649} & {\\color[HTML]{24292F} 0.6593} & {\\color[HTML]{24292F} 0.6638} & {\\color[HTML]{24292F} 0.5949} & {\\color[HTML]{24292F} 0.595} & {\\color[HTML]{24292F} 0.6676} & {\\color[HTML]{24292F} 0.6185} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_00} & {\\color[HTML]{24292F} 0.6432} & {\\color[HTML]{24292F} 0.6529} & {\\color[HTML]{24292F} 0.6323} & {\\color[HTML]{24292F} 0.7029} & {\\color[HTML]{24292F} 0.6507} & {\\color[HTML]{24292F} 0.65} & {\\color[HTML]{24292F} 0.7633} & {\\color[HTML]{24292F} 0.6242} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_01} & {\\color[HTML]{24292F} 
0.6747} & {\\color[HTML]{24292F} 0.6877} & {\\color[HTML]{24292F} 0.6581} & {\\color[HTML]{24292F} 0.7453} & {\\color[HTML]{24292F} 0.6507} & {\\color[HTML]{24292F} 0.6521} & {\\color[HTML]{24292F} 0.8146} & {\\color[HTML]{24292F} 0.5945} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_02} & {\\color[HTML]{24292F} 0.5391} & {\\color[HTML]{24292F} 0.5355} & {\\color[HTML]{24292F} 0.5558} & {\\color[HTML]{24292F} 0.5535} & {\\color[HTML]{24292F} 0.5834} & {\\color[HTML]{24292F} 0.5856} & {\\color[HTML]{24292F} 0.8118} & {\\color[HTML]{24292F} 0.5149} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{mean}} & {\\color[HTML]{24292F} 0.660} & {\\color[HTML]{24292F} 0.666} & {\\color[HTML]{24292F} 0.654} & {\\color[HTML]{24292F} 0.706} & {\\color[HTML]{24292F} 0.646} & {\\color[HTML]{24292F} 0.642} & {\\color[HTML]{24292F} 0.673} & {\\color[HTML]{24292F} 0.715} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{std}} & {\\color[HTML]{24292F} 0.042} & {\\color[HTML]{24292F} 0.046} & {\\color[HTML]{24292F} 0.042} & {\\color[HTML]{24292F} 0.063} & {\\color[HTML]{24292F} 0.033} & {\\color[HTML]{24292F} 0.031} & {\\color[HTML]{24292F} 0.077} & {\\color[HTML]{24292F} 0.096} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{max}} & {\\color[HTML]{24292F} 0.707} & {\\color[HTML]{24292F} 0.710} & {\\color[HTML]{24292F} 0.716} & {\\color[HTML]{24292F} 0.803} & {\\color[HTML]{24292F} 0.696} & {\\color[HTML]{24292F} 0.689} & {\\color[HTML]{24292F} 0.815} & {\\color[HTML]{24292F} 0.807} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{min}} & {\\color[HTML]{24292F} 0.5391} & {\\color[HTML]{24292F} 0.5355} & {\\color[HTML]{24292F} 0.5558} & {\\color[HTML]{24292F} 0.5535} & {\\color[HTML]{24292F} 0.5834} & {\\color[HTML]{24292F} 0.5856} & {\\color[HTML]{24292F} 0.5748} & {\\color[HTML]{24292F} 0.5149} \\\\ \\hline\n\\end{tabular}%\n}\n\\end{table}\n\n\\begin{table}[htbp]\n\\centering\n\\caption{Architecture used: Attention UNet. 
Patch size: 256x256 pixels (best fold is reported in bold font).}\n\\label{tab:my-table4}\n\\resizebox{\\textwidth}{!}{%\n\\begin{tabular}{|c|c|c|c|c|c|c|c|c|}\n\\hline\n{\\color[HTML]{24292F} \\textbf{fold\\_name}} & {\\color[HTML]{24292F} \\textbf{dev\\_dice}} & {\\color[HTML]{24292F} \\textbf{dev\\_f1}} & {\\color[HTML]{24292F} \\textbf{dev\\_recall}} & {\\color[HTML]{24292F} \\textbf{dev\\_precision}} & {\\color[HTML]{24292F} \\textbf{test\\_dice}} & {\\color[HTML]{24292F} \\textbf{test\\_f1}} & {\\color[HTML]{24292F} \\textbf{test\\_recall}} & {\\color[HTML]{24292F} \\textbf{test\\_precision}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_00} & {\\color[HTML]{24292F} 0.7288} & {\\color[HTML]{24292F} 0.7288} & {\\color[HTML]{24292F} 0.8318} & {\\color[HTML]{24292F} 0.6819} & {\\color[HTML]{24292F} 0.6439} & {\\color[HTML]{24292F} 0.6439} & {\\color[HTML]{24292F} 0.6312} & {\\color[HTML]{24292F} 0.7429} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_01} & {\\color[HTML]{24292F} 0.6698} & {\\color[HTML]{24292F} 0.6698} & {\\color[HTML]{24292F} 0.7152} & {\\color[HTML]{24292F} 0.6855} & {\\color[HTML]{24292F} 0.6038} & {\\color[HTML]{24292F} 0.6038} & {\\color[HTML]{24292F} 0.5860} & {\\color[HTML]{24292F} 0.7462} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_02} & {\\color[HTML]{24292F} 0.6644} & {\\color[HTML]{24292F} 0.6644} & {\\color[HTML]{24292F} 0.6396} & {\\color[HTML]{24292F} 0.7920} & {\\color[HTML]{24292F} 0.5638} & {\\color[HTML]{24292F} 0.5638} & {\\color[HTML]{24292F} 0.5132} & {\\color[HTML]{24292F} 0.7701} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_00} & {\\color[HTML]{24292F} 0.7349} & {\\color[HTML]{24292F} 0.7349} & {\\color[HTML]{24292F} 0.8246} & {\\color[HTML]{24292F} 0.6971} & {\\color[HTML]{24292F} 0.6560} & {\\color[HTML]{24292F} 0.6560} & {\\color[HTML]{24292F} 0.6724} & {\\color[HTML]{24292F} 0.7162} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{test\\_01\\_cv\\_01}} & {\\color[HTML]{24292F} 
\\textbf{0.6471}} & {\\color[HTML]{24292F} \\textbf{0.6471}} & {\\color[HTML]{24292F} \\textbf{0.6618}} & {\\color[HTML]{24292F} \\textbf{0.7102}} & {\\color[HTML]{24292F} \\textbf{0.6796}} & {\\color[HTML]{24292F} \\textbf{0.6790}} & {\\color[HTML]{24292F} \\textbf{0.6746}} & {\\color[HTML]{24292F} \\textbf{0.7599}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_01\\_cv\\_02} & {\\color[HTML]{24292F} 0.6774} & {\\color[HTML]{24292F} 0.6774} & {\\color[HTML]{24292F} 0.6539} & {\\color[HTML]{24292F} 0.7842} & {\\color[HTML]{24292F} 0.6307} & {\\color[HTML]{24292F} 0.6307} & {\\color[HTML]{24292F} 0.5998} & {\\color[HTML]{24292F} 0.7847} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_00} & {\\color[HTML]{24292F} 0.7463} & {\\color[HTML]{24292F} 0.7463} & {\\color[HTML]{24292F} 0.8221} & {\\color[HTML]{24292F} 0.7172} & {\\color[HTML]{24292F} 0.6323} & {\\color[HTML]{24292F} 0.6323} & {\\color[HTML]{24292F} 0.6289} & {\\color[HTML]{24292F} 0.7087} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_01} & {\\color[HTML]{24292F} 0.6767} & {\\color[HTML]{24292F} 0.6761} & {\\color[HTML]{24292F} 0.7026} & {\\color[HTML]{24292F} 0.7199} & {\\color[HTML]{24292F} 0.6497} & {\\color[HTML]{24292F} 0.6497} & {\\color[HTML]{24292F} 0.6715} & {\\color[HTML]{24292F} 0.6983} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_02\\_cv\\_02} & {\\color[HTML]{24292F} 0.6883} & {\\color[HTML]{24292F} 0.6883} & {\\color[HTML]{24292F} 0.7973} & {\\color[HTML]{24292F} 0.6619} & {\\color[HTML]{24292F} 0.6194} & {\\color[HTML]{24292F} 0.6194} & {\\color[HTML]{24292F} 0.6652} & {\\color[HTML]{24292F} 0.6538} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_00} & {\\color[HTML]{24292F} 0.7855} & {\\color[HTML]{24292F} 0.7855} & {\\color[HTML]{24292F} 0.7662} & {\\color[HTML]{24292F} 0.8413} & {\\color[HTML]{24292F} 0.6413} & {\\color[HTML]{24292F} 0.6413} & {\\color[HTML]{24292F} 0.6032} & {\\color[HTML]{24292F} 0.7821} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_01} & 
{\\color[HTML]{24292F} 0.6199} & {\\color[HTML]{24292F} 0.6199} & {\\color[HTML]{24292F} 0.5635} & {\\color[HTML]{24292F} 0.8123} & {\\color[HTML]{24292F} 0.6731} & {\\color[HTML]{24292F} 0.6731} & {\\color[HTML]{24292F} 0.6997} & {\\color[HTML]{24292F} 0.7158} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_03\\_cv\\_02} & {\\color[HTML]{24292F} 0.6783} & {\\color[HTML]{24292F} 0.6783} & {\\color[HTML]{24292F} 0.8009} & {\\color[HTML]{24292F} 0.6438} & {\\color[HTML]{24292F} 0.6169} & {\\color[HTML]{24292F} 0.6169} & {\\color[HTML]{24292F} 0.8428} & {\\color[HTML]{24292F} 0.5326} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{mean}} & {\\color[HTML]{24292F} 0.6931} & {\\color[HTML]{24292F} 0.6931} & {\\color[HTML]{24292F} 0.7316} & {\\color[HTML]{24292F} 0.7289} & {\\color[HTML]{24292F} 0.6342} & {\\color[HTML]{24292F} 0.6342} & {\\color[HTML]{24292F} 0.6490} & {\\color[HTML]{24292F} 0.7176} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{std}} & {\\color[HTML]{24292F} 0.0447} & {\\color[HTML]{24292F} 0.0447} & {\\color[HTML]{24292F} 0.0846} & {\\color[HTML]{24292F} 0.0606} & {\\color[HTML]{24292F} 0.0301} & {\\color[HTML]{24292F} 0.0300} & {\\color[HTML]{24292F} 0.0762} & {\\color[HTML]{24292F} 0.0667} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{max}} & {\\color[HTML]{24292F} 0.7855} & {\\color[HTML]{24292F} 0.7855} & {\\color[HTML]{24292F} 0.8318} & {\\color[HTML]{24292F} 0.8413} & {\\color[HTML]{24292F} 0.6796} & {\\color[HTML]{24292F} 0.6790} & {\\color[HTML]{24292F} 0.8428} & {\\color[HTML]{24292F} 0.7847} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{min}} & {\\color[HTML]{24292F} 0.6199} & {\\color[HTML]{24292F} 0.6199} & {\\color[HTML]{24292F} 0.5635} & {\\color[HTML]{24292F} 0.6438} & {\\color[HTML]{24292F} 0.5638} & {\\color[HTML]{24292F} 0.5638} & {\\color[HTML]{24292F} 0.5132} & {\\color[HTML]{24292F} 0.5326} \\\\ \\hline\n\\end{tabular}%\n}\n\\end{table}\n\n\\section{Scanner differences:}\n\n\\begin{table}[htbp]\n\\centering\n\\caption{Architecture used: 
UNet. Patch size: 128x128 pixels (best fold is reported in bold font). Scanner: Hamamatsu NanoZoomer 2.0-RS.}\n\\label{tab:my-table5}\n\\resizebox{0.8\\textwidth}{!}{%\n\\begin{tabular}{|c|c|c|c|c|}\n\\hline\n{\\color[HTML]{24292F} \\textbf{fold\\_name}} & {\\color[HTML]{24292F} \\textbf{dev\\_dice}} & {\\color[HTML]{24292F} \\textbf{dev\\_f1}} & {\\color[HTML]{24292F} \\textbf{dev\\_recall}} & {\\color[HTML]{24292F} \\textbf{dev\\_precision}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_00} & {\\color[HTML]{24292F} 0.7361} & {\\color[HTML]{24292F} 0.7354} & {\\color[HTML]{24292F} 0.693} & {\\color[HTML]{24292F} 0.8106} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_01} & {\\color[HTML]{24292F} 0.7234} & {\\color[HTML]{24292F} 0.7284} & {\\color[HTML]{24292F} 0.7451} & {\\color[HTML]{24292F} 0.734} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{test\\_00\\_cv\\_02}} & {\\color[HTML]{24292F} \\textbf{0.7388}} & {\\color[HTML]{24292F} \\textbf{0.7338}} & {\\color[HTML]{24292F} \\textbf{0.7155}} & {\\color[HTML]{24292F} \\textbf{0.7744}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_03} & {\\color[HTML]{24292F} 0.7384} & {\\color[HTML]{24292F} 0.732} & {\\color[HTML]{24292F} 0.7063} & {\\color[HTML]{24292F} 0.7818} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{mean}} & {\\color[HTML]{24292F} 0.7342} & {\\color[HTML]{24292F} 0.7324} & {\\color[HTML]{24292F} 0.7150} & {\\color[HTML]{24292F} 0.7752} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{std}} & {\\color[HTML]{24292F} 0.0063} & {\\color[HTML]{24292F} 0.0026} & {\\color[HTML]{24292F} 0.0191} & {\\color[HTML]{24292F} 0.0274} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{max}} & {\\color[HTML]{24292F} 0.7388} & {\\color[HTML]{24292F} 0.7354} & {\\color[HTML]{24292F} 0.7451} & {\\color[HTML]{24292F} 0.8106} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{min}} & {\\color[HTML]{24292F} 0.7234} & {\\color[HTML]{24292F} 0.7284} & {\\color[HTML]{24292F} 0.693} & {\\color[HTML]{24292F} 0.734} \\\\ 
\\hline\n\\end{tabular}%\n}\n\\end{table}\n\n\\begin{table}[htbp]\n\\centering\n\\caption{Architecture used: UNet. Patch size: 128x128 pixels (best fold is reported in bold font). Scanner: Hamamatsu NanoZoomer S60.}\n\\label{tab:my-table6}\n\\resizebox{0.8\\textwidth}{!}{%\n\\begin{tabular}{|c|c|c|c|c|}\n\\hline\n{\\color[HTML]{24292F} \\textbf{fold\\_name}} & {\\color[HTML]{24292F} \\textbf{dev\\_dice}} & {\\color[HTML]{24292F} \\textbf{dev\\_f1}} & {\\color[HTML]{24292F} \\textbf{dev\\_recall}} & {\\color[HTML]{24292F} \\textbf{dev\\_precision}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_00} & {\\color[HTML]{24292F} 0.6286} & {\\color[HTML]{24292F} 0.6429} & {\\color[HTML]{24292F} 0.6121} & {\\color[HTML]{24292F} 0.7143} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{test\\_00\\_cv\\_01}} & {\\color[HTML]{24292F} \\textbf{0.6757}} & {\\color[HTML]{24292F} \\textbf{0.6695}} & {\\color[HTML]{24292F} \\textbf{0.6931}} & {\\color[HTML]{24292F} \\textbf{0.6855}} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_02} & {\\color[HTML]{24292F} 0.617} & {\\color[HTML]{24292F} 0.6448} & {\\color[HTML]{24292F} 0.6668} & {\\color[HTML]{24292F} 0.6443} \\\\ \\hline\n{\\color[HTML]{24292F} test\\_00\\_cv\\_03} & {\\color[HTML]{24292F} 0.6167} & {\\color[HTML]{24292F} 0.6445} & {\\color[HTML]{24292F} 0.6653} & {\\color[HTML]{24292F} 0.6452} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{mean}} & {\\color[HTML]{24292F} 0.6345} & {\\color[HTML]{24292F} 0.6504} & {\\color[HTML]{24292F} 0.6593} & {\\color[HTML]{24292F} 0.6723} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{std}} & {\\color[HTML]{24292F} 0.0243} & {\\color[HTML]{24292F} 0.0110} & {\\color[HTML]{24292F} 0.0294} & {\\color[HTML]{24292F} 0.0294} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{max}} & {\\color[HTML]{24292F} 0.6757} & {\\color[HTML]{24292F} 0.6695} & {\\color[HTML]{24292F} 0.6931} & {\\color[HTML]{24292F} 0.7143} \\\\ \\hline\n{\\color[HTML]{24292F} \\textbf{min}} & {\\color[HTML]{24292F} 
0.6167} & {\\color[HTML]{24292F} 0.6429} & {\\color[HTML]{24292F} 0.6121} & {\\color[HTML]{24292F} 0.6443} \\\\ \\hline\n\\end{tabular}%\n}\n\\end{table}\n\n\\end{document}\n\\section{Introduction}\nAccumulations of Amyloid-$\\beta$ and tau protein aggregates, such as plaques in the brain gray matter, are well-known biomarkers of the neurodegenerative Alzheimer's disease (AD) \\cite{ben1}. Quantitative estimation of plaques is typically done by pathologists manually or semi-automatically, using proprietary black-box software from histopathological images of the brain -- a time and effort-intensive process prone to human observation variability and errors. \nIn recent times, deep learning (DL) based methods have shown promising results in digital pathology \\cite{jano1} and incredibly high accuracy segmentation of digital whole slide images \\cite{anant}. In \\cite{wurtz}, three different DL models were used to segment tau aggregates (tangles) and nuclei in postmortem brain Whole Slide Images (WSIs). The three models included a fully convolutional neural network (FCN), UNet, and Segnet, the latter achieving the highest accuracy in terms of IoU. In \\cite{signaevsky}, an FCN was trained on a dataset of 22 WSIs for semantic segmentation of tangle objects from postmortem brain WSIs. Their model can segment tangles of varying morphologies with high accuracy under diverse staining intensities. An FCN model was also used in \\cite{Vega2021} to classify morphologies of tau protein aggregates in the gray and white matter regions from 37 WSIs representing multiple degenerative diseases. In \\cite{manouskova2022}, tau aggregate analysis was done on a dataset of 6 WSIs with a combined classification-segmentation framework which achieved an F1 score of 81.3\\% and 75.8\\% on detection and segmentation tasks, respectively. \nSeveral domains in DL-based histopathological analysis of AD tauopathy remain unexplored. 
Firstly, most existing studies have used DL to segment tangles rather than plaques, which are harder to identify against the background gray matter due to their diffuse\/sparse appearance. Secondly, annotations of whole slide images are frequently affected by errors by human annotators. In such cases, a DL preliminary model may be trained using weakly annotated data and used to assist the expert in refining annotations. Thirdly, contemporary tau segmentation studies do not consider context information, which is essential in segmenting plaques from brain WSIs as these emerge as sparse objects against an extended background of gray matter. Finally, DL models with explainability features have not yet been applied in tau segmentation from WSIs. This is a critical requirement for DL models used in clinical applications \\cite{explain1} \\cite{Yamamoto2019}. The DL models should not only be able to identify regions of interest precisely but also give clinicians and general users the knowledge about which image features the model found necessary that influenced its decision. \nBased on the above, a DL pipeline for the segmentation of plaque regions in brain WSIs is presented in our study. This pipeline uses context and explainability features with a UNet-based semantic segmentation model to identify plaque features from WSIs.\n\n\\section{Methodology}\n\\label{sec:methodology}\n\n\\subsection{Dataset characteristics}\n\\label{sec:data_characteristics}\nIn this work, we analyzed eight whole slide images containing histological sections from the frontal cortices of patients with AD, which were provided by the French national brain biobank Neuro-CEB. Signed informed consent for autopsy and histologic analysis was obtained in all cases from the patients or their family members. 
The present cohort represents a common heterogeneity of AD cases, including slides with variable tau pathology (e.g., different object densities), variable staining quality, and variable tissue preservation. Sections of the frontal lobe were stained with AT8 antibody to reveal phosphorylated tau pathology, using a standardized immunohistochemistry protocol. Obtained slides were scanned using two Hamamatsu slide scanners (NanoZoomer 2.0-RS and NanoZoomer s60 with 227 nm\/pixel and 221 nm\/pixel resolution, respectively) at 40x initial magnification. The slides were used for human-CNN iterative object annotation resulting in about 4000 annotated and expert-validated Neuritic plaques. The labels, extracted in an XML format, constitute the segmentation ground truth.\n\n\\subsection{Data preparation}\nFrom the WSIs, at 20x magnification, patches with two levels of context information were generated using an ROI-guided sampling method. The larger patches (256x256 pixels) capture a broader context containing object neighborhood and background pixels, whereas the smaller (128x 128 pixels) mainly focus on the plaque region without much context information. The amount of context present in each patch is quantified using a ratio of the area of annotated ROI to the total area of the patch. The plaque example in different patch sizes is shown in Fig~\\ref{fig:context} (note that the bigger patch has additional objects-plaques). In addition, two different normalizations are used and compared: Macenko~\\cite{macenko} and Vahadane~\\cite{vahadane2015normalisation} methods.\n\nA new scheme for data augmentation was implemented based on ROI-shifting to prevent the networks' bias from focussing on the center location of plaques in the patches. Accordingly, the annotated plaque ROIs are shifted to four corners of a patch, producing a four-fold augmentation of each patch containing an object. 
This augmentation aims to train the UNet models robustly in the presence of variable neighborhood context information, especially when closely-spaced plaque objects are present. An example of this augmentation is shown in Fig~\\ref{fig:ROI_aug}.\n\n\\begin{figure}[!ht]\n \\centering\n \\includegraphics[scale=0.6] {images\/patchsize2}\n \\caption{Example of plaque image for different levels of context.}\n \\label{fig:context}\n\\end{figure}\n\n\\begin{figure}[!ht]\n \\centering\n \\includegraphics[scale=0.5] {images\/augmentations1}\n \\caption{Example of ROI shifting augmentation.}\n \\label{fig:ROI_aug}\n\\end{figure}\n\n\\subsection{Deep learning architecture for segmentation}\n\nIn order to segment the neuritic plaques, a UNet model adapted from \\cite{Ronnenbunet} is used with modifications for accommodating context information within the WSI patches during training and testing. The model architecture is modified to work with the two datasets containing different patch sizes -- i.e., $128\\times128$ (having low context information) and $256\\times256$ pixels (having more information about the plaque neighborhood). For the first dataset, the UNet architecture consists of 3 downsampling and 3 upsampling convolutional blocks, in addition to the convolutional middle block. For the $256\\times256$-patch-size dataset, we added a downsampling and upsampling convolutional block to the previous UNet model. For the downsampling block, we used a leaky ReLU activation function and ReLU for the upsampling block. In both blocks, we used batch-normalization following the suggestions in \\cite{Ioffe2015BatchNA} and \\cite{manouskova2022}. Dropout was used in each convolutional block with a probability of 0.5.\n\n\\subsection{Deep learning architecture for visual interpretation} \n\nIn addition to the segmentation, we focus on deriving locations within the patches where the DL model found significant features from the plaque objects. 
Therefore, we used an attention UNet described in \\cite{oktay2018attention}, which allows us to visualize the activated features at each iteration and evaluate qualitatively where the network focuses during training. The attention UNet architecture was also modified for the two different patch-size datasets following a configuration similar to the one described for the UNet.\n\n\\section{Experiments and results}\nData preparation and UNet experiments were executed on a 12-core Intel(R) Core i9-9920X @ 3.5GHz CPU with 128 GB RAM and two 12 GB RAM Nvidia GeForce RTX 2080 Ti GPUs. The attention UNet experiments were run on a cluster (1 GPU Tesla V100S-PCIe-32GB, 12 CPU cores Intel(R) Xeon(R) Gold 6126 CPU @ 2.60GHz, and 80 GB of RAM). The average training and evaluation time of the UNet per epoch is approximately 2 minutes for the $128\\times 128$ patch-size database and 5 minutes for the $256\\times 256$ patch-size database. Meanwhile, for the attention UNet, approximately half the time is needed. On the other hand, data preprocessing takes 2 to 5 hours to process using parallel computation. Regarding memory consumption, we used at most 6 GB of GPU RAM for the larger patch dataset. In order to increase the performance, we cache the data and annotations first in CPU RAM and then move them to the GPU.\n\nWe randomly divided the 8 WSIs into 4 folds for the DL experiments. Then, we tested the network using a 4-fold cross-testing scheme, and with the remaining data from each test fold, we also performed a 3-fold cross-validation. In addition, we ran a series of tests (using these folds) to select the loss function and the best optimizer for the UNet and attention UNet. We tested 4 loss functions (i.e., Focal loss, BCEwithLogits, Dice, and BCE-Dice loss) and 4 different optimizers (i.e., SGD, Adam, RMSProp, and AdaDelta). 
After the hyperparameter tuning, we obtained the best performance using the BCE-Dice loss with a 50\\% balance between Dice and BCE (Binary Cross Entropy) and the Adadelta optimizer with $\\rho = 0.9$ and a varying learning rate based on the evolution of the validation loss. Also, we implemented early stopping for training with a patience value of 15 epochs.\n\n\\subsection{Results from UNet architecture}\nThe segmentation evaluation metric used for all of the experiments regarding the UNet is the Dice score which is equivalent to the F1 score for binary segmentation problems. In the first experiment, the UNet model was trained with two datasets having different patch sizes: $128\\times 128$ and $256\\times 256$ pixels. The mean and standard deviations of the Dice coefficient for cross-validation and cross-testing are reported in Table~\\ref{tab:dice_results1}. The patches were previously normalized using the Macenko method and then separated in their corresponding fold for training, validation, and testing following the scheme described above. We observe a decrease in the Dice score for larger patches having additional environmental context from the neuritic plaque.\n\n\\begin{table}[ht]\n\\centering\n\\caption{UNet results (Dice score) for 4-fold cross testing and 3-fold cross validation for different patch sizes.}\n\\begin{tabular}[t]{|c|c|c|c|}\n\\hline\nPatch size & Normalization & Cross validation & Cross testing\\\\\n\\hline\n$128\\times128$ & Macenko & $ 0.6954 \\pm 0.0289 $ & $0.6852 \\pm 0.0260$\\\\\n$256\\times256$ & Macenko & $0.6600 \\pm 0.0420 $ & $0.6460 \\pm 0.0330$ \\\\\n\\hline\n\\end{tabular}\n\\label{tab:dice_results1}\n\\end{table}%\n\nAs described, the WSIs were acquired using two different scanners. Therefore, to study the impact of its properties, we divided the entire cohort into two independent datasets: 4 WSIs belonging to the NanoZoomer 2.0-RS and 4 WSIs scanned with the NanoZoomer s60. 
For both datasets, we only evaluate the performance of the DL architecture using 4-fold cross-validation and patches of $128\\times 128$ pixels size. Additionally, we normalize each dataset independently (i.e., using two reference patches: one for the NanoZoomer 2.0-RS and one for the NanoZoomer s60) using the Macenko method. The Dice score obtained using the images from the higher resolution Hamamatsu NanoZoomer S60 scanner was $0.6345 \\pm 0.0243$, whereas that from the NanoZoomer 2.0-RS was $0.7342 \\pm 0.0063$.\n\nWe also study the effect of normalization in the entire dataset (8 WSIs). We normalized the patches from the $128\\times 128$ dataset using Macenko and Vahadane methods, and we selected the best fold (i.e., highest Dice score in testing for the first experiment) to train, validate and test the UNet under different input color properties. Opposite to the results reported in~\\cite{manouskova2022}, the Dice score obtained was higher using the Macenko method (0.7248 in testing) than the Vahadane (0.7098 in testing), even in validation (0.72313 for Macenko and 0.6864 for Vahadane). For a full list of results, see supplementary material.\n\n\\subsection{Visual deep learning interpretation}\nThe attention UNet model was trained using the $128\\times 128$ and the $256\\times 256$ patch size dataset, and the results are summarized in Table~\\ref{tab:attunet_results1}. 
All images were normalized using the Macenko method, and we observed a similar trend as the UNet: better performance using patches containing less background information.\n\n\\begin{table}[ht]\n\\centering\n\\caption{Attention UNet results (Dice score) for 4-fold cross testing and 3-fold cross validation for different patch sizes.}\n\\begin{tabular}[t]{|c|c|c|c|}\n\\hline\nPatch size & Normalization & Cross validation & Cross testing\\\\\n\\hline\n$128\\times128$ & Macenko & $ 0.7516 \\pm 0.0334 $ & $0.6920 \\pm 0.0254$\\\\\n$256\\times256$ & Macenko & $0.6931 \\pm 0.0447 $ & $0.6342 \\pm 0.0301$ \\\\\n\\hline\n\\end{tabular}\n\\label{tab:attunet_results1}\n\\end{table}%\n\nAn example segmentation result from the attention UNet model in a $128\\times128$ patch containing a plaque object and its corresponding ground-truth mask is shown in Fig~\\ref{fig:attunet1}. We observe that the attention UNet model finds significant activation features around the plaque object initially annotated by experts (see ground truth mask in Fig~\\ref{fig:attunet1}). We also notice that the loss at iteration 100 increases over iteration 1; however, we clearly distinguish the region of the object (dark red color). After 1000 iterations, the loss decreases 50\\% due to the fact that the Dice part of the BCE-Dice loss function influences the network into detecting a pattern very similar to the given ground truth.\n\n\\begin{figure}[!ht]\n \\centering\n \\includegraphics[scale=0.25] {images\/att_unet_res1}\n \\caption{Global coherence of attention-UNet result with human annotation.}\n \\label{fig:attunet1}\n\\end{figure}\n\nAnother result from attention UNet is in Fig~\\ref{fig:attunet2}. Here, the attention UNet focuses on 2 plaques initially annotated by a human expert. It also identifies strong activation features in regions with no ground truth annotations, which could indicate missed ROIs by human experts during the annotation process. 
Thus with the attention UNet, it is not only possible to segment the plaque objects but also to improve or refine the manual annotations by experts.\n\nWeak and imprecise annotations are frequently observed in histopathology arising from human or software errors. In such cases, deep learning attention maps could be useful to provide pathologists and biologists with refined annotations (e.g., precision on the boundaries of ROIs). An example is shown in Fig~\\ref{fig: attunet_vsexpert} where DL attention maps are closer to the shape of actual ROIs compared to human-made annotations.\n\n\\begin{figure}[!ht]\n \\centering\n \\includegraphics[scale=0.25] {images\/att_unet_res2}\n \\caption{Focus progression using successive activation layers of attention-UNet.}\n \\label{fig:attunet2}\n\\end{figure}\n\n\\begin{figure}[!ht]\n \\centering\n \\includegraphics[scale=0.7] {images\/attunet_compare}\n \\caption{Improving human annotations using attention-based DL models.}\n \\label{fig: attunet_vsexpert}\n\\end{figure}\n\n\\section{Discussion and conclusion}\nIn the presented work, we studied\/evaluated a number of factors that contribute to the segmentation of plaques from whole slide images using DL models. The key observations are the following: \n\\begin{enumerate}\n \\item Use of biomarkers: the study in~\\cite{manouskova2022} uses the ALZ50 (used to discover compacted structures) biomarker, while our study uses the AT8 (majorly used in clinics, helps to discover all structures). We focus on AT8 in order to stay close to clinical protocols. The drawback is that this biomarker creates less compact structures meaning a slightly more difficult segmentation of the plaques, as our results support.\n \\item Use of different modalities: using the AT8 biomarker, we analyzed 2 types of WSI scanners (see Section~\\ref{sec:data_characteristics}) with different resolutions. High-resolution scanners amplify the annotation errors (human-software). 
Accordingly, some results concerning the high-resolution scanners have been affected, generating lower Dice scores. \n \\item Context effect on results of DL models: We noticed that increasing the background information in the patches negatively affects the segmentation results, which can be explained by the imbalance between the foreground and background pixels. In future works, this will be addressed using adaptive loss functions to take advantage of context information around the ROIs.\n \\item Attention maps: We observed that using the attention UNet model helps us see the weakness in the human-made annotations (see Fig~\\ref{fig: attunet_vsexpert}), generating precious insights about the segmentation DL protocol, which can be used to refine the annotations by improving the border of the detected objects. These refined patterns can be used for a morphology and topology pipeline toward a robust AD patient's stratification proof. In addition, quantitative results show better performance of the same UNet architecture with attention blocks.\n \\item Comparison with state-of-the-art commercial software: We compared our WSI segmentation results with those generated by a commercial software. This software uses a UNet architecture with a VGG encoder which is different from our model. Our system outperforms this software (Dice score 0.63 for test), using the same WSI as the ones used in this paper. Besides, in this software, neither information about how patches are generated nor the type of normalization or pre-processing performed on the dataset is available.\n \n\\end{enumerate}\nWhole slide histopathology images whose sizes range in giga-pixels often contain thousands of objects per image. As seen for plaques in this study, it becomes more challenging when the objects being annotated do not have clear boundaries separating them from their surrounding environments, which may give rise to errors in human-made annotations. 
We saw an example of how DL models with visual explanation properties can help pathologists refine the ROI identification process. Our future challenge is to create deep learning assistive tools that can improve human-made few and weak annotations, a generic problem of a wide range of biomedical applications. \n\n\\section*{Acknowlegements}\nThis research was supported by Mr Jean-Paul Baudecroux and The Big Brain Theory Program - Paris Brain Institute (ICM). The human samples were obtained from the Neuro-CEB brain bank (\\url{https:\/\/www.neuroceb.org\/en\/}) (BRIF Number 0033-00011), partly funded by the patients' associations ARSEP, ARSLA, \"Conna\u00eetre les Syndromes C\u00e9r\u00e9belleux\", France-DFT, France Parkinson and by Vaincre Alzheimer Fondation, to which we express our gratitude. We are also grateful to the patients and their families.\n\n\\bibliographystyle{splncs04}\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{Introduction}\\label{Sec1}\n\nFlow and transport are the most fundamental phenomena in subsurface porous media associated with various physical processes, e.g., oil and gas flow in petroleum reservoir \\cite{terry2015applied}, CO$_2$ sequestration \\cite{zhang2015sequentially}, water pollution dispersion \\cite{bear2010modeling}, etc. The numerical simulation and analyses of flow and transport in subsurface porous media are highly demanded in practical engineering and mechanism studies. However, the simulation results are always subject to the influence of uncertainties, mainly stemming from the inherent spatial heterogeneity of media properties caused by complex geological processes \\cite{boschan2012scale}. It has been widely recognized that in natural subsurface porous media, most properties, such as permeability, porosity, etc., exhibit an uneven spatial distribution. For example, the hydraulic conductivity can span several orders of magnitude in an aquifer or reservoir. 
How to quantitatively identify the influence of uncertainties of porous media properties on the flow and transport behaviors in subsurface physical processes has been a research hot spot in recent years.\n\nTherefore, uncertainty quantification is an essential task in the simulation of practical subsurface flows where porous media properties that are unknown or partially known are taken as the input parameters. A possible way to deal with uncertainties of subsurface porous media is to treat porous media properties as random fields, then perform the stochastic simulation on the subsurface flow governing equations with random coefficients to evaluate the quantity of interest (QoI). Among commonly-used stochastic simulation methods, e.g. the Monte Carlo (MC) method, the stochastic finite element method, and the stochastic collocation method, the MC method demonstrates apparent advantages: it is a non-intrusive approach in which only the realization of coefficients is needed while the original model code remains unchanged, and it is easier to implement. In the standard MC method, computer-generated (pseudo) random points are used, and in many cases, the computational efficiency is unsatisfactory for large-scale problems. The Quasi-Monte Carlo (QMC) method improves on this shortcoming of MC by using deterministic quasi-random points. These points exhibit lower discrepancy and distribute more uniformly in the probability space. Moreover, to reduce the sample variance and further improve the computational efficiency, the multilevel Monte Carlo (MLMC) method was proposed and developed by Heinrich \\cite{Heinrich2001MultilevelMC} and Giles \\cite{giles2008multilevel}. 
It applies the control variates technique that a series of discretization is adopted with increasing resolution and computes the QoI on each of them, the success of which lies in the effective variance reduction sequentially.\n\nIt should be mentioned that in the particular case of subsurface flow with random coefficients, the problem is further aggravated where very detailed geological models are needed (a large number of cells) for an accurate description of the flow. To further alleviate the computational burden connected to the evaluation of random parameter effects on subsurface flow using the MLMC method, in this study, we exploit the similar hierarchies of MLMC and multigrid methods and proposed a full multigrid multilevel (quasi-) Monte Carlo (FMG-MLQMC) approach. In this proposed method, the solution on coarse mesh $Q_l^c$ can be obtained as a byproduct of the full multigrid solution on fine mesh $Q_l^f$ on each mesh level $l$, instead of directly solving the equations on the coarse mesh as the standard MLMC does. The proposed FMG-MLQMC method saves the computation of the $Q_l^c$. There have been works coupling the multigrid solver with the multilevel framework, see \\cite{kumar2017multigrid,robbe2018recycling} for example. However, the FMG-MLQMC method we proposed saves the computational cost without modifying the MLMC framework. We exploit the implementation method for upscaling the random coefficient from fine mesh to neighboring coarse mesh. Although in this study we only focus on the simple single-phase subsurface flow with random coefficients, the proposed approach can be applied and extended naturally to multiphase flow and transfer in porous media and any other flow and transport problems associated with uncertainty effect. 
\n\nThe rest of the paper is organized as follows: In Section \\ref{Sec2}, we give a brief description of the single-phase subsurface flow and then introduce the proposed full multigrid multilevel (quasi-) Monte Carlo method in detail. The methodology on the upscaling method of random coefficients from the fine mesh to the coarse mesh, which preserves the random structure, is presented as well. In Section \\ref{Sec3}, we verify the effectiveness (a smaller estimator variance and faster convergence rate) of the presented method by comparing with standard MLMC method in two numerical experiments. Finally, in Section \\ref{Sec4}, we report the concluding remarks of this work along with a brief discussion of future directions.\n\n\\section{Algorithms}\\label{Sec2}\n\\subsection{Model problem and MLMC method}\n\nIn this work, we consider the following elliptic problem, \n\n\\begin{equation} \\label{eq:elliptic_pde}\n \\left\\{\n \\begin{aligned} \n -\\nabla \\cdot (k(\\bm{x}, \\omega) \\nabla u(\\bm{x},\\omega)) &= f(\\bm{x}) \\quad \\text{ in } D \\\\\n u(\\bm{x},\\omega) &= g(\\bm{x}) \\quad \\text{ on } \\Gamma_D\\\\\n \\frac{\\partial u(\\bm{x},\\omega)}{\\partial \\bm{n}} &= v(\\bm{x}) \\quad \\text{ on } \\Gamma_N\\\\\n \\end{aligned}\n \\right.\n\\end{equation}\nwhere $k(\\bm{x},\\omega)$ is the random, spatial-varying coefficient, $D$ is the computational domain, $\\omega$ is a sample from the probability triple $(\\Omega, \\mathcal{F}, P)$. $\\Gamma_D$ and $\\Gamma_N$ are Dirichlet and Neumann boundaries respectively. In single-phase flow context, Eq.\\ref{eq:elliptic_pde} corresponds the steady-state situation, when $g$ and $v$ prescribe the pressure and velocity of the fluid at the boundary, then the solution $u$ depicts the pressure in the domain $\\Omega$. \n\nIn this work, we address the random elliptic problem using the multi-level algorithm. 
Basically, the MLMC method employs a series of control variates, which are often the discretized models with increasing resolution levels. Here, we associate each level with one mesh with given resolution. The approximations of quantity of interest(QoI) on these levels are denoted as $Q_0,Q_1,\\cdots,Q_L$, see Figure \\ref{fig:chap2_MLMC}. \n\n\\begin{figure}[h!]\n \\centering\n \\includegraphics[width=0.8\\textwidth]{MLMC.png}\n \\caption{Multilevel Monte Carlo}\n \\label{fig:chap2_MLMC}\n\\end{figure}\n\nWe are interested in the approximation $Q_L$ on the finest level $L$. The MLMC method not only computes the solution on level $L$ itself, but also calculates the solutions on all the preceding meshes. The expectation of such quantity can be expressed by the following telescoping formula,\n\\begin{equation} \\label{eq:Chap2_Telescoping_Formula}\n \\bb{E}[Q_L] = \\bb{E}[Q_0] + \\sum_{l=1}^{L} (\\bb{E}[Q_l] - \\bb{E}[Q_{l-1}])\n\\end{equation}\n\nWe can approximate each expectation using the Monte Carlo approach as follows,\n\\begin{equation} \\label{eq:Chap2_Telescoping_Formula_approx}\n\\bb{E}[Q_L]\\approx \\frac{1}{N_0}\\sum_{i=1}^{N_0}Q_0(\\omega_{i,0})+\\sum_{l=1}^{L}\\frac{1}{N_l}[\\sum_{i=1}^{N_l}(Q_l(\\omega_{i,l})-Q_{l-1}(\\omega_{i,l})]\n\\end{equation}\n\nHere, we associate level $l$ with the $l$-th term in the telescoping formula \\eqref{eq:Chap2_Telescoping_Formula}. Notice that on each level $l$, we use the same samples $\\omega_{i,l}$ to calculate $Q_l$ and $Q_{l-1}$. Then $Q_l$ and $Q_{l-1}$ are likely to correlate well, and the variance of $Q_l - Q_{l-1}$ will be small, see Eq.\\ref{eq:Chap2_variance_small}. \n\n\\begin{equation}\\label{eq:Chap2_variance_small}\n\\begin{split}\nV_l = \\bb{V}[Q_l - Q_{l-1}] &= \\bb{V} [Q_l] + \\bb{V} [Q_{l-1}] - 2Cov(Q_l,Q_{l-1})\\\\\n& \\ll \\bb{V} [Q_l] + \\bb{V} [Q_{l-1}]\n\\end{split}\n\\end{equation}\nwhere we denote $V_l$ as the variance of the $Q_l - Q_{l-1}$ on level $l$. 
\n\nAlso notice that on different levels, independent samples are used so that the variance of the multilevel estimator $Q_L$ is the summation of the variance on each level.\\\\\n\\begin{equation} \\label{eq:chap2_variance_sum}\n \\bb{V}[Q_L] = \\sum_{l=0}^{L} \\bb{V}[Q_l - Q_{l-1}] = \\sum_{l=0}^L V_l,\n\\end{equation}\nwhere we let $Q_{-1} = 0$. If we write $Y_0=Q_0$ and $Y_l=Q_l-Q_{l-1}$, then\n\\begin{equation*}\n\\bb{E}[Q_L]=\\sum_{l=0}^{L}\\bb{E}[Y_l]\n\\end{equation*}\nLet $\\hat{Y}_l$ be an unbiased estimator for $E[Y_l]$, \n\\begin{equation*}\n\\begin{aligned}\n\\hat{Y}_0 &=\\frac{1}{N_0}\\sum_{i=1}^{N_0}Q_0(\\omega_{i,0})\\\\\n\\hat{Y}_l &=\\frac{1}{N_l}[\\sum_{i=1}^{N_l}(Q_l(\\omega_{i,l})-Q_{l-1}(\\omega_{i,l})] \\qquad {l=1,2,3,\\cdots,L}\n\\end{aligned}\n\\end{equation*}\nthen the multilevel estimator becomes,\n\\begin{equation}\n\\hat{Q}^{ML}_L=\\sum_{l=0}^{L}\\hat{Y}_l \n\\end{equation}\n\n\n\\subsection{MLMC Complexity Theory}\nLet $Q$ denote a quantity of interest, and $Q_l$ denote the corresponding numerical approximation on $l$-th mesh. If we assume that the weak error and the level variance decreases exponentially while the cost per sample on each level increases exponentially, there exist positive constants $\\alpha$, $\\beta$ and $\\gamma$ satisfying the following,\n\\begin{equation}\n \\begin{split}\n \\left \\lvert \\bb{E}[Q_l-Q] \\right\\rvert &= \\mathcal{O}(2^{-\\alpha l})\\\\\n \\bb{V}[Y_l] &= \\mathcal{O}(2^{-\\beta l})\\\\\n C_l &= \\mathcal{O}(2^{\\gamma l})\n \\end{split}\n \\label{eq:chap2_assumption}\n\\end{equation}\nwhere $C_l$ is the cost per sample on level $l$. 
With the mean square error less than a threshold,\n\n\\begin{equation}\n\\bb{E}[(\\sum_{l=0}^{L} \\hat{Y}_l - \\bb{E}[Q])^2] = \\sum_{l=0}^L N_l^{-1} V_l+(\\bb{E}[\\hat{Q}^{ML}_L-Q])^2 < \\epsilon ^{2}\\\\\n\\label{eq:chap2_mlmc_computation_goal}\n\\end{equation}\nthe total computational cost satisfies,\n\\[\nC=\\left\\{\n \\begin{array}{ll}\n \\mathcal{O}(\\epsilon^{-2}) & \\beta > \\gamma\\\\\n \\mathcal{O}(\\epsilon^{-2} (\\log \\epsilon )^2) & \\beta = \\gamma\\\\\n \\mathcal{O}(\\epsilon^{-2-(\\gamma-\\beta)\/\\alpha}) & \\beta < \\gamma\\\\\n \\end{array}\n\\right.\n\\]\nas $\\epsilon \\to 0$. \n\n\n\n\\subsection{MLMC Algorithm} \\label{Chapter:MLMC_Algorithm}\nThis subsection gives a MLMC algorithm initially proposed by M.Giles\\cite{giles2008multilevel}.\\\\\n\n\\begin{algorithm}[H]\n\\SetAlgoLined\n Start with $L=2$, set the initial number of samples on level 0,1,2\\\\\n \\While{extra samples need to be evaluated}{\n evaluate $Q_l(\\omega_{i,l})$ and $Q_{l-1}(\\omega_{i,l})$, for $\\{i,l:dN_l \\neq 0, i = 1,\\cdots,dN_l\\}$\\\\\n update estimates for $V_l$, $l=0,\\cdots,L$\\\\\n update optimal $N_l$, compute the number of extra samples $dN_l$\\\\\n \\eIf{$|\\bb{E}[Q_L-Q]| \\approx \\frac{|\\bb{E}[Q_L-Q_{L-1}]|}{(2^{\\alpha}-1)}<\\frac{\\epsilon}{\\sqrt{2}}$}{\n \\textbf{break}\n }{\n $L=L+1$ and initialize $N_L$\\;\n }\n }\n \\caption{MLMC Algorithm}\n \\label{alg:MLMC}\n\\end{algorithm}\n\nIn Algorithm \\ref{alg:MLMC}, the variances $V_l$ are approximated by the sample variances on the run. The weak error $\\abs{\\bb{E}[Q_L-Q]}$ is approximated by Richardson extrapolation $\\frac{\\abs{\\bb{E}[Q_L-Q_{L-1}]}}{(2^{\\alpha}-1)}$. \n\nAnd here we consider a equal split of the estimator variance and the approximation error, i.e.,\n\\begin{align}\n \\sum_{l=0}^L N_l^{-1} V_l &< \\epsilon^2\/2\\\\\n \\bb({E}[\\hat{Q}^{ML}_L - Q])^2 &< \\epsilon^2\/2. 
\\label{eq:Chap_2_variance_constraint}\n\\end{align}\nHowever, it is possible to determine the split factor in an optimal way\\cite{Collier2015}. \n\nThe optimal number of samples $N_l$ can be obtained by solving a constrained optimization : minimizing the total computational cost subject to the constraint Eq.\\ref{eq:Chap_2_variance_constraint}. \n\nMLMC algorithm will work under the following three conditions. \n\n\\paragraph{Convergence} The sequence $Q_0, Q_1, \\cdots, Q_L, \\cdots$ converges. Otherwise, the telescoping equation \\eqref{eq:Chap2_Telescoping_Formula} does not yield a converging result. \n\\paragraph{Correlation} $Q_l$ and $Q_{l-1}$ are estimated using the same underlying random sample $\\omega_{i,l}$ in equation \\eqref{eq:Chap2_Telescoping_Formula_approx}, and are thus well correlated. In this case, the estimator variance is significantly reduced. \n\\paragraph{Consistency} The telescoping sum \\eqref{eq:Chap2_Telescoping_Formula} introduces no bias error. Notice that in the telescoping equation, the term $Q_{l-1}$ for $l=1,\\cdots,L$ appears twice. However, the two $Q_{l-1}$ may be evaluated differently. If we denote the $l$-th term in the telescoping equation $Q_{l}$ and $Q_{l-1}$ by $Q_{l}^{f}$ and $Q_{l}^{c}$, respectively, which denote the fine mesh solution and coarse mesh solution on level $l$, then the condition $\\bb{E}[Q_{l-1}^f] = \\bb{E}[Q_l^c]$ needs to be satisfied in order to introduce no bias error in equation \\eqref{eq:Chap2_Telescoping_Formula}. The expectation of fine mesh solution on level $l-1$ should be the same as that of the coarse mesh solution on level $l$.\n\n\n\\subsection{MLQMC Algorithm}\n\nThe QMC method can solve integration problems as well. In contrast to the MC method, the QMC method replaces random points with deterministic points. Figure \\ref{fig:MC_Lattice_Sobol} gives an example of Monte Carlo points, lattice rule, and Sobol' sequence. 
\n\n\\begin{figure}[h!]\n \\centering\n \\begin{subfigure}{.33\\textwidth}\n \\centering\n \\includegraphics[width=0.97\\textwidth]{Monte_Carlo_Random_64.png}\n \\caption{Monte Carlo}\n \\end{subfigure}\n \\begin{subfigure}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=0.97\\textwidth]{Lattice_Sequence_64.png}\n \\caption{Lattice rule}\n \\end{subfigure}\\hfill\n \\begin{subfigure}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=0.97\\textwidth]{Sobol_Sequence_64.png}\n \\caption{Sobol' sequence}\n \\end{subfigure}\n \\caption{An example of Monte Carlo points, Lattice rule and Sobol' sequence in $[0,1]\\times[0,1]$ domain.}\n \\label{fig:MC_Lattice_Sobol}\n\\end{figure}\n\nThe QMC approximation is given by,\n\\[\n\\mathcal{I}_{QMC}^{N}=\\frac{1}{N} \\sum_{i=1}^{N} Q(t_i) \\approx \\bb{E}[Q].\n\\]\n\nNotice that the points $\\{t_i\\}_{i=1}^{N}$ are deterministic. However, deterministic points yield biased estimates. In this work, we use the randomized QMC: random shift and digital scramble for lattice rule and Sobol' sequence respectively. The interested readers may refer to \\cite{dick2013high} for the above mentioned two randomization techniques. \n\nThere have been numerous studies combining the MLMC and QMC methods\\cite{giles2009multilevel,giles2016combining,kuo2017multilevel}. 
\nWe follow the multilevel quasi-Monte Carlo (MLQMC) settings from these works and list the algorithm here.\n\n\\begin{algorithm}[H]\n\\SetAlgoLined\nStart with $L=2$, set initial number of samples $N_0$ on level 0,1,2\\\\\n\\While{extra samples need to be evaluated}{\n\tevaluate $Q_l(\\omega_{i,l})$ and $Q_{l-1}(\\omega_{i,l})$, for $\\{i,l:dN_l \\neq 0, i = 1,\\cdots,dN_l\\}$\\\\\n\tupdate estimates for $V_l$, $l=0,\\cdots,L$ and compute $\\bb{V}[Q]$\\\\\n\t\\eIf{$\\bb{V}[Q] > \\epsilon^2\/2$}{\n\t\tselect level $l$ such that $l=\\text{argmax} \\frac{\\bb{V}[Y_l]}{N_lC_l}$ and double $N_l$}{\n\t\t\\eIf{$\\bb{E}[|Q_L-Q|]\\approx\\frac{\\abs{\\bb{E}[Q_L-Q_{L-1}]}}{(2^{\\alpha}-1)}<\\frac{\\epsilon}{\\sqrt{2}}$}{\n\t\t\t\\textbf{break}\n\t\t}{\n\t\t\t$L=L+1$ and initialize $N_L$ \n\t\t}\n\t}\n}\n\\caption{MLQMC Algorithm}\n\\end{algorithm}\n\n\\subsection{Multigrid}\n\nThe multigrid method was originally introduced to solve elliptic boundary-value problems efficiently. It has since been developed to solve either linear or non-linear systems. Multigrid methods compute the solution on a sequence of grids. Figure. \\ref{fig:FMG-MLMC} gives an illustration of the full multigrid scheme\n\nWe observe that, when the full multigrid solver is applied to the MLMC problem, based on the same level hierarchies, the solution on the coarse mesh $Q^c_l$ can be obtained as a byproduct of the multigrid solution on fine mesh $Q^f_l$ on each level $l$. Thus, in our proposed FMG-MLMC method, we have saved the computation for $Q^c_l$. Also notice that at the red circles in Figure. \\ref{fig:FMG-MLMC} the solutions are exact, since they are the end point of each V-cycle. 
\n\n\\begin{figure}[H]\n \\centering\n \\includegraphics[width=0.80\\textwidth]{FMG-MLMC.png} \n \\caption{An illustration of Full-Multigrid-Multilevel Monte Carlo method.}\n \\label{fig:FMG-MLMC}\n\\end{figure}\n\nRecall that $Q_l(\\omega_{i,l})$ and $Q_{l-1}(\\omega_{i,l})$ are evaluated by the same underlying random sample. In level $l$, we denote $K_l^f$ and $K_l^c$ as the coefficients of the fine and coarse models, respectively. Also recall the consistency condition, since we use the same numerical solver for $Q_l^c$ and $Q_{l-1}^f$, we only require that $K_l^c$ and $K_{l-1}^f$ follow the same distribution. \n\n$K_l^f$ can be generated by the matrix decomposition method, KL-expansion method or other random field generation methods, see \\cite{Liu2019} for example. A way to generate $K_l^c$ is to coarsen $K_l^f$. In order to prevent bias error, $K_l^c$ should satisfy the same distribution law as $K_l^f$. Figure \\ref{fig:chap3_coarsening} shows a way of coarsening. \n\n\\begin{figure}[H]\n \\centering\n \\includegraphics[width=0.6\\textwidth]{coarsen.jpg}\n \\caption{Coarsening}\n \\label{fig:chap3_coarsening}\n\\end{figure}\nIn this scheme, the value of the coefficient in the coarse grid is selected to be the corresponding block in the fine grid. We denote the blocks in fine grid and coarse grid by $k^f_{i,j}$ and $k^c_{I,J}$ respectively, then we have the following,\n\n\\begin{equation*}\n k^c_{I,J} = k^f_{2I-1, 2J-1}.\n\\end{equation*}\n\nHere we give an algorithm to describe our proposed FMG-MLMC method. 
\n\n\\begin{algorithm}[H]\n\\SetAlgoLined\n Start with $L=2$, set the initial number of samples on level 0,1,2\\\\\n \\While{extra samples need to be evaluated}{\n coarsen $K_l^f$ to obtain $K_l^c$\\\\\n use the multigrid solver to compute realizations $Q_l(\\omega_{i,l})$, and obtain $Q_{l-1}(\\omega_{i,l})$ as a byproduct, for $\\{i,l:dN_l \\neq 0, i = 1,\\cdots,dN_l\\}$\\\\\n update estimates for $V_l$, $l=0,\\cdots,L$\\\\\n update optimal $N_l$, compute number of extra samples $dN_l$\\\\\n \\eIf{$|\\bb{E}[Q_L-Q]| \\approx \\frac{|\\bb{E}[Q_L-Q_{L-1}]|}{(2^{\\alpha}-1)}<\\frac{\\epsilon}{\\sqrt{2}}$}{\n \\textbf{break}\n }{\n $L=L+1$ and initialize $N_L$\\;\n }\n }\n \\caption{FMG-MLMC Algorithm}\n\\end{algorithm}\n\nNotice that in our scheme, no correlation is introduced into each levels. Thus the equation \\eqref{eq:chap2_variance_sum} still holds true. \n\n\\section{Numerical Validation}\\label{Sec3}\n\\subsection{Problem Statement}\\label{Sec3.1}\nRecall the elliptic problem \\ref{eq:elliptic_pde} in Section 2, now we focus the physical domain $\\Omega=[0,1]^2$. In this work, we consider cases in two different boundary conditions and quantities of interest, as listed in Table \\ref{tab:Chap4_cases}. Case I is of the Dirichlet boundary type, with pointwise output quantity. 
Case II is of the mixed Dirichlet-Neumann boundary condition, whose output is the outflow at the east boundary.\n\n\\begin{table}[h]\n\\centering\n\\caption{Case Settings}\n\\begin{tabular}{ccc}\\hline\nCase & Boundary Condition & QoI \\\\ \\hline\n1 & $u\\mid_{\\partial W} = 100$, $u\\mid_{\\partial E} = 0$, $u\\mid_{\\partial N} = 50$, $u\\mid_{\\partial S} = 10$ & $u(0.5,0.5)$ \\\\\n2 & $u\\mid_{\\partial W} = 100$, $u\\mid_{\\partial E} = 0$, $\\frac{\\partial u}{\\partial n}\\mid_{\\partial N}=0$, $\\frac{\\partial u}{\\partial n}\\mid_{\\partial S}=0$ & $\\int_{\\partial E} -k\\nabla udx$ \\\\\\hline\n\\end{tabular}\n\\label{tab:Chap4_cases}\n\\end{table}\n\n\\null\n\\noindent (1) \\textbf{Discretization}\\\\\nThe governing equation (\\ref{eq:elliptic_pde}) is discretized by the finite-volume method on rectangular grids. On level $l$ the degree of freedom is $2^{l+2} \\times 2^{l+2}$.\\\\\n(2) \\noindent\\textbf{Random Fields}\\\\\nWe choose the Mat\\'ern covariance function \n\\begin{equation} \\label{eq:Matern Covariance}\nC_\\nu (d)=\\sigma^2 \\frac{2^{1-\\nu}}{\\Gamma(\\nu)} (\\sqrt{2\\nu} \\frac{d}{\\lambda})^{\\nu} K_\\nu (\\sqrt{2\\nu}\\frac{d}{\\lambda}),\n\\end{equation}\nwhere $d$ is the Euclidean distance of two points, $\\lambda$ is the correlation length, and $\\nu$ controls the smoothness of the field.\n\nThe parameters of Mat\\'ern covariance for four different random fields are given in Table \\ref{table:Random Field Parameters Setting}.\n\\begin{table}[h] \n\\centering\n\\caption{Random Field Parameters Settings}\n\\label{table:Random Field Parameters Setting}\n\\begin{tabular}{cc}\\hline\nRandom Field & Parameters \\\\ \\hline\n1 & $\\nu = 0.5, \\lambda = 0.5, \\sigma^2 = 1$ \\\\\n2 & $\\nu = 0.5, \\lambda = 1, \\sigma^2 = 1$\\\\\n3 & $\\nu = 1, \\lambda = 0.5, \\sigma^2 = 1$\\\\\n4 & $\\nu = 1, \\lambda = 1, \\sigma^2 = 1$\\\\\\hline\n\\end{tabular}\n\\end{table} \n\nThe random fields are generated using the KL-expansion method. 
The truncation term is determined when $99\\%$ of the variability is captured, meaning that\n\\[\n\\frac{\\sum_{i=1}^{N_{KL}} \\theta_i}{\\sum_{i=1}^{\\infty} \\theta_i} = 99\\%,\n\\]\nwhere the summation of all eigenvalues satisfies the following\n\\[\n\\sum_{i=1}^{\\infty} \\theta_i = \\sigma^2 meas(\\Omega) = \\sigma^2 \\int_{\\Omega} dx,\n\\]\nReaders of interest can see \\cite{ernst2009efficient} for examples. $\\Omega$ is the random field region. \\\\\n(3) \\noindent\\textbf{QMC Method}\\\\\nIn QMC method, the Lattice rule points are generated using the software from \\cite{kuo2016application}, Sobol' matrices from \\cite{joe2008constructing} are used to generate Sobol' sequences. In both cases, 24 randomizations are applied. The confidence intervals are obtained by 10 sets of randomization.\n\n\\subsection{Numerical results}\\label{Sec3.2}\n\nIn this subsection we present the numerical results of the two cases (Tab. \\ref{tab:Chap4_cases}) with four random field settings (Tab. \\ref{table:Random Field Parameters Setting}). In each figure, the first and second row corresponds the cases $\\nu = 0.5$ and $\\nu = 1.0$ respectively, while the first column and second column corresponds the cases $\\lambda = 0.5$ and $\\lambda = 1.0$. We will first give the results of the first case in the following. \n\nThe simulation starts by estimating the asymptotic rates $\\alpha, \\beta, \\gamma$ in the assumptions \\eqref{eq:chap2_assumption}. Figs. \\ref{fig:mlmc_1_var}, \\ref{fig:mlqmc_lattice_1_var} and \\ref{fig:mlqmc_sobol_1_var} plot the variance of the QoI $Q_l$ and $Y_l = Q_l - Q_{l-1}$ against level $l$. By comparison, the QMC method reduces in the variance not in the asymptotic variance convergence rate, but in the y-axis offsets. 
\n\n\\begin{figure}[H]\n \\begin{center} \n \\hfill\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{MLMC_Case_I_0.5_0.5_Variance}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{MLMC_Case_I_0.5_1.0_Variance}.png}\n \n \\end{minipage}\\hfill\n \\null \\\\\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{MLMC_Case_I_1.0_0.5_Variance}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{MLMC_Case_I_1.0_1.0_Variance}.png}\n \n \\end{minipage}\\hfill\n \\end{center}\n \n \\caption{Variance of $Q_l$ and $Y_l$ for 4 random fields}\n \\label{fig:mlmc_1_var}\n\\end{figure}\n\nFor the results of MLQMC-Lattice, the variance test (Fig. \\ref{fig:mlqmc_lattice_1_var}) is presented. \n\n\\begin{figure}[H]\n \\begin{center}\n \\hfill\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Lattice_Case_I_0.5_0.5_Variance}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Lattice_Case_I_0.5_1.0_Variance}.png}\n \n \\end{minipage}\\hfill\n \\null \\\\\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Lattice_Case_I_1.0_0.5_Variance}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Lattice_Case_I_1.0_1.0_Variance}.png}\n \n \\end{minipage}\\hfill\n \\end{center}\n \\caption{Variance of $Q_l$ and $Y_l$ for 4 random fields}\n \\label{fig:mlqmc_lattice_1_var}\n\\end{figure}\n\nFor the results of MLQMC-Sobol', the variance (Fig. \\ref{fig:mlqmc_sobol_1_var}) is presented. 
\n\n\\begin{figure}[H]\n \\begin{center}\n \\hfill\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Sobol_Case_I_0.5_0.5_Variance}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Sobol_Case_I_0.5_1.0_Variance}.png}\n \n \\end{minipage}\\hfill\n \\null \\\\\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Sobol_Case_I_1.0_0.5_Variance}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Sobol_Case_I_1.0_1.0_Variance}.png}\n \n \\end{minipage}\\hfill\n \\end{center}\n \\caption{Variance of $Q_l$ and $Y_l$ for 4 random fields}\n \\label{fig:mlqmc_sobol_1_var}\n\\end{figure}\n\nThe QMC method does not affect the expectation value nor the computational cost. Here we skip the comparison of $\\alpha$ and $\\gamma$, but present the final computational cost of the three methods, given $\\epsilon$. The results in 4 random fields are presented in Fig. \\ref{fig:case1_comparison}. 
\n\\begin{figure}[H]\n \\begin{center}\n \\hfill\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Comparison_I_0.5_0.5_Total_Cost}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Comparison_I_0.5_1.0_Total_Cost}.png}\n \n \\end{minipage}\\hfill\n \\null \\\\\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Comparison_I_1.0_0.5_Total_Cost}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Comparison_I_1.0_1.0_Total_Cost}.png}\n \n \\end{minipage}\\hfill\n \\end{center}\n \\caption{Computational complexity of the three methods; the asymptotic rates are marked on the plot}\n \\label{fig:case1_comparison}\n\\end{figure}\n\nFor case II, we test the QMC convergence rate and then present the computational complexity. \n\nNext the results of MLQMC-Lattice, the convergence test (Fig. \\ref{fig:mlqmc_lattice_2_test}) variance of level estimator $Y_l$ against $N_l$ in each level. The offset between the lines reveals the variance decreases with the levels. The comparison shows Sobol' sequence's advantage in decreasing variance. The random parameter settings have small impact on the variances in this case. 
\n\n\\begin{figure}[H]\n \\begin{center}\n \\hfill\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Lattice_Case_II_0.5_0.5_QMC_Test}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Lattice_Case_II_0.5_1.0_QMC_Test}.png}\n \n \\end{minipage}\\hfill\n \\null \\\\\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Lattice_Case_II_1.0_0.5_QMC_Test}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Lattice_Case_II_1.0_1.0_QMC_Test}.png}\n \n \\end{minipage}\\hfill\n \\end{center}\n \\caption{Variance test of MLQMC-Lattice for all levels. The variance of the estimator is plotted as a function of the number of samples $N_l$. The convergence rates $\\lambda$ are marked on the plot}\n \\label{fig:mlqmc_lattice_2_test}\n\\end{figure}\n\nNext, the results of MLQMC-Sobol', the convergence test (Fig. \\ref{fig:mlqmc_sobol_2_test})\n\n\\begin{figure}[H]\n \\begin{center}\n \\hfill\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Sobol_Case_II_0.5_0.5_QMC_Test}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Sobol_Case_II_0.5_1.0_QMC_Test}.png}\n \n \\end{minipage}\\hfill\n \\null \\\\\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Sobol_Case_II_1.0_0.5_QMC_Test}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Sobol_Case_II_1.0_1.0_QMC_Test}.png}\n \n \\end{minipage}\\hfill\n \\end{center}\n \\caption{Variance test of MLQMC-Sobol for all levels. The variance of the estimator is plotted as a function of the number of samples $N_l$. 
The convergence rates $\\lambda$ are marked on the plot}\n \\label{fig:mlqmc_sobol_2_test}\n\\end{figure}\n\nFinally, we compare the computational cost of the three methods, given $\\epsilon$. The results in 4 random fields are presented in Fig. \\ref{fig:case2_comparison}. \n\\begin{figure}[H]\n \\begin{center}\n \\hfill\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Comparison_Sobol_Case_II_0.5_0.5_Total_Cost}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Comparison_Sobol_Case_II_0.5_1.0_Total_Cost}.png}\n \n \\end{minipage}\\hfill\n \\null \\\\\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Comparison_Sobol_Case_II_1.0_0.5_Total_Cost}.png}\n \n \\end{minipage}\n \\begin{minipage}{0.33\\textwidth}\n \\centering\n \\includegraphics[width=\\textwidth]{{Comparison_Sobol_Case_II_1.0_1.0_Total_Cost}.png}\n \n \\end{minipage}\\hfill\n \\end{center}\n \\caption{Computational complexity of the three methods}\n \\label{fig:case2_comparison}\n\\end{figure}\n\n\\section{Conclusions}\\label{Sec4}\n\nIn this work, we combined the MLMC with a full multigrid method. We saved the computation for the coarse grid solution on each level without modifying MLMC hierarchy and introducing correlation between different levels. We applied the consistent coarsening approach such that no bias error was introduced in the telescoping formula \\eqref{eq:Chap2_Telescoping_Formula}. \n\nWe tested our FMG-MLQMC algorithm on 2-D elliptic PDE with random coefficients for two different types of boundary condition settings and QoIs. The random coefficients were modelled as lognormal random fields with the Mat\\'ern covariance function with various parameter settings. We observed that quasi-Monte Carlo approaches have better performance on smoother random fields and problems with more regularity. 
Also, the comparison of Monte Carlo and quasi-Monte Carlo methods (including Lattice rule and Sobol' sequence) revealed that QMC outperforms MC due to a smaller estimator variance, and Sobol' sequence performs slightly better than Lattice rule.\n\n\n\nOne future work could be the substitution of the geometric multigrid solver with the algebraic multigrid (AMG) solver. In the AMG scheme, the grids are not associated with physical meshes, rather, the grids are fully determined by the matrix entries algebraically. \n\nAnother work could be to extend the elliptic model to more sophisticated models, such as two-phase porous flow. The efficient sampling and fast simulation of multi-phase subsurface flow under heterogeneous media could produce practical values.\n\nFurther work could extend the multilevel model to a multiscale model, and multiscale meshes rather than geometric meshes would be used. In this case, one level could correspond to one scale, and sampling would be performed on each scale. The literature on multiscale modeling can be found in \\cite{jenny2003multi,efendiev2009multiscale}. \n\n\\section*{Acknowledgements}\nThe authors gratefully acknowledge the support from the National Natural Science Foundation of China (Nos. 51874262, 51904031) and the Research Funding from King Abdullah University of Science and Technology (KAUST) through the grants BAS\/1\/1351-01-01.\n\n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\\section{Introduction}\n\n\nThe notion of generalized geometry goes back to the work of Hitchin \\cite{Hi1} (see also \\cite{Hi3}). In this context,\n Witt \\cite{W} introduced a new type of structures on a $7$-dimensional\nmanifold $M$ in terms of a differential form of mixed degree,\nthus generalizing the classical notion of $G_2$-structure\ndetermined by a stable and positive $3$-form. 
Instead of studying\ngeometry on the tangent bundle $TM$ of the manifold, one\nconsiders the bundle $TM \\oplus T^*M$ endowed with a natural\norientation and an inner product of signature $(7, 7)$, where\n$T^*M$ denotes the cotangent bundle of $M$. In this way, if $M$\nis spin, then the differential form of mixed type can be viewed\nas a $G_2 \\times G_2$-invariant spinor $\\rho$ for the bundle and\nit is called the structure form.\n\nThese structures are called {\\it generalized $G_2$-structures}\nand they induce a Riemannian metric, a $2$-form $b$ (the\n$B$-field), two unit spinors $\\Psi_{\\pm}$ and a function $\\phi$\n(the dilaton). By \\cite{W}, any $G_2 \\times G_2$-invariant\nspinor $\\rho$ is stable and has a canonical expression by $\\rho\n= e^{-\\phi} e^{\\frac{b}{2}} \\wedge (\\Psi_+ \\otimes\n\\Psi_-)^{ev,od}$ in terms of the two spinors, the $B$-field and\nthe dilaton function. In the paper we will restrict to the case\nof constant dilaton, i.e. $\\phi = {\\mbox const}$, and trivial\n$B$-field.\n\nUp to a $B$-field transformation, a generalized $G_2$-structure is\nessentially a pair of $G_2$-structures. If the two spinors\n$\\Psi_+$ and $\\Psi_-$ are linearly independent, then the\nintersection of the two isotropy groups, both isomorphic to\n$G_2$, determined by the two spinors coincides with $SU(3)$.\nTherefore, one can express the structure form in terms of the\nform $\\alpha$ dual to the unit vector stabilized by $SU(3)$ and of\nthe forms $(\\omega, \\psi = \\psi_+ + i \\psi_-)$, associated with\n$SU(3)$, where $\\omega$ is the fundamental form and $\\psi$ is\nthe complex volume form. 
Assuming that the angle between $\\Psi_+$\nand $\\Psi_-$ is $\\frac {\\pi} {2}$, then it turns out that\n\\begin{equation} \\label{expression}\n\\begin{array}{l}\n\\rho = (\\Psi_+ \\otimes \\Psi_-)^{ev} = \\omega + \\psi_+ \\wedge\n\\alpha - \\frac{1}{6} \\omega^3 \\wedge \\alpha,\\\\ [5pt] \\hat \\rho =\n(\\Psi_+ \\otimes \\Psi_-)^{od} = \\alpha - \\psi_- - \\frac{1}{2}\n\\omega^2 \\wedge \\alpha,\n\\end{array}\n\\end{equation}\nwhere $\\hat \\rho$ is the companion of $\\rho$ and $\\omega^k$\ndenotes the $k$-power wedge of $\\omega$. In this paper we will\nconsider generalized $G_2$-structures defined by the previous\nstructure forms. In this case, the two associated\n$G_2$-structures do not coincide.\n\n\nIf $H$ is a $3$-form (not necessarily closed) on $M$, then one can\n consider two\ntypes of generalized $G_2$-structures with respect to the\n$3$-form $H$: \\newline the {\\em strongly integrable} ones, i.e.\nthose associated to a structure form $\\rho$ which satisfies\n\\begin{equation} \\label{strongnotclosed}\nd \\rho + H \\wedge \\rho = d \\hat \\rho + H \\wedge \\hat \\rho =0,\n\\end{equation}\n and the {\\em\nweakly integrable} ones, i.e. those defined by the condition $$ d\n\\rho + H \\wedge \\rho = \\lambda \\hat \\rho, $$ where $\\lambda$ is a\nnon-zero constant. The previous structures are said of even or\nodd type according to the parity of $\\rho$.\n\nNote that these definitions of integrability are slightly\ndifferent from the ones given in \\cite{W}, where the closure of\nthe $3$-form $H$ is assumed.\n\n\n\nIf $H$ is closed, then the twisted operator $d_H \\cdot = d\n\\cdot + H \\wedge \\cdot$ defines a differential complex and if, in\naddition, $M$ is compact, then the strongly integrable\ngeneralized $G_2$-structures can be interpreted as critical\npoints of a a certain functional \\cite[Theorem 4.1]{W}. 
In this\ncase the underlying spinors $\\Psi_{\\pm}$ are parallel with\nrespect to the Levi-Civita connection and therefore there exist no\nnon-trivial compact examples with such structures, i.e. there are\nonly the classical examples of manifolds with holonomy contained\nin $G_2$. If $H$ is not closed, then we will show that compact\nexamples can be constructed starting from a $6$-dimensional\nmanifold endowed with an $SU(3)$-structure.\n\nIf $H$ is closed, then the weakly integrable generalized\n$G_2$-structures can be also viewed as critical points of a\nfunctional under a constraint, but they have no classical\ncounterpart. The existence of weakly integrable generalized\n$G_2$-structures with respect to a closed $3$-form $H$ on a\ncompact manifold was posed as an open problem in \\cite{W}. We\n construct such structures on a family of compact manifolds\nand we relate them with $SU(3)$-structures in dimension $7$,\nwhere $SU(3)$ is identified with the subgroup $SU(3) \\times \\{ 1\n\\}$ of $SO(7)$.\n\n\n\n\n\nAfter reviewing the general theory of generalized $G_2$-structures, in\n section 3 we construct a family of compact $7$-dimensional manifolds endowed\n with a weakly integrable generalized $G_2$-structure with respect to\n a closed and non-zero $3$-form $H$ (Theorem \\ref{example}).\n The corresponding structure form is the odd type form $\\hat \\rho$ given by \\eqref{expression}. These manifolds are\n obtained as a compact quotients $M_{\\beta}$ by uniform discrete subgroups (parametrized by the p-th roots of unity $e^{i \\beta}$) of\n a semi-direct product $SU(2) \\ltimes\n \\H$, where $\\H$\n denotes the quaternions. It turns out that these manifolds have an $SU(3)$-structure $(\\omega, \\eta, \\psi)$ such that\n\\begin{equation}\\label{hyposystem}\nd \\eta = \\lambda \\omega, \\quad d (\\eta \\wedge \\psi_{\\pm}) =0.\n\\end{equation}\nIn particular they are contact metric. 
The structures satisfying\nthe condition \\eqref{hyposystem} can arise on hypersurfaces of\n$8$-dimensional manifolds with an integrable $SU(4)$-structure and\nthey are the analogous of the \\lq \\lq hypo\\rq \\rq\n$SU(2)$-structures in dimension $5$ (see \\cite{CS2}). In the same\nvein of \\cite{Hi1}, we consider a family $(\\omega(t), \\eta(t),\n\\psi(t))$ of $SU(3)$-structures containing the $SU(3)$-structure\n$(\\omega, \\eta, \\psi)$ and the corresponding evolution equations.\nIn this way in section 4 we\n show that on the product of $M_{\\beta}$ with an open interval\nthere exists a Riemannian metric with discrete holonomy\ncontained in $SU(4)$ (Theorem 4.1).\n\nStarting from a $6$-dimensional manifold $N$ endowed with an\n$SU(3)$-structure $(\\omega, g, \\psi)$, it is possible to define in\na natural way a generalized $G_2$-structure with the structure\nform $\\rho$ of even type given by \\eqref{expression} on the\nRiemannian product $(M = N \\times S^1, h)$, with\n$$\nh = g + dt \\otimes dt\n$$\nand $\\alpha = dt$.\n In \\cite{W} an example of this type with\na $6$-dimensional nilmanifold $N$ was considered in order to\nconstruct a compact manifold endowed with a strongly integrable\ngeneralized $G_2$-structure with respect to a non-closed $3$-form\n$H$.\n\n\nWe will prove in general that if $N$ is a $6$-dimensional\nmanifold endowed with an $SU(3)$-structure $(\\omega, g, \\psi)$,\nthen the generalized $G_2$-structure defined by $\\rho$ on $N\n\\times S^1$ satisfies the conditions \\eqref{strongnotclosed}, for\na non-zero $3$-form $H$, if and only if\n\\begin{equation}\\label{symplectichalfflat}\nd \\omega =0, \\quad d \\psi_+ = - \\pi_2 \\wedge \\omega, \\quad d \\psi_- =0,\n\\end{equation}\nwhere the $2$-form $\\pi_2$ is the unique non zero component of\nthe intrinsic torsion (see Theorem \\ref{stronglyint}). We will\ncall $SU(3)$-structures which satisfy the previous conditions\nbelonging to the class ${\\mathcal W}_2^+$. 
The $3$-form $H$ is\nrelated to\n the component $\\pi_2$ of the intrinsic torsion by $ H = \\pi_2\n \\wedge \\alpha$ and we\nwill show that $H$ will never be closed unless $\\pi_2 = 0$.\n\nIt has to be noted that, if $(\\omega, g, \\psi)$ is in the class\n${\\mathcal W}_2^+$, then the $SU(3)$-structure given by $(\\omega,\ng, i \\psi)$ is symplectic half-flat (see \\cite{CS}), i.e. the\nfundamental form $\\omega$ and the real part of the complex volume\nform are both closed. The half-flat structures turn out to be\nuseful in the construction of metrics with holonomy group\ncontained in $G_2$ (see e.g. \\cite{Hi1,CS,CF}). Indeed, starting\nwith a half-flat structure on $N$, if certain evolution equations\nare satisfied, then there exists a Riemannian metric with holonomy\ncontained in $G_2$ on the product of the manifold $N$ with some\nopen interval. Examples of compact manifolds with symplectic\nhalf-flat structures have been given in \\cite{CT}, where invariant\nsymplectic half-flat structures on nilmanifolds are classified.\nOther examples are considered in \\cite{dBT} where Lagrangian\nsubmanifolds are studied instead.\n\n\n\\medskip\n\n{\\em{Acknowledgements}}\nThe authors thank Simon Salamon, Frederik Witt for useful comments and suggestions. They also thank the Centro di Ricerca Matematica \\lq \\lq Ennio De Giorgi\\rq \\rq, Pisa, for the warm hospitality.\n\n\n\n\n\\section{Generalized $G_2$ structures and spinors}\n\nIn this section we are going to recall some facts on generalized\n$G_2$-structures which have been studied by Jeschek and Witt in\n\\cite{W,W2,JW} in the general case of $\\phi$ non-constant and\nnon-trivial $B$-field. In the next sections we will deal with the\ncase $\\phi = const$ and trivial $B$-field.\n\n\n Let $V$ be a\n$7$-dimensional real vector space and denote by $V^*$ the dual\nspace of $V$. 
Then $V \\oplus V^*$ has a natural orientation and a\ninner product of signature $(7,7)$ defined by\n$$\n(v + \\xi, v + \\xi) = - \\frac 12 \\xi(v), \\quad \\forall v \\in V, \\,\n\\xi \\in V^*.\n$$\nThe inner product determines a group coniugate to $SO(7,7)$ inside the linear group\n$GL(14)$. Since as $GL(7)$-space ${\\mathfrak {so}} (7,7) = End(V)\n\\oplus \\Lambda^2 V^* \\oplus \\Lambda^2 V$, any $b \\in \\Lambda^2\nV^*$ defines an element (called {\\em B-field}) in ${\\mathfrak\n{so}} (7,7)$. By exponentiating to $SO(7,7)$ the action of\n$\\Lambda^2 V^* \\subset {\\mathfrak {so}} (7,7)$\n$$\nv \\to v \\lrcorner b,\n$$\n one gets an action on $V \\oplus V^*$, given by $ \\exp (b) (v \\oplus\n \\xi) = v \\oplus\n ( v \\lrcorner b+ \\xi)$.\n Then $V \\oplus V^*$ acts on $\\Lambda^* V^*$ by\n$$\n(v + \\xi) \\eta = \\iota (v) \\eta + \\xi \\wedge \\eta,\n$$\nand we have\n$$\n(v + \\xi)^2 \\eta = - (v + \\xi, v + \\xi) \\eta.\n$$\n Therefore $\\Lambda^* V^*$ can be viewed as a module over the Clifford algebra of $V \\oplus V^*$. 
The space\n $\\Lambda^* V^*$, as the spin\n representation of $Spin(7, 7)$, determines the splitting of $\\Lambda^* V^* \\otimes (\\Lambda^7 V)^{\\frac 12}$\n $$\n \\begin{array} {l}\n S^+ = \\Lambda^{ev} V^* \\otimes (\\Lambda^7 V)^{\\frac 12}\\\\\n S^- = \\Lambda^{od} V^* \\otimes (\\Lambda^7 V)^{\\frac 12}\n \\end{array}\n $$\n into the sum of the two irreducible spin representations.\nBy considering $b \\in \\Lambda^2 V^*$, then one has the following\ninduced action on\n spinors given by\n $$\n \\exp (b) \\eta = (1 + b + \\frac 12 b \\wedge b + \\cdots) \\wedge \\eta = e^b \\wedge \\eta.\n $$\n If $\\sigma$ is the Clifford algebra anti-automorphism defined by $\\sigma (\\gamma^p) = \\epsilon(p) \\gamma^p$, on any element of degree $p$, with\n $$\n \\epsilon (p) = \\left\\{ \\begin{array} {cll}\n 1 \\quad &{\\mbox{for}} &\\quad p \\equiv 0, 3 \\quad {\\mbox{mod}} \\, 4,\\\\\n - 1 \\quad &{\\mbox{for}}& \\quad p \\equiv 1, 2 \\quad {\\mbox{mod}} \\, 4,\n \\end{array}\n \\right.\n $$\n then $S^+$ and $S^-$ are totally isotropic with respect\n to the symmetric bilinear form $q(\\alpha, \\beta)$ defined as the\n top degree component of $\\alpha \\wedge \\sigma (\\beta)$ (see \\cite{W}).\n\n\n\n A {\\it generalized $G_2$-structure} on a $7$-dimensional manifold $M$ is a reduction from the structure group $\\R^* \\times Spin(7,7)$ of\n the bundle $TM \\oplus T^*M$ to $G_2 \\times G_2$.\n Such a structure determines a generalized oriented metric structure\n $(g, b)$, (i.e. a Riemannian metric $g$, a {\\em B}-field $b$ and an orientation on $V$) and a real scalar function $\\phi$ (the {\\em dilaton}).\n Therefore we get a pair of two $G_2$-structures associated with\n two unit spinors $\\Psi_{\\pm}$ in the irreducible spin representation\n $\\Delta = \\R^8$ of $Spin(7)$. 
There is, up to a scalar, a unique invariant in $\\Lambda^{ev} V^* \\otimes \\Lambda^{od} V^*$, given by the box operator\n $$\n \\Box_{\\rho}: \\Lambda^{ev,od} V^* \\to \\Lambda^{od,ev} V^*, \\quad \\tilde \\rho \\to e^{\\frac{b}{2}} \\wedge \\ast_g \\sigma (e^{- \\frac{b}{2}} \\wedge \\tilde \\rho).\n $$\n\n If $\\rho$ is a $G_2 \\times G_2$-invariant spinor, then its {\\it companion} $\\hat \\rho = \\Box_{\\rho} \\rho$ is still a\n $G_2 \\times G_2$-invariant spinor. To any $G_2 \\times G_2$-invariant spinor $\\rho$ one can associate a volume form $\\mathcal Q$ defined by\n\\begin{equation} \\label{volume}\n {\\mathcal Q}: \\rho \\to q (\\hat \\rho, \\rho).\n\\end{equation}\n Using the isomorphism $\\Delta \\otimes \\Delta \\cong \\Lambda^{ev,od}$, Witt in \\cite[Proposition 2.4]{W} derived the following normal form for\n $[\\Psi_+ \\otimes \\Psi_-]^{ev,od}$ in terms of a suitable orthonormal\n basis\n $(e^1, \\ldots, e^7)$, namely\n $$\n\\begin{array}{lcl}\n(\\Psi_+ \\otimes \\Psi_-)^{ev} & = & \\cos (\\theta) + \\sin( \\theta) (e^{12} + e^{34} + e^{56}) +\\\\[4pt]\n&& \\cos (\\theta) (- e^{1367} - e^{1457} - e^{2357} + e^{2467} - e^{1234} - e^{1256} - e^{3456}) +\\\\[4pt]\n&& \\sin (\\theta )(e^{1357} - e^{1467} - e^{2367} - e^{2457}) - \\sin (\\theta) e^{123456},\\\\[4pt]\n (\\Psi_+ \\otimes \\Psi_-)^{odd} &=& \\sin (\\theta) e^7 + \\sin (\\theta )(-e^{136} - e^{145} - e^{235} + e^{246}) +\\\\[4pt]\n && \\cos (\\theta) (-e^{127} - e^{347} - e^{567} - e^{135} + e^{146} + e^{236} + e^{245}) +\\\\[4pt]\n && \\sin( \\theta) (-e^{12347} - e^{12567} - e^{34567}) +\\cos (\\theta) e^{1234567}, \\end{array}$$\n where $\\theta$ is the angle between $\\Psi_+$ and $\\Psi_-$ and $e^{i \\ldots j}$ denotes the wedge product $e^i \\wedge \\ldots \\wedge e^j$.\n\n If the spinors $\\Psi_+$ and $\\Psi_-$ are linearly independent, then\n (see Corollary 2.5 of \\cite{W})\n $$\n\\begin{array}{lcl}\n(\\Psi_+ \\otimes \\Psi_-)^{ev} & = & \\cos (\\theta) + \\sin(\\theta) \\omega - 
\\cos (\\theta) (\\psi_- \\wedge \\alpha + \\frac 12 \\omega^2) \\\\[4pt]\n&& + \\sin (\\theta) \\psi_+ \\wedge \\alpha - \\frac{1}{6} \\sin (\\theta) \\omega^3,\\\\[5pt]\n(\\Psi_+ \\otimes \\Psi_-)^{od} & = & \\sin (\\theta) \\alpha - \\cos (\\theta) (\\psi_+ + \\omega \\wedge \\alpha) - \\sin (\\theta) \\psi_- \\\\[5pt]\n&&- \\frac 12 \\sin (\\theta) \\omega^2 \\wedge \\alpha + \\cos (\\theta)\n{\\mbox {vol}}_g,\n\\end{array}\n $$\n where $\\alpha$ denotes the dual of the unit vector in $V$, stabilized by $SU(3)$,\n $$\\omega = e^{12} + e^{34}+ e^{56}$$ is the fundamental form and\n $\\psi_{\\pm}$ are the real and imaginary parts respectively of the complex volume form\n $$\n \\psi = (e^1 + i e^2) \\wedge (e^3 + i e^4) \\wedge (e^5 + i e^6).\n $$\n A $G_2 \\times G_2$-invariant spinor $\\rho$ is stable in the sense of Hitchin (see \\cite{Hi3}), i.e. $\\rho$ lies in an open orbit under the\n action of $\\R^+ \\times Spin(7,7)$.\n\n By \\cite[Theorem 2.9]{W} the generalized $G_2$-structures are in $1-1$ correspondence with lines of spinors\n $\\rho$ in $\\Lambda^{ev}$ (or $\\Lambda^{od} $) whose stabilizer under the action of $Spin(7,7)$ is isomorphic to $G_2 \\times G_2$.\n\n The spinor $\\rho$ is called the {\\it structure form} of the generalized $G_2$ structure and it can be uniquely written\n (modulo a simultaneous change of sign for $\\Psi_+$ and $\\Psi_-$) as\n $$\\rho = e^{-\\phi} (\\Psi_+ \\otimes \\Psi_-)^{ev}_b ,\n $$\nwhere $b$ is the $B$-field, $\\Psi_{\\pm} \\in \\Delta$ are two unit\nspinors, the function $\\phi$ is the dilaton and the subscript $b$\ndenotes the wedge with the exponential $e^{\\frac{b}{2} }$.\n\nA {\\it {\\rm (}topological{\\rm )} generalized $G_2$-structure} over $M$ is a topological $G_2 \\times G_2$-reduction of the $SO(7,7)$-principal\n bundle associated with $TM \\oplus T^* M$ and it is characterized by a stable even or odd spinor $\\rho$ which can be viewed as a form.\n This is equivalent to say that there exists an 
$SO(7)$-principal fibre bundle which has two $G_2$-subbundles (or equivalently two $G_2{^\\pm}$-structures).\n\n\n In the sequel we will omit topological when we will refer to a generalized $G_2$-structure.\n\n\nLet $H$ be a $3$-form and $\\lambda$ be a real, non-zero constant. A generalized $G_2$-structure $(M, \\rho)$ is called {\\it strongly integrable} with respect to\n$H$ if\n$$\nd_H \\rho = 0, \\quad d_H \\hat \\rho =0,\n$$\nwhere $d_H \\cdot = d \\cdot + H \\wedge \\cdot$ is the twisted operator\nof $d$. By \\cite{W} there are no non-trivial compact examples with\na strongly integrable generalized $G_2$- structure with respect to a\nclosed $3$-form $H$.\n\nIf $$d_H \\rho = \\lambda \\hat \\rho,$$ then the generalized\n$G_2$-structure is said to be {\\it weakly integrable} of {\\it\neven} or {\\it odd} type according to the parity of the form\n$\\rho$. The constant $\\lambda$ (called the {\\em Killing number})\nis the $0$-torsion form of the two underlying $G_2$-structures.\nIndeed, by Corollary 4.6 of \\cite{W}, there exist two unique\ndetermined linear connections $\\nabla^{\\pm}$, preserving the two\n$G_2^\\pm$-structures, with skew-symmetric torsion $\\pm T = \\frac\n12 db + H$. 
If the structure is of odd type, then\n$$\n\\begin{array}{l}\nd \\varphi_+ = \\frac{12}{7} \\lambda * \\varphi_+ + \\frac 32 d \\phi \\wedge \\varphi_+ - * T_{27}^+,\\\\[5pt]\nd * \\varphi_+ = 2 d \\phi \\wedge * \\varphi_+\n\\end{array}\n$$\nand\n$$\n\\begin{array}{l}\nd \\varphi_- = \\frac{12}{7} \\lambda * \\varphi_- + \\frac 32 d \\phi \\wedge \\varphi_- - * T_{27}^-,\\\\[5pt]\nd * \\varphi_- = 2 d \\phi \\wedge * \\varphi_-,\n\\end{array}\n$$\nwhere $T_{27}^\\pm$ denotes the component of $T$ into the $27$-dimensional irreducible $G_2^\\pm$-module\n$${\\Lambda^3_{27}}^\\pm = \\{ \\gamma \\in \\Lambda^3 \\, \\vert \\, \\gamma \\wedge \\varphi_+ = \\gamma \\wedge \\varphi_- =0 \\}.$$\nThis is equivalent to say that $e^{-\\phi} [\\Psi_+ \\otimes \\Psi_-]$ satisfies the generalized Killing and dilatino equation (see \\cite{W,GMPW}).\n\n\n In both cases there is a characterization in terms of the two metric\n connections $\\nabla^{\\pm}$ with skew symmetric torsion $\\pm T$ (see\n \\cite[Theorem 4.3]{W}). Indeed, a generalized $G_2$-manifold $(M,\n \\rho)$ is weakly integrable with respect to $H$ if and only if\n $$\n \\nabla^{LC} \\Psi_{\\pm} \\pm \\frac 14 ( X \\lrcorner T) \\cdot \\Psi_{\\pm} =0,\\\\\n $$\n where $\\nabla^{LC}$ is the Levi-Civita connection, $X \\lrcorner$ denotes the contraction by $X$ and the following\n additional conditions are satisfied\n $$\n \\left (d \\phi \\pm \\frac 12 ( X \\lrcorner T) \\pm \\lambda \\right) \\cdot \\Psi_{\\pm} =0, \\quad\n $$\n if $\\rho$ is of even type\n or\n $$\n \\left (d \\phi \\pm \\frac 12 ( X \\lrcorner T) + \\lambda \\right) \\cdot \\Psi_{\\pm} =0, \\quad\n $$\n if $\\rho$ is of odd type.\n Taking $\\lambda = 0$ above equations yield strong integrability with respect to $H$, instead.\n\n\nExamples of generalized $G_2$-structures are given by the {\\em\nstraight} generalized $G_2$-structures, i.e. structures defined by\none spinor $\\Psi = \\Psi_+ = \\Psi_-$. 
These structures are induced by\na classical $G_2$-structure $(M, \\varphi)$ and are strongly\nintegrable with respect to a closed $3$-form $T$ only if the\nholonomy of the metric associated with $\\varphi$ is contained in $G_2$.\n\nIf $H$ is closed, then it has to be noted that, in the compact case, the structure form $\\rho$ of a strongly integrable generalized $G_2$-structure corresponds to a critical point of a functional on stable forms. Indeed, since stability is an open condition, if $M$ is compact then one can consider the functional\n$$\nV (\\rho) = \\int_M {\\mathcal Q}(\\rho),\n$$\nwhere $\\mathcal Q$ is defined as in \\eqref{volume}. By \\cite[Theorem 4.1]{W} a $d_H$-closed stable form\n$\\rho$ is a critical point in its cohomology class if and only if $d_H \\hat \\rho =0$.\n\nAgain in the compact case a $d_H$-exact form $\\hat \\rho \\in \\Lambda^{ev,od} (M)$ is a critical point of the functional\n $V$ under some constraint if and only if\n $d_H \\rho = \\lambda \\hat \\rho$, for a real non zero constant $\\lambda$.\n\n\n\\section{Compact examples of weakly integrable manifolds}\n\nIn this section we will construct examples of compact manifolds endowed with a weakly integrable generalized $G_2$-structure with respect to a closed $3$-form $H$.\n\n\n\nConsider the $7$-dimensional Lie algebra $\\mathfrak g$ with structure equations:\n$$\n\\left \\{ \\begin{array} {l}\nd e^1 = a e^{46},\\\\[3pt]\nd e^2 = - \\frac 12 a e^{36} - \\frac 12 a e^{45} + \\frac 12 a e^{17},\\\\[3pt]\nd e^3 = - \\frac 12 a e^{15} +\\frac 12 a e^{26} - \\frac 12 a e^{47},\\\\[3pt]\nd e^4 = -a e^{16},\\\\[3pt]\nd e^5 = \\frac 12 a e^{13} -\\frac 12 a e^{24} - \\frac 12 a e^{67},\\\\[3pt]\nd e^6 = a e^{14},\\\\[3pt]\nd e^7 = -\\frac 12 a e^{12} -\\frac 12 a e^{34} - \\frac 12 a e^{56},\n\\end{array}\n\\right.\n$$\nwhere $a$ is a real parameter different from zero.\n\nIt can be easily checked that the Lie algebra $\\mathfrak g$ is not solvable since $[{\\mathfrak g},{\\mathfrak g}] 
={\\mathfrak g}$ and that it is unimodular. We can also view $\\mathfrak g$ as the semidirect sum\n$$\n{\\mathfrak g} = {\\mathfrak {su}} (2) \\oplus_{\\delta} \\R^4,\n$$\nwhere\n$$\n {\\mathfrak {su}} (2) = {\\mbox {span}} , \\quad \\R^4 = {\\mbox {span}} \n $$\n and $\\delta: {\\mathfrak {su}} (2) \\to {\\mathfrak {Der}} (\\R^4)$ is given by\n $$\n\\delta(e_1) = ad_{e_1} = \\left( \\begin{array}{cccc} 0&0&0&-\\frac 12 a\\\\\n 0&0&\\frac 12 a&0\\\\\n0& - \\frac 12 a&0&0\\\\\n \\frac 12 a &0&0&0 \\end{array} \\right),\n $$\n \\vskip 0.2cm\n $$\n \\delta(e_4) = ad_{e_4} = \\left( \\begin{array}{cccc} 0&0&\\frac 12 a&0\\\\\n 0&0&0&\\frac 12 a\\\\\n - \\frac 12 a&0&0&0\\\\\n 0&- \\frac 12 a &0&0\\end{array} \\right),\n $$\n \\vskip 0.2cm\n $$\n \\delta(e_6) = ad_{e_6} = \\left( \\begin{array}{cccc} 0&-\\frac 12 a&0&0\\\\\n \\frac 12 a&0&0&0\\\\\n 0&0&0& \\frac 12 a\\\\\n 0&0&- \\frac 12 a &0 \\end{array} \\right).\n $$\n If we identify $\\R^4$ with the space $\\H$ of quaternions, then\n $$\n ad_{e_1} = \\frac 12 a L_k, \\quad ad_{e_4} = \\frac 12 a L_{-j}, \\quad ad_{e_6} = \\frac 12 a L_{i},\n $$\n where $L_q$ denotes the left multiplication by the quaternion $q$.\n\nTherefore, the product on the corresponding Lie group $G = SU(2)\n\\ltimes \\H$, for $a = 2$, is given by\n$$\n(A, q) \\cdot (A', q') = (A A', Aq' + q), \\quad A,A' \\in SU(2),\n\\quad q,q' \\in \\H,\n$$\nwhere we identify $SU(2)$ with the group of quaternions of unit\nnorm.\n\n\\begin{theorem} \\label{example} The Lie group $G = SU(2) \\ltimes \\H$ admits compact quotients $M_{\\beta} = G\/ \\Gamma_{\\beta}$, with\n $e^{i \\beta}$ primitive p-th root of unity $(p$ prime$)$, and $M_{\\beta}$ has an invariant weakly integrable generalized $G_2$-structure with respect to a closed $3$-form $H$.\n\\end{theorem}\n\n\\begin{proof}\nConsider the discrete subgroup $\\Gamma_{\\beta} = \\ltimes \\Z^4$, where $$ is the subgroup of $SU(2)$ generated by\n$$\nA_{\\beta} = \\left( \\begin{array}{cc} e^{i \\beta} &0\\\\ 
0&e^{-i \\beta} \\end{array} \\right),\n$$\nwith $e^{i \\beta}$ primitive p-th root of unity and $p$ prime.\n\n Then one can check that $\\Gamma_\\beta$ is a closed subgroup of $G$. Let $(A', q')$ be any point of\n$G$. Thus $$ [ (A', q')] = \\{ (A_{\\beta}^m A', A_{\\beta}^m q' +\nr), \\, m \\in \\Z \\, , r \\in \\Z^4 \\}\n$$\nis the equivalence class of $(A', q')$. In particular, $[ (A',\nq')] = [(A', q' + r)]$ and therefore the restriction of the\nprojection $\\pi: G \\to G\/\\Gamma_{\\beta}$ to $SU(2) \\times\n[0,1]^4$ is surjective.\\newline Then the quotient $M_{\\beta} =\n(SU(2) \\ltimes \\H )\/ \\Gamma_{\\beta}$ is a compact manifold.\n\n\nConsider the invariant metric $g$ on $M_{\\beta}$ such that the basis $(e^1, \\ldots, e^7)$ is orthonormal\nand take the generalized $G_2$ structure defined by the structure form of odd type\n$$\n\\rho = e^7 - e^{136}- e^{145} - e^{235}+ e^{246} - e^{12347} - e^{12567} - e^{34567},\n$$\nin terms of the basis $(e^1, \\ldots, e^7)$.\nThe\ncompanion of $\\rho$ is\n$$\n\\hat \\rho = e^{12} + e^{34} + e^{56}+ e^{1357} -e^{1467} - e^{2367}- e^{2457} - e^{123456}.\n$$\nThen the structure form $\\rho$ defines a weakly integrable\ngeneralized $G_2$-structure with respect to a closed $3$-form\n$H$, i.e. 
$d_H \\rho = \\lambda \\hat \\rho$ ($\\lambda$ non-zero\nconstant),\n if and only if\n\\begin{equation} \\label{weakeq}\n\\left\\{\n \\begin{array}{l}\n d e^7 = \\lambda \\omega,\\\\[5pt]\n d \\psi_- = (H - \\lambda \\psi_+) \\wedge e^7,\\\\[5pt]\n H \\wedge \\psi_- = - \\frac 13 \\lambda \\omega^3,\n \\end{array}\n \\right.\n \\end{equation}\n where $\\omega, \\psi_{\\pm}$ are given by\n\\begin{equation} \\label{definitionforms}\n\\left\\{\n\\begin{array}{lcl}\n\\omega &= & e^{12} + e^{34} + e^{56},\\\\[5pt]\n\\psi_+ &=& e^{135} - e^{146} - e^{236} - e^{245},\\\\[5pt]\n\\psi_- &=& e^{136} + e^{145} + e^{235} - e^{246}.\n\\end{array}\n\\right.\n\\end{equation}\nThe equations \\eqref{weakeq} are satisfied with $\\lambda = - \\frac 12 a$ and\n$$\n H= - a e^{146}.\n$$\n\\end{proof}\n\nObserve that $H$ is also co-closed, i.e. $d*H =0$. Moreover, if $a\n\\leq 1$, $H$ is a calibration in the sense of \\cite{HL}.\n\n In this way we get compact examples with a weakly\nintegrable generalized $G_2$-structure with respect to the closed\n$3$-form $H$. The induced invariant metric on $M_{\\beta}$ is not flat, since the inner product\n$$g = \\sum_{i = 1}^7 (e^i)^2$$\non the Lie algebra $\\mathfrak g$ is not flat. Indeed, the Ricci\ntensor of $g$ is diagonal with respect to the orthonormal basis $(e_1,\n\\ldots, e_7)$ and its non zero components are given by:\n$$ Ric (e_1, e_1) = \\frac 12 a^2 =Ric (e_4, e_4) = Ric (e_6, e_6).\n$$\n\n\\section {Link with $SU(3)$-structures in dimension $7$ and\nevolution equations}\n\nIn this section we will relate the weakly integrable generalized\n$G_2$-structures constructed in the previous section with\n$SU(3)$-structures in dimension $7$.\n\n Since the $1$-form $\\eta = e^7$ is a contact\nform on the Lie algebra ${\\mathfrak g}$, then $M_{\\beta}$ is a\ncontact metric manifold. 
Moreover, by \\eqref{weakeq} $M_{\\beta}$\nhas an $SU(3)$-structure defined by $(\\omega, \\eta, \\psi = \\psi_+\n+ i \\psi_-)$ such that\n\\begin{equation} \\label{SU3hypo}\n\\left \\{ \\begin{array} {l}\nd \\omega =0,\\\\[5pt]\nd (\\psi_\\pm \\wedge \\eta) =0.\n\\end{array} \\right.\n\\end{equation}\nHere we identify $SU(3)$ as the subgroup $SU(3) \\times \\{1 \\}$ of\n$SO(7)$.\n\n\\smallskip\n\nNote that the $SU(3)$-structures $(\\omega, \\eta, \\psi = \\psi_+ + i\n\\psi_-)$ on $7$-dimensional manifolds for which $d \\omega =0$ and\n$d (\\psi_{\\pm}) =0$ where considered in \\cite{TV}. In this case\none cannot find any closed $3$-form $H$ such that conditions\n\\eqref{SU3hypo} are satisfied since $H$ has to be equal to\n$\\lambda \\psi_+$ and the third equation cannot hold. It would be\ninteresting to investigate if there are other $7$-dimensional\nexamples endowed with an $SU(3)$-structures which satisfy the\nconditions \\eqref{SU3hypo} and giving rise to a weakly integrable\n$G_2$-structure with respect to a closed $3$-form $H$.\n\n\n In general, let ${\\iota}: M^7 \\to N^8$ be an embedding\nof a an oriented $7$-manifold $M^7$ into a $8$-manifold $N^8$ with\nunit normal vector $V$. Then an $SU(4)$-structure $(\\tilde\n\\omega, \\tilde g, \\tilde \\psi)$ (or equivalently a special almost\nHermitian structure, see e.g. 
\\cite{Ca2}), where $(\\tilde \\omega,\n\\tilde g)$ is a $U(4)$-structure and $\\tilde \\psi= \\tilde \\psi_+ +\ni \\tilde \\psi_-$ is complex $4$-form of unit norm, defines in a\nnatural way an $SU(3)$-structure $(\\omega, \\eta, g, \\psi = \\psi_+\n+ i \\psi_-)$ on $M^7$ given by:\n$$\n\\eta = - V \\lrcorner \\tilde\\omega, \\quad\n\\omega = {\\iota}^* \\tilde \\omega, \\quad\ng = {\\iota}^* g, \\quad\n\\psi_+ = - V \\lrcorner \\tilde\\psi_+, \\quad\n\\psi_- = V \\lrcorner \\tilde\\psi_- .\n$$\nThen, if $\\gamma$ denotes the $1$-form dual to $V$, then we have\n$$\n\\begin{array} {l}\n\\tilde \\omega = \\omega + \\eta \\wedge \\gamma,\\\\[5pt]\n\\tilde \\psi = (\\psi_+ + i \\psi_-) \\wedge (\\eta + i \\gamma).\n\\end{array}\n$$\nThe integrability of the $SU(4)$-structure $(\\tilde \\omega, \\tilde\ng, \\tilde \\psi)$ implies conditions \\eqref{SU3hypo}, which can be\nviewed as the analogous of the equations defining the hypo\n$SU(2)$-structures in dimension 5 (see \\cite{CS}).\n\n\nVice versa, given an $SU(3)$-structure $(\\omega, \\eta, \\psi )$ on $M^7$, an $SU(4)$-structure on $M^7 \\times \\R$ is defined by\n\\begin{equation} \\label{SU4}\n\\begin{array} {l}\n\\tilde\\omega = \\omega + \\eta \\wedge dt,\\\\[5pt]\n\\tilde \\psi = \\psi \\wedge (\\eta + i dt),\n\\end{array}\n\\end{equation}\nwhere $t$ is a coordinate on $\\R$.\n\nIf the $SU(3)$-structure $(\\omega, \\eta, \\psi )$ on $M^7$ belongs to a one-parameter family of $SU(3)$-structures $(\\omega(t), \\eta(t), \\psi(t) )$ satisfying the equations \\eqref{SU3hypo} and such that\n\\begin{equation} \\label{evolutions}\n\\left \\{ \\begin{array} {l}\n\\partial_t \\omega(t) = - \\hat d \\eta(t),\\\\[5pt]\n\\partial_t (\\psi_+(t) \\wedge \\eta (t)) = \\hat d \\psi_-(t),\\\\[5pt]\n\\partial_t (\\psi_-(t) \\wedge \\eta (t)) = -\\hat d \\psi_+(t),\n\\end{array} \\right.\n\\end{equation}\nfor all $t \\in (b, c)$, where $\\partial_t$ denotes the derivative\nwith respect to $t$ and $\\hat d$ is the exterior 
differential on\n$M^7$, then the $SU(4)$-structure given by \\eqref{SU4} on $M^7\n\\times (b, c)$ is integrable, i.e. $\\tilde \\omega$ and $\\tilde\n\\psi$ are both closed. In particular, the associated Riemannian\nmetric on $M^7 \\times (b, c)$ has holonomy contained in $SU(4)$\nand consequently it is Ricci-flat.\n\nFor the manifolds $M_{\\beta}$ a solution of the evolution\nequations \\eqref{evolutions} is given by\n$$\n\\begin{array}{l}\n\\omega(t) = u(t) v(t) (e^{12} + e^{34} + e^{56}),\\\\[5pt]\n\\psi_+(t) = u(t) v(t)^2 (e^{135} - e^{236} - e^{245}) - u(t)^3\ne^{146},\\\\[5pt]\n\\psi_-(t) = u(t)^2 v(t) (e^{136} + e^{145} - e^{246}) + v(t)^3\ne^{235},\\\\[5pt]\n\\eta(t) = \\frac {1}{v(t)^3} e^7,\n\\end{array}\n$$\nwhere $u(t), v(t)$ solve the system of ordinary differential\nequations\n$$\n\\left\\{ \\begin{array}{l} \\displaystyle\\frac {d}{dt} (u(t) v(t)) =\n\\frac 12 a\n\\displaystyle\\frac {1}{v(t)^3},\\\\[10pt]\n\\displaystyle\\frac {d}{dt} \\left( \\displaystyle\\frac {u(t)}{v(t)}\n\\right) = \\frac 12 a v(t)^3\\,,\n\\end{array} \\right.\n$$\nsuch that $u(0) = v(0) = 1$. The previous system is equivalent to\n\\begin{equation} \\label{odiffsystem}\n\\left\\{ \\begin{array}{l} u'(t) = \\frac 14 a\n\\left(\\displaystyle\\frac {1} {v(t)^4} +\nv(t)^4\\right),\\\\[10pt]\nv'(t) = \\frac 14 a \\left(\\displaystyle\\frac {1} {u(t) v(t)^3} -\n\\displaystyle\\frac{v(t)^5}{u(t)}\\right).\n\\end{array}\n\\right.\n\\end{equation}\n\nThen, by the theorem on existence of solutions for a system of\nordinary differential equations, one can show that on a open\ninterval $(b, c)$ containing $t =0$ the system \\eqref\n{odiffsystem} admits a unique solution $(u(t), v(t))$ satisfying\nthe initial condition $u(0) = v(0) = 1$. 
Actually, the solution is given by\n$$\nu(t) = 1 + \\frac 12 a t, \\quad v(t) =1.\n$$\n\nHence, we can prove the following\n\n\\begin{theorem}\nOn the product of $M_{\\beta}$ with some open interval $(b, c)$\nthere exists a Riemannian metric with discrete holonomy contained in\n$SU(4)$.\n\\end{theorem}\n\n\\begin{proof}\nThe basis of $1$-forms on the manifold $M_{\\beta} \\times (b,c)$ given by\n$$\n\\begin{array}{l}\nE^1 = (1 + \\frac 12 a t) e^1, \\, \\, E^2 = e^2, \\,\\, E^3 = (1 + \\frac 12 a t) e^3,\\,\\, E^4 = (1 + \\frac 12 a t) e^4, \\\\[10pt]\n E^5 = e^5,\\,\\, E^6 = (1 + \\frac 12 a t) e^6,\\,\\, E^7 = e^7,\\,\\, E^8 = dt\n \\end{array}\n$$\nis orthonormal with respect to the Riemannian metric with holonomy contained in $SU(4)$. By a direct computation\nwe have that the non zero Levi-Civita connection 1-forms are given by\n$$\n\\begin{array} {l}\n\\theta^1_4 = -\\theta^2_3 = \\theta^5_7 = \\theta^6_8 = \\displaystyle\\frac {a} {2 + at} E^6, \\\\[12pt]\n \\theta^1_6 = -\\theta^2_5 = - \\theta^3_7= -\\theta^4_8 = \\displaystyle-\\frac {a} {2 + at} E^4,\\\\[12pt]\n \\theta^1_8 = - \\theta^2_7= \\theta^3_5 = \\theta^4_6 = \\displaystyle\\frac {a} {2 + at} E^1.\n \\end{array}\n $$\nTherefore, all the curvature forms $\\Omega^i_j$ vanish and consequently the holonomy algebra is trivial.\n\\end{proof}\n\n\\section{Strong integrability and $SU(3)$-structures in dimension 6}\nIn this section we are going to consider the structure form $\\rho$ of\neven type\n\\begin{equation} \\label{rho}\n\\rho = \\omega + \\psi_+ \\wedge\\alpha-\\frac{1}{6}\\omega^3\n\\end{equation}\non the product of a $6$-dimensional manifold $N$ endowed with an $SU(3)$-structure cross $S^1$. We will investigate which type of\n$SU(3)$-structures give rise to a strongly integrable generalized\n$G_2$-structure with respect to a non-zero $3$-form.\n\nLet $N$ be a $6$-dimensional manifold. 
An {\\it $SU(3)$-structure} on $N$ is determined by a Riemannian metric $g$, an orthogonal almost complex structure $J$ and a choice of a complex volume form $\\psi = \\psi_+ + i \\psi_-$ of unit norm. We will denote by $(\\omega, \\psi)$ an $SU(3)$-structure, where $\\omega$ is the fundamental form defined by\n$$\n\\omega(X, Y) = g (J X, Y),\n$$\nfor any pair of vector fields $X, Y$ on $N$. Locally one may choose\nan orthornormal basis $(e^1, \\ldots, e^6)$ of the vector cotangent\nspace $T^*$ such that $\\omega$ and $\\psi_{\\pm}$ are given by\n\\eqref{definitionforms}.\n\nThese forms satisfy the following compatibility relations\n$$\n\\omega \\wedge \\psi_{\\pm} =0, \\quad \\psi_+ \\wedge \\psi_- = \\frac 23 \\omega^3.\n$$\nThe intrinsic torsion of the $SU(3)$-structure belongs to the space (see \\cite{CS})\n$$\nT^* \\otimes {\\mathfrak {su}} (3)^{\\perp} = {\\mathcal W}_1 \\oplus {\\mathcal W}_2 \\oplus {\\mathcal W}_3 \\oplus {\\mathcal W}_4 \\oplus {\\mathcal W}_5,\n$$\n$ {\\mathfrak {su}} (3)^{\\perp} $ being the orthogonal complement of $ {\\mathfrak {su}} (3)$ in ${\\mathfrak {so}} (6)$ and\n$$\n\\begin{array}{ll}\n {\\mathcal W}_1 = {\\mathcal W}^+_1 \\oplus {\\mathcal W}^-_1, &\\quad {\\mathcal W}^{\\pm}_1 \\cong \\R,\\\\[5pt]\n {\\mathcal W}_2 = {\\mathcal W}^+_2 \\oplus {\\mathcal W}^-_2, &\\quad {\\mathcal W}^{\\pm}_2 \\cong {\\mathfrak {su}}(3),\\\\[5pt]\n {\\mathcal W}_3 \\cong [\\![{\\mathrm S}^{2,0}]\\!], &\\quad {\\mathcal W}_4 \\cong {\\mathcal W}_5 \\cong T^*,\n \\end{array}\n $$\n where $[\\![{\\mathrm S}^{2,0}]\\!]$ denotes the real representation associated with the space ${\\mathrm S}^{2,0}$ of complex symmetric tensors of type $(2,0)$.\\newline\n The components of the intrinsic torsion of an $SU(3)$-structure can be expressed by (see e.g. 
\\cite{CS,BV})\n\\begin{equation} \\label{intrinsicforms}\n\\left\\{\n\\begin{array}{lll}\n d \\omega &=& \\nu_0 \\, \\psi_+ + \\alpha_0 \\, \\psi_- + \\nu_1 \\wedge \\omega+ \\nu_3,\\\\[5pt]\nd \\psi_+ &=& \\frac23 \\alpha_0 \\, \\omega^2 + \\pi_1 \\wedge \\psi_+ -\\pi_2 \\wedge \\omega,\\\\[5pt]\nd \\psi_- &= & - \\frac 23 \\nu_0 \\, \\omega^2 + J \\pi_1 \\wedge \\psi_+ - \\sigma_2 \\wedge \\omega,\\\\\n\\end{array}\n\\right.\n\\end{equation}\nwhere $\\alpha_0 \\in {\\mathcal W}_1^+$, $\\pi_1 \\in {\\mathcal W}_5$, $\\pi_2 \\in {\\mathcal W}_2^+$, $\\nu_0 \\in {\\mathcal W}_1^-$,\n$ \\sigma_2 \\in {\\mathcal W}_2^-$, $\\nu_1 \\in {\\mathcal W}_4$, $\\nu_3 \\in {\\mathcal W}_3$.\n\nBy definition, an $SU(3)$-structure is called {\\it integrable} if the intrinsic torsion vanishes. In this case $\\omega$ and $\\psi$ are both closed. Therefore, the intrinsic torsion measures the failure of the holonomy group of the Levi-Civita connection of $g$ to reduce to $SU(3)$.\n\nIf $(\\omega, \\psi)$ is in the class ${\\mathcal W}_2^+$, then by using \\eqref{intrinsicforms} and taking into account the conditions $d \\omega = d \\psi_- =0$, we get that the components $\\nu_0, \\alpha_0, \\sigma_2, \\nu_3, \\nu_1, \\pi_1$ vanish and hence\n$$d \\psi_+ = -\\pi_2 \\wedge \\omega, $$\nwith $\\pi_2$ belonging to the space\n\\begin{equation} \\label{spacepi2}\n\\begin{array}{lcl}\n{\\mathcal W}_2^+ &\\cong& \\{ \\gamma \\in \\Lambda^2 \\quad \\vert \\quad \\gamma \\wedge \\psi_+=0, \\quad * J \\gamma = -\n\\gamma \\wedge \\omega \\}\\\\[5pt]\n&=& \\{ \\gamma \\in \\Lambda^2 \\quad \\vert \\quad J \\gamma = \\gamma, \\quad \\gamma \\wedge \\omega^2 =0 \\}.\n\\end{array}\n\\end{equation}\n\n\nBy \\cite{BV} the scalar curvature ${\\mbox {scal}} (g)$ of the metric $g$ is given by:\n$$\n{\\mbox {scal}} (g) = - \\frac 12 \\vert \\pi_2 \\vert^2 \\, .\n$$\n\n\\medskip\n\n\nLet $\\alpha$ be a closed 1-form on $S^1$. 
Consider on the\nproduct $N \\times S^1$, the generalized $G_2$-structure\ndefined by the structure form of even type $\\rho$ given by\n\\eqref{rho} with companion\n$$\n\\hat \\rho = \\alpha - \\psi_- - \\frac12 \\omega^2 \\wedge \\alpha.\n$$\nWe have the following\n\\begin{theorem} \\label{stronglyint}\nLet $(N, \\omega, \\psi)$ be a $6$-dimensional manifold endowed with an\n $SU(3)$-structure. The structure form $\\rho$, given by \\eqref{rho},\n defines a strongly integrable generalized $G_2$-structure on $N \\times S^1$\n with respect to a $3$-form $H$ $($ non necessarily closed$)$, i.e. $\\rho$ satisfies the conditions\n \\begin{equation} \\label{dequations} d_H \\rho = d_H \\hat \\rho =0 \\end{equation}\n if and only if\n $N$ is in the class ${\\mathcal W}_2^+$ and $H = \\pi_2 \\wedge \\alpha$.\n\\end{theorem}\n\n\\begin{proof} By \\eqref{dequations} we get\n$$\n\\left\\{\n\\begin{array}{l}\nd \\omega + d (\\psi_+ \\wedge \\alpha) - \\frac16 d(\\omega^3) +\nH \\wedge \\omega + H \\wedge \\psi^+ \\wedge \\alpha =0,\\\\[5pt]\n d \\hat \\rho + H \\wedge \\hat \\rho = - d \\psi_- - \\frac12 d(\\omega^2 \\wedge \\alpha) + H \\wedge \\alpha - H \\wedge \\psi_- =0.\n\\end{array}\n\\right.\n$$\nThis is equivalent to say:\n\\begin{equation} \\label{strongconditions}\n\\left\\{\n\\begin{array}{l}\nd \\omega =0,\\\\[5pt]\nd (\\psi_+ \\wedge \\alpha) = - H \\wedge \\omega,\\\\[5pt]\nH \\wedge \\psi_+ \\wedge \\alpha=0 \\\\[5pt]\nd \\psi_- = H \\wedge \\alpha,\\\\[5pt]\nH \\wedge \\psi_- =0\\,.\n\\end{array}\n\\right.\n\\end{equation}\nHence, in particular\n$$\nd \\psi_- = 0 , \\quad H \\wedge \\alpha =0.\n$$\nIt follows that $H = S \\wedge f \\alpha$, with $S$ a $2$-form on\n$N$ and $f$ a function on $S^1$. Since $d \\omega = 0$, we obtain\n$$d \\psi^+ \\wedge \\alpha = - S \\wedge \\omega \\wedge f \\alpha,\n$$ we have that $f$ has to be a constant $k$ and $$d \\psi_+ = - k S\n\\wedge \\omega,$$\n with $k S = \\pi_2$. 
Since $\\pi_2$ is a\n$(1,1)$-form, then $\\pi_2 \\wedge \\psi_{\\pm} =0$. Therefore,\nequations \\eqref{strongconditions} are satisfied if and only if\n$N$ belongs to the class ${\\mathcal W}_2^+$.\n\\end{proof}\n\nNote that $H$ is closed if and only if $d \\pi_2 =0$.\n\n\\smallskip\n\n\n\n\nHomogeneous examples of $6$-dimensional manifolds with a\n$SU(3)$-structure in the class ${\\mathcal W}_2^+$ are given in\n[8]. There it was proved that the $6$-dimensional nilmanifolds\n$\\Gamma \\backslash G$ which carry an invariant $SU(3)$-structures\nin the class ${\\mathcal W }_2^+$ are the torus, the $\\T^2$-bundle\nover $\\T^4$ and the $\\T^3$-bundle over $\\T^3$ associated with the\nfollowing nilpotent Lie algebras\n$$\n\\begin{array}{l}\n(0,0,0,0,0,0),\\\\[3pt]\n(0,0,0,0,12,13),\\\\[3pt]\n(0,0,0,12,13,23),\n\\end{array}\n$$\nwhere the notation $(0,0,0,0, 12,13)$ means that the dual ${\\mathfrak g}^*$ of the Lie algebra ${\\mathfrak g}$ has a basis $(e^1, \\ldots, e^6)$ such that $d e^i =0, i =1, \\ldots, 4$,\n $d e^5 = e^1 \\wedge e^2$ and $d e^6 = e^1 \\wedge e^3$.\n\nIn \\cite{W} the $\\T^2$-bundle over $\\T^4$ has been considered and it has been proved that\nit admits a $SU(3)$-structure in the class ${\\mathcal W}_2^+$.\\newline\nBy \\cite{dBT} the $\\T^3$-bundle over $\\T^3$ admits a family of $SU(3)$-structures in the class ${\\mathcal W}_2^+$ given by\n$$\n\\begin{array}{lcl}\n\\omega &=& e^{16} + \\mu e^{25} + (\\mu - 1) e^{34},\\\\[5pt]\n\\psi_+ &=& (1 - \\mu) e^{124} + \\mu e^{135} - \\mu (\\mu - 1) e^{456} - e^{236},\\\\[5pt]\n\\psi_- &=&- \\mu (1- \\mu) e^{145} + (\\mu - 1) e^{246} + \\mu e^{356} + e^{123},\n\\end{array}\n$$\nwhere $\\mu$ is a real number different from $0$ and $1$. 
Such a family of $SU(3)$-structures belongs to the class ${\\mathcal W}_2^+$ with $$\n\\pi_2 = \\mu^2 e^{25} - (\\mu- 1)^2 e^{36} - e^{14},\n$$\nand $d \\pi_2 \\neq 0$.\n\\newline\nManifolds in the class ${\\mathcal W}_2^+$ can be also obtained as\nhypersurfaces of $7$-dimensional manifolds with a $G_2$-structure.\nThe $\\T^2$-bundle over $\\T^4$ can be also be viewed as a\nhypersurface of a $7$-dimensional manifold with a calibrated\n$G_2$-structure, i.e. such that the associated stable $3$-form is\nclosed. Indeed, if $(M, \\varphi)$ is a $7$-dimensional manifold\nwith a calibrated $G_2$-structure, then any hypersurface $\\iota: N\n\\hookrightarrow M$ with unit normal vector $\\nu$ such that the Lie\nderivative $L_{\\nu} \\varphi =0$ admits an $SU(3)$-structure\n$(\\omega, \\psi)$ in the class ${\\mathcal W}_2^+$ defined by\n$$\n\\begin{array}{l}\n\\omega = \\nu \\lrcorner \\varphi,\\\\[3pt]\n\\psi_+ = \\nu \\lrcorner * \\varphi,\\\\[3pt]\n\\psi_- = \\iota^* \\varphi.\n\\end{array}\n$$\nFor general theory on an oriented hypersurface of a $7$-dimensional manifold endowed with a $G_2$-structure see \\cite{C}.\n\nIf we consider the 7-dimensional nilmanifold associated with the\nLie algebra (see \\cite{F})\n$$\n(0,0,0,-13,-23,0,0)\n$$\nand the hypersurface which is a maximal integral submanifold of the involutive distribution defined by the $1$-form $e^6$, then one gets the\n $SU(3)$-structure considered above.\n\n Another example of hypersurface (non nilmanifold) can be obtained by the 7-dimensional compact manifold $M = X \\times S^1$, where $X$ is the compact\n solvmanifold\n considered by Nakamura (see \\cite{N}), associated with the solvable Lie algebra\n $$\n (0, 12 - 45, - 13 + 46, 0,15-24,-16+34,0)\n $$\n and endowed with the $G_2$-structure\n $$\n \\varphi = e^{147} + e^{357} - e^{267} + e^{136} + e^{125} + e^{234} - e^{456}.\n $$\n The compact hypersurface, maximal integral submanifold of the involutive distribution defined by the $1$-form $e^7$, has\n an 
$SU(3)$-structure in the class ${\\mathcal W}_2^+$.\n\n\\medskip\n\n\n\nWe will show that, if the $SU(3)$-structure is not integrable, then\nthe $2$-form $\\pi_2$ cannot be closed. Indeed,\n\n\n\\begin{prop}\\label{strongconstruction} Let $N$ be a $6$-dimensional\n manifold endowed with an $SU(3)$-structure $(\\omega, \\psi)$ in the class ${\\mathcal W}_2^+$.\nIf $\\pi_2$ is closed, then the $SU(3)$-structure is integrable. In particular, the associated Riemannian metric $g$ is Ricci flat.\n\\end{prop}\n\n\\begin{proof} As already remarked, $(\\omega, \\psi)$ is in the class ${\\mathcal W}_2^+$ if and only if \\begin{equation} \\label{W_2^+}\nd \\psi_+ = -\\pi_2 \\wedge \\omega\\,,\\quad\nd \\psi_- = d \\omega = 0,\n\\end{equation}\nwith $\\pi_2$ satisfying the following relations\n$$\n\\begin{array}{ll}\n \\pi_2 \\wedge \\psi_- =0, \\quad & * J \\pi_2 = -\n\\pi_2 \\wedge \\omega\\\\[3pt]\nJ \\pi_2 = \\pi_2, \\quad &\\pi_2 \\wedge \\omega^2 =0.\n\\end{array}\n$$\nBy our assumption that $\\pi_2$ is closed, \\eqref{W_2^+} and the above definition of ${\\mathcal W}^+_2$ (see \\eqref{spacepi2}) we have\n$$\n0 = d (\\pi_2 \\wedge \\psi_+) = \\pi_2 \\wedge d \\psi_+ = - \\pi_2 \\wedge \\pi_2 \\wedge \\omega = \\vert \\pi_2 \\vert^2 * 1 \\,.\n$$\nThen $\\pi_2 =0$ and we get the result.\n\\end{proof}\n\nIn particular, as a consequence we have that if $(N, \\omega,\n\\psi)$ is a $6$-dimensional manifold endowed with a (not\nintegrable)\n $SU(3)$-structure in the class ${\\mathcal W}_2^+$, the $3$-form $H = \\pi_2 \\wedge \\alpha$ on $N \\times S^1$ cannot be closed.\n\n\\begin{rem} {\\rm It has to be noted that, in view of Proposition \\ref{strongconstruction}, for $SU(3)$-manifolds in the\nclass ${\\mathcal W}_2^+$, the two conditions\n$$\nd\\pi_2=0\\quad \\hbox{\\rm and}\\quad d\\psi_+=0\n$$\nare equivalent.\\newline Furthermore, under the conditions of\nProposition \\ref{strongconstruction}, the holonomy group of the\nmetric on the manifold $N$ can be properly contained in $SU(3)$. 
Indeed, for example, if one\ntakes the $6$-manifold $N=M^4 \\times \\mathbb{T}^2$, where $(M^4$, $ \\omega_1, \\omega_2, \\omega_3)$\nis an hyper-K\\\"ahler manifold and\n$\\mathbb{T}^2$ is a $2$-dimensional torus, then an $SU(3)$-structure is defined by\n\\begin{eqnarray*}\n\\omega &=& \\omega_1 + e^5 \\wedge e^6,\\\\\n\\psi_+ &=& \\omega_2 \\wedge e^5 - \\omega_3 \\wedge e^6,\\\\\n\\psi_- &=& \\omega_2 \\wedge e^6 + \\omega_3 \\wedge e^5,\n\\end{eqnarray*}\nwhere $\\{e^5, e^6\\}$ is an orthonormal coframe on $\\mathbb{T}^2$. Since\n$$\nd \\omega_i =0\\,,\\,\\, i = 1,2,3\\,,\\quad \\quad d e^5 = d e^6 =0\\,,\n$$\nwe have\n$$\nd \\omega =0\\,,\\quad d \\psi_{\\pm} =0\\,.\n$$\nTherefore, the manifold $N$ endowed with the $SU(3)$-structure defined by $(\\omega,\\psi)$ belongs to the class ${\\mathcal W}_2^+$ and\nthe holonomy of the associated Riemannian metric is strictly contained in $SU(3)$, since the metric is a product.}\n\\end{rem}\n\\begin{rem} {\\rm Consider on $N \\times \\R$ the generalized $G_2$-structure defined by the structure form $\\rho$ given\nby \\eqref {rho} and let $H$ be a closed non-zero $3$-form.\nIf we drop the condition $d_H \\hat \\rho =0$, then the $SU(3)$-structure $(\\omega, \\psi)$ on $N$ has to be in the class\n${\\mathcal W _2}^+ \\oplus {\\mathcal W _2}^- \\oplus {\\mathcal W _5}$ with\n$$\nd \\psi_+ = \\pi_1 \\wedge \\psi_+ - \\pi_2 \\wedge \\omega = - S \\wedge \\omega, \\quad dS =0.\n$$\n Indeed,\n$\\rho$ is $d_H$-closed if and only if\n$$\n\\left\\{\n\\begin{array}{l}\nd \\omega=0\\,,\\\\[5pt]\nd \\psi_+ \\wedge \\alpha = - H \\wedge \\omega\\,,\\\\[5pt]\nH \\wedge \\psi_+ \\wedge \\alpha =0\\,.\n\\end{array}\n\\right.\n$$\nSetting\n$$\nH = \\tilde H + S \\wedge \\alpha,\n$$\nwith $\\tilde H$ and $S$ a $3$-form and a $2$-form respectively on $N$, then one gets the equivalent conditions:\n$$\n\\left\\{\n\\begin{array}{l}\nd \\omega =0,\\\\[5pt]\nd \\psi_+ = - S \\wedge \\omega\\,,\\\\[5pt]\n\\tilde H \\wedge \\psi_+ = \\tilde H 
\\wedge \\omega =0\\,,\\\\[5pt]\nd S = d \\tilde H =0\\,.\n\\end{array}\n\\right.\n$$\nIn terms of the components of the intrinsic torsion one has that $\\nu_0, \\alpha_0, \\nu_1, \\nu_3$ vanish and $$\nd \\psi_+ = - S \\wedge \\omega.\n$$\nIn contrast with the case of $SU(3)$-manifolds in the class\n${\\mathcal W}_2^+$ (see Proposition \\ref{strongconstruction}),\n$6$-dimensional compact examples of this type may exist, as\nshowed by the following\n\n\\begin{ex} {\\rm Consider the $6$-dimensional nilpotent Lie algebra $\\mathfrak l$ with structure equations\n$$\n(0,0,0,0,0,25)\n$$\nand the $SU(3)$-structure given by\n$$\n\\begin{array}{l}\n\\omega = e^{12} + e^{34} + e^{56},\\\\[3pt]\n\\psi = (e^1 + i e^2) \\wedge (e^3 + i e^4) \\wedge (e^5 + i e^6).\n\\end{array}\n$$\nLet $H$ be the closed $3$-form\n$$\n\\begin{array} {lcl}\nH &= &- e^{457} + a_1 (e^{124} - e^{456}) + a_2 (e^{125} - e^{345}) - a_3 (e^{134} - e^{156}) + a_4 e^{135} + \\\\[3pt]\n&& a_5 (e^{145} - e^{235}) + a_6 (e^{145} + e^{246}) + a_7 (e^{234} - e^{256}) + a_8 e^{245},\n\\end{array}\n$$\nwith $a_i \\in \\R$, $i = 1, \\ldots, 8$.\nThen $( \\omega, \\psi)$ induces a structure form $\\rho$ on a compact quotient of $L \\times \\R$, where $L$ is the simply connected nilpotent Lie group\nwith Lie algebra $\\mathfrak l$, by a uniform discrete subgroup. A straightforward computation shows that $d_H \\rho =0$ . 
}\n\\end {ex}\n\n}\n\\end{rem}\n\n\n\n\n\n\n\n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}} +{"text":"\n\\section{Introduction}\n\nThe purpose of this paper is to point out\nsome connections between:\n\\begin{enumerate}\n\\item The monodromy of periodic linear\ndifferential equations;\n\\item The pentagram map, which\nwe studied in [{\\bf S1\\\/}] and [{\\bf S2\\\/}];\n\\item Dodgson's method of condensation for\ncomputing determinants;\n\\end{enumerate}\nWe discovered most of these connections through\ncomputer experimentation.\n\n\\subsection{Monodromy}\n\nConsider the second order O.D.E.\n\\begin{equation}\n\\label{diff2}\nf''(t)+\\frac{1}{2} q(t) f(t)=0.\n\\end{equation}\nHere $q(t)$ is $1$-periodic.\nIf $\\{f_1,f_2\\}$ is a basis for the solution space of\nEquation \\ref{diff2} then\nthere is some linear $T \\in SL_2(\\R)$ such\nthat $f_j(t+1)=T(f_j(t))$ for $j=1,2$.\nThe trace tr$(T)$, which is independent of\nbasis, is sometimes called the\n{\\it monodromy\\\/} of Equation\n\\ref{diff2}.\nThe ratio $f=f_1\/f_2$ gives a smooth map\nfrom $\\R$ into the projective line.\nHere $q$ is \ngiven by the {\\it Schwarzian derivative\\\/}:\n\\begin{equation}\nq=\\frac{f'''}{f'}-\\frac{3}{2} \\bigg(\n\\frac{f''}{f'}\\bigg)^2.\n\\end{equation}\n\nHere is a discrete analogue of Equation \\ref{diff2}.\nThe {\\it cross ratio\\\/} of $4$ points\n$a,b,c,d \\in \\R$ is given by\n\\begin{equation} \\label{cro}\nx(a,b,c,d)=\\frac{(a-c)(b-d)}{(a-b)(c-d)}.\n\\end{equation}\nA calculation shows that the quantity\n\\begin{equation}\n\\lim_{\\epsilon \\to 0} \\frac{1}{\\epsilon^2}\nx(f(t-3 \\epsilon),f(t-\\epsilon),f(t+\\epsilon),f(t+3 \\epsilon))\n\\end{equation}\nconverges to a multiple of $q$, when $f$ is sufficiently smooth.\nThus, the cross ratio is a \ndiscrete analogue of the Schwarzian derivative.\nSuppose we have an infinite $n$-periodic sequence\n$...q_n,q_1,q_2,...,q_n,q_1,...$.\nWe can find points $...,f_1,f_2,f_3,...$ in \nthe projective line such that 
\n\\begin{equation}\n\\label{diff1}\nx(f_j,f_{j+1},f_{j+2},f_{j+3})=q_j \\hskip 30 pt \\forall j\n\\end{equation}\nThere will be a projective transformation\n$T$ such that $f_{j+n}=T(f_j)$ for all $j$.\nThe conjugacy class of $T$ only depends on $q$. To obtain\na numerical invariant, we can lift $T$\nto $SL_2(\\R)$ and take its trace. \nThis quantity is a rational function in\nthe variables $q_1,...,q_n$. \n\nA main focus of this paper is a discrete analogue\nfor the third order case. This analogue involves\ninfinite polygons in the projective plane. In\nanalogy to the cross ratio \nwe will define {\\it projective invariants\\/} of\npolygons in \\S 3.1.\nWe begin with an\ninfinite sequence $...,x_1,x_2,...$ of projective invariants\nhaving period $2n$. \nThese invariants determine, up to a projective\ntransformation, an infinite polygon which is\ninvariant under a projective transformation.\nWe call $P$ a {\\it twisted $n$-gon\\/}.\nIn other words, we have a map $P: \\Z \\to \\R\\P^2$\nand a projective transformation $T$ such that\n$P(n+j)=T(P(j))$ for all $j$.\n\nThe monodromies $\\Omega_1$ and $\\Omega_2$ corresponding to $T$\nare rational functions of \nthe variables $x_1,...,x_{2n}$. \nLet $[\\cdot ]$ denote the floor function.\nIn \\S 2.1 we will define polynomials\n$O_1,...,O_{[n\/2]},O_n$ and\n$E_1,...,E_{[n\/2]},E_n$. We call these\npolynomials the {\\it pentagram invariants\\/}.\nWe will express the monodromies explicitly\nin terms of the pentagram invariants:\n\\begin{equation}\n\\label{main}\n\\Omega_1=\\frac{(\\sum_{k=0}^{[n\/2]} O_k)^3}{O_n^2E_n}; \\hskip 40 pt\n\\Omega_2=\\frac{(\\sum_{k=0}^{[n\/2]} E_k)^3}{E_n^2O_n}.\n\\end{equation}\n\n\\subsection{The Pentagram Map}\n\n\nRoughly, the {\\it pentagram map\\/} is the map which\ntakes the polygon $P$ to the polygon $P'$,\nas indicated in Figure 1. 
In \\S 4 we\nwill give a precise definition, which\nexpresses the pentagram map as a\ncomposition of two involutions\n$\\alpha_1$ and $\\alpha_2$.\n\n\\begin{center}\n\\psfig{file=Pix\/pix3.ps}\nFigure 1\n\\end{center}\n\nExpressed in our\nprojective invariant coordinates$-$the cross\nratio generalizations discussed in the previous\nsection$-$the pentagram map\nhas the form\n$\\alpha_1(x_1,...,x_{2n})=(x'_1,...,x'_{2n})$ and\n$\\alpha_2(x_1,...,x_{2n})=(x''_1,...,x''_{2n})$ where\n\\begin{eqnarray}\n\\label{basic}\nx_{2k-1}'=x_{2k} \\frac{1-x_{2k+1}x_{2k+2}}{1-x_{2k-3}x_{2k-2}};\n\\hskip 25 pt\nx_{2k}'=x_{2k-1}\\frac{1-x_{2k-3}x_{2k-2}}{1-x_{2k+1}x_{2k+2}};\n\\cr \\cr \\cr\nx_{2k+1}''=x_{2k}\\frac\n{1-x_{2k-2}x_{2k-1}}\n{1-x_{2k+2}x_{2k+3}}\n\\hskip 25 pt\nx_{2k}''=x_{2k+1}\\frac\n{1-x_{2k+2}x_{2k+3}}\n{1-x_{2k-2}x_{2k-1}}\n\\end{eqnarray}\nIn these formulas, the indices are taken mod $2n$.\nWe let $\\alpha=\\alpha_1 \\circ \\alpha_2$.\nIn general, $\\alpha$ has infinite order.\n\nIt turns out that the pentagram invariants are\ninvariant polynomials for the {\\it pentagram map\\\/},\nwhen it is expressed in suitable coordinates.\n\n\\begin{theorem} \n\\label{trans}\n$O_k \\circ \\alpha_j=E_k$ and\n$E_k \\circ \\alpha_j=O_k$ for\n$j=1,2$ and for all $k$.\n\\end{theorem}\n\n\\noindent\nIn \\S 2 we will give a completely algebraic proof of\nTheorem \\ref{trans}. In \\S 3-4 we will give a\nmore conceptual proof which goes roughly as follows:\nThe pentagram map commutes with projective transformations\nand therefore must preserve the monodromies\n$\\Omega_1$ and $\\Omega_2$.\nIt follows from the general homogeneity\nproperties of Equation \\ref{basic} that\nthe pentagram map must preserve the properly\nweighted homogeneous pieces of the\nmonodromies, and these pieces are precisely\nthe pentagram invariants. 
In \\S 6 we prove\n\n\\begin{theorem}\n\\label{precise}\nThe pentagram invariants are algebraically\nindependent, so that\n$\\alpha$ has at least $2[n\/2]+2$ algebraically independent\npolynomial invariants.\n\\end{theorem}\n\nWe conjecture that the pentagram invariants give the complete\nlist of invariants for the pentagram map, at least when it\nacts on the spaces of twisted $n$-gons. We also\nconjecture that the algebraic varieties cut out\nby the pentagram invariants are complex\ntori, after a suitable compactification.\nFinally we conjecture that the pentagram map acts\non these complex tori as a translation in\nthe natural flat metric. \n\n\\subsection{The Method of Condensation}\n\nLet $M$ be an $m \\times m$ matrix. Let\n$M_{NW}$ be the $(m-1) \\times (m-1)$ minor\nobtained by crossing off the last row and\ncolumn of $M$. Here $N$ stands for ``north''\nand $W$ stands for ``west''. We define the\nother three $(m-1) \\times (m-1)$ minors $M_{SW}$,\n$M_{NE}$ and $M_{SE}$ in the obvious way.\nFinally, we define\n$M_C$ to be the ``central'' $(m-2) \\times (m-2)$ minor\nobtained by crossing off all the extreme\nrows and columns of $M$. Dodgson's\nidentity says \\begin{equation}\n\\label{lc}\n\\det(M) \\det(M_C)=\\det(M_{NW}) \\det(M_{SE})-\n\\det(M_{SW}) \\det(M_{NE}).\n\\end{equation}\nAssuming that $\\det(M_C)$ is non-zero,\nEquation \\ref{lc} expresses $\\det(M)$ \nas a rational function of determinants\nof matrices of smaller size. \nThis procedure can be iterated, expressing\nthe determinants of these smaller matrices\nas rational functions of determinants of still\nsmaller matrices. And so on.\nThis method of computing determinants\nis called {\\it Dodgson's method of\ncondensation\\/}. See\n[{\\bf RR\\/}] for a detailed discussion of\nthis method and the rational functions that arise.\n\nIn \\S 5 we will relate the pentagram map to\nthe method of condensation. In some sense,\n{\\it the pentagram map\ncomputes determinants\\/}. 
We exploit this\npoint of view to prove\n\n\\begin{theorem}\n\\label{hyper}\nSuppose that $P$ is a $4n$-gon whose sides\nare alternately parallel to the $x$ and\n$y$ axes. Then (generically) the\n$(2n-2)$nd iterate of the pentagram map transforms\n$P$ into a polygon whose odd vertices are \nall collinear and whose even vertices are all\ncollinear. \n\\end{theorem}\n\nThe surprise in Theorem \\ref{hyper} is that $P$ could\nhave trillions of sides.\nThe pentagram map goes about its business for\ntrillions of iterations and then the whole thing\ncollapses all at once into a polygon whose\nvertices lie on a pair of lines. \nTheorem \\ref{hyper} is closely related\nto the main result in [{\\bf S3\\\/}],\nwhich we proved by geometric methods.\n\n\\subsection{Paper Overview}\n\\begin{tabular}{ll} \n {\\bf \\S 2: The Invariants\\\/}& \\\\\n\\S 2.1: Basic Definitions \\\\\n\\S 2.2: Proof of Theorem \\ref{trans} \\\\ \n\n {\\bf \\S 3: Discrete Monodromy\\\/}& \\\\\n\\S 3.1: PolyPoints and PolyLines & \\\\\n\\S 3.2: Constructing the PolyPoints from its Invariants & \\\\\n\\S 3.3: The Final Calculation & \\\\\n\n {\\bf \\S 4: The Pentagram\\\/}& \\\\\n\\S 4.1: Basic Definitions& \\\\\n\\S 4.2: The Pentagram Map in Coordinates & \\\\\n\\S 4.3: Second Proof of Theorem \\ref{trans} & \\\\\n\\S 4.4: Conic Sections \\\\\n\n {\\bf \\S 5: The Method of Condensation\\\/}& \\\\\n\\S 5.1: Octahedral Tilings & \\\\\n\\S 5.2: Picture of the Pentagram Map & \\\\\n\\S 5.3: Circulent Condensations & \\\\\n\\S 5.4: The Lifting Problem \\\\\n\\S 5.5: Degenerate Polygons \\\\\n\\S 5.6: Proof of Theorem \\ref{hyper}\\\\\n\n{\\bf \\S 6: Proof of Theorem \\ref{precise}\\\/} \\\\\n\\S 6.1: Proof modulo the Vanishing Lemma \\\\\n\\S 6.2: Proof of the Vanishing Lemma\n\\end{tabular}\n\n\\subsection{Acknowledgements}\n\nI would like to thank Peter Doyle, Bill Goldman,\nPat Hooper,\nFrancois Labourie, and John Millson for interesting\nconversations related to this work. 
\n\\newpage\n\n\\section{The Invariants}\n\n\\subsection{Basic Definitions}\n\nAll our definitions depend on a fixed\ninteger $n \\geq 3$. We will sometimes\nsuppress $n$ from our notation.\nLet $Z=\\{1,2,3,...,2n\\}$.\nWe think of the elements of $Z$ as\nbeing ordered cyclically, so that\n$2n$ and $1$ are consecutive.\nAlso, in our notation all our indices\nare taken cyclically.\n\nWe say that an {\\it odd unit\\\/} of $Z$\nis a subset having one of the two\nforms:\n\\begin{enumerate}\n\\item $U=\\{j\\}$, where $j$ is odd.\n\\item $U=\\{k-1,k,k+1\\}$, where $k$ is even.\n\\end{enumerate}\nWe say that two odd units $U_1$ and $U_2$ are\n{\\it consecutive\\\/} if the set of odd\nnumbers in the union\n$U_1 \\cup U_2$ are consecutive. For\ninstance $\\{1\\}$ and $\\{3,4,5\\}$ are\nconsecutive whereas\n$\\{1,2,3\\}$ and $\\{7,8,9\\}$ are not.\n\nWe say that an {\\it odd admissible subset\\\/} is\na nonempty subset $S \\subset X$ consisting of a\nfinite union of odd units, no two of which\nare consecutive. We define the {\\it weight\\\/}\nof $S$ to be the number of odd units it contains.\nWe denote this quantity by $|S|$. We define the\n{\\it sign\\\/} of $S$ to be the $+1$ is $S$ contains\nan even number of singleton units, and $-1$ if $S$ contains\nan odd number of singleton units.\nAs an example, the subset\n$$\\{1,5,6,7,11\\}=\\{1\\} \\cup \\{5,6,7\\} \\cup \\{11\\}$$\nis an odd admissible subset\nif $n \\geq 7$. This subset has weight $3$ and sign $+1$.\nAs an exception to this rule, we call the\nset $\\{1,3,5,7,...,2n-1\\}$ odd admissible as well.\n\n\nEach odd admissible subset $S$ defines a\nmonomial $O_S \\in R$:\n\\begin{equation}\nO_S={\\rm sign\\\/}(S) \\prod_{j \\in S} x_j.\n\\end{equation}\nLet $O(k)$ denote the set of\nweight $k$ odd admissible subsets of $Z$.\nIf $n$ is even then $O(k)$ is nonempty iff\n$k \\in \\{1,2,...,n\/2,n\\}$. 
If $n$ is odd then\n$O(k)$ is nonempty iff $k \\in \\{1,2,...,(n-1)\/2,n\\}$.\nWe define\n\\begin{equation}\nO_k=\\sum_{S \\in O(k)} O_S.\n\\end{equation}\nBy convention we set $O_0=1$.\n\nWe can make all the same definitions with the\nword {\\it even\\\/} replacing the word {\\it odd\\\/}.\nThis leads to the definition of the $E$ polymonials.\n\n\\subsection{Proof of Theorem \\ref{precise}}\n\nLet $\\alpha=\\alpha_1 \\circ \\alpha_2$ be as\nin the introduction.\nFor any rational function $f$, we define\n$\\alpha(f)=f \\circ \\alpha$.\n\nBy definition\n\\begin{equation}\nO_n=x_1x_3...x_{2n-1}; \\hskip 20 pt\nE_n=x_2x_4...x_{2n}.\n\\end{equation}\nIf is easy to see directly from\nEquation \\ref{basic} that \n$\\alpha_j(O_n)=E_n$ and\n$\\alpha_j(E_n)=O_n$. When $n$ is even, we have\n\\begin{equation}\nO_{n\/2}=x_1x_5x_9...+x_3x_7x_{11}...; \\hskip 20 pt\nE_{n\/2}=x_2x_6x_{10}...+x_4x_8x_{12}....\n\\end{equation}\nOnce again, it is easy to see directly from\nEquation \\ref{basic} that\n$\\alpha_j(O_{n\/2})=E_{n\/2}$ and\n$\\alpha_j(E_{n\/2})=O_{n\/2}$.\nThe interesting cases, which we now consider,\nare when $k0$. \nLet $\\Delta_j=\\Psi_j(A^c,j)$ be the formal\nsum of sparse adapted measures of mass $j$ which\nare supported in $A^c$. Note\nthat $\\Delta_v=\\Psi'$. \n\nSuppose that $k \\in \\{0,....,v-1\\}$. If\n$j \\geq k$ and $\\tau$ is a summand of\n$\\Phi_j$ there are exactly $j$ choose $k$\nways to write \n$\\tau=\\tau_1 \\cdot \\tau_2$, where\n$\\tau_1 \\in \\Delta_k$ and\n$\\tau_2 \\in \\Theta_{v-k}.$\nThe point is that we can choose the support of\n$\\tau_1$ to be any $k$-element subset of\nthe $A^c$-support of $\\tau$. 
\nThis way of counting things gives the relation:\n\\begin{equation}\n\\Delta_k \\Theta_{v-k}=\\sum_{j=k}^v \n\\left(\\begin{array}{c} j \\\\k \\end{array} \\right) \\Phi_j,\n\\end{equation}\nfor $k=0,...,v-1$.\nCombining the previous equation with\na familiar corollary of the binomial theorem,\n\\begin{equation}\n\\sum_{k=0}^{v-1} (-1)^k \\Delta_k \\Theta_{v-k}=\n\\Phi_0 + (-1)^v \\Phi_v.\n\\end{equation}\nSince\n$\\langle \\Delta_k \\Theta_{v-k} \\rangle=\n\\langle \\Delta_k \\rangle \\langle \\Theta_{v-k} \\rangle=\n0.$ we have\n$\\langle \\Psi \\rangle=\n\\langle \\Phi_0 \\rangle= \\pm\n\\langle \\Phi_v \\rangle= \n\\langle \\Psi' \\rangle$.\n$\\spadesuit$ \\newline\n\nSince $v0$ for all $z \\in A_v$.\nWe will use induction \nto show that $\\langle \\Psi'(A_v,w) \\rangle>0$ for all $v,w \\geq 1$.\nLet $\\underline \\omega^v$ be the mass $1$ measure\nsupported on $\\omega^v$. If $\\tau$ is a\nmass $w$ sparse measure supported in $A_v^c$ then\nthe support of $\\tau$ intersects\n$\\{\\omega^v,\\omega^{-v}\\}$ in $0$, $1$, or $2$\npoints. Thus\n\\begin{equation}\n\\label{indu}\n\\Psi'(A_v^c,w)=\\left\\{\\matrix{\\Psi'(A^c_{v-1},w) \\cr + \\cr\n(\\underline \\omega^v+\\underline \\omega^{-v}) \n\\cdot \\Psi'(A^c_{v-1},w-1) \\cr + \\cr\n(\\underline \\omega^v \\cdot \\underline \\omega^{-v}) \\cdot \n\\Psi'(A^c_{v-1},w-2).}\\right\\}\n\\end{equation}\nAt least one term on the right is nontrivial. From\n\\begin{equation}\n\\label{indu2}\n\\langle \\underline \\omega^v+\\underline \\omega^{-v}\\rangle=\n2 \\Re(\\omega^v)>0; \\hskip 30 pt\n\\langle \\underline \\omega^v \\cdot \\underline \\omega^{-v} \\rangle=1.\n\\end{equation} \nand induction, any nontrivial term on the\nright hand side of Equation \\ref{indu} evaluates\nto a positive number. 
Therefore, the\nleft hand side evaluates to a positive\nnumber as well.\n\n\\subsubsection{Case 2: $v \\geq n\/4$}\n\nFor each integer $w \\in (0,n\/4]$ we choose an open arc $B_w$, \ninvariant under complex conjugation, such that\n$-1 \\in B_w$ and\nthere are exactly $w$ $n$th roots of\nunity contained in $B_w$.\nLet $\\Psi(w,k',k)$ denote the formal sum of\nadapted mass $k$ measures $\\mu$ such that\n$\\mu$ is supported in $B_w$ and\n$\\mu(B_w-B_{w-2}) \\leq k'$.\n\nOur goal is to show that\n$\\langle \\Psi(w,v,v) \\rangle \\not = 0$,\nwhere $w$ is the number of $n$th roots of\nunity in $A_v$. \nWe order the triples $(w,k',k)$ lexicographically.\nWe will show inductively that\n$\\langle \\Psi(w,k',k) \\rangle>0$ if $k$ is even\nand $\\langle \\Psi(w,k',k)<0$ if $k$ is odd.\n(These sums are real, by symmetry.)\n\nIf $k=1$ then \n$\\langle \\Psi(w,k',k) \\rangle$ is the sum of numbers\nall of which have negative real part, so that\n$\\langle \\Psi(w,k',k) \\rangle<0$ in this case.\nAlso,\n$\\langle\\Psi(1,k,k) \\rangle=(-1)^k.$\nHenceforth we assume that $w \\geq 2$ and\n$k \\geq 2$. 
Since $w \\geq 2$ there are\ntwo $n$th roots of unity\n$\\alpha_1$ and $\\alpha_2=\\overline \\alpha_1$\nin $B_w-B_{w-2}$.\n\nSuppose $w=2$.\nA simple counting argument gives\n$$\\Psi(w,k,k)=(\\underline \\alpha_1 + \\underline \\alpha_2) \\cdot\n\\Psi(v,k-1,k-1) + \\underline \\alpha_1 \\cdot \\underline \\alpha_2\n\\cdot \\Psi(v,k-2,k-2).$$\nNote that $\\alpha_1+\\alpha_2<0$.\nBy induction, both terms on the right have the\ndesired sign when evaluated.\nHenceforth we assume that $w \\geq 3$.\n\nSuppose that $k'=1$.\nA counting argument gives\n$$\\Psi(w,1,k)=\\Psi(w-2,k,k)+\n(\\underline \\alpha_1+\\underline \\alpha_2) \\cdot\n\\Psi(w-2,k-1,k-1)$$\nAgain, we note that $\\alpha_1+\\alpha_2<0$.\nSince\n$w \\geq 3$ both terms on the right have the \ndesired sign when evaluated.\n\nSuppose that $k'=2$.\nA counting argument gives\n$$\\Psi(w,2,k)=\\Psi(w-2,k,k)+\n(\\underline \\alpha_1+\\underline \\alpha_2) \\cdot \\Psi(1,k-1)+\n\\underline \\alpha_1 \\cdot \\underline \\alpha_2\n\\cdot \\Psi(w-2,k-2,k-2).$$\nBy induction, all terms on the right have\nthe desired sign when evaluated.\n\nSuppose that $k' \\geq 3$.\nA counting argument gives\n$$\n\\label{induct}\n\\Psi(w,k',k)= \\left\\{ \\matrix{\n\\Psi(w-2,k'-2,k) \\cr + \\cr\n(\\underline \\alpha_1 + \\underline \\alpha_2) \\cdot\n\\Psi(w,k'-1,k-1) \\cr + \\cr\n\\underline \\alpha_1 \\cdot \\underline \\alpha_2\n\\cdot \\Psi(w,k'-2,k'-2)} \\right\\} .\n$$\nBy induction, all three terms on the right\nhave the desired sign when evaluated.\n\nThis completes our proof.\n\n\n\\newpage\n\n\\section{Discrete Monodromy}\n\n\\subsection{PolyPoints and PolyLines}\n\\label{poly}\n\nAs in previous chapters we will fix some\npositive integer $n \\geq 3$.\n\nLet $\\P$ be the projective plane over the field\n$\\F$. Say that a {\\it PolyPoint\\\/} is a \nbi-infinite sequence $A=\\{...A_{-3},A_1,A_5,..\\}.$ of\npoints in $\\P$. 
(For technical reasons\nwe always index these points by integers\nhaving the same odd congruence mod $4$.)\nWe assume also that there\nis a projective transformation $T$ such\nthat $T(A_j)=A_{j+4n}$ for all $j \\in \\Z$.\nWe call $T$ the {\\it monodromy\\\/} of $A$.\n\nSay that a {\\it PolyLine\\\/} is a \nbi-infinite sequence $B=\\{...B_{-1},B_3,B_7,..\\}$ of\nlines in $\\P$. \nWe assume also that there\nis a projective transformation $T$ such\nthat $T(B_j)=B_{j+4n}$ for all $j \\in \\Z$.\nWe call $T$ the {\\it monodromy\\\/} of $B$.\n\nGiven two points $a,a' \\in \\P$ we let $(aa')$ be the\nline containing these two points. Given two\nlines $b,b' \\in \\P$ we let $(bb')$ be the point\nof intersection of these two lines. Every PolyPoint\n$A$ canonically determines a PolyLine $B$, by the\nrule $B_j=(A_{j-2}A_{j+2})$. At the same time\nevery PolyLine $B$ determines a PolyPoint $A$ by the\nrule $A_j=(B_{j-2}B_{j+2})$. In this case we call\n$A$ and $B$ {\\it associates\\\/}. By construction\nassociates have the same monodromy.\n\nThe {\\it dual space\\\/} to $\\P$ is the space \nof lines in $\\P$. This space, denoted by\n$\\P^*$, is isomorphic to $\\P$.\nIndeed $\\P^*$ is the projectivization of\nthe vector space dual to $\\F^3$.\nAny projective transformation $T: \\P \\to \\P$\nautomatically induces a projective transformation\n$T^*: \\P^* \\to \\P^*$, and {\\it vice versa\\\/}.\nAny point in $\\P$ canonically determines a\nline in $\\P^*$. Likewise, points in $\\P^*$\ncanonically determine lines in $\\P$ and\nlines in $\\P^*$ canonically determine\npoints in $\\P$. 
The two spaces are on\nan equal footing.\n\nGiven the PolyPoint $A$, we define\n$A^*$ to be the PolyPoint in $\\P^*$ whose\nlines are given by the associate $B$.\nIf the points of $A$ are indexed by\nnumbers congruent to $1$ mod $4$ then\nthe points of $A^*$ are indexed by\nnumbers congruent to $3$ mod $4$, and\n{\\it vice versa\\\/}.\nWe make the same definitions for\nPolyLines.\nBy construction\n$A^{**}=A$ and $B^{**}=B$. If $T$ is\nthe common monodromy of $A$ and $B$ then\n$T^*$ is the common monodromy of\n$A^*$ and $B^*$. We call $A^*$ and\n$B^*$ the {\\it duals\\\/} of $A$ and $B$.\n\nFor any projective transformation $T$,\nacting either on $\\P$ or $\\P^*$ we\ndefine\n\\begin{equation}\n\\Omega_1(T)=\\frac{{\\rm tr\\\/}^3(\\widetilde T)}{\\det(\\widetilde T)};\n\\hskip 30 pt\n\\Omega_2(T)=\\Omega_1(T^*).\n\\end{equation}\nHere $\\widetilde T$ is a linear transformation whose\nprojectivization is $T$. That is, $\\widetilde T$ is a\n{\\it lift\\\/} of $T$.\nIt is easy to see that these quantities are\nindependent of lift. Moreover,\n$\\Omega_j(T)$ only depends on the conjugacy\nclass of $T$. Finally,\n$\\Omega_{3-j}(T^*)=\\Omega_j(T)$ for any projective\ntransformation.\n\nIf $T$ is the monodromy of $A$ we call\n$\\Omega_1(T)$ and $\\Omega_2(T)$ the\n{\\it monodromy invariants\\\/} of $A$.\nBy construction $A^*$ has the same\n{\\it set\\\/} of monodromy invariants as\n$A$, but their order is switched.\nThe same goes for $B$. If $S$ is\nsome other projective transformation,\nthen $A$ and $S(A)$ have the same\nmonodromy invariants. Likewise,\n$B$ and $S(B)$ have the same\nmonodromy invariants.\n\nWe now introduce our $2$-dimensional versions of the\ncross ratio. 
If $j$ is\none of the indices for the points of $A$ we define\n\\begin{eqnarray}\n\\label{invt}\np_{(j+1)\/2}(A)=\nx(A_{j + 8}, A_{j + 4}, (B_{j + 6} B_{j - 2}),\n (B_{j + 6} B_{j - 6})) \\cr \\cr\nq_{(j-1)\/2}(A)=x(A_{j - 8}, A_{j - 4}, (B_{j - 6} B_{j + 2}),\n (B_{j - 6} B_{j + 6}))\n\\end{eqnarray}\nHere $x$ stands for the ordinary cross ratio,\nas in Equation \\ref{cro}. \nIn the first equation, all $4$ points lie on the\nline $B_{j+6}$. In the second equation, all\n$4$ points lie on $B_{j-6}$. Conpare Figure 3 below.\nIf the points of $A$ are labelled by \nintegers congruent to $1$ mod $4$ then the\ninvariants of $A$ are\n$...q_0,p_1,q_2,p_3,...$.\nIf the points of $A$ are indexed by integers\ncongruent to $3$ mod $4$ then the invariants of\n$A$ are\n$...p_0,q_1,p_2,q_3,...$ In this chapter we will\nonly consider the case when the points of $A$ are\nindexed by integers congruent to $1$ mod $4$,\nthough in the next chapter we will consider both\ncases on an equal footing.\n\nWe can make all the same definitions for $B$, simply\nby interchanging the two roles of $A$ and $B$ in\nEquation \\ref{invt}.\nIt turns out that our invariants are not\njust invariant under projective transformations,\nbut also invariant under projective duality.\nPrecisely, we have\n\\begin{equation}\n\\label{dualinv}\np_j(A)=q_j(A^*); \\hskip 15 pt\nq_j(A)=p_j(A^*); \\hskip 15 pt\np_j(B)=q_j(B^*); \\hskip 15 pt\nq_j(B)=p_j(B^*)\n\\end{equation}\nfor all relevant indices. To see this symmetry,\nwe will consider an example.\n\nSuppose that points of $A$ are labelled by\nintegers congruent to $1$ mod $4$. 
The\nfirst half of Figure 3 highlights the\n$4$ points whose cross ratio is $p_3(A)$.\nThe second half shows the lines whose\ncross ratio is used to define $q_3(A^*)$.\nThe highlighted\npoints are exactly the intersection points of\nthe highlighted line with an auxilliary line.\nHence, the two cross ratios are the same.\n\n\n\\begin{center}\n\\psfig{file=Pix\/pix4.ps}\nFigure 3\n\\end{center}\n\n\nThis chapter is devoted to establising\nEquation \\ref{main}, which gives the formulas\nfor $\\Omega_1$ and $\\Omega_2$ in terms of\nour invariants. Given the formula for\n$\\Omega_1$, the formula for $\\Omega_2$ follows\nfrom projective duality and from \nEquation \\ref{dualinv}.\nThus, to establish Equation \\ref{main} it\nsuffices to derive the equation for\n$\\Omega_1$.\n\n\n\\subsection{Constructing the PolyPoint from its Invariants}\n\\label{monodromy series}\n\nIn \\S 2 we constructed our polynomials from the variables\n$x_1,...,x_{2n}$. In this section we are going to\nuse the alternate list of variables\n$p_1,q_2,p_3,q_4,...$. 
The reason for the alternate\nnotation is that it is useful to distinguish the\neven and odd variables in our constructions.\nThe polynomials in \\S 2 are obtained from the ones\nhere using the substitution\n$p_i \\to x_i$ when $i$ is odd and\n$q_i \\to x_i$ when $i$ is even.\n\nSuppose that $p_1,q_2,p_3,q_4,...$ are given variables.\nWe seek an infinite PolyPoint $A$ such that\n\\begin{equation}\n\\label{mainn}\np_{2i-1}(A)=p_{2i-1}; \\hskip 15 pt\n q_{2i}(A)=q_{2i}; \\hskip 15 pt i=1,2,3....\n\\end{equation}\nWhat we mean by Equation \\ref{mainn} is that we\nwish to specify the points of $A$ in such a\nway that the invariants we seek match a\nspecified list $p_1,q_2,p_3,...$.\nLikewise, we seek a formula for the associate $B$.\nFor our purposes we only need the formulas\nfor ``half'' of $A$ and ``half'' of $B$.\nThat is, we just need to know $A_{-3},A_1,A_5,...$ and \n$B_{-5},B_{-1},B_3,...$.\n\nHere we make the same definitions as in \\S 2.1, with respect\n$\\Z$ (the integers) rather than the finite set $Z$.\nTo each admissible\nsequence $S$ we associate a monomial $O_S$ in the formal\npower series ring\n$\\A=\\Z[[...p_1,q_2,p_3...]]$.\n(Again, under the substitution mentioned above,\nthe ring $\\A$ is identified with\n$\\Z[[...x_1,x_2,x_3,...]]$.) \nFor instance if $S=\\{1,2,3,9\\}$ then\n$O_S=-p_1q_2p_3q_9$.\nWe count the empty subset as both even and odd\nadmissible, and we define $O_{\\emptyset}=E_{\\emptyset}=1$.\nLet $O$ be the\nsum over all odd admissible sequences of finite\nweight. Likewise\nlet $E$ be the sum over all even admissible sequences\nof finite weight. 
\nWe have $O, E \\subset \\A$.\nGiven a pair of odd integers, $(r,s)$ we\ndefine $O_r^s$ to be the polynomial\nobtained from $O$ by setting $p_j$\nequal to zero, for $j \\leq r$ and $j \\geq s$.\nWe make the same definitions, with\neven replacing odd.\n\nLet $A=\\{...A_{-3}, A_{1}, A_{5},...\\}$ and\n$B=\\{...B_{-5}, B_{-1}, B_3, ...\\}$, where\n(in homogeneous coordinates)\n\n\\begin{eqnarray}\n\\label{PA}\nA_{-3}=[0,1,0]; \\hskip 15 pt \n A_{1}=[0,1,1]; \\hskip 15 pt \n A_{5}=[1,1,1]; \\cr \\cr\nA_{4j+1}=[{O_1^{2j-1}},\n\\ {O_{-1}^{2j-1}+p_1 O_3^{2j-1}},\n\\ {O_{-1}^{2j-1}}]; \\hskip 15 pt j=2,4,6... \n\\end{eqnarray}\n\n\\begin{eqnarray}\n\\label{PB}\nB_{-5}=[0,0,1]; \\hskip 15 pt \n B_{-1}=[1,0,0]; \\hskip 15 pt \n B_{3}=[0,1,-1]; \\hskip 15 pt \n B_{7}=[1,-1,0]; \\cr \\cr\nB_{4j+3}=[{-E_2^{2j}+p_1q_2E_4^{2j}},\n\\ {E_0^{2j}},\n\\ {-E_0^{2j}+E_2^{2j}}]; \\hskip 15 pt j=2,4,6...\n\\end{eqnarray}\n\nIn \\S 5.2 we explicitly list out the first $7$ points of $A$.\nWe discovered these formulas as follows. We normalized\nthe first few points of $A$ and then found the\nequations for successive points using the definitions\nof the invariants. At some point we saw a pattern\nin the growing polynomials we were generating.\nThe algebraic proofs we give in this section are\nreally more like verifications. We did everything\non the computer and simply converted our observations\ninto a proof. 
\n\nThe basic tool for us is the following set\nof relations, which are easily derived.\n\n\\begin{eqnarray}\n\\label{relations}\nO_r^s=0 \\hskip 15 pt \\forall r>s; \\hskip 30 pt\n O_{s-2}^s=O_s^s=1; \\cr\nE_r^s=0 \\hskip 15 pt \\forall r>s; \\hskip 30 pt\n E_{s-2}^s=E_s^s=1; \\cr \\cr\nO_r^s=O_{r+2}^s\n-p_{r+2}O_{r+4}^s+\nP_{r+3} O_{r+6}^s; \\hskip 15pt r 0$, a property which the lower\ndimensional systems did not show in accordance with the theorem\nof Mermin and Wagner.\\cite{MeW:PRL66}\n\n\\begin{figure}[ht!]\n\\centering\n\\includegraphics*[clip,width=60mm]{intermolecular-fig-6.pdf}\n\\caption{Schematic structure of the\n investigated three-dimensional bipartite lattice; solid bonds\n depict interactions $J_1$, dashed bonds $J_2$.} \n\\label{intermolecular-f-6}\n\\end{figure}\n\n\n\n\n\n\\begin{figure}[ht!]\n\\centering\n\\includegraphics*[clip,width=60mm]{intermolecular-fig-7.pdf}\n\\caption{(Color online) Low-temperature magnetization of the\n three-dimensional spin system shown in\n \\figref{intermolecular-f-6} for various interdimer\n couplings $J_2$ and $T=0.1$~K.} \n\\label{intermolecular-f-7}\n\\end{figure}\n\nLooking at the magnetization in \\figref{intermolecular-f-7} one\nimmediately realizes that already a rather small intermolecular\ninteraction of 10~\\% suffices to wash out the magnetization steps\nof the spin cube. It is important to keep in mind that the cube\nhas almost the same singlet-triplet gap as dimer and square, so\nthe effect is not thermal. 
We thus speculate that the\ndimensionality of the embedding structure, here three, is\nresponsible for the quick disappearance of the molecular\nfingerprints with increasing intermolecular interaction.\n\n\\begin{figure}[ht!]\n\\centering\n\\includegraphics*[clip,width=60mm]{intermolecular-fig-8a.pdf}\n\n\\includegraphics*[clip,width=60mm]{intermolecular-fig-8b.pdf}\n\\caption{(Color online) Zero-field susceptibility and specific heat of the\n three-dimensional spin system shown in\n \\figref{intermolecular-f-6} for various interdimer\n couplings $J_2$ and $B=0$.} \n\\label{intermolecular-f-8}\n\\end{figure}\n\nAlthough the magnetization is already drastically altered by\n10~\\% intermolecular interactions, the temperature dependence\nof the susceptibility does not show much deviation in this case,\ncompare \\figref{intermolecular-f-8}. The same holds for the\nspecific heat. These functions are modified only for larger\nintermolecular interactions in accord with the one- and\ntwo-dimensional cases. The peaks of the specific heat for\n$J_2\/J_1=0.5$ and $J_2\/J_1=1.0$ mark phase transitions to\nthree-dimensional ordered phases -- they correspond exactly to\nthose shown in Ref.~\\onlinecite{SSS:PRB03}. \n\n\n\n\\section{Dimers in various dimensions}\n\\label{sec-4}\n\nIn a second setup we kept the molecular unit fixed as a\ndimer and varied the dimension of the embedding. The\none-dimensional case remains the same. The two-dimensional case\ncan be derived from \\figref{intermolecular-f-1}~(b) by replacing\nall (thick) vertical $J_1$-bonds by (dashed) $J_2$-bonds. For\nthe three-dimensional case the two-dimensional lattices are stacked on top of\neach other with $J_2$-bonds in between. 
Thus each spin is\nconnected by one $J_1$-bond and one, three, and five $J_2$-bonds\nfor the one-, two-, and three-dimensional case, respectively.\n\n\n\\begin{figure}[ht!]\n\\centering\n\\includegraphics*[clip,width=60mm]{intermolecular-fig-9.pdf}\n\\caption{(Color online) Low-temperature magnetization of dimers\n in one-, two and three-dimensional arrangements for\n $J_2=1.0$~K and $T=0.1$~K.} \n\\label{intermolecular-f-9}\n\\end{figure}\n\nFor the following investigation $J_2\/J_1=0.1$ as well as the\ntemperature were kept constant. As can be clearly seen in\n\\figref{intermolecular-f-9} the magnetization step is more\nstrongly washed out with increasing dimensionality. The\ninfluence on the temperature dependence of both susceptibility\nas well as specific heat is again weak, see\n\\figref{intermolecular-f-10}. \n\n\\begin{figure}[ht!]\n\\centering\n\\includegraphics*[clip,width=60mm]{intermolecular-fig-10a.pdf}\n\n\\includegraphics*[clip,width=60mm]{intermolecular-fig-10b.pdf}\n\\caption{(Color online) Zero-field susceptibility and specific\n heat of dimers in one-, two and three-dimensional\n arrangements for $J_2=1.0$~K and $T=0.1$~K.} \n\\label{intermolecular-f-10}\n\\end{figure}\n\n\n\n\n\n\\section{Comparison with J-strain}\n\\label{sec-5}\n\nFinally, as a supplement to the presented investigations, we\nwould like to discuss the question whether a similar\nmodification of observables could stem from J-strain. The\nassumption of strain, for instance g-strain, is not unusual for\ninstance when modeling EPR lines. J-strain, i.e. a distribution of\n$J$ values about a mean was used in several theoretical models,\nsee e.g. Refs.~\\onlinecite{SPK:PRB08,SFF:JPCM:10,PPS:CPC15}. The\neffect of J-strain is rather similar to that of intermolecular\ninteractions: magnetization steps are smeared out, and\nsusceptibility as well as specific heat as functions of\ntemperature are not much altered. 
\n\n\n\\begin{figure}[ht!]\n\\centering\n\\includegraphics*[clip,width=60mm]{intermolecular-fig-11.pdf}\n\\caption{(Color online) Low-temperature magnetization of dimers\n in one-, two and three-dimensional arrangements for\n $J_2=1.0$~K and $T=0.1$~K (dashes) compared to isolated dimers\n with a J-strain of $\\Delta = 1.0, 3.0, 5.0$~K (solid curves),\n respectively.}\n\\label{intermolecular-f-11}\n\\end{figure}\n\nIn the following we present an investigation in which\nindependent dimers with a flat distribution of $J_1$-values in the\ninterval $[\\bar{J}-\\Delta,\\bar{J}+\\Delta ]$ have been\nsimulated. $\\Delta$ was chosen such, that the saturation field\nfor the three cases discussed in section \\ref{sec-4} is met.\nFigure~\\xref{intermolecular-f-11} shows a comparison of the\nmagnetization of a single dimer (black solid curve), of dimers\nwith intermolecular interactions in one, two, and three space\ndimensions (dashed curves) as well as of dimers with J-strain\naccording to the flat distribution (solid colored curves). One\nimmediately realizes that the functional form of the\nmagnetization curve with J-strain is different from the behavior\nunder the influence of intermolecular interactions. Although the\nsaturation field is met by tuning $\\Delta$ appropriately, the\nonset of the magnetization curves happens already at smaller\nfields. In addition, at the field value where the magnetization\nstep happens for the unperturbed dimer, the magnetization curves\nof dimers with J-strain cross at half the step height whereas for\nintermolecular interactions the magnetization curves cross at a\nlower magnetization. Overall, the magnetization curves for\nJ-strain are symmetric about the crossing field value. This\nwould also hold if another (more realistic, but also symmetric\nabout $\\bar{J}$) Gaussian\ndistribution of $J_1$ values would have been taken. 
Intermolecular\ninteractions on the contrary seem to lead to magnetization\ncurves, that do not show any symmetry with respect to the\noriginal crossing field.\n\n\n\\begin{figure}[ht!]\n\\centering\n\\includegraphics*[clip,width=60mm]{intermolecular-fig-12a.pdf}\n\n\\includegraphics*[clip,width=60mm]{intermolecular-fig-12b.pdf}\n\\caption{(Color online) Zero-field susceptibility and specific\n heat of dimers in one-, two and three-dimensional\n arrangements for $J_2=1.0$~K and $T=0.1$~K (dashes) compared\n to isolated dimers with a J-strain of $\\Delta = 1.0, 3.0,\n 5.0$~K (solid curves), respectively.}\n\\label{intermolecular-f-12}\n\\end{figure}\n\nFigure~\\xref{intermolecular-f-12} demonstrates that somewhat\ncontrary to the findings of section \\ref{sec-4} now the\nsusceptibility is only very weakly altered whereas the specific\nheat is more drastically modified especially for the case of the \nlargest J-strain.\n\n\n\n\n\\section{Summary and Outlook}\n\\label{sec-6}\n\nWe investigated the question how intermolecular interactions\ninfluence magnetic observables for small (molecular) magnetic\nunits. In particular we investigated for certain bipartite\nconfigurations how large the intermolecular interaction needs to\nbe compared to the intramolecular interaction in order to mask\nthe molecular behavior. It could be demonstrated that the\nvarious static magnetic observables reflect intermolecular\ninteractions differently: the low-temperature magnetization\nturned out to be most sensitive, since the appearance of\nmagnetization steps appears to be fragile. In addition\ndimensionality plays a role. With increasing space\ndimensionality of the intermolecular coupling the effect of\nmasking molecular properties happens for smaller intermolecular\ncoupling. Finally we discussed briefly whether similar\nmodifications of observables could be misinterpreted as\nJ-strain. 
We pointed out, that certain features of the\nobservables are different in the two scenarios, so that with\ngood quality of experimental data a discrimination should be\npossible. \n\n\n\\section*{Acknowledgment}\n\nThis work was supported by the Deutsche Forschungsgemeinschaft (DFG\nSCHN 615\/20-1). I would like to thank Arzhang Ardavan, Stephen\nBlundell, Marco Evangelisti, Andreas Honecker, Franziska\nKirschner, Hiroyuki Nojiri and Johannes Richter for valuable\ndiscussions. \n\n\n\n\n","meta":{"redpajama_set_name":"RedPajamaArXiv"}}