andreped committed on
Commit
6a58068
·
1 Parent(s): 5f1997e

feature: added post hoc analysis script for paper

Files changed (1)
  1. EvaluationScripts/statistics.py +139 -0
EvaluationScripts/statistics.py ADDED
@@ -0,0 +1,139 @@
import numpy as np
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.multitest import fdrcorrection
from scipy import stats
import pandas as pd


# increase maximum string length displayed by pandas
pd.options.display.max_colwidth = 100


def post_hoc_ixi():
    file_path = "/Users/andreped/Downloads/ALL_METRICS.csv"

    df = pd.read_csv(file_path, sep=";")
    df = df.iloc[:, 1:]  # drop the index column
    df = df[df["Experiment"] == "IXI"]
    df["Model"] = [x.replace("_", "-") for x in df["Model"]]

    # Tukey's HSD test over all pairwise model contrasts of TRE
    m_comp = pairwise_tukeyhsd(df["TRE"], df["Model"], alpha=0.05)
    model_names = np.unique(df["Model"])

    # collect the pairwise p-values into an upper-triangular matrix (-1 marks unused cells)
    all_pvalues = -1 * np.ones((len(model_names), len(model_names)), dtype=np.float32)
    pvs = m_comp.pvalues
    cnt = 0
    for i in range(len(model_names)):
        for j in range(i + 1, len(model_names)):
            all_pvalues[i, j] = pvs[cnt]
            cnt += 1
    all_pvalues = np.round(all_pvalues, 6)
    all_pvalues = all_pvalues[:-1, 1:]

    # rotated, bold column headers for the LaTeX table (raw strings to keep the backslashes intact)
    col_new_names = [r"\textbf{\rot{\multicolumn{1}{r}{" + n + r"}}}" for n in model_names]

    out_pd = pd.DataFrame(data=all_pvalues, index=model_names[:-1], columns=col_new_names[1:])
    # use object dtype so numeric cells can be replaced by LaTeX strings
    stack = out_pd.stack().astype(object)
    stack[(0 < stack) & (stack <= 0.001)] = r'\cellcolor{green!25}$<$0.001'

    # colour-code the remaining cells by significance level
    for i in range(stack.shape[0]):
        try:
            curr = stack.iloc[i]
            if (float(curr) > 0.0011) & (float(curr) < 0.05):
                stack.iloc[i] = r'\cellcolor{green!50}' + str(np.round(float(curr), 3))
            elif (float(curr) >= 0.05) & (float(curr) < 0.1):
                stack.iloc[i] = r'\cellcolor{red!50}' + str(np.round(float(curr), 3))
            elif float(curr) >= 0.1:
                stack.iloc[i] = r'\cellcolor{red!25}' + str(np.round(float(curr), 3))
        except Exception:
            continue

    out_pd = stack.unstack()
    out_pd = out_pd.replace(-1.0, "-")
    out_pd = out_pd.replace(-0.0, r'\cellcolor{green!25}$<$0.001')

    with open("./tukey_pvalues_result_IXI.txt", "w") as pfile:
        pfile.write("{}".format(out_pd.to_latex(escape=False, column_format="r" + "c" * all_pvalues.shape[1], bold_rows=True)))

    print(out_pd)


def study_transfer_learning_benefit():
    file_path = "/Users/andreped/Downloads/ALL_METRICS.csv"

    df = pd.read_csv(file_path, sep=";")
    df = df.iloc[:, 1:]
    df["Model"] = [x.replace("_", "-") for x in df["Model"]]

    df_tl = df[df["Experiment"] == "COMET_TL_Ft2Stp"]
    df_orig = df[df["Experiment"] == "COMET"]

    pvs = []
    for model in ["BL-N", "SG-NSD", "UW-NSD"]:
        curr_tl = df_tl[df_tl["Model"] == model]
        curr_orig = df_orig[df_orig["Model"] == model]

        TRE_tl = curr_tl["TRE"]
        TRE_orig = curr_orig["TRE"]

        # perform non-parametric hypothesis test to assess significance
        ret = stats.wilcoxon(TRE_tl, TRE_orig, alternative="less")
        pv = ret.pvalue
        pvs.append(pv)

    # false discovery rate correction of the p-values (Benjamini/Hochberg -> method="indep")
    corrected_pvs = fdrcorrection(pvs, alpha=0.05, method="indep")[1]

    print("BL-N:", corrected_pvs[0])
    print("SG-NSD:", corrected_pvs[1])
    print("UW-NSD:", corrected_pvs[2])


def post_hoc_comet():
    file_path = "/Users/andreped/Downloads/ALL_METRICS.csv"

    df = pd.read_csv(file_path, sep=";")
    df = df.iloc[:, 1:]
    df["Model"] = [x.replace("_", "-") for x in df["Model"]]

    df_tl = df[df["Experiment"] == "COMET_TL_Ft2Stp"]

    # keep only the three models of interest
    filter_ = np.array([x in ["BL-N", "SG-NSD", "UW-NSD"] for x in df_tl["Model"]])
    df_tl = df_tl[filter_]

    # Is TRE in SG-NSD significantly lower than TRE in BL-N?
    ret1 = stats.wilcoxon(
        df_tl[df_tl["Model"] == "SG-NSD"]["TRE"],
        df_tl[df_tl["Model"] == "BL-N"]["TRE"],
        alternative="less"
    )
    pv1 = ret1.pvalue

    # Is TRE in UW-NSD significantly lower than TRE in SG-NSD?
    ret2 = stats.wilcoxon(
        df_tl[df_tl["Model"] == "UW-NSD"]["TRE"],
        df_tl[df_tl["Model"] == "SG-NSD"]["TRE"],
        alternative="less"
    )
    pv2 = ret2.pvalue

    # false discovery rate correction of the p-values (Benjamini/Hochberg -> method="indep")
    pvs = [pv1, pv2]
    corrected_pvs = fdrcorrection(pvs, alpha=0.05, method="indep")[1]

    print("Seg-guiding benefit:", corrected_pvs[0])
    print("Uncertainty-weighting benefit:", corrected_pvs[1])


if __name__ == "__main__":
    print("\nComparing all contrasts in TRE of all models in the IXI dataset:")
    post_hoc_ixi()

    print("\nTransfer learning benefit (COMET):")
    study_transfer_learning_benefit()

    print("\nAssessing whether there is a benefit to segmentation-guiding and uncertainty weighting (COMET):")
    post_hoc_comet()
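
For context, the script assumes a semicolon-separated metrics file whose first column is a row index, followed by at least the "Experiment", "Model", and "TRE" columns used above. The sketch below is purely illustrative: the file name and all values are made up, only the column layout and separator are taken from the script.

    import pandas as pd

    # dummy metrics in the layout the functions above expect (values are not real results)
    dummy = pd.DataFrame({
        "Experiment": ["IXI", "IXI", "COMET", "COMET_TL_Ft2Stp"],
        "Model": ["BL_N", "SG_NSD", "BL_N", "BL_N"],
        "TRE": [2.1, 1.8, 3.4, 2.9],
    })
    # writing with the default index produces the leading index column dropped by df.iloc[:, 1:]
    dummy.to_csv("ALL_METRICS_dummy.csv", sep=";")

After pointing file_path at the real metrics file, the analyses are run with `python EvaluationScripts/statistics.py`.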