jpdefrutos committed
Commit ac29da8 · 1 Parent(s): 7968536

combine_metrics_files.py
-----------------------------
Combine the metrics.csv files from the different models into a single file, to be used by the statistics.py script.

compare_ai_vs_human_annots.py
-----------------------------
Comparison between the AI- and human-made segmentations on the OSLO-COMET dataset.

EvaluationScripts/combine_metrics_files.py ADDED
@@ -0,0 +1,105 @@
+ import pandas as pd
+ import os
+ from argparse import ArgumentParser
+ import re
+
+
+ DICT_MODEL_NAMES = {'BASELINE': 'BL',
+                     'SEGGUIDED': 'SG',
+                     'UW': 'UW'}
+
+ DICT_METRICS_NAMES = {'NCC': 'N',
+                       'SSIM': 'S',
+                       'DICE': 'D',
+                       'DICE MACRO': 'D',
+                       'HD': 'H', }
+
+
+ DF_COLS = ['SSIM', 'NCC', 'MSE', 'DICE_MACRO', 'HD', 'HD95', 'Time', 'TRE', 'Experiment', 'Model']
+
+
+ def get_model_name(in_path: str) -> str:
+     model = re.search(r'((UW|SEGGUIDED|BASELINE).*)_\d', in_path)
+     if model:
+         model = model.group(1).rstrip('_')
+         model = model.replace('_Lsim', '')
+         model = model.replace('_Lseg', '')
+         model = model.replace('_L', '')
+         model = model.replace('_', ' ')
+         model = model.upper()
+         elements = model.split()
+         model = elements[0]
+         metrics = list()
+         model = DICT_MODEL_NAMES[model]
+         for m in elements[1:]:
+             if m != 'MACRO':
+                 metrics.append(DICT_METRICS_NAMES[m])
+
+         return '{}-{}'.format(model, ''.join(metrics))
+     else:
+         try:
+             model = re.search('(SyNCC|SyN)', in_path).group(1)
+         except AttributeError:
+             raise ValueError('Unknown folder name/model: ' + in_path)
+         return model
+
+
+ def find_metric_files(root_path: str, folder_filter: str) -> dict:
+     metric_files = dict()
+     starting_level = root_path.count(os.sep)
+     for r, d, f in os.walk(root_path):
+         level = r.count(os.sep) - starting_level
+         if level < 3:
+             for name in f:
+                 if 'metrics.csv' == name and folder_filter in r.split(os.sep):
+                     model = get_model_name(os.path.join(r, name))
+                     metric_files[model] = os.path.join(r, name)
+     return metric_files
+
+
+ def read_metrics_files(metrics: dict, experiment: str) -> pd.DataFrame:
+     df = pd.DataFrame(columns=DF_COLS)
+     for k in metrics.keys():
+         csv = pd.read_csv(metrics[k], sep=';')
+         csv['Experiment'] = experiment
+         csv['Model'] = k
+         df = pd.concat([df, csv[DF_COLS]], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
+     return df
+
+
+ if __name__ == '__main__':
+     parser = ArgumentParser()
+     parser.add_argument('--output-dir', help='Output directory', default='./')
+     parser.add_argument('--ixi-runs', help='Directory where the evaluation outputs are stored')
+     parser.add_argument('--comet-runs', help='Directory where the evaluation outputs are stored')
+     parser.add_argument('--comet-tl-freezenone-runs', help='Directory where the evaluation outputs are stored')
+     parser.add_argument('--comet-tl-encoder-runs', help='Directory where the evaluation outputs are stored')
+     parser.add_argument('--ants-runs', help='Directory where the evaluation outputs are stored')
+     parser.add_argument('--folder-filter', default='Evaluate')
+
+     args = parser.parse_args()
+
+     assert os.path.exists(args.ixi_runs), 'IXI directory not found'
+     assert os.path.exists(args.comet_runs), 'COMET directory not found'
+     assert os.path.exists(args.comet_tl_freezenone_runs), 'COMET TL Fine Tuned Froze None directory not found'
+     assert os.path.exists(args.comet_tl_encoder_runs), 'COMET TL Fine Tuned in 2 Steps directory not found'
+     assert os.path.exists(args.ants_runs), 'ANTs directory not found'
+
+     IXI_metrics = find_metric_files(args.ixi_runs, args.folder_filter)
+     COMET_metrics = find_metric_files(args.comet_runs, args.folder_filter)
+     COMET_TL_FTFN_metrics = find_metric_files(args.comet_tl_freezenone_runs, args.folder_filter)
+     COMET_TL_FT2S_metrics = find_metric_files(args.comet_tl_encoder_runs, args.folder_filter)
+     ANTS_metrics = find_metric_files(args.ants_runs, args.folder_filter)
+
+     IXI_df = read_metrics_files(IXI_metrics, 'IXI')
+     COMET_df = read_metrics_files(COMET_metrics, 'COMET')
+     COMET_TL_FtFn_df = read_metrics_files(COMET_TL_FTFN_metrics, 'COMET_TL_FtFn')
+     COMET_TL_Ft2S_df = read_metrics_files(COMET_TL_FT2S_metrics, 'COMET_TL_Ft2Stp')
+     ANTS_df = read_metrics_files(ANTS_metrics, 'ANTs')
+
+     df = pd.concat([IXI_df, COMET_df, COMET_TL_FtFn_df, COMET_TL_Ft2S_df, ANTS_df], ignore_index=True)
+     out_file_path = os.path.join(args.output_dir, 'Combined_metrics.csv')
+     if os.path.exists(out_file_path):
+         os.remove(out_file_path)
+     df.to_csv(out_file_path, sep=';', index=False)
+     print('Output file: ' + out_file_path)
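
For context, a minimal sketch (not part of this commit) of how the Combined_metrics.csv written above could be consumed downstream. The ';' separator and the Experiment/Model columns come from combine_metrics_files.py; the file path and the grouping shown here are only assumptions about what the statistics script might do:

import pandas as pd

# Load the combined file written by combine_metrics_files.py and summarise
# each metric per (Experiment, Model) pair. Path and aggregation are illustrative.
combined = pd.read_csv('Combined_metrics.csv', sep=';')
summary = combined.groupby(['Experiment', 'Model'])[
    ['SSIM', 'NCC', 'MSE', 'DICE_MACRO', 'HD', 'HD95', 'TRE']
].agg(['mean', 'std'])
print(summary)
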
EvaluationScripts/compare_ai_vs_human_annots.py ADDED
@@ -0,0 +1,116 @@
+ import os, sys
+ currentdir = os.path.dirname(os.path.realpath(__file__))
+ parentdir = os.path.dirname(currentdir)
+ sys.path.append(parentdir)  # PYTHON > 3.3 does not allow relative referencing
+
+ import nibabel as nib
+ import re
+ import medpy.metric as medpy_metrics
+
+ import pandas as pd
+
+ import numpy as np
+
+ from tqdm import tqdm
+
+ import multiprocessing as mp
+ IMG_DIRECTORY = '/mnt/EncryptedData1/Laparoscopy/OSLO_COMET_dataset/OSLO_COMET_CT/Volumes_nii/test_set'
+ VES_SEG_DIRECTORY = '/mnt/EncryptedData1/Laparoscopy/OSLO_COMET_dataset/OSLO_COMET_CT/Vessels'
+ PAR_SEG_DIRECTORY = '/mnt/EncryptedData1/Laparoscopy/OSLO_COMET_dataset/OSLO_COMET_CT/Parenchyma'
+
+ LM_SEG_DIRECTORY = '/mnt/EncryptedData1/Laparoscopy/OSLO_COMET_dataset/OSLO_COMET_CT/LiverMask/test_set'
+
+ nii_FILENAME_PATTERN = '(.*)_CT.nii'
+ LV_VESSEL_FILENAME_PATTERN = '(.*)-vessels.nii'
+ LV_PARENCHYMA_FILENAME_PATTERN = '(.*)-livermask.nii'
+
+ OUT_DIRECTORY = '/mnt/EncryptedData1/Users/javier/ext_datasets/COMET_ai_vs_human'
+
+
+ def process_group(file_list):
+     # print('Got: ' + ','.join(file_list))
+     img_file, par_file, ves_file, lm_par_file, lm_ves_file = file_list
+     img_num = int(re.match(nii_FILENAME_PATTERN, os.path.split(img_file)[-1])[1])
+
+     human_par = nib.load(par_file)
+     header = human_par.header
+     human_par = np.asarray(human_par.dataobj)
+     human_ves = np.asarray(nib.load(ves_file).dataobj)
+
+     lm_par = np.asarray(nib.load(lm_par_file).dataobj)
+     lm_ves = np.asarray(nib.load(lm_ves_file).dataobj)
+     lm_ves[lm_ves > 0] = 1
+
+     dsc_par = medpy_metrics.dc(human_par, lm_par)
+     dsc_ves = medpy_metrics.dc(human_ves, lm_ves)
+
+     hd_par = medpy_metrics.hd(human_par, lm_par, voxelspacing=header['pixdim'][1:4])
+     hd_ves = medpy_metrics.hd(human_ves, lm_ves, voxelspacing=header['pixdim'][1:4])
+
+     hd95_par = medpy_metrics.hd95(human_par, lm_par, voxelspacing=header['pixdim'][1:4])
+     hd95_ves = medpy_metrics.hd95(human_ves, lm_ves, voxelspacing=header['pixdim'][1:4])
+
+     return img_num, dsc_par, dsc_ves, hd_par, hd_ves, hd95_par, hd95_ves
+
+
+ if __name__ == '__main__':
+     img_list = [os.path.join(IMG_DIRECTORY, f) for f in os.listdir(IMG_DIRECTORY) if f.endswith('.nii')]
+     img_list.sort()
+
+     ves_seg_list = [os.path.join(VES_SEG_DIRECTORY, os.path.split(f)[-1]+'.gz') for f in img_list]
+     ves_seg_list.sort()
+
+     par_seg_list = [os.path.join(PAR_SEG_DIRECTORY, os.path.split(f)[-1]+'.gz') for f in img_list]
+     par_seg_list.sort()
+
+     lm_ves_seg_list = [os.path.join(LM_SEG_DIRECTORY, f) for f in os.listdir(LM_SEG_DIRECTORY) if f.endswith('-vessels.nii')]
+     lm_ves_seg_list.sort()
+
+     lm_par_seg_list = [os.path.join(LM_SEG_DIRECTORY, f) for f in os.listdir(LM_SEG_DIRECTORY) if f.endswith('-livermask.nii')]
+     lm_par_seg_list.sort()
+
+     os.makedirs(OUT_DIRECTORY, exist_ok=True)
+
+     zip_lists = zip(img_list, par_seg_list, ves_seg_list, lm_par_seg_list, lm_ves_seg_list)
+     mets = ['DSC', 'DSC', 'HD', 'HD', 'H95', 'H95']
+     segs = ['Parenchyma', 'Vessels', 'Parenchyma', 'Vessels', 'Parenchyma', 'Vessels']
+     cols = list(zip(*[mets, segs]))
+     idx = pd.MultiIndex.from_tuples(cols, names=['Metrics', 'Labels'])
+     df = pd.DataFrame(index=idx)
+
+     print('\nLaunching processes...')
+     with mp.Pool(11, maxtasksperchild=1) as p:
+         results = p.map_async(process_group, zip_lists)
+         for v in results.get():
+             df[v[0]] = v[1:]
+
+     # for i, (img_file, par_file, ves_file, lm_par_file, lm_ves_file) in tqdm(enumerate(zip_lists), total=len(img_list)):
+     #     img_name = re.match(nii_FILENAME_PATTERN, os.path.split(img_file)[-1])[1]
+     #
+     #     img = nib.load(img_file)
+     #     human_par = nib.load(par_file)
+     #     header = human_par.header
+     #     human_par = np.asarray(human_par.dataobj)
+     #     human_ves = np.asarray(nib.load(ves_file).dataobj)
+     #
+     #     lm_par = np.asarray(nib.load(lm_par_file).dataobj)
+     #     lm_ves = np.asarray(nib.load(lm_ves_file).dataobj)
+     #     lm_ves[lm_ves > 0] = 1
+     #
+     #     dsc_par = medpy_metrics.dc(human_par, lm_par)
+     #     dsc_ves = medpy_metrics.dc(human_ves, lm_ves)
+     #
+     #     hd_par = medpy_metrics.hd(human_par, lm_par, voxelspacing=header['pixdim'][1:4])
+     #     hd_ves = medpy_metrics.hd(human_ves, lm_ves, voxelspacing=header['pixdim'][1:4])
+     #
+     #     hd95_par = medpy_metrics.hd95(human_par, lm_par, voxelspacing=header['pixdim'][1:4])
+     #     hd95_ves = medpy_metrics.hd95(human_ves, lm_ves, voxelspacing=header['pixdim'][1:4])
+     #
+     #     df[i] = [dsc_par, dsc_ves, hd_par, hd_ves, hd95_par, hd95_ves]
+     print('\nResults...')
+     print(df)
+
+     print('\nSummary...')
+     print(df.describe())
+
+     df.to_csv('comparison.csv', sep=';')
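
As a usage note, the comparison table above is saved with a two-level (Metrics, Labels) row index and one column per test volume, so reading it back requires restoring that index. A minimal sketch, assuming the default 'comparison.csv' name written by the script:

import pandas as pd

# Recover the (Metrics, Labels) MultiIndex written by compare_ai_vs_human_annots.py;
# the remaining columns hold the per-volume results keyed by volume number.
comparison = pd.read_csv('comparison.csv', sep=';', index_col=[0, 1])

# Summarise each metric/label pair across volumes (volumes become rows after transposing).
print(comparison.T.describe())
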