Nathan12 committed
Commit 699c62d · Parent: 14e480c

first commit

Files changed (3)
  1. .ipynb_checkpoints/app-checkpoint.py +286 -0
  2. app.py +286 -0
  3. requirements.txt +4 -0
.ipynb_checkpoints/app-checkpoint.py ADDED
@@ -0,0 +1,286 @@
from fasterbench.benchmark import *
import torch
import gradio as gr
import os
import plotly

# %% ../nbs/00_benchmark.ipynb 5
import torch
import time
from codecarbon import OfflineEmissionsTracker
import numpy as np
import os
from thop import profile, clever_format
from tqdm.notebook import tqdm
from prettytable import PrettyTable
from torchprofile import profile_macs

# %% ../nbs/00_benchmark.ipynb 7
def get_model_size(model, temp_path="temp_model.pth"):
    torch.save(model.state_dict(), temp_path)
    model_size = os.path.getsize(temp_path)
    os.remove(temp_path)

    return model_size

# %% ../nbs/00_benchmark.ipynb 8
def get_num_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


# %% ../nbs/00_benchmark.ipynb 11
@torch.inference_mode()
def evaluate_cpu_speed(model, dummy_input, warmup_rounds=50, test_rounds=100):
    device = torch.device("cpu")
    model.eval()
    model.to(device)
    dummy_input = dummy_input.to(device)

    # Warm up CPU
    for _ in range(warmup_rounds):
        _ = model(dummy_input)

    # Measure latency
    latencies = []
    for _ in range(test_rounds):
        start_time = time.perf_counter()
        _ = model(dummy_input)
        end_time = time.perf_counter()
        latencies.append(end_time - start_time)

    latencies = np.array(latencies) * 1000  # Convert to milliseconds
    mean_latency = np.mean(latencies)
    std_latency = np.std(latencies)

    # Measure throughput
    throughput = dummy_input.size(0) * 1000 / mean_latency  # Inferences per second

    return mean_latency, std_latency, throughput

# %% ../nbs/00_benchmark.ipynb 13
@torch.inference_mode()
def get_model_macs(model, inputs) -> int:
    return profile_macs(model, inputs)


# %% ../nbs/00_benchmark.ipynb 16
@torch.inference_mode()
def evaluate_emissions(model, dummy_input, warmup_rounds=50, test_rounds=100):
    device = torch.device("cpu")
    model.eval()
    model.to(device)
    dummy_input = dummy_input.to(device)

    # Warm up CPU
    for _ in range(warmup_rounds):
        _ = model(dummy_input)

    # Measure emissions over the test rounds
    tracker = OfflineEmissionsTracker(country_iso_code="USA")
    tracker.start()
    for _ in range(test_rounds):
        _ = model(dummy_input)
    tracker.stop()
    total_emissions = tracker.final_emissions
    total_energy_consumed = tracker.final_emissions_data.energy_consumed

    # Calculate average emissions and energy consumption per inference
    average_emissions_per_inference = total_emissions / test_rounds
    average_energy_per_inference = total_energy_consumed / test_rounds

    return average_emissions_per_inference, average_energy_per_inference

# %% ../nbs/00_benchmark.ipynb 18
@torch.inference_mode()
def benchmark(model, dummy_input):
    # Model size
    print('disk size')
    disk_size = get_model_size(model)
    #num_parameters = get_num_parameters(model)

    # CPU speed
    print('cpu speed')
    cpu_latency, cpu_std_latency, cpu_throughput = evaluate_cpu_speed(model, dummy_input)

    # Model MACs
    #macs = get_model_macs(model, dummy_input)
    print('macs')
    macs, params = profile(model, inputs=(dummy_input, ))
    macs, num_parameters = clever_format([macs, params], "%.3f")

    # Emissions
    print('emissions')
    avg_emissions, avg_energy = evaluate_emissions(model, dummy_input)

    # Print results
    print(f"Model Size: {disk_size / 1e6:.2f} MB (disk), {num_parameters} parameters")
    print(f"CPU Latency: {cpu_latency:.3f} ms (± {cpu_std_latency:.3f} ms)")
    print(f"CPU Throughput: {cpu_throughput:.2f} inferences/sec")
    print(f"Model MACs: {macs}")
    print(f"Average Carbon Emissions per Inference: {avg_emissions*1e3:.6f} gCO2e")
    print(f"Average Energy Consumption per Inference: {avg_energy*1e3:.6f} Wh")

    return {
        'disk_size': disk_size,
        'num_parameters': num_parameters,
        'cpu_latency': cpu_latency,
        'cpu_throughput': cpu_throughput,
        'macs': macs,
        'avg_emissions': avg_emissions,
        'avg_energy': avg_energy
    }

def parse_metric_value(value_str):
    """Convert string values with units (K, M, G) to a float expressed in millions."""
    if isinstance(value_str, (int, float)):
        return float(value_str)

    value_str = str(value_str)
    if 'G' in value_str:
        return float(value_str.replace('G', '')) * 1000  # Convert G to M
    elif 'M' in value_str:
        return float(value_str.replace('M', ''))  # Keep in M
    elif 'K' in value_str:
        return float(value_str.replace('K', '')) / 1000  # Convert K to M
    else:
        return float(value_str)

def create_radar_plot(benchmark_results):
    import plotly.graph_objects as go

    # Define metrics with icons, hover text format, and units
    metrics = {
        '💾': {  # Storage icon
            'value': benchmark_results['disk_size'] / 1e6,
            'hover_format': 'Model Size: {:.2f} MB',
            'unit': 'MB'
        },
        '🧮': {  # Calculator icon for parameters
            'value': parse_metric_value(benchmark_results['num_parameters']),
            'hover_format': 'Parameters: {:.2f}M',
            'unit': 'M'
        },
        '⏱️': {  # Clock icon for latency
            'value': benchmark_results['cpu_latency'],
            'hover_format': 'Latency: {:.2f} ms',
            'unit': 'ms'
        },
        '⚡': {  # Lightning bolt for MACs
            'value': parse_metric_value(benchmark_results['macs']),
            'hover_format': 'MACs: {:.2f}G',
            'unit': 'G'
        },
        '🔋': {  # Battery icon for energy
            'value': benchmark_results['avg_energy'] * 1e6,
            'hover_format': 'Energy: {:.3f} mWh',
            'unit': 'mWh'
        }
    }

    # Reference min and max values for each metric
    reference_values = {
        '💾': {'min': 0, 'max': max(metrics['💾']['value'], 1000)},  # Model size (MB)
        '🧮': {'min': 0, 'max': max(metrics['🧮']['value'], 50)},    # Parameters (M)
        '⏱️': {'min': 0, 'max': max(metrics['⏱️']['value'], 200)},   # Latency (ms)
        '⚡': {'min': 0, 'max': max(metrics['⚡']['value'], 5000)},   # MACs (G)
        '🔋': {'min': 0, 'max': max(metrics['🔋']['value'], 10)}     # Energy (mWh)
    }

    # Normalize values and create hover text
    normalized_values = []
    hover_texts = []
    labels = []

    for icon, metric in metrics.items():
        # Min-max normalization
        normalized_value = (metric['value'] - reference_values[icon]['min']) / \
                           (reference_values[icon]['max'] - reference_values[icon]['min'])
        normalized_values.append(normalized_value)

        # Create hover text with the actual value
        hover_texts.append(metric['hover_format'].format(metric['value']))
        labels.append(icon)

    # Add the first values again to close the polygon
    normalized_values.append(normalized_values[0])
    hover_texts.append(hover_texts[0])
    labels.append(labels[0])

    fig = go.Figure()

    fig.add_trace(go.Scatterpolar(
        r=normalized_values,
        theta=labels,
        fill='toself',
        name='Model Metrics',
        hovertext=hover_texts,
        hoverinfo='text',
        line=dict(color='#FF8C00'),         # Bright orange
        fillcolor='rgba(255, 140, 0, 0.3)'  # Semi-transparent orange
    ))

    fig.update_layout(
        polar=dict(
            radialaxis=dict(
                visible=True,
                range=[0, 1],
                showticklabels=False,                  # Hide radial axis labels
                gridcolor='rgba(128, 128, 128, 0.5)',  # Semi-transparent grey grid lines
                linecolor='rgba(128, 128, 128, 0.5)'   # Semi-transparent grey axis lines
            ),
            angularaxis=dict(
                tickfont=dict(size=24),                # Icon labels
                gridcolor='rgba(128, 128, 128, 0.5)'   # Semi-transparent grey grid lines
            ),
            bgcolor='rgba(0,0,0,0)'                    # Transparent background
        ),
        showlegend=False,
        margin=dict(t=100, b=100, l=100, r=100),
        paper_bgcolor='rgba(0,0,0,0)',                 # Transparent background
        plot_bgcolor='rgba(0,0,0,0)'                   # Transparent background
    )

    return fig

def benchmark_interface(model_name):
    import torchvision.models as models

    model_mapping = {
        'ResNet18': models.resnet18(pretrained=True),
        'ResNet50': models.resnet50(pretrained=True),
        'MobileNetV2': models.mobilenet_v2(pretrained=True),
        'EfficientNet-B0': models.efficientnet_b0(pretrained=True),
        'VGG16': models.vgg16(pretrained=True),
        'DenseNet121': models.densenet121(pretrained=True)
    }

    model = model_mapping[model_name]
    dummy_input = torch.randn(1, 3, 224, 224)

    # Run benchmark
    results = benchmark(model, dummy_input)

    # Create radar plot
    plot = create_radar_plot(results)

    return plot

available_models = ['ResNet18', 'ResNet50', 'MobileNetV2', 'EfficientNet-B0', 'VGG16', 'DenseNet121']

iface = gr.Interface(
    fn=benchmark_interface,
    inputs=[
        gr.Dropdown(choices=available_models, label="Select Model", value='ResNet18')
    ],
    outputs=[
        gr.Plot(label="Model Benchmark Results")
    ],
    title="FasterAI Model Benchmark",
    description="Select a pre-trained PyTorch model to visualize its performance metrics."
)

iface.launch()
app.py ADDED
@@ -0,0 +1,286 @@
Identical to .ipynb_checkpoints/app-checkpoint.py above: the same 286 lines are added verbatim.
requirements.txt ADDED
@@ -0,0 +1,4 @@
fasterbench
torch
plotly
codecarbon
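As a usage reference (not part of the commit): a minimal sketch of driving the benchmarking helpers from app.py directly, without the Gradio UI. It assumes benchmark() and create_radar_plot() are already in scope (for example, by placing this below their definitions in app.py) and that the packages the script imports beyond requirements.txt (gradio, thop, torchprofile, torchvision, numpy, prettytable, tqdm) are installed.

import torch
import torchvision.models as models

# Hypothetical standalone run, assuming benchmark() and create_radar_plot()
# from app.py are defined in this scope.
model = models.resnet18(pretrained=True)   # any torchvision classifier works here
dummy_input = torch.randn(1, 3, 224, 224)  # a single 224x224 RGB image

results = benchmark(model, dummy_input)    # prints the metrics and returns a dict
fig = create_radar_plot(results)           # Plotly radar chart of the same metrics
fig.write_html("benchmark_radar.html")     # or fig.show() in a notebook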