Allanatrix committed on
Commit
37eab4f
·
verified ·
1 Parent(s): 4ed8573

Update App.py

Browse files
Files changed (1) hide show
  1. App.py +167 -158
App.py CHANGED
@@ -1,158 +1,167 @@
1
- import gradio as gr
2
- import json
3
- import pandas as pd
4
- from Engine import Engine
5
-
6
- def run_study(mode, benchmark_func, optimizers, dim, dataset, epochs, batch_size, lr, use_sa, sa_temp, sa_cooling_rate):
7
- if not optimizers:
8
- raise gr.Error("Please select at least one optimizer.")
9
- if mode == "Benchmark Optimization" and not benchmark_func:
10
- raise gr.Error("Please select a benchmark function.")
11
- if mode == "ML Task Training" and not dataset:
12
- raise gr.Error("Please select a dataset.")
13
-
14
- config = {
15
- 'mode': 'benchmark' if mode == 'Benchmark Optimization' else 'ml_task',
16
- 'benchmark_func': benchmark_func,
17
- 'optimizers': optimizers,
18
- 'dim': int(dim),
19
- 'dataset': dataset,
20
- 'epochs': int(epochs),
21
- 'batch_size': int(batch_size),
22
- 'lr': float(lr),
23
- 'use_sa': use_sa if 'AzureSky' in optimizers else None,
24
- 'sa_temp': float(sa_temp) if 'AzureSky' in optimizers and use_sa else None,
25
- 'sa_cooling_rate': float(sa_cooling_rate) if 'AzureSky' in optimizers and use_sa else None,
26
- 'max_iter': 100
27
- }
28
- runner = Engine()
29
- results = runner.run(config)
30
-
31
- if config['mode'] == 'benchmark':
32
- metrics_df = pd.DataFrame(results['metrics'], index=config['optimizers'])
33
- return results['plot'], None, metrics_df, results['metrics'], json.dumps(results, indent=2), "Study completed successfully."
34
- else:
35
- metrics_df = pd.DataFrame(results['metrics'], index=config['optimizers'])
36
- return results['plot_acc'], results['plot_loss'], metrics_df, results['metrics'], json.dumps(results, indent=2), "Study completed successfully."
37
-
38
- def export_results(results_json):
39
- return results_json, "results.json"
40
-
41
- def toggle_azure_settings(optimizers):
42
- return gr.update(visible='AzureSky' in optimizers)
43
-
44
- with gr.Blocks(theme=gr.themes.Soft(), title="Nexa R&D Studio", css="""
45
- .gr-button { margin-top: 10px; }
46
- .gr-box { border-radius: 8px; }
47
- .status-message { color: green; font-weight: bold; }
48
- """) as app:
49
- gr.Markdown("""
50
- # Nexa R&D Studio
51
- A visual research tool for comparing and evaluating optimizers on benchmark functions and ML tasks.
52
- Select a mode, configure your study, and analyze results with interactive plots and metrics.
53
- """)
54
-
55
- with gr.Tabs() as tabs:
56
- with gr.TabItem("Study Configuration"):
57
- mode = gr.Radio(
58
- ['Benchmark Optimization', 'ML Task Training'],
59
- label='Study Mode',
60
- value='Benchmark Optimization',
61
- info='Choose between optimizing benchmark functions or training on ML datasets.'
62
- )
63
-
64
- with gr.Row():
65
- with gr.Column():
66
- optimizers = gr.CheckboxGroup(
67
- ['AzureSky', 'Adam', 'SGD', 'AdamW', 'RMSprop'],
68
- label='Optimizers',
69
- info='Select optimizers to compare. AzureSky includes a Simulated Annealing option.'
70
- )
71
- with gr.Accordion("AzureSky Ablation Settings", open=False, visible=False) as azure_settings:
72
- use_sa = gr.Checkbox(
73
- label='Enable Simulated Annealing (AzureSky)',
74
- value=True,
75
- info='Toggle Simulated Annealing for AzureSky optimizer.'
76
- )
77
- sa_temp = gr.Number(
78
- label='Initial SA Temperature',
79
- value=1.0,
80
- minimum=0.1,
81
- info='Controls exploration in Simulated Annealing (higher = more exploration).'
82
- )
83
- sa_cooling_rate = gr.Number(
84
- label='SA Cooling Rate',
85
- value=0.95,
86
- minimum=0.1,
87
- maximum=0.99,
88
- info='Rate at which SA temperature decreases (closer to 1 = slower cooling).'
89
- )
90
-
91
- with gr.Column():
92
- with gr.Group(visible=True) as benchmark_tab:
93
- benchmark_func = gr.Dropdown(
94
- ['Himmelblau', 'Ackley', 'Adjiman', 'Brent'],
95
- label='Benchmark Function',
96
- info='Select a mathematical function to optimize.'
97
- )
98
- dim = gr.Number(
99
- label='Dimensionality',
100
- value=2,
101
- minimum=2,
102
- info='Number of dimensions for the benchmark function.'
103
- )
104
- with gr.Group(visible=False) as ml_task_tab:
105
- dataset = gr.Dropdown(
106
- ['MNIST', 'CIFAR-10'],
107
- label='Dataset',
108
- info='Select a dataset for ML training.'
109
- )
110
- epochs = gr.Number(
111
- label='Epochs',
112
- value=10,
113
- minimum=1,
114
- info='Number of training epochs.'
115
- )
116
- batch_size = gr.Number(
117
- label='Batch Size',
118
- value=32,
119
- minimum=1,
120
- info='Number of samples per training batch.'
121
- )
122
- lr = gr.Number(
123
- label='Learning Rate',
124
- value=0.001,
125
- minimum=0,
126
- info='Learning rate for optimizers.'
127
- )
128
-
129
- run_button = gr.Button('Run Study', variant='primary')
130
-
131
- with gr.TabItem("Results"):
132
- status_message = gr.Markdown("Configure and run a study to view results.", elem_classes=["status-message"])
133
- with gr.Row():
134
- plot1 = gr.Plot(label='Main Plot (Benchmark or Accuracy)')
135
- plot2 = gr.Plot(label='Loss Plot (ML Mode)')
136
- metrics_df = gr.Dataframe(label='Metrics Table', headers=['Optimizer'] + [
137
- 'distance', 'final_loss', 'convergence_rate',
138
- 'final_train_acc', 'final_val_acc', 'generalization_gap',
139
- 'final_train_loss', 'final_val_loss', 'best_epoch'
140
- ])
141
- metrics_json = gr.JSON(label='Detailed Metrics')
142
- export_data = gr.State()
143
- export_button = gr.Button('Export Results as JSON')
144
- export_file = gr.File(label='Download Results')
145
-
146
- def toggle_tabs(mode):
147
- return gr.update(visible=mode == 'Benchmark Optimization'), gr.update(visible=mode == 'ML Task Training')
148
-
149
- mode.change(toggle_tabs, inputs=mode, outputs=[benchmark_tab, ml_task_tab])
150
- optimizers.change(toggle_azure_settings, inputs=optimizers, outputs=azure_settings)
151
- run_button.click(
152
- run_study,
153
- inputs=[mode, benchmark_func, optimizers, dim, dataset, epochs, batch_size, lr, use_sa, sa_temp, sa_cooling_rate],
154
- outputs=[plot1, plot2, metrics_df, metrics_json, export_data, status_message]
155
- )
156
- export_button.click(export_results, inputs=[export_data], outputs=[export_file, gr.File()])
157
-
158
- app.launch()
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import json
3
+ import pandas as pd
4
+ import os
5
+ from Engine import Engine
6
+
7
def run_study(mode, benchmark_func, optimizers, dim, dataset, epochs, batch_size, lr, use_sa, sa_temp, sa_cooling_rate):
    """Validate UI inputs, build an Engine config, run the study, and return
    the six values expected by the Results tab.

    Returns:
        (main_plot, loss_plot_or_None, metrics_df, metrics_dict,
         results_json, status_message)

    Raises:
        gr.Error: when no optimizer, benchmark function, or dataset is chosen.
    """
    # A CheckboxGroup normally yields a list, but guard against a single
    # value (or None) arriving from a programmatic call.
    if not isinstance(optimizers, list):
        optimizers = [optimizers] if optimizers else []

    if not optimizers:
        raise gr.Error("Please select at least one optimizer.")
    if mode == "Benchmark Optimization" and not benchmark_func:
        raise gr.Error("Please select a benchmark function.")
    if mode == "ML Task Training" and not dataset:
        raise gr.Error("Please select a dataset.")

    azure_selected = 'AzureSky' in optimizers
    config = {
        'mode': 'benchmark' if mode == 'Benchmark Optimization' else 'ml_task',
        'benchmark_func': benchmark_func,
        'optimizers': optimizers,
        'dim': int(dim),
        'dataset': dataset,
        # Compare against None, not truthiness: an explicit 0 (falsy) must
        # not be silently replaced by the default — the lr field allows
        # minimum=0, so `if lr` would override a deliberate 0.0.
        'epochs': int(epochs) if epochs is not None else 10,
        'batch_size': int(batch_size) if batch_size is not None else 32,
        'lr': float(lr) if lr is not None else 0.001,
        # SA settings only apply to AzureSky; otherwise pass None.
        'use_sa': use_sa if azure_selected else None,
        'sa_temp': float(sa_temp) if azure_selected and use_sa else None,
        'sa_cooling_rate': float(sa_cooling_rate) if azure_selected and use_sa else None,
        'max_iter': 100
    }
    runner = Engine()
    results = runner.run(config)

    # One metrics row per optimizer; identical for both modes, so build once.
    metrics_df = pd.DataFrame(results['metrics'], index=config['optimizers'])
    # NOTE(review): assumes everything in `results` (including the plot
    # objects) is JSON-serializable — confirm against Engine's output.
    results_json = json.dumps(results, indent=2)
    status = "Study completed successfully."

    if config['mode'] == 'benchmark':
        return results['plot'], None, metrics_df, results['metrics'], results_json, status
    return results['plot_acc'], results['plot_loss'], metrics_df, results['metrics'], results_json, status
42
+
43
def export_results(results_json):
    """Write the study's JSON results to a temp file for download.

    A gr.File output needs a filesystem path, not raw text; the previous
    implementation returned the JSON string itself, which does not produce
    a usable download. Returns a (path, path) pair so both File outputs
    wired to the export button receive the same real file.
    """
    import tempfile  # local import: only needed when exporting

    path = os.path.join(tempfile.gettempdir(), "results.json")
    with open(path, "w", encoding="utf-8") as f:
        # Export an empty JSON object when no study has been run yet.
        f.write(results_json if results_json else "{}")
    return path, path
45
+
46
def toggle_azure_settings(optimizers):
    """Show the AzureSky settings accordion only when 'AzureSky' is selected.

    Accepts the CheckboxGroup value as a list, a single string, or None.
    """
    if optimizers is None:
        selected = []
    elif isinstance(optimizers, str):
        selected = [optimizers]
    else:
        selected = optimizers
    return gr.update(visible='AzureSky' in selected)
50
+
51
# ---- UI layout: one configuration tab, one results tab ----
with gr.Blocks(theme=gr.themes.Soft(), title="Nexa R&D Studio", css="""
.gr-button { margin-top: 10px; }
.gr-box { border-radius: 8px; }
.status-message { color: green; font-weight: bold; }
""") as app:
    gr.Markdown("""
# Nexa R&D Studio
A visual research tool for comparing and evaluating optimizers on benchmark functions and ML tasks.
Select a mode, configure your study, and analyze results with interactive plots and metrics.
""")

    with gr.Tabs() as tabs:
        with gr.TabItem("Study Configuration"):
            # Top-level switch; drives visibility of the two config groups below.
            mode = gr.Radio(
                ['Benchmark Optimization', 'ML Task Training'],
                label='Study Mode',
                value='Benchmark Optimization',
                info='Choose between optimizing benchmark functions or training on ML datasets.'
            )

            with gr.Row():
                with gr.Column():
                    optimizers = gr.CheckboxGroup(
                        ['AzureSky', 'Adam', 'SGD', 'AdamW', 'RMSprop'],
                        label='Optimizers',
                        info='Select optimizers to compare. AzureSky includes a Simulated Annealing option.'
                    )
                    # Hidden until 'AzureSky' is checked (see optimizers.change below).
                    with gr.Accordion("AzureSky Ablation Settings", open=False, visible=False) as azure_settings:
                        use_sa = gr.Checkbox(
                            label='Enable Simulated Annealing (AzureSky)',
                            value=True,
                            info='Toggle Simulated Annealing for AzureSky optimizer.'
                        )
                        sa_temp = gr.Number(
                            label='Initial SA Temperature',
                            value=1.0,
                            minimum=0.1,
                            info='Controls exploration in Simulated Annealing (higher = more exploration).'
                        )
                        sa_cooling_rate = gr.Number(
                            label='SA Cooling Rate',
                            value=0.95,
                            minimum=0.1,
                            maximum=0.99,
                            info='Rate at which SA temperature decreases (closer to 1 = slower cooling).'
                        )

                with gr.Column():
                    # Shown only in 'Benchmark Optimization' mode.
                    with gr.Group(visible=True) as benchmark_tab:
                        benchmark_func = gr.Dropdown(
                            ['Himmelblau', 'Ackley', 'Adjiman', 'Brent'],
                            label='Benchmark Function',
                            info='Select a mathematical function to optimize.'
                        )
                        dim = gr.Number(
                            label='Dimensionality',
                            value=2,
                            minimum=2,
                            info='Number of dimensions for the benchmark function.'
                        )
                    # Shown only in 'ML Task Training' mode.
                    with gr.Group(visible=False) as ml_task_tab:
                        dataset = gr.Dropdown(
                            ['MNIST', 'CIFAR-10'],
                            label='Dataset',
                            info='Select a dataset for ML training.'
                        )
                        epochs = gr.Number(
                            label='Epochs',
                            value=10,
                            minimum=1,
                            info='Number of training epochs.'
                        )
                        batch_size = gr.Number(
                            label='Batch Size',
                            value=32,
                            minimum=1,
                            info='Number of samples per training batch.'
                        )
                        lr = gr.Number(
                            label='Learning Rate',
                            value=0.001,
                            minimum=0,
                            info='Learning rate for optimizers.'
                        )

            run_button = gr.Button('Run Study', variant='primary')

        with gr.TabItem("Results"):
            status_message = gr.Markdown("Configure and run a study to view results.", elem_classes=["status-message"])
            with gr.Row():
                # plot1 holds the benchmark surface or accuracy curve;
                # plot2 is only populated in ML mode (loss curve).
                plot1 = gr.Plot(label='Main Plot (Benchmark or Accuracy)')
                plot2 = gr.Plot(label='Loss Plot (ML Mode)')
            # Header superset covers both modes; unused columns stay empty.
            metrics_df = gr.Dataframe(label='Metrics Table', headers=['Optimizer'] + [
                'distance', 'final_loss', 'convergence_rate',
                'final_train_acc', 'final_val_acc', 'generalization_gap',
                'final_train_loss', 'final_val_loss', 'best_epoch'
            ])
            metrics_json = gr.JSON(label='Detailed Metrics')
            # Holds the raw results JSON between the run and the export click.
            export_data = gr.State()
            export_button = gr.Button('Export Results as JSON')
            export_file = gr.File(label='Download Results')

    def toggle_tabs(mode):
        # Exactly one of the two config groups is visible at a time.
        return gr.update(visible=mode == 'Benchmark Optimization'), gr.update(visible=mode == 'ML Task Training')

    # ---- Event wiring ----
    mode.change(toggle_tabs, inputs=mode, outputs=[benchmark_tab, ml_task_tab])
    optimizers.change(toggle_azure_settings, inputs=optimizers, outputs=azure_settings)
    run_button.click(
        run_study,
        inputs=[mode, benchmark_func, optimizers, dim, dataset, epochs, batch_size, lr, use_sa, sa_temp, sa_cooling_rate],
        outputs=[plot1, plot2, metrics_df, metrics_json, export_data, status_message]
    )
    # NOTE(review): the gr.File() instantiated inline here is never rendered
    # in the layout — confirm this second output is intended, or whether
    # outputs should target export_file only.
    export_button.click(export_results, inputs=[export_data], outputs=[export_file, gr.File()])

# Configure launch based on environment.
# NOTE(review): Hugging Face Spaces conventionally sets SPACE_ID / SPACE_HOST
# rather than HF_SPACE — confirm this variable is actually set in deployment.
is_huggingface = os.getenv("HF_SPACE") is not None
app.launch(share=is_huggingface, server_name="0.0.0.0" if is_huggingface else None)