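"""Engine module: runs optimizer comparison studies.

Dispatches a config dict either to an analytic benchmark study (Himmelblau,
Adjiman, Brent, Ackley) or to an ML task study (MNIST, CIFAR-10), runs every
requested optimizer, and returns the resulting metrics and plots.
"""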
import numpy as np
from Backend.Benchmarks import Himmelblau, Adjiman, Brent, Ackley
from Backend.optimizers import adam, SGD, Azure, RMSprop
from Backend.ML_Tasks import MNISTRunner, CIFAR10Runner
from Metrics import calculate_benchmark_metrics, calculate_ml_metrics
from Plots import plot_benchmark_surface, plot_ml_curves
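
# The Engine treats optimizers as duck-typed objects: each class in
# Backend.optimizers is assumed to accept optional keyword arguments
# (e.g. use_sa for AzureSky), to expose reset() for clearing any internal
# state, and step(x, grad) returning the updated parameter vector.
# _ExampleOptimizer is only an illustrative sketch of that assumed
# interface; it is not the real Backend.optimizers implementation and is
# not registered in Engine.optimizers below.
class _ExampleOptimizer:
    def __init__(self, lr=0.01):
        self.lr = lr

    def reset(self):
        # Plain gradient descent keeps no running state to clear.
        pass

    def step(self, x, grad):
        # One descent step: move against the gradient.
        return x - self.lr * grad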

class Engine:
    def __init__(self):
        # Display-name -> implementation-class registries used to resolve
        # the names given in a study config.
        self.benchmarks = {
            "Himmelblau": Himmelblau,
            "Adjiman": Adjiman,
            "Brent": Brent,
            "Ackley": Ackley,
        }
        self.optimizers = {
            "Adam": adam,
            "SGD": SGD,
            "AzureSky": Azure,
            "RMSprop": RMSprop,
        }
        self.ml_tasks = {"MNIST": MNISTRunner, "CIFAR-10": CIFAR10Runner}

    def run(self, config):
        """Run a study based on the provided configuration."""
        if config["mode"] == "benchmark":
            return self.run_benchmark_study(config)
        elif config["mode"] == "ml_task":
            return self.run_ml_task_study(config)
        else:
            raise ValueError(f"Invalid mode: {config['mode']}")
    def run_benchmark_study(self, config):
        """Run a benchmark study comparing multiple optimizers."""
        benchmark_class = self.benchmarks.get(config["benchmark_func"])
        if not benchmark_class:
            raise ValueError(f"Unknown benchmark: {config['benchmark_func']}")
        benchmark = benchmark_class()

        optimizers = []
        for opt_name in config["optimizers"]:
            opt_class = self.optimizers.get(opt_name)
            if not opt_class:
                raise ValueError(f"Unknown optimizer: {opt_name}")
            # Pass use_sa for AzureSky if specified
            kwargs = (
                {"use_sa": config["use_sa"]}
                if opt_name == "AzureSky" and "use_sa" in config
                else {}
            )
            optimizers.append(opt_class(**kwargs))

        # Every optimizer starts from the same random initial point so the
        # trajectories are directly comparable.
        initial_point = np.random.randn(config.get("dim", 2))
        max_iter = config.get("max_iter", 100)

        paths = []
        loss_values = []
        for opt in optimizers:
            path = []
            losses = []
            x = initial_point.copy()
            opt.reset()  # Reset optimizer state
            for _ in range(max_iter):
                grad = benchmark.grad_f(x)
                x = opt.step(x, grad)
                path.append(x.copy())
                losses.append(benchmark.f(x))
            paths.append(np.array(path))
            loss_values.append(losses)

        metrics = [
            calculate_benchmark_metrics(path[-1], benchmark.global_min, path, losses)
            for path, losses in zip(paths, loss_values)
        ]
        plot = plot_benchmark_surface(benchmark, paths, config["optimizers"])
        return {"plot": plot, "metrics": metrics, "paths": paths}
    def run_ml_task_study(self, config):
        """Run an ML task study comparing multiple optimizers."""
        task_class = self.ml_tasks.get(config["dataset"])
        if not task_class:
            raise ValueError(f"Unknown dataset: {config['dataset']}")
        task_runner = task_class()

        optimizers = []
        for opt_name in config["optimizers"]:
            opt_class = self.optimizers.get(opt_name)
            if not opt_class:
                raise ValueError(f"Unknown optimizer: {opt_name}")
            # Pass use_sa for AzureSky if specified
            kwargs = (
                {"use_sa": config["use_sa"]}
                if opt_name == "AzureSky" and "use_sa" in config
                else {}
            )
            optimizers.append(opt_class(**kwargs))

        # Train the same task once per optimizer and collect each training history.
        histories = []
        for opt in optimizers:
            history = task_runner.run(
                optimizer=opt,
                epochs=config.get("epochs", 10),
                batch_size=config.get("batch_size", 32),
                lr=config.get("lr", 0.001),
            )
            histories.append(history)

        metrics = [calculate_ml_metrics(h["train"], h["val"]) for h in histories]
        plot_acc = plot_ml_curves(
            [h["train"]["accuracy"] for h in histories],
            [h["val"]["accuracy"] for h in histories],
            config["optimizers"],
            "Accuracy",
        )
        plot_loss = plot_ml_curves(
            [h["train"]["loss"] for h in histories],
            [h["val"]["loss"] for h in histories],
            config["optimizers"],
            "Loss",
        )
        return {
            "plot_acc": plot_acc,
            "plot_loss": plot_loss,
            "metrics": metrics,
            "histories": histories,
        }
    def list_benchmarks(self):
        """Return available benchmark functions."""
        return list(self.benchmarks.keys())

    def list_optimizers(self):
        """Return available optimizers."""
        return list(self.optimizers.keys())

    def list_ml_tasks(self):
        """Return available ML tasks."""
        return list(self.ml_tasks.keys())
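
# Config keys consumed above (defaults in parentheses):
#   benchmark mode: mode="benchmark", benchmark_func, optimizers, dim (2),
#                   max_iter (100), and an optional use_sa flag for AzureSky.
#   ml_task mode:   mode="ml_task", dataset, optimizers, epochs (10),
#                   batch_size (32), lr (0.001), and an optional use_sa flag.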

if __name__ == "__main__":
    engine = Engine()

    # Example benchmark study
    config = {
        "mode": "benchmark",
        "benchmark_func": "Himmelblau",
        "optimizers": ["Adam", "AzureSky"],
        "dim": 2,
        "max_iter": 100,
        "use_sa": True,
    }
    results = engine.run(config)
    print("Benchmark Results:", results["metrics"])

    # Example ML task study
    config = {
        "mode": "ml_task",
        "dataset": "MNIST",
        "optimizers": ["Adam", "AzureSky"],
        "epochs": 5,
        "batch_size": 32,
        "lr": 0.001,
        "use_sa": True,
    }
    results = engine.run(config)
    print("ML Task Results:", results["metrics"])