Commit b680a21 ("fix") · 1 Parent(s): 708612d

Files changed:
- __init__.py (+0 -0)
- app.py (+9 -10)
__init__.py · DELETED (file without changes)

app.py · CHANGED
```diff
@@ -16,7 +16,7 @@ from optimum_benchmark import (
 )
 from optimum_benchmark.logging_utils import setup_logging
 
-from
+from config_store import (
     get_process_config,
     get_inference_config,
     get_openvino_config,
@@ -45,13 +45,13 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
     token = oauth_token.token
 
     create_repo(repo_id, token=token, repo_type="dataset", exist_ok=True)
-    gr.Info(f'
+    gr.Info(f'Created repository "{repo_id}" on the Hub.')
 
     configs = {
         "process": {},
         "inference": {},
-        "openvino": {},
         "pytorch": {},
+        "openvino": {},
     }
 
     for key, value in kwargs.items():
@@ -74,23 +74,22 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
 
     configs["process"] = ProcessConfig(**configs.pop("process"))
     configs["inference"] = InferenceConfig(**configs.pop("inference"))
-
-    configs["openvino"] = OVConfig(
+    configs["pytorch"] = PyTorchConfig(
         task=task,
         model=model,
         device=DEVICE,
-        **configs["openvino"],
+        **configs["pytorch"],
     )
-    configs["pytorch"] = PyTorchConfig(
+    configs["openvino"] = OVConfig(
         task=task,
         model=model,
         device=DEVICE,
-        **configs["pytorch"],
+        **configs["openvino"],
     )
 
     outputs = {
-        "openvino": "Running benchmark for OpenVINO backend",
         "pytorch": "Running benchmark for PyTorch backend",
+        "openvino": "Running benchmark for OpenVINO backend",
     }
 
     yield tuple(outputs[b] for b in BACKENDS)
@@ -124,7 +123,7 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
     except Exception:
         gr.Error(f"Error while running benchmark for {backend}")
 
-        outputs[backend] = f"\n{traceback.format_exc()}"
+        outputs[backend] = f"\n```python\n{traceback.format_exc()}```"
 
         yield tuple(outputs[b] for b in BACKENDS)
 
```
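The import switch in the first hunk lines up with the deleted `__init__.py`: with the package marker gone, `config_store` is imported as a plain top-level module sitting next to `app.py`. A minimal sketch of the assumed Space layout (inferred from the diff, not shown in it):

```python
# Assumed repository layout after this commit (hypothetical, inferred
# from the new import and the deleted __init__.py):
#
#   app.py           # Gradio entry point
#   config_store.py  # defines get_process_config, get_inference_config, ...
#
# A Gradio Space runs app.py from the repository root, which puts the
# root on sys.path, so a flat absolute import resolves without a package:
from config_store import (
    get_process_config,
    get_inference_config,
    get_openvino_config,
)
```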
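The pytorch/openvino reordering is applied consistently to the `configs` literal, the config construction, and the `outputs` literal. Since results are yielded in `BACKENDS` order, keeping every literal in that same order makes the code easier to scan; a small sketch, assuming `BACKENDS = ["pytorch", "openvino"]` (the constant is not shown in this diff):

```python
# Hypothetical value: the diff never shows BACKENDS, only that outputs
# are yielded in its order.
BACKENDS = ["pytorch", "openvino"]

outputs = {
    "pytorch": "Running benchmark for PyTorch backend",
    "openvino": "Running benchmark for OpenVINO backend",
}

# One value per backend, in BACKENDS order. Dict lookups are by key, so
# the literal's order is purely cosmetic; the reorder just keeps the
# source consistent with the iteration order below.
print(tuple(outputs[b] for b in BACKENDS))
# ('Running benchmark for PyTorch backend',
#  'Running benchmark for OpenVINO backend')
```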
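The last hunk wraps the captured traceback in a fenced `python` code block. If the string lands in a Markdown-rendering component (as a Gradio `gr.Markdown` output would, an assumption here), the fence keeps the traceback's newlines and indentation verbatim instead of letting Markdown reflow them. A minimal sketch:

```python
import traceback

def render_failure() -> str:
    # Mirrors the commit's formatting: a fenced code block keeps the
    # traceback intact when the string is rendered as Markdown.
    return f"\n```python\n{traceback.format_exc()}```"

try:
    raise RuntimeError("benchmark failed")  # stand-in for a backend error
except Exception:
    print(render_failure())
```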