from typing import Dict, Optional

import requests
from huggingface_hub import dataset_info, model_info
from huggingface_hub.repocard import metadata_update

from .config import HF_HUB_ALLOWED_TASKS
from .utils.logging import get_logger


logger = get_logger(__name__)

def push_to_hub(
    model_id: str,
    task_type: str,
    dataset_type: str,
    dataset_name: str,
    metric_type: str,
    metric_name: str,
    metric_value: float,
    task_name: Optional[str] = None,
    dataset_config: Optional[str] = None,
    dataset_split: Optional[str] = None,
    dataset_revision: Optional[str] = None,
    dataset_args: Optional[Dict[str, int]] = None,
    metric_config: Optional[str] = None,
    metric_args: Optional[Dict[str, int]] = None,
    overwrite: bool = False,
):
r"""
Pushes the result of a metric to the metadata of a model repository in the Hub.
Args:
model_id (`str`):
Model id from https://hf.co/models.
task_type (`str`):
Task id, refer to the [Hub allowed tasks](https://github.com/huggingface/evaluate/blob/main/src/evaluate/config.py#L154) for allowed values.
dataset_type (`str`):
Dataset id from https://hf.co/datasets.
dataset_name (`str`):
Pretty name for the dataset.
metric_type (`str`):
Metric id from https://hf.co/metrics.
metric_name (`str`):
Pretty name for the metric.
metric_value (`float`):
Computed metric value.
task_name (`str`, *optional*):
Pretty name for the task.
dataset_config (`str`, *optional*):
Dataset configuration used in [`~datasets.load_dataset`].
See [`~datasets.load_dataset`] for more info.
dataset_split (`str`, *optional*):
Name of split used for metric computation.
dataset_revision (`str`, *optional*):
Git hash for the specific version of the dataset.
dataset_args (`dict[str, int]`, *optional*):
Additional arguments passed to [`~datasets.load_dataset`].
metric_config (`str`, *optional*):
Configuration for the metric (e.g. the GLUE metric has a configuration for each subset).
metric_args (`dict[str, int]`, *optional*):
Arguments passed during [`~evaluate.EvaluationModule.compute`].
overwrite (`bool`, *optional*, defaults to `False`):
If set to `True` an existing metric field can be overwritten, otherwise
attempting to overwrite any existing fields will cause an error.
Example:
```python
>>> push_to_hub(
... model_id="huggingface/gpt2-wikitext2",
... metric_value=0.5
... metric_type="bleu",
... metric_name="BLEU",
... dataset_name="WikiText",
... dataset_type="wikitext",
... dataset_split="test",
... task_type="text-generation",
... task_name="Text Generation"
... )
```"""
    if task_type not in HF_HUB_ALLOWED_TASKS:
        raise ValueError(f"Task type not supported. Task has to be one of {HF_HUB_ALLOWED_TASKS}")
    try:
        dataset_info(dataset_type)
    except requests.exceptions.HTTPError:
        logger.warning(f"Dataset {dataset_type} not found on the Hub at hf.co/datasets/{dataset_type}")
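
    # Unlike the dataset, the model repository must exist, since its card is
    # what gets updated.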
    try:
        model_info(model_id)
    except requests.exceptions.HTTPError:
        raise ValueError(f"Model {model_id} not found on the Hub at hf.co/{model_id}")
    result = {
        "task": {
            "type": task_type,
        },
        "dataset": {
            "type": dataset_type,
            "name": dataset_name,
        },
        "metrics": [
            {
                "type": metric_type,
                "value": metric_value,
            },
        ],
    }
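
    # Attach the optional fields only when they were provided, keeping the
    # resulting metadata minimal.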
    if dataset_config is not None:
        result["dataset"]["config"] = dataset_config
    if dataset_split is not None:
        result["dataset"]["split"] = dataset_split
    if dataset_revision is not None:
        result["dataset"]["revision"] = dataset_revision
    if dataset_args is not None:
        result["dataset"]["args"] = dataset_args
    if task_name is not None:
        result["task"]["name"] = task_name
    if metric_name is not None:
        result["metrics"][0]["name"] = metric_name
    if metric_config is not None:
        result["metrics"][0]["config"] = metric_config
    if metric_args is not None:
        result["metrics"][0]["args"] = metric_args
    metadata = {"model-index": [{"results": [result]}]}

    return metadata_update(repo_id=model_id, metadata=metadata, overwrite=overwrite)
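
# For illustration (a sketch, not output of this module): for the docstring
# example above, the metadata built by this function would serialize into the
# model card's YAML header roughly as follows; `metadata_update` may merge it
# with content already present on the card:
#
#   model-index:
#   - results:
#     - task:
#         type: text-generation
#         name: Text Generation
#       dataset:
#         type: wikitext
#         name: WikiText
#         split: test
#       metrics:
#       - type: bleu
#         value: 0.5
#         name: BLEU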