Hugging Face Hub check fix for PEFT models.
src/leaderboard/read_evals.py
CHANGED
@@ -70,8 +70,17 @@ class EvalResult:
 
         revision = config.get("model_sha", config.get("model_revision", "main"))
 
+        model_args = {
+            **dict({tuple(arg.split("=")) for arg in config.get("model_args", "").split(",") if len(arg) > 0}),
+            "revision": revision,
+            "trust_remote_code": True,
+            "cache_dir": None
+        }
+        base_model = None
+        if "pretrained" in model_args:
+            base_model = model_args.pop("pretrained")
         still_on_hub, _, model_config = is_model_on_hub(
-            full_model,
+            base_model or full_model, model_args, test_tokenizer=False, token=TOKEN,
         )
         architecture = "?"
         if model_config is not None:
@@ -82,7 +91,7 @@ class EvalResult:
         likes = 0
         if still_on_hub:
             try:
-                model_info = API.model_info(repo_id=full_model, revision=revision)
+                model_info = API.model_info(repo_id=full_model, revision=revision, token=TOKEN)
                 if not model_size:
                     model_size = get_model_size(model_info=model_info, precision=precision)
                 license = model_info.cardData.get("license")
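The core of this change is the block above in `read_evals.py`: the stored `model_args` string is parsed into a dict and, when it contains a `pretrained` entry (the PEFT/adapter case), that entry is popped and used as the repo to check on the Hub instead of the adapter itself. A minimal, self-contained sketch of that parsing, assuming a harness-style `pretrained=...,peft=...` string; the repo ids below are placeholders, not taken from this repo:

```python
# Sketch only: reproduces the parsing added above on a made-up config entry.
config = {
    "model_args": "pretrained=some-org/base-model,peft=some-org/lora-adapter,dtype=float16",
    "model_sha": "main",
}
revision = config.get("model_sha", config.get("model_revision", "main"))

model_args = {
    **dict({tuple(arg.split("=")) for arg in config.get("model_args", "").split(",") if len(arg) > 0}),
    "revision": revision,
    "trust_remote_code": True,
    "cache_dir": None,
}

base_model = None
if "pretrained" in model_args:
    # For a PEFT run, "pretrained" names the base model; that is what gets
    # looked up on the Hub, while the remaining kwargs are forwarded as-is.
    base_model = model_args.pop("pretrained")

print(base_model)          # some-org/base-model
print(sorted(model_args))  # ['cache_dir', 'dtype', 'peft', 'revision', 'trust_remote_code']
```

As in the diff, each comma-separated item is assumed to contain exactly one `=`; values containing `,` or `=` would break the parse. `revision`, `trust_remote_code` and `cache_dir` are then layered on top before the dict is handed to `is_model_on_hub`.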
src/submission/check_validity.py
CHANGED
@@ -31,13 +31,14 @@ def check_model_card(repo_id: str) -> tuple[bool, str]:
 
     return True, ""
 
-def is_model_on_hub(model_name: str,
+def is_model_on_hub(model_name: str, model_args: dict = None, token: str = None, test_tokenizer=False) -> tuple[bool, str, Any]:
     """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
+    model_args = model_args or {}
     try:
-        config = AutoConfig.from_pretrained(model_name,
+        config = AutoConfig.from_pretrained(model_name, token=token, **model_args)
         if test_tokenizer:
             try:
-                tk = AutoTokenizer.from_pretrained(model_name,
+                tk = AutoTokenizer.from_pretrained(model_name, token=token, **model_args)
             except ValueError as e:
                 return (
                     False,
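With the new signature, `is_model_on_hub` forwards an optional `model_args` dict (plus `token`) straight into `AutoConfig.from_pretrained` and, when `test_tokenizer=True`, into `AutoTokenizer.from_pretrained`. A hedged example of a call site; the import path follows the layout shown in this diff, and the token source is an assumption:

```python
# Hypothetical caller of the updated helper.
import os

from src.submission.check_validity import is_model_on_hub

TOKEN = os.environ.get("HF_TOKEN")  # assumption: Hub token provided via env var

on_hub, error, _ = is_model_on_hub(
    "some-org/base-model",  # placeholder repo id
    model_args={"revision": "main", "trust_remote_code": True, "cache_dir": None},
    token=TOKEN,
    test_tokenizer=True,    # also try to load the tokenizer, as submit.py does
)
if not on_hub:
    print(error)
```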
src/submission/submit.py
CHANGED
@@ -39,18 +39,21 @@ def add_new_eval(
     if model_type is None or model_type == "":
         return styled_error("Please select a model type.")
 
+    model_args = {}
+
     # Does the model actually exist?
     if revision == "":
         revision = "main"
+    model_args["revision"] = revision
 
     # Is the model on the hub?
     if weight_type in ["Delta", "Adapter"]:
-        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model,
+        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, model_args=model_args, token=TOKEN, test_tokenizer=True)
         if not base_model_on_hub:
             return styled_error(f'Base model "{base_model}" {error}')
 
     if not weight_type == "Adapter":
-        model_on_hub, error, _ = is_model_on_hub(model_name=model,
+        model_on_hub, error, _ = is_model_on_hub(model_name=model, model_args=model_args, token=TOKEN, test_tokenizer=True)
         if not model_on_hub:
             return styled_error(f'Model "{model}" {error}')
 
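On the submission side, only the base model is checked when `weight_type` is `Delta` or `Adapter`, and the submitted repo itself is skipped for adapters; the user-supplied `revision` travels through `model_args`. An illustration-only sketch of that branch, with placeholder repo ids and a hypothetical wrapper name:

```python
# Illustration only: mirrors the Hub-existence checks in add_new_eval above.
import os

from src.submission.check_validity import is_model_on_hub

TOKEN = os.environ.get("HF_TOKEN")  # assumption: Hub token from the environment

def validate_submission(model: str, base_model: str, weight_type: str, revision: str = "main"):
    model_args = {"revision": revision}

    # Adapter/Delta submissions: the base model must load (config + tokenizer).
    if weight_type in ["Delta", "Adapter"]:
        ok, error, _ = is_model_on_hub(model_name=base_model, model_args=model_args, token=TOKEN, test_tokenizer=True)
        if not ok:
            return f'Base model "{base_model}" {error}'

    # The submitted repo itself is only checked for non-adapter weights.
    if not weight_type == "Adapter":
        ok, error, _ = is_model_on_hub(model_name=model, model_args=model_args, token=TOKEN, test_tokenizer=True)
        if not ok:
            return f'Model "{model}" {error}'

    return "ok"

print(validate_submission("some-org/lora-adapter", "some-org/base-model", "Adapter"))
```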