KurtMica committed
Commit 2caaddf · 1 Parent(s): 1b780de

Field customisations.

app.py CHANGED
@@ -68,11 +68,11 @@ def init_leaderboard(dataframe):
             cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
             label="Select Columns to Display:",
         ),
-        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
+        search_columns=[AutoEvalColumn.model.name],
         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
         filter_columns=[
            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-           ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
+           ColumnFilter(AutoEvalColumn.maltese_training.name, type="checkboxgroup", label="Maltese training"),
            ColumnFilter(
                AutoEvalColumn.params.name,
                type="slider",
@@ -149,7 +149,7 @@ with demo:
                    model_name_textbox = gr.Textbox(label="Model name")
                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
                    model_type = gr.Dropdown(
-                       choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+                       choices=[t.to_str(" : ") for t in ModelType if t != ModelType.NK],
                        label="Model type",
                        multiselect=False,
                        value=None,
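
For reference (not part of the commit), the submission dropdown choices that the updated comprehension yields, given the new ModelType members in src/display/utils.py:

>>> [t.to_str(" : ") for t in ModelType if t != ModelType.NK]
['PT : pre-trained', 'FT : fine-tuned', 'IT : instruction-tuned']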
src/about.py CHANGED
@@ -1,33 +1,47 @@
 from dataclasses import dataclass
 from enum import Enum
 
+@dataclass
+class TaskDetails:
+    name: str
+    display_name: str = ""
+    symbol: str = ""  # emoji
+
+
+class TaskType(Enum):
+    NLU = TaskDetails("nlu", "NLU", "🧠")
+    NLG = TaskDetails("nlg", "NLG", "✍️")
+
+
 @dataclass
 class Task:
     benchmark: str
     metric: str
     col_name: str
+    task_type: TaskType
+    is_primary_metric: bool = True
 
 
 # Select your tasks here
 # ---------------------------------------------------
 class Tasks(Enum):
     # task_key in the json file, metric_key in the json file, name to display in the leaderboard
-    task0 = Task("sentiment", "f1,none", "Sentiment Analysis (F1)")
-    task1 = Task("sib200", "f1,none", "SIB200 (F1)")
-    task2 = Task("taxi1500", "f1,none", "Taxi1500 (F1)")
-    task3 = Task("maltese_news_categories", "loglikelihood,none", "Maltese News Categories (F1)")
-    task4 = Task("multi_eurlex", "loglikelihood,none", "MultiEURLEX (F1)")
-    task5 = Task("belebele", "acc,none", "Belebele (Accuracy)")
-    task6 = Task("opus100_en-mt", "bleu,none", "OPUS-100 EN→MT (BLEU)")
-    task7 = Task("opus100_en-mt", "chrf,none", "OPUS-100 EN→MT (ChrF)")
-    task8 = Task("flores200_en-mt", "bleu,none", "Flores-200 EN→MT (BLEU)")
-    task9 = Task("flores200_en-mt", "chrf,none", "Flores-200 EN→MT (ChrF)")
-    task10 = Task("webnlg", "chrf,none", "WebNLG (ChrF)")
-    task11 = Task("webnlg", "rouge,none", "WebNLG (Rouge-L)")
-    task12 = Task("eurlex_sum", "chrf,none", "EUR-Lex-Sum (ChrF)")
-    task13 = Task("eurlex_sum", "rouge,none", "EUR-Lex-Sum (Rouge-L)")
-    task14 = Task("maltese_news_headlines", "chrf,none", "Maltese News Headlines (ChrF)")
-    task15 = Task("maltese_news_headlines", "rouge,none", "Maltese News Headlines (Rouge-L)")
+    task0 = Task("sentiment", "f1,none", "Sentiment Analysis (F1)", TaskType.NLU)
+    task1 = Task("sib200", "f1,none", "SIB200 (F1)", TaskType.NLU)
+    task2 = Task("taxi1500", "f1,none", "Taxi1500 (F1)", TaskType.NLU)
+    task3 = Task("maltese_news_categories", "loglikelihood,none", "Maltese News Categories (F1)", TaskType.NLU)
+    task4 = Task("multi_eurlex", "loglikelihood,none", "MultiEURLEX (F1)", TaskType.NLU)
+    task5 = Task("belebele", "acc,none", "Belebele (Accuracy)", TaskType.NLU)
+    task6 = Task("opus100_en-mt", "bleu,none", "OPUS-100 EN→MT (BLEU)", TaskType.NLG, False)
+    task7 = Task("opus100_en-mt", "chrf,none", "OPUS-100 EN→MT (ChrF)", TaskType.NLG)
+    task8 = Task("flores200_en-mt", "bleu,none", "Flores-200 EN→MT (BLEU)", TaskType.NLG, False)
+    task9 = Task("flores200_en-mt", "chrf,none", "Flores-200 EN→MT (ChrF)", TaskType.NLG)
+    task10 = Task("webnlg", "chrf,none", "WebNLG (ChrF)", TaskType.NLG)
+    task11 = Task("webnlg", "rouge,none", "WebNLG (Rouge-L)", TaskType.NLG, False)
+    task12 = Task("eurlex_sum", "chrf,none", "EUR-Lex-Sum (ChrF)", TaskType.NLG, False)
+    task13 = Task("eurlex_sum", "rouge,none", "EUR-Lex-Sum (Rouge-L)", TaskType.NLG)
+    task14 = Task("maltese_news_headlines", "chrf,none", "Maltese News Headlines (ChrF)", TaskType.NLG, False)
+    task15 = Task("maltese_news_headlines", "rouge,none", "Maltese News Headlines (Rouge-L)", TaskType.NLG)
 
 NUM_FEWSHOT = 0 # Change with your few shot
 # ---------------------------------------------------
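
A sketch (illustrative, not part of the commit) of how the new task_type and is_primary_metric fields partition the suite: all six NLU metrics are primary, while each NLG benchmark contributes exactly one primary metric (ChrF for the two translation tasks and WebNLG, Rouge-L for EUR-Lex-Sum and the news headlines), five in all.

from collections import defaultdict

from src.about import Tasks, TaskType

# Collect the display columns that feed each per-type average.
primary_metrics = defaultdict(list)
for task in Tasks:
    if task.value.is_primary_metric:
        primary_metrics[task.value.task_type].append(task.value.col_name)

assert len(primary_metrics[TaskType.NLU]) == 6
assert len(primary_metrics[TaskType.NLG]) == 5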
src/display/utils.py CHANGED
@@ -3,7 +3,8 @@ from enum import Enum
 
 import pandas as pd
 
-from src.about import Tasks
+from src.about import Tasks, TaskType
+
 
 def fields(raw_class):
     return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"]
@@ -23,14 +24,18 @@ class ColumnContent:
 ## Leaderboard columns
 auto_eval_column_dict = []
 # Init
-auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
+auto_eval_column_dict.append(["model_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
 auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
 #Scores
-auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
+auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average (All) ⬆️", "number", True)])
+for task_type in TaskType:
+    auto_eval_column_dict.append([task_type.value.name, ColumnContent, ColumnContent(f"Average ({task_type.value.display_name}) {task_type.value.symbol}", "number", True)])
 for task in Tasks:
-    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
+    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", task.value.is_primary_metric)])
 # Model information
 auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
+auto_eval_column_dict.append(["maltese_training", ColumnContent, ColumnContent("Maltese Training", "str", False)])
+auto_eval_column_dict.append(["num_languages", ColumnContent, ColumnContent("#Languages", "number", False)])
 auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
 auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
 auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
@@ -38,7 +43,7 @@ auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub Licen
 auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
 auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
 auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model SHA", "str", False, False)])
 
 # We use make dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
@@ -62,26 +67,48 @@ class ModelDetails:
 
 
 class ModelType(Enum):
-    PT = ModelDetails(name="pretrained", symbol="🟢")
-    FT = ModelDetails(name="fine-tuned", symbol="🔶")
-    IFT = ModelDetails(name="instruction-tuned", symbol="⭕")
-    RL = ModelDetails(name="RL-tuned", symbol="🟦")
-    Unknown = ModelDetails(name="", symbol="?")
+    PT = ModelDetails(name="pre-trained", symbol="PT")
+    FT = ModelDetails(name="fine-tuned", symbol="FT")
+    IT = ModelDetails(name="instruction-tuned", symbol="IT")
+    NK = ModelDetails(name="unknown", symbol="?")
 
     def to_str(self, separator=" "):
         return f"{self.value.symbol}{separator}{self.value.name}"
 
     @staticmethod
     def from_str(type):
-        if "fine-tuned" in type or "🔶" in type:
-            return ModelType.FT
-        if "pretrained" in type or "🟢" in type:
+        type = type or ""
+        if type == "PT":
             return ModelType.PT
-        if "RL-tuned" in type or "🟦" in type:
-            return ModelType.RL
-        if "instruction-tuned" in type or "⭕" in type:
-            return ModelType.IFT
-        return ModelType.Unknown
+        if type == "FT":
+            return ModelType.FT
+        if type == "IT":
+            return ModelType.IT
+        return ModelType.NK
+
+
+class MalteseTraining(Enum):
+    NO = ModelDetails(name="none", symbol="NO")
+    PT = ModelDetails(name="pre-training", symbol="PT")
+    FT = ModelDetails(name="fine-tuning", symbol="FT")
+    IT = ModelDetails(name="instruction-tuning", symbol="IT")
+    NK = ModelDetails(name="unknown", symbol="?")
+
+    def to_str(self, separator=" "):
+        return f"{self.value.symbol}{separator}{self.value.name}"
+
+    @staticmethod
+    def from_str(type):
+        type = type or ""
+        if type == "NO":
+            return MalteseTraining.NO
+        if type == "PT":
+            return MalteseTraining.PT
+        if type == "FT":
+            return MalteseTraining.FT
+        if type == "IT":
+            return MalteseTraining.IT
+        return MalteseTraining.NK
 
 class WeightType(Enum):
     Adapter = ModelDetails("Adapter")
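
The rewritten from_str methods match the stored symbol exactly instead of substring-matching display names, and the type = type or "" guard sends a missing value to the unknown member rather than raising a TypeError on None. A minimal illustration (assumed usage, not part of the commit):

from src.display.utils import MalteseTraining, ModelType

assert ModelType.from_str("PT") is ModelType.PT
assert ModelType.from_str(None) is ModelType.NK  # the `or ""` guard handles None
assert MalteseTraining.from_str("FT") is MalteseTraining.FT
assert MalteseTraining.from_str("fine-tuning") is MalteseTraining.NK  # exact symbol match only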
src/leaderboard/read_evals.py CHANGED
@@ -1,15 +1,16 @@
 import glob
 import json
-import math
 import os
+from collections import defaultdict
 from dataclasses import dataclass
 
 import dateutil
 import numpy as np
 
 from src.display.formatting import make_clickable_model
-from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType
-from src.submission.check_validity import is_model_on_hub
+from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType, MalteseTraining
+from src.envs import TOKEN, API
+from src.submission.check_validity import is_model_on_hub, get_model_size
 
 
 @dataclass
@@ -18,12 +19,14 @@ class EvalResult:
     """
     eval_name: str # org_model_precision (uid)
     full_model: str # org/model (path on hub)
-    org: str
+    org: str
     model: str
     revision: str # commit hash, "" if main
     results: dict
     precision: Precision = Precision.Unknown
-    model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ...
+    model_type: ModelType = ModelType.NK # Pretrained, fine tuned, ...
+    maltese_training: MalteseTraining = MalteseTraining.NK # none, pre-training, ...
+    num_languages: int = None
     weight_type: WeightType = WeightType.Original # Original or Adapter
     architecture: str = "Unknown"
     license: str = "?"
@@ -39,10 +42,18 @@ class EvalResult:
         data = json.load(fp)
 
         config = data.get("config")
+        metadata = data.get("metadata")
 
-        # Precision
         precision = Precision.from_str(config.get("model_dtype"))
 
+        model_type = ModelType.from_str(metadata.get("model_type"))
+
+        maltese_training = MalteseTraining.from_str(metadata.get("maltese_training"))
+
+        num_languages = metadata.get("num_languages")
+
+        model_size = config.get("model_num_parameters")
+
         # Get model and org
         org_and_model = config.get("model_name", None)
         org_and_model = org_and_model.split("/", 1)
@@ -98,11 +109,14 @@ class EvalResult:
             org=org,
             model=model,
             results=results,
+            model_type=model_type,
+            maltese_training=maltese_training,
+            num_languages=num_languages or "?",
             precision=precision,
             revision=revision,
             still_on_hub=still_on_hub,
             architecture=architecture,
-            likes=likes,
+            likes=likes or "?",
             num_params=round(model_size / 1e9, 3),
             license=license,
         )
@@ -130,7 +144,9 @@
             "eval_name": self.eval_name, # not a column, just a save name,
             AutoEvalColumn.precision.name: self.precision.value.name,
             AutoEvalColumn.model_type.name: self.model_type.value.name,
-            AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol,
+            AutoEvalColumn.maltese_training.name: self.maltese_training.value.name,
+            AutoEvalColumn.model_symbol.name: self.model_type.value.symbol + "/" + self.maltese_training.value.symbol,
+            AutoEvalColumn.num_languages.name: self.num_languages,
             AutoEvalColumn.weight_type.name: self.weight_type.value.name,
             AutoEvalColumn.architecture.name: self.architecture,
             AutoEvalColumn.model.name: make_clickable_model(self.full_model),
@@ -142,8 +158,18 @@
             AutoEvalColumn.still_on_hub.name: self.still_on_hub,
         }
 
+        results_by_task_type = defaultdict(list)
         for task in Tasks:
-            data_dict[task.value.col_name] = self.results[task.value.benchmark]
+            result = self.results[task.value.benchmark]
+            data_dict[task.value.col_name] = result
+            if task.value.is_primary_metric:
+                results_by_task_type[task.value.task_type].append(result)
+        results_averages = []
+        for task_type, task_type_results in results_by_task_type.items():
+            average = sum([score for score in task_type_results if score is not None]) / len(task_type_results)
+            data_dict[getattr(AutoEvalColumn, task_type.value.name).name] = average
+            results_averages.append(average)
+        data_dict[AutoEvalColumn.average.name] = np.mean(results_averages) if len(results_averages) > 1 else results_averages[0]
 
         return data_dict
 
@@ -192,7 +218,6 @@ def get_raw_eval_results(results_path: str, requests_path: str) -> list[EvalResu
     for model_result_filepath in model_result_filepaths:
         # Creation of result
         eval_result = EvalResult.init_from_json_file(model_result_filepath)
-        eval_result.update_with_request_file(requests_path)
 
         # Store results of same eval together
         eval_name = eval_result.eval_name
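
init_from_json_file now reads a top-level metadata block alongside config. A hypothetical minimal results payload, shown as the dict the code expects (keys taken from the diff, values invented):

data = {
    "config": {
        "model_dtype": "torch.float16",      # -> Precision.from_str
        "model_name": "org/model",           # split into org and model
        "model_num_parameters": 7000000000,  # -> num_params, rounded to billions
    },
    "metadata": {
        "model_type": "PT",                  # -> ModelType.from_str
        "maltese_training": "FT",            # -> MalteseTraining.from_str
        "num_languages": 2,
    },
}

The to_dict averaging then weights the two task types equally: primary metrics are averaged within NLU and within NLG, and Average (All) is the mean of those two averages rather than of all sixteen columns. Worked numbers (invented): six NLU primary scores averaging 0.60 and five NLG primary scores of [0.4, 0.4, 0.4, 0.4, None] give Average (NLG) = 1.6 / 5 = 0.32, since a None score is dropped from the sum but still counted in the denominator, and Average (All) = (0.60 + 0.32) / 2 = 0.46.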
src/submission/check_validity.py CHANGED
@@ -1,8 +1,7 @@
 import json
 import os
-import re
 from collections import defaultdict
-from datetime import datetime, timedelta, timezone
+from typing import Any
 
 import huggingface_hub
 from huggingface_hub import ModelCard
@@ -10,6 +9,7 @@ from huggingface_hub.hf_api import ModelInfo
 from transformers import AutoConfig
 from transformers.models.auto.tokenization_auto import AutoTokenizer
 
+
 def check_model_card(repo_id: str) -> tuple[bool, str]:
     """Checks if the model card and license exist and have been filled"""
     try:
@@ -31,7 +31,7 @@ def check_model_card(repo_id: str) -> tuple[bool, str]:
 
     return True, ""
 
-def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str]:
+def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, str, Any]:
     """Checks if the model model_name is on the hub, and whether it (and its tokenizer) can be loaded with AutoClasses."""
     try:
         config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token)
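
The widened return type means callers unpack three values; the diff does not show the updated return statements, but the third element is presumably the config loaded inside the function. A hedged calling sketch (the unpacking convention is an assumption):

# Assumption: the third element is the AutoConfig loaded by is_model_on_hub.
still_on_hub, error_msg, config = is_model_on_hub(
    model_name="org/model", revision="main", token=None, test_tokenizer=True
)
if not still_on_hub:
    print(error_msg)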