Andrea Seveso committed
Commit 1e1adf6 · 1 parent: 9df8442

Remove weight type

Files changed (1):
  1. app.py +28 -23
app.py CHANGED

@@ -21,7 +21,6 @@ from src.display.utils import (
     AutoEvalColumn,
     ModelType,
     fields,
-    WeightType,
     Precision
 )
 from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO, TOKEN
@@ -32,7 +31,8 @@ from src.submission.submit import add_new_eval
 def restart_space():
     API.restart_space(repo_id=REPO_ID)
 
-### Space initialisation
+
+# Space initialisation
 try:
     print(EVAL_REQUESTS_PATH)
     snapshot_download(
@@ -49,7 +49,8 @@ except Exception:
     restart_space()
 
 
-LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
+LEADERBOARD_DF = get_leaderboard_df(
+    EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
 
 (
     finished_eval_queue_df,
@@ -57,6 +58,7 @@ LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS,
     pending_eval_queue_df,
 ) = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
 
+
 def init_leaderboard(dataframe):
     if dataframe is None or dataframe.empty:
         raise ValueError("Leaderboard DataFrame is empty or None.")
@@ -64,15 +66,20 @@ def init_leaderboard(dataframe):
         value=dataframe,
         datatype=[c.type for c in fields(AutoEvalColumn)],
         select_columns=SelectColumns(
-            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
-            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
+            default_selection=[c.name for c in fields(
+                AutoEvalColumn) if c.displayed_by_default],
+            cant_deselect=[c.name for c in fields(
+                AutoEvalColumn) if c.never_hidden],
             label="Select Columns to Display:",
         ),
-        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
+        search_columns=[AutoEvalColumn.model.name,
+                        AutoEvalColumn.license.name],
         hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
         filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
+            ColumnFilter(AutoEvalColumn.model_type.name,
+                         type="checkboxgroup", label="Model types"),
+            ColumnFilter(AutoEvalColumn.precision.name,
+                         type="checkboxgroup", label="Precision"),
             ColumnFilter(
                 AutoEvalColumn.params.name,
                 type="slider",
@@ -104,7 +111,8 @@ with demo:
         with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
             with gr.Column():
                 with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+                    gr.Markdown(EVALUATION_QUEUE_TEXT,
+                                elem_classes="markdown-text")
 
                 with gr.Column():
                     with gr.Accordion(
@@ -142,14 +150,17 @@ with demo:
                         row_count=5,
                     )
             with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+                gr.Markdown("# ✉️✨ Submit your model here!",
+                            elem_classes="markdown-text")
 
             with gr.Row():
                 with gr.Column():
                     model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+                    revision_name_textbox = gr.Textbox(
+                        label="Revision commit", placeholder="main")
                     model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+                        choices=[t.to_str(" : ")
+                                 for t in ModelType if t != ModelType.Unknown],
                         label="Model type",
                         multiselect=False,
                         value=None,
@@ -158,20 +169,15 @@ with demo:
 
                 with gr.Column():
                     precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
+                        choices=[i.value.name for i in Precision if i !=
+                                 Precision.Unknown],
                         label="Precision",
                         multiselect=False,
                         value="float16",
                         interactive=True,
                     )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+                    base_model_name_textbox = gr.Textbox(
+                        label="Base model (for delta or adapter weights)")
 
                 submit_button = gr.Button("Submit Eval")
                 submission_result = gr.Markdown()
@@ -182,7 +188,6 @@ with demo:
                         base_model_name_textbox,
                         revision_name_textbox,
                         precision,
-                        weight_type,
                         model_type,
                     ],
                     submission_result,
@@ -201,4 +206,4 @@ with demo:
 scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=1800)
 scheduler.start()
-demo.queue(default_concurrency_limit=40).launch()
+demo.queue(default_concurrency_limit=40).launch()
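
For context, a minimal self-contained sketch of how the submission form reads after this commit, assembled from the hunks above. The ModelType/Precision choice lists and the add_new_eval body are stand-in stubs here (in the Space they come from src.display.utils and src.submission.submit), and model_name_textbox as the first click input is assumed from the standard leaderboard template; only the remaining inputs and submission_result appear in the diff itself. Note that the inputs list no longer carries weight_type, matching the removed dropdown.

# Sketch of the post-commit submission form; stand-in values are marked inline.
import gradio as gr


# Stub standing in for src.submission.submit.add_new_eval (hypothetical body);
# after this commit the callback takes no weight_type argument.
def add_new_eval(model, base_model, revision, precision, model_type):
    return f"Queued {model} (revision {revision or 'main'}, {precision}, {model_type})"


with gr.Blocks() as demo:
    gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
    with gr.Row():
        with gr.Column():
            model_name_textbox = gr.Textbox(label="Model name")
            revision_name_textbox = gr.Textbox(
                label="Revision commit", placeholder="main")
            model_type = gr.Dropdown(
                # stand-in for [t.to_str(" : ") for t in ModelType if t != ModelType.Unknown]
                choices=["🟢 : pretrained", "🔶 : fine-tuned"],
                label="Model type",
                multiselect=False,
                value=None,
                interactive=True,
            )
        with gr.Column():
            precision = gr.Dropdown(
                # stand-in for [i.value.name for i in Precision if i != Precision.Unknown]
                choices=["float16", "bfloat16"],
                label="Precision",
                multiselect=False,
                value="float16",
                interactive=True,
            )
            base_model_name_textbox = gr.Textbox(
                label="Base model (for delta or adapter weights)")

    submit_button = gr.Button("Submit Eval")
    submission_result = gr.Markdown()
    submit_button.click(
        add_new_eval,
        [
            model_name_textbox,  # assumed first input, as in the template
            base_model_name_textbox,
            revision_name_textbox,
            precision,
            model_type,  # weight_type removed by this commit
        ],
        submission_result,
    )

demo.queue(default_concurrency_limit=40).launch()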