tohid.abedini committed on
Commit
75afdc2
·
1 Parent(s): 6813459

[Add] about

Browse files
Files changed (2) hide show
  1. app.py +5 -10
  2. utils.py +18 -32
app.py CHANGED
@@ -71,21 +71,16 @@ with gr.Blocks(css=custom_css) as demo:
71
  with gr.Tab("✉️ Submit"):
72
  gr.Markdown(LLM_BENCHMARKS_SUBMIT_TEXT)
73
  model_name = gr.Textbox(label="Model name")
74
- model_id = gr.Textbox(label="username/space e.g mlsb/alphafold3")
75
  contact_email = gr.Textbox(label="Contact E-Mail")
76
- challenge = gr.Radio(choices=["Persian", "Base"],label="Challenge")
77
- gr.Markdown("Either give a submission id if you submitted to the MLSB workshop or provide a link to the preprint/paper describing the method.")
78
- with gr.Row():
79
- submission_id = gr.Textbox(label="Submission ID on CMT")
80
- paper_link = gr.Textbox(label="Preprint or Paper link")
81
- architecture = gr.Dropdown(choices=["GNN", "CNN","Diffusion Model", "Physics-based", "Other"],label="Model architecture")
82
- license = gr.Dropdown(choices=["mit", "apache-2.0", "gplv2", "gplv3", "lgpl", "mozilla", "bsd", "other"],label="License")
83
  submit_btn = gr.Button("Submit")
84
 
85
- submit_btn.click(submit, inputs=[model_name, model_id, contact_email, challenge, submission_id, paper_link, architecture, license], outputs=[])
86
 
87
  gr.Markdown("""
88
- Please find more information about the challenges on [mlsb.io/#challenge](https://mlsb.io/#challenge)""")
89
 
90
  if __name__ == "__main__":
91
  demo.launch()
 
71
  with gr.Tab("✉️ Submit"):
72
  gr.Markdown(LLM_BENCHMARKS_SUBMIT_TEXT)
73
  model_name = gr.Textbox(label="Model name")
74
+ model_id = gr.Textbox(label="username/space e.g PartAI/Dorna-Llama3-8B-Instruct")
75
  contact_email = gr.Textbox(label="Contact E-Mail")
76
+ section = gr.Radio(choices=["Persian", "Base"], label="Section")
77
+ license = gr.Dropdown(choices=["llama2", "llama3", "llama3.1", "llama3.2", "cc-by-nc-4.0", "mit", "apache-2.0", "other"], label="License")
 
 
 
 
 
78
  submit_btn = gr.Button("Submit")
79
 
80
+ submit_btn.click(submit, inputs=[model_name, model_id, contact_email, section, license], outputs=[])
81
 
82
  gr.Markdown("""
83
+ Please find more information about Part DP AI on [partdp.ai](https://partdp.ai)""")
84
 
85
  if __name__ == "__main__":
86
  demo.launch()
utils.py CHANGED
@@ -153,30 +153,20 @@ The leaderboard represents a significant milestone in Persian language AI and is
153
  """
154
 
155
 
156
- LLM_BENCHMARKS_SUBMIT_TEXT = """## Submit your model
157
- Submit your model to the leaderboard using the below form AFTER following the following steps:
158
- - Create a HuggingFace account and request to join the [MLSB organization](https://huggingface.co/MLSB)
159
- - Create a new space in the MLSB organization and add your model using the inference templates: https://huggingface.co/new-space?owner=MLSB
160
- - Fill the submission form.
161
-
162
- ## Prerequisites:
163
- To qualify for submission, each team must:
164
- - Provide an MLSB submission ID (find it on CMT) or a link to a preprint/paper describing their methodology. This publication does not have to specifically report training or evaluation on the P(L)INDER dataset. Previously published methods, such as DiffDock, only need to link their existing paper. Note that entry into this competition does not equate to an MLSB workshop paper submission.
165
- - Create a copy of the provided [inference templates](https://huggingface.co/MLSB/).
166
- - Go to the top right corner of the page of the respective inference template and click on the drop-down menu (vertical ellipsis) right next to the “Community”, then select “Duplicate this space”.
167
- - Change files in the newly create space to reflect the peculiarities of your model
168
- - Edit `requirements.txt` to capture all python dependencies.
169
- - Modify the Dockerfile as appropriate (including selecting the right base image)
170
- - Include a `inference_app.py` file. This contains a `predict` function that should be modified to reflect the specifics of inference using their model.
171
- - Include a `train.py` file to ensure that training and model selection use only the Persian/Base datasets and to clearly show any additional hyperparameters used.
172
- - Provide a LICENSE file that allows for reuse, derivative works, and distribution of the provided software and weights (e.g., MIT or Apache2 license).
173
- - Submit to the leaderboard via the [form below](https://huggingface.co/spaces/MLSB/leaderboard2024).
174
- - On submission page, add reference to the newly created space in the format username/space (e.g mlsb/alphafold3). You can create the space on your personal Huggingface account and transfer it to MLSB for the submission to get a GPU assigned.
175
-
176
- After a brief technical review by our organizers we will grant you a free GPU until MLSB so that anyone can play with the model and we will run the evaluation.
177
-
178
- If you have a questions please email: [email protected]
179
- """
180
 
181
 
182
  def load_jsonl(input_file):
@@ -212,13 +202,10 @@ def apply_clickable_model(df, column_name):
212
  return df
213
 
214
 
215
- def submit(model_name, model_id, contact_email, challenge, submission_id, paper_link, architecture, license):
216
- if model_name == "" or model_id == "" or challenge == "" or architecture == "" or license == "":
217
  gr.Error("Please fill all the fields")
218
  return
219
- if submission_id == "" and paper_link == "":
220
- gr.Error("Provide either a link to a paper describing the method or a submission ID for the MLSB workshop.")
221
- return
222
  try:
223
  user_name = ""
224
  if "/" in model_id:
@@ -228,9 +215,8 @@ def submit(model_name, model_id, contact_email, challenge, submission_id, paper_
228
  eval_entry = {
229
  "model_name": model_name,
230
  "model_id": model_id,
231
- "challenge": challenge,
232
- "submission_id": submission_id,
233
- "architecture": architecture,
234
  "license": license
235
  }
236
  OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
 
153
  """
154
 
155
 
156
+ LLM_BENCHMARKS_SUBMIT_TEXT = """## Submitting a Model for Evaluation
157
+
158
+ To submit your open-source model for evaluation, follow these steps:
159
+
160
+ 1. **Ensure your model is on Hugging Face**: Your model must be publicly available on [Hugging Face](https://huggingface.co/).
161
+
162
+ 2. **Submit Request**: Send a request with your model's Hugging Face identifier.
163
+
164
+ 3. **Manual Queue**: Please note that the evaluation process is currently handled manually. Submissions will be queued and processed as soon as possible.
165
+
166
+ 4. **Results**: Once the evaluation is complete, your model’s results will be updated on the leaderboard.
167
+
168
+ We appreciate your patience and contributions to the Persian LLM ecosystem!
169
+ """
 
 
 
 
 
 
 
 
 
 
170
 
171
 
172
  def load_jsonl(input_file):
 
202
  return df
203
 
204
 
205
+ def submit(model_name, model_id, contact_email, section, license):
206
+ if model_name == "" or model_id == "" or section == "" or license == "":
207
  gr.Error("Please fill all the fields")
208
  return
 
 
 
209
  try:
210
  user_name = ""
211
  if "/" in model_id:
 
215
  eval_entry = {
216
  "model_name": model_name,
217
  "model_id": model_id,
218
+ "section": section,
219
+ "contact_email": contact_email,
 
220
  "license": license
221
  }
222
  OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"