kostis-init committed on
Commit
e67d561
·
1 Parent(s): 27b03dc

add minizinc

Browse files
Files changed (8) hide show
  1. app.py +15 -0
  2. requirements.txt +2 -1
  3. setup.sh +21 -0
  4. small_sample_submission.jsonl +5 -0
  5. src/config.py +11 -1
  6. src/eval.py +77 -5
  7. src/hf_utils.py +17 -3
  8. src/ui.py +22 -7
app.py CHANGED
@@ -1,6 +1,21 @@
 
 
 
1
  from src.ui import create_ui
2
  from src.utils import setup_directories
3
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  setup_directories()
5
 
6
  if __name__ == "__main__":
 
1
+ import os
2
+ import subprocess
3
+
4
  from src.ui import create_ui
5
  from src.utils import setup_directories
6
 
7
+
8
+ # Add MiniZinc to PATH just in case
9
+ os.environ["PATH"] = "/home/user/minizinc/bin:" + os.environ["PATH"]
10
+
11
+ # Check MiniZinc is working
12
+ try:
13
+ output = subprocess.check_output(["minizinc", "--version"], text=True)
14
+ print("MiniZinc version:", output)
15
+ except Exception as e:
16
+ print("MiniZinc not found:", e)
17
+
18
+
19
  setup_directories()
20
 
21
  if __name__ == "__main__":
requirements.txt CHANGED
@@ -14,4 +14,5 @@ tqdm
14
  transformers
15
  tokenizers>=0.15.0
16
  sentencepiece
17
- cpmpy
 
 
14
  transformers
15
  tokenizers>=0.15.0
16
  sentencepiece
17
+ cpmpy
18
+ minizinc
setup.sh ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ # Stop if any command fails
4
+ set -e
5
+
6
+ # Define version and paths
7
+ VERSION=2.9.3
8
+ MZN_DIR=/home/user/minizinc
9
+
10
+ # Download and extract MiniZinc
11
+ wget https://github.com/MiniZinc/MiniZincIDE/releases/download/${VERSION}/MiniZincIDE-${VERSION}-bundle-linux-x86_64.tgz
12
+ mkdir -p $MZN_DIR
13
+ tar -xzf MiniZincIDE-${VERSION}-bundle-linux-x86_64.tgz -C $MZN_DIR --strip-components=1
14
+ rm MiniZincIDE-${VERSION}-bundle-linux-x86_64.tgz
15
+
16
+ # Export path for the runtime
17
+ echo "export PATH=$MZN_DIR/bin:\$PATH" >> ~/.bashrc
18
+ echo "export PATH=$MZN_DIR/bin:\$PATH" >> ~/.profile
19
+ export PATH=$MZN_DIR/bin:$PATH
20
+
21
+ echo "✅ setup.sh finished running" > setup_log.txt
small_sample_submission.jsonl ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {"id": "csplib__csplib_001_car_sequencing", "model": "# Data\nat_most = [1, 2, 2, 2, 1] # The amount of times a property can be present\n# in a group of consecutive timeslots (see next variable)\nper_slots = [2, 3, 3, 5, 5] # The amount of consecutive timeslots\ndemand = [1, 1, 2, 2, 2, 2] # The demand per type of car\nrequires = [[1, 0, 1, 1, 0],\n [0, 0, 0, 1, 0],\n [0, 1, 0, 0, 1],\n [0, 1, 0, 1, 0],\n [1, 0, 1, 0, 0],\n [1, 1, 0, 0, 0]] # The properties per type of car\n# End of data\n\n# Import libraries\nfrom cpmpy import *\nimport json\n\n# Parameters\nn_cars = sum(demand) # The amount of cars to sequence\nn_options = len(at_most) # The amount of different options\nn_types = len(demand) # The amount of different car types\nrequires = cpm_array(requires) # For element constraint\n\n# Decision Variables\nsequence = intvar(0, n_types - 1, shape=n_cars, name=\"sequence\") # The sequence of car types\nsetup = boolvar(shape=(n_cars, n_options), name=\"setup\") # Sequence of different options based on the car type\n\n# Model\nmodel = Model()\n\n# The amount of each type of car in the sequence has to be equal to the demand for that type\nmodel += [sum(sequence == t) == demand[t] for t in range(n_types)]\n\n# Make sure that the options in the setup table correspond to those of the car type\nfor s in range(n_cars):\n model += [setup[s, o] == requires[sequence[s], o] for o in range(n_options)]\n\n# Check that no more than \"at most\" car options are used per \"per_slots\" slots\nfor o in range(n_options):\n for s in range(n_cars - per_slots[o]):\n slot_range = range(s, s + per_slots[o])\n model += (sum(setup[slot_range, o]) <= at_most[o])\n\n# Solve\nmodel.solve()\n\n# Print\nsolution = {\"sequence\": sequence.value().tolist()}\nprint(json.dumps(solution))\n# End of CPMPy script"}
2
+ {"id": "csplib__csplib_002_template_design", "model": "# Data\nn_slots = 9 # The amount of slots on a template\nn_templates = 2 # The amount of templates\nn_var = 7 # The amount of different variations\ndemand = [250, 255, 260, 500, 500, 800, 1100] # The demand per variation\n# End of data\n\n# Import libraries\nfrom cpmpy import *\nimport json\n\n# Parameters\nub = max(demand) # The upper bound for the production\n\n# create model\nmodel = Model()\n\n# decision variables\nproduction = intvar(1, ub, shape=n_templates, name=\"production\")\nlayout = intvar(0, n_var, shape=(n_templates, n_var), name=\"layout\")\n\n# all slots are populated in a template\nmodel += all(sum(layout[i]) == n_slots for i in range(n_templates))\n\n# meet demand\nfor var in range(n_var):\n model += sum(production * layout[:, var]) >= demand[var]\n\n# break symmetry\n# equal demand\nfor i in range(n_var - 1):\n if demand[i] == demand[i + 1]:\n model += layout[0, i] <= layout[0, i + 1]\n for j in range(n_templates - 1):\n model += (layout[j, i] == layout[j, i + 1]).implies(layout[j + 1, i] <= layout[j + 1, i + 1])\n\n# distinguish templates\nfor i in range(n_templates - 1):\n model += production[i] <= production[i + 1]\n\n# static symmetry\nfor i in range(n_var - 1):\n if demand[i] < demand[i + 1]:\n model += sum(production * layout[:, i]) <= sum(production * layout[:, i + 1])\n\n# minimize number of printed sheets\nmodel.minimize(sum(production))\n\n# Solve\nmodel.solve()\n\n# Print\nsolution = {\"production\": production.value().tolist(), \"layout\": layout.value().tolist()}\nprint(json.dumps(solution))\n# End of CPMPy script"}
3
+ {"id": "csplib__csplib_005_autocorrelation", "model": "# Data\nn = 10 # Length of the binary sequence\n# End of data\n\n# Import libraries\nfrom cpmpy import *\nimport numpy as np\nimport json\n\n\n# periodic auto correlation\ndef PAF(arr, s):\n # roll the array 's' indices\n return sum(arr * np.roll(arr, -s))\n\n\n# Decision Variables\nsequence = intvar(-1, 1, shape=n, name=\"sequence\") # binary sequence\nE = sum([PAF(sequence, s) ** 2 for s in range(1, n)]) # energy value\n\nmodel = Model()\n\n# exclude 0\nmodel += sequence != 0\n\n# minimize sum of squares\nmodel.minimize(E)\n\n# Solve\nmodel.solve()\n\n# Print\nsolution = {\"sequence\": sequence.value().tolist(), \"E\": E.value()}\nprint(json.dumps(solution))\n# End of CPMPy script"}
4
+ {"id": "csplib__csplib_006_golomb_rulers", "model": "# Data\nsize = 10 # Number of marks on the Golomb ruler\n# End of data\n\n# Import libraries\nfrom cpmpy import *\nimport json\n\n# Decision variables\nmarks = intvar(0, size * size, shape=size, name=\"marks\")\nlength = marks[-1]\n\n# Model\nmodel = Model()\n\n# first mark is 0\nmodel += (marks[0] == 0)\n\n# marks must be increasing\nmodel += marks[:-1] < marks[1:]\n\n# golomb constraint\ndiffs = [marks[j] - marks[i] for i in range(0, size - 1) for j in range(i + 1, size)]\nmodel += AllDifferent(diffs)\n\n# Symmetry breaking\nmodel += (marks[size - 1] - marks[size - 2] > marks[1] - marks[0])\nmodel += (diffs[0] < diffs[size - 1])\n\n# find optimal ruler\nmodel.minimize(length)\n\n# Solve\nmodel.solve()\n\n# Print\nsolution = {\"marks\": marks.value().tolist(), \"length\": length.value()}\nprint(json.dumps(solution))\n# End of CPMPy script"}
5
+ {"id": "csplib__csplib_007_all_interval", "model": "# Data\nn = 12 # Number of pitch-classes\n# End of data\n\n# Import libraries\nfrom cpmpy import *\nimport numpy as np\nimport json\n\n# Create the solver\nmodel = Model()\n\n# Declare variables\nx = intvar(0, n - 1, shape=n, name=\"x\") # Pitch-classes\ndiffs = intvar(1, n - 1, shape=n - 1, name=\"diffs\") # Intervals\n\n# Constraints\nmodel += [AllDifferent(x),\n AllDifferent(diffs)]\n\n# Differences between successive values\nmodel += diffs == np.abs(x[1:] - x[:-1])\n\n# Symmetry breaking\nmodel += [x[0] < x[-1]] # Mirroring array is equivalent solution\nmodel += [diffs[0] < diffs[1]] # Further symmetry breaking\n\n# Solve\nmodel.solve()\n\n# Print\nsolution = {\"x\": x.value().tolist(), \"diffs\": diffs.value().tolist()}\nprint(json.dumps(solution))\n# End of CPMPy script"}
src/config.py CHANGED
@@ -8,4 +8,14 @@ DS_SUBMISSIONS_PATH = "submissions"
8
  DS_RESULTS_PATH = "results"
9
 
10
  # leaderboard
11
- LDB_COLS = ["Submission Name", "Solution Found (%)", "Consistency (%)", "Final Solution Accuracy (%)", "# of Models submitted"]
 
 
 
 
 
 
 
 
 
 
 
8
  DS_RESULTS_PATH = "results"
9
 
10
  # leaderboard
11
+ LDB_COLS = ["Submission Name", "Solution Found (%)", "Consistency (%)", "Final Solution Accuracy (%)", "# of Models submitted"]
12
+
13
+ # modelling frameworks
14
+ CPMPY_FRAMEWORK = "CPMpy"
15
+ MINIZINC_FRAMEWORK = "MiniZinc"
16
+ ORTOOLS_FRAMEWORK = "OR-Tools"
17
+ SUPPORTED_FRAMEWORKS = [
18
+ CPMPY_FRAMEWORK,
19
+ MINIZINC_FRAMEWORK,
20
+ ORTOOLS_FRAMEWORK,
21
+ ]
src/eval.py CHANGED
@@ -1,4 +1,5 @@
1
  # eval.py
 
2
  import sys
3
  import os
4
  import time
@@ -6,6 +7,8 @@ import json
6
  import subprocess
7
  import tempfile
8
  from pathlib import Path
 
 
9
  from datasets import load_dataset # Hugging Face datasets library
10
  from huggingface_hub import HfApi, hf_hub_download, snapshot_download # For user data dataset
11
  from huggingface_hub.utils import RepositoryNotFoundError
@@ -29,7 +32,8 @@ import subprocess
29
  import threading
30
  from pathlib import Path
31
 
32
- from src.config import EVAL_SCRIPT_PATH, DATASET_REPO_ID, DS_RESULTS_PATH
 
33
 
34
 
35
  def run_evaluation(submission_path):
@@ -92,6 +96,68 @@ def extract_json_from_code_output(output: str):
92
  return None
93
 
94
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  def exec_code(code: str, timeout=10, modelling_language='cpmpy'):
96
  """
97
  Execute the given code and return the output
@@ -108,21 +174,21 @@ def exec_code(code: str, timeout=10, modelling_language='cpmpy'):
108
  os.makedirs(temp_dir, exist_ok=True)
109
 
110
  # write the code to a temporary file
111
- suffix = '.__hidden_py__' if modelling_language == "cpmpy" or modelling_language == "or-tools" else '.mzn'
112
  with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=suffix, dir=temp_dir, encoding='utf-8') as temp_file:
113
  temp_instance_path = temp_file.name
114
  temp_file.write(code)
115
 
116
  try:
117
  # execute the code
118
- if modelling_language == "cpmpy" or modelling_language == "or-tools":
119
  command = [sys.executable, temp_instance_path]
120
  result = subprocess.run(command, capture_output=True, text=True, timeout=timeout, encoding='utf-8')
121
 
122
  successfully_executed = (result.returncode == 0)
123
  output = result.stdout if successfully_executed else result.stderr
124
  timeout_occurred = False
125
- elif modelling_language == "minizinc":
126
  successfully_executed, output, timeout_occurred = exec_code_minizinc(code, timeout)
127
  else:
128
  raise ValueError(f"MODELLING_LANGUAGE not supported: {modelling_language}")
@@ -259,8 +325,14 @@ def main(
259
  submitted_models.append(json_obj)
260
  except json.JSONDecodeError as e:
261
  print(f" ERROR: Failed to parse JSON object from line: {line}. Error: {e}", flush=True)
 
 
 
 
 
262
  print(f" Loaded {len(submitted_models)} generated models.", flush=True)
263
 
 
264
  # Statistics
265
  total_submitted_models = 0
266
  models_ran_successfully = 0
@@ -287,7 +359,7 @@ def main(
287
 
288
  summary_f.write(" 1. Running submitted model...\n")
289
 
290
- succ_exec, output, timeout_occurred = exec_code(curr_model, timeout=SCRIPT_EXECUTION_TIMEOUT)
291
 
292
  if timeout_occurred:
293
  summary_f.write(f" - TIMEOUT: Execution time exceeded {SCRIPT_EXECUTION_TIMEOUT} seconds.\n")
 
1
  # eval.py
2
+ import datetime
3
  import sys
4
  import os
5
  import time
 
7
  import subprocess
8
  import tempfile
9
  from pathlib import Path
10
+
11
+ import minizinc
12
  from datasets import load_dataset # Hugging Face datasets library
13
  from huggingface_hub import HfApi, hf_hub_download, snapshot_download # For user data dataset
14
  from huggingface_hub.utils import RepositoryNotFoundError
 
32
  import threading
33
  from pathlib import Path
34
 
35
+ from src.config import EVAL_SCRIPT_PATH, DATASET_REPO_ID, DS_RESULTS_PATH, CPMPY_FRAMEWORK, ORTOOLS_FRAMEWORK, \
36
+ MINIZINC_FRAMEWORK
37
 
38
 
39
  def run_evaluation(submission_path):
 
96
  return None
97
 
98
 
99
+
100
+ def exec_code_minizinc(code: str, timeout_sec):
101
+ """
102
+ Executes a MiniZinc model string using the minizinc-python library.
103
+
104
+ :param code: The MiniZinc model code as a string.
105
+ :param timeout_sec: The maximum time to wait for the solver in seconds.
106
+ :return: A tuple of (success, output, timeout_occurred)
107
+ """
108
+ successfully_executed = False
109
+ output = ""
110
+ timeout_occurred = False
111
+ timeout_duration = datetime.timedelta(seconds=timeout_sec)
112
+
113
+ try:
114
+ # 1. Create a MiniZinc model instance
115
+ model = minizinc.Model()
116
+ model.add_string(code)
117
+
118
+ # 2. Find a default solver configured with MiniZinc
119
+ # You can be more specific, e.g., solver = minizinc.Solver.lookup("gecode")
120
+ # If the default solver isn't found or suitable, this will raise an error.
121
+ gecode = minizinc.Solver.lookup("gecode")
122
+ if gecode is None:
123
+ raise RuntimeError("No suitable solver found. Please install a MiniZinc solver.")
124
+
125
+ # 3. Create an Instance to solve
126
+ instance = minizinc.Instance(gecode, model)
127
+
128
+ # 4. Solve the instance with the specified timeout
129
+ # The solve() method handles the timeout internally.
130
+ result = instance.solve(timeout=timeout_duration)
131
+
132
+ # 5. Process the result
133
+ if result.status in {minizinc.Status.SATISFIED, minizinc.Status.OPTIMAL_SOLUTION}:
134
+ successfully_executed = True
135
+ output = str(result.solution) if result.solution is not None else ""
136
+ timeout_occurred = False
137
+ elif result.status == minizinc.Status.UNKNOWN:
138
+ successfully_executed = False
139
+ output = f"Timeout Error: Solver stopped after {timeout_sec} seconds (Status: UNKNOWN)."
140
+ timeout_occurred = True
141
+ else:
142
+ # Handle other non-success statuses (UNSAT, ERROR, etc.)
143
+ successfully_executed = False
144
+ output = f"Solving failed. Status: {result.status}"
145
+ timeout_occurred = False
146
+
147
+ except minizinc.MiniZincError as e:
148
+ # Catch MiniZinc specific errors (e.g., syntax errors, solver not found)
149
+ successfully_executed = False
150
+ output = f"MiniZinc Error: {e}"
151
+ timeout_occurred = False
152
+ except Exception as e:
153
+ # Catch other unexpected errors
154
+ successfully_executed = False
155
+ output = f"Unexpected Error during MiniZinc execution: {e}"
156
+ timeout_occurred = False
157
+
158
+ return successfully_executed, output, timeout_occurred
159
+
160
+
161
  def exec_code(code: str, timeout=10, modelling_language='cpmpy'):
162
  """
163
  Execute the given code and return the output
 
174
  os.makedirs(temp_dir, exist_ok=True)
175
 
176
  # write the code to a temporary file
177
+ suffix = '.__hidden_py__' if modelling_language == CPMPY_FRAMEWORK or modelling_language == ORTOOLS_FRAMEWORK else '.mzn'
178
  with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=suffix, dir=temp_dir, encoding='utf-8') as temp_file:
179
  temp_instance_path = temp_file.name
180
  temp_file.write(code)
181
 
182
  try:
183
  # execute the code
184
+ if modelling_language == CPMPY_FRAMEWORK or modelling_language == ORTOOLS_FRAMEWORK:
185
  command = [sys.executable, temp_instance_path]
186
  result = subprocess.run(command, capture_output=True, text=True, timeout=timeout, encoding='utf-8')
187
 
188
  successfully_executed = (result.returncode == 0)
189
  output = result.stdout if successfully_executed else result.stderr
190
  timeout_occurred = False
191
+ elif modelling_language == MINIZINC_FRAMEWORK:
192
  successfully_executed, output, timeout_occurred = exec_code_minizinc(code, timeout)
193
  else:
194
  raise ValueError(f"MODELLING_LANGUAGE not supported: {modelling_language}")
 
325
  submitted_models.append(json_obj)
326
  except json.JSONDecodeError as e:
327
  print(f" ERROR: Failed to parse JSON object from line: {line}. Error: {e}", flush=True)
328
+
329
+ # load metadata file
330
+ with open(os.path.join(local_submission_dir, submission_path_in_dataset, "metadata.json"), "r", encoding="utf-8") as f:
331
+ metadata = json.load(f)
332
+
333
  print(f" Loaded {len(submitted_models)} generated models.", flush=True)
334
 
335
+
336
  # Statistics
337
  total_submitted_models = 0
338
  models_ran_successfully = 0
 
359
 
360
  summary_f.write(" 1. Running submitted model...\n")
361
 
362
+ succ_exec, output, timeout_occurred = exec_code(curr_model, timeout=SCRIPT_EXECUTION_TIMEOUT, modelling_language=metadata["modelling_framework"])
363
 
364
  if timeout_occurred:
365
  summary_f.write(f" - TIMEOUT: Execution time exceeded {SCRIPT_EXECUTION_TIMEOUT} seconds.\n")
src/hf_utils.py CHANGED
@@ -1,11 +1,12 @@
1
  """Utilities for interacting with the Hugging Face Hub."""
2
-
 
3
  import os
4
  import shutil
5
  from pathlib import Path
 
6
  import pandas as pd
7
  from huggingface_hub import HfApi, hf_hub_download, list_repo_files
8
- from huggingface_hub.utils import RepositoryNotFoundError, HFValidationError
9
 
10
  from src.config import DATASET_REPO_ID, DS_RESULTS_PATH, DS_SUBMISSIONS_PATH, LDB_COLS
11
 
@@ -98,7 +99,7 @@ def load_leaderboard_data():
98
  return pd.DataFrame(leaderboard_entries)
99
 
100
 
101
- def upload_submission(uploaded_file, dir_name, report_file):
102
  """Upload submission to Hugging Face Dataset."""
103
  if not HF_API:
104
  return False, "Hugging Face API not initialized"
@@ -120,6 +121,19 @@ def upload_submission(uploaded_file, dir_name, report_file):
120
  repo_type="dataset",
121
  commit_message=f"Upload report for submission: {dir_name}"
122
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
123
 
124
  return True, submission_path
125
  except Exception as e:
 
1
  """Utilities for interacting with the Hugging Face Hub."""
2
+ import io
3
+ import json
4
  import os
5
  import shutil
6
  from pathlib import Path
7
+
8
  import pandas as pd
9
  from huggingface_hub import HfApi, hf_hub_download, list_repo_files
 
10
 
11
  from src.config import DATASET_REPO_ID, DS_RESULTS_PATH, DS_SUBMISSIONS_PATH, LDB_COLS
12
 
 
99
  return pd.DataFrame(leaderboard_entries)
100
 
101
 
102
+ def upload_submission(uploaded_file, dir_name, report_file, model_framework):
103
  """Upload submission to Hugging Face Dataset."""
104
  if not HF_API:
105
  return False, "Hugging Face API not initialized"
 
121
  repo_type="dataset",
122
  commit_message=f"Upload report for submission: {dir_name}"
123
  )
124
+
125
+ # create a file for metadata
126
+ metadata = {
127
+ "submission_name": dir_name,
128
+ "modelling_framework": model_framework,
129
+ }
130
+ HF_API.upload_file(
131
+ path_or_fileobj=io.BytesIO(json.dumps(metadata, indent=4).encode('utf-8')),
132
+ path_in_repo=f"{submission_path}/metadata.json",
133
+ repo_id=DATASET_REPO_ID,
134
+ repo_type="dataset",
135
+ commit_message=f"Upload metadata for submission: {dir_name}"
136
+ )
137
 
138
  return True, submission_path
139
  except Exception as e:
src/ui.py CHANGED
@@ -3,12 +3,16 @@ import json
3
  import gradio as gr
4
  from pathlib import Path
5
 
 
6
  from src.hf_utils import load_leaderboard_data, upload_submission, check_name_exists
7
  from src.eval import start_background_evaluation
8
 
9
 
10
- def handle_upload(submission_name, uploaded_file, report_file, progress=gr.Progress()):
11
  """Handle file upload and start evaluation."""
 
 
 
12
  if not uploaded_file:
13
  return "No file uploaded. Please upload a valid submission file."
14
 
@@ -46,7 +50,7 @@ def handle_upload(submission_name, uploaded_file, report_file, progress=gr.Progr
46
  if not found_one:
47
  return "Empty file. Please upload a valid JSONL file."
48
 
49
- success, result = upload_submission(uploaded_file, submission_name, report_file)
50
  if not success:
51
  return f"Upload failed: {result}"
52
 
@@ -77,7 +81,7 @@ def create_ui():
77
  "## How to Submit\n"
78
  "1. **Name your submission**: Choose a unique name for your submission (e.g., `my_cool_submission`). "
79
  "This name will be used to identify your submission on the leaderboard.\n"
80
- "2. **Upload a report (PDF)**: This is optional, but we highly encourage you to upload a report "
81
  " (in PDF format) describing your approach. As this is an open competition, we want to avoid submissions "
82
  " that just copy the models from the dataset. The report can be a short description of your approach, "
83
  " the models you generated, and any other relevant information.\n"
@@ -90,14 +94,14 @@ def create_ui():
90
  "It may take a few minutes for a submission to be evaluated and appear on the leaderboard.\n"
91
  "\n\n"
92
  "## Important Notes\n"
93
- "1. **Submission Name**: The submission name must be unique. If you try to upload a submission with a name that already exists, you will receive an error message.\n"
94
- "2. **File Format**: Ensure that the uploaded files are in the correct format. The submission file must be a `.jsonl` file, and the report must be a PDF.\n"
95
  "3. **Evaluation Script**: It is highly recommended to use the evaluation script provided [here](https://huggingface.co/spaces/kostis-init/CP-Bench-competition/blob/main/user_eval.py) to check your results before submission. You can run the script as follows:\n"
96
  " ```bash\n"
97
  " python user_eval.py --submission_file path/to/my/submission.jsonl\n"
98
  " ```\n"
99
  " This will evaluate your submission locally and print the results to the console.\n"
100
- "4. **Modelling Language**: For now, only CPMpy is supported. More languages will be added in the future.\n"
101
  "\n\n"
102
  "### If you have any questions or issues, please feel free to reach out to us TODO\n"
103
  "---\n"
@@ -113,6 +117,17 @@ def create_ui():
113
  interactive=True,
114
  info="This name will appear on the leaderboard"
115
  )
 
 
 
 
 
 
 
 
 
 
 
116
  with gr.Row():
117
  report_file = gr.File(
118
  label="Upload PDF Report (optional, but recommended)",
@@ -137,7 +152,7 @@ def create_ui():
137
  # Event handlers
138
  upload_button.click(
139
  fn=handle_upload,
140
- inputs=[submission_name, submission_file, report_file],
141
  outputs=[status_box],
142
  show_progress="full",
143
  )
 
3
  import gradio as gr
4
  from pathlib import Path
5
 
6
+ from src.config import SUPPORTED_FRAMEWORKS
7
  from src.hf_utils import load_leaderboard_data, upload_submission, check_name_exists
8
  from src.eval import start_background_evaluation
9
 
10
 
11
+ def handle_upload(submission_name, uploaded_file, report_file, model_framework, progress=gr.Progress()):
12
  """Handle file upload and start evaluation."""
13
+ if model_framework not in SUPPORTED_FRAMEWORKS:
14
+ return f"Unsupported modelling framework: {model_framework}. Supported frameworks are: {', '.join(SUPPORTED_FRAMEWORKS)}"
15
+
16
  if not uploaded_file:
17
  return "No file uploaded. Please upload a valid submission file."
18
 
 
50
  if not found_one:
51
  return "Empty file. Please upload a valid JSONL file."
52
 
53
+ success, result = upload_submission(uploaded_file, submission_name, report_file, model_framework)
54
  if not success:
55
  return f"Upload failed: {result}"
56
 
 
81
  "## How to Submit\n"
82
  "1. **Name your submission**: Choose a unique name for your submission (e.g., `my_cool_submission`). "
83
  "This name will be used to identify your submission on the leaderboard.\n"
84
+ "2. **Upload a PDF report**: This is optional, but we highly encourage you to upload a report "
85
  " (in PDF format) describing your approach. As this is an open competition, we want to avoid submissions "
86
  " that just copy the models from the dataset. The report can be a short description of your approach, "
87
  " the models you generated, and any other relevant information.\n"
 
94
  "It may take a few minutes for a submission to be evaluated and appear on the leaderboard.\n"
95
  "\n\n"
96
  "## Important Notes\n"
97
+ "1. **Submission Name**: The submission name must be different from any existing submission names.\n"
98
+ "2. **File Format**: Ensure that the uploaded files are in the correct format. The submission file must be a `.jsonl` file, and the report must be a `pdf` file.\n"
99
  "3. **Evaluation Script**: It is highly recommended to use the evaluation script provided [here](https://huggingface.co/spaces/kostis-init/CP-Bench-competition/blob/main/user_eval.py) to check your results before submission. You can run the script as follows:\n"
100
  " ```bash\n"
101
  " python user_eval.py --submission_file path/to/my/submission.jsonl\n"
102
  " ```\n"
103
  " This will evaluate your submission locally and print the results to the console.\n"
104
+ "4. **Modelling Frameworks**: Currently, the supported modelling frameworks are MiniZinc, CPMpy and OR-Tools. More frameworks will be added.\n"
105
  "\n\n"
106
  "### If you have any questions or issues, please feel free to reach out to us TODO\n"
107
  "---\n"
 
117
  interactive=True,
118
  info="This name will appear on the leaderboard"
119
  )
120
+ model_framework = gr.Dropdown(
121
+ label="Modelling Framework (required)",
122
+ choices=SUPPORTED_FRAMEWORKS,
123
+ value=None,
124
+ multiselect=False,
125
+ interactive=True,
126
+ info="Select the modelling framework used for your submission.",
127
+ allow_custom_value=False,
128
+ filterable=False,
129
+ )
130
+
131
  with gr.Row():
132
  report_file = gr.File(
133
  label="Upload PDF Report (optional, but recommended)",
 
152
  # Event handlers
153
  upload_button.click(
154
  fn=handle_upload,
155
+ inputs=[submission_name, submission_file, report_file, model_framework],
156
  outputs=[status_box],
157
  show_progress="full",
158
  )