kostis-init committed on
Commit 27b03dc · 1 Parent(s): d1131a9

add user eval

Files changed (4)
  1. src/eval.py +1 -1
  2. src/hf_utils.py +31 -17
  3. src/ui.py +62 -10
  4. user_eval.py +293 -0
src/eval.py CHANGED
@@ -231,7 +231,7 @@ def main(
        print(f" CRITICAL ERROR - Failed to download submission files: {e_download}", flush=True)
        return 1

-     # 2. Load ground-truth dataset (remains the same)
+     # 2. Load ground-truth dataset
    print(f" Loading ground-truth dataset '{GT_DATASET_NAME}'...", flush=True)
    try:
        gt_dataset = load_dataset(GT_DATASET_NAME, split="train", trust_remote_code=True)
src/hf_utils.py CHANGED
@@ -36,31 +36,39 @@ def load_leaderboard_data():
        if f.endswith("summary.txt") and f.startswith(DS_RESULTS_PATH + "/")
    ]
    summary_files.sort(reverse=True)
-
-     for file_path in summary_files:
+
+     submissions = [
+         f for f in repo_files
+         if f.endswith("submission.jsonl") and f.startswith(DS_SUBMISSIONS_PATH + "/")
+     ]
+
+     # for file_path in summary_files:
+     for file_path in submissions:
        dir_name = Path(file_path).parent.name
        if dir_name in processed_result_dirs:
            continue
-
+
        processed_result_dirs.add(dir_name)
-         entry = {LDB_COLS[0]: dir_name, LDB_COLS[1]: 'N/A', LDB_COLS[2]: 'N/A', LDB_COLS[3]: 'N/A', LDB_COLS[4]: 0}
-
+         entry = {LDB_COLS[0]: dir_name,
+                  LDB_COLS[1]: 'In Progress...',
+                  LDB_COLS[2]: 'In Progress...',
+                  LDB_COLS[3]: 'In Progress...',
+                  LDB_COLS[4]: 'In Progress...'}
+
+         # check if summary file exists, otherwise skip
+         if f"{DS_RESULTS_PATH}/{dir_name}/summary.txt" not in repo_files:
+             leaderboard_entries.append(entry)
+             continue
+
        # Download summary file
        temp_dir = os.path.join("temp_hf_downloads", dir_name)
        local_summary_path = hf_hub_download(
            repo_id=DATASET_REPO_ID,
-             filename=file_path,
+             filename=f"{DS_RESULTS_PATH}/{dir_name}/summary.txt",
            repo_type="dataset",
            local_dir=temp_dir,
        )
-
-         # Count files
-         files_in_result_dir = [
-             f for f in repo_files
-             if f.startswith(f"{DS_RESULTS_PATH}/{dir_name}/") and not f.endswith("/")
-         ]
-
-         # Parse score from summary
+
        if Path(local_summary_path).exists():
            with open(local_summary_path, "r", encoding="utf-8") as f:
                for line in f:
@@ -90,15 +98,13 @@ def load_leaderboard_data():
    return pd.DataFrame(leaderboard_entries)


- def upload_submission(uploaded_file, dir_name):
+ def upload_submission(uploaded_file, dir_name, report_file):
    """Upload submission to Hugging Face Dataset."""
    if not HF_API:
        return False, "Hugging Face API not initialized"

    try:
        submission_path = f"{DS_SUBMISSIONS_PATH}/{dir_name}"
-
-         # file_name = os.path.basename(uploaded_file.name)
        HF_API.upload_file(
            path_or_fileobj=uploaded_file,
            path_in_repo=f"{submission_path}/submission.jsonl",
@@ -106,6 +112,14 @@ def upload_submission(uploaded_file, dir_name):
            repo_type="dataset",
            commit_message=f"Upload submission: {dir_name}"
        )
+         if report_file:
+             HF_API.upload_file(
+                 path_or_fileobj=report_file,
+                 path_in_repo=f"{submission_path}/report.pdf",
+                 repo_id=DATASET_REPO_ID,
+                 repo_type="dataset",
+                 commit_message=f"Upload report for submission: {dir_name}"
+             )

        return True, submission_path
    except Exception as e:
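
A quick illustration of the updated interface, not part of the commit: `upload_submission` now takes a third `report_file` argument and only uploads `report.pdf` when one is provided. This is a minimal sketch, assuming `src/hf_utils.py` is importable and `HF_API` is initialized with a write token; the local file paths are placeholders.

```python
# Hypothetical usage sketch (assumes HF_API and DATASET_REPO_ID are configured
# in src/hf_utils.py and the local files exist).
from src.hf_utils import upload_submission

# With a report: submission.jsonl and report.pdf both land under
# DS_SUBMISSIONS_PATH/my_cool_submission/ in the dataset repo.
ok, path_or_err = upload_submission("local/submission.jsonl", "my_cool_submission", "local/report.pdf")

# Without a report: report_file is falsy, so only submission.jsonl is uploaded.
ok, path_or_err = upload_submission("local/submission.jsonl", "my_cool_submission", None)
print(ok, path_or_err)
```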
src/ui.py CHANGED
@@ -7,11 +7,14 @@ from src.hf_utils import load_leaderboard_data, upload_submission, check_name_ex
from src.eval import start_background_evaluation


- def handle_upload(submission_name, uploaded_file, progress=gr.Progress()):
+ def handle_upload(submission_name, uploaded_file, report_file, progress=gr.Progress()):
    """Handle file upload and start evaluation."""
    if not uploaded_file:
        return "No file uploaded. Please upload a valid submission file."

+     if report_file and not report_file.name.endswith(".pdf"):
+         return "Invalid report format. Please upload a PDF file."
+
    # normalize the submission name
    submission_name = submission_name.strip().replace(" ", "_").lower()
    # keep only alphanumeric characters and underscores, restrict to 30 characters
@@ -43,7 +46,7 @@ def handle_upload(submission_name, uploaded_file, progress=gr.Progress()):
        if not found_one:
            return "Empty file. Please upload a valid JSONL file."

-         success, result = upload_submission(uploaded_file, submission_name)
+         success, result = upload_submission(uploaded_file, submission_name, report_file)
        if not success:
            return f"Upload failed: {result}"
@@ -53,7 +56,11 @@ def handle_upload(submission_name, uploaded_file, progress=gr.Progress()):
        start_background_evaluation(result)

        progress(1.0, "Process complete")
-         return f"Upload complete. Evaluation started for: {submission_name}. Refresh the leaderboard to see results. Do not worry if the leaderboard does not update immediately; it may take some time for the results to appear."
+         return (
+             f"✅ Submission '{submission_name}' uploaded successfully!\n"
+             f"Do not worry if the leaderboard does not update immediately; "
+             f"it may take some time for the results to appear (around 5-10 minutes). "
+             f"Feel free to close the tab and check back later.")

    except Exception as e:
        return f"Error processing upload: {str(e)}"
@@ -61,8 +68,40 @@ def handle_upload(submission_name, uploaded_file, progress=gr.Progress()):

def create_ui():
    """Create and return Gradio UI."""
-     with gr.Blocks(title="CP-Bench Leaderboard") as demo:
+     with gr.Blocks(title="Welcome to the CP-Bench leaderboard!") as demo:
        gr.Markdown("# CP-Bench Leaderboard")
+         gr.Markdown(
+             "This leaderboard is designed to evaluate LLM-generated constraint models for the problems "
+             "in the [CP-Bench](https://huggingface.co/datasets/kostis-init/CP-Bench) dataset."
+             "\n\n"
+             "## How to Submit\n"
+             "1. **Name your submission**: Choose a unique name for your submission (e.g., `my_cool_submission`). "
+             "This name will be used to identify your submission on the leaderboard.\n"
+             "2. **Upload a report (PDF)**: This is optional, but we highly encourage you to upload a report "
+             " (in PDF format) describing your approach. As this is an open competition, we want to avoid submissions "
+             " that just copy the models from the dataset. The report can be a short description of your approach, "
+             " the models you generated, and any other relevant information.\n"
+             "3. **Upload your submission**: Upload a **single** `.jsonl` file containing the generated models. "
+             " **Each line in the file should be a JSON object with two keys: `id` and `model`.**\n"
+             " * `id`: The ID of the problem exactly as it appears in the original dataset (e.g., `csplib__csplib_001_car_sequencing`).\n"
+             " * `model`: The generated model for the problem (as a string representing runnable code). Make sure that it eventually outputs the solution as a json with key(s) as described in the `decision_variables` entry and values as would be expected in the problem. This is part of the evaluation as well: unexpected keys or value types are considered incorrect. This is because our automatic evaluation is based on the solution printed by the submitted models.\n"
+             " * An example submission file can be found [here](https://huggingface.co/spaces/kostis-init/CP-Bench-competition/blob/main/sample_submission.jsonl).\n"
+             "4. **Check the leaderboard**: After uploading, you can check the leaderboard to see your results. "
+             "It may take a few minutes for a submission to be evaluated and appear on the leaderboard.\n"
+             "\n\n"
+             "## Important Notes\n"
+             "1. **Submission Name**: The submission name must be unique. If you try to upload a submission with a name that already exists, you will receive an error message.\n"
+             "2. **File Format**: Ensure that the uploaded files are in the correct format. The submission file must be a `.jsonl` file, and the report must be a PDF.\n"
+             "3. **Evaluation Script**: It is highly recommended to use the evaluation script provided [here](https://huggingface.co/spaces/kostis-init/CP-Bench-competition/blob/main/user_eval.py) to check your results before submission. You can run the script as follows:\n"
+             " ```bash\n"
+             " python user_eval.py --submission_file path/to/my/submission.jsonl\n"
+             " ```\n"
+             " This will evaluate your submission locally and print the results to the console.\n"
+             "4. **Modelling Language**: For now, only CPMpy is supported. More languages will be added in the future.\n"
+             "\n\n"
+             "### If you have any questions or issues, please feel free to reach out to us TODO\n"
+             "---\n"
+         )

        with gr.Row():
            with gr.Column(scale=1):
@@ -74,18 +113,31 @@ def create_ui():
                    interactive=True,
                    info="This name will appear on the leaderboard"
                )
-                 upload_button = gr.UploadButton("Click to Upload Submission", file_count="single")
+                 with gr.Row():
+                     report_file = gr.File(
+                         label="Upload PDF Report (optional, but recommended)",
+                         file_types=[".pdf"],
+                         file_count="single",
+                         interactive=True,
+                     )
+                     submission_file = gr.File(
+                         label="Upload Submission File (required, .jsonl)",
+                         file_types=[".jsonl"],
+                         file_count="single",
+                         interactive=True,
+                     )
+                 upload_button = gr.Button("Click to Upload Submission")
                status_box = gr.Textbox(label="Status", interactive=False)

-             with gr.Column(scale=3):
+             with gr.Column(scale=2):
                gr.Markdown("## 🏆 Results Leaderboard")
-                 leaderboard = gr.DataFrame(value=load_leaderboard_data, label="Leaderboard", interactive=False)
+                 leaderboard = gr.DataFrame(value=load_leaderboard_data, interactive=False)
                refresh_button = gr.Button("🔄 Refresh Leaderboard")

        # Event handlers
-         upload_button.upload(
+         upload_button.click(
            fn=handle_upload,
-             inputs=[submission_name, upload_button],
+             inputs=[submission_name, submission_file, report_file],
            outputs=[status_box],
            show_progress="full",
        )
@@ -96,4 +148,4 @@ def create_ui():
            outputs=[leaderboard]
        )

-     return demo
+     return demo
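
To make the submission format described in the UI text concrete, here is an illustrative snippet (not part of the commit) that writes a one-entry `submission.jsonl` with the `id`/`model` schema. The CPMpy model and its printed key are made-up placeholders; in a real submission the printed keys must match the problem's `decision_variables` entry.

```python
# Illustrative only: build a minimal submission.jsonl matching the id/model schema.
import json

entry = {
    # problem id exactly as it appears in CP-Bench (example id from the UI text)
    "id": "csplib__csplib_001_car_sequencing",
    # runnable CPMpy code as a string; 'x' is a placeholder variable name,
    # real submissions must print the keys listed in decision_variables
    "model": (
        "from cpmpy import *\n"
        "import json\n"
        "x = intvar(0, 10, name='x')\n"
        "model = Model([x >= 3])\n"
        "model.solve()\n"
        "print(json.dumps({'x': int(x.value())}))\n"
    ),
}

with open("submission.jsonl", "w", encoding="utf-8") as f:
    f.write(json.dumps(entry) + "\n")
```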
user_eval.py ADDED
@@ -0,0 +1,293 @@
+ import json
+ import os
+ import subprocess
+ import sys
+ import tempfile
+
+ import click
+ from pathlib import Path
+
+ from datasets import load_dataset
+
+ GT_DATASET_NAME = "kostis-init/CP-Bench"
+ GT_PROBLEM_NAME_COLUMN = "id"
+ GT_MODEL_CODE_COLUMN = "model"
+
+
+ def exec_code(code: str, timeout=10, modelling_language='cpmpy'):
+     """
+     Execute the given code and return the output
+
+     :param code: The code to execute as a string
+     :param timeout: The maximum time to wait for the code to execute in seconds
+     :param modelling_language: The language to use for execution (cpmpy, minizinc, or-tools)
+     :return: A tuple of (success, output, timeout_occurred)
+     """
+
+     # create a temp directory to store the temporary file
+     temp_dir_name = "_temp_dir_for_exec_code"
+     temp_dir = os.path.join(os.getcwd(), temp_dir_name)
+     os.makedirs(temp_dir, exist_ok=True)
+
+     # write the code to a temporary file
+     suffix = '.__hidden_py__' if modelling_language == "cpmpy" or modelling_language == "or-tools" else '.mzn'
+     with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix=suffix, dir=temp_dir, encoding='utf-8') as temp_file:
+         temp_instance_path = temp_file.name
+         temp_file.write(code)
+
+     try:
+         # execute the code
+         if modelling_language == "cpmpy" or modelling_language == "or-tools":
+             command = [sys.executable, temp_instance_path]
+             result = subprocess.run(command, capture_output=True, text=True, timeout=timeout, encoding='utf-8')
+
+             successfully_executed = (result.returncode == 0)
+             output = result.stdout if successfully_executed else result.stderr
+             timeout_occurred = False
+         # elif modelling_language == "minizinc":
+         #     successfully_executed, output, timeout_occurred = exec_code_minizinc(code, timeout)
+         else:
+             raise ValueError(f"MODELLING_LANGUAGE not supported: {modelling_language}")
+
+     except subprocess.TimeoutExpired as e:
+         successfully_executed = False
+         output = f"Timeout Error: Execution time exceeded {timeout} seconds"
+         timeout_occurred = True
+     except Exception as e:
+         successfully_executed = False
+         output = f"Error: {e}"
+         timeout_occurred = False
+
+     os.remove(temp_instance_path)
+
+     return successfully_executed, output, timeout_occurred
+
+
+ def validate_submission_file(file_path: Path) -> tuple[bool, str]:
+     """Validate the submission file format and content.
+
+     Args:
+         file_path: Path to the submission file
+
+     Returns:
+         Tuple of (is_valid, error_message)
+     """
+     if not file_path.exists():
+         return False, f"File {file_path} does not exist"
+
+     if not file_path.name.endswith('.jsonl'):
+         return False, "Invalid file format. Please provide a .jsonl file"
+
+     try:
+         with open(file_path, 'r', encoding='utf-8') as file:
+             found_one = False
+             for line_num, line in enumerate(file, 1):
+                 found_one = True
+                 try:
+                     json_object = json.loads(line)
+                     if not all(key in json_object for key in ["id", "model"]):
+                         return False, f"Line {line_num}: Missing required keys 'id' and/or 'model'"
+                 except json.JSONDecodeError:
+                     return False, f"Line {line_num}: Invalid JSON format"
+
+             if not found_one:
+                 return False, "Empty file. Please provide a valid JSONL file"
+
+     except Exception as e:
+         return False, f"Error reading file: {str(e)}"
+
+     return True, "File is valid"
+
+
+ def extract_json_from_code_output(output: str):
+     try:
+         start_index = output.find('{')
+         end_index = output.rfind('}') + 1
+         # Extract the JSON part
+         json_part = output[start_index:end_index]
+         return json.loads(json_part)
+     except json.JSONDecodeError:
+         return None
+
+
+ def add_constraints_as_string(solution):
+     """Generate constraints as a string to be added to the original script."""
+     constraints = ""
+     if solution:  # Ensure solution is not None
+         for key, value in solution.items():
+             # Basic escaping for string values if they occur, though typically solutions are numeric/boolean
+             if isinstance(value, str):
+                 constraints += f"\nmodel += ({key} == \"{value}\")"
+             else:
+                 constraints += f"\nmodel += ({key} == {value})"
+     return constraints
+
+
+ def get_modified_script(script_content, solution):
+     """Add constraints to the script content and self-consistency checks."""
+     constraints_str = add_constraints_as_string(solution)
+     modified_script = f"{script_content}\n{constraints_str}"
+     modified_script += """
+ # Print the absolute path of the current directory along with the script name
+ import os
+ print(os.path.abspath(__file__))
+
+ # Keep old objective
+ old_objective = None
+ if hasattr(model, 'objective_is_min') and model.objective_is_min is not None:
+     old_objective = model.objective_value()
+
+ # Check self-consistency
+ if not model.solve():
+     print('ERROR: The model is unsatisfiable with the self-consistency constraints')
+ else:
+     print('SUCCESS: Model is consistent')
+
+ # Check if the objective value is the same
+ if old_objective is None:
+     print('SUCCESS: No objective defined')
+ elif model.objective_value() != old_objective:
+     print('ERROR: The objective value has changed')
+ else:
+     print('SUCCESS: Objective value is consistent')
+ """
+     return modified_script
+
+ @click.command()
+ @click.option('--submission_file', required=True, type=click.Path(exists=True, path_type=Path),
+               help='Path to the submission JSONL file')
+ def main(submission_file: Path):
+     """Evaluate a submission file for the CP-Bench competition."""
+     is_valid, message = validate_submission_file(submission_file)
+     if not is_valid:
+         click.echo(f"Error: {message}")
+         return
+
+     click.echo("Starting evaluation...")
+
+     # load generated models from jsonl to memory
+     print(f" Loading models from file...", flush=True)
+     submitted_models = []
+     with open(submission_file, "r", encoding="utf-8") as f:
+         for line in f:
+             try:
+                 json_obj = json.loads(line)
+                 submitted_models.append(json_obj)
+             except json.JSONDecodeError as e:
+                 print(f" ERROR: Failed to parse JSON object from line: {line}. Error: {e}", flush=True)
+     print(f" Loaded {len(submitted_models)} generated models.", flush=True)
+
+
+     # eval
+     total_submitted_models = 0
+     models_ran_successfully = 0
+     consistency_checks_passed = 0
+     objective_checks_passed = 0
+     all_checks_passed = 0
+     gt_models_found = 0
+
+     # Load ground-truth models
+     print(f" Loading ground-truth dataset '{GT_DATASET_NAME}'...", flush=True)
+     try:
+         gt_dataset = load_dataset(GT_DATASET_NAME, split="train", trust_remote_code=True)
+         ground_truth_models = {
+             item[GT_PROBLEM_NAME_COLUMN]: item[GT_MODEL_CODE_COLUMN]
+             for item in gt_dataset if
+             GT_PROBLEM_NAME_COLUMN in item and GT_MODEL_CODE_COLUMN in item and item[GT_MODEL_CODE_COLUMN]
+         }
+         if not ground_truth_models: raise ValueError("No models in GT dataset.")
+         print(f" Loaded {len(ground_truth_models)} ground-truth models.", flush=True)
+     except Exception as e_gt:
+         print(f" CRITICAL ERROR - Failed to load ground-truth dataset: {e_gt}", flush=True)
+         return
+
+     # Iterate through downloaded submitted models
+     for submitted_model in submitted_models:
+         curr_model = submitted_model[GT_MODEL_CODE_COLUMN]
+
+         total_submitted_models += 1
+         problem_name = submitted_model[GT_PROBLEM_NAME_COLUMN]
+         print(f"\n Processing model: {problem_name}", flush=True)
+         print(f"\n--- Model: {problem_name} ---\n")
+
+         print(" 1. Running submitted model...\n")
+
+         succ_exec, output, timeout_occurred = exec_code(curr_model, timeout=60)
+
+         if timeout_occurred:
+             print(f" - TIMEOUT: Execution time exceeded 60 seconds.\n")
+             continue
+         if not succ_exec:
+             print(f" - FAILED: Execution failed with error: {output}\n")
+             continue
+         if output is None or not output.strip():
+             print(f" - FAILED: No output from execution.\n")
+             continue
+         # Attempt to extract JSON from stdout
+         generated_solution = extract_json_from_code_output(output)
+         if generated_solution is None:
+             print(f" - FAILED: Could not extract JSON solution from output: {output}\n")
+             continue
+
+         models_ran_successfully += 1
+         print(f" - SUCCESS: Got solution: {generated_solution}\n")
+
+         print(f" 2. Checking against ground-truth for '{problem_name}'...\n")
+         if problem_name not in ground_truth_models:
+             print(f" - FAILED: Ground-truth model for '{problem_name}' not found in dataset.\n")
+             continue
+         gt_models_found += 1
+         ground_truth_script_content = ground_truth_models[problem_name]
+         print(" - SUCCESS: Found ground-truth model.\n")
+
+         print(" 3. Performing self-consistency check on ground-truth model...\n")
+         modified_gt_script = get_modified_script(ground_truth_script_content, generated_solution)
+
+         try:
+             with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False, encoding='utf-8') as tmp_file:
+                 tmp_file.write(modified_gt_script)
+                 tmp_file_path_str = tmp_file.name
+
+             gt_check_result = subprocess.run(
+                 [sys.executable, tmp_file_path_str],
+                 capture_output=True, text=True, timeout=60, encoding='utf-8',
+             )
+             os.unlink(tmp_file_path_str)
+
+             gt_stdout = gt_check_result.stdout
+             if "SUCCESS: Model is consistent" in gt_stdout:
+                 print(" - CONSISTENCY: PASSED\n")
+                 consistency_checks_passed += 1
+             else:
+                 print(
+                     " - CONSISTENCY: FAILED (Details in logs or stdout)\n")
+
+             if "SUCCESS: No objective defined" in gt_stdout or "SUCCESS: Objective value is consistent" in gt_stdout:
+                 print(" - OBJECTIVE: PASSED\n")
+                 objective_checks_passed += 1
+             else:
+                 print(" - OBJECTIVE: FAILED (Details in logs or stdout)\n")
+
+             if "SUCCESS: Model is consistent" in gt_stdout and (
+                     "SUCCESS: No objective defined" in gt_stdout or "SUCCESS: Objective value is consistent" in gt_stdout):
+                 print(" - SELF-CONSISTENCY CHECK: PASSED fully\n")
+                 all_checks_passed += 1
+
+         except Exception as e_gt_run:
+             print(f" - SELF-CONSISTENCY CHECK: FAILED (Error: {e_gt_run})\n")
+
+     # Final statistics
+     print("\n" + "=" * 30 + "\n")
+     print("Overall Evaluation:\n")
+     print(f" Total Submitted Models Parsed: {total_submitted_models}\n")
+     print(f" Execution perc: {models_ran_successfully / len(ground_truth_models) * 100:.2f}%\n")
+     print(f" Consistency perc: {consistency_checks_passed / len(ground_truth_models) * 100:.2f}%\n")
+     print(f" Objective perc: {objective_checks_passed / len(ground_truth_models) * 100:.2f}%\n")
+     print(f" Final Solution Accuracy perc: {all_checks_passed / len(ground_truth_models) * 100:.2f}%\n")
+     print("-" * 30 + "\n")
+
+     click.echo("Evaluation complete!")
+
+
+ if __name__ == "__main__":
+     main()
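
For intuition about the self-consistency check above, here is a small worked example with hypothetical values, assuming `user_eval.py` is importable from the current directory: `add_constraints_as_string` turns the decoded solution into equality constraints, and `get_modified_script` appends them, together with the SUCCESS/ERROR checks, to the ground-truth script before it is re-run.

```python
# Hypothetical walk-through of the self-consistency helpers; values are made up.
from user_eval import add_constraints_as_string, get_modified_script

solution = {"num_cars": 6, "cost": 42}  # placeholder decoded solution
print(add_constraints_as_string(solution))
# Expected output (one pinned equality per reported variable):
#   model += (num_cars == 6)
#   model += (cost == 42)

# A toy ground-truth script; get_modified_script appends the pinned equalities
# and the consistency checks, and the combined script is then executed.
gt_script = (
    "from cpmpy import *\n"
    "num_cars = intvar(0, 10, name='num_cars')\n"
    "cost = intvar(0, 100, name='cost')\n"
    "model = Model([num_cars <= 10, cost >= 0])\n"
)
print(get_modified_script(gt_script, solution))
```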