kostis-init committed
Commit ecfa9d2 · 1 Parent(s): ce3b0a0

remove subprocess

Files changed (3)
  1. Dockerfile +0 -3
  2. app.py +2 -1
  3. src/eval.py +28 -46
Dockerfile CHANGED
@@ -25,7 +25,4 @@ RUN pip install --no-cache-dir -r requirements.txt
 ENV MZN_DIR=/opt/minizinc
 ENV PATH="${MZN_DIR}/bin:${PATH}"
 
-# Expose the port Gradio runs on (default is 7860)
-#EXPOSE 7860
-
 CMD ["python", "app.py"]
app.py CHANGED
@@ -17,4 +17,5 @@ if __name__ == "__main__":
     print("Starting the Gradio app...", flush=True)
     demo = create_ui()
     print("Gradio app created successfully.", flush=True)
-    demo.queue().launch(server_name="0.0.0.0", server_port=7860, share=True, debug=True)
+    demo.queue().launch()
+    print("Gradio app launched successfully.", flush=True)
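Dropping the explicit arguments means launch() now uses Gradio's defaults. When server_name and server_port are omitted, Gradio falls back to the GRADIO_SERVER_NAME and GRADIO_SERVER_PORT environment variables, so the old 0.0.0.0:7860 behavior can come from the environment instead of the code. A minimal sketch of that pattern; the create_ui() below is a hypothetical stand-in for the app's real factory:

import os

import gradio as gr

def create_ui() -> gr.Blocks:
    # Hypothetical stand-in for the real create_ui() in this repo.
    with gr.Blocks() as demo:
        gr.Markdown("Evaluation UI")
    return demo

if __name__ == "__main__":
    # A bare launch() honors GRADIO_SERVER_NAME / GRADIO_SERVER_PORT,
    # so the bind address and port live in deployment config, not code.
    os.environ.setdefault("GRADIO_SERVER_NAME", "0.0.0.0")
    os.environ.setdefault("GRADIO_SERVER_PORT", "7860")
    create_ui().queue().launch()

Hosted platforms such as Hugging Face Spaces typically preset these variables, which is why the bare launch() still serves on the expected port there, while share=True and debug=True are simply dropped.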
src/eval.py CHANGED
@@ -38,40 +38,22 @@ from src.config import EVAL_SCRIPT_PATH, DATASET_REPO_ID, DS_RESULTS_PATH, CPMPY
 
 def run_evaluation(submission_path):
 
-    if not Path(EVAL_SCRIPT_PATH).exists():
-        print(f"ERROR: Eval script '{EVAL_SCRIPT_PATH}' not found")
-        return
+    # if not Path(EVAL_SCRIPT_PATH).exists():
+    #     print(f"ERROR: Eval script '{EVAL_SCRIPT_PATH}' not found")
+    #     return
 
     print(f"Starting evaluation for: {submission_path}")
 
-    command = [
-        sys.executable,
-        EVAL_SCRIPT_PATH,
-        DATASET_REPO_ID,
-        submission_path,
-        DS_RESULTS_PATH
-    ]
+    # command = [
+    #     sys.executable,
+    #     EVAL_SCRIPT_PATH,
+    #     DATASET_REPO_ID,
+    #     submission_path,
+    #     DS_RESULTS_PATH
+    # ]
 
-    try:
-        process = subprocess.run(
-            command,
-            capture_output=True,
-            text=True,
-            check=False,
-            timeout=600,
-            encoding='utf-8',
-        )
-
-        if process.returncode == 0:
-            print(f"Evaluation successful for: {submission_path}")
-        else:
-            print(f"Evaluation failed for: {submission_path}")
-            print(f"STDERR: {process.stderr}")
 
-    except subprocess.TimeoutExpired:
-        print(f"Evaluation timed out for: {submission_path}")
-    except Exception as e:
-        print(f"Error running evaluation: {e}")
+    main_eval(DATASET_REPO_ID, submission_path, DS_RESULTS_PATH)
 
     print(f"Evaluation process complete for: {submission_path}", flush=True)
 
@@ -252,7 +234,7 @@ else:
 
 
 # --- Main Evaluation Logic ---
-def main(
+def main_eval(
     user_dataset_repo_id: str,
     submission_path_in_dataset: str, # e.g., "submissions/uploaded_dir_name"
    results_base_path_in_dataset: str # e.g., "results"
@@ -467,19 +449,19 @@ def main(
 
     elapsed_time = time.time() - start_time
     print(f"eval.py: Evaluation finished in {elapsed_time:.2f} seconds.", flush=True)
-    return 0
-
-
-if __name__ == "__main__":
-    if len(sys.argv) < 4:
-        print(
-            "Usage: python eval.py <user_dataset_repo_id> <submission_path_in_dataset> <results_base_path_in_dataset>")
-        print("Example: python eval.py your-username/my-storage submissions/run123 results")
-        sys.exit(1)
-
-    arg_user_dataset_repo_id = sys.argv[1]
-    arg_submission_path_in_dataset = sys.argv[2]
-    arg_results_base_path_in_dataset = sys.argv[3]
-
-    exit_code = main(arg_user_dataset_repo_id, arg_submission_path_in_dataset, arg_results_base_path_in_dataset)
-    sys.exit(exit_code)
+    # return 0
+
+
+# if __name__ == "__main__":
+#     if len(sys.argv) < 4:
+#         print(
+#             "Usage: python eval.py <user_dataset_repo_id> <submission_path_in_dataset> <results_base_path_in_dataset>")
+#         print("Example: python eval.py your-username/my-storage submissions/run123 results")
+#         sys.exit(1)
+#
+#     arg_user_dataset_repo_id = sys.argv[1]
+#     arg_submission_path_in_dataset = sys.argv[2]
+#     arg_results_base_path_in_dataset = sys.argv[3]
+#
+#     exit_code = main(arg_user_dataset_repo_id, arg_submission_path_in_dataset, arg_results_base_path_in_dataset)
+#     sys.exit(exit_code)
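One consequence of calling main_eval() in-process: the 600-second timeout and the crash isolation of a separate interpreter that subprocess.run provided are gone, so a hung or crashing evaluation now stalls run_evaluation itself. If that guard is still wanted without reintroducing subprocess, the call can be bounded with a worker thread. A minimal sketch, assuming the imports match src/eval.py and src/config.py; run_evaluation_with_timeout is a hypothetical helper, not part of this commit:

import concurrent.futures

from src.config import DATASET_REPO_ID, DS_RESULTS_PATH
from src.eval import main_eval

def run_evaluation_with_timeout(submission_path: str, timeout_s: float = 600) -> None:
    # Run the evaluation in a worker thread so the caller can bound its wait,
    # mirroring the timeout=600 that subprocess.run used to enforce.
    pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    future = pool.submit(main_eval, DATASET_REPO_ID, submission_path, DS_RESULTS_PATH)
    try:
        future.result(timeout=timeout_s)
        print(f"Evaluation successful for: {submission_path}")
    except concurrent.futures.TimeoutError:
        print(f"Evaluation timed out for: {submission_path}")
    except Exception as e:
        print(f"Error running evaluation: {e}")
    finally:
        # Don't block on a stuck worker: stop waiting and let it finish
        # (or leak) in the background. Unlike a subprocess, a Python
        # thread cannot be forcibly killed on timeout.
        pool.shutdown(wait=False)

The trade-off is that a timed-out evaluation keeps running in the background; only the subprocess approach could actually terminate it.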