libokj committed on
Commit
11581ed
·
1 Parent(s): faa2df9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -39
app.py CHANGED
@@ -241,21 +241,6 @@ PandasTools.drawOptions.singleColourWedgeBonds = True
241
  PandasTools.drawOptions.useCDKAtomPalette()
242
  PandasTools.molSize = (100, 64)
243
 
244
- session = requests.Session()
245
- ADAPTER = HTTPAdapter(max_retries=Retry(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504]))
246
- session.mount('http://', ADAPTER)
247
- session.mount('https://', ADAPTER)
248
-
249
- db = TinyDB(f'{SERVER_DATA_DIR}/db.json')
250
- # Set all RUNNING jobs to FAILED at TinyDB initialization
251
- Job = Query()
252
- jobs = db.all()
253
- for job in jobs:
254
- if job['status'] == 'RUNNING':
255
- db.update({'status': 'FAILED'}, Job.id == job['id'])
256
-
257
- scheduler = BackgroundScheduler()
258
-
259
 
260
  def remove_job_record(job_id):
261
  # Delete the job from the database
@@ -637,6 +622,7 @@ def ts_to_str(timestamp, timezone):
637
  def lookup_job(job_id):
638
  gr.Info('Start querying the job database...')
639
  stop = False
 
640
  while not stop:
641
  try:
642
  Job = Query()
@@ -655,11 +641,11 @@ def lookup_job(job_id):
655
  sleep(5)
656
  yield {
657
  pred_lookup_status: f'''
658
- Your **{job_type}** job (ID: {job_id}) started at
659
  **{start_time}** and is **RUNNING...**
660
 
661
  It might take a few minutes up to a few hours depending on the prediction dataset, the model, and the queue status.
662
- You may keep the page open and wait for the completion, or close the page and revisit later to look up the job status
663
  using the job id. You will also receive an email notification once the job is done.
664
  ''',
665
  pred_lookup_btn: gr.Button(visible=False),
@@ -693,9 +679,11 @@ using the job id. You will also receive an email notification once the job is do
693
  tabs: gr.Tabs(selected='Prediction Status Lookup'),
694
  }
695
  else:
696
- stop = True
697
- msg = f'Job ID {job_id} not found.'
698
  gr.Info(msg)
 
 
699
  yield {
700
  pred_lookup_status: msg,
701
  pred_lookup_btn: gr.Button(visible=True),
@@ -707,9 +695,11 @@ using the job id. You will also receive an email notification once the job is do
707
  raise gr.Error(f'Failed to retrieve job status due to error: {str(e)}')
708
 
709
 
710
- def submit_predict(predict_filepath, task, preset, target_family, opts, state):
711
- job_id = state['id']
712
- status = "RUNNING"
 
 
713
  error = None
714
  task_file_abbr = {'Compound-Protein Interaction': 'CPI', 'Compound-Protein Binding Affinity': 'CPA'}
715
  predictions_file = None
@@ -768,7 +758,7 @@ def submit_predict(predict_filepath, task, preset, target_family, opts, state):
768
  task_value = TASK_MAP[task]
769
  preset_value = PRESET_MAP[preset]
770
  predictions_file = (f'{SERVER_DATA_DIR}/'
771
- f'{job_id}_{task_file_abbr[task]}_{preset_value}_{target_family_value}_predictions.csv')
772
 
773
  cfg = hydra.compose(
774
  config_name="webserver_inference",
@@ -884,7 +874,7 @@ def submit_predict(predict_filepath, task, preset, target_family, opts, state):
884
  'error': error,
885
  'input_file': predict_filepath,
886
  'output_file': predictions_file},
887
- job_query)
888
  if job_info := db.search(job_query)[0]:
889
  if job_info.get('email'):
890
  send_email(job_info)
@@ -2156,7 +2146,7 @@ QALAHAYFAQYHDPDDEPVADPYDQSFESRDLLIDEWKSLTYDEVISFVPPPLDQEEMES
2156
 
2157
  def common_job_initiate(job_id, job_type, email, request, task):
2158
  gr.Info('Finished processing inputs. Initiating the prediction job... '
2159
- 'You will be redirected to Prediction Status Lookup after the job is submitted.')
2160
  job_info = {'id': job_id,
2161
  'type': job_type,
2162
  'task': task,
@@ -2168,7 +2158,7 @@ QALAHAYFAQYHDPDDEPVADPYDQSFESRDLLIDEWKSLTYDEVISFVPPPLDQEEMES
2168
  'end_time': None,
2169
  'expiry_time': None,
2170
  'error': None}
2171
- db.insert(job_info)
2172
  return job_info
2173
 
2174
 
@@ -2314,10 +2304,10 @@ QALAHAYFAQYHDPDDEPVADPYDQSFESRDLLIDEWKSLTYDEVISFVPPPLDQEEMES
2314
  concurrency_limit=100,
2315
  )
2316
 
2317
- drug_screen_click.success(
2318
- fn=send_email,
2319
- inputs=[run_state]
2320
- )
2321
 
2322
  drug_screen_click.success(
2323
  fn=submit_predict,
@@ -2404,10 +2394,10 @@ QALAHAYFAQYHDPDDEPVADPYDQSFESRDLLIDEWKSLTYDEVISFVPPPLDQEEMES
2404
  concurrency_limit=100
2405
  )
2406
 
2407
- target_identify_click.success(
2408
- fn=send_email,
2409
- inputs=[run_state]
2410
- )
2411
 
2412
  target_identify_click.success(
2413
  fn=submit_predict,
@@ -2436,10 +2426,10 @@ QALAHAYFAQYHDPDDEPVADPYDQSFESRDLLIDEWKSLTYDEVISFVPPPLDQEEMES
2436
  concurrency_limit=100
2437
  )
2438
 
2439
- pair_infer_click.success(
2440
- fn=send_email,
2441
- inputs=[run_state]
2442
- )
2443
 
2444
  pair_infer_click.success(
2445
  fn=submit_predict,
@@ -2568,7 +2558,24 @@ QALAHAYFAQYHDPDDEPVADPYDQSFESRDLLIDEWKSLTYDEVISFVPPPLDQEEMES
2568
 
2569
  if __name__ == "__main__":
2570
  pandarallel.initialize()
 
2571
  hydra.initialize(version_base="1.3", config_path="configs", job_name="webserver_inference")
2572
- demo.queue(default_concurrency_limit=None, max_size=10).launch(show_api=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2573
  scheduler.add_job(check_expiry, 'interval', hours=1)
2574
  scheduler.start()
 
 
 
241
  PandasTools.drawOptions.useCDKAtomPalette()
242
  PandasTools.molSize = (100, 64)
243
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
244
 
245
  def remove_job_record(job_id):
246
  # Delete the job from the database
 
622
  def lookup_job(job_id):
623
  gr.Info('Start querying the job database...')
624
  stop = False
625
+ retry = 0
626
  while not stop:
627
  try:
628
  Job = Query()
 
641
  sleep(5)
642
  yield {
643
  pred_lookup_status: f'''
644
+ Your **{job_type}** job (ID: **{job_id}**) started at
645
  **{start_time}** and is **RUNNING...**
646
 
647
  It might take a few minutes up to a few hours depending on the prediction dataset, the model, and the queue status.
648
+ You may keep the page open and wait for job completion, or close the page and revisit later to look up the job status
649
  using the job id. You will also receive an email notification once the job is done.
650
  ''',
651
  pred_lookup_btn: gr.Button(visible=False),
 
679
  tabs: gr.Tabs(selected='Prediction Status Lookup'),
680
  }
681
  else:
682
+ stop = (retry > 3)
683
+ msg = f'Job ID {job_id} not found. Retrying... ({retry})'
684
  gr.Info(msg)
685
+ retry += 1
686
+ sleep(5)
687
  yield {
688
  pred_lookup_status: msg,
689
  pred_lookup_btn: gr.Button(visible=True),
 
695
  raise gr.Error(f'Failed to retrieve job status due to error: {str(e)}')
696
 
697
 
698
+ def submit_predict(predict_filepath, task, preset, target_family, opts, job_info):
699
+ job_id = job_info['id']
700
+ status = job_info['status']
701
+ send_email(job_info)
702
+ db.insert(job_info)
703
  error = None
704
  task_file_abbr = {'Compound-Protein Interaction': 'CPI', 'Compound-Protein Binding Affinity': 'CPA'}
705
  predictions_file = None
 
758
  task_value = TASK_MAP[task]
759
  preset_value = PRESET_MAP[preset]
760
  predictions_file = (f'{SERVER_DATA_DIR}/'
761
+ f'{job_id}_{task_file_abbr[task]}_{preset}_{target_family_value}_predictions.csv')
762
 
763
  cfg = hydra.compose(
764
  config_name="webserver_inference",
 
874
  'error': error,
875
  'input_file': predict_filepath,
876
  'output_file': predictions_file},
877
+ job_query)
878
  if job_info := db.search(job_query)[0]:
879
  if job_info.get('email'):
880
  send_email(job_info)
 
2146
 
2147
  def common_job_initiate(job_id, job_type, email, request, task):
2148
  gr.Info('Finished processing inputs. Initiating the prediction job... '
2149
+ 'You will be redirected to Prediction Status Lookup once the job has been submitted.')
2150
  job_info = {'id': job_id,
2151
  'type': job_type,
2152
  'task': task,
 
2158
  'end_time': None,
2159
  'expiry_time': None,
2160
  'error': None}
2161
+ # db.insert(job_info)
2162
  return job_info
2163
 
2164
 
 
2304
  concurrency_limit=100,
2305
  )
2306
 
2307
+ # drug_screen_click.success(
2308
+ # fn=send_email,
2309
+ # inputs=[run_state]
2310
+ # )
2311
 
2312
  drug_screen_click.success(
2313
  fn=submit_predict,
 
2394
  concurrency_limit=100
2395
  )
2396
 
2397
+ # target_identify_click.success(
2398
+ # fn=send_email,
2399
+ # inputs=[run_state]
2400
+ # )
2401
 
2402
  target_identify_click.success(
2403
  fn=submit_predict,
 
2426
  concurrency_limit=100
2427
  )
2428
 
2429
+ # pair_infer_click.success(
2430
+ # fn=send_email,
2431
+ # inputs=[run_state]
2432
+ # )
2433
 
2434
  pair_infer_click.success(
2435
  fn=submit_predict,
 
2558
 
2559
  if __name__ == "__main__":
2560
  pandarallel.initialize()
2561
+
2562
  hydra.initialize(version_base="1.3", config_path="configs", job_name="webserver_inference")
2563
+
2564
+ session = requests.Session()
2565
+ ADAPTER = HTTPAdapter(max_retries=Retry(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504]))
2566
+ session.mount('http://', ADAPTER)
2567
+ session.mount('https://', ADAPTER)
2568
+
2569
+ db = TinyDB(f'{SERVER_DATA_DIR}/db.json')
2570
+ # Set all RUNNING jobs to FAILED at TinyDB initialization
2571
+ Job = Query()
2572
+ jobs = db.all()
2573
+ for job in jobs:
2574
+ if job['status'] == 'RUNNING':
2575
+ db.update({'status': 'FAILED'}, Job.id == job['id'])
2576
+
2577
+ scheduler = BackgroundScheduler()
2578
  scheduler.add_job(check_expiry, 'interval', hours=1)
2579
  scheduler.start()
2580
+
2581
+ demo.queue(default_concurrency_limit=None, max_size=10).launch(show_api=False)