husseinelsaadi committed on
Commit 14f359b · 1 Parent(s): 3deb6b2

optimizing the speed

Files changed (1): app.py (+15 -4)
app.py CHANGED
@@ -1896,8 +1896,8 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     # Generate audio with Bark (wait for it)
     start = time.perf_counter()
     cleaned_text = first_q.strip().replace("\n", " ")
-    audio_future = tts_async(cleaned_text)
-    audio_path = audio_future.result()
+    audio_future = executor.submit(tts_async, next_q)
+    audio_path = audio_future.result().result()
     print("⏱️ TTS (edge-tts) took", round(time.perf_counter() - start, 2), "seconds")

     # Log question
@@ -1942,7 +1942,12 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     last_q = state["questions"][-1]
     q_eval = state["question_evaluations"][-1]
     ref_answer = generate_reference_answer(last_q, data["job_role"], data["seniority"])
-    answer_eval = evaluate_answer(last_q, transcript, ref_answer, data["job_role"], data["seniority"], None)
+    answer_eval_future = executor.submit(
+        evaluate_answer,
+        last_q, transcript, ref_answer,
+        data["job_role"], data["seniority"], None
+    )
+    answer_eval = answer_eval_future.result()
     state["answer_evaluations"].append(answer_eval)
     answer_score = answer_eval.get("Score", "medium") if answer_eval else "medium"

@@ -1988,7 +1993,13 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     start = time.time()
     next_q = groq_llm.predict(prompt)
     print("⏱️ Groq LLM Response Time:", round(time.time() - start, 2), "seconds")
-    q_eval = eval_question_quality(next_q, data["job_role"], data["seniority"], None)
+    start = time.time()
+    q_eval_future = executor.submit(
+        eval_question_quality,
+        next_q, data["job_role"], data["seniority"], None
+    )
+    q_eval = q_eval_future.result()
+    print("⏱️ Evaluation time:", round(time.time() - start, 2), "seconds")
     state["questions"].append(next_q)
     state["question_evaluations"].append(q_eval)
     state["conversation_history"].append({'role': 'Interviewer', 'content': next_q})
 