husseinelsaadi committed
Commit d525d89 · verified · 1 parent: ba4fd9a

Update app.py
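Stub out the emotion-analysis and effective-confidence calls with fixed placeholder values for testing, gate process_answer on the transcript alone, and rewire confirm_btn.click to pass a video_path placeholder and clear both the audio input and the transcript after each answer.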

Files changed (1): app.py (+9 -23)
app.py CHANGED
@@ -1664,27 +1664,20 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     user_audio_input.change(transcribe, user_audio_input, stt_transcript)
 
     def process_answer(transcript, audio_path, video_path, state, data):
-        if not transcript and not video_path:
+        if not transcript:
             return state, gr.update(), gr.update(), gr.update(), gr.update(), gr.update(), gr.update()
+
         elapsed = round(time.time() - state.get("q_start_time", time.time()), 2)
         state["timings"].append(elapsed)
         state["answers"].append(transcript)
         state["conversation_history"].append({'role': 'Candidate', 'content': transcript})
 
-        # --- 1. Emotion analysis ---
-        # voice_label = analyze_audio_emotion(audio_path)
-        # face_label = analyze_video_emotions(video_path)
-        # state["voice_labels"].append(voice_label)
-        # state["face_labels"].append(face_label)
-
-        #just for testing
+        # --- 1. Emotion analysis (simplified for testing) ---
         voice_label = "neutral"
         face_label = "neutral"
         state["voice_labels"].append(voice_label)
         state["face_labels"].append(face_label)
 
-
-
         # --- 2. Evaluate previous Q and Answer ---
         last_q = state["questions"][-1]
         q_eval = state["question_evaluations"][-1]  # Already in state
@@ -1701,15 +1694,10 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         else:
             state["difficulty_adjustment"] = None
 
-        # --- 4. Effective confidence ---
-        # eff_conf = interpret_confidence(voice_label, face_label, answer_score)
-        # state["effective_confidences"].append(eff_conf)
-
-        #just for testing:
+        # --- 4. Effective confidence (simplified) ---
         eff_conf = {"effective_confidence": 0.6}
         state["effective_confidences"].append(eff_conf)
 
-
         # --- LOG ---
         state["log"].append({
             "type": "answer",
@@ -1739,9 +1727,6 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
                 f"- *Answer*: {state['answers'][i]}\n"
                 f"- *Q Eval*: {state['question_evaluations'][i]}\n"
                 f"- *A Eval*: {state['answer_evaluations'][i]}\n"
-                #also this are removed just for testing :(
-                # f"- *Face Emotion: {state['face_labels'][i]}, **Voice Emotion*: {state['voice_labels'][i]}\n"
-                # f"- *Effective Confidence*: {state['effective_confidences'][i]['effective_confidence']}\n"
                 f"- *Time*: {state['timings'][i]}s\n")
         summary += f"\n\n⏺ Full log saved as {log_file}."
         return (state, gr.update(visible=True, value=summary), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(visible=True, value=f"Last Detected — Face: {face_label}, Voice: {voice_label}"))
@@ -1773,14 +1758,15 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         return (
             state, gr.update(visible=False), audio_path, f"*Question {qidx + 1}:* {next_q}",
             gr.update(value=None), gr.update(value=None),
-            gr.update(visible=True, value=f"Last Detected — Face: {face_label}, Voice: {voice_label}"),
+            gr.update(visible=True, value=eval_md),
         )
+    # Replace your confirm_btn.click with this:
     confirm_btn.click(
         process_answer,
-        [stt_transcript, user_audio_input, interview_state, user_data],
-        [interview_state, interview_summary, question_audio, question_text, user_audio_input]
+        [stt_transcript, user_audio_input, None, interview_state, user_data],  # Added None for video_path
+        [interview_state, interview_summary, question_audio, question_text, user_audio_input, stt_transcript, evaluation_display]
     ).then(
-        lambda: (gr.update(value=None), gr.update(value=None)), None, [user_audio_input]
+        lambda: (gr.update(value=None), gr.update(value=None)), None, [user_audio_input, stt_transcript]
     )
 
     demo.launch(debug=True)
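For readers unfamiliar with the event wiring this commit adjusts: in Gradio Blocks, an event listener such as confirm_btn.click(fn, inputs, outputs) returns a dependency whose .then() runs a follow-up step after the first callback completes, which is how the diff clears user_audio_input and stt_transcript once process_answer has returned. Below is a minimal, self-contained sketch of that pattern, assuming a Gradio 3.x-era API to match the diff's use of gr.update; the component names are hypothetical stand-ins, not the ones in app.py.

# Minimal sketch of the click(...).then(...) chaining pattern.
# Hypothetical components (answer_box, summary_md, confirm); assumes Gradio 3.x.
import gradio as gr

def process(text):
    # Stand-in for process_answer: turn the submitted text into a summary.
    return f"Received: {text or '(empty)'}"

with gr.Blocks() as demo:
    answer_box = gr.Textbox(label="Answer")
    summary_md = gr.Markdown()
    confirm = gr.Button("Confirm")

    # Step 1 writes the summary; the chained step then clears the input,
    # mirroring how the commit resets user_audio_input and stt_transcript.
    confirm.click(process, [answer_box], [summary_md]).then(
        lambda: gr.update(value=None), None, [answer_box]
    )

if __name__ == "__main__":
    demo.launch()

Clearing components in a chained .then() step, rather than inside the main callback, keeps the callback's return tuple aligned with its outputs list; that alignment is what the commit restores by extending the outputs of confirm_btn.click to the seven values process_answer actually returns.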