husseinelsaadi committed on
Commit
eddc90a
·
verified ·
1 Parent(s): 2e00a2f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +42 -12
app.py CHANGED
@@ -132,10 +132,7 @@ MODEL_PATH = "mistralai/Mistral-7B-Instruct-v0.3"
132
  #MODEL_PATH = "tiiuae/falcon-rw-1b"
133
 
134
  bnb_config = BitsAndBytesConfig(
135
- load_in_4bit=True,
136
- bnb_4bit_compute_dtype=torch.float16,
137
- bnb_4bit_use_double_quant=True,
138
- bnb_4bit_quant_type="nf4"
139
  )
140
 
141
  mistral_tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH,use_auth_token=True)
@@ -147,6 +144,8 @@ judge_llm = AutoModelForCausalLM.from_pretrained(
147
  use_auth_token=True
148
  )
149
 
 
 
150
  judge_pipeline = pipeline(
151
  "text-generation",
152
  model=judge_llm,
@@ -154,10 +153,14 @@ judge_pipeline = pipeline(
154
  max_new_tokens=128,
155
  temperature=0.3,
156
  top_p=0.9,
157
- do_sample=True,
158
  repetition_penalty=1.1,
159
  )
160
 
 
 
 
 
161
 
162
  # embedding model
163
  from sentence_transformers import SentenceTransformer
@@ -1700,11 +1703,24 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
1700
  skills=data["skills"], seniority=data["seniority"], difficulty_adjustment=None,
1701
  voice_label="neutral", face_label="neutral"
1702
  )
 
 
 
 
 
 
 
 
1703
  first_q = groq_llm.predict(prompt)
1704
- # Evaluate Q for quality
1705
- q_eval = eval_question_quality(first_q, data["job_role"], data["seniority"], None)
 
 
 
1706
  state["questions"].append(first_q)
1707
  state["question_evaluations"].append(q_eval)
 
 
1708
  state["conversation_history"].append({'role': 'Interviewer', 'content': first_q})
1709
  audio_path = bark_tts(first_q)
1710
  # LOG
@@ -1725,11 +1741,19 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
1725
  state["conversation_history"].append({'role': 'Candidate', 'content': transcript})
1726
 
1727
  # --- 1. Emotion analysis ---
1728
- voice_label = analyze_audio_emotion(audio_path)
1729
- face_label = analyze_video_emotions(video_path)
 
 
 
 
 
 
1730
  state["voice_labels"].append(voice_label)
1731
  state["face_labels"].append(face_label)
1732
 
 
 
1733
  # --- 2. Evaluate previous Q and Answer ---
1734
  last_q = state["questions"][-1]
1735
  q_eval = state["question_evaluations"][-1] # Already in state
@@ -1747,9 +1771,14 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
1747
  state["difficulty_adjustment"] = None
1748
 
1749
  # --- 4. Effective confidence ---
1750
- eff_conf = interpret_confidence(voice_label, face_label, answer_score)
 
 
 
 
1751
  state["effective_confidences"].append(eff_conf)
1752
 
 
1753
  # --- LOG ---
1754
  state["log"].append({
1755
  "type": "answer",
@@ -1779,8 +1808,9 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
1779
  f"- *Answer*: {state['answers'][i]}\n"
1780
  f"- *Q Eval*: {state['question_evaluations'][i]}\n"
1781
  f"- *A Eval*: {state['answer_evaluations'][i]}\n"
1782
- f"- *Face Emotion: {state['face_labels'][i]}, **Voice Emotion*: {state['voice_labels'][i]}\n"
1783
- f"- *Effective Confidence*: {state['effective_confidences'][i]['effective_confidence']}\n"
 
1784
  f"- *Time*: {state['timings'][i]}s\n")
1785
  summary += f"\n\n⏺ Full log saved as {log_file}."
1786
  return (state, gr.update(visible=True, value=summary), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(visible=True, value=f"Last Detected — Face: {face_label}, Voice: {voice_label}"))
 
132
  #MODEL_PATH = "tiiuae/falcon-rw-1b"
133
 
134
  bnb_config = BitsAndBytesConfig(
135
+ load_in_8bit=True,
 
 
 
136
  )
137
 
138
  mistral_tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH,use_auth_token=True)
 
144
  use_auth_token=True
145
  )
146
 
147
+ print(judge_llm.hf_device_map)
148
+
149
  judge_pipeline = pipeline(
150
  "text-generation",
151
  model=judge_llm,
 
153
  max_new_tokens=128,
154
  temperature=0.3,
155
  top_p=0.9,
156
+ do_sample=False,
157
  repetition_penalty=1.1,
158
  )
159
 
160
+ output = judge_pipeline("Q: What is Python?\nA:", max_new_tokens=128)[0]['generated_text']
161
+ print(output)
162
+
163
+
164
 
165
  # embedding model
166
  from sentence_transformers import SentenceTransformer
 
1703
  skills=data["skills"], seniority=data["seniority"], difficulty_adjustment=None,
1704
  voice_label="neutral", face_label="neutral"
1705
  )
1706
+ #here the original one
1707
+ # first_q = groq_llm.predict(prompt)
1708
+ # # Evaluate Q for quality
1709
+ # q_eval = eval_question_quality(first_q, data["job_role"], data["seniority"], None)
1710
+ # state["questions"].append(first_q)
1711
+ # state["question_evaluations"].append(q_eval)
1712
+
1713
+ #here the testing one
1714
  first_q = groq_llm.predict(prompt)
1715
+ q_eval = {
1716
+ "Score": "N/A",
1717
+ "Reasoning": "Skipped to reduce processing time",
1718
+ "Improvements": []
1719
+ }
1720
  state["questions"].append(first_q)
1721
  state["question_evaluations"].append(q_eval)
1722
+
1723
+
1724
  state["conversation_history"].append({'role': 'Interviewer', 'content': first_q})
1725
  audio_path = bark_tts(first_q)
1726
  # LOG
 
1741
  state["conversation_history"].append({'role': 'Candidate', 'content': transcript})
1742
 
1743
  # --- 1. Emotion analysis ---
1744
+ # voice_label = analyze_audio_emotion(audio_path)
1745
+ # face_label = analyze_video_emotions(video_path)
1746
+ # state["voice_labels"].append(voice_label)
1747
+ # state["face_labels"].append(face_label)
1748
+
1749
+ #just for testing
1750
+ voice_label = "neutral"
1751
+ face_label = "neutral"
1752
  state["voice_labels"].append(voice_label)
1753
  state["face_labels"].append(face_label)
1754
 
1755
+
1756
+
1757
  # --- 2. Evaluate previous Q and Answer ---
1758
  last_q = state["questions"][-1]
1759
  q_eval = state["question_evaluations"][-1] # Already in state
 
1771
  state["difficulty_adjustment"] = None
1772
 
1773
  # --- 4. Effective confidence ---
1774
+ # eff_conf = interpret_confidence(voice_label, face_label, answer_score)
1775
+ # state["effective_confidences"].append(eff_conf)
1776
+
1777
+ #just for testing:
1778
+ eff_conf = {"effective_confidence": 0.6}
1779
  state["effective_confidences"].append(eff_conf)
1780
 
1781
+
1782
  # --- LOG ---
1783
  state["log"].append({
1784
  "type": "answer",
 
1808
  f"- *Answer*: {state['answers'][i]}\n"
1809
  f"- *Q Eval*: {state['question_evaluations'][i]}\n"
1810
  f"- *A Eval*: {state['answer_evaluations'][i]}\n"
1811
+ #also this are removed just for testing :(
1812
+ # f"- *Face Emotion: {state['face_labels'][i]}, **Voice Emotion*: {state['voice_labels'][i]}\n"
1813
+ # f"- *Effective Confidence*: {state['effective_confidences'][i]['effective_confidence']}\n"
1814
  f"- *Time*: {state['timings'][i]}s\n")
1815
  summary += f"\n\n⏺ Full log saved as {log_file}."
1816
  return (state, gr.update(visible=True, value=summary), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(value=None), gr.update(visible=True, value=f"Last Detected — Face: {face_label}, Voice: {voice_label}"))