reab5555 committed on
Commit 758b4c1 · verified · 1 Parent(s): b8382db

Create interview.py

Files changed (1)
  1. interview.py +90 -0
interview.py ADDED
@@ -0,0 +1,90 @@
+ from langchain.schema import HumanMessage
+ from llm_loader import load_model
+ from config import openai_api_key
+ from processing import combined_retriever, qa_chain
+ from typing import Dict, List
+
+ # Initialize LLM
+ llm = load_model(openai_api_key)
+
+ class Interview:
+     def __init__(self):
+         self.question_count = 0
+         self.history: List[tuple] = []
+         self.general_impression: str = ""
+         self.interview_instructions = load_interview_instructions()  # assumed to be defined/imported elsewhere in this repo
+
+     def set_general_impression(self, impression: str):
+         self.general_impression = impression
+
+     def process_message(self, message: str) -> str:
+         # Retrieve relevant documents
+         relevant_docs = qa_chain.invoke({"query": message})
+
+         # Generate next question or final report
+         if self.question_count < 10:
+             prompt = f"""
+             You are a Psychologist or Psychiatrist conducting a clinical interview.
+
+             Use the following information to generate the next question:
+
+             General Impression: {self.general_impression}
+             Context from knowledge base: {relevant_docs}
+             Full conversation history:
+             {self._format_history()}
+             Current answer: {message}
+             Question count: {self.question_count + 1}/10
+
+             Based on all this information, generate the next appropriate question for the clinical interview.
+             Important: Your question MUST be a direct follow-up to the most recent answer, while also considering
+             the entire conversation history. Ensure that your question builds upon the information just provided
+             and explores it further or shifts to a related topic based on what was just said.
+             """
+             response = llm.invoke(prompt)
+             self.question_count += 1
+             self.history.append((message, response.content))
+             return response.content
+         else:
+             prompt = f"""
+             Based on the following information, generate a comprehensive clinical report (also make use of technical clinical terms from the provided documents or knowledge base):
+             General Impression: {self.general_impression}
+             Context from knowledge base: {relevant_docs}
+             Full conversation:
+             {self._format_history()}
+             Final answer: {message}
+
+             Provide a detailed clinical analysis based on the entire interview.
+             """
+             response = llm.invoke(prompt)
+             self.question_count = 0  # Reset for next interview
+             final_report = response.content
+             self.history = []  # Clear history
+             return "Interview complete. Here's the final report:\n\n" + final_report
+
+     def start_interview(self) -> str:
+         prompt = f"""
+         You are a Psychologist or Psychiatrist starting a clinical interview.
+
+         Based on the following information, generate an appropriate opening question:
+
+         General Impression: {self.general_impression}
+         Interview Instructions: {self.interview_instructions}
+
+         Provide a warm, engaging opening question that sets the tone for the clinical interview and encourages the individual to start sharing about themselves.
+         """
+         response = llm.invoke(prompt)
+         return response.content
+
+     def get_results(self) -> List[tuple]:
+         return self.history
+
+     def _format_history(self) -> str:
+         formatted_history = ""
+         for i, (question, answer) in enumerate(self.history, start=1):
+             formatted_history += f"Q{i}: {question}\nA{i}: {answer}\n\n"
+         return formatted_history
+
+ interview = Interview()
+
+ def get_interview_instance():
+     return interview
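
A minimal usage sketch of the interview module added above, assuming the sibling modules (llm_loader, config, processing) and load_interview_instructions resolve at import time; the console input loop and the example impression string are illustrative only and are not part of this commit.

# usage_sketch.py -- hypothetical driver, not part of this commit
from interview import get_interview_instance

session = get_interview_instance()
session.set_general_impression("Calm and cooperative, reports mild anxiety")  # illustrative value

# Opening question generated by the LLM
print(session.start_interview())

# Feed answers until the 10th reply, at which point process_message
# returns the final clinical report and resets the session state
while True:
    answer = input("> ")
    reply = session.process_message(answer)
    print(reply)
    if reply.startswith("Interview complete."):
        break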