File size: 5,411 Bytes
758b4c1
 
 
 
 
92b5afd
758b4c1
 
 
 
 
 
 
 
914b175
758b4c1
 
92b5afd
758b4c1
 
 
92b5afd
 
 
 
758b4c1
92b5afd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
758b4c1
92b5afd
 
 
914b175
92b5afd
 
 
758b4c1
 
92b5afd
 
 
 
 
 
 
 
758b4c1
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
from langchain.schema import HumanMessage
from llm_loader import load_model
from config import openai_api_key
from processing import combined_retriever, qa_chain
from typing import Dict, List
import traceback

# Module-level LLM client (created once at import time) shared by all
# Interview instances below; load_model/openai_api_key come from the project.
llm = load_model(openai_api_key)

class Interview:
    """Stateful clinical-interview session driven by an LLM.

    Tracks the question/answer history and the "general impression"
    produced by a previous video analysis.  Each incoming answer yields a
    follow-up question until ``max_questions`` rounds have elapsed, at
    which point a final clinical report is generated and the session
    state is reset for the next interview.
    """

    def __init__(self, max_questions: int = 10):
        # Number of Q/A rounds before the final report is produced
        # (previously a hard-coded 10 duplicated inside the prompt text).
        self.max_questions = max_questions
        self.question_count = 0
        self.history: List[tuple] = []
        self.general_impression: str = ""  # This will be set from the previous analysis

    def set_general_impression(self, impression: str) -> None:
        """Store the general impression produced by the earlier video analysis."""
        print(f"Setting general impression: {impression}")
        self.general_impression = impression

    def process_message(self, message: str) -> str:
        """Consume one answer; return the next question or the final report.

        While fewer than ``max_questions`` rounds have run, generates a
        follow-up question and appends ``(message, question)`` to history.
        On the round after the last question, generates the final report
        and resets ``question_count`` and ``history``.

        Returns a plain error string instead of raising so the caller's
        chat loop keeps running on failure.
        """
        try:
            print(f"Interview.process_message received message: {message}")
            # Retrieve relevant documents from the knowledge base (project qa_chain).
            relevant_docs = qa_chain.invoke({"query": message})

            # Generate next question or final report
            if self.question_count < self.max_questions:
                prompt = f"""
                You are a Psychologist or a Psychiatrist conducting a clinical interview about the speaker analyzed in the video.
                Use the following information to generate the next question:
                
                General Impression from previous analysis: {self.general_impression}
                Context from knowledge base: {relevant_docs}
                Full conversation history:
                {self._format_history()}
                Current answer: {message}
                Question count: {self.question_count + 1}/{self.max_questions}

                Based on all this information, generate the next appropriate question for the clinical interview about the speaker.
                Important: Your question MUST be a direct follow-up to the most recent answer, while also considering
                the entire conversation history and the initial general impression. Ensure that your question builds upon 
                the information just provided and explores it further or shifts to a related topic based on what was just said.
                """
                response = llm.invoke(prompt)
                self.question_count += 1
                self.history.append((message, response.content))
                print(f"Interview.process_message generated response: {response.content}")
                print(f"Type of response: {type(response.content)}")
                return response.content
            else:
                prompt = f"""
                Based on the following information, generate a comprehensive clinical report about the speaker 
                (also make use of technical clinical terms from the provided documents or knowledge base):
                General Impression from previous analysis: {self.general_impression}
                Context from knowledge base: {relevant_docs}
                Full conversation:
                {self._format_history()}
                Final answer: {message}

                Provide a detailed clinical analysis of the speaker based on the initial general impression and the entire interview.
                Compare and contrast the insights gained from the interview with the initial general impression.
                """
                response = llm.invoke(prompt)
                self.question_count = 0  # Reset for next interview
                final_report = response.content
                self.history = []  # Clear history
                print(f"Interview.process_message generated final report: {final_report}")
                print(f"Type of final report: {type(final_report)}")
                return "Interview complete. Here's the final report:\n\n" + final_report
        except Exception as e:
            # Best-effort boundary: log the traceback and keep the session alive.
            print(f"Error in Interview.process_message: {str(e)}")
            print(traceback.format_exc())
            return "Error occurred in processing message"

    def start_interview(self) -> str:
        """Generate and return the opening question for a new interview.

        Does not advance ``question_count`` or touch ``history``; the
        opening question is recorded only once the first answer arrives
        via :meth:`process_message`.
        """
        try:
            prompt = f"""
            You are a Psychologist or a Psychiatrist starting a clinical interview about the speaker analyzed in the video.
            Based on the following information, generate an appropriate opening question:
            
            General Impression from previous analysis: {self.general_impression}

            Provide a warm, engaging opening question that sets the tone for the clinical interview and encourages 
            discussion about the speaker, based on the general impression provided from the previous analysis.
            """
            response = llm.invoke(prompt)
            opening_question = response.content
            print(f"Interview.start_interview generated opening question: {opening_question}")
            print(f"Type of opening question: {type(opening_question)}")
            return opening_question
        except Exception as e:
            print(f"Error in Interview.start_interview: {str(e)}")
            print(traceback.format_exc())
            return "Error occurred in starting interview"

    def get_results(self) -> List[tuple]:
        """Return the accumulated (answer, question) history for this session."""
        return self.history

    def _format_history(self) -> str:
        """Render history as numbered 'Q{i}: ...\\nA{i}: ...' blocks for prompts."""
        return "".join(
            f"Q{i}: {question}\nA{i}: {answer}\n\n"
            for i, (question, answer) in enumerate(self.history, start=1)
        )

# Module-level singleton so every importer shares one interview session.
interview = Interview()

def get_interview_instance():
    """Return the shared module-level Interview singleton."""
    return interview