# NOTE(review): the three lines here ("Spaces:", "Runtime error", "Runtime error")
# were stray deployment-log text pasted above the module, not Python code.
from langchain.schema import HumanMessage | |
from llm_loader import load_model | |
from config import openai_api_key | |
from processing import combined_retriever, qa_chain | |
from typing import Dict, List | |
import traceback | |
# Initialize LLM
# Module-level LLM client (built from the configured OpenAI key) shared by
# every Interview instance below; created once at import time.
llm = load_model(openai_api_key)
class Interview:
    """Stateful clinical-interview session driven by the module-level LLM.

    The session asks up to ``max_questions`` follow-up questions, one per
    call to :meth:`process_message`, then produces a final clinical report
    and resets itself for the next interview.
    """

    def __init__(self, max_questions: int = 10):
        # Follow-up questions generated so far in the current session.
        self.question_count = 0
        # (user answer, generated question) pairs, oldest first.
        # NOTE(review): _format_history labels the first element "Q" and the
        # second "A", i.e. the user's answer is shown as the question —
        # preserved as-is; confirm against the prompt's intent.
        self.history: List[tuple] = []
        self.general_impression: str = ""  # This will be set from the previous analysis
        # Interview length; default 10 matches the original hard-coded limit.
        self.max_questions = max_questions

    def set_general_impression(self, impression: str):
        """Store the general impression produced by the prior video analysis."""
        print(f"Setting general impression: {impression}")
        self.general_impression = impression

    def process_message(self, message: str) -> str:
        """Consume one user answer; return the next question or the final report.

        Never raises: any failure is logged and degraded to a plain error
        string so the caller's chat loop keeps running.
        """
        try:
            print(f"Interview.process_message received message: {message}")
            # Retrieve relevant documents from the knowledge base.
            relevant_docs = qa_chain.invoke({"query": message})
            # Generate next question or final report.
            if self.question_count < self.max_questions:
                return self._next_question(message, relevant_docs)
            return self._final_report(message, relevant_docs)
        except Exception as e:
            # Boundary handler: log the traceback, keep the UI loop alive.
            print(f"Error in Interview.process_message: {str(e)}")
            print(traceback.format_exc())
            return "Error occurred in processing message"

    def _next_question(self, message: str, relevant_docs) -> str:
        """Generate the next interview question and record it in history."""
        prompt = f"""
You are a Psychologist or a Psychiatrist conducting a clinical interview about the speaker analyzed in the video.
Use the following information to generate the next question:
General Impression from previous analysis: {self.general_impression}
Context from knowledge base: {relevant_docs}
Full conversation history:
{self._format_history()}
Current answer: {message}
Question count: {self.question_count + 1}/{self.max_questions}
Based on all this information, generate the next appropriate question for the clinical interview about the speaker.
Important: Your question MUST be a direct follow-up to the most recent answer, while also considering
the entire conversation history and the initial general impression. Ensure that your question builds upon
the information just provided and explores it further or shifts to a related topic based on what was just said.
"""
        response = llm.invoke(prompt)
        self.question_count += 1
        self.history.append((message, response.content))
        print(f"Interview.process_message generated response: {response.content}")
        print(f"Type of response: {type(response.content)}")
        return response.content

    def _final_report(self, message: str, relevant_docs) -> str:
        """Generate the closing clinical report and reset session state."""
        prompt = f"""
Based on the following information, generate a comprehensive clinical report about the speaker
(also make use of technical clinical terms from the provided documents or knowledge base):
General Impression from previous analysis: {self.general_impression}
Context from knowledge base: {relevant_docs}
Full conversation:
{self._format_history()}
Final answer: {message}
Provide a detailed clinical analysis of the speaker based on the initial general impression and the entire interview.
Compare and contrast the insights gained from the interview with the initial general impression.
"""
        response = llm.invoke(prompt)
        self.question_count = 0  # Reset for next interview
        final_report = response.content
        self.history = []  # Clear history
        print(f"Interview.process_message generated final report: {final_report}")
        print(f"Type of final report: {type(final_report)}")
        return "Interview complete. Here's the final report:\n\n" + final_report

    def start_interview(self) -> str:
        """Return a warm opening question seeded from the general impression.

        Never raises: failures are logged and reported as an error string.
        """
        try:
            prompt = f"""
You are a Psychologist or a Psychiatrist starting a clinical interview about the speaker analyzed in the video.
Based on the following information, generate an appropriate opening question:
General Impression from previous analysis: {self.general_impression}
Provide a warm, engaging opening question that sets the tone for the clinical interview and encourages
discussion about the speaker, based on the general impression provided from the previous analysis.
"""
            response = llm.invoke(prompt)
            opening_question = response.content
            print(f"Interview.start_interview generated opening question: {opening_question}")
            print(f"Type of opening question: {type(opening_question)}")
            return opening_question
        except Exception as e:
            print(f"Error in Interview.start_interview: {str(e)}")
            print(traceback.format_exc())
            return "Error occurred in starting interview"

    def get_results(self) -> List[tuple]:
        """Return the raw (answer, question) history for the current session."""
        return self.history

    def _format_history(self) -> str:
        """Render history as numbered 'Q{i}: ...\\nA{i}: ...\\n\\n' blocks."""
        # join instead of += accumulation: linear, not quadratic.
        return "".join(
            f"Q{i}: {question}\nA{i}: {answer}\n\n"
            for i, (question, answer) in enumerate(self.history, start=1)
        )
# Module-level singleton: all callers share one interview session.
interview = Interview()
def get_interview_instance():
    """Return the shared module-level Interview instance."""
    return interview