from langchain.schema import HumanMessage
from typing import Dict, List

from llm_loader import load_model
from config import openai_api_key
from processing import combined_retriever, qa_chain

# NOTE: load_interview_instructions is assumed to be provided by another module
# (e.g., config or a prompts helper); import it from wherever it is defined.

# Initialize the LLM
llm = load_model(openai_api_key)
class Interview:
    def __init__(self):
        self.question_count = 0
        self.history: List[tuple] = []
        self.general_impression: str = ""
        self.interview_instructions = load_interview_instructions()

    def set_general_impression(self, impression: str):
        self.general_impression = impression
    def process_message(self, message: str) -> str:
        # Retrieve documents relevant to the latest answer
        relevant_docs = qa_chain.invoke({"query": message})

        # Generate the next question or, after 10 questions, the final report
        if self.question_count < 10:
            prompt = f"""
            You are a psychologist or psychiatrist conducting a clinical interview.
            Use the following information to generate the next question:

            General Impression: {self.general_impression}
            Context from knowledge base: {relevant_docs}
            Full conversation history:
            {self._format_history()}
            Current answer: {message}
            Question count: {self.question_count + 1}/10

            Based on all this information, generate the next appropriate question for the clinical interview.
            Important: Your question MUST be a direct follow-up to the most recent answer, while also considering
            the entire conversation history. Ensure that your question builds on the information just provided
            and either explores it further or shifts to a related topic based on what was just said.
            """
            response = llm.invoke(prompt)
            self.question_count += 1
            self.history.append((message, response.content))
            return response.content
        else:
            prompt = f"""
            Based on the following information, generate a comprehensive clinical report
            (also make use of technical clinical terms from the provided documents or knowledge base):

            General Impression: {self.general_impression}
            Context from knowledge base: {relevant_docs}
            Full conversation:
            {self._format_history()}
            Final answer: {message}

            Provide a detailed clinical analysis based on the entire interview.
            """
            response = llm.invoke(prompt)
            self.question_count = 0  # Reset for the next interview
            final_report = response.content
            self.history = []  # Clear history
            return "Interview complete. Here's the final report:\n\n" + final_report
    def start_interview(self) -> str:
        prompt = f"""
        You are a psychologist or psychiatrist starting a clinical interview.
        Based on the following information, generate an appropriate opening question:

        General Impression: {self.general_impression}
        Interview Instructions: {self.interview_instructions}

        Provide a warm, engaging opening question that sets the tone for the clinical interview
        and encourages the individual to start sharing about themselves.
        """
        response = llm.invoke(prompt)
        return response.content
    def get_results(self) -> List[tuple]:
        return self.history

    def _format_history(self) -> str:
        formatted_history = ""
        for i, (question, answer) in enumerate(self.history, start=1):
            formatted_history += f"Q{i}: {question}\nA{i}: {answer}\n\n"
        return formatted_history
# Module-level instance shared across the app
interview = Interview()


def get_interview_instance():
    return interview
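
# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of how the Interview class might be driven, e.g. from a UI
# callback or a simple command-line loop. The input()/print() loop and the
# sample impression text are hypothetical; only get_interview_instance(),
# set_general_impression(), start_interview(), and process_message() come from
# the code above.
if __name__ == "__main__":
    session = get_interview_instance()
    session.set_general_impression("Calm but withdrawn; reports trouble sleeping.")
    print(session.start_interview())
    # Each call to process_message() returns the next question until the
    # 10-question limit is reached, at which point it returns the final report.
    while True:
        answer = input("> ")
        reply = session.process_message(answer)
        print(reply)
        if reply.startswith("Interview complete."):
            break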