File size: 4,673 Bytes
e3551a8
3524dd9
6dee2d5
2355280
6724fb5
 
5cb6a2f
b88e12c
baa51c6
b88e12c
baa51c6
b88e12c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
baa51c6
e3551a8
baa51c6
e3551a8
 
 
1a67fa6
3b9f50e
e3551a8
 
 
8948925
e3551a8
 
cdb5785
 
 
 
 
 
e3551a8
 
cdb5785
baa51c6
 
 
 
 
b88e12c
e3551a8
cdb5785
 
 
 
 
 
b88e12c
e3551a8
cdb5785
3524dd9
 
 
 
 
e3551a8
 
 
cdb5785
9796cc7
8948925
 
 
1a67fa6
0a8dc2e
 
 
 
 
 
 
6724fb5
cdb5785
f33ef48
71b710d
 
8948925
3524dd9
 
 
 
cdb5785
 
f33ef48
 
71b710d
3524dd9
 
 
 
f33ef48
 
cdb5785
e3551a8
8948925
71b710d
3524dd9
 
 
 
8948925
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
from langchain.schema import HumanMessage
from output_parser import output_parser
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS
from llm_loader import load_model
from config import openai_api_key
from langchain.chains import RetrievalQA
import os
import json

# Embedding model used to vectorize the knowledge base.
embedding_model = OpenAIEmbeddings(openai_api_key=openai_api_key)

# Knowledge-base source files, keyed by topic.
knowledge_files = {
    "attachments": "knowledge/bartholomew_attachments_definitions.txt",
    "bigfive": "knowledge/bigfive_definitions.txt",
    "personalities": "knowledge/personalities_definitions.txt"
}


def _read_knowledge_file(path):
    # Read one knowledge file as stripped UTF-8 text.
    with open(path, 'r', encoding='utf-8') as handle:
        return handle.read().strip()


# One document per knowledge file, in insertion order of `knowledge_files`.
documents = [_read_knowledge_file(p) for p in knowledge_files.values()]

# In-memory FAISS index over the knowledge documents.
faiss_index = FAISS.from_texts(documents, embedding_model)

llm = load_model(openai_api_key)

# Retrieval chain that stuffs retrieved documents into the LLM context.
qa_chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=faiss_index.as_retriever())

def load_text(file_path: str) -> str:
    """Read *file_path* as UTF-8 and return its contents with surrounding whitespace stripped."""
    with open(file_path, 'r', encoding='utf-8') as handle:
        contents = handle.read()
    return contents.strip()

def truncate_text(text: str, max_tokens: int = 10000) -> str:
    """Cap *text* at *max_tokens* whitespace-separated words.

    Text at or under the limit is returned unchanged (original whitespace
    preserved); longer text is rebuilt from its first *max_tokens* words
    joined by single spaces.
    """
    tokens = text.split()
    return text if len(tokens) <= max_tokens else ' '.join(tokens[:max_tokens])

def _fallback_results():
    """Build the placeholder single-speaker result used when analysis fails.

    Parses an empty dict once (the original duplicated this block and called
    the parser three times for the same empty input).
    """
    empty = output_parser.parse_speaker_analysis({})
    return {"Speaker 1": {
        'general_impression': 'No general impression provided',
        'attachments': empty.AttachmentStyle,
        'bigfive': empty.BigFiveTraits,
        'personalities': empty.PersonalityDisorder
    }}


def _strip_json_fence(content: str) -> str:
    """Remove a leading ```json fence and a trailing ``` from LLM output, then strip."""
    if content.startswith("```json"):
        content = content.split("```json", 1)[1]
    if content.endswith("```"):
        content = content.rsplit("```", 1)[0]
    return content.strip()


def process_input(input_text: str, llm):
    """Run the combined personality-analysis prompt against *llm*.

    Loads the four task descriptions from ``tasks/``, retrieves supporting
    knowledge via the module-level RetrievalQA chain, builds one prompt, and
    parses the model's JSON reply into per-speaker results.

    Args:
        input_text: Raw transcript text; truncated to 10,000 words before use.
        llm: Chat model exposing ``invoke(messages)``. NOTE(review): this
            parameter shadows the module-level ``llm``; the retrieval step
            still uses the module-level chain — confirm that is intended.

    Returns:
        Dict mapping "Speaker N" to a dict with keys 'general_impression',
        'attachments', 'bigfive', and 'personalities'. Falls back to a single
        placeholder entry when parsing fails or no analyses are found.
    """
    general_task = load_text("tasks/general_task.txt")
    attachments_task = load_text("tasks/Attachments_task.txt")
    bigfive_task = load_text("tasks/BigFive_task.txt")
    personalities_task = load_text("tasks/Personalities_task.txt")

    truncated_input = truncate_text(input_text)

    relevant_docs = qa_chain.invoke({"query": truncated_input})

    # RetrievalQA normally returns {'result': ...}; fall back to str() for
    # any other shape so the prompt always gets text.
    if isinstance(relevant_docs, dict) and 'result' in relevant_docs:
        retrieved_knowledge = relevant_docs['result']
    else:
        retrieved_knowledge = str(relevant_docs)

    prompt = f"""{general_task}
Attachment Styles Task:
{attachments_task}
Big Five Traits Task:
{bigfive_task}
Personality Disorders Task:
{personalities_task}
Retrieved Knowledge: {retrieved_knowledge}
Input: {truncated_input}
Please provide a comprehensive analysis for each speaker, including:
1. General Impression
2. Attachment styles (use the format from the Attachment Styles Task)
3. Big Five traits (use the format from the Big Five Traits Task)
4. Personality disorders (use the format from the Personality Disorders Task)
Respond with a JSON object containing an array of speaker analyses under the key 'speaker_analyses'. Each speaker analysis should include all four aspects mentioned above.
Analysis:"""

    messages = [HumanMessage(content=prompt)]
    response = llm.invoke(messages)

    print("Raw LLM Model Output:")
    print(response.content)

    try:
        parsed_json = json.loads(_strip_json_fence(response.content))

        results = {}
        for i, speaker_analysis in enumerate(parsed_json.get('speaker_analyses', []), 1):
            # Parse each speaker's dict ONCE; the original re-parsed the same
            # dict three times (once per extracted field).
            parsed = output_parser.parse_speaker_analysis(speaker_analysis)
            results[f"Speaker {i}"] = {
                'general_impression': speaker_analysis.get('GeneralImpression', 'No general impression provided'),
                'attachments': parsed.AttachmentStyle,
                'bigfive': parsed.BigFiveTraits,
                'personalities': parsed.PersonalityDisorder
            }

        if not results:
            print("Warning: No speaker analyses found in the parsed JSON.")
            return _fallback_results()

        return results
    except Exception as e:
        # Broad catch is deliberate: any JSON/validation failure degrades to
        # the placeholder result instead of crashing the caller.
        print(f"Error processing input: {e}")
        return _fallback_results()