import os
import requests
import datetime
import streamlit as st
from functools import partial
from tempfile import NamedTemporaryFile
from typing import List, Callable, Literal, Optional
from streamlit.runtime.uploaded_file_manager import UploadedFile

# LangChain and associated modules
from langchain_openai import ChatOpenAI, OpenAIEmbeddings
from langchain.schema import HumanMessage, SystemMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
from langchain_community.utilities import BingSearchAPIWrapper
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain.tools import Tool
from langchain.tools.retriever import create_retriever_tool
from langchain.agents import create_openai_tools_agent, AgentExecutor
from langchain_community.agent_toolkits.load_tools import load_tools
from langchain.pydantic_v1 import BaseModel, Field


# -- SESSION STATE INITIALIZATION -----------------------------------------
def initialize_session_state_variables() -> None:
    """
    Initialize all the session state variables.
    """
    session_defaults = {
        "ready": False,
        "model": "gpt-4o",
        "topic": "",
        "positive": "",
        "negative": "",
        "agent_descriptions": {},
        "new_debate": True,
        "conversations": [],
        "conversations4print": [],
        "simulator": None,
        "tools": [],
        "retriever_tool": None,
        "analytics": {
            "total_messages": 0,
            "word_counts": {},
            "sentiments": {},
        },
        "conclusions": "",
        "comments_key": 0,
    }
    for key, value in session_defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value


# -- DOCUMENT PROCESSING & VECTOR STORE -----------------------------------
def get_vector_store(uploaded_files: List[UploadedFile]) -> Optional[FAISS]:
    """
    Process uploaded files into FAISS vector store.
    """
    if not uploaded_files:
        return None

    # Ensure the directory for temporary files exists
    temp_dir = "files"
    os.makedirs(temp_dir, exist_ok=True)  # Create 'files/' directory if it doesn't exist

    documents = []
    filepaths = []
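    # Map file extensions to their corresponding LangChain document loaders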
    loader_map = {".txt": TextLoader, ".pdf": PyPDFLoader, ".docx": Docx2txtLoader}

    try:
        for uploaded_file in uploaded_files:
            with NamedTemporaryFile(dir=temp_dir, delete=False) as file:
                file.write(uploaded_file.getbuffer())
                filepath = file.name
            filepaths.append(filepath)

            file_ext = os.path.splitext(uploaded_file.name.lower())[1]
            loader_class = loader_map.get(file_ext)
            if not loader_class:
                st.error(f"Unsupported file type: {file_ext}", icon="🚨")
                return None

            loader = loader_class(filepath)
            documents.extend(loader.load())

        # Split documents into overlapping chunks so retrieval returns focused passages
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        split_docs = text_splitter.split_documents(documents)

        embeddings = OpenAIEmbeddings(model="text-embedding-3-large", dimensions=1536)
        vector_store = FAISS.from_documents(split_docs, embeddings)
    except Exception as e:
        st.error(f"An error occurred: {e}", icon="🚨")
        vector_store = None
    finally:
        for filepath in filepaths:
            if os.path.exists(filepath):
                os.remove(filepath)

    return vector_store


# -- AGENT & SIMULATOR SETUP ----------------------------------------------
class DialogueSimulator:
    """
    Simulates a debate between agents with analytics tracking.
    """
    def __init__(self, agents: List[dict], selection_function: Callable[[int, List[dict]], int]):
        self.agents = agents
        self._step = 0
        self.select_next_speaker = selection_function
        # Remember each agent's seed (persona) messages so reset() can restore them
        self._initial_histories = {agent["name"]: list(agent.get("message_history", [])) for agent in agents}
        self.analytics = {
            "total_messages": 0,
            "word_counts": {agent["name"]: 0 for agent in agents},
        }

    def reset(self):
        self._step = 0
        for agent in self.agents:
            # Restore the seed messages rather than wiping the persona entirely
            agent["message_history"] = list(self._initial_histories.get(agent["name"], []))
        self.analytics = {
            "total_messages": 0,
            "word_counts": {agent["name"]: 0 for agent in self.agents},
        }

    def step(self):
        speaker_idx = self.select_next_speaker(self._step, self.agents)
        speaker = self.agents[speaker_idx]
        # A bare ChatOpenAI model expects a string (or message list) as input, not a dict
        output = speaker["llm"].invoke("\n".join(speaker["message_history"]))
        message = output.content
        word_count = len(message.split())

        # Update analytics
        self.analytics["total_messages"] += 1
        self.analytics["word_counts"][speaker["name"]] += word_count

        for agent in self.agents:
            agent["message_history"].append(f"{speaker['name']}: {message}")
        self._step += 1
        return speaker["name"], message


def select_next_speaker(step: int, agents: List[dict]) -> int:
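    """Round-robin selection: agents speak in turn, cycling by step index."""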
    return step % len(agents)


# -- DEBATE INSIGHTS ------------------------------------------------------
def generate_summary():
    """
    Generate a summary of the debate based on conversation history.
    """
    conversation = "\n".join(st.session_state.conversations)
    summary_prompt = [
        SystemMessage(content="You are an insightful moderator."),
        HumanMessage(content=f"Summarize the following debate:\n{conversation}\n"),
    ]
    moderator_llm = ChatOpenAI(model="gpt-4o", temperature=0.2)
    with st.spinner("Generating summary..."):
        summary = moderator_llm.invoke(summary_prompt).content
    return summary


def show_insights():
    """
    Display the analytics and insights for the debate.
    """
    st.header("Debate Insights")

    # Display the summary
    st.subheader("Summary of the Debate")
    summary = generate_summary()
    st.write(summary)

    # Show message counts
    st.subheader("Message Contributions")
    total_messages = st.session_state.simulator.analytics["total_messages"]
    word_counts = st.session_state.simulator.analytics["word_counts"]
    st.write(f"Total Messages: {total_messages}")

    # Display Word Count Breakdown
    st.write("Word Count by Participant:")
    for name, count in word_counts.items():
        st.write(f"{name}: {count} words")

    # Show charts
    st.subheader("Visual Analytics")
    st.bar_chart(word_counts)


# -- MAIN APP LOGIC -------------------------------------------------------
def multi_agent_debate():
    """
    Main app logic for the multi-agent debate.
    """
    initialize_session_state_variables()
    st.title("Advanced Multi-Agent Debate System")

    # Sidebar for API Keys
    with st.sidebar:
        st.write("### API Configuration")
        openai_key = st.text_input("Enter OpenAI API Key", type="password")
        if openai_key:
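            # LangChain's ChatOpenAI and OpenAIEmbeddings read the key from this environment variable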
            os.environ["OPENAI_API_KEY"] = openai_key
            st.session_state.ready = True
        else:
            st.error("Please enter a valid OpenAI API Key.")
            st.stop()

    # Upload Supporting Documents
    st.write("### Upload Supporting Documents")
    uploaded_files = st.file_uploader("Upload Documents (PDF, TXT, DOCX)", type=["pdf", "txt", "docx"], accept_multiple_files=True)
    if uploaded_files:
        vector_store = get_vector_store(uploaded_files)
        if vector_store:
            st.session_state.retriever_tool = create_retriever_tool(
                retriever=vector_store.as_retriever(),
                name="document_retriever",
                description="Retrieve information from uploaded documents."
            )

    # Start Debate Button
    if st.button("Start Debate"):
        agents = [
            {
                "name": "AI Optimist",
                "llm": ChatOpenAI(model="gpt-4o", temperature=0.7),
                "message_history": ["You are an optimistic advocate of AI."]
            },
            {
                "name": "AI Pessimist",
                "llm": ChatOpenAI(model="gpt-4o", temperature=0.7),
                "message_history": ["You are skeptical of AI advancements."]
            },
        ]

        simulator = DialogueSimulator(
            agents=agents, selection_function=select_next_speaker
        )
        st.session_state.simulator = simulator
        simulator.reset()
        st.session_state.conversations = []

        st.write("## Debate In Progress")
        # Run a short, fixed number of turns (an arbitrary choice) and record them for the insights view
        for _ in range(6):
            name, message = simulator.step()
            st.session_state.conversations.append(f"{name}: {message}")
            st.markdown(f"**{name}:** {message}")

    # View Insights Button
    if st.button("View Insights"):
        show_insights()


# Run the app
if __name__ == "__main__":
    multi_agent_debate()