Update app.py
Browse files
app.py
CHANGED
@@ -1,7 +1,10 @@
|
|
1 |
import os
|
|
|
|
|
2 |
import streamlit as st
|
|
|
3 |
from tempfile import NamedTemporaryFile
|
4 |
-
from typing import List, Optional
|
5 |
from streamlit.runtime.uploaded_file_manager import UploadedFile
|
6 |
|
7 |
# LangChain and associated modules
|
@@ -9,12 +12,21 @@ from langchain_openai import ChatOpenAI, OpenAIEmbeddings
|
|
9 |
from langchain.schema import HumanMessage, SystemMessage
|
10 |
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
11 |
from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
|
|
|
12 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
13 |
from langchain_community.vectorstores import FAISS
|
|
|
14 |
from langchain.tools.retriever import create_retriever_tool
|
|
|
|
|
|
|
|
|
15 |
|
16 |
# -- SESSION STATE INITIALIZATION -----------------------------------------
|
17 |
def initialize_session_state_variables() -> None:
|
|
|
|
|
|
|
18 |
session_defaults = {
|
19 |
"ready": False,
|
20 |
"model": "gpt-4o",
|
@@ -43,52 +55,147 @@ def initialize_session_state_variables() -> None:
|
|
43 |
|
44 |
# -- DOCUMENT PROCESSING & VECTOR STORE -----------------------------------
|
45 |
def get_vector_store(uploaded_files: List[UploadedFile]) -> Optional[FAISS]:
|
|
|
|
|
|
|
46 |
if not uploaded_files:
|
47 |
return None
|
48 |
|
|
|
|
|
|
|
|
|
49 |
documents = []
|
|
|
50 |
loader_map = {".txt": TextLoader, ".pdf": PyPDFLoader, ".docx": Docx2txtLoader}
|
51 |
|
52 |
try:
|
53 |
for uploaded_file in uploaded_files:
|
54 |
-
|
55 |
-
|
56 |
-
|
57 |
-
|
58 |
|
59 |
-
# Determine file extension and load the document
|
60 |
file_ext = os.path.splitext(uploaded_file.name.lower())[1]
|
61 |
loader_class = loader_map.get(file_ext)
|
62 |
if not loader_class:
|
63 |
st.error(f"Unsupported file type: {file_ext}", icon="🚨")
|
64 |
return None
|
65 |
|
66 |
-
loader = loader_class(
|
67 |
documents.extend(loader.load())
|
68 |
|
69 |
-
# Process documents into FAISS vector store
|
70 |
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
|
71 |
doc = text_splitter.split_documents(documents)
|
|
|
72 |
embeddings = OpenAIEmbeddings(model="text-embedding-3-large", dimensions=1536)
|
73 |
vector_store = FAISS.from_documents(doc, embeddings)
|
74 |
-
|
75 |
except Exception as e:
|
76 |
st.error(f"An error occurred: {e}", icon="🚨")
|
77 |
vector_store = None
|
78 |
finally:
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
|
83 |
return vector_store
|
84 |
|
85 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
86 |
# -- MAIN APP LOGIC -------------------------------------------------------
|
87 |
def multi_agent_debate():
|
|
|
|
|
|
|
88 |
initialize_session_state_variables()
|
89 |
st.title("Advanced Multi-Agent Debate System")
|
90 |
|
91 |
-
# Sidebar for API
|
92 |
with st.sidebar:
|
93 |
st.write("### API Configuration")
|
94 |
openai_key = st.text_input("Enter OpenAI API Key", type="password")
|
@@ -111,7 +218,7 @@ def multi_agent_debate():
|
|
111 |
description="Retrieve information from uploaded documents."
|
112 |
)
|
113 |
|
114 |
-
# Start Debate
|
115 |
if st.button("Start Debate"):
|
116 |
agents = [
|
117 |
{
|
@@ -134,10 +241,11 @@ def multi_agent_debate():
|
|
134 |
|
135 |
st.write("## Debate In Progress")
|
136 |
|
137 |
-
# View Insights
|
138 |
if st.button("View Insights"):
|
139 |
show_insights()
|
140 |
|
141 |
|
|
|
142 |
if __name__ == "__main__":
|
143 |
multi_agent_debate()
|
|
|
1 |
import os
|
2 |
+
import requests
|
3 |
+
import datetime
|
4 |
import streamlit as st
|
5 |
+
from functools import partial
|
6 |
from tempfile import NamedTemporaryFile
|
7 |
+
from typing import List, Callable, Literal, Optional
|
8 |
from streamlit.runtime.uploaded_file_manager import UploadedFile
|
9 |
|
10 |
# LangChain and associated modules
|
|
|
12 |
from langchain.schema import HumanMessage, SystemMessage
|
13 |
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
14 |
from langchain_community.document_loaders import PyPDFLoader, Docx2txtLoader, TextLoader
|
15 |
+
from langchain_community.utilities import BingSearchAPIWrapper
|
16 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
17 |
from langchain_community.vectorstores import FAISS
|
18 |
+
from langchain.tools import Tool
|
19 |
from langchain.tools.retriever import create_retriever_tool
|
20 |
+
from langchain.agents import create_openai_tools_agent, AgentExecutor
|
21 |
+
from langchain_community.agent_toolkits.load_tools import load_tools
|
22 |
+
from langchain.pydantic_v1 import BaseModel, Field
|
23 |
+
|
24 |
|
25 |
# -- SESSION STATE INITIALIZATION -----------------------------------------
|
26 |
def initialize_session_state_variables() -> None:
|
27 |
+
"""
|
28 |
+
Initialize all the session state variables.
|
29 |
+
"""
|
30 |
session_defaults = {
|
31 |
"ready": False,
|
32 |
"model": "gpt-4o",
|
|
|
55 |
|
56 |
# -- DOCUMENT PROCESSING & VECTOR STORE -----------------------------------
|
57 |
def get_vector_store(uploaded_files: List[UploadedFile]) -> Optional[FAISS]:
    """
    Build a FAISS vector store from the uploaded documents.

    Each upload is spooled to a temporary file under ``files/`` so the
    LangChain loaders (which require a filesystem path) can read it; the
    staged copies are always removed afterwards.

    Returns None when nothing was uploaded, when a file type is not
    supported, or when processing fails.
    """
    if not uploaded_files:
        return None

    # The loaders need real paths, so stage uploads under 'files/'.
    temp_dir = "files"
    os.makedirs(temp_dir, exist_ok=True)

    loader_map = {".txt": TextLoader, ".pdf": PyPDFLoader, ".docx": Docx2txtLoader}
    documents = []
    filepaths = []

    try:
        for uploaded_file in uploaded_files:
            # Spool the upload to disk and remember the path for cleanup.
            with NamedTemporaryFile(dir=temp_dir, delete=False) as tmp:
                tmp.write(uploaded_file.getbuffer())
                filepath = tmp.name
                filepaths.append(filepath)

            # Pick a loader based on the original file's extension.
            file_ext = os.path.splitext(uploaded_file.name.lower())[1]
            loader_class = loader_map.get(file_ext)
            if loader_class is None:
                st.error(f"Unsupported file type: {file_ext}", icon="🚨")
                return None

            documents.extend(loader_class(filepath).load())

        # Chunk, embed, and index the documents.
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        doc = splitter.split_documents(documents)

        embeddings = OpenAIEmbeddings(model="text-embedding-3-large", dimensions=1536)
        vector_store = FAISS.from_documents(doc, embeddings)
    except Exception as e:
        st.error(f"An error occurred: {e}", icon="🚨")
        vector_store = None
    finally:
        # Always delete the staged copies, even on the early-return paths.
        for filepath in filepaths:
            if os.path.exists(filepath):
                os.remove(filepath)

    return vector_store
|
102 |
|
103 |
|
104 |
+
# -- AGENT & SIMULATOR SETUP ----------------------------------------------
|
105 |
+
class DialogueSimulator:
    """
    Simulates a round-based debate between agents with analytics tracking.

    Each agent is a dict expected to carry:
      - "name": display name (str)
      - "llm": object whose .invoke(dict) returns a message with .content
      - "message_history": list[str] transcript lines seen so far
        (callers populate it; ``reset`` also clears it)
    """

    def __init__(self, agents: List[dict], selection_function: Callable[[int, List[dict]], int]):
        self.agents = agents
        self._step = 0
        # Callable (step, agents) -> index of the next speaker.
        self.select_next_speaker = selection_function
        self.analytics = self._fresh_analytics()

    def _fresh_analytics(self) -> dict:
        """Return a zeroed analytics dict.

        Single source of truth for the analytics shape — previously this
        literal was duplicated in __init__ and reset, risking drift.
        """
        return {
            "total_messages": 0,
            "word_counts": {agent["name"]: 0 for agent in self.agents},
        }

    def reset(self):
        """Clear every agent's transcript and zero the analytics counters."""
        for agent in self.agents:
            agent["message_history"] = []
        self.analytics = self._fresh_analytics()

    def step(self):
        """Run one debate turn and return ``(speaker_name, message)``.

        The selected agent's LLM is invoked with the transcript it has seen
        so far; the resulting utterance is appended to EVERY agent's history
        (including the speaker's own).
        """
        speaker_idx = self.select_next_speaker(self._step, self.agents)
        speaker = self.agents[speaker_idx]
        output = speaker["llm"].invoke({"input": "\n".join(speaker["message_history"])})
        message = output.content
        word_count = len(message.split())

        # Update analytics
        self.analytics["total_messages"] += 1
        self.analytics["word_counts"][speaker["name"]] += word_count

        # Broadcast the new utterance to all participants.
        for agent in self.agents:
            agent["message_history"].append(f"{speaker['name']}: {message}")
        self._step += 1
        return speaker["name"], message
|
141 |
+
|
142 |
+
|
143 |
+
def select_next_speaker(step: int, agents: List[dict]) -> int:
    """Round-robin selection: the speaker index cycles through the agent list."""
    agent_count = len(agents)
    return step % agent_count
|
145 |
+
|
146 |
+
|
147 |
+
# -- DEBATE INSIGHTS ------------------------------------------------------
|
148 |
+
def generate_summary():
    """
    Generate a moderator-style summary of the debate so far.

    Reads the full transcript from ``st.session_state.conversations`` and
    asks a dedicated GPT-4o moderator model to summarize it; returns the
    summary text.
    """
    conversation = "\n".join(st.session_state.conversations)
    moderator_llm = ChatOpenAI(model="gpt-4o", temperature=0.2)
    summary_prompt = [
        SystemMessage(content="You are an insightful moderator."),
        HumanMessage(content=f"Summarize the following debate:\n{conversation}\n"),
    ]
    with st.spinner("Generating summary..."):
        response = moderator_llm.invoke(summary_prompt)
    return response.content
|
161 |
+
|
162 |
+
|
163 |
+
def show_insights():
    """
    Render the post-debate analytics page: the generated summary, total
    message count, per-participant word counts, and a bar chart.
    """
    st.header("Debate Insights")

    # Display the summary
    st.subheader("Summary of the Debate")
    st.write(generate_summary())

    # Show message counts
    analytics = st.session_state.simulator.analytics
    st.subheader("Message Contributions")
    st.write(f"Total Messages: {analytics['total_messages']}")

    # Display Word Count Breakdown
    st.write("Word Count by Participant:")
    word_counts = analytics["word_counts"]
    for participant, words in word_counts.items():
        st.write(f"{participant}: {words} words")

    # Show charts
    st.subheader("Visual Analytics")
    st.bar_chart(word_counts)
|
188 |
+
|
189 |
+
|
190 |
# -- MAIN APP LOGIC -------------------------------------------------------
|
191 |
def multi_agent_debate():
|
192 |
+
"""
|
193 |
+
Main app logic for the multi-agent debate.
|
194 |
+
"""
|
195 |
initialize_session_state_variables()
|
196 |
st.title("Advanced Multi-Agent Debate System")
|
197 |
|
198 |
+
# Sidebar for API Keys
|
199 |
with st.sidebar:
|
200 |
st.write("### API Configuration")
|
201 |
openai_key = st.text_input("Enter OpenAI API Key", type="password")
|
|
|
218 |
description="Retrieve information from uploaded documents."
|
219 |
)
|
220 |
|
221 |
+
# Start Debate Button
|
222 |
if st.button("Start Debate"):
|
223 |
agents = [
|
224 |
{
|
|
|
241 |
|
242 |
st.write("## Debate In Progress")
|
243 |
|
244 |
+
# View Insights Button
|
245 |
if st.button("View Insights"):
|
246 |
show_insights()
|
247 |
|
248 |
|
249 |
+
# Run the app
|
250 |
if __name__ == "__main__":
|
251 |
multi_agent_debate()
|