Upload 2 files
- app.py +19 -15
- requirements.txt +5 -5
app.py
CHANGED
@@ -1,7 +1,6 @@
 import os
 import streamlit as st
 import instructor
-import httpx
 from atomic_agents.lib.components.agent_memory import AgentMemory
 from atomic_agents.lib.components.system_prompt_generator import SystemPromptGenerator
 from atomic_agents.agents.base_agent import BaseAgent, BaseAgentConfig, BaseAgentInputSchema, BaseAgentOutputSchema
@@ -23,22 +22,28 @@ def setup_client(provider):
         if not api_key:
             st.error("OPENAI_API_KEY not set in environment variables.")
             return None, None, None
-
-        http_client = httpx.AsyncClient()
-        # Initialize AsyncOpenAI with the custom HTTP client
-        openai_client = AsyncOpenAI(api_key=api_key, http_client=http_client)
-        client = instructor.from_openai(openai_client)
+        client = instructor.from_openai(AsyncOpenAI(api_key=api_key))
         model = "gpt-4o-mini"
         display_model = "OpenAI (gpt-4o-mini)"
     elif provider == "ollama":
         from openai import AsyncOpenAI as OllamaClient
-
-
-
-        ollama_client = OllamaClient(base_url="http://localhost:11434/v1", api_key="ollama", http_client=http_client)
-        client = instructor.from_openai(ollama_client, mode=instructor.Mode.JSON)
+        client = instructor.from_openai(
+            OllamaClient(base_url="http://localhost:11434/v1", api_key="ollama"), mode=instructor.Mode.JSON
+        )
         model = "llama3"
         display_model = "Ollama (llama3)"
+    # elif provider == "gemini":
+    #     from openai import AsyncOpenAI
+    #     api_key = os.getenv("GEMINI_API_KEY")
+    #     if not api_key:
+    #         st.error("GEMINI_API_KEY not set in environment variables.")
+    #         return None, None, None
+    #     client = instructor.from_openai(
+    #         AsyncOpenAI(api_key=api_key, base_url="https://generativelanguage.googleapis.com/v1beta/openai/"),
+    #         mode=instructor.Mode.JSON,
+    #     )
+    #     model = "gemini-2.0-flash-exp"
+    #     display_model = "Gemini (gemini-2.0-flash-exp)"
     else:
         st.error(f"Unsupported provider: {provider}")
         return None, None, None
@@ -71,8 +76,7 @@ if "agent" not in st.session_state or st.session_state.get("current_model") != m
 if "memory" not in st.session_state:
     st.session_state.memory = AgentMemory()
     initial_message = BaseAgentOutputSchema(chat_message="Hello! I'm here to help with math problems. What can I assist you with today?")
-
-    st.session_state.memory.add_message("assistant", initial_message.chat_message)
+    st.session_state.memory.add_message("assistant", initial_message)
     st.session_state.conversation = [("assistant", initial_message.chat_message)]
     st.session_state.agent = BaseAgent(config=BaseAgentConfig(
         client=client,
@@ -104,7 +108,7 @@ if user_input:
     # Add user message to conversation and memory
    st.session_state.conversation.append(("user", user_input))
     input_schema = BaseAgentInputSchema(chat_message=user_input)
-    st.session_state.memory.add_message("user", input_schema.chat_message)
+    st.session_state.memory.add_message("user", input_schema)
 
     # Display user message immediately
     with st.chat_message("user"):
@@ -123,7 +127,7 @@ if user_input:
 
         # After streaming completes, add the final response to conversation and memory
         st.session_state.conversation.append(("assistant", current_response))
-        st.session_state.memory.add_message("assistant", current_response)
+        st.session_state.memory.add_message("assistant", BaseAgentOutputSchema(chat_message=current_response))
 
     # Run the async function
     asyncio.run(stream_response())
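The substantive change in app.py is twofold: the hand-rolled httpx.AsyncClient is dropped in favor of the OpenAI SDK's built-in HTTP handling, and AgentMemory.add_message now receives schema objects instead of raw strings, matching the atomic-agents revision pinned below. A minimal sketch of the migrated memory call pattern (the message text is illustrative; the add_message signature is assumed from the changed call sites in this diff):

# Sketch: memory entries are stored as input/output schema objects rather
# than plain strings. Assumes the pinned atomic-agents revision accepts a
# schema instance as the message content, as the call sites above do.
from atomic_agents.lib.components.agent_memory import AgentMemory
from atomic_agents.agents.base_agent import BaseAgentInputSchema, BaseAgentOutputSchema

memory = AgentMemory()
memory.add_message("user", BaseAgentInputSchema(chat_message="What is 2 + 2?"))
memory.add_message("assistant", BaseAgentOutputSchema(chat_message="2 + 2 = 4."))

The Gemini branch is added commented out; enabling it would route requests through Google's OpenAI-compatible endpoint with instructor's JSON mode, mirroring the Ollama setup.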
requirements.txt
CHANGED
@@ -1,6 +1,6 @@
-streamlit==1.
-instructor==1.3
-atomic-agents
+streamlit==1.44.1
+instructor==1.6.3
+-e git+https://github.com/BrainBlend-AI/atomic-agents.git@283da044f6a22b993dae0751c08dbae52ab2d2df#egg=atomic_agents
 python-dotenv==1.0.1
-openai==1.
-httpx==0.27.
+openai==1.54.3
+httpx==0.27.2
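Note on the dependency pin: the -e git+...#egg=atomic_agents line makes pip install -r requirements.txt clone the atomic-agents repository and install it in editable mode at the exact commit 283da04, rather than pulling a release from PyPI, presumably to pick up the schema-based add_message behavior used above. httpx stays pinned even though app.py no longer imports it directly, since the openai client depends on it transitively.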