Entz committed
Commit e5687b3 · verified · 1 Parent(s): 87660bb

Upload 3 files

Files changed (3)
  1. Dockerfile +27 -0
  2. app.py +133 -0
  3. requirements.txt +4 -0
Dockerfile ADDED
@@ -0,0 +1,27 @@
+ # Use an official Python runtime as a parent image
+ FROM python:3.11-slim
+
+ # Set working directory
+ WORKDIR /app
+
+ # Copy the requirements file
+ COPY requirements.txt .
+
+ # Install dependencies
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Install Ollama
+ RUN apt-get update && apt-get install -y curl && rm -rf /var/lib/apt/lists/*
+ RUN curl -fsSL https://ollama.com/install.sh | sh
+
+ # Pre-download the llama3 model (the server must be running for `ollama pull` to work)
+ RUN ollama serve & sleep 5 && ollama pull llama3
+
+ # Copy the app code
+ COPY app.py .
+
+ # Expose the Streamlit port
+ EXPOSE 8501
+
+ # Start Ollama in the background and then run Streamlit
+ CMD ollama serve & streamlit run app.py --server.port 8501 --server.address 0.0.0.0
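
For reference, building and running this image locally might look like the sketch below; the math-chatbot tag is an arbitrary, hypothetical name, and the key pass-through is only needed when the "openai" provider is selected in the UI, so treat both as illustrative rather than part of the commit:

    # Build the image (the tag is a hypothetical choice)
    docker build -t math-chatbot .

    # Map the exposed Streamlit port and pass the OpenAI key through from the host
    docker run -p 8501:8501 -e OPENAI_API_KEY="$OPENAI_API_KEY" math-chatbot

Note that pre-pulling llama3 at build time bakes several gigabytes of model weights into the image; mounting a volume over /root/.ollama and pulling at first start is a common lighter-weight alternative.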
app.py ADDED
@@ -0,0 +1,133 @@
+ import os
+ import streamlit as st
+ import instructor
+ from atomic_agents.lib.components.agent_memory import AgentMemory
+ from atomic_agents.lib.components.system_prompt_generator import SystemPromptGenerator
+ from atomic_agents.agents.base_agent import BaseAgent, BaseAgentConfig, BaseAgentInputSchema, BaseAgentOutputSchema
+ from dotenv import load_dotenv
+ import asyncio
+
+ # Load environment variables
+ load_dotenv()
+
+ # Initialize Streamlit app
+ st.title("Math Reasoning Chatbot")
+ st.write("Select a provider and chat with the bot to solve math problems!")
+
+ # Function to set up the client based on the chosen provider
+ def setup_client(provider):
+     if provider == "openai":
+         from openai import AsyncOpenAI
+         api_key = os.getenv("OPENAI_API_KEY")
+         if not api_key:
+             st.error("OPENAI_API_KEY not set in environment variables.")
+             return None, None, None
+         client = instructor.from_openai(AsyncOpenAI(api_key=api_key))
+         model = "gpt-4o-mini"
+         display_model = "OpenAI (gpt-4o-mini)"
+     elif provider == "ollama":
+         from openai import AsyncOpenAI as OllamaClient
+         client = instructor.from_openai(
+             OllamaClient(base_url="http://localhost:11434/v1", api_key="ollama"), mode=instructor.Mode.JSON
+         )
+         model = "llama3"
+         display_model = "Ollama (llama3)"
+     # elif provider == "gemini":
+     #     from openai import AsyncOpenAI
+     #     api_key = os.getenv("GEMINI_API_KEY")
+     #     if not api_key:
+     #         st.error("GEMINI_API_KEY not set in environment variables.")
+     #         return None, None, None
+     #     client = instructor.from_openai(
+     #         AsyncOpenAI(api_key=api_key, base_url="https://generativelanguage.googleapis.com/v1beta/openai/"),
+     #         mode=instructor.Mode.JSON,
+     #     )
+     #     model = "gemini-2.0-flash-exp"
+     #     display_model = "Gemini (gemini-2.0-flash-exp)"
+     else:
+         st.error(f"Unsupported provider: {provider}")
+         return None, None, None
+     return client, model, display_model
+
+ # Custom system prompt
+ system_prompt_generator = SystemPromptGenerator(
+     background=["You are a math genius."],
+     steps=["Think logically step by step and solve a math problem."],
+     output_instructions=[
+         "Summarise your lengthy thinking process into the key problems encountered and their solutions, numbered in thinking order; do not recount every step.",
+         "Answer in plain English plus formulas.",
+         "Always respond using the proper JSON schema.",
+         "Always use the available additional information and context to enhance the response.",
+     ],
+ )
+
+ # Provider selection
+ providers_list = ["openai", "ollama"]
+ selected_provider = st.selectbox("Choose a provider:", providers_list, key="provider_select")
+
+ # Set up client and agent based on the selected provider
+ client, model, display_model = setup_client(selected_provider)
+ if client is None:
+     st.stop()
+
+ # Initialize or update the agent
+ st.session_state.display_model = display_model
+ if "agent" not in st.session_state or st.session_state.get("current_model") != model:
+     if "memory" not in st.session_state:
+         st.session_state.memory = AgentMemory()
+         initial_message = BaseAgentOutputSchema(chat_message="Hello! I'm here to help with math problems. What can I assist you with today?")
+         st.session_state.memory.add_message("assistant", initial_message)
+         st.session_state.conversation = [("assistant", initial_message.chat_message)]
+     st.session_state.agent = BaseAgent(config=BaseAgentConfig(
+         client=client,
+         model=model,
+         system_prompt_generator=system_prompt_generator,
+         memory=st.session_state.memory,
+         system_role="developer",
+     ))
+     st.session_state.current_model = model  # Track the current model to detect changes
+
+ # Display the selected model
+ st.markdown(f"**Selected Model:** {st.session_state.display_model}")
+
+ # Display the system prompt in an expander
+ with st.expander("View System Prompt"):
+     system_prompt = system_prompt_generator.generate_prompt()
+     st.text(system_prompt)
+
+ # Display conversation history using st.chat_message
+ for role, message in st.session_state.conversation:
+     with st.chat_message(role):
+         st.markdown(message)
+
+ # User input using st.chat_input
+ user_input = st.chat_input(placeholder="e.g., x^4 + a^4 = 0 find cf")
+
+ # Process the input and stream the response
+ if user_input:
+     # Add user message to conversation and memory
+     st.session_state.conversation.append(("user", user_input))
+     input_schema = BaseAgentInputSchema(chat_message=user_input)
+     st.session_state.memory.add_message("user", input_schema)
+
+     # Display user message immediately
+     with st.chat_message("user"):
+         st.markdown(user_input)
+
+     # Stream the response
+     with st.chat_message("assistant"):
+         response_container = st.empty()
+         async def stream_response():
+             current_response = ""
+             async for partial_response in st.session_state.agent.run_async(input_schema):
+                 if hasattr(partial_response, "chat_message") and partial_response.chat_message:
+                     if partial_response.chat_message != current_response:
+                         current_response = partial_response.chat_message
+                         response_container.markdown(current_response)
+
+             # After streaming completes, add the final response to conversation and memory
+             st.session_state.conversation.append(("assistant", current_response))
+             st.session_state.memory.add_message("assistant", BaseAgentOutputSchema(chat_message=current_response))
+
+         # Run the async function
+         asyncio.run(stream_response())
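
Since app.py calls load_dotenv(), the OpenAI key can also be supplied through a local .env file during development; a minimal sketch, with placeholder values:

    OPENAI_API_KEY=your-openai-key-here
    # GEMINI_API_KEY=...  (only relevant if the commented-out Gemini provider is re-enabled)

For quick debugging of the Ollama path outside Streamlit, the same OpenAI-compatible endpoint the app targets can be exercised directly; a minimal sketch reusing the connection values from setup_client (the prompt text is illustrative):

    from openai import OpenAI

    # Same endpoint and dummy key that app.py uses for the "ollama" provider
    client = OpenAI(base_url="http://localhost:11434/v1", api_key="ollama")

    response = client.chat.completions.create(
        model="llama3",
        messages=[{"role": "user", "content": "What is 7 * 8?"}],  # illustrative prompt
    )
    print(response.choices[0].message.content)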
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ streamlit==1.38.0
+ instructor==1.5.0
+ atomic-agents==0.2.0
+ python-dotenv==1.0.1
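
Outside Docker, the same pins support running the app directly; a minimal sketch, assuming a local Ollama install is already serving on its default port 11434 when the "ollama" provider is used:

    pip install -r requirements.txt
    streamlit run app.py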