Entz committed
Commit 71aa5c6 · verified · 1 Parent(s): ec01080

Upload 2 files

Files changed (2)
  1. Dockerfile +2 -2
  2. app.py +17 -10
Dockerfile CHANGED
@@ -23,5 +23,5 @@ COPY app.py .
 # Expose the Streamlit port
 EXPOSE 8501
 
-# Start Ollama in the background, wait 5 seconds, then run Streamlit
-CMD ollama serve & sleep 5 && streamlit run app.py --server.port 8501 --server.address 0.0.0.0
+# Start Ollama in the background, log output, wait 10 seconds, then run Streamlit
+CMD ollama serve > ollama.log 2>&1 & sleep 10 && streamlit run app.py --server.port 8501 --server.address 0.0.0.0
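The new CMD still guesses how long ollama serve needs by sleeping a fixed 10 seconds before launching Streamlit. A more robust alternative is to poll the server until it answers. A minimal sketch of such a helper, assuming a hypothetical wait_for_ollama.py copied into the image (not part of this commit):

    # wait_for_ollama.py - hypothetical helper, not part of this commit.
    # Polls the Ollama endpoint until it responds instead of sleeping a fixed time.
    import sys
    import time
    import urllib.request

    OLLAMA_URL = "http://localhost:11434"  # default Ollama address, matching app.py

    def wait_for_ollama(timeout=60, interval=2):
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            try:
                with urllib.request.urlopen(OLLAMA_URL, timeout=interval) as resp:
                    if resp.status == 200:
                        return True
            except OSError:  # connection refused or timeout while the server starts
                pass
            time.sleep(interval)
        return False

    if __name__ == "__main__":
        sys.exit(0 if wait_for_ollama() else 1)

The CMD could then chain ollama serve > ollama.log 2>&1 & python wait_for_ollama.py && streamlit run app.py ..., failing fast if the server never comes up instead of racing a timer.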
app.py CHANGED
@@ -7,6 +7,7 @@ from atomic_agents.agents.base_agent import BaseAgent, BaseAgentConfig, BaseAgen
 from dotenv import load_dotenv
 import asyncio
 import httpx
+import time
 
 # Load environment variables
 load_dotenv()
@@ -15,14 +16,20 @@ load_dotenv()
 st.title("Math Reasoning Chatbot")
 st.write("Select a provider and chat with the bot to solve math problems!")
 
-# Function to check if the Ollama server is running
-async def check_ollama_health():
-    try:
-        async with httpx.AsyncClient() as client:
-            response = await client.get("http://localhost:11434/v1")
-            return response.status_code == 200
-    except httpx.RequestError:
-        return False
+# Function to check if the Ollama server is running with retries
+async def check_ollama_health(max_retries=5, retry_delay=5):
+    for attempt in range(max_retries):
+        try:
+            async with httpx.AsyncClient() as client:
+                response = await client.get("http://localhost:11434/v1")
+                if response.status_code == 200:
+                    return True
+        except httpx.RequestError:
+            if attempt < max_retries - 1:
+                st.warning(f"Ollama server not yet available (attempt {attempt + 1}/{max_retries}). Retrying in {retry_delay} seconds...")
+                await asyncio.sleep(retry_delay)
+                continue
+        return False
 
 # Function to set up the client based on the chosen provider
 def setup_client(provider):
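Note that as committed, check_ollama_health retries only on httpx.RequestError; a reachable server that returns a non-200 status makes the function return False on the first attempt. If a bad status should also be retried, one variant looks like this (a sketch, not the committed code):

    # Variant sketch: retry on connection errors AND on non-200 responses.
    # Same signature and defaults as the committed check_ollama_health.
    import asyncio
    import httpx

    async def check_ollama_health(max_retries=5, retry_delay=5):
        for attempt in range(max_retries):
            try:
                async with httpx.AsyncClient() as client:
                    response = await client.get("http://localhost:11434/v1")
                    if response.status_code == 200:
                        return True
            except httpx.RequestError:
                pass  # treat connection failures like any other failed attempt
            if attempt < max_retries - 1:
                await asyncio.sleep(retry_delay)
        return False

With the committed defaults (max_retries=5, retry_delay=5), the check can spend roughly 20 seconds sleeping between attempts before giving up, on top of the Dockerfile's initial 10-second wait.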
 
@@ -37,9 +44,9 @@ def setup_client(provider):
         display_model = "OpenAI (gpt-4o-mini)"
     elif provider == "ollama":
         from openai import AsyncOpenAI as OllamaClient
-        # Check if Ollama server is running
+        # Check if Ollama server is running with retries
        if not asyncio.run(check_ollama_health()):
-            st.error("Ollama server is not running or not accessible at http://localhost:11434. Please try again later or select a different provider.")
+            st.error("Ollama server is not running or not accessible at http://localhost:11434 after multiple attempts. Please try again later or select a different provider.")
             return None, None, None
         client = instructor.from_openai(
             OllamaClient(base_url="http://localhost:11434/v1", api_key="ollama"), mode=instructor.Mode.JSON
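For context on this last hunk: instructor.from_openai wraps the OpenAI-compatible client (here pointed at Ollama's /v1 endpoint, with a placeholder api_key that Ollama ignores) so that chat completions can be validated into Pydantic models. A usage sketch, where the model name llama3 and the MathAnswer schema are illustrative assumptions rather than anything in this commit:

    # Usage sketch for an instructor-wrapped Ollama client.
    # "llama3" and MathAnswer are assumptions, not taken from this commit.
    import asyncio
    import instructor
    from openai import AsyncOpenAI
    from pydantic import BaseModel

    class MathAnswer(BaseModel):
        reasoning: str
        answer: float

    client = instructor.from_openai(
        AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="ollama"),
        mode=instructor.Mode.JSON,  # request plain JSON and validate it client-side
    )

    async def main():
        result = await client.chat.completions.create(
            model="llama3",  # assumed: any model already pulled into Ollama
            messages=[{"role": "user", "content": "What is 12 * 7?"}],
            response_model=MathAnswer,  # instructor parses the reply into this schema
        )
        print(result.answer)

    asyncio.run(main())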