Entz committed
Commit 7149301 · verified · 1 parent: 81420d9

Upload 3 files

Files changed (2)
  1. Dockerfile +45 -0
  2. app.py +9 -14
Dockerfile ADDED
@@ -0,0 +1,45 @@
+ # Use an official Python runtime as the base image
+ FROM python:3.11-slim
+
+ # Set working directory
+ WORKDIR /app
+
+ # Install system dependencies (curl for Ollama, git for pip)
+ RUN apt-get update && apt-get install -y \
+     curl \
+     git \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Install Ollama
+ RUN curl -fsSL https://ollama.com/install.sh | sh
+
+ ########################################################
+ # Pull llama3 model during build to avoid runtime delays
+ RUN ollama serve & \
+     until curl -s http://localhost:11434 > /dev/null; do \
+         echo 'Waiting for Ollama...'; sleep 1; \
+     done && \
+     ollama pull llama3 && \
+     ollama list > /app/models.txt && \
+     cat /app/models.txt
+ ########################################################
+
+
+ # Copy requirements file first (optimization for caching)
+ COPY requirements.txt .
+
+ # Install Python dependencies
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy only necessary application files (exclude .env)
+ COPY app.py .
+
+ # Expose the port Hugging Face Spaces expects
+ EXPOSE 7860
+
+ # Set environment variables for Ollama
+ ENV OLLAMA_HOST=0.0.0.0
+ ENV OLLAMA_PORT=11434
+
+ # Start Ollama and run Streamlit
+ CMD bash -c "ollama serve & until curl -s http://localhost:11434 > /dev/null; do echo 'Waiting for Ollama...'; sleep 1; done && streamlit run app.py --server.port 7860 --server.address 0.0.0.0"
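The CMD backgrounds `ollama serve` and polls `http://localhost:11434` with curl until the server answers before launching Streamlit. If the Python side ever needs the same readiness guard (for instance, when reconnecting after the server restarts), a minimal stdlib-only sketch could look like the following; the function name and timeout are illustrative, not part of this commit:

```python
import time
import urllib.error
import urllib.request


def wait_for_ollama(base_url: str = "http://localhost:11434", timeout: float = 60.0) -> bool:
    """Poll the Ollama server until it responds, mirroring the curl loop in CMD."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            # Ollama answers a plain GET on its root URL once it is up.
            with urllib.request.urlopen(base_url, timeout=2):
                return True
        except (urllib.error.URLError, OSError):
            time.sleep(1)  # not ready yet; retry, like the shell loop's `sleep 1`
    return False
```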
app.py CHANGED
@@ -7,7 +7,7 @@ from atomic_agents.agents.base_agent import BaseAgent, BaseAgentConfig, BaseAgen
  from dotenv import load_dotenv
  import asyncio

- # Load environment variables
+ # Load environment variables (optional if using Hugging Face Secrets)
  load_dotenv()

  # Initialize Streamlit app
@@ -20,23 +20,18 @@ def setup_client(provider):
          from openai import AsyncOpenAI
          api_key = os.getenv("OPENAI_API_KEY")
          if not api_key:
-             st.error("OPENAI_API_KEY not set in environment variables.")
-             return None, None, None
+             st.warning("OpenAI provider unavailable: OPENAI_API_KEY not set. Falling back to Ollama.")
+             return setup_client("ollama")  # Fallback to Ollama
          client = instructor.from_openai(AsyncOpenAI(api_key=api_key))
          model = "gpt-4o-mini"
          display_model = "OpenAI (gpt-4o-mini)"
      elif provider == "ollama":
          from openai import AsyncOpenAI as OllamaClient
-         try:
-             client = instructor.from_openai(
-                 OllamaClient(base_url="http://localhost:11434/v1", api_key="ollama"),
-                 mode=instructor.Mode.JSON
-             )
-             model = "llama-3.2-1b-instruct-q8_0"  # Use the local name after pulling
-             display_model = "Ollama (Llama 3.2 1B)"
-         except Exception as e:
-             st.error(f"Failed to connect to Ollama: {str(e)}. Ensure Ollama is running and the model is pulled.")
-             return None, None, None
+         client = instructor.from_openai(
+             OllamaClient(base_url="http://localhost:11434/v1", api_key="ollama"), mode=instructor.Mode.JSON
+         )
+         model = "llama3"
+         display_model = "Ollama (llama3)"
      else:
          st.error(f"Unsupported provider: {provider}")
          return None, None, None
@@ -55,7 +50,7 @@ system_prompt_generator = SystemPromptGenerator(
  )

  # Provider selection
- providers_list = ["openai", "ollama"]
+ providers_list = ["ollama", "openai"]  # Prioritize Ollama since it’s guaranteed to work
  selected_provider = st.selectbox("Choose a provider:", providers_list, key="provider_select")

  # Set up client and agent based on the selected provider
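Pieced together, the patched `setup_client` reads roughly as follows. This is a reconstruction from the hunks above: the enclosing `if provider == "openai":` line, the indentation, and the final return are inferred from context, since the diff does not show them.

```python
import os

import instructor
import streamlit as st
from openai import AsyncOpenAI


def setup_client(provider):
    if provider == "openai":  # inferred: the hunk starts inside this branch
        api_key = os.getenv("OPENAI_API_KEY")
        if not api_key:
            st.warning("OpenAI provider unavailable: OPENAI_API_KEY not set. Falling back to Ollama.")
            return setup_client("ollama")  # Fallback to Ollama
        client = instructor.from_openai(AsyncOpenAI(api_key=api_key))
        model = "gpt-4o-mini"
        display_model = "OpenAI (gpt-4o-mini)"
    elif provider == "ollama":
        # Ollama's OpenAI-compatible endpoint accepts any non-empty api_key.
        client = instructor.from_openai(
            AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="ollama"),
            mode=instructor.Mode.JSON,
        )
        model = "llama3"
        display_model = "Ollama (llama3)"
    else:
        st.error(f"Unsupported provider: {provider}")
        return None, None, None
    return client, model, display_model  # inferred: the diff does not show the tail
```

The net effect of the change: a missing OPENAI_API_KEY now degrades to the llama3 model baked into the image at build time instead of dead-ending with an error.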