Entz committed on
Commit
710a86a
·
verified ·
1 Parent(s): 7c9e9de

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +9 -4
  2. requirements.txt +2 -1
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import os
2
  import streamlit as st
3
  import instructor
 
4
  from atomic_agents.lib.components.agent_memory import AgentMemory
5
  from atomic_agents.lib.components.system_prompt_generator import SystemPromptGenerator
6
  from atomic_agents.agents.base_agent import BaseAgent, BaseAgentConfig, BaseAgentInputSchema, BaseAgentOutputSchema
@@ -22,15 +23,19 @@ def setup_client(provider):
22
  if not api_key:
23
  st.error("OPENAI_API_KEY not set in environment variables.")
24
  return None, None, None
25
- # Explicitly pass only the required arguments to AsyncOpenAI
26
- openai_client = AsyncOpenAI(api_key=api_key)
 
 
27
  client = instructor.from_openai(openai_client)
28
  model = "gpt-4o-mini"
29
  display_model = "OpenAI (gpt-4o-mini)"
30
  elif provider == "ollama":
31
  from openai import AsyncOpenAI as OllamaClient
32
- # Explicitly pass only the required arguments to OllamaClient
33
- ollama_client = OllamaClient(base_url="http://localhost:11434/v1", api_key="ollama")
 
 
34
  client = instructor.from_openai(ollama_client, mode=instructor.Mode.JSON)
35
  model = "llama3"
36
  display_model = "Ollama (llama3)"
 
1
  import os
2
  import streamlit as st
3
  import instructor
4
+ import httpx
5
  from atomic_agents.lib.components.agent_memory import AgentMemory
6
  from atomic_agents.lib.components.system_prompt_generator import SystemPromptGenerator
7
  from atomic_agents.agents.base_agent import BaseAgent, BaseAgentConfig, BaseAgentInputSchema, BaseAgentOutputSchema
 
23
  if not api_key:
24
  st.error("OPENAI_API_KEY not set in environment variables.")
25
  return None, None, None
26
+ # Create a custom HTTP client without proxies
27
+ http_client = httpx.AsyncClient()
28
+ # Initialize AsyncOpenAI with the custom HTTP client
29
+ openai_client = AsyncOpenAI(api_key=api_key, http_client=http_client)
30
  client = instructor.from_openai(openai_client)
31
  model = "gpt-4o-mini"
32
  display_model = "OpenAI (gpt-4o-mini)"
33
  elif provider == "ollama":
34
  from openai import AsyncOpenAI as OllamaClient
35
+ # Create a custom HTTP client without proxies
36
+ http_client = httpx.AsyncClient()
37
+ # Initialize OllamaClient with the custom HTTP client
38
+ ollama_client = OllamaClient(base_url="http://localhost:11434/v1", api_key="ollama", http_client=http_client)
39
  client = instructor.from_openai(ollama_client, mode=instructor.Mode.JSON)
40
  model = "llama3"
41
  display_model = "Ollama (llama3)"
requirements.txt CHANGED
@@ -2,4 +2,5 @@ streamlit==1.38.0
2
  instructor==1.3.4
3
  atomic-agents==0.2.1
4
  python-dotenv==1.0.1
5
- openai==1.35.12
 
 
2
  instructor==1.3.4
3
  atomic-agents==0.2.1
4
  python-dotenv==1.0.1
5
+ openai==1.35.12
6
+ httpx==0.27.0