Alberto Carmona committed on
Commit
2ca40cc
·
1 Parent(s): 0c88f90

Remove AzureOpenAI references and update imports to use only llm_openai

Browse files
Files changed (3) hide show
  1. basic_llama_agent.py +1 -1
  2. llms.py +0 -11
  3. tools.py +5 -5
basic_llama_agent.py CHANGED
@@ -1,7 +1,7 @@
1
 
2
  from llama_index.core.agent.workflow import FunctionAgent
3
 
4
- from llms import llm_azure_openai, llm_openai
5
  from tools import (generate_implications, get_lead_up_events, get_news,
6
  get_social_media_opinions)
7
 
 
1
 
2
  from llama_index.core.agent.workflow import FunctionAgent
3
 
4
+ from llms import llm_openai
5
  from tools import (generate_implications, get_lead_up_events, get_news,
6
  get_social_media_opinions)
7
 
llms.py CHANGED
@@ -1,18 +1,7 @@
1
  import os
2
 
3
  from llama_index.llms.openai import OpenAI
4
- from llama_index.llms.azure_openai import AzureOpenAI
5
 
6
- # llm_azure_openai = AzureOpenAI(
7
- # model="gpt-4o-mini",
8
- # engine=os.environ.get("AZURE_OPENAI_MODEL"),
9
- # temperature=0.0,
10
- # azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
11
- # api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
12
- # api_version=os.environ.get("AZURE_OPENAI_API_VERSION"),
13
- # )
14
-
15
- llm_azure_openai = None
16
 
17
  llm_openai = OpenAI(
18
  model="gpt-4o-mini",
 
1
  import os
2
 
3
  from llama_index.llms.openai import OpenAI
 
4
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  llm_openai = OpenAI(
7
  model="gpt-4o-mini",
tools.py CHANGED
@@ -5,7 +5,7 @@ from typing import Dict, List
5
  import requests
6
  from llama_index.core.llms import ChatMessage
7
 
8
- from llms import llm_openai as llm_azure_openai
9
 
10
 
11
  last_news = []
@@ -76,7 +76,7 @@ def analyze_sentiment(text: str) -> str:
76
  question: Tell me if the sentiment of this news is positive, negative, or neutral?
77
  context: {text}"""
78
  try:
79
- result = llm_azure_openai.chat(
80
  messages=[ChatMessage(role="user", content=prompt)]
81
  )
82
  except Exception as e:
@@ -105,7 +105,7 @@ def recognize_entities(text: str) -> List[str]:
105
  question: Tell me entities mentioned in this news?
106
  context: {text}"""
107
  try:
108
- result = llm_azure_openai.chat(
109
  messages=[ChatMessage(role="user", content=prompt)]
110
  )
111
  except Exception as e:
@@ -132,7 +132,7 @@ def generate_implications(article_index: int) -> str:
132
  summary = article["summary"]
133
  prompt = f"question: What are the possible implications of this news? context: {summary}"
134
  try:
135
- result = llm_azure_openai.chat(
136
  messages=[ChatMessage(role="user", content=prompt)]
137
  )
138
  except Exception as e:
@@ -206,7 +206,7 @@ def get_lead_up_events(article_index: int) -> str:
206
  </historical_context>
207
  """
208
  try:
209
- result = llm_azure_openai.chat(
210
  messages=[ChatMessage(role="user", content=prompt)]
211
  )
212
  except Exception as e:
 
5
  import requests
6
  from llama_index.core.llms import ChatMessage
7
 
8
+ from llms import llm_openai
9
 
10
 
11
  last_news = []
 
76
  question: Tell me if the sentiment of this news is positive, negative, or neutral?
77
  context: {text}"""
78
  try:
79
+ result = llm_openai.chat(
80
  messages=[ChatMessage(role="user", content=prompt)]
81
  )
82
  except Exception as e:
 
105
  question: Tell me entities mentioned in this news?
106
  context: {text}"""
107
  try:
108
+ result = llm_openai.chat(
109
  messages=[ChatMessage(role="user", content=prompt)]
110
  )
111
  except Exception as e:
 
132
  summary = article["summary"]
133
  prompt = f"question: What are the possible implications of this news? context: {summary}"
134
  try:
135
+ result = llm_openai.chat(
136
  messages=[ChatMessage(role="user", content=prompt)]
137
  )
138
  except Exception as e:
 
206
  </historical_context>
207
  """
208
  try:
209
+ result = llm_openai.chat(
210
  messages=[ChatMessage(role="user", content=prompt)]
211
  )
212
  except Exception as e: