Commit · 75f0fa4
Alberto Carmona committed
1 Parent(s): 0ff0452

Refactor Llama agent implementation and remove unused chat interface
Files changed:
- app_llamaindex.py +0 -43
- basic_llama_agent.py +7 -10
- llms.py +7 -0
- tools.py +1 -1
app_llamaindex.py DELETED
@@ -1,43 +0,0 @@
-
-import gradio as gr
-
-from basic_llama_agent import BasicLammaAgent
-
-agent_instance = BasicLammaAgent()
-
-
-async def chat_with_agent(user_input,
-                          history: list[tuple[str, str]],
-                          system_message,
-                          max_tokens,
-                          temperature,
-                          top_p):
-    # Use llama_index agent instead of smolagents agent
-    response = await agent_instance(user_input)
-    # Format news response for clarity
-    if user_input.lower() == "get news" and isinstance(response, list):
-        formatted_response = "Here are the latest news for your topics:\n"
-        for article in response:
-            if "error" in article:
-                formatted_response += article["error"] + "\n"
-            else:
-                formatted_response += (
-                    f"{article['index']}. Title: '{article['title']}'\n"
-                    f" Summary: '{article['summary']}'\n"
-                    f" Sentiment: {article['sentiment']}\n"
-                    f" Entities: {', '.join(article['entities']) if article['entities'] else 'None'}\n"
-                )
-        formatted_response += "Would you like to know implications, why it happened, or social media reactions for any of these articles? (e.g., 'implications for 1', 'why happened for 1', 'social media reaction for 1')"
-        return formatted_response
-    return response
-
-demo = gr.ChatInterface(
-    fn=chat_with_agent,
-    title="Personalized News Agent (llama_index)",
-    description="An agent that helps you manage your interests and get personalized news with sentiment, entity analysis, implications, background events, and social media opinion levels. (llama_index version)",
-    type="messages"
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
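
With app_llamaindex.py deleted, nothing in the repository drives the agent interactively any more. For local testing the agent can be called directly; a minimal driver sketch (the asyncio wrapper below is illustrative, not part of this commit):

import asyncio

from basic_llama_agent import BasicLammaAgent

async def main():
    agent = BasicLammaAgent()
    # BasicLammaAgent.__call__ is a coroutine, so it must be awaited.
    print(await agent("get news"))

if __name__ == "__main__":
    asyncio.run(main())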
basic_llama_agent.py CHANGED
@@ -1,9 +1,9 @@
 
 from llama_index.core.agent.workflow import FunctionAgent
-
-
-
-
+
+from llms import llm_azure_openai, llm_openai
+from tools import (generate_implications, get_lead_up_events, get_news,
+                   get_social_media_opinions)
 
 SYSTEM_PROMPT = """
 You are a news assistant.
@@ -22,24 +22,21 @@ For 'social media reaction', search for related posts, analyze their sentiment,
 
 class BasicLammaAgent:
     def __init__(self):
-        self.llm =
+        self.llm = llm_openai
 
         self.agent = FunctionAgent(
             llm=self.llm,
             system_prompt=SYSTEM_PROMPT,
-
-
-            tools=[get_news, generate_implications, get_lead_up_events, get_social_media_opinions],
+            tools=[get_news, generate_implications,
+                   get_lead_up_events, get_social_media_opinions],
         )
 
     async def __call__(self, q: str):
         response = await self.agent.run(user_msg=q)
 
-        # Extract final output message from AgentOutput
         if hasattr(response, "final_output") and hasattr(response.final_output, "content"):
             return response.final_output.content
 
-        # Fallbacks for unexpected formats
         if isinstance(response, str):
             return response
 
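The two checks in __call__ handle an AgentOutput whose final_output carries a content attribute, and a bare string; any other shape currently falls through and returns None. llama_index's own examples stringify the run result to obtain the final text, so a catch-all fallback could look like this (a sketch, not part of this commit):

    async def __call__(self, q: str):
        response = await self.agent.run(user_msg=q)

        if hasattr(response, "final_output") and hasattr(response.final_output, "content"):
            return response.final_output.content

        if isinstance(response, str):
            return response

        # Catch-all: llama_index examples call str() on the run result
        # to obtain the final response text.
        return str(response)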
llms.py CHANGED
@@ -1,5 +1,6 @@
 import os
 
+from llama_index.llms.openai import OpenAI
 from llama_index.llms.azure_openai import AzureOpenAI
 
 llm_azure_openai = AzureOpenAI(
@@ -10,3 +11,9 @@ llm_azure_openai = AzureOpenAI(
     api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
     api_version=os.environ.get("AZURE_OPENAI_API_VERSION"),
 )
+
+llm_openai = OpenAI(
+    model="gpt-4o-mini",
+    temperature=0.0,
+    api_key=os.environ.get("OPENAI_API_KEY"),
+)
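llms.py now builds both clients at import time, reading credentials from the environment. A fail-fast check along these lines (purely illustrative, not in the repository) would surface missing keys before the first request; the Azure endpoint and deployment settings live in unchanged lines this diff does not show, so the list is limited to the variables visible above:

import os

# Only the variables visible in this diff; llm_azure_openai also reads
# endpoint/deployment settings from lines the diff does not show.
REQUIRED = ("OPENAI_API_KEY", "AZURE_OPENAI_API_KEY", "AZURE_OPENAI_API_VERSION")

missing = [name for name in REQUIRED if not os.environ.get(name)]
if missing:
    raise SystemExit(f"Missing environment variables: {', '.join(missing)}")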
tools.py CHANGED
@@ -5,7 +5,7 @@ from typing import Dict, List
 import requests
 from llama_index.core.llms import ChatMessage
 
-from llms import llm_azure_openai
+from llms import llm_openai as llm_azure_openai
 
 
 last_news = []
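
The aliased import swaps the OpenAI client in under the old llm_azure_openai name, so none of the tool bodies that reference it need to change. Those bodies are not shown in this diff, but given the ChatMessage import they presumably invoke the client roughly as follows (a hypothetical sketch, with a made-up summarize helper, not code from the repository):

from llama_index.core.llms import ChatMessage

from llms import llm_openai as llm_azure_openai  # old name, new backend

def summarize(text: str) -> str:
    # Call sites written against llm_azure_openai now hit the OpenAI client.
    messages = [
        ChatMessage(role="system", content="Summarize the article in two sentences."),
        ChatMessage(role="user", content=text),
    ]
    return llm_azure_openai.chat(messages).message.content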