from llama_index.core.agent.workflow import FunctionAgent

from llms import llm_openai
from tools import (generate_implications, get_lead_up_events, get_news,
                   get_social_media_opinions)
SYSTEM_PROMPT = """
You are a news assistant.
Users can request news with 'get news'.
When getting news, fetch the latest articles for the current query, analyze their sentiment, extract entities, and then present them.
After presenting news, ask whether the user wants to know implications (e.g., 'implications for 1'), why it happened (e.g., 'why happened for 1'), or social media reactions (e.g., 'social media reaction for 1').
For 'implications', generate possible implications.
For 'why happened', extract the key event, search the web for its history, and present a chronological list.
For 'social media reaction', search for related posts, analyze their sentiment, and report the level of positive and negative opinions as 'low', 'medium', or 'high'.
"""

class BasicLammaAgent:
    """Thin wrapper around a llama_index FunctionAgent for news queries."""

    def __init__(self):
        self.llm = llm_openai
        self.agent = FunctionAgent(
            llm=self.llm,
            system_prompt=SYSTEM_PROMPT,
            tools=[get_news, generate_implications,
                   get_lead_up_events, get_social_media_opinions],
        )

    async def __call__(self, q: str):
        response = await self.agent.run(user_msg=q)
        # The workflow result shape can vary across llama_index versions,
        # so unwrap defensively and fall back to the string representation.
        if hasattr(response, "final_output") and hasattr(response.final_output, "content"):
            return response.final_output.content
        if isinstance(response, str):
            return response
        return str(response)
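
if __name__ == "__main__":
    # Usage sketch: run one query against the agent from the command line.
    # Assumes the credentials required by the llm_openai instance imported
    # above (e.g., an OpenAI API key) are already configured.
    import asyncio

    agent = BasicLammaAgent()
    answer = asyncio.run(agent("get news about renewable energy"))
    print(answer)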