# news_agent/tools.py
import os
from typing import Dict, List
import requests
from llama_index.core.llms import ChatMessage
from tavily import TavilyClient
from llms import llm_openai
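
# Module-level cache of the most recent get_news() results; the index-based
# tools below (implications, lead-up events, social media opinions) read from it.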
last_news = []


def get_news(query: str) -> List[Dict]:
    """
    Fetches news articles related to the specified query and returns a list of
    processed news items.

    Args:
        query: A string representing the search query for news articles.

    Returns:
        List[Dict]: A list of dictionaries, each containing:
            - index (int): The position of the article in the result list.
            - title (str): The title of the news article.
            - summary (str): The description or summary of the article.
        Returns an empty list if the search yields no results.
    """
    global last_news
    print(f"Fetching news for query: {query}")
    last_news.clear()  # Clear previous news to avoid duplication
    search_results = web_search(f'Find the latest news related to: {query}')
    if search_results:
        for res in search_results:
            last_news.append({
                "index": len(last_news) + 1,
                "title": res.get("title", "No title available"),
                "summary": res.get("content", "No summary available"),
            })
    print(f"Found {len(last_news)} articles.")
    return last_news


def generate_implications(article_index: int) -> str:
    """
    Generates a string describing the possible implications of a news article
    based on its index.

    Args:
        article_index: The 1-based index of the article in the global `last_news` list.

    Returns:
        str: A message containing the implications for the specified article,
        or an error message if the index is invalid.
    """
    global last_news
    print(f"Generating implications for article index: {article_index}")
    if not (1 <= article_index <= len(last_news)):
        return "Invalid article index."
    article = last_news[article_index - 1]
    summary = article["summary"]
    prompt = f"question: What are the possible implications of this news? context: {summary}"
    try:
        result = llm_openai.chat(
            messages=[ChatMessage(role="user", content=prompt)]
        )
    except Exception as e:
        return f"Error generating implications: {str(e)}"
    print(f"Generated implications: {result.message.content}")
    return f"Implications for article {article_index}: {result.message.content}"


def web_search(query: str) -> List[Dict]:
    """
    Performs a web search via the Tavily API and returns a list of results.

    Args:
        query: The search query string.

    Returns:
        List[Dict]: A list of result dictionaries from Tavily, each including
        keys such as 'title', 'url', and 'content'.
    """
    client = TavilyClient(os.environ.get("TAVILY_API_KEY"))
    response = client.search(query)
    print(f"Web search results for query '{query}': {response}")
    return response.get('results', [])


def browse_page(url: str, query: str) -> str:
    """
    Fetches the content of a web page and returns it as a string.

    Args:
        url: The URL of the web page to fetch.
        query: A query string describing what to look for in the page content
            (currently not used when fetching; the full page text is returned).

    Returns:
        str: The raw content of the web page, or an error message if the page
        cannot be fetched.
    """
    try:
        response = requests.get(url, timeout=30)
        response.raise_for_status()
        return response.text
    except requests.RequestException as e:
        return f"Error fetching page: {str(e)}"


def get_lead_up_events(article_index: int) -> str:
    """
    Retrieves a brief timeline or background of events leading up to a news
    article's topic.

    Given the index of a news article, this function searches for relevant
    historical context and generates a summary of the lead-up events.

    Args:
        article_index: The 1-based index of the article in the global `last_news` list.

    Returns:
        str: A formatted string summarizing the lead-up events or background
        information for the article's topic, or an error message if the index
        is invalid.
    """
    global last_news
    print(f"Getting lead-up events for article index: {article_index}")
    if not (1 <= article_index <= len(last_news)):
        return "Invalid article index."
    article = last_news[article_index - 1]
    historical_context = web_search(
        'What are the events leading up to ' + article["title"])
    prompt = f"""
    Make a chronology of the events leading up to this article.
    <article>
    {article["title"]}
    </article>
    <historical_context>
    {historical_context}
    </historical_context>
    """
    try:
        result = llm_openai.chat(
            messages=[ChatMessage(role="user", content=prompt)]
        )
    except Exception as e:
        return f"Error generating background information: {str(e)}"
    print(f"Generated background information: {result.message.content}")
    return f"Background information for article {article_index}: {result.message.content}"


def call_llm(prompt: str) -> str:
    """
    Calls the LLM with a given prompt and returns the response.

    Args:
        prompt: The input prompt to send to the LLM.

    Returns:
        str: The response from the LLM, or an error message if the call fails.
    """
    try:
        result = llm_openai.chat(
            messages=[ChatMessage(role="user", content=prompt)]
        )
        return result.message.content
    except Exception as e:
        return f"Error calling LLM: {str(e)}"


def get_social_media_opinions(article_index: int) -> str:
    """
    Summarizes social media opinions related to a news article by its index.

    Given the index of a news article, this function searches for positive and
    negative social media reactions to the article's topic and summarizes each
    group with the LLM.

    Args:
        article_index: The 1-based index of the article in the global `last_news` list.

    Returns:
        str: A summary string with the positive and negative opinions about the
        event, or an error message if the index is invalid.
    """
    global last_news
    print(f"Getting social media opinions for article index: {article_index}")
    if not (1 <= article_index <= len(last_news)):
        return "Invalid article index."
    article = last_news[article_index - 1]
    title = article["title"]
    pos_posts = web_search(
        f'What are the positive social media reactions related to: {title}?')
    neg_posts = web_search(
        f'What are the negative social media reactions related to: {title}?')
    # Summarize the positive and negative posts with the LLM
    pos_summary = call_llm(
        'Make a summary of the following social media posts: ' + str(pos_posts))
    neg_summary = call_llm(
        'Make a summary of the following social media posts: ' + str(neg_posts))
    print(f"Positive summary: {pos_summary}")
    print(f"Negative summary: {neg_summary}")
    return f"""
    Social Media Opinions for Article {article_index}:
    Positive Summary: {pos_summary}
    Negative Summary: {neg_summary}
    """