Alberto Carmona committed on
Commit
a7ce4c8
·
1 Parent(s): d4972e3

Add initial implementation of news agent with llama_index integration

Browse files
Files changed (7) hide show
  1. .gitignore +4 -0
  2. app.py +108 -4
  3. app_llamaindex.py +55 -0
  4. basic_llama_agent.py +49 -0
  5. llms.py +12 -0
  6. requirements.txt +8 -1
  7. tools.py +309 -0
.gitignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ venv
2
+ .env
3
+ .vscode
4
+ __pycache__
app.py CHANGED
@@ -1,11 +1,48 @@
 
 
 
1
  import gradio as gr
 
2
  from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
3
 
4
  """
5
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
  """
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
  def respond(
11
  message,
@@ -40,15 +77,82 @@ def respond(
40
  yield response
41
 
42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
  """
44
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
  """
46
- demo = gr.ChatInterface(
47
  respond,
48
  additional_inputs=[
49
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
 
 
 
52
  gr.Slider(
53
  minimum=0.1,
54
  maximum=1.0,
 
1
+ import os
2
+ from typing import Dict, List
3
+
4
  import gradio as gr
5
+ import requests
6
  from huggingface_hub import InferenceClient
7
+ from smolagents import CodeAgent, OpenAIServerModel, tool, AzureOpenAIServerModel
8
+ import openai
9
+ from transformers import pipeline
10
+
11
+ from tools import (add_topic, analyze_sentiment, generate_implications,
12
+ get_lead_up_events, get_news, get_social_media_opinions,
13
+ recognize_entities, remove_topic)
14
 
15
  """
16
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
17
  """
18
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
19
 
20
+ model = AzureOpenAIServerModel(
21
+ model_id = os.environ.get("AZURE_OPENAI_MODEL"),
22
+ azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
23
+ api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
24
+ api_version=os.environ.get("OPENAI_API_VERSION")
25
+ )
26
+
27
+
28
+ # import openai
29
+
30
+ # client = openai.OpenAI(
31
+ # api_key=os.environ.get("SAMBANOVA_API_KEY"),
32
+ # base_url="https://api.sambanova.ai/v1",
33
+ # )
34
+
35
+ SAMBANOVA_API_KEY = os.environ.get("SAMBANOVA_API_KEY")
36
+ SAMBANOVA_API_BASE_URL = "https://api.sambanova.ai/v1"
37
+
38
+
39
def get_model(model_id):
    """Build an OpenAI-compatible smolagents model client for SambaNova.

    Args:
        model_id: Identifier of the SambaNova-hosted model to use.

    Returns:
        OpenAIServerModel: A model wrapper pointed at the SambaNova endpoint.
    """
    # SambaNova exposes an OpenAI-compatible API, so the generic
    # OpenAIServerModel works with just a different base URL.
    sambanova_model = OpenAIServerModel(
        model_id=model_id,
        api_key=SAMBANOVA_API_KEY,
        api_base=SAMBANOVA_API_BASE_URL,
    )
    return sambanova_model
45
+
46
 
47
  def respond(
48
  message,
 
77
  yield response
78
 
79
 
80
+ # Initialize the agent
81
+ agent = CodeAgent(
82
+ # model=get_model('Llama-4-Maverick-17B-128E-Instruct'),
83
+ model=model,
84
+ system_prompt="""
85
+ You are a news assistant.
86
+ You can add topics with 'add: topic', remove with 'remove: topic', and get news with 'get news'.
87
+
88
+ When getting news, fetch the latest articles for the current topics, analyze their sentiment and extract entities, then present them.
89
+ After presenting news, ask if the user wants to know implications (e.g., 'implications for 1'), why it happened (e.g., 'why happened for 1'), or social media reactions (e.g., 'social media reaction for 1').
90
+
91
+ For 'implications', generate possible implications.
92
+
93
+ For 'why happened', extract the key event, search Wikipedia for its history, and present a chronological list.
94
+
95
+ For 'social media reaction', search X for related posts, analyze their sentiment, and present the level of positive and negative opinions as 'low', 'medium', or 'high'.
96
+
97
+ Authorized imports:
98
+ {{authorized_imports}}
99
+
100
+ Tools:
101
+ {{tool_descriptions}}
102
+
103
+ Managed Agents:
104
+ {{managed_agents_descriptions}}""",
105
+ # tools = []
106
+ tools=[add_topic, remove_topic, get_news, analyze_sentiment, recognize_entities,
107
+ generate_implications, get_lead_up_events, get_social_media_opinions],
108
+ )
109
+
110
+ # Gradio chat interface
111
+
112
+
113
def _format_news_response(articles):
    """Render the article dicts returned by `get_news` as chat-friendly text.

    Args:
        articles: List of dicts, each with 'index', 'title', 'summary',
            'sentiment' and 'entities' keys, or a single 'error' key.

    Returns:
        str: A human-readable news summary followed by a follow-up prompt.
    """
    parts = ["Here are the latest news for your topics:\n"]
    for article in articles:
        if "error" in article:
            parts.append(article["error"] + "\n")
        else:
            entity_text = ", ".join(article["entities"]) if article["entities"] else "None"
            parts.append(
                f"{article['index']}. Title: '{article['title']}'\n"
                f"   Summary: '{article['summary']}'\n"
                f"   Sentiment: {article['sentiment']}\n"
                f"   Entities: {entity_text}\n"
            )
    parts.append("Would you like to know implications, why it happened, or social media reactions for any of these articles? (e.g., 'implications for 1', 'why happened for 1', 'social media reaction for 1')")
    return "".join(parts)


def chat_with_agent(user_input,
                    history: list[tuple[str, str]],
                    system_message,
                    max_tokens,
                    temperature,
                    top_p):
    """Gradio chat handler that routes the user's message to the smolagents agent.

    Args:
        user_input: The user's chat message.
        history: Prior (user, assistant) message pairs (unused; provided by Gradio).
        system_message, max_tokens, temperature, top_p: Extra Gradio inputs
            (unused here; kept for interface compatibility with `respond`).

    Returns:
        str: The agent's answer, with news lists pretty-printed.
    """
    response = agent.run(user_input)
    # Fix: strip whitespace so inputs like ' get news ' still trigger
    # the formatted news rendering (exact raw-string match missed them).
    if user_input.strip().lower() == "get news" and isinstance(response, list):
        return _format_news_response(response)
    return response
136
+
137
+
138
+ demo = gr.ChatInterface(
139
+ fn=chat_with_agent,
140
+ title="Personalized News Agent",
141
+ description="An agent that helps you manage your interests and get personalized news with sentiment, entity analysis, implications, background events, and social media opinion levels."
142
+ )
143
+
144
  """
145
  For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
146
  """
147
+ demo2 = gr.ChatInterface(
148
  respond,
149
  additional_inputs=[
150
+ gr.Textbox(value="You are a friendly Chatbot.",
151
+ label="System message"),
152
+ gr.Slider(minimum=1, maximum=2048, value=512,
153
+ step=1, label="Max new tokens"),
154
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7,
155
+ step=0.1, label="Temperature"),
156
  gr.Slider(
157
  minimum=0.1,
158
  maximum=1.0,
app_llamaindex.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Dict, List
3
+
4
+ import gradio as gr
5
+ import requests
6
+ from huggingface_hub import InferenceClient
7
+ # from smolagents import CodeAgent, OpenAIServerModel, tool, AzureOpenAIServerModel
8
+ # import openai
9
+ from transformers import pipeline
10
+
11
+ from tools import (add_topic, analyze_sentiment, generate_implications,
12
+ get_lead_up_events, get_news, get_social_media_opinions,
13
+ recognize_entities, remove_topic)
14
+ from llama_index.llms.azure_openai import AzureOpenAI
15
+ from llama_index.core.agent.workflow import FunctionAgent
16
+ from basic_llama_agent import BasicLammaAgent
17
+
18
+ agent_instance = BasicLammaAgent()
19
+
20
+
21
async def chat_with_agent(user_input,
                          history: list[tuple[str, str]],
                          system_message,
                          max_tokens,
                          temperature,
                          top_p):
    """Gradio chat handler backed by the llama_index agent wrapper.

    Args:
        user_input: The user's chat message.
        history: Prior (user, assistant) pairs (unused; supplied by Gradio).
        system_message, max_tokens, temperature, top_p: Extra Gradio inputs
            (unused; kept for interface compatibility).

    Returns:
        The agent's response; 'get news' list responses are formatted as text.
    """
    # Use llama_index agent instead of smolagents agent
    response = await agent_instance(user_input)
    if user_input.lower() == "get news" and isinstance(response, list):
        # Pretty-print the list of article dicts for the chat window.
        parts = ["Here are the latest news for your topics:\n"]
        for article in response:
            if "error" in article:
                parts.append(article["error"] + "\n")
            else:
                entity_text = ", ".join(article["entities"]) if article["entities"] else "None"
                parts.append(
                    f"{article['index']}. Title: '{article['title']}'\n"
                    f"   Summary: '{article['summary']}'\n"
                    f"   Sentiment: {article['sentiment']}\n"
                    f"   Entities: {entity_text}\n"
                )
        parts.append("Would you like to know implications, why it happened, or social media reactions for any of these articles? (e.g., 'implications for 1', 'why happened for 1', 'social media reaction for 1')")
        return "".join(parts)
    return response
45
+
46
+ demo = gr.ChatInterface(
47
+ fn=chat_with_agent,
48
+ title="Personalized News Agent (llama_index)",
49
+ description="An agent that helps you manage your interests and get personalized news with sentiment, entity analysis, implications, background events, and social media opinion levels. (llama_index version)",
50
+ type="messages"
51
+ )
52
+
53
+
54
+ if __name__ == "__main__":
55
+ demo.launch()
basic_llama_agent.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ from llama_index.llms.azure_openai import AzureOpenAI
4
+ from llama_index.core.agent.workflow import FunctionAgent
5
+ from tools import (add_topic, remove_topic, get_news, analyze_sentiment,
6
+ recognize_entities, generate_implications, get_lead_up_events,
7
+ get_social_media_opinions)
8
+ from llms import llm_azure_openai
9
+
10
+ SYSTEM_PROMPT = """
11
+ You are a news assistant.
12
+ You can add topics with 'add: topic', remove with 'remove: topic', and get news with 'get news'.
13
+
14
+ When getting news, fetch the latest articles for the current topics, analyze their sentiment and extract entities, then present them.
15
+ After presenting news, ask if the user wants to know implications (e.g., 'implications for 1'), why it happened (e.g., 'why happened for 1'), or social media reactions (e.g., 'social media reaction for 1').
16
+
17
+ For 'implications', generate possible implications.
18
+
19
+ For 'why happened', extract the key event, search Wikipedia for its history, and present a chronological list.
20
+
21
+ For 'social media reaction', search X for related posts, analyze their sentiment, and present the level of positive and negative opinions as 'low', 'medium', or 'high'.
22
+ """
23
+
24
+
25
class BasicLammaAgent:
    """Thin async wrapper around a llama_index FunctionAgent wired to the news tools."""

    def __init__(self):
        # Shared Azure OpenAI LLM instance configured in llms.py.
        self.llm = llm_azure_openai

        self.agent = FunctionAgent(
            llm=self.llm,
            system_prompt=SYSTEM_PROMPT,
            tools=[
                add_topic, remove_topic, get_news, analyze_sentiment,
                recognize_entities, generate_implications, get_lead_up_events,
                get_social_media_opinions,
            ],
        )

    async def __call__(self, q: str):
        """Run the agent on `q` and return a plain answer.

        Args:
            q: The user's message.

        Returns:
            The final output's content when the agent returns a structured
            AgentOutput; otherwise the response itself (stringified if needed).
        """
        result = await self.agent.run(user_msg=q)

        # Prefer the structured final output message when present.
        if hasattr(result, "final_output") and hasattr(result.final_output, "content"):
            return result.final_output.content

        # Fallbacks for unexpected response shapes.
        return result if isinstance(result, str) else str(result)
llms.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ from llama_index.llms.azure_openai import AzureOpenAI
4
+
5
+ llm_azure_openai = AzureOpenAI(
6
+ engine=os.environ.get("AZURE_OPENAI_MODEL"),
7
+ model="gpt-4o-mini",
8
+ temperature=0.0,
9
+ azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
10
+ api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
11
+ api_version=os.environ.get("AZURE_OPENAI_API_VERSION"),
12
+ )
requirements.txt CHANGED
@@ -1 +1,8 @@
1
- huggingface_hub==0.25.2
 
 
 
 
 
 
 
 
1
+ huggingface_hub==0.25.2
2
+ gradio
3
+ smolagents
4
+ transformers
5
+ requests
6
+ openai
7
+ llama-index
8
+ llama-index-llms-azure-openai
tools.py ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import Dict, List
3
+
4
+ import requests
5
+
6
+ from llms import llm_azure_openai
7
+
8
+ from llama_index.core.llms import ChatMessage
9
+
10
+ # Global state for simplicity (use database in production)
11
+ topics = []
12
+ last_news = []
13
+
14
+ # Initialize pipelines for sentiment analysis, NER, and generative QA
15
+
16
+ # Tool to add a topic to the interest list
17
+
18
+
19
def add_topic(new_topic: str) -> str:
    """
    Adds a new topic to the global topics list.

    Args:
        new_topic: The topic to be added.

    Returns:
        str: A confirmation message indicating the topic was added, or a
        notice if it was already present.
    """
    global topics
    # Guard against duplicates: get_news joins topics with ' OR ', so
    # repeated entries would just bloat the query string.
    if new_topic in topics:
        return f"{new_topic} is already in topics."
    topics.append(new_topic)
    return f"Added {new_topic} to topics."
32
+
33
+ # Tool to remove a topic from the interest list
34
+
35
+
36
def remove_topic(topic: str) -> str:
    """
    Removes a specified topic from the global topics list.

    Args:
        topic: The topic to be removed from the topics list.

    Returns:
        str: A message indicating whether the topic was removed or not found.
    """
    global topics
    try:
        # EAFP: attempt the removal and handle the "not present" case.
        topics.remove(topic)
    except ValueError:
        return f"{topic} not found in topics."
    return f"Removed {topic} from topics."
52
+
53
+ # Tool to fetch news articles using NewsAPI
54
+
55
+
56
def get_news(topics: List[str]) -> List[Dict]:
    """
    Fetches news articles for the given topics via NewsAPI, annotates each
    with sentiment and named entities, and caches the result in `last_news`.

    Args:
        topics: A list of topics to search for in news articles.
            (NOTE: this parameter shadows the module-level `topics` list.)

    Returns:
        List[Dict]: One dict per article containing:
            - index (int): 1-based position in the result list.
            - title (str): The article title.
            - summary (str): The article description.
            - sentiment: Result of `analyze_sentiment` on the summary.
            - entities: Result of `recognize_entities` on the summary.
        On request failure, a single-element list with an "error" key.

    Raises:
        None. All request exceptions are caught and returned as error messages.
    """
    global last_news
    api_key = os.environ.get("NEWS_API_KEY")
    base_url = "https://newsapi.org/v2/everything"
    queries = " OR ".join(topics)
    params = {
        "q": queries,
        "apiKey": api_key,
        "pageSize": 10,
    }
    try:
        # timeout added: without it a stalled connection hangs the agent forever.
        response = requests.get(base_url, params=params, timeout=10)
        response.raise_for_status()
        data = response.json()
        articles = data.get("articles", [])
        last_news = []
        for idx, article in enumerate(articles, 1):
            # 'or' fallback covers descriptions that are present but null,
            # which dict.get's default alone would not.
            summary = article.get("description") or "No description available"
            sentiment = analyze_sentiment(summary)
            entities = recognize_entities(summary)
            last_news.append({
                "index": idx,
                "title": article.get("title", "No title"),
                "summary": summary,
                "sentiment": sentiment,
                "entities": entities
            })
        return last_news
    except requests.RequestException as e:
        return [{"error": f"Failed to fetch news: {str(e)}"}]
104
+
105
+ # Tool for sentiment analysis
106
+
107
+
108
def analyze_sentiment(text: str) -> str:
    """
    Analyzes the sentiment of the given text and returns the sentiment label.

    Args:
        text: The input text to analyze.

    Returns:
        str: The sentiment label, such as 'positive', 'negative', or 'neutral'.
    """
    # Placeholder implementation: every input is labeled 'positive' until a
    # real sentiment model is plugged in.
    label = "positive"
    return label
119
+
120
+ # Tool for named entity recognition
121
+
122
+
123
def recognize_entities(text: str) -> List[str]:
    """
    Recognizes named entities in the given text.

    Args:
        text: The input text in which to recognize entities.

    Returns:
        List[str]: A list of recognized entity names as strings.
    """
    # Placeholder entities until a real NER pipeline is wired in.
    return ["Entity1", "Entity2", "Entity3"]
136
+
137
+ # Tool to generate implications for an article
138
+
139
+
140
def generate_implications(article_index: int) -> str:
    """
    Generates a string describing the possible implications of a news article
    based on its index.

    Args:
        article_index: The 1-based index of the article in the global
            `last_news` list.

    Returns:
        str: A message containing the implications for the specified article,
        or an error message if the index is invalid or the LLM call fails.
    """
    global last_news
    # Guard clause: indices are 1-based into last_news.
    if article_index < 1 or article_index > len(last_news):
        return "Invalid article index."
    summary = last_news[article_index - 1]["summary"]
    prompt = f"question: What are the possible implications of this news? context: {summary}"
    try:
        # Single-turn chat call against the shared Azure OpenAI LLM.
        result = llm_azure_openai.chat(
            messages=[ChatMessage(role="user", content=prompt)]
        )
    except Exception as e:
        return f"Error generating implications: {str(e)}"
    return f"Implications for article {article_index}: {result.message.content}"
163
+
164
+
165
def web_search(query: str) -> List[Dict]:
    """
    Performs a web search and returns a list of results.

    Args:
        query: The search query string.

    Returns:
        List[Dict]: A list of dictionaries containing search results, each
        with 'title', 'snippet', and 'url'.
    """
    # Dummy implementation for testing
    fixtures = [
        ("Example Result 1", "This is an example snippet.", "http://example.com/1"),
        ("Example Result 2", "This is another example snippet.", "http://example.com/2"),
    ]
    return [{"title": t, "snippet": s, "url": u} for t, s, u in fixtures]
182
+
183
+
184
def browse_page(url: str, query: str) -> str:
    """
    Fetches the content of a web page and returns it as a string.

    Args:
        url: The URL of the web page to fetch.
        query: A query string to search within the page content
            (currently unused; kept for interface compatibility).

    Returns:
        str: The content of the web page, or an error message if the page
        cannot be fetched.
    """
    try:
        # timeout added: requests has no default timeout, so a stalled
        # server would hang the whole agent run.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        return response.text  # Return the raw HTML content for simplicity
    except requests.RequestException as e:
        return f"Error fetching page: {str(e)}"
201
+
202
+ # Tool to get chronological events leading to an article
203
+
204
+
205
def get_lead_up_events(article_index: int) -> str:
    """
    Retrieves a brief timeline or background of events leading up to a news
    article's topic.

    Looks for a Wikipedia page related to the article's title, extracts lines
    that mention a recent year as a rough chronology, and returns up to five
    of them. Falls back to a general web search when no Wikipedia page is found.

    Args:
        article_index: The 1-based index of the article in the global
            `last_news` list.

    Returns:
        str: A formatted summary of lead-up events or background information.
    """
    from datetime import date

    global last_news
    if not (1 <= article_index <= len(last_news)):
        return "Invalid article index."
    title = last_news[article_index - 1]["title"]

    # Search Wikipedia first.
    search_results = web_search(f"{title} Wikipedia")
    wiki_url = next(
        (result["url"] for result in search_results if "wikipedia.org" in result["url"]),
        None,
    )
    if wiki_url is None:
        # Fallback to general web search.
        search_results = web_search(f"background of {title}")
        results = [
            f"- {result['title']}: {result['snippet']}" for result in search_results[:3]]
        return f"No Wikipedia timeline found for '{title}'. General background:\n" + "\n".join(results)

    history_content = browse_page(wiki_url, query="history")
    # Heuristic timeline extraction: keep non-empty lines mentioning a year
    # from 2000 through the current year (was hard-coded to stop at 2025).
    years = tuple(str(y) for y in range(2000, date.today().year + 1))
    events = [
        line.strip()
        for line in history_content.split("\n")
        if line.strip() and any(year in line for year in years)
    ]
    if events:
        return f"Events leading up to '{title}':\n" + "\n".join(f"- {event}" for event in events[:5]) + f"\n(Source: {wiki_url})"
    return f"No clear timeline found on Wikipedia for '{title}'."
248
+
249
+
250
def x_search(query: str) -> List[Dict]:
    """
    Searches social media (e.g., X/Twitter) for posts related to a query.

    Args:
        query: The search query string.

    Returns:
        List[Dict]: A list of dictionaries containing social media posts,
        each with 'content'.
    """
    # Dummy implementation for testing
    sample_texts = [
        "This is a positive post about the event.",
        "This is a negative post about the event.",
        "Neutral post with no strong sentiment.",
    ]
    return [{"content": text} for text in sample_texts]
266
+
267
+ # Tool to get social media opinion levels
268
+
269
+
270
def get_social_media_opinions(article_index: int) -> str:
    """
    Analyzes social media opinions related to a news article by its index.

    Retrieves posts relevant to the article's title, analyzes their sentiment,
    and categorizes the number of positive and negative opinions as 'low',
    'medium', or 'high'.

    Args:
        article_index: The 1-based index of the article in the global
            `last_news` list.

    Returns:
        str: A summary indicating the categorized levels of positive and
        negative opinions about the event.
    """
    global last_news
    if not (1 <= article_index <= len(last_news)):
        return "Invalid article index."
    title = last_news[article_index - 1]["title"]
    posts = x_search(title)
    positive_count = 0
    negative_count = 0
    for post in posts:
        # BUG FIX: analyze_sentiment returns lowercase labels ('positive'),
        # but the old code compared against 'POSITIVE'/'NEGATIVE' and so
        # never counted anything. Normalize case before comparing.
        sentiment = analyze_sentiment(post["content"]).upper()
        if sentiment == "POSITIVE":
            positive_count += 1
        elif sentiment == "NEGATIVE":
            negative_count += 1

    def categorize(count: int) -> str:
        # Bucket raw counts into coarse user-facing levels.
        if count <= 10:
            return "low"
        elif count <= 30:
            return "medium"
        return "high"

    positive_level = categorize(positive_count)
    negative_level = categorize(negative_count)
    return f"There are a {positive_level} number of positive opinions and a {negative_level} number of negative opinions about this event."