File size: 1,609 Bytes
a7ce4c8
 
75f0fa4
2ca40cc
75f0fa4
 
a7ce4c8
 
 
0ff0452
a7ce4c8
c28c6ad
a7ce4c8
 
 
 
0ff0452
a7ce4c8
0ff0452
a7ce4c8
 
 
 
 
75f0fa4
a7ce4c8
 
 
 
75f0fa4
 
a7ce4c8
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44

from llama_index.core.agent.workflow import FunctionAgent

from llms import llm_openai
from tools import (generate_implications, get_lead_up_events, get_news,
                   get_social_media_opinions)

# System prompt steering the agent: defines the news-fetching workflow and the
# follow-up commands ('implications for N', 'why happened for N',
# 'social media reaction for N') that map onto the four tools wired into
# BasicLammaAgent below. NOTE(review): the string is consumed verbatim by the
# LLM, so its exact wording (including the example phrasings) is behavior —
# edit with care.
SYSTEM_PROMPT = """
You are a news assistant. 
You can get news with 'get news'. 

When getting news, fetch the latest articles for the current query, analyze their sentiment and extract entities, then present them. 
After presenting news, ask if the user wants to know implications (e.g., 'implications for 1'), why it happened (e.g., 'why happened for 1'), or social media reactions (e.g., 'social media reaction for 1'). 

For 'implications', generate possible implications. 

For 'why happened', extract the key event, search Web for its history, and present a chronological list. 

For 'social media reaction', search for related posts, analyze their sentiment, and present the level of positive and negative opinions as 'low', 'medium', or 'high'.  
"""


class BasicLammaAgent:
    """Thin wrapper around a llama_index ``FunctionAgent`` acting as a news
    assistant.

    The agent is configured once at construction time with the shared OpenAI
    LLM and the four news tools; each call runs a single query through it and
    normalises the workflow result to a plain string.
    """

    def __init__(self):
        # Shared OpenAI-backed LLM instance provided by llms.py.
        self.llm = llm_openai

        self.agent = FunctionAgent(
            llm=self.llm,
            system_prompt=SYSTEM_PROMPT,
            tools=[
                get_news,
                generate_implications,
                get_lead_up_events,
                get_social_media_opinions,
            ],
        )

    async def __call__(self, q: str):
        """Run the agent on query *q* and return the answer as a string.

        The workflow result's shape varies by llama_index version, so the
        result is probed defensively: structured final output first, then a
        bare string, then a generic ``str()`` fallback.
        """
        result = await self.agent.run(user_msg=q)

        # Structured result: unwrap final_output.content when both are present.
        final = getattr(result, "final_output", None)
        if final is not None and hasattr(final, "content"):
            return final.content

        # Already a plain string — pass it through untouched.
        if isinstance(result, str):
            return result

        # Last resort: stringify whatever object the workflow returned.
        return str(result)