File size: 1,659 Bytes
9b7ca42
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import gradio as gr
from langchain.agents import Agent
from langchain.llms import HuggingFaceHub
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings

# Initialize the LLM from Hugging Face Hub.
# NOTE(review): HuggingFaceHub normally requires a HUGGINGFACEHUB_API_TOKEN
# environment variable to authenticate — confirm it is set where this runs.
llm = HuggingFaceHub(repo_id="gpt2")

# Initialize embeddings (small, fast sentence-transformers model).
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Initialize the vector database (FAISS).
# NOTE(review): LangChain's FAISS constructor expects
# (embedding_function, index, docstore, index_to_docstore_id); passing
# embed_documents as the second positional argument looks incorrect —
# FAISS.from_texts(...) / FAISS.from_documents(...) is the usual way to
# build a store. Verify against the installed langchain version.
# Also note: `vectorstore` is never used elsewhere in this file.
vectorstore = FAISS(embeddings.embed_query, embeddings.embed_documents)

# Define the agents with distinct roles
class ResearchAgent(Agent):
    """Agent that asks the LLM for an in-depth, detailed answer."""

    def run(self, query):
        # Append the research-style instruction, then hand off to the LLM.
        prompt = query + " Please provide a detailed explanation."
        return llm(prompt)

class SummaryAgent(Agent):
    """Agent that asks the LLM for a brief summary of the query topic."""

    def run(self, query):
        # Append the summarization instruction, then hand off to the LLM.
        prompt = query + " Summarize the information briefly."
        return llm(prompt)

class QAAgent(Agent):
    """Agent that asks the LLM to answer the query as a direct question."""

    def run(self, query):
        # Bug fix: the original built `query + instruction + query`,
        # duplicating the user's text in the prompt. Send the question once,
        # after the instruction.
        return llm("Answer the following question: " + query)

# Create one module-level instance of each agent; these are the handlers
# agent_interaction() routes to below.
research_agent = ResearchAgent()
summary_agent = SummaryAgent()
qa_agent = QAAgent()

# Function to handle the interaction with the agents
def agent_interaction(query, agent_type):
    """Route *query* to the agent selected in the UI.

    Args:
        query: The user's input text from the Textbox.
        agent_type: One of "Research", "Summary", or "Q&A"
            (the Radio choices defined on the interface).

    Returns:
        The selected agent's response string, or an explicit error
        message for an unrecognized agent_type (the original if/elif
        chain silently returned None in that case).
    """
    # Dispatch table keeps the routing in one place; adding an agent is a
    # single new entry rather than another elif branch.
    handlers = {
        "Research": research_agent,
        "Summary": summary_agent,
        "Q&A": qa_agent,
    }
    handler = handlers.get(agent_type)
    if handler is None:
        return f"Unknown agent type: {agent_type!r}"
    return handler.run(query)

# Create a Gradio interface.
# Fix: the `gr.inputs.*` component namespace was deprecated in Gradio 3.0
# and removed in later releases; components are now instantiated directly
# as gr.Textbox / gr.Radio.
interface = gr.Interface(
    fn=agent_interaction,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your query here..."),
        gr.Radio(["Research", "Summary", "Q&A"], label="Agent Type"),
    ],
    outputs="text",
)

# Launch the web UI only when run as a script, not on import.
if __name__ == "__main__":
    interface.launch()