import os
import streamlit as st
from langchain_community.llms import HuggingFaceHub
from dotenv import load_dotenv

# Load .env if running locally
load_dotenv()

# Streamlit page setup (set_page_config must be the first Streamlit call)
st.set_page_config(page_title="🧠 Multi-LLM Research Assistant")

# Hugging Face API token (must be set in Hugging Face Spaces Secrets for online use)
HUGGINGFACE_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not HUGGINGFACE_API_TOKEN:
    st.error("⚠️ Hugging Face API token not found. Please add it as a secret named 'HUGGINGFACEHUB_API_TOKEN'.")
    st.stop()

# Models for each task
SUMMARY_MODEL = "google/flan-t5-small"
QUESTIONS_MODEL = "tiiuae/falcon-rw-1b"
KEYWORDS_MODEL = "google/flan-t5-small"
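# flan-t5 is an instruction-tuned encoder-decoder model (served as a
# "text2text-generation" pipeline on the Hugging Face Hub), while
# falcon-rw-1b is a causal decoder-only model ("text-generation").
# The task passed to get_llm below must match the model's pipeline type.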

# Build a LangChain LLM wrapper around the Hugging Face Inference API
def get_llm(model_id, task):
    return HuggingFaceHub(
        repo_id=model_id,
        model_kwargs={"temperature": 0.5, "max_new_tokens": 150},
        task=task,  # "text2text-generation" for flan-t5, "text-generation" for falcon
        huggingfacehub_api_token=HUGGINGFACE_API_TOKEN,
    )
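# Note: HuggingFaceHub is deprecated in recent LangChain releases. A rough
# equivalent using the maintained langchain-huggingface package (an untested
# sketch, assuming that package is installed) would be:
#
#   from langchain_huggingface import HuggingFaceEndpoint
#   llm = HuggingFaceEndpoint(
#       repo_id=model_id,
#       task=task,
#       temperature=0.5,
#       max_new_tokens=150,
#       huggingfacehub_api_token=HUGGINGFACE_API_TOKEN,
#   )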

# Streamlit app UI
st.title("🧠 Research Assistant using Multiple LLMs via LangChain")

topic = st.text_input("🔍 Enter your research topic")

if st.button("Run Multi-LLM Analysis"):
    if not topic.strip():
        st.warning("Please enter a topic to continue.")
    else:
        with st.spinner("Generating..."):
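            # Each call below goes to the hosted Inference API; the first
            # request to a model may be slow while Hugging Face loads it.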

            # Step 1: Summary (flan-t5 is a seq2seq model, so use text2text-generation)
            summary_prompt = f"Provide a short summary about: {topic}"
            summary_model = get_llm(SUMMARY_MODEL, task="text2text-generation")
            summary = summary_model.invoke(summary_prompt)

            # Step 2: Research Questions (falcon-rw-1b is a causal LM, so use text-generation)
            questions_prompt = f"Give three research questions about: {topic}"
            questions_model = get_llm(QUESTIONS_MODEL, task="text-generation")
            questions = questions_model.invoke(questions_prompt)

            # Step 3: Keywords (flan-t5 again, so text2text-generation)
            keywords_prompt = f"List five keywords related to: {topic}"
            keywords_model = get_llm(KEYWORDS_MODEL, task="text2text-generation")
            keywords = keywords_model.invoke(keywords_prompt)

        # Display results
        st.success("✅ Done! Here's your research output:")
        st.subheader("📄 Summary")
        st.write(summary)

        st.subheader("❓ Research Questions")
        st.write(questions)

        st.subheader("🔑 Keywords")
        st.write(keywords)