# PaperPilot — Streamlit app that searches arXiv and summarizes papers via Groq.
# (Removed non-code scrape residue that preceded the source: Hugging Face Spaces
# page chrome, git blob hashes, and gutter line numbers.)
import os
import streamlit as st
import arxiv
import datetime
# -------------------------------
# Groq API Client
# -------------------------------
from groq import Groq
# Module-wide Groq client. The API key is read from the GROQ_API_KEY
# environment variable (may be None if unset — TODO confirm Groq's behavior
# in that case before deploying).
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
# -------------------------------
# Helper Functions (Groq-based)
# -------------------------------
def groq_summarize(text: str, model: str = "llama-3.3-70b-versatile") -> str:
    """Return a concise summary of *text* produced by a Groq-hosted LLM.

    Parameters
    ----------
    text : str
        Text to summarize (here: a paper abstract).
    model : str, optional
        Groq model identifier. Defaults to the model the app originally
        hard-coded, so existing callers are unaffected.

    Returns
    -------
    str
        The model's reply with surrounding whitespace stripped.
    """
    # Single-turn chat completion: the whole instruction is one user message.
    response = client.chat.completions.create(
        messages=[
            {"role": "user", "content": f"Summarize the following text concisely:\n\n{text}"}
        ],
        model=model,
    )
    return response.choices[0].message.content.strip()
def groq_eli5(text: str, model: str = "llama-3.3-70b-versatile") -> str:
    """Return an "explain like I'm 5" rewrite of *text* from a Groq-hosted LLM.

    Parameters
    ----------
    text : str
        Text to simplify (here: a paper abstract).
    model : str, optional
        Groq model identifier. Defaults to the previously hard-coded model,
        keeping the change backward-compatible.

    Returns
    -------
    str
        The model's reply with surrounding whitespace stripped.
    """
    # Single-turn chat completion, mirroring groq_summarize's call shape.
    response = client.chat.completions.create(
        messages=[
            {"role": "user", "content": f"Explain this like I'm 5 years old:\n\n{text}"}
        ],
        model=model,
    )
    return response.choices[0].message.content.strip()
def groq_key_takeaways(text: str, model: str = "llama-3.3-70b-versatile") -> str:
    """Return a list of key takeaways from *text*, generated by a Groq LLM.

    Parameters
    ----------
    text : str
        Research text to distill (here: a paper abstract).
    model : str, optional
        Groq model identifier. Defaults to the previously hard-coded model,
        keeping the change backward-compatible.

    Returns
    -------
    str
        The model's reply (free-form list text) with whitespace stripped.
    """
    # Single-turn chat completion, mirroring the other groq_* helpers.
    response = client.chat.completions.create(
        messages=[
            {"role": "user", "content": f"List the key takeaways from this research:\n\n{text}"}
        ],
        model=model,
    )
    return response.choices[0].message.content.strip()
# -------------------------------
# Paper Retrieval & Processing
# -------------------------------
def retrieve_papers(query, max_results=5):
    """Search arXiv and return up to *max_results* matching papers.

    Parameters
    ----------
    query : str
        Free-text arXiv search query.
    max_results : int, optional
        Maximum number of results to fetch (default 5).

    Returns
    -------
    list[dict]
        One dict per paper with keys: ``title``, ``summary`` (abstract),
        ``url`` (PDF link), ``authors`` (list of author names), and
        ``published`` (publication datetime as provided by the arxiv client).
    """
    search = arxiv.Search(query=query, max_results=max_results)
    # Search.results() is deprecated in recent versions of the `arxiv`
    # package; Client().results(search) is the supported iteration API.
    results = arxiv.Client().results(search)
    return [
        {
            "title": result.title,
            "summary": result.summary,
            "url": result.pdf_url,
            "authors": [author.name for author in result.authors],
            "published": result.published,
        }
        for result in results
    ]
# -------------------------------
# Streamlit Interface
# -------------------------------
# Page header and a short description of the app.
# NOTE(review): "π" / "β" (and "β€οΈ" below) look like mojibake of emoji and
# punctuation — confirm the intended glyphs; left byte-identical here.
st.title("π PaperPilot β Intelligent Academic Navigator")
st.write("""
PaperPilot helps you quickly analyze research papers by summarizing them, highlighting key takeaways, and explaining complex topics in simple terms.
Enter a query and get structured insights instantly!
""")

# Sidebar search form. Results and the active view name are stashed in
# st.session_state so they survive Streamlit's top-to-bottom script reruns.
with st.sidebar:
    st.header("π Search Parameters")
    query = st.text_input("Research topic or question:")
    if st.button("π Find Articles"):
        if query.strip():
            with st.spinner("Searching arXiv..."):
                papers = retrieve_papers(query)
                if papers:
                    st.session_state.papers = papers
                    st.success(f"Found {len(papers)} papers!")
                    # Switch the main panel to the review view on success.
                    st.session_state.active_section = "review"
                else:
                    st.error("No papers found. Try different keywords.")
        else:
            st.warning("Please enter a search query")

# Main panel: shown only after a successful search populated session state.
if 'papers' in st.session_state and st.session_state.papers:
    papers = st.session_state.papers
    if st.session_state.active_section == "review":
        st.header("π Literature Review & Summary")
        # One collapsible expander per paper, numbered from 1.
        for idx, paper in enumerate(papers, 1):
            with st.expander(f"{idx}. {paper['title']}"):
                st.markdown(f"**Authors:** {', '.join(paper['authors'])}")
                # Fall back to "n.d." (no date) when 'published' is not a datetime.
                pub_date = paper['published'].strftime('%Y-%m-%d') if isinstance(paper['published'], datetime.datetime) else "n.d."
                st.markdown(f"**Published:** {pub_date}")
                st.markdown(f"**Link:** [PDF]({paper['url']})")
                # NOTE(review): these three LLM calls run again on every
                # Streamlit rerun — consider caching if API cost matters.
                with st.spinner("Generating insights..."):
                    short_description = groq_summarize(paper['summary'])
                    key_takeaways = groq_key_takeaways(paper['summary'])
                    eli5_explanation = groq_eli5(paper['summary'])
                st.subheader("Short Description")
                st.write(short_description)
                st.subheader("Key Takeaways")
                st.write(key_takeaways)
                st.subheader("Explain Like I'm 5 (ELI5)")
                st.write(eli5_explanation)
else:
    st.info("Enter a query in the sidebar and click 'Find Articles' to get started.")

st.caption("Built with β€οΈ using AI")
|