import requests
from bs4 import BeautifulSoup
import pandas as pd
import gradio as gr
import time
import os
import json
import PyPDF2
import io
import markdown
import asyncio
import aiohttp
import aiofiles
from concurrent.futures import ThreadPoolExecutor

# ... (keep the existing functions like get_rank_papers, load_cached_data, save_cached_data, format_dataframe, load_and_cache_data, update_display, load_all_data)
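# NOTE: minimal sketch of one elided helper, included only so this snippet can
# run standalone. It assumes each "*_papers_cache.json" file holds a plain JSON
# dict keyed by paper title; replace it with the original load_cached_data if
# you have the full app.
def load_cached_data(cache_file):
    """Return cached JSON data as a dict, or None when no cache file exists."""
    if os.path.exists(cache_file):
        with open(cache_file, "r", encoding="utf-8") as f:
            return json.load(f)
    return None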
async def download_and_convert_pdf(session, title, paper_info):
    """Download a paper's PDF, extract its text, and cache it as Markdown."""
    pdf_url = paper_info['pdf_link']
    cache_file = f"cache/{title.replace(' ', '_')}.md"

    # Serve from the Markdown cache if this paper was already converted.
    if os.path.exists(cache_file):
        async with aiofiles.open(cache_file, 'r', encoding='utf-8') as f:
            return await f.read()

    if not pdf_url:
        return f"# {title}\n\nNo PDF link available.\n\n---\n\n"

    try:
        async with session.get(pdf_url) as response:
            response.raise_for_status()  # fail early on HTTP errors
            pdf_content = await response.read()

        # Extract text from every page of the PDF.
        pdf_file = io.BytesIO(pdf_content)
        pdf_reader = PyPDF2.PdfReader(pdf_file)
        text = ""
        for page in pdf_reader.pages:
            text += page.extract_text()

        # Cache the converted paper so repeated runs skip the download.
        markdown_text = f"# {title}\n\n{text}\n\n---\n\n"
        os.makedirs('cache', exist_ok=True)
        async with aiofiles.open(cache_file, 'w', encoding='utf-8') as f:
            await f.write(markdown_text)
        return markdown_text
    except Exception as e:
        return f"# {title}\n\nError processing PDF: {str(e)}\n\n---\n\n"
async def process_papers(data, progress=gr.Progress()):
    """Download and convert all papers concurrently, reporting progress."""
    async with aiohttp.ClientSession() as session:
        tasks = [
            asyncio.ensure_future(download_and_convert_pdf(session, title, paper_info))
            for title, paper_info in data.items()
        ]

        # Consume tasks as they finish so the progress bar updates promptly.
        consolidated_text = ""
        for i, task in enumerate(asyncio.as_completed(tasks), start=1):
            consolidated_text += await task
            progress(i / len(tasks), f"Processed {i}/{len(tasks)} papers")

    return consolidated_text
def download_all_papers(progress=gr.Progress()):
    """Merge all cached categories, convert every PDF, and write one Markdown file."""
    all_data = {}
    for category in ["top", "latest", "greatest"]:
        cache_file = f"{category}_papers_cache.json"
        data = load_cached_data(cache_file)
        if data:
            all_data.update(data)

    # This handler is synchronous, so asyncio.run() can own its own event loop.
    consolidated_text = asyncio.run(process_papers(all_data, progress))

    with open("consolidated_papers.md", "w", encoding="utf-8") as f:
        f.write(consolidated_text)

    # Return two values to match the two outputs wired to the download button:
    # a status message for the textbox and the consolidated Markdown preview.
    return ("All papers have been downloaded and consolidated into 'consolidated_papers.md'",
            consolidated_text)
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Papers Leaderboard</center></h1>")

    with gr.Tab("Top Trending Papers"):
        top_count = gr.Textbox(label="Number of Papers Fetched")
        top_html = gr.HTML()
        top_button = gr.Button("Refresh Leaderboard")
        top_button.click(fn=lambda: update_display("top"), inputs=None, outputs=[top_count, top_html])

    with gr.Tab("New Papers"):
        new_count = gr.Textbox(label="Number of Papers Fetched")
        new_html = gr.HTML()
        new_button = gr.Button("Refresh Leaderboard")
        new_button.click(fn=lambda: update_display("latest"), inputs=None, outputs=[new_count, new_html])

    with gr.Tab("Greatest Papers"):
        greatest_count = gr.Textbox(label="Number of Papers Fetched")
        greatest_html = gr.HTML()
        greatest_button = gr.Button("Refresh Leaderboard")
        greatest_button.click(fn=lambda: update_display("greatest"), inputs=None, outputs=[greatest_count, greatest_html])

    download_button = gr.Button("📚 Download All Papers", variant="primary")
    download_output = gr.Textbox(label="Download Status")
    markdown_output = gr.Markdown(label="Paper Content")
    download_button.click(fn=download_all_papers, inputs=None, outputs=[download_output, markdown_output])

    # Load initial data for all tabs
    demo.load(fn=load_all_data, outputs=[top_count, top_html, new_count, new_html, greatest_count, greatest_html])

# Launch the Gradio interface with a public link
demo.launch(share=True)