"""Gradio app that generates short TLDR summaries of Hugging Face Hub model and dataset cards."""

import functools
import json
import logging
from typing import Tuple

import gradio as gr
import spaces
import torch
from cachetools.func import ttl_cache
from huggingface_hub import DatasetCard, ModelCard, dataset_info, model_info
from transformers import AutoModelForCausalLM, AutoTokenizer

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

MODEL_NAME = "davanstrien/Smol-Hub-tldr"

# Populated by load_model() at startup.
model = None
tokenizer = None
device = None

CACHE_TTL = 6 * 60 * 60  # summary cache lifetime: six hours, in seconds
CACHE_MAXSIZE = 100

def load_model() -> bool:
    """Load the summarization model and tokenizer into the module globals."""
    global model, tokenizer, device
    logger.info("Loading model and tokenizer...")
    try:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=True)
        model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
        model = model.to(device)
        model.eval()
        return True
    except Exception as e:
        logger.error(f"Failed to load model: {e}")
        return False

@functools.lru_cache(maxsize=100)
def get_card_info(hub_id: str) -> Tuple[str, str]:
    """Fetch the card for a Hugging Face hub_id, trying models first, then datasets."""
    try:
        model_info(hub_id)  # raises if no model repo exists under this id
        card = ModelCard.load(hub_id)
        return "model", card.text
    except Exception as e:
        logger.error(f"Error fetching model card for {hub_id}: {e}")
        try:
            dataset_info(hub_id)  # raises if no dataset repo exists either
            card = DatasetCard.load(hub_id)
            return "dataset", card.text
        except Exception as e:
            logger.error(f"Error fetching dataset card for {hub_id}: {e}")
            raise ValueError(f"Could not find model or dataset with id {hub_id}") from e

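# Rough shape of the behavior above (the failing id is made up for
# illustration; results are cached in-process by lru_cache):
#
#   get_card_info("davanstrien/Smol-Hub-tldr")  # -> ("model", "...card markdown...")
#   get_card_info("no-such-user/no-such-repo")  # -> raises ValueError
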
@spaces.GPU
def _generate_summary_gpu(card_text: str, card_type: str) -> str:
    """Internal generation function that runs on the GPU."""
    # The summarizer expects a card-type marker ahead of the card text.
    prefix = "<MODEL_CARD>" if card_type == "model" else "<DATASET_CARD>"

    messages = [{"role": "user", "content": f"{prefix}{card_text}"}]
    inputs = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    )
    inputs = inputs.to(device)

    with torch.no_grad():
        outputs = model.generate(
            inputs,
            max_new_tokens=60,
            pad_token_id=tokenizer.pad_token_id,
            eos_token_id=tokenizer.eos_token_id,
            temperature=0.4,
            do_sample=True,
            use_cache=True,
        )

    # Decode only the newly generated tokens, keeping special tokens so the
    # <CARD_SUMMARY> markers survive for extraction below.
    input_length = inputs.shape[1]
    response = tokenizer.decode(outputs[0][input_length:], skip_special_tokens=False)

    # Pull out the text between the summary markers. str.split never raises
    # IndexError here, so check for the marker explicitly and fall back to
    # the raw response when it is absent.
    if "<CARD_SUMMARY>" in response:
        summary = response.split("<CARD_SUMMARY>")[-1].split("</CARD_SUMMARY>")[0].strip()
    else:
        summary = response.strip()

    return summary

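# For orientation: judging from the extraction logic above, a raw decoded
# response is expected to look roughly like
#   "<CARD_SUMMARY>One- or two-sentence summary of the card.</CARD_SUMMARY>"
# (illustrative shape only, not an actual model output).
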
@ttl_cache(maxsize=CACHE_MAXSIZE, ttl=CACHE_TTL)
def generate_summary(card_text: str, card_type: str) -> str:
    """TTL-cached wrapper around _generate_summary_gpu."""
    return _generate_summary_gpu(card_text, card_type)

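# Keeping the cache on this wrapper rather than on the @spaces.GPU function
# means a cache hit returns without requesting a GPU slot at all; only true
# misses trigger generation. ttl_cache has no background timer: an entry older
# than CACHE_TTL is simply treated as absent on the next lookup.
# (Design reading inferred from the decorators, not stated in the original.)
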
def summarize(hub_id: str = "", card_type: str = "model") -> Tuple[str, str]:
    """Interface function for Gradio. Returns the summary as text and as a JSON string."""
    try:
        if hub_id:
            # Infer the card type from the Hub and check it against the
            # type selected in the UI.
            inferred_type, card_text = get_card_info(hub_id)
            if card_type and card_type != inferred_type:
                error_msg = (
                    f"Error: Provided card_type '{card_type}' doesn't match "
                    f"inferred type '{inferred_type}'"
                )
                return error_msg, json.dumps({"error": error_msg})
            card_type = inferred_type
        else:
            error_msg = "Error: Hub ID must be provided"
            return error_msg, json.dumps({"error": error_msg})

        summary = generate_summary(card_text, card_type)
        # json.dumps (rather than hand-built f-strings) keeps the output valid
        # JSON even when the summary contains quotes or backslashes.
        json_output = json.dumps(
            {"summary": summary, "type": card_type, "hub_id": hub_id}
        )
        return summary, json_output

    except Exception as e:
        error_msg = str(e)
        return f"Error: {error_msg}", json.dumps({"error": error_msg})

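# Usage sketch (return values are illustrative, not real outputs):
#
#   text, payload = summarize("davanstrien/Smol-Hub-tldr", "model")
#   # text    -> "A model for generating TLDR summaries of Hub cards..."
#   # payload -> '{"summary": "...", "type": "model", "hub_id": "davanstrien/Smol-Hub-tldr"}'
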
def create_interface():
    interface = gr.Interface(
        fn=summarize,
        inputs=[
            gr.Textbox(label="Hub ID", placeholder="e.g., huggingface/llama-7b"),
            gr.Radio(choices=["model", "dataset"], label="Card Type", value="model"),
        ],
        outputs=[
            gr.Textbox(label="Summary"),
            gr.JSON(label="JSON Output"),
        ],
        title="Hugging Face Hub TLDR Generator",
        description="Generate concise summaries of model and dataset cards from the Hugging Face Hub.",
    )
    return interface

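# create_interface() is kept separate from launching so the Interface object
# can be built (e.g. for tests) without starting a server; launch() below
# blocks and serves the UI, and launch(share=True) would additionally create a
# public link. (Standard Gradio behavior, not specific to this app.)
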
if __name__ == "__main__": |
|
if load_model(): |
|
interface = create_interface() |
|
interface.launch() |
|
else: |
|
print("Failed to load model. Please check the logs for details.") |
|
|