import torch, gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

# Fine-tuned T5 checkpoint for chat-title generation; the tokenizer is loaded from the base t5-small repo
MODEL_ID = "ogrnz/t5-chat-titles"
TOKENIZER_ID = "google-t5/t5-small"

# Load the base tokenizer and the fine-tuned title-generation model
tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)

# Run on the first GPU if available, otherwise fall back to the CPU
device = 0 if torch.cuda.is_available() else -1
summarizer = pipeline("summarization", model=model, tokenizer=tokenizer, device=device)
summarizer("warm-up")  # warm-up call so the first real request is not slowed by lazy initialization

def make_title(prompt: str) -> str:
    """Generate a short title (3-16 tokens) for the given prompt."""
    return summarizer(prompt, max_length=16, min_length=3)[0]["summary_text"]

# Minimal Gradio UI, exposed on all interfaces at port 5000
demo = gr.Interface(fn=make_title, inputs="text", outputs="text", title="Title Generator")
demo.launch(server_name="0.0.0.0", server_port=5000)
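
# Example client call (a sketch, assuming the app is reachable at http://localhost:5000
# and that the gradio_client package is installed; adjust the URL for your deployment):
#
#   from gradio_client import Client
#   client = Client("http://localhost:5000")
#   print(client.predict("How do I sort a list in Python?", api_name="/predict"))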