from fastapi import FastAPI
from pydantic import BaseModel
import os

# Cache locations must be set before transformers is imported, or the
# library falls back to its default cache directory.
os.environ["TRANSFORMERS_CACHE"] = "/home/user/.cache/huggingface"
os.environ["HF_HOME"] = "/home/user/.cache/huggingface"

from transformers import pipeline, AutoTokenizer

app = FastAPI()

HF_AUTH_TOKEN = os.environ.get("HF_TOKEN")
MODEL_NAME = "VincentMuriuki/legal-summarizer"

# Load the summarization pipeline and its tokenizer once at startup so
# individual requests do not pay the model-loading cost.
summarizer = pipeline("summarization", model=MODEL_NAME, use_auth_token=HF_AUTH_TOKEN)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_auth_token=HF_AUTH_TOKEN)
class SummarizeInput(BaseModel):
    text: str

class ChunkInput(BaseModel):
    text: str
    max_tokens: int = 512
# The route decorator was missing from the listing; the path below is an
# assumption.
@app.post("/summarize")
def summarize_text(data: SummarizeInput):
    summary = summarizer(data.text, max_length=150, min_length=30, do_sample=False)
    return {"summary": summary[0]["summary_text"]}
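# Example exchange (a sketch; the /summarize path is assumed above):
#   POST /summarize  {"text": "This Agreement is made between ..."}
#   ->               {"summary": "..."}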
# The route decorator was missing from the listing; the path below is an
# assumption.
@app.post("/chunk")
def chunk_text(data: ChunkInput):
    # Split at the token level so every chunk fits the model's context window.
    tokens = tokenizer.encode(data.text, truncation=False)
    chunks = []
    for i in range(0, len(tokens), data.max_tokens):
        chunk_tokens = tokens[i:i + data.max_tokens]
        decoded = tokenizer.decode(chunk_tokens, skip_special_tokens=True)
        chunks.append(decoded.strip())
    return {"chunks": chunks}
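
# Minimal local entry point, a sketch and not part of the original listing.
# Hugging Face Spaces conventionally serves FastAPI apps on port 7860; a
# typical client would POST long text to /chunk, then POST each returned
# chunk to /summarize.
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)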