Spaces: Running on Zero
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr
import torch
# Load the grammar correction model
model_path = "vennify/t5-base-grammar-correction"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
# Inference function
def correct_grammar(text):
    if not text.strip():
        return "⚠️ Please enter some text to correct."
    prompt = f"grammar: {text}"
    try:
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512)
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_length=1024,
                num_beams=5,
                early_stopping=True,
                do_sample=False  # deterministic beam search; temperature only applies when sampling
            )
        return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
    except Exception as e:
        return f"⚠️ Error: {str(e)}"
# Gradio UI
iface = gr.Interface(
    fn=correct_grammar,
    inputs=gr.Textbox(label="Enter text to correct", placeholder="Type a sentence...", lines=3),
    outputs=gr.Textbox(label="Corrected text", lines=3),
    title="📝 Grammar Correction",
    description="Fix grammar issues using a fine-tuned T5 model",
    examples=[
        "She go to school every day.",
        "I is a boy.",
        "He don't like apples.",
        "We was playing outside."
    ]
)
# Launch the app
if __name__ == "__main__":
    iface.launch()
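# Optional sanity check (a sketch; the exact output depends on the model weights):
# call the inference function directly from a Python shell before launching the UI, e.g.
#   >>> correct_grammar("She go to school every day.")
# which should return a corrected sentence such as "She goes to school every day."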