from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import gradio as gr


def envit5_translation(text):
    # Prefix the input with "en:" as the EnViT5 checkpoint expects, generate
    # the Vietnamese translation, then strip the leading "vi: " tag from the
    # decoded output.
    inputs = [f"en: {text}"]
    outputs = model.generate(
        tokenizer(inputs, return_tensors="pt", padding=True).input_ids,
        max_length=512,
    )
    results = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    return results[0][4:]


def my_translation(text):
    # Placeholder for a custom translation model.
    return "My Translation"


def finetune_BERT(text):
    # Placeholder for a fine-tuned BERT-based translation model.
    return "BERT"


def translation(text):
    # Run all three translators on the same input and return their outputs
    # in the order expected by the Gradio interface.
    output1 = my_translation(text)
    output2 = envit5_translation(text)
    output3 = finetune_BERT(text)
    return (output1, output2, output3)


if __name__ == "__main__":
    # Load the pretrained EnViT5 English-Vietnamese model once at startup;
    # envit5_translation reads these module-level globals.
    model_name = "VietAI/envit5-translation"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

    inputs = [
        "textbox"
    ]

    # One text input, three text outputs (one per translator).
    with gr.Blocks() as app:
        gr.Interface(
            fn=translation,
            inputs=inputs,
            outputs=["textbox", "textbox", "textbox"],
        )

    app.launch(share=True)  # share=True creates a temporary public Gradio link
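    # Optional sanity check (a minimal sketch, not part of the original app):
    # call the EnViT5 translator directly, without the Gradio UI. The sample
    # sentence below is an illustrative assumption.
    #
    #     sample = "Hello, how are you today?"
    #     print(envit5_translation(sample))  # prints the Vietnamese translation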