|
|
|
import json

import gradio as gr
import torch
from accelerate import init_empty_weights
from gradio import Interface
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, GPTNeoXForCausalLM, AutoConfig
|
|
|
# Display name (UI dropdown label) -> Hugging Face Hub model identifier.
MODELS = dict(
    T5="lmsys/fastchat-t5-3b-v1.0",
    LSpanishGPT2="PlanTL-GOB-ES/gpt2-large-bne",
    GPT2="datificate/gpt2-small-spanish",
    # NOTE(review): looks like a placeholder rather than a full "org/name"
    # hub id — confirm before selecting it in the UI.
    OpenAssistant="OpenAssistant",
)
|
|
|
|
|
def generate_and_analyze(model_name, input_text):
    """Generate a text continuation of *input_text* with the selected model.

    Args:
        model_name: Key into ``MODELS``, as chosen in the UI dropdown.
        input_text: Prompt string the pipeline continues.

    Returns:
        The generated text (the pipeline includes the prompt in its output).

    Raises:
        KeyError: If ``model_name`` is not a key of ``MODELS``.
    """
    model_id = MODELS[model_name]
    tokenizer = AutoTokenizer.from_pretrained(model_id)

    # Bugfix: the original wrote `TOKENIZERS[model_name] = tokenizer`, but no
    # `TOKENIZERS` dict exists anywhere in the file, so every call raised
    # NameError before reaching the pipeline. The write is dropped.

    # Robustness: fall back to CPU (device=-1) when no CUDA device is present;
    # the previous hard-coded device=0 crashed on CPU-only hosts.
    device = 0 if torch.cuda.is_available() else -1
    text_generator = pipeline(
        'text-generation', model=model_id, tokenizer=tokenizer, device=device
    )
    # do_sample=True -> stochastic sampling; max_length caps prompt + generation.
    result = text_generator(input_text, max_length=250, do_sample=True)[0]
    return result['generated_text']
|
|
|
|
|
# Build and launch the Gradio UI.
# Bugfix: the dropdown previously offered an extra hard-coded "bloom" choice
# that is not a key of MODELS, so selecting it raised KeyError inside
# generate_and_analyze. Choices are now derived solely from MODELS, keeping
# the UI and the lookup table in sync automatically.
iface = gr.Interface(
    fn=generate_and_analyze,
    inputs=[
        gr.inputs.Dropdown(choices=list(MODELS.keys()), label="Model"),
        gr.inputs.Textbox(lines=2, label="Input Text"),
    ],
    outputs="text",
)
iface.launch()
|
|