GramAPP / app.py
Merlintxu's picture
Update app.py
d49c9f2
raw
history blame
1.13 kB
## app.py ##
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline,GPTNeoXForCausalLM, AutoConfig
from gradio import Interface
import gradio as gr
from accelerate import init_empty_weights
import json
# Create a dictionary of models
# Mapping from the display name shown in the UI dropdown to the
# Hugging Face Hub model identifier passed to from_pretrained/pipeline.
MODELS = {
"T5": "lmsys/fastchat-t5-3b-v1.0",
"LSpanishGPT2": "PlanTL-GOB-ES/gpt2-large-bne",
"GPT2": "datificate/gpt2-small-spanish",
# NOTE(review): "OpenAssistant" is an organization name, not a full
# "org/model" repo id — loading it will likely fail. Confirm which
# OpenAssistant checkpoint was intended.
"OpenAssistant": "OpenAssistant"
}
# Define your function
def generate_and_analyze(model_name, input_text):
    """Generate a text continuation for *input_text* with the selected model.

    Args:
        model_name: Key into the module-level ``MODELS`` dict choosing
            which Hugging Face checkpoint to load.
        input_text: Prompt string fed to the text-generation pipeline.

    Returns:
        str: The ``generated_text`` field of the first pipeline result
        (sampled, capped at 250 tokens).

    Raises:
        KeyError: If ``model_name`` is not a key of ``MODELS``.
    """
    import torch  # local import; torch is a hard dependency of transformers

    model_id = MODELS[model_name]
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    # BUG FIX: the original assigned ``TOKENIZERS[model_name] = tokenizer``
    # but TOKENIZERS is never defined anywhere in the file, so every call
    # raised NameError. The dead write is removed.
    #
    # BUG FIX: the original hard-coded ``device=0`` (GPU) while the comment
    # claimed "Use GPU if available" — on a CPU-only host the pipeline
    # crashed. Now we actually fall back to CPU (-1) when no CUDA device
    # is present.
    device = 0 if torch.cuda.is_available() else -1
    text_generator = pipeline(
        'text-generation', model=model_id, tokenizer=tokenizer, device=device
    )
    result = text_generator(input_text, max_length=250, do_sample=True)[0]
    return result['generated_text']
# Define your interface
# Build and launch the Gradio UI: a model selector plus a prompt box,
# wired to generate_and_analyze, returning plain text.
iface = gr.Interface(
    fn=generate_and_analyze,
    inputs=[
        # BUG FIX: the original appended "bloom" to the dropdown choices,
        # but "bloom" is not a key of MODELS, so selecting it raised a
        # KeyError inside generate_and_analyze. Only real MODELS keys are
        # offered now. (Legacy gr.inputs.* API kept to match the gradio
        # version this file was written against.)
        gr.inputs.Dropdown(choices=list(MODELS.keys()), label="Model"),
        gr.inputs.Textbox(lines=2, label="Input Text"),
    ],
    outputs="text",
)
iface.launch()