## app.py ##
import torch
from transformers import pipeline
import gradio as gr
# Map display names to Hugging Face model IDs
MODELS = {
    "T5": "lmsys/fastchat-t5-3b-v1.0",
    "Bert": "bert-base-multilingual-cased",
    "GPT2": "datificate/gpt2-small-spanish",
    "Bloom": "bigscience/bloom",
}
# Generate text with the model selected in the dropdown
def generate_and_analyze(model_name, input_text):
    # Look up the Hugging Face model ID for the selected name
    model = MODELS[model_name]
    # Build the pipeline for this request; use the GPU only if one is available
    device = 0 if torch.cuda.is_available() else -1
    text_generator = pipeline('text-generation', model=model, device=device)
    result = text_generator(input_text, max_length=250, do_sample=True)[0]
    return result['generated_text']
# Define the Gradio interface
iface = gr.Interface(
    fn=generate_and_analyze,
    inputs=[
        gr.Dropdown(choices=list(MODELS.keys()), label="Model"),
        gr.Textbox(lines=2, label="Input Text"),
    ],
    outputs="text",
)
iface.launch()
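
Note that app.py above rebuilds the pipeline on every request, which re-loads the model weights each time a user submits text. Below is a minimal sketch of per-model caching, assuming the same MODELS dictionary and enough memory to keep the loaded pipelines resident; get_generator is an illustrative helper, not part of the original file.

from functools import lru_cache

import torch
from transformers import pipeline

@lru_cache(maxsize=None)
def get_generator(model_id):
    # Build the pipeline once per model ID and reuse it across requests
    device = 0 if torch.cuda.is_available() else -1
    return pipeline('text-generation', model=model_id, device=device)

def generate_and_analyze(model_name, input_text):
    text_generator = get_generator(MODELS[model_name])
    result = text_generator(input_text, max_length=250, do_sample=True)[0]
    return result['generated_text']

Running python app.py serves the interface on http://127.0.0.1:7860 by default, Gradio's standard local address.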