File size: 1,134 Bytes
cadb09a
a6f7e05
c7be05a
 
a6f7e05
 
c7be05a
 
c34ea7e
78fcbcb
9abd04f
d49c9f2
c7be05a
be336e7
c7be05a
 
d49c9f2
4d5764e
 
38047c4
 
6947168
c7be05a
6c338fd
c7be05a
 
 
 
38047c4
c7be05a
 
 
 
6c338fd
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
## app.py ##
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline,GPTNeoXForCausalLM, AutoConfig
from gradio import Interface
import gradio as gr
from accelerate import init_empty_weights
import json 
# Create a dictionary of models
# Human-readable dropdown label -> Hugging Face Hub model id.
# NOTE(review): "OpenAssistant" lacks the usual "org/name" id form the
# other entries use — confirm it actually resolves on the Hub.
MODELS = dict(
    T5="lmsys/fastchat-t5-3b-v1.0",
    LSpanishGPT2="PlanTL-GOB-ES/gpt2-large-bne",
    GPT2="datificate/gpt2-small-spanish",
    OpenAssistant="OpenAssistant",
)

# Define your function
def generate_and_analyze(model_name, input_text):
    """Generate text from *input_text* with the model picked in the UI.

    Parameters:
        model_name: Key into MODELS, or a raw Hugging Face model id for
            dropdown choices not present in MODELS (e.g. "bloom", which
            the Interface below adds to the choices list).
        input_text: Prompt string fed to the text-generation pipeline.

    Returns:
        The generated text (transformers includes the prompt in it by
        default).
    """
    # .get() falls back to the raw name so extra dropdown entries such as
    # "bloom" do not raise KeyError. (The original also wrote the tokenizer
    # into TOKENIZERS, a name defined nowhere in this file — that line
    # raised NameError on every call and its value was never read, so it
    # is removed.)
    model = MODELS.get(model_name, model_name)
    tokenizer = AutoTokenizer.from_pretrained(model)

    # device=0 pins the pipeline to the first GPU; this raises if no GPU
    # is present — TODO confirm the deployment target always has one.
    text_generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=0)
    result = text_generator(input_text, max_length=250, do_sample=True)[0]
    return result['generated_text']

# Define your interface
# Wire the UI: a model picker (MODELS entries plus a "bloom" extra) and a
# two-line prompt box feeding generate_and_analyze; output is plain text.
model_choices = list(MODELS.keys()) + ["bloom"]
model_dropdown = gr.inputs.Dropdown(choices=model_choices, label="Model")
prompt_box = gr.inputs.Textbox(lines=2, label="Input Text")

iface = gr.Interface(
    fn=generate_and_analyze,
    inputs=[model_dropdown, prompt_box],
    outputs="text",
)
iface.launch()