# Centaur / app.py
# Author: marcelbinz — "Update app.py" (commit b916df8, verified; 863 bytes)
# NOTE: header reconstructed from Hugging Face file-view scrape residue
# ("raw / history / blame") so the file is valid Python again.
import spaces
import gradio as gr
import torch
from transformers import pipeline
# Load the Minitaur-8B checkpoint once at import time so every request
# reuses the same pipeline. device_map="auto" lets accelerate place the
# weights; bfloat16 halves memory versus fp32.
_PIPELINE_KWARGS = {
    "model": "marcelbinz/Llama-3.1-Minitaur-8B",
    "device_map": "auto",
    "torch_dtype": torch.bfloat16,
}
pipe = pipeline("text-generation", **_PIPELINE_KWARGS)
@spaces.GPU
def infer(prompt):
    """Sample exactly one new token as the model's response to *prompt*.

    Runs on a ZeroGPU worker (``@spaces.GPU``). Returns only the generated
    continuation, not the echoed prompt (``return_full_text=False``).
    """
    outputs = pipe(
        prompt,
        max_new_tokens=1,
        do_sample=True,
        temperature=1.0,
        return_full_text=False,
    )
    # pipeline returns a list with one dict per generated sequence
    return outputs[0]["generated_text"]
# Two-column UI: prompt on the left, single-token answer on the right.
# The CSS pins fixed heights on both textboxes so the layout stays stable.
with gr.Blocks(fill_width=True, css="""
#prompt-box textarea {height:200px}
#answer-box textarea {height:320px}
""") as demo:
    with gr.Row(equal_height=True):
        prompt_box = gr.Textbox(
            label="Prompt",
            elem_id="prompt-box",
            lines=12,
            max_lines=12,
            scale=3,
        )
        answer_box = gr.Textbox(
            label="Response",
            elem_id="answer-box",
            lines=1,
            interactive=False,
            scale=3,
        )
    run_btn = gr.Button("Run")
    run_btn.click(fn=infer, inputs=prompt_box, outputs=answer_box)

# queue() serializes GPU requests before launching the app
demo.queue().launch()