# Centaur / app.py — Hugging Face Space entry point
# (page metadata: uploaded by marcelbinz, "Update app.py", commit bb856a6 verified, 592 Bytes)
import gradio as gr
import spaces
import torch
import transformers
# Build the text-generation pipeline once at import time. device_map="auto"
# lets accelerate decide weight placement, so no manual device move is needed;
# the original `pipe.to('cuda')` was a bug: transformers Pipeline objects have
# no .to() method, and an accelerate-dispatched model refuses to be moved.
# On ZeroGPU Spaces, CUDA must not be touched at import time at all — the
# @spaces.GPU decorator on the inference function handles GPU attachment.
pipe = transformers.pipeline(
    "text-generation",
    model="marcelbinz/Llama-3.1-Minitaur-8B",
    device_map="auto",
    torch_dtype=torch.bfloat16,
)
@spaces.GPU
def infer(prompt):
    """Sample exactly one new token after *prompt* and return the full text.

    The pipeline returns the prompt with the generated continuation appended
    ("generated_text"); sampling at temperature 1.0 makes the single-token
    continuation stochastic.
    """
    outputs = pipe(prompt, max_new_tokens=1, do_sample=True, temperature=1.0)
    first = outputs[0]
    return first["generated_text"]
# Minimal text-in/text-out UI around the inference function. Queueing is
# enabled so concurrent requests are serialized through the single pipeline.
interface = gr.Interface(
    fn=infer,
    inputs=gr.Text(),
    outputs="text",
    title="Minitaur",
    description="Just type and hit *Run*",
)
demo = interface.queue()
demo.launch()