# Centaur / app.py
# Hugging Face Space by marcelbinz (commit 9704a98)
import gradio as gr
import spaces  # required: the @spaces.GPU decorator below references this module
import transformers

# Load the generation pipeline once at startup. GPU placement is done via
# the `device=` argument — Pipeline objects expose no `.to()` method, so the
# original `pipe.to('cuda')` raised AttributeError before the app could start.
pipe = transformers.pipeline(
    "text-generation",
    model="marcelbinz/Llama-3.1-Minitaur-8B",
    device="cuda",
)
@spaces.GPU
def infer(prompt, max_tokens):
    """Generate a completion for *prompt*.

    Runs the module-level text-generation pipeline for up to
    ``max_tokens`` new tokens (coerced to int, since Gradio may hand
    the value over as a float) and returns the generated text.
    """
    outputs = pipe(prompt, max_new_tokens=int(max_tokens))
    return outputs[0]["generated_text"]
# `infer` takes (prompt, max_tokens), so the Interface must provide two
# inputs — the original passed a single Text component, which made Gradio
# call infer() with one argument and raise TypeError on every submission.
demo = gr.Interface(
    fn=infer,
    inputs=[
        gr.Text(label="Prompt"),
        gr.Slider(minimum=1, maximum=1024, value=256, step=1, label="Max new tokens"),
    ],
    outputs="text",
    title="Minitaur",
    description="Just type and hit *Run*",
)
demo.launch()