import spaces
import gradio as gr
import torch
import transformers

# Load the model once at startup; device_map="auto" lets accelerate place the
# weights, so no explicit pipe.to('cuda') call is needed here.
pipe = transformers.pipeline(
    "text-generation",
    model="marcelbinz/Llama-3.1-Minitaur-8B",
    device_map="auto",
    torch_dtype=torch.bfloat16,
)

@spaces.GPU
def infer(prompt):
    # Sample a single new token and return the prompt plus the model's answer.
    return pipe(prompt, max_new_tokens=1, do_sample=True, temperature=1.0)[0]["generated_text"]

demo = gr.Interface(
    fn=infer,
    inputs=gr.Text(),
    outputs="text",
    title="Minitaur",
    description="Just type and hit *Run*",
).queue()
demo.launch()
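
# A minimal sketch of querying this demo programmatically once the Space is
# running, using gradio_client ("/predict" is the default endpoint name that
# gr.Interface registers). "user/minitaur-demo" is a placeholder Space id,
# not the actual one.
#
#   from gradio_client import Client
#   client = Client("user/minitaur-demo")
#   print(client.predict("Hello", api_name="/predict"))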