import gradio as gr
import spaces
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

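# ether0 is FutureHouse's open chemistry reasoning model (a 24B Mistral fine-tune).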
model_id = "futurehouse/ether0"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    # "auto" loads the checkpoint in its native dtype instead of upcasting
    # to float32, roughly halving the memory footprint of a 24B model.
    torch_dtype="auto",
)

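# Request a ZeroGPU allocation for the duration of each call (Hugging Face Spaces).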
@spaces.GPU
def chat_fn(prompt, max_tokens=512):
    # Cap the user-supplied token budget so a single request cannot run unbounded.
    max_tokens = min(int(max_tokens), 32_000)
    messages = [{"role": "user", "content": prompt}]
    chat_prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    inputs = tokenizer(chat_prompt, return_tensors="pt").to(model.device)
    
    # Low-temperature sampling keeps answers focused; EOS doubles as the
    # pad token so generate() does not warn about a missing one.
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=0.1,
        pad_token_id=tokenizer.eos_token_id,
    )
    
    # Decode only the newly generated tokens, skipping the echoed prompt.
    generated_text = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )
    return generated_text

gr.Interface(
    fn=chat_fn,
    inputs=[
        gr.Textbox(label="prompt"),
        gr.Number(label="max_tokens", value=512, precision=0)
    ],
    outputs="text",
    title="Ether0"
).launch(ssr_mode=False)
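
# A minimal client-side sketch for querying this app programmatically
# (assumes the gradio_client package and that the Space is published under
# "futurehouse/ether0"; substitute your actual Space id):
#
#   from gradio_client import Client
#   client = Client("futurehouse/ether0")
#   print(client.predict("Propose a SMILES string for caffeine.", 512, api_name="/predict"))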