Build error
Update app.py
app.py
CHANGED
@@ -12,17 +12,17 @@ model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat1
 # -------------------------------------------------
 # Optional tool(s)
 # -------------------------------------------------
-TOOLS = [{
-    "name": "get_weather",
-    "description": "Get the current weather in a given city",
-    "parameters": {
-        "type": "object",
-        "properties": {
-            "city": {"type": "string", "description": "City name"}
-        },
-        "required": ["city"]
-    }
-}]
+# TOOLS = [{
+#     "name": "get_weather",
+#     "description": "Get the current weather in a given city",
+#     "parameters": {
+#         "type": "object",
+#         "properties": {
+#             "city": {"type": "string", "description": "City name"}
+#         },
+#         "required": ["city"]
+#     }
+# }]
 
 # -------------------------------------------------
 # Helpers
@@ -37,14 +37,14 @@ def build_messages(history, enable_thinking: bool):
     messages.insert(0, {"role": "system", "content": system_flag})
     return messages
 
-def chat_fn(history, enable_thinking, temperature, top_p):
+def chat_fn(history, enable_thinking, temperature, top_p, top_k, repetition_penalty):
     """Generate a streaming response."""
     messages = build_messages(history, enable_thinking)
     text = tokenizer.apply_chat_template(
         messages,
         tokenize=False,
         add_generation_prompt=True,
-        xml_tools=TOOLS
+        # xml_tools=TOOLS
     )
     inputs = tokenizer(text, return_tensors="pt").to(DEVICE)
 
@@ -54,8 +54,8 @@ def chat_fn(history, enable_thinking, temperature, top_p):
         do_sample=True,
         temperature=temperature,
         top_p=top_p,
-        top_k=
-        repetition_penalty=
+        top_k=top_k,
+        repetition_penalty=repetition_penalty,
         pad_token_id=tokenizer.eos_token_id,
         streamer=None  # we'll yield manually
     )
@@ -77,6 +77,8 @@ with gr.Blocks(title="SmolLM3-3B Chat") as demo:
     enable_think = gr.Checkbox(label="Enable Extended Thinking (/think)", value=False)
     temperature = gr.Slider(0.0, 1.0, value=0.6, label="Temperature")
     top_p = gr.Slider(0.0, 1.0, value=0.95, label="Top-p")
+    top_k = gr.Slider(1, 40, value=20, label="Top_k")
+    repetition_penalty = gr.Slider(1.0, 1.4, value=1.1, label="Repetition_Penalty")
     chatbot = gr.Chatbot(type="messages")
     msg = gr.Textbox(placeholder="Type your message here…", lines=1)
     clear = gr.Button("Clear")
@@ -87,7 +89,7 @@ with gr.Blocks(title="SmolLM3-3B Chat") as demo:
     msg.submit(
         user_fn, [msg, chatbot], [msg, chatbot], queue=False
     ).then(
-        chat_fn, [chatbot, enable_think, temperature, top_p], chatbot
+        chat_fn, [chatbot, enable_think, temperature, top_p, top_k, repetition_penalty], chatbot
     )
     clear.click(lambda: None, None, chatbot, queue=False)
 
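The commit comments out the TOOLS list and the xml_tools kwarg together; dropping only the list while still passing xml_tools=TOOLS would raise a NameError. If tool calling is re-enabled later, both pieces go back in together. A sketch of the re-enabled call, based only on the commented-out lines above:

# Sketch: tool calling re-enabled, assuming the commented-out TOOLS list is restored.
text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
    xml_tools=TOOLS,  # only valid once the TOOLS definition above is uncommented
)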
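The generation call keeps streamer=None with the note that tokens are yielded manually; that streaming loop sits outside the hunks shown here. A minimal sketch of one way to write it with transformers' TextIteratorStreamer, assuming the model, tokenizer, and inputs objects from app.py; the helper name and the max_new_tokens value are illustrative, not taken from the Space:

# Sketch only: streaming around model.generate with a TextIteratorStreamer.
# model, tokenizer, and inputs are assumed to be the objects built in app.py;
# stream_reply and max_new_tokens=512 are illustrative.
from threading import Thread
from transformers import TextIteratorStreamer

def stream_reply(inputs, temperature, top_p, top_k, repetition_penalty):
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        **inputs,
        max_new_tokens=512,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
        pad_token_id=tokenizer.eos_token_id,
        streamer=streamer,
    )
    Thread(target=model.generate, kwargs=generate_kwargs).start()
    partial = ""
    for piece in streamer:  # decoded text arrives as generate() produces tokens
        partial += piece
        yield partial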
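user_fn is wired into msg.submit but never appears in this diff. With gr.Chatbot(type="messages") it presumably just appends the user turn and clears the textbox; a hypothetical sketch, not taken from the Space:

# Hypothetical user_fn matching the [msg, chatbot] -> [msg, chatbot] wiring above.
def user_fn(user_message, history):
    history = (history or []) + [{"role": "user", "content": user_message}]
    return "", history  # clear the textbox, append the user turn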