Update app.py
app.py CHANGED
@@ -10,16 +10,17 @@ import ollama
 # List of available models for selection.
 # IMPORTANT: These names must correspond to models that have been either

+
 # Model from run.sh
-
-    'hf.co/bartowski/Qwen_Qwen3-4B-Instruct-2507-GGUF:Q4_K_M',
-    #'hf.co/bartowski/Qwen_Qwen3-4B-Thinking-2507-GGUF:Q4_K_M',
-    'smollm2:360m-instruct-q5_K_M',
-    'hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M', # OK speed with CPU
-    #'gemma3n:e2b-it-q4_K_M',
-    'granite3.3:2b',
-    'hf.co/bartowski/tencent_Hunyuan-4B-Instruct-GGUF:Q4_K_M'
-
+MODEL_ID_MAP = {
+    "Qwen3-4B-Instruct-2507": 'hf.co/bartowski/Qwen_Qwen3-4B-Instruct-2507-GGUF:Q4_K_M',
+    #"Qwen3-4B-Thinking-2507": 'hf.co/bartowski/Qwen_Qwen3-4B-Thinking-2507-GGUF:Q4_K_M',
+    "SmolLM2-360M": 'smollm2:360m-instruct-q5_K_M',
+    "Llama3.2-3B-Instruct": 'hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M', # OK speed with CPU
+    #"Gemma3n-e2b-it": 'gemma3n:e2b-it-q4_K_M',
+    "Granite3.3-2B": 'granite3.3:2b',
+    "Hunyuan-4B-Instruct": 'hf.co/bartowski/tencent_Hunyuan-4B-Instruct-GGUF:Q4_K_M'
+}


 # Default System Prompt
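The hunk above replaces a flat list of model IDs with a display-name-to-ID map, so the UI can show short labels while the API still receives full registry paths. A minimal sketch of how such a map is typically consumed; the resolve_model helper and the on-demand pull are illustrative assumptions, not part of this commit:

import ollama

MODEL_ID_MAP = {
    "SmolLM2-360M": 'smollm2:360m-instruct-q5_K_M',
    "Granite3.3-2B": 'granite3.3:2b',
}

def resolve_model(display_name: str) -> str:
    # Hypothetical helper: map a short UI label back to the full Ollama model ID.
    return MODEL_ID_MAP[display_name]

# A model must exist locally before ollama.chat() can use it; run.sh
# presumably pulls it at startup, but it can also be pulled on demand:
ollama.pull(resolve_model("SmolLM2-360M"))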
@@ -36,9 +37,9 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="neutra

     # Model Selection
     with gr.Row():
-
-        choices=
-        value=
+        selected_model_label = gr.Radio(
+            choices=list(MODEL_ID_MAP.keys()),
+            value=list(MODEL_ID_MAP.keys())[0], # Default to first display name
             label="Select Model",
             info="Choose the LLM model to chat with.",
             interactive=True
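The Radio component now stores a selected display name, not a raw model ID, so every handler that receives it must translate through MODEL_ID_MAP. A self-contained sketch of that behaviour; the component names here are assumptions for illustration:

import gradio as gr

MODEL_ID_MAP = {"SmolLM2-360M": 'smollm2:360m-instruct-q5_K_M'}

with gr.Blocks() as demo:
    model_radio = gr.Radio(
        choices=list(MODEL_ID_MAP.keys()),   # short display names
        value=list(MODEL_ID_MAP.keys())[0],  # default to the first entry
        label="Select Model",
        interactive=True,
    )
    resolved = gr.Textbox(label="Resolved Ollama model ID")
    # The handler receives the selected display name as a plain string.
    model_radio.change(lambda name: MODEL_ID_MAP[name], inputs=model_radio, outputs=resolved)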
@@ -75,7 +76,10 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="neutra

     # --- New: System Prompt Options ---
     SYSTEM_PROMPT_OPTIONS = {
-        "Smart & Accurate (
+        "Smart & Accurate (Auto TC/EN)": DEFAULT_SYSTEM_PROMPT,
+        "繁體中文回答": "無論如何,必須使用標準繁體中文回答. Answer everything in a simple, smart, relevant and accurate style. Don't be chatty!",
+        "简体中文回答": "无论如何,必须使用标准简体中文回答. Answer everything in a simple, smart, relevant and accurate style. Don't be chatty!",
+        "English Chat": "You must reply in English. Answer everything in a simple, smart, relevant and accurate style. Don't be chatty!",
         "Friendly & Conversational": """Respond in a warm, friendly, and engaging tone. Use natural language and offer helpful suggestions. Keep responses concise but personable.""",
         "Professional & Formal": """Maintain a formal and professional tone. Use precise language, avoid slang, and ensure responses are suitable for business or academic contexts.""",
         "Elon Musk style": "You must chat in Elon Musk style!"
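The two Chinese presets pin the reply language (the key 繁體中文回答 means "answer in Traditional Chinese", 简体中文回答 "answer in Simplified Chinese"). The new respond() signature in the next hunk takes both selected_prompt_key and use_custom_prompt, which suggests prompt resolution roughly like the sketch below; this is a guess at the surrounding logic, not code from the commit, and it assumes the SYSTEM_PROMPT_OPTIONS dict above is in scope:

def resolve_system_prompt(selected_prompt_key: str, custom_prompt: str, use_custom_prompt: bool) -> str:
    # Prefer the free-text prompt from the textbox when the checkbox is ticked;
    # otherwise fall back to the preset chosen by key.
    if use_custom_prompt and custom_prompt.strip():
        return custom_prompt
    return SYSTEM_PROMPT_OPTIONS[selected_prompt_key]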
@@ -123,13 +127,15 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="neutra

     # --- Core Chat Logic ---
     # This function is the heart of the application.
-    def respond(history, system_prompt, stream_output,
+    def respond(history, system_prompt, stream_output, selected_model_name, selected_prompt_key, use_custom_prompt): # Added selected_model_name
         """
         This is the single function that handles the entire chat process.
         It takes the history, prepends the system prompt, calls the Ollama API,
         and streams the response back to the chatbot.
         """

+        current_selected_model = MODEL_ID_MAP[selected_model_name]
+
         # Disable Qwen3 thinking
         if "qwen3" in current_selected_model.lower():
             system_prompt = system_prompt + " /no_think"
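Per the docstring, respond() prepends the system prompt, calls the Ollama API, and streams the reply back. A minimal sketch of that streaming loop with the ollama Python client; history is assumed to be a list of {"role": ..., "content": ...} dicts as a messages-format Gradio Chatbot produces, and stream_reply is an illustrative name:

import ollama

def stream_reply(model_id: str, system_prompt: str, history: list):
    # Prepend the system prompt, then stream the assistant reply chunk by chunk.
    messages = [{"role": "system", "content": system_prompt}] + history
    reply = ""
    for chunk in ollama.chat(model=model_id, messages=messages, stream=True):
        reply += chunk["message"]["content"]
        yield reply  # each yield lets Gradio re-render the growing answer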
@@ -176,9 +182,9 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="neutra
         queue=False
     ).then(
         respond,
-        inputs=[chatbot, system_prompt_textbox, stream_checkbox,
+        inputs=[chatbot, system_prompt_textbox, stream_checkbox, selected_model_label, system_prompt_selector, use_custom_prompt_checkbox], # Pass new inputs
         outputs=[chatbot]
     )

 # Launch the Gradio interface
-demo.launch(server_name="0.0.0.0", server_port=7860)
+demo.launch(server_name="0.0.0.0", server_port=7860)
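The .then() above is the second half of a common Gradio submit chain: an unqueued first step appends the user's message instantly, then respond streams the model's answer into the same Chatbot. A runnable sketch of the pattern; user_turn, msg, and the stub respond are assumptions, and the real respond takes the six inputs listed in the hunk:

import gradio as gr

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(label="Message")

    def user_turn(user_message, history):
        # Append the user's message and clear the textbox before the model runs.
        return "", history + [{"role": "user", "content": user_message}]

    def respond(history):
        # Stub standing in for the real respond(); it would stream from Ollama here.
        yield history + [{"role": "assistant", "content": "..."}]

    msg.submit(user_turn, [msg, chatbot], [msg, chatbot], queue=False).then(
        respond, inputs=[chatbot], outputs=[chatbot]
    )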