SkyNetWalker committed on
Commit
2f59c4c
·
verified ·
1 Parent(s): 849ae95

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -13,14 +13,14 @@ import ollama
13
 
14
  # Model from run.sh
15
  MODEL_ID_MAP = {
16
- "Tencent混元1.8B":'hf.co/bartowski/tencent_Hunyuan-1.8B-Instruct-GGUF:Q4_K_M',
17
- "Qwen3-4B-Instruct-2507": 'hf.co/bartowski/Qwen_Qwen3-4B-Instruct-2507-GGUF:Q4_K_M',
18
- #"Qwen3-4B-Thinking-2507": 'hf.co/bartowski/Qwen_Qwen3-4B-Thinking-2507-GGUF:Q4_K_M',
19
- "SmolLM2-360M": 'smollm2:360m-instruct-q5_K_M',
20
- "Llama3.2-3B-Instruct": 'hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M', # OK speed with CPU
21
- #"Gemma3n-e2b-it": 'gemma3n:e2b-it-q4_K_M',
22
- "Granite3.3-2B": 'granite3.3:2b',
23
- "Hunyuan-4B-Instruct": 'hf.co/bartowski/tencent_Hunyuan-4B-Instruct-GGUF:Q4_K_M'
24
  }
25
 
26
 
@@ -32,7 +32,7 @@ DEFAULT_SYSTEM_PROMPT = """Answer everything in simple, smart, relevant and accu
32
 
33
  # --- Gradio Interface ---
34
  with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="neutral")) as demo:
35
- gr.Markdown(f"## Small Language Model (SLM) run with CPU") # Changed title to be more generic
36
  gr.Markdown(f"(Run-Location-As: `{check_ipinfo}`)")
37
  gr.Markdown("Chat with the model, customize its behavior with a system prompt, and toggle streaming output.")
38
 
@@ -90,7 +90,7 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="neutra
90
  system_prompt_selector = gr.Radio(
91
  label="Choose a System Prompt Style",
92
  choices=list(SYSTEM_PROMPT_OPTIONS.keys()),
93
- value="Smart & Accurate (Default)",
94
  interactive=True
95
  )
96
 
 
13
 
14
  # Model from run.sh
15
  MODEL_ID_MAP = {
16
+ "(Tencent)混元-1.8B-Instruct":'hf.co/bartowski/tencent_Hunyuan-1.8B-Instruct-GGUF:Q4_K_M',
17
+ "(阿里千問)Qwen3-4B-Instruct-2507": 'hf.co/bartowski/Qwen_Qwen3-4B-Instruct-2507-GGUF:Q4_K_M',
18
+ #"(阿里千問)Qwen3-4B-Thinking-2507": 'hf.co/bartowski/Qwen_Qwen3-4B-Thinking-2507-GGUF:Q4_K_M',
19
+ "(HuggingFace)SmolLM2-360M": 'smollm2:360m-instruct-q5_K_M',
20
+ "(Meta)Llama3.2-3B-Instruct": 'hf.co/bartowski/Llama-3.2-3B-Instruct-GGUF:Q4_K_M', # OK speed with CPU
21
+ #"(Google)Gemma3n-e2b-it": 'gemma3n:e2b-it-q4_K_M',
22
+ "(IBM)Granite3.3-2B": 'granite3.3:2b',
23
+ "(Tencent)混元-4B-Instruct": 'hf.co/bartowski/tencent_Hunyuan-4B-Instruct-GGUF:Q4_K_M'
24
  }
25
 
26
 
 
32
 
33
  # --- Gradio Interface ---
34
  with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="neutral")) as demo:
35
+ gr.Markdown(f"## LLM/SLM running with CPU") # Changed title to be more generic
36
  gr.Markdown(f"(Run-Location-As: `{check_ipinfo}`)")
37
  gr.Markdown("Chat with the model, customize its behavior with a system prompt, and toggle streaming output.")
38
 
 
90
  system_prompt_selector = gr.Radio(
91
  label="Choose a System Prompt Style",
92
  choices=list(SYSTEM_PROMPT_OPTIONS.keys()),
93
+ value="Smart & Accurate (Auto TC/EN)",
94
  interactive=True
95
  )
96