akhaliq (HF Staff) committed
Commit 3b8474b · 1 Parent(s): f168ae4

qwen coder with cerebras

Files changed (1): app.py (+7 −5)
app.py CHANGED
@@ -427,7 +427,7 @@ AVAILABLE_MODELS = [
         "description": "Qwen3-235B-A22B-Instruct-2507 model for code generation and general tasks"
     },
     {
-        "name": "Qwen3-Coder-480B-A35B",
+        "name": "Qwen3-Coder-480B-A35B-Instruct",
         "id": "Qwen/Qwen3-Coder-480B-A35B-Instruct",
         "description": "Qwen3-Coder-480B-A35B-Instruct model for advanced code generation and programming tasks"
     },
@@ -590,6 +590,8 @@ def get_inference_client(model_id, provider="auto"):
         provider = "cerebras"
     elif model_id == "Qwen/Qwen3-235B-A22B-Thinking-2507":
         provider = "cerebras"
+    elif model_id == "Qwen/Qwen3-Coder-480B-A35B-Instruct":
+        provider = "cerebras"
     return InferenceClient(
         provider=provider,
         api_key=HF_TOKEN,
@@ -2271,7 +2273,7 @@ def generate_requirements_txt_with_llm(import_statements):
 
     # Use a lightweight model for this task
     try:
-        client = get_inference_client("Qwen/Qwen3-Coder-480B-A35B", "auto")
+        client = get_inference_client("Qwen/Qwen3-Coder-480B-A35B-Instruct", "auto")
 
         imports_text = '\n'.join(import_statements)
 
@@ -2307,7 +2309,7 @@ Generate a comprehensive requirements.txt that ensures the application will work
         ]
 
         response = client.chat.completions.create(
-            model="Qwen/Qwen3-Coder-480B-A35B",
+            model="Qwen/Qwen3-Coder-480B-A35B-Instruct",
             messages=messages,
            max_tokens=1024,
             temperature=0.1
@@ -2585,7 +2587,7 @@ with gr.Blocks(
     setting = gr.State({
         "system": HTML_SYSTEM_PROMPT,
     })
-    current_model = gr.State(AVAILABLE_MODELS[0])  # Moonshot Kimi-K2
+    current_model = gr.State(AVAILABLE_MODELS[10])  # Qwen3-Coder-480B-A35B-Instruct
     open_panel = gr.State(None)
     last_login_state = gr.State(None)
 
@@ -2668,7 +2670,7 @@ with gr.Blocks(
     )
     model_dropdown = gr.Dropdown(
         choices=[model['name'] for model in AVAILABLE_MODELS],
-        value="Qwen3-Coder-480B-A35B",
+        value="Qwen3-Coder-480B-A35B-Instruct",
         label="Model",
         visible=True
     )
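
In plain terms, the commit renames the Qwen3-Coder entry to its full model id, routes that id to the Cerebras inference provider, and makes it the default model in the UI. A minimal sketch of the resulting client path follows; it is a reconstruction from the visible hunks only, not the full app.py (the condition guarding the first "cerebras" branch is outside this diff), and it assumes a huggingface_hub version with inference-provider support and an HF_TOKEN environment variable.

# Minimal sketch of the provider routing after this commit. Assumptions:
# huggingface_hub >= 0.28 (which added the `provider` argument) and HF_TOKEN
# set in the environment; the real get_inference_client has branches not
# visible in this diff.
import os

from huggingface_hub import InferenceClient

HF_TOKEN = os.environ["HF_TOKEN"]

def get_inference_client(model_id, provider="auto"):
    # Model ids pinned to Cerebras in this diff; any other id keeps the
    # caller-supplied provider (default "auto").
    if model_id in (
        "Qwen/Qwen3-235B-A22B-Thinking-2507",
        "Qwen/Qwen3-Coder-480B-A35B-Instruct",  # added by this commit
    ):
        provider = "cerebras"
    return InferenceClient(provider=provider, api_key=HF_TOKEN)

# Usage mirroring the requirements.txt generator in app.py (the prompt here
# is a stand-in, not the one from the file):
client = get_inference_client("Qwen/Qwen3-Coder-480B-A35B-Instruct", "auto")
response = client.chat.completions.create(
    model="Qwen/Qwen3-Coder-480B-A35B-Instruct",
    messages=[{"role": "user", "content": "Generate a requirements.txt for: import gradio"}],
    max_tokens=1024,
    temperature=0.1,
)
print(response.choices[0].message.content)

One observation on the design: the default model is selected by position (AVAILABLE_MODELS[10]) plus a matching dropdown value string, so both had to change in step with the renamed entry; keying the default by model id instead would avoid that coupling.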