femtowin committed on
Commit
a53c3cd
·
1 Parent(s): a67e589

Fix preset override issue - always use current UI values for execution

Browse files
Files changed (1)
  1. app_with_mcp.py +20 -25
app_with_mcp.py CHANGED
@@ -191,38 +191,33 @@ async def minion_respond_async(query: str, preset_model: str, api_type: str,
191
  use_tools: bool):
192
  """Respond to query using specified configuration with optional MCP tools"""
193
 
194
- # Build configuration based on preset or custom settings
 
 
 
 
 
195
  if preset_model != "Custom":
196
- # Use preset as base, but allow UI overrides for temperature and max_tokens
197
- config_obj = preset_configs.get(preset_model, default_config)
198
- llm_config_dict = {
199
- 'api_type': config_obj.api_type,
200
- 'api_key': config_obj.api_key,
201
- 'base_url': config_obj.base_url,
202
- 'api_version': config_obj.api_version,
203
- 'model': config_obj.model,
204
- 'temperature': temperature, # Always use UI value for dynamic parameters
205
- 'max_tokens': max_tokens, # Always use UI value for dynamic parameters
206
- 'vision_enabled': config_obj.vision_enabled
207
- }
208
- print(f"🔧 Using preset '{preset_model}' with overrides:")
209
- print(f" - Temperature: {temperature} (preset: {config_obj.temperature})")
210
- print(f" - Max tokens: {max_tokens} (preset: {config_obj.max_tokens})")
211
- print(f" - Model: {config_obj.model}")
212
- print(f" - API Type: {config_obj.api_type}")
213
  else:
214
- # Use completely custom configuration
215
- llm_config_dict = create_custom_llm_config(
216
- api_type, api_key, base_url, api_version, model, temperature, max_tokens
217
- )
218
  print(f"🔧 Using custom configuration:")
 
 
 
 
219
  print(f" - Temperature: {temperature}")
220
  print(f" - Max tokens: {max_tokens}")
221
- print(f" - Model: {model}")
222
- print(f" - API Type: {api_type}")
223
 
224
- # Always rebuild brain with current configuration
225
  print(f"🧠 Building brain with final config:")
 
 
226
  print(f" - Final temperature: {llm_config_dict['temperature']}")
227
  print(f" - Final max_tokens: {llm_config_dict['max_tokens']}")
228
  brain = build_brain_with_config(llm_config_dict)
 
191
  use_tools: bool):
192
  """Respond to query using specified configuration with optional MCP tools"""
193
 
194
+ # Always use the current UI values, regardless of preset selection
195
+ # Preset is only used for initializing UI fields, not for actual execution
196
+ llm_config_dict = create_custom_llm_config(
197
+ api_type, api_key, base_url, api_version, model, temperature, max_tokens
198
+ )
199
+
200
  if preset_model != "Custom":
201
+ print(f"🔧 Using preset '{preset_model}' base with UI overrides:")
202
+ print(f" - API Type: {api_type}")
203
+ print(f" - Model: {model}")
204
+ print(f" - Base URL: {base_url}")
205
+ print(f" - API Version: {api_version}")
206
+ print(f" - Temperature: {temperature}")
207
+ print(f" - Max tokens: {max_tokens}")
 
 
 
 
 
 
 
 
 
 
208
  else:
 
 
 
 
209
  print(f"🔧 Using custom configuration:")
210
+ print(f" - API Type: {api_type}")
211
+ print(f" - Model: {model}")
212
+ print(f" - Base URL: {base_url}")
213
+ print(f" - API Version: {api_version}")
214
  print(f" - Temperature: {temperature}")
215
  print(f" - Max tokens: {max_tokens}")
 
 
216
 
217
+ # Always rebuild brain with current UI configuration
218
  print(f"🧠 Building brain with final config:")
219
+ print(f" - Final API type: {llm_config_dict['api_type']}")
220
+ print(f" - Final model: {llm_config_dict['model']}")
221
  print(f" - Final temperature: {llm_config_dict['temperature']}")
222
  print(f" - Final max_tokens: {llm_config_dict['max_tokens']}")
223
  brain = build_brain_with_config(llm_config_dict)