femtowin committed
Commit a67e589 · 1 Parent(s): 92617e9

Improve configuration handling with better logging and preset override clarity

Files changed (1):
  1. app_with_mcp.py +21 -9
app_with_mcp.py CHANGED

@@ -191,8 +191,9 @@ async def minion_respond_async(query: str, preset_model: str, api_type: str,
                                use_tools: bool):
     """Respond to query using specified configuration with optional MCP tools"""
 
-    # If a preset model is selected, use preset configuration as base but allow overrides
+    # Build configuration based on preset or custom settings
     if preset_model != "Custom":
+        # Use preset as base, but allow UI overrides for temperature and max_tokens
         config_obj = preset_configs.get(preset_model, default_config)
         llm_config_dict = {
             'api_type': config_obj.api_type,
@@ -200,21 +201,31 @@ async def minion_respond_async(query: str, preset_model: str, api_type: str,
             'base_url': config_obj.base_url,
             'api_version': config_obj.api_version,
             'model': config_obj.model,
-            'temperature': temperature,  # Use UI value, not preset value
-            'max_tokens': max_tokens,  # Use UI value, not preset value
+            'temperature': temperature,  # Always use UI value for dynamic parameters
+            'max_tokens': max_tokens,  # Always use UI value for dynamic parameters
             'vision_enabled': config_obj.vision_enabled
         }
-        print(f"Using preset '{preset_model}' with temperature={temperature}, max_tokens={max_tokens}")
+        print(f"🔧 Using preset '{preset_model}' with overrides:")
+        print(f"   - Temperature: {temperature} (preset: {config_obj.temperature})")
+        print(f"   - Max tokens: {max_tokens} (preset: {config_obj.max_tokens})")
+        print(f"   - Model: {config_obj.model}")
+        print(f"   - API Type: {config_obj.api_type}")
     else:
-        # Use custom configuration
+        # Use completely custom configuration
         llm_config_dict = create_custom_llm_config(
             api_type, api_key, base_url, api_version, model, temperature, max_tokens
         )
-        print(f"Using custom config with temperature={temperature}, max_tokens={max_tokens}")
+        print(f"🔧 Using custom configuration:")
+        print(f"   - Temperature: {temperature}")
+        print(f"   - Max tokens: {max_tokens}")
+        print(f"   - Model: {model}")
+        print(f"   - API Type: {api_type}")
 
     # Always rebuild brain with current configuration
+    print(f"🧠 Building brain with final config:")
+    print(f"   - Final temperature: {llm_config_dict['temperature']}")
+    print(f"   - Final max_tokens: {llm_config_dict['max_tokens']}")
     brain = build_brain_with_config(llm_config_dict)
-    print(f"Brain rebuilt with config: api_type={llm_config_dict['api_type']}, model={llm_config_dict['model']}")
 
     # Handle empty route selection for auto route
     route_param = route if route else None
@@ -231,10 +242,11 @@ async def minion_respond_async(query: str, preset_model: str, api_type: str,
     try:
         tools = await get_available_tools()
         kwargs['tools'] = tools
-        print(f"Using {len(tools)} tools: {[tool.name for tool in tools]}")
+        print(f"🔧 Using {len(tools)} tools: {[tool.name for tool in tools]}")
     except Exception as e:
-        print(f"Warning: Failed to get tools: {e}")
+        print(f"⚠️ Warning: Failed to get tools: {e}")
 
+    print(f"🚀 Executing brain.step with route='{route_param}', check={check_enabled}")
     obs, score, *_ = await brain.step(**kwargs)
     return obs
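For readers skimming the diff, here is a minimal, self-contained sketch of the override pattern the new comments describe: static connection fields come from the preset, while temperature and max_tokens are always taken from the UI. The LLMConfig dataclass and the preset values below are assumptions for illustration; the app defines its own preset_configs and default_config elsewhere.

```python
from dataclasses import dataclass

# Hypothetical stand-in for the app's preset config objects (assumed shape).
@dataclass
class LLMConfig:
    api_type: str
    api_key: str
    base_url: str
    api_version: str
    model: str
    temperature: float
    max_tokens: int
    vision_enabled: bool = False

# Assumed preset table; the real preset_configs/default_config live in the app.
default_config = LLMConfig("openai", "sk-...", "https://api.openai.com/v1",
                           "", "gpt-4o-mini", 0.7, 1024)
preset_configs = {
    "GPT-4o": LLMConfig("openai", "sk-...", "https://api.openai.com/v1",
                        "", "gpt-4o", 0.2, 4096),
}

def build_llm_config(preset_model: str, temperature: float, max_tokens: int) -> dict:
    """Merge a preset with UI overrides: static fields from the preset,
    dynamic sampling parameters from the UI."""
    config_obj = preset_configs.get(preset_model, default_config)
    return {
        'api_type': config_obj.api_type,
        'api_key': config_obj.api_key,
        'base_url': config_obj.base_url,
        'api_version': config_obj.api_version,
        'model': config_obj.model,
        'temperature': temperature,   # UI value wins over preset
        'max_tokens': max_tokens,     # UI value wins over preset
        'vision_enabled': config_obj.vision_enabled,
    }

cfg = build_llm_config("GPT-4o", temperature=0.9, max_tokens=512)
assert cfg['model'] == "gpt-4o" and cfg['temperature'] == 0.9  # UI override applied
```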
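The last hunk keeps tool discovery best-effort: if fetching MCP tools fails, only a warning is printed and the query still runs without tools. Below is a runnable sketch of that fallback; get_available_tools and brain.step are names from the diff, but the stub bodies are invented for the example.

```python
import asyncio
from typing import Optional

async def get_available_tools() -> list:
    # Stub: the real app queries its MCP server here (body invented for the example).
    raise ConnectionError("MCP server unreachable")

class Brain:
    async def step(self, query: str, route: Optional[str] = None,
                   tools: Optional[list] = None):
        # Stub: the real brain dispatches the query and returns (observation, score, ...).
        suffix = f" with {len(tools)} tools" if tools else " without tools"
        return f"answered: {query}{suffix}", 1.0

async def respond(brain: Brain, query: str, route: Optional[str], use_tools: bool) -> str:
    kwargs = {'query': query, 'route': route}
    if use_tools:
        try:
            kwargs['tools'] = await get_available_tools()
        except Exception as e:
            # Tool discovery is best-effort: warn and continue without tools.
            print(f"⚠️ Warning: Failed to get tools: {e}")
    obs, score, *_ = await brain.step(**kwargs)
    return obs

print(asyncio.run(respond(Brain(), "hello", None, use_tools=True)))
# ⚠️ Warning: Failed to get tools: MCP server unreachable
# answered: hello without tools
```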