Fix brain configuration update issue - ensure UI values override preset values
app_with_mcp.py  CHANGED  +8 -3
@@ -191,7 +191,7 @@ async def minion_respond_async(query: str, preset_model: str, api_type: str,
                                use_tools: bool):
     """Respond to query using specified configuration with optional MCP tools"""
 
-    # If a preset model is selected, use preset configuration
+    # If a preset model is selected, use preset configuration as base but allow overrides
     if preset_model != "Custom":
         config_obj = preset_configs.get(preset_model, default_config)
         llm_config_dict = {
@@ -200,17 +200,22 @@ async def minion_respond_async(query: str, preset_model: str, api_type: str,
             'base_url': config_obj.base_url,
             'api_version': config_obj.api_version,
             'model': config_obj.model,
-            'temperature': config_obj.temperature,
-            'max_tokens': config_obj.max_tokens,
+            'temperature': temperature,  # Use UI value, not preset value
+            'max_tokens': max_tokens,  # Use UI value, not preset value
             'vision_enabled': config_obj.vision_enabled
         }
+        print(f"Using preset '{preset_model}' with temperature={temperature}, max_tokens={max_tokens}")
     else:
         # Use custom configuration
         llm_config_dict = create_custom_llm_config(
             api_type, api_key, base_url, api_version, model, temperature, max_tokens
         )
+        print(f"Using custom config with temperature={temperature}, max_tokens={max_tokens}")
 
+    # Always rebuild brain with current configuration
     brain = build_brain_with_config(llm_config_dict)
+    print(f"Brain rebuilt with config: api_type={llm_config_dict['api_type']}, model={llm_config_dict['model']}")
+
     # Handle empty route selection for auto route
     route_param = route if route else None
 
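For reference, the same override can be written more compactly by merging the UI values over the preset as a dict, so each overridable field does not need to be listed by hand. Below is a minimal sketch of that pattern; PresetConfig and build_llm_config are illustrative names, not code from app_with_mcp.py, and the field list is assumed.

from dataclasses import dataclass, asdict

@dataclass
class PresetConfig:
    # Illustrative stand-in for the app's preset configuration objects
    api_type: str
    base_url: str
    api_version: str
    model: str
    temperature: float
    max_tokens: int
    vision_enabled: bool = False

def build_llm_config(preset: PresetConfig, temperature: float, max_tokens: int) -> dict:
    """Start from the preset, then let the UI-supplied values win."""
    config = asdict(preset)
    # update() applies last, so the UI values override the preset defaults
    config.update({'temperature': temperature, 'max_tokens': max_tokens})
    return config

preset = PresetConfig('openai', 'https://api.openai.com/v1', '', 'gpt-4o', 0.7, 1024)
print(build_llm_config(preset, temperature=0.2, max_tokens=4096))
# -> temperature and max_tokens reflect the UI values (0.2, 4096), not the preset's

Either way, the point of the commit stands: temperature and max_tokens come from the UI arguments, and the brain is rebuilt from the resulting dict on every request.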