add new mistral model
app.py CHANGED
```diff
@@ -517,6 +517,11 @@ AVAILABLE_MODELS = [
         "id": "codestral-2508",
         "description": "Mistral Codestral model - specialized for code generation and programming tasks"
     },
+    {
+        "name": "Mistral Medium 2508",
+        "id": "mistral-medium-2508",
+        "description": "Mistral Medium 2508 model via Mistral API for general tasks and coding"
+    },
     {
         "name": "Gemini 2.5 Flash",
         "id": "gemini-2.5-flash",
```
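This hunk registers the new model alongside codestral-2508: each entry in the AVAILABLE_MODELS registry is a plain dict with "name", "id", and "description" keys. A minimal sketch of how such a registry can be queried by id (find_model is a hypothetical helper, not something shown in app.py):

```python
# AVAILABLE_MODELS trimmed to the entry this commit adds.
AVAILABLE_MODELS = [
    {
        "name": "Mistral Medium 2508",
        "id": "mistral-medium-2508",
        "description": "Mistral Medium 2508 model via Mistral API for general tasks and coding",
    },
]

def find_model(model_id):
    """Return the first entry whose "id" matches, or None if unknown."""
    return next((m for m in AVAILABLE_MODELS if m["id"] == model_id), None)

assert find_model("mistral-medium-2508")["name"] == "Mistral Medium 2508"
```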
```diff
@@ -676,8 +681,8 @@ def get_inference_client(model_id, provider="auto"):
             api_key=os.getenv("STEP_API_KEY"),
             base_url="https://api.stepfun.com/v1"
         )
-    elif model_id == "codestral-2508":
-        # Use Mistral client for
+    elif model_id == "codestral-2508" or model_id == "mistral-medium-2508":
+        # Use Mistral client for Mistral models
         return Mistral(api_key=os.getenv("MISTRAL_API_KEY"))
     elif model_id == "gemini-2.5-flash":
         # Use Google Gemini (OpenAI-compatible) client
```
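A minimal sketch of the branch this hunk edits, assuming the mistralai v1 SDK (pip install mistralai); the real get_inference_client has many more branches, and the MISTRAL_MODEL_IDS tuple is a hypothetical convenience, not in app.py:

```python
import os

from mistralai import Mistral  # mistralai v1 SDK

# Hypothetical constant; app.py spells the two ids out with an `or` chain.
MISTRAL_MODEL_IDS = ("codestral-2508", "mistral-medium-2508")

def get_inference_client(model_id):
    if model_id in MISTRAL_MODEL_IDS:
        # The Mistral SDK takes the API key directly and, unlike the
        # OpenAI-compatible branches above it, needs no base_url.
        return Mistral(api_key=os.getenv("MISTRAL_API_KEY"))
    raise ValueError(f"unsupported model id: {model_id}")
```

A membership test like this also mirrors the `in (...)` checks the later hunks use, which would keep all three call sites consistent if more Mistral models are added.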
```diff
@@ -2684,7 +2689,7 @@ This will help me create a better design for you."""
     messages.append({'role': 'user', 'content': enhanced_query})
     try:
         # Handle Mistral API method difference
-        if _current_model["id"] == "codestral-2508":
+        if _current_model["id"] in ("codestral-2508", "mistral-medium-2508"):
             completion = client.chat.stream(
                 model=_current_model["id"],
                 messages=messages,
```
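The "Mistral API method difference" comment refers to how streaming is started: the mistralai SDK exposes a dedicated chat.stream method, while OpenAI-style clients pass a stream=True flag to the regular create call. A sketch of that dispatch (start_stream is hypothetical, not a function in app.py):

```python
def start_stream(client, model_id, messages):
    """Begin a streaming chat completion on either client type."""
    if model_id in ("codestral-2508", "mistral-medium-2508"):
        # mistralai v1: streaming has its own method, no stream= flag.
        return client.chat.stream(model=model_id, messages=messages)
    # OpenAI-compatible clients: streaming is a flag on the usual create call.
    return client.chat.completions.create(
        model=model_id, messages=messages, stream=True
    )
```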
```diff
@@ -2728,7 +2733,7 @@ This will help me create a better design for you."""
         for chunk in completion:
             # Handle different response formats for Mistral vs others
             chunk_content = None
-            if _current_model["id"] == "codestral-2508":
+            if _current_model["id"] in ("codestral-2508", "mistral-medium-2508"):
                 # Mistral format: chunk.data.choices[0].delta.content
                 if (
                     hasattr(chunk, "data") and chunk.data and
```
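Downstream of that check, the two stream formats differ in where the text delta lives, as the in-diff comment notes: mistralai wraps each event so the payload is chunk.data.choices[0].delta.content, while OpenAI-style chunks expose chunk.choices[0].delta.content directly. A sketch of extraction that guards both shapes (extract_delta is hypothetical):

```python
def extract_delta(chunk, is_mistral):
    """Pull the incremental text out of one streamed chunk, or return None."""
    if is_mistral:
        # Mistral format: chunk.data.choices[0].delta.content
        if hasattr(chunk, "data") and chunk.data and chunk.data.choices:
            return chunk.data.choices[0].delta.content
        return None
    # OpenAI-style format: chunk.choices[0].delta.content
    if getattr(chunk, "choices", None):
        return chunk.choices[0].delta.content
    return None
```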