khurrameycon committed on
Commit 3f982d9 · verified · 1 Parent(s): e2ebf16

updated with nebius and mistral model

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -165,7 +165,7 @@ def llm_chat_response(text, image_base64=None):
 
     logger.info("Initializing InferenceClient...")
     client = InferenceClient(
-        provider="novita",  # Using novita as in your working example
+        provider="nebius",  # Switched provider to nebius
         api_key=HF_TOKEN
     )
 
@@ -199,7 +199,7 @@ def llm_chat_response(text, image_base64=None):
     logger.info("Sending request to model...")
     try:
         completion = client.chat.completions.create(
-            model="meta-llama/Llama-3.2-11B-Vision-Instruct",
+            model="mistralai/Mistral-Small-3.1-24B-Instruct-2503",
            messages=messages,
            max_tokens=500
        )
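For context, a minimal sketch of how the updated client call fits together after this commit. It assumes HF_TOKEN is supplied through the environment (e.g. a Space secret) and that any incoming image is already base64-encoded; the message layout and the ask() helper are illustrative, not copied from the rest of app.py.

import os
from huggingface_hub import InferenceClient

# Assumption: HF_TOKEN is provided via the environment, as in the Space's secrets.
HF_TOKEN = os.environ.get("HF_TOKEN")

client = InferenceClient(
    provider="nebius",   # route requests through the Nebius inference provider
    api_key=HF_TOKEN,
)

def ask(text, image_base64=None):
    # Build a single user message; attach the image as a data URL only when one is supplied.
    content = [{"type": "text", "text": text}]
    if image_base64:
        content.append({
            "type": "image_url",
            "image_url": {"url": f"data:image/jpeg;base64,{image_base64}"},
        })

    completion = client.chat.completions.create(
        model="mistralai/Mistral-Small-3.1-24B-Instruct-2503",
        messages=[{"role": "user", "content": content}],
        max_tokens=500,
    )
    return completion.choices[0].message.content

Since Mistral-Small-3.1-24B-Instruct-2503 is a multimodal model, the same chat.completions.create call works for text-only and image-plus-text requests, which matches the optional image_base64 parameter of llm_chat_response.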