Update agents.py
agents.py CHANGED
@@ -236,7 +236,7 @@ class GeminiAgent(LLMAgentBase):
                 prompt,
                 generation_config={"temperature": 0.5}
             )
-            print("GEMINI RESPONSE : "response)
+            print("GEMINI RESPONSE : ",response)
             if not response.candidates:
                 finish_reason_str = "No candidates found"
             try:
@@ -318,7 +318,7 @@ class OpenAIAgent(LLMAgentBase):
                 temperature=0.5,
             )
             message = response.choices[0].message
-            print("OPENAI RESPONSE : "response)
+            print("OPENAI RESPONSE : ",response)
             # Check for tool calls in the response
             if message.tool_calls:
                 tool_call = message.tool_calls[0] # Get the first tool call
@@ -378,7 +378,7 @@ class MistralAgent(LLMAgentBase):
                 tool_choice="auto", # Let the model choose
                 temperature=0.5,
             )
-            print("Mistral RESPONSE : "response)
+            print("Mistral RESPONSE : ",response)
             message = response.choices[0].message
             # Check for tool calls in the response
             if message.tool_calls:
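For context, the lines removed in each hunk were not just noisy logging but a Python syntax error: a string literal followed directly by a variable name is invalid, since implicit concatenation only joins adjacent string literals. The sketch below is a minimal, standalone illustration of the failure and of the corrected pattern the commit applies; the response dict here is a hypothetical stand-in for the actual SDK response objects in agents.py.

# Minimal standalone sketch (not the repository's code): why the change was needed.
# The pre-fix form is a SyntaxError, because a string literal followed directly
# by a variable name is not valid Python:
#   print("GEMINI RESPONSE : "response)    # SyntaxError: invalid syntax

response = {"candidates": []}  # hypothetical stand-in for the SDK response object

# Pattern applied by the commit: pass the object as a second print() argument,
# so print() converts it with str() and joins the parts with a space separator.
print("GEMINI RESPONSE : ", response)

# Equivalent alternative using an f-string.
print(f"GEMINI RESPONSE : {response}")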