Update app.py
app.py
CHANGED
```diff
@@ -22,19 +22,23 @@ class GroqLLM:
         """Allows use as callable (legacy compatibility)"""
         return self.generate(prompt)
 
-    def generate(self, prompt: Union[str, dict, List[Dict]]) -> str:
-        """Generate text
+    def generate(self, prompt: Union[str, dict, List[Dict]], **kwargs) -> str:
+        """Generate text using Groq API. Handles optional kwargs from smolagents."""
         try:
             if isinstance(prompt, (dict, list)):
                 prompt_str = str(prompt)
             else:
                 prompt_str = str(prompt)
 
+            temperature = kwargs.get("temperature", 0.7)
+            max_tokens = kwargs.get("max_tokens", 1024)
+            # 'stop_sequences' is not supported by Groq API directly; you can ignore or handle manually.
+
             completion = self.client.chat.completions.create(
                 model=self.model_name,
                 messages=[{"role": "user", "content": prompt_str}],
-                temperature=
-                max_tokens=
+                temperature=temperature,
+                max_tokens=max_tokens,
                 stream=False
             )
 
```
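For context, here is a minimal sketch of how the updated `generate` method fits into the `GroqLLM` wrapper touched by this hunk. Only the `generate` body mirrors the diff; the constructor, the default model name, the `__call__` passthrough of `**kwargs`, and the error-handling fallback are assumptions added so the example runs on its own.

```python
import os
from typing import Dict, List, Optional, Union

from groq import Groq  # pip install groq


class GroqLLM:
    """Minimal callable wrapper around the Groq chat completions API.

    Illustrative sketch: constructor and defaults are assumptions;
    generate() mirrors the committed diff above.
    """

    def __init__(self, model_name: str = "llama3-70b-8192", api_key: Optional[str] = None):
        # Model name is an assumed default, not taken from the Space's app.py.
        self.model_name = model_name
        self.client = Groq(api_key=api_key or os.environ["GROQ_API_KEY"])

    def __call__(self, prompt: Union[str, dict, List[Dict]], **kwargs) -> str:
        """Allows use as callable (legacy compatibility)."""
        return self.generate(prompt, **kwargs)

    def generate(self, prompt: Union[str, dict, List[Dict]], **kwargs) -> str:
        """Generate text using Groq API. Handles optional kwargs from smolagents."""
        try:
            # Coerce structured prompts (dicts / message lists) to a plain string.
            prompt_str = str(prompt) if isinstance(prompt, (dict, list)) else str(prompt)

            # Pull sampling options out of kwargs so callers such as smolagents can
            # pass them through; defaults match the committed diff.
            temperature = kwargs.get("temperature", 0.7)
            max_tokens = kwargs.get("max_tokens", 1024)
            # 'stop_sequences' is not forwarded here; handle it manually if needed.

            completion = self.client.chat.completions.create(
                model=self.model_name,
                messages=[{"role": "user", "content": prompt_str}],
                temperature=temperature,
                max_tokens=max_tokens,
                stream=False,
            )
            return completion.choices[0].message.content
        except Exception as exc:  # assumed fallback; the original error handling is not shown in the hunk
            return f"Error calling Groq API: {exc}"


if __name__ == "__main__":
    llm = GroqLLM()
    print(llm("Summarize the benefits of unit tests.", temperature=0.2, max_tokens=256))
```

Reading extra options with `kwargs.get(...)` rather than widening the signature keeps the wrapper compatible with callers like smolagents, which may pass parameters (for example `stop_sequences`) that the Groq chat completions call does not accept directly.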