adding temperature
agent.py
CHANGED
@@ -22,6 +22,7 @@ class QAgent:
         provider: Optional[str] = None, # for InferenceClientModel
         timeout: Optional[int] = None, # for InferenceClientModel
         system_prompt: Optional[str] = None,
+        temperature: float = 0.2,
         verbose: bool = False # Verbose logging or not
     ):
         """
@@ -44,7 +45,7 @@ class QAgent:
         # self.model = HfApiModel(
         # model_id=model_id or "Qwen/Qwen2.5-Coder-32B-Instruct", # previously: or "meta-llama/Llama-3-70B-Instruct",
         # token=api_key
-        #
+        # temperature=temperature
         # )
         # el
         if model_type == "InferenceClientModel":
@@ -61,7 +62,7 @@ class QAgent:
                 provider=provider or "nebius", # or "hf-inference",
                 token=api_key,
-                timeout=timeout or 120
-
+                timeout=timeout or 120,
+                temperature=temperature
             )
         elif model_type == "OpenAIServerModel":
             print(f"Trying to configure OpenAIServerModel.")