import os
from smolagents import (
    CodeAgent,
    DuckDuckGoSearchTool,
    VisitWebpageTool,
    PythonInterpreterTool,
    InferenceClientModel,
    OpenAIServerModel,
    # HfApiModel,  # import bug in smolagents after adding duckduckgo-search to requirements
    tool
)
from typing import List, Dict, Any, Optional
class QAgent:
    def __init__(
        self,
        model_type: str = "InferenceClientModel",
        model_id: Optional[str] = None,
        api_key: Optional[str] = None,
        provider: Optional[str] = None,   # for InferenceClientModel
        timeout: Optional[int] = None,    # for InferenceClientModel
        system_prompt: Optional[str] = None,
        temperature: float = 0.2,
        verbose: bool = False             # enable verbose logging
    ):
        """
        Question-answering agent wrapping a smolagents CodeAgent.

        Selects the model backend (InferenceClientModel or OpenAIServerModel)
        from model_type, resolves the API key from the arguments or from
        environment variables, and equips the agent with web search and
        Python interpreter tools.
        """
        self.verbose = verbose
        self.system_prompt = system_prompt
        # if model_type == "HfApiModel":
        #     if api_key is None:
        #         api_key = os.getenv("Q_NEB_TOK")
        #     if not api_key:
        #         raise ValueError("No API Key found for HuggingFace. Please set Q_NEB_TOK or pass api_key.")
        #
        #     if self.verbose:
        #         print(f"Using Hugging Face token: {api_key[:5]}... (HfApiModel mode)")
        #
        #     self.model = HfApiModel(
        #         model_id=model_id or "Qwen/Qwen2.5-Coder-32B-Instruct",  # previously: or "meta-llama/Llama-3-70B-Instruct"
        #         token=api_key,
        #         temperature=temperature
        #     )
        # el
        if model_type == "InferenceClientModel":
            if api_key is None:
                api_key = os.getenv("Q_NEB_TOK")
            if not api_key:
                raise ValueError("No API Key found for HuggingFace. Please set Q_NEB_TOK or pass api_key.")
            if self.verbose:
                print(f"Using Hugging Face token: {api_key[:5]}... (InferenceClientModel mode)")
            self.model = InferenceClientModel(
                model_id=model_id or "Qwen/Qwen2.5-Coder-32B-Instruct",  # previously: or "meta-llama/Llama-3-70B-Instruct"
                provider=provider or "nebius",  # or "hf-inference"
                token=api_key,
                timeout=timeout or 120,
                temperature=temperature
            )
        elif model_type == "OpenAIServerModel":
            print("Trying to configure OpenAIServerModel.")
            # Check for an xAI API key and base URL first
            xai_api_key = os.getenv("XAI_API_KEY")
            # xai_api_base = os.getenv("XAI_API_BASE")  # not sure what this is for
            # If xAI credentials are available, use them
            if xai_api_key and api_key is None:
                api_key = xai_api_key
                if self.verbose:
                    print(f"Using xAI API key: {api_key[:5]}...")
            # If no API key is specified, fall back to Q_OAI_TOK
            if api_key is None:
                api_key = os.getenv("Q_OAI_TOK")
            if not api_key:
                raise ValueError("No OpenAI API key provided. Please set the Q_OAI_TOK or XAI_API_KEY environment variable, or pass api_key.")
            self.model = OpenAIServerModel(
                model_id=model_id or "gpt-4o",
                api_key=api_key,
                # api_base=api_base,
                temperature=temperature
            )
        else:
            raise ValueError(f"Unknown model type: {model_type}")
        if self.verbose:
            # Not every model class exposes a provider attribute, so read it defensively
            provider_name = getattr(self.model, "provider", "n/a")
            print(f"Model initialized: {model_type} - {self.model.model_id} - prov: {provider_name}")
        # Initialize tools
        self.tools = [
            DuckDuckGoSearchTool(),
            PythonInterpreterTool(),
            # save_and_read_file,
            # download_file_from_url,
            # analyze_csv_file,
            # analyze_excel_file
        ]
        # Additional imports that may be authorized inside the Python interpreter
        self.imports = ["pandas", "numpy", "datetime", "json", "re", "math", "os", "requests", "csv", "urllib"]
        # Create the CodeAgent
        self.agent = CodeAgent(
            tools=self.tools,
            model=self.model,
            # additional_authorized_imports=self.imports,
            # executor_type=executor_type,
            # executor_kwargs=executor_kwargs,
            verbosity_level=2 if self.verbose else 0
        )
        if self.verbose:
            print("CodeAgent initialized")
    def invoke(self, prompt: str) -> str:
        """Run the agent on a prompt and return its final answer."""
        print(f"Agent invoked with prompt: {prompt[:80]}...")
        result = self.agent.run(prompt)
        print(result)
        return result
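

# --- Example usage (illustrative sketch, not part of the class) ---
# Assumes the Q_NEB_TOK environment variable holds a valid Hugging Face token
# accepted by the "nebius" inference provider; adjust model_type, model_id,
# and the sample prompt to match your own setup.
if __name__ == "__main__":
    agent = QAgent(
        model_type="InferenceClientModel",
        verbose=True,
    )
    answer = agent.invoke("What year was the Eiffel Tower completed?")
    print(f"Final answer: {answer}")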