import os
from typing import Dict, List

from .llm import LLM
from modals.inputs import LLMConfig


class AgentBase(LLM):
    """Thin wrapper around LLM that optionally prepends a system prompt."""

    def __init__(self, system_prompt=None, llm_config: LLMConfig = None):
        super().__init__()
        self.llm_config = llm_config
        self.system_prompt = system_prompt

    def generate_response(self, messages: List[Dict[str, str]] = None):
        # Guard against a missing message list so the concatenation below cannot fail.
        messages = messages or []
        # Prepend the configured system prompt, if any, before calling the model.
        if self.system_prompt:
            messages = [{"role": "system", "content": self.system_prompt}] + messages
        output = self.step(messages=messages, llm_config=self.llm_config)
        return output


if __name__ == "__main__":
    # Quick smoke test: point the OpenAI-compatible client settings at Gemini.
    # Read the API key from the environment rather than hardcoding it in source.
    llm_config = LLMConfig(
        api_key=os.getenv("GEMINI_API_KEY"),
        base_url="https://generativelanguage.googleapis.com/v1beta/openai/",
        model="gemini-2.0-flash",
    )
    messages = [
        {"role": "system", "content": "You are a helpful AI assistant."},
        {"role": "user", "content": "Tell me a fun fact about space."},
    ]
    agent = AgentBase(llm_config=llm_config)
    response = agent.generate_response(messages=messages)
    print(response)
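

# ------------------------------------------------------------------------------
# Reference sketch (assumption, not part of this module): AgentBase relies on
# the .llm module providing an LLM base class whose step() accepts the message
# list and an LLMConfig. That implementation is not shown here; the commented
# sketch below only illustrates one plausible shape of that interface, using the
# openai client against the OpenAI-compatible base_url carried by LLMConfig.
#
# from openai import OpenAI
#
# class LLM:
#     def step(self, messages, llm_config):
#         # Create a client for the OpenAI-compatible endpoint and return the
#         # text content of the first completion choice.
#         client = OpenAI(api_key=llm_config.api_key, base_url=llm_config.base_url)
#         completion = client.chat.completions.create(
#             model=llm_config.model,
#             messages=messages,
#         )
#         return completion.choices[0].message.content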