from langchain_google_genai import ChatGoogleGenerativeAI
from pydantic import PrivateAttr
from langchain_core.tools.base import BaseTool
from langchain_core.messages import SystemMessage, HumanMessage


class AnswerQuestionTool(BaseTool):
    name: str = "answer_question_tool"
    description: str = "Use this tool to answer any elementary question that you can solve without needing access to any external tool. Simply provide the question as input, reporting the whole question including the desired output format."

    # Pydantic private attributes: kept out of the tool's public fields/arguments.
    _llm: ChatGoogleGenerativeAI = PrivateAttr()
    _system_prompt: SystemMessage = PrivateAttr()

    def __init__(self):
        super().__init__()
        self._llm = ChatGoogleGenerativeAI(
            model="gemini-2.0-flash",
            temperature=0)
        self._system_prompt = SystemMessage("""You are a helpful assistant.
You will be given a question and you will have to answer that question.
You MUST NOT apologise, explain your reasoning, or add anything else.
You MUST answer the question and provide the answer in the REQUIRED FORMAT.
YOU MUST ABSOLUTELY NOT MAKE ANY KIND OF ASSUMPTION!! If you don't know an answer, say you don't know!
If the format is incorrect, the answer is considered wrong.
If you are given a list or a transcript and you need to do something with a list of objects, think carefully about how you should return the output!
""")

    def _run(self, question: str) -> str:
        human_message = HumanMessage(
            content=[
                {"type": "text", "text": question},
            ]
        )
        response = self._llm.invoke([self._system_prompt, human_message])
        # invoke() returns an AIMessage; return its text content so the tool yields a plain string.
        return response.content
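

# A minimal usage sketch, not part of the original Space code. It assumes a valid
# GOOGLE_API_KEY is available in the environment so ChatGoogleGenerativeAI can
# authenticate; the question string is only illustrative.
if __name__ == "__main__":
    tool = AnswerQuestionTool()
    # BaseTool.run() wraps _run() with callback handling and returns the answer string.
    answer = tool.run("What is the capital of France? Answer with a single word.")
    print(answer)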