import os

from huggingface_hub import login
from langchain.agents import AgentType, Tool, initialize_agent
from langchain_community.tools import DuckDuckGoSearchResults
from langchain_experimental.tools import PythonREPLTool
from langchain_huggingface import HuggingFaceEndpoint

# Authentication: HuggingFaceEndpoint reads HUGGINGFACEHUB_API_TOKEN from the
# environment; fall back to an interactive login if it is not set.
if not os.environ.get("HUGGINGFACEHUB_API_TOKEN"):
    login()

# LLM: Mistral-7B-Instruct via the Hugging Face Inference API
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",
    temperature=0.2,
    max_new_tokens=512,
)

# Define the tools
search_tool = DuckDuckGoSearchResults()
python_tool = PythonREPLTool()

tools = [
    Tool(
        name="Search",
        func=search_tool.run,
        description=(
            "Useful for when you need to answer questions about current events "
            "or look up information online."
        ),
    ),
    Tool(
        name="Python_REPL",
        func=python_tool.run,
        description="Useful for math, calculations, or running simple Python code.",
    ),
]

system_prompt = """You are an expert AI assistant specialized in answering exam-style factual questions. Follow these guidelines:
- Use the Search tool when external knowledge is needed (especially about recent events or niche topics).
- Use the Python_REPL tool for any math calculations, even if simple.
- Always attempt to provide a direct and concise answer without extra commentary.
- Do not apologize or state limitations.
- If a file is attached, explain how you would process it or the key steps to extract an answer.
- When dates are mentioned, be very precise and double-check calculations using the appropriate tools.
- If unsure, use the Search tool before responding.

Respond directly to the user's question based solely on facts and without unnecessary elaboration. Only provide what is explicitly asked for."""

# Initialize the agent. For ZERO_SHOT_REACT_DESCRIPTION the custom instructions are
# injected as the prompt prefix via agent_kwargs; initialize_agent has no
# system_message parameter for this agent type.
agent_executor = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
    handle_parsing_errors=True,
    agent_kwargs={"prefix": system_prompt},
)
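
# Example invocation (illustrative sketch; the question below is a placeholder, not
# part of the original setup). initialize_agent returns an AgentExecutor, so the
# standard Chain interface applies: invoke() takes {"input": ...} and returns a
# dict containing an "output" key.
if __name__ == "__main__":
    result = agent_executor.invoke(
        {"input": "What is the square root of 144 plus 17?"}
    )
    print(result["output"])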