"""LangChain tool definitions: basic arithmetic, date utilities, and
audio/YouTube/image helpers built on OpenAI and YouTube transcript APIs."""

from langchain_core.tools import tool
import datetime
import requests
import openai
import os
import tempfile
from urllib.parse import urlparse
from openai import OpenAI  # NOTE: was imported twice; duplicate removed
from youtube_transcript_api import YouTubeTranscriptApi
from pytube import extract


@tool
def add(a: float, b: float) -> float:
    """
    Adds two numbers.

    Args:
        a (float): first number
        b (float): second number
    """
    return a + b


@tool
def subtract(a: float, b: float) -> float:
    """
    Subtracts two numbers.

    Args:
        a (float): first number
        b (float): second number
    """
    # FIX: return annotation was `-> int`, but float - float yields a float.
    return a - b


@tool
def multiply(a: float, b: float) -> float:
    """
    Multiplies two numbers.

    Args:
        a (float): first number
        b (float): second number
    """
    return a * b


@tool
def divide(a: float, b: float) -> float:
    """
    Divides two numbers.

    Args:
        a (float): first number
        b (float): second number

    Raises:
        ValueError: if b is zero.
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b


@tool
def power(a: float, b: float) -> float:
    """
    Calculates the power of two numbers.

    Args:
        a (float): first number
        b (float): second number
    """
    return a**b


calculator_basic = [add, subtract, multiply, divide, power]


@tool
def current_date(_) -> str:
    """
    Returns the current date in YYYY-MM-DD format
    """
    return datetime.datetime.now().strftime("%Y-%m-%d")


@tool
def day_of_week(_) -> str:
    """
    Returns the current day of the week (e.g., Monday, Tuesday)
    """
    return datetime.datetime.now().strftime("%A")


@tool
def days_until(date_str: str) -> str:
    """
    Returns the number of days from today until a given date (input format: YYYY-MM-DD)
    """
    try:
        future_date = datetime.datetime.strptime(date_str, "%Y-%m-%d").date()
        today = datetime.date.today()
        delta_days = (future_date - today).days
        return f"{delta_days} days until {date_str}"
    except Exception as e:
        return f"Error parsing date: {str(e)}"


datetime_tools = [current_date, day_of_week, days_until]


@tool
def transcribe_audio(audio_file: str, file_extension: str) -> str:
    """
    Transcribes an audio file to text using OpenAI Whisper.

    Args:
        audio_file (str): URL of the audio file (.mp3, .m4a, etc.).
            FIX: the old docstring said "local file path", but the code
            downloads the file with an HTTP GET.
        file_extension (str): file extension of the audio, e.g. mp3

    Returns:
        str: The transcribed text from the audio, or an
            "transcribe_audio failed: ..." message on any error.
    """
    try:
        # Download the audio; timeout added so a dead URL cannot hang the tool.
        response = requests.get(audio_file, timeout=60)
        response.raise_for_status()  # raise on non-2xx HTTP status

        # Whisper needs a correct suffix to detect the container format.
        suffix = "." + file_extension.replace(".", "")

        # FIX: use a real temporary file instead of a fixed "tmp.<ext>" in the
        # CWD, which leaked to disk and could collide between concurrent calls.
        with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as tmp:
            tmp.write(response.content)
            tmp_path = tmp.name

        try:
            client = OpenAI()
            # Re-open in binary read mode; the API reads the file object.
            with open(tmp_path, "rb") as audio_content:
                transcription = client.audio.transcriptions.create(
                    model="whisper-1",
                    file=audio_content,
                )
            return transcription.text
        finally:
            os.remove(tmp_path)  # always clean up the downloaded audio
    except Exception as e:
        return f"transcribe_audio failed: {e}"


@tool
def transcribe_youtube(youtube_url: str) -> str:
    """
    Transcribes a YouTube video

    Args:
        youtube_url (str): youtube video's url

    Returns:
        str: transcript text (one caption segment per line), or an
            "transcribe_youtube failed: ..." message on any error.
    """
    try:
        video_id = extract.video_id(youtube_url)
        transcript = YouTubeTranscriptApi.get_transcript(video_id)
        # Keep only the caption text, dropping timestamps/durations.
        text = '\n'.join([s['text'] for s in transcript])
        return text
    except Exception as e:
        return f"transcribe_youtube failed: {e}"


@tool
def query_image(query: str, image_url: str) -> str:
    """
    Ask anything about an image using a Vision Language Model

    Args:
        query (str): the query about the image, e.g. how many animals are on the image?
        image_url (str): the image's URL

    Returns:
        str: the model's answer, or a "query_image failed: ..." message
            on any error.
    """
    try:
        client = OpenAI()
        response = client.responses.create(
            model="gpt-4o-mini",
            input=[
                {
                    "role": "user",
                    "content": [
                        {"type": "input_text", "text": query},
                        {"type": "input_image", "image_url": image_url},
                    ],
                }
            ],
        )
        return response.output_text
    except Exception as e:
        return f"query_image failed: {e}"