from langchain_core.tools import tool
from huggingface_hub import InferenceClient
# --- Basic operations --- #
@tool
def multiply(a: float, b: float) -> float:
    """Multiplies two numbers.
    Args:
        a (float): the first number
        b (float): the second number
    """
    return a * b


@tool
def add(a: float, b: float) -> float:
    """Adds two numbers.
    Args:
        a (float): the first number
        b (float): the second number
    """
    return a + b


@tool
def subtract(a: float, b: float) -> float:
    """Subtracts two numbers.
    Args:
        a (float): the first number
        b (float): the second number
    """
    return a - b


@tool
def divide(a: float, b: float) -> float:
    """Divides two numbers.
    Args:
        a (float): the first number
        b (float): the second number
    """
    if b == 0:
        raise ValueError("Cannot divide by zero.")
    return a / b


@tool
def modulus(a: int, b: int) -> int:
    """Get the modulus of two numbers.
    Args:
        a (int): the first number
        b (int): the second number
    """
    return a % b


@tool
def power(a: float, b: float) -> float:
    """Get the power of two numbers.
    Args:
        a (float): the first number
        b (float): the second number
    """
    return a**b


# --- Functions --- #
@tool
def query_image(query: str, image_url: str) -> str:
    """Ask anything about an image using a Vision Language Model.
    Args:
        query (str): the query about the image, e.g. how many persons are in the image?
        image_url (str): the URL to the image
    """
    # Calls a hosted VLM through the "nebius" inference provider; relies on a
    # Hugging Face token being configured in the environment.
    client = InferenceClient(provider="nebius")
    try:
        completion = client.chat.completions.create(
            # model="google/gemma-3-27b-it",
            model="Qwen/Qwen2.5-VL-72B-Instruct",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": query},
                        {"type": "image_url", "image_url": {"url": image_url}},
                    ],
                }
            ],
            max_tokens=512,
        )
        # Return only the message text so the tool output is a plain string.
        return completion.choices[0].message.content
    except Exception as e:
        return f"query_image failed: {e}"


@tool
def automatic_speech_recognition(file_url: str) -> str:
    """Transcribe an audio file to text.
    Args:
        file_url (str): the URL to the audio file
    """
    client = InferenceClient(provider="fal-ai")
    try:
        # The client returns an AutomaticSpeechRecognitionOutput; return its text field.
        return client.automatic_speech_recognition(file_url, model="openai/whisper-large-v3").text
    except Exception as e:
        return f"automatic_speech_recognition failed: {e}"