from io import BytesIO
import soundfile
import numpy as np
from PIL import Image
from typing import Literal, TypeAlias
import tiktoken
from loguru import logger


def audio_array_to_buffer(audio_array: np.ndarray, sample_rate: int) -> BytesIO:
    """Write an audio array into an in-memory WAV buffer."""
    buffer = BytesIO()
    soundfile.write(buffer, audio_array, sample_rate, format="wav")
    buffer.seek(0)  # rewind so the buffer can be read from the start
    return buffer


def img_to_bytes(
    image: Image.Image, img_format: Literal["PNG", "JPEG"] = "PNG"
) -> bytes:
    """Serialize a PIL image to raw bytes in the given format."""
    buffer = BytesIO()
    image.save(buffer, format=img_format)
    return buffer.getvalue()


SupportedModels: TypeAlias = Literal["gpt-3.5", "gpt-4"]
PriceTable: TypeAlias = dict[SupportedModels, float]
# price per 1,000 tokens for each supported model
price_table: PriceTable = {"gpt-3.5": 0.0030, "gpt-4": 0.0200}


def count_tokens(text: str | None) -> int:
    """Count tokens in a string using the tiktoken encoding for gpt-4o."""
    if text is None:
        logger.warning("Response is None. Assuming 0 tokens used")
        return 0
    enc = tiktoken.encoding_for_model("gpt-4o")
    return len(enc.encode(text))


def calculate_usage_costs(
    prompt: str,
    response: str | None,
    model: SupportedModels,
) -> tuple[float, float, float]:
    """Return (request cost, response cost, total cost) for a prompt/response pair."""
    if model not in price_table:
        # raise at runtime - in case someone ignores type errors
        raise ValueError(f"Cost calculation is not supported for {model} model.")
    price = price_table[model]
    req_costs = price * count_tokens(prompt) / 1000
    res_costs = price * count_tokens(response) / 1000
    total_costs = req_costs + res_costs
    return req_costs, res_costs, total_costs
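

# Illustrative usage sketch (an assumption, not part of the original module):
# exercises the helpers above by writing a short silent clip to a WAV buffer,
# serializing a small blank image, and estimating token costs for a sample
# prompt/response pair.
if __name__ == "__main__":
    silence = np.zeros(16_000, dtype=np.float32)  # 1 second of silence at 16 kHz
    wav_buffer = audio_array_to_buffer(silence, sample_rate=16_000)
    print(f"WAV buffer size: {len(wav_buffer.getvalue())} bytes")

    png_bytes = img_to_bytes(Image.new("RGB", (64, 64), color="white"))
    print(f"PNG size: {len(png_bytes)} bytes")

    req, res, total = calculate_usage_costs(
        prompt="Hello, how are you?",
        response="I'm doing well, thank you!",
        model="gpt-4",
    )
    print(f"request: {req:.6f}, response: {res:.6f}, total: {total:.6f}")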