Commit a537e5e
Parent(s): 428a61d
added logger, type checking
- __pycache__/main.cpython-311.pyc +0 -0
- __pycache__/models.cpython-311.pyc +0 -0
- __pycache__/schemas.cpython-311.pyc +0 -0
- main.py +5 -2
- requirements.txt +2 -1
- utils.py +32 -2
__pycache__/main.cpython-311.pyc ADDED
Binary file (5.59 kB)

__pycache__/models.cpython-311.pyc ADDED
Binary file (4.03 kB)

__pycache__/schemas.cpython-311.pyc ADDED
Binary file (298 Bytes)
main.py CHANGED
@@ -1,6 +1,6 @@
 # main.py
 from fastapi import FastAPI,status,Response,Request
-from fastapi.responses import StreamingResponse
+from fastapi.responses import StreamingResponse,FileResponse
 from models import load_text_model,generate_text,load_audio_model,generate_audio,load_image_model, generate_image
 from schemas import VoicePresets
 from utils import audio_array_to_buffer,img_to_bytes
@@ -15,7 +15,7 @@ models = {}
 
 @asynccontextmanager
 async def lifespan(_: FastAPI) -> AsyncIterator[None]:
-    models["text2image"] = load_image_model()
+    # models["text2image"] = load_image_model()
     yield
     models.clear()
 
@@ -70,6 +70,9 @@ def serve_language_model_controller(prompt: str) -> str:
     output = generate_text(pipe, prompt)
     return output
 
+@app.get("/logs")
+def get_logs():
+    return FileResponse("path/to/logs.csv", media_type='text/csv', filename="logs.csv")
 
 @app.get(
     "/generate/audio",
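The new /logs route returns the log file as a download: FileResponse sets the Content-Type header from media_type and a Content-Disposition header from filename. Note that the commit ships the literal placeholder "path/to/logs.csv". Below is a minimal sketch, not part of the commit, of exercising the endpoint with FastAPI's test client; it assumes the placeholder has been pointed at a real CSV file.

# Minimal sketch (not from the commit): download the logs via the new endpoint.
# Assumes the placeholder "path/to/logs.csv" in main.py points at a real file.
from fastapi.testclient import TestClient

from main import app

with TestClient(app) as client:  # the context manager also runs the lifespan hooks
    response = client.get("/logs")

assert response.status_code == 200
# FileResponse derives these headers from media_type= and filename=
assert response.headers["content-type"].startswith("text/csv")
assert 'filename="logs.csv"' in response.headers["content-disposition"]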
requirements.txt CHANGED
@@ -5,4 +5,5 @@ torch
 pydantic
 bitsandbytes
 soundfile
-diffusers
+diffusers
+loguru
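loguru is the dependency behind the "added logger" half of the commit message. The commit does not show where the logger is configured, so the following is only a sketch of one plausible setup that would produce the CSV file served by /logs; the sink path and record format are assumptions, not taken from the repo.

# Hedged sketch (not from the commit): configure loguru to append
# CSV-shaped records to a file the /logs endpoint could serve.
from loguru import logger

logger.add(
    "logs.csv",  # assumed path; main.py still uses the placeholder "path/to/logs.csv"
    format="{time:YYYY-MM-DD HH:mm:ss},{level},{message}",  # timestamp, level, message columns
    level="INFO",
)
logger.info("startup complete")

One caveat with this design: loguru does not escape commas inside messages, so the output is CSV-shaped rather than strictly valid CSV.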
utils.py CHANGED
@@ -2,7 +2,8 @@ from io import BytesIO
 import soundfile
 import numpy as np
 from PIL import Image
-from typing import Literal
+from typing import Literal,TypeAlias
+import tiktoken
 
 def audio_array_to_buffer(audio_array: np.array, sample_rate: int) -> BytesIO:
     buffer = BytesIO()
@@ -16,4 +17,33 @@ def img_to_bytes(
 ) -> bytes:
     buffer = BytesIO()
     image.save(buffer, format=img_format)
-    return buffer.getvalue()
+    return buffer.getvalue()
+
+
+
+
+
+SupportedModels: TypeAlias = Literal["gpt-3.5", "gpt-4"]
+PriceTable: TypeAlias = dict[SupportedModels, float]
+price_table: PriceTable = {"gpt-3.5": 0.0030, "gpt-4": 0.0200}
+
+def count_tokens(text: str | None) -> int:
+    if text is None:
+        logger.warning("Response is None. Assuming 0 tokens used")
+        return 0
+    enc = tiktoken.encoding_for_model("gpt-4o")
+    return len(enc.encode(text))
+
+def calculate_usage_costs(
+    prompt: str,
+    response: str | None,
+    model: SupportedModels,
+) -> tuple[float, float, float]:
+    if model not in price_table:
+        # raise at runtime - in case someone ignores type errors
+        raise ValueError(f"Cost calculation is not supported for {model} model.")
+    price = price_table[model]
+    req_costs = price * count_tokens(prompt) / 1000
+    res_costs = price * count_tokens(response) / 1000
+    total_costs = req_costs + res_costs
+    return req_costs, res_costs, total_costs
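The Literal/TypeAlias pair is the "type checking" half of the commit message: with SupportedModels narrowed to "gpt-3.5" and "gpt-4", a static checker such as mypy flags calculate_usage_costs(..., model="gpt-4o") before the runtime ValueError guard is ever reached. Two gaps worth noting: count_tokens calls logger.warning, but this hunk adds no logger import, so the sketch below assumes from loguru import logger is present (or still needs to be added) at the top of utils.py; and count_tokens uses tiktoken's gpt-4o encoding (o200k_base) while the price table covers gpt-3.5 and gpt-4, which tokenize with cl100k_base, so the counts are approximate for those models. A rough usage sketch with illustrative values:

# Rough usage sketch; strings and numbers are illustrative, not from the repo.
# Assumes `from loguru import logger` exists in utils.py, since count_tokens
# uses logger but the diff above does not add the import.
from utils import calculate_usage_costs, count_tokens

prompt = "Summarize the lifespan handler in main.py."
response = "It loads models on startup and clears them on shutdown."

req, res, total = calculate_usage_costs(prompt, response, model="gpt-4")
# Prices are per 1K tokens: a 10-token prompt on gpt-4 costs
# 0.0200 * 10 / 1000 = $0.0002.
print(f"{count_tokens(prompt)} prompt tokens -> "
      f"request ${req:.4f}, response ${res:.4f}, total ${total:.4f}")

# A None response logs a warning and is billed as 0 tokens.
_, res_none, _ = calculate_usage_costs(prompt, None, model="gpt-4")
assert res_none == 0.0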