# genai_service/schemas.py
from datetime import datetime
from typing import Annotated, Literal, Optional
from uuid import uuid4

from pydantic import (
    BaseModel,
    Field,
    HttpUrl,
    IPvAnyAddress,
    PositiveInt,
    validate_call,
)

# Voice presets accepted by the text-to-speech backend.
VoicePresets = Literal["v2/en_speaker_1", "v2/en_speaker_9"]

class ModelRequest(BaseModel):
    prompt: Annotated[str, Field(min_length=1, max_length=10000)]


class ModelResponse(BaseModel):
    request_id: Annotated[str, Field(default_factory=lambda: uuid4().hex)]
    ip: Annotated[Optional[IPvAnyAddress], Field(default=None)]
    content: Annotated[Optional[str], Field(min_length=0, max_length=10000, default=None)]
    # default_factory makes the timestamp per-instance instead of being
    # evaluated once at class-definition time.
    created_at: Annotated[datetime, Field(default_factory=datetime.now)]


class TextModelRequest(ModelRequest):
    model: Literal["gpt-3.5-turbo", "gpt-4o"]
    temperature: Annotated[float, Field(ge=0.0, le=1.0, default=0.0)]


class TextModelResponse(ModelResponse):
    tokens: Annotated[Optional[int], Field(ge=0, default=None)]

ImageSize = Annotated[
    tuple[PositiveInt, PositiveInt], "Width and height of an image in pixels"
]
SupportedModels = Annotated[
    Literal["tinysd", "sd1.5"], "Supported Image Generation Models"
]

@validate_call
def is_square_image(value: ImageSize) -> ImageSize:
    """Accept only square outputs of 512x512 or 1024x1024 pixels."""
    if value[0] / value[1] != 1:
        raise ValueError("Only square images are supported")
    if value[0] not in [512, 1024]:
        raise ValueError(f"Invalid output size: {value} - expected 512 or 1024")
    return value


@validate_call
def is_valid_inference_step(
    num_inference_steps: int, model: SupportedModels
) -> int:
    """Reject inference-step counts above the TinySD limit."""
    if model == "tinysd" and num_inference_steps > 2000:
        raise ValueError(
            "TinySD model cannot have more than 2000 inference steps"
        )
    return num_inference_steps

class ImageModelRequest(ModelRequest):
    model: SupportedModels
    output_size: ImageSize
    num_inference_steps: Annotated[int, Field(ge=0, le=2000)] = 200


class ImageModelResponse(ModelResponse):
    size: ImageSize
    url: Annotated[Optional[HttpUrl], Field(default=None)]
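
# Illustrative only (not part of the original module): a minimal sketch of how the
# request schemas and the validate_call-decorated validators could be exercised
# together, e.g. as a quick smoke test. All literal values below are made-up examples.
if __name__ == "__main__":
    text_request = TextModelRequest(
        prompt="Write a haiku about type-safe APIs.",
        model="gpt-4o",
        temperature=0.2,
    )
    print(text_request.model_dump())

    image_request = ImageModelRequest(
        prompt="A lighthouse at dusk",
        model="tinysd",
        output_size=(512, 512),
        num_inference_steps=200,
    )
    # The validators raise ValueError for non-square or unsupported sizes and for
    # step counts beyond the TinySD limit; validate_call also checks argument types.
    is_square_image(image_request.output_size)
    is_valid_inference_step(image_request.num_inference_steps, image_request.model)
    print(image_request.model_dump())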