File size: 2,045 Bytes
c0a983b
6d73c15
a8c8062
6d73c15
 
 
c0a983b
 
6d73c15
 
 
 
 
 
 
a8c8062
c31af98
6d73c15
 
 
 
 
 
 
 
c31af98
6d73c15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c31af98
6d73c15
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
from typing import Literal
from datetime import datetime
from typing import Annotated, Literal,Optional
from uuid import uuid4
from pydantic import BaseModel, Field, HttpUrl, IPvAnyAddress, PositiveInt,AfterValidator,validate_call


# Closed set of accepted voice preset identifiers (presumably TTS speaker
# voices — confirm against the caller that consumes this alias).
VoicePresets = Literal["v2/en_speaker_1", "v2/en_speaker_9"]

class ModelRequest(BaseModel):
    """Base payload shared by all model invocation requests."""

    # Non-empty user prompt, capped at 10,000 characters.
    prompt: str = Field(min_length=1, max_length=10000)


class ModelResponse(BaseModel):
    """Base payload shared by all model invocation responses."""

    # Unique hex identifier generated per response instance.
    request_id: Annotated[str, Field(default_factory=lambda: uuid4().hex)]
    # Client IP address, if known.
    ip: Annotated[Optional[IPvAnyAddress], Field(default=None)]
    # Generated content, capped at 10,000 characters.
    content: Annotated[Optional[str], Field(min_length=0, max_length=10000, default=None)]
    # BUG FIX: was `created_at: datetime = datetime.now()`, which evaluates
    # once at class-definition time — every instance shared that single
    # timestamp. default_factory re-evaluates per instance.
    # NOTE(review): timestamp is naive (no tzinfo), matching the original
    # behavior — consider datetime.now(timezone.utc) if callers allow it.
    created_at: Annotated[datetime, Field(default_factory=datetime.now)]


class TextModelRequest(ModelRequest):
    """Request payload for a text-generation model."""

    # Only these two chat models are accepted.
    model: Literal["gpt-3.5-turbo", "gpt-4o"]
    # Sampling temperature in [0.0, 1.0]; defaults to deterministic (0.0).
    temperature: float = Field(ge=0.0, le=1.0, default=0.0)

class TextModelResponse(ModelResponse):
    """Response payload for a text-generation model."""

    # Non-negative token count for the generation, when reported.
    tokens: Optional[int] = Field(default=None, ge=0)

# (width, height) of an image in pixels; both components must be positive.
ImageSize = Annotated[tuple[PositiveInt, PositiveInt], "Width and height of an image in pixels"]
# Closed set of supported image-generation model identifiers.
SupportedModels = Annotated[
    Literal["tinysd", "sd1.5"], "Supported Image Generation Models"
]

@validate_call
def is_square_image(value: ImageSize) -> ImageSize:
    """Validate that *value* is a supported square image size.

    Args:
        value: (width, height) pair of positive ints.

    Returns:
        The validated size, unchanged.

    Raises:
        ValueError: if the image is not square, or the side is not 512 or 1024.
    """
    width, height = value
    # FIX: was `value[0] / value[1] != 1` — float division to test integer
    # equality loses precision for very large ints; compare directly instead.
    if width != height:
        raise ValueError("Only square images are supported")
    if width not in [512, 1024]:
        raise ValueError(f"Invalid output size: {value} - expected 512 or 1024")
    return value

@validate_call
def is_valid_inference_step(
    num_inference_steps: int, model: SupportedModels
) -> int:
    """Validate *num_inference_steps* against the given model's cap.

    Raises:
        ValueError: if the "tinysd" model is asked for more than 2000 steps.
    """
    exceeds_tinysd_cap = model == "tinysd" and num_inference_steps > 2000
    if exceeds_tinysd_cap:
        raise ValueError(
            "TinySD model cannot have more than 2000 inference steps"
        )
    return num_inference_steps

class ImageModelRequest(ModelRequest):
    """Request payload for an image-generation model."""

    # Which supported image model to run.
    model: SupportedModels
    # Requested output size as (width, height) in pixels.
    output_size: ImageSize
    # Diffusion steps, bounded to [0, 2000]; defaults to 200.
    num_inference_steps: Annotated[int, Field(ge=0, le=2000, default=200)]

class ImageModelResponse(ModelResponse):
    """Response payload for an image-generation model."""

    # Actual size of the generated image in pixels.
    size: ImageSize
    # Where the generated image can be fetched, when available.
    url: Optional[HttpUrl] = Field(default=None)