Niansuh committed on
Commit
5b9363f
·
verified ·
1 Parent(s): ede8b1e

Rename api/typing.py to api/gizai.py

Browse files
Files changed (2) hide show
  1. api/gizai.py +169 -0
  2. api/typing.py +0 -7
api/gizai.py ADDED
@@ -0,0 +1,169 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # api/gizai.py
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ from aiohttp import ClientSession
7
+ from typing import AsyncGenerator, Union
8
+
9
+ from .models import AsyncResult, Messages, ImageResponse
10
+ from .utils import strip_model_prefix
11
+
12
class AsyncGeneratorProvider:
    """Base class for providers that deliver results as an async generator."""

    @classmethod
    async def create_async_generator(cls, *args, **kwargs) -> AsyncGenerator:
        """Produce an asynchronous generator of provider output.

        This base implementation is abstract; concrete providers must
        override it.

        Raises:
            NotImplementedError: always, in this base class.
        """
        raise NotImplementedError
17
+
18
class ProviderModelMixin:
    """Mixin declaring the model-name resolution interface."""

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve a requested model name to the provider's actual name.

        This base implementation is abstract; concrete providers must
        override it.

        Raises:
            NotImplementedError: always, in this base class.
        """
        raise NotImplementedError
23
+
24
class GizAI(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for the GizAI inference API.

    Supports both chat-completion and image-generation models against the
    same inference endpoint; `create_async_generator` dispatches on the
    model family.
    """
    url = "https://app.giz.ai/assistant/"
    api_endpoint = "https://app.giz.ai/api/data/users/inferenceServer.infer"
    working = True

    supports_system_message = True
    supports_message_history = True

    # Chat models
    default_model = 'chat-gemini-flash'
    chat_models = [
        default_model,
        'chat-gemini-pro',
        'chat-gpt4m',
        'chat-gpt4',
        'claude-sonnet',
        'claude-haiku',
        'llama-3-70b',
        'llama-3-8b',
        'mistral-large',
        'chat-o1-mini'
    ]

    # Image models
    image_models = [
        'flux1',
        'sdxl',
        'sd',
        'sd35',
    ]

    models = [*chat_models, *image_models]

    # Maps user-facing names onto the backend identifiers above.
    model_aliases = {
        # Chat model aliases
        "gemini-flash": "chat-gemini-flash",
        "gemini-pro": "chat-gemini-pro",
        "gpt-4o-mini": "chat-gpt4m",
        "gpt-4o": "chat-gpt4",
        "claude-3.5-sonnet": "claude-sonnet",
        "claude-3-haiku": "claude-haiku",
        "llama-3.1-70b": "llama-3-70b",
        "llama-3.1-8b": "llama-3-8b",
        "o1-mini": "chat-o1-mini",
        # Image model aliases
        "sd-1.5": "sd",
        "sd-3.5": "sd35",
        "flux-schnell": "flux1",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* to a backend model name.

        Known backend names pass through unchanged, aliases are mapped,
        and anything unrecognized falls back to ``default_model``.
        """
        if model in cls.models:
            return model
        # Single dict lookup replaces the previous if/elif/else chain;
        # unknown names still resolve to the default.
        return cls.model_aliases.get(model, cls.default_model)

    @classmethod
    def is_image_model(cls, model: str) -> bool:
        """Return True if *model* is an image-generation model."""
        return model in cls.image_models

    @classmethod
    def _build_headers(cls) -> dict:
        """Browser-like request headers expected by the GizAI endpoint."""
        return {
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'en-US,en;q=0.9',
            'Cache-Control': 'no-cache',
            'Connection': 'keep-alive',
            'Content-Type': 'application/json',
            'Origin': 'https://app.giz.ai',
            'Pragma': 'no-cache',
            'Sec-Fetch-Dest': 'empty',
            'Sec-Fetch-Mode': 'cors',
            'Sec-Fetch-Site': 'same-origin',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
            'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Linux"'
        }

    @classmethod
    async def _generate_image(cls, session, model, messages, headers, proxy) -> AsyncResult:
        """POST an image request and yield one ImageResponse per output URL."""
        # Only the last message is used as the image prompt; an empty
        # history now produces an empty prompt instead of an IndexError.
        prompt = messages[-1]["content"] if messages else ""
        data = {
            "model": model,
            "input": {
                "width": "1024",
                "height": "1024",
                "steps": 4,
                "output_format": "webp",
                "batch_size": 1,
                "mode": "plan",
                "prompt": prompt
            }
        }
        async with session.post(
            cls.api_endpoint,
            headers=headers,
            data=json.dumps(data),
            proxy=proxy
        ) as response:
            response.raise_for_status()
            response_data = await response.json()
            # NOTE(review): when status is not 'completed' (or 'output' is
            # missing) the generator finishes silently with no items —
            # confirm this best-effort behaviour is intended before
            # turning it into an exception.
            if response_data.get('status') == 'completed' and response_data.get('output'):
                for url in response_data['output']:
                    yield ImageResponse(images=url, alt="Generated Image")

    @classmethod
    async def _generate_chat(cls, session, model, messages, headers, proxy) -> AsyncResult:
        """POST a chat request and yield the single, non-streamed reply."""
        # Flatten the whole history into one "role: content" transcript and
        # send it as a single "human" message.
        # NOTE(review): Messages allows list-valued content, which would be
        # rendered via its repr here — verify upstream always passes str.
        prompt = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
        data = {
            "model": model,
            "input": {
                "messages": [
                    {
                        "type": "human",
                        "content": prompt
                    }
                ],
                "mode": "plan"
            },
            "noStream": True
        }
        async with session.post(
            cls.api_endpoint,
            headers=headers,
            data=json.dumps(data),
            proxy=proxy
        ) as response:
            response.raise_for_status()
            result = await response.json()
            # Missing 'output' degrades to an empty string rather than a KeyError.
            yield result.get('output', '')

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        """Create an async generator producing chat text or image responses.

        Args:
            model: Requested model name or alias; resolved via get_model().
            messages: Conversation history (role/content dicts).
            proxy: Optional proxy URL forwarded to aiohttp.
            **kwargs: Ignored; accepted for interface compatibility.

        Yields:
            str chat replies, or ImageResponse objects for image models.

        Raises:
            aiohttp.ClientResponseError: on a non-2xx HTTP status.
        """
        model = cls.get_model(model)
        headers = cls._build_headers()
        async with ClientSession() as session:
            # Dispatch on model family; both helpers share session/headers.
            if cls.is_image_model(model):
                async for chunk in cls._generate_image(session, model, messages, headers, proxy):
                    yield chunk
            else:
                async for chunk in cls._generate_chat(session, model, messages, headers, proxy):
                    yield chunk
api/typing.py DELETED
@@ -1,7 +0,0 @@
1
- # api/typing.py
2
-
3
- from typing import AsyncGenerator, Union, List, Dict
4
- from .image import ImageResponse
5
-
6
- Messages = List[Dict[str, Union[str, list]]]
7
- AsyncResult = AsyncGenerator[Union[str, ImageResponse], None]