AIMaster7 committed on
Commit
a5cd74c
·
verified ·
1 Parent(s): bfd5a3c

Rename package.json to app/main.py

Browse files
Files changed (2) hide show
  1. app/main.py +221 -0
  2. package.json +0 -14
app/main.py ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ import os, re, json, uuid, random, string, logging, asyncio
3
+ from datetime import datetime, timedelta
4
+ from typing import List, Callable, Any, Optional
5
+
6
+ import httpx
7
+ from fastapi import FastAPI, HTTPException
8
+ from fastapi.responses import StreamingResponse, JSONResponse
9
+ from pydantic import BaseModel, Field
10
+
11
# ────────────────────────── logging ──────────────────────────────────────
# Root-logger configuration for the whole service.  The LOG_LEVEL env var
# (e.g. "DEBUG") overrides the default of "INFO"; logging.basicConfig
# accepts level names as strings.
logging.basicConfig(
    level=os.getenv("LOG_LEVEL", "INFO"),
    format="%(asctime)s | %(levelname)-8s | %(name)s | %(message)s",
)
# Module-wide logger used by every helper below.
log = logging.getLogger("snapzion-service")
log.info("snapzion service starting …")
18
+
19
# ────────────────────────── ENV & constants ─────────────────────────────
# System prompt sent to the remote safety classifier; override via the
# SYSTEM_PROMPT env var.
SYSTEM_PROMPT = os.getenv(
    "SYSTEM_PROMPT",
    "You are a prompt-safety model. Decide if the prompt is safe. "
    "Respond with 'safe' or 'not safe'.",
)
# SECURITY(review): a live-looking API key is committed here as the default
# value.  It should be rotated and the hard-coded default removed so the
# service fails fast when SAFETY_API_KEY is not configured.
SAFETY_API_KEY = os.getenv("SAFETY_API_KEY", "sk-F8l9ALDrJSpVCWJ3G1XbqP09oE3UD09Jf0t4WSlnrSJFdTtX")
# OpenAI-compatible chat-completions endpoint used for the safety check.
SAFETY_MODEL_URL = os.getenv(
    "SAFETY_MODEL_URL",
    "https://api.typegpt.net/v1/chat/completions",
)

# Retry/backoff tuning for outbound HTTP calls (consumed by _retry).
MAX_RETRIES = int(os.getenv("MAX_RETRIES", "5"))          # attempts per call
INITIAL_DELAY = float(os.getenv("INITIAL_DELAY", "0.5"))  # seconds; doubles each retry
MAX_DELAY = float(os.getenv("MAX_DELAY", "2.5"))          # backoff ceiling, seconds
34
+
35
# ────────────────────────── FastAPI / HTTPX ────────────────────────────
app = FastAPI(title="Snapzion Image-Gen API | NAI", version="2.4.1")
# Shared async HTTP connection pool; created in the startup hook below so it
# is bound to the running event loop.  None until startup completes.
_http: Optional[httpx.AsyncClient] = None
38
+
39
@app.on_event("startup")
async def _startup() -> None:
    """Create the shared HTTPX connection pool once the event loop runs.

    Populates the module-level ``_http`` client used by every outbound call.
    """
    global _http
    _http = httpx.AsyncClient(
        timeout=30,
        limits=httpx.Limits(max_connections=100, max_keepalive_connections=40),
    )
    log.info("HTTPX pool ready βœ“")

@app.on_event("shutdown")
async def _shutdown() -> None:
    """Close the shared pool on exit.

    Fix: the AsyncClient was previously never closed, leaking keep-alive
    sockets on shutdown/reload.
    """
    global _http
    if _http is not None:
        await _http.aclose()
        _http = None
47
+
48
# ────────────────────────── Pydantic models ────────────────────────────
class ChatMessage(BaseModel):
    """One OpenAI-style chat message."""
    # role: conventionally "system" | "user" | "assistant"; only messages
    # with role == "user" are consulted by the /v1/chat/completions handler.
    role: str
    content: str
52
+
53
class ChatRequest(BaseModel):
    """Request body for /v1/chat/completions (OpenAI-compatible subset)."""
    # Accepted for API compatibility; not used for routing anywhere below.
    model: str
    messages: List[ChatMessage]
    # When true the handler replies as an SSE stream of chat.completion.chunk.
    stream: bool = Field(default=False)
57
+
58
+ # ────────────────────────── Helpers ────────────────────────────────────
59
+ def _fake_user() -> tuple[str, str, str]:
60
+ first = random.choice("Alice Bob Carol David Evelyn Frank Grace Hector Ivy Jackie".split())
61
+ last = random.choice("Smith Johnson Davis Miller Thompson Garcia Brown Wilson Martin Clark".split())
62
+ email = ''.join(random.choices(string.ascii_lowercase + string.digits, k=8)) + "@example.com"
63
+ cust = "cus_" + ''.join(random.choices(string.ascii_letters + string.digits, k=14))
64
+ return f"{first} {last}", email, cust
65
+
66
async def _retry(fn: Callable, *a, **kw) -> Any:
    """Await ``fn(*a, **kw)`` with retries and exponential backoff.

    ``max_retries`` is popped from **kw (default MAX_RETRIES), so the wrapped
    function never receives it.  HTTP 400 responses and generic exceptions
    are retried; any other HTTP status error is considered fatal and
    re-raised immediately.  The final failure is re-raised to the caller.
    Backoff starts at INITIAL_DELAY, doubles each attempt up to MAX_DELAY,
    and adds up to 0.4 s of jitter.
    """
    max_tries = kw.pop("max_retries", MAX_RETRIES)
    delay = INITIAL_DELAY
    for n in range(1, max_tries + 1):
        try:
            return await fn(*a, **kw)
        except httpx.HTTPStatusError as exc:
            # Only HTTP 400 is treated as transient; everything else is fatal.
            if exc.response.status_code != 400:
                log.error("%s failed with status %d: %s", fn.__name__, exc.response.status_code, exc)
                raise
            log.warning("%s try %d/%d: HTTP 400 error: %s", fn.__name__, n, max_tries, exc)
            if n == max_tries:
                log.error("%s failed after %d tries: HTTP 400 error: %s", fn.__name__, n, exc)
                raise
        except Exception as exc:
            if n == max_tries:
                log.error("%s failed after %d tries: %s", fn.__name__, n, exc)
                raise
            log.warning("%s try %d/%d: %s", fn.__name__, n, max_tries, exc)
        # Fix: backoff now applies to every retried failure.  Previously the
        # sleep lived only on the generic-exception path, so retried HTTP
        # 400s looped with no delay between attempts.
        await asyncio.sleep(delay + random.uniform(0, 0.4))
        delay = min(delay * 2, MAX_DELAY)
88
+
89
# ────────────────────────── Safety check ───────────────────────────────
async def _raw_safety(prompt: str) -> bool:
    """Ask the remote safety model whether *prompt* is safe.

    Sends the prompt to SAFETY_MODEL_URL with SYSTEM_PROMPT as the system
    message and inspects the model's text reply.  Fail-closed: "not safe",
    "unsafe", and any unrecognised reply all return False; only an explicit
    "safe" returns True.  Raises httpx errors on transport/HTTP failure —
    callers go through is_safe()/_retry for retry handling.
    """
    assert _http  # the startup hook must have created the shared client
    hdrs = {"Authorization": f"Bearer {SAFETY_API_KEY}", "Content-Type": "application/json"}
    payload = {
        "model": "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
        "messages": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": prompt},
        ],
    }
    r = await _http.post(SAFETY_MODEL_URL, json=payload, headers=hdrs)
    r.raise_for_status()

    raw = r.json()["choices"][0]["message"]["content"].strip().lower()
    log.debug("Safety raw reply: %r", raw)

    # The negative verdict must be checked first: "not safe" also matches
    # the bare \bsafe\b pattern below.
    if re.search(r"\b(not\s+safe|unsafe)\b", raw):
        log.warning("Prompt-safety verdict: NOT SAFE")
        return False
    if re.search(r"\bsafe\b", raw):
        log.info("Prompt-safety verdict: SAFE")
        return True

    # Unknown wording → treat as unsafe rather than guessing.
    log.warning("Prompt-safety unknown reply %r β†’ NOT SAFE", raw)
    return False
115
+
116
async def is_safe(prompt: str) -> bool:
    """Retry-wrapped prompt-safety check (delegates to _raw_safety)."""
    verdict = await _retry(_raw_safety, prompt)
    return verdict
118
+
119
# ────────────────────────── Blackbox Image API ─────────────────────────
async def _raw_blackbox(prompt: str) -> str:
    """POST *prompt* to blackbox.ai's image generator; return its markdown.

    A throwaway user identity (via _fake_user) and a 30-day session expiry
    are fabricated per call, and the request carries a full Chrome-like
    header set.  Returns the "markdown" field of the JSON reply, or the raw
    response text when the body is not JSON.  Raises httpx errors on
    transport/HTTP failure; callers go through blackbox()/_retry.
    """
    assert _http  # the startup hook must have created the shared client
    name, email, _ = _fake_user()
    user_id = ''.join(random.choices(string.digits, k=21))
    # NOTE(review): datetime.utcnow() is naive (and deprecated in 3.12); the
    # "Z" suffix is appended manually to fake an ISO-8601 UTC timestamp.
    expiry = (datetime.utcnow().replace(microsecond=0) + timedelta(days=30)).isoformat() + "Z"

    payload = {
        "query": prompt,
        "session": {
            "user": {
                "name": name,
                "email": email,
                "image": "https://lh3.googleusercontent.com/a/ACg8ocI-ze5Qe42S-j8xaCL6X7KSVwfiOae4fONqpTxzt0d2_a2FIld1=s96-c",
                "id": user_id
            },
            "expires": expiry
        }
    }

    # Browser-mimicking headers; the endpoint presumably rejects requests
    # that don't look like they came from www.blackbox.ai — TODO confirm.
    headers = {
        "accept": "*/*",
        "accept-language": "en-US,en;q=0.9,ru;q=0.8",
        "content-type": "text/plain;charset=UTF-8",
        "origin": "https://www.blackbox.ai",
        "priority": "u=1, i",
        "referer": "https://www.blackbox.ai/",
        "sec-ch-ua": '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": (
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
            "AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/135.0.0.0 Safari/537.36"
        ),
    }

    # json=payload serialises the body as JSON; the explicit text/plain
    # content-type above mirrors what the real browser sends.
    resp = await _http.post("https://www.blackbox.ai/api/image-generator", json=payload, headers=headers)
    resp.raise_for_status()

    try:
        return resp.json().get("markdown", "").strip()
    except json.JSONDecodeError:
        # Non-JSON reply: fall back to the raw text body.
        return resp.text.strip()
166
+
167
async def blackbox(prompt: str) -> str:
    """Generate image markdown for *prompt*, retrying transient failures."""
    markdown = await _retry(_raw_blackbox, prompt)
    return markdown
169
+
170
# ────────────────────────── Main route ─────────────────────────────────
@app.post("/v1/chat/completions")
async def chat(req: ChatRequest):
    """OpenAI-compatible endpoint: safety-check the prompt, generate an image.

    Takes the most recent user message as the prompt, rejects unsafe
    prompts (HTTP 400) and upstream failures (HTTP 503), and returns an
    OpenAI ``chat.completion`` body — or an SSE chunk stream when
    ``req.stream`` is true — whose content is the generator's markdown with
    storage.googleapis.com image links rewritten to cdn.snapzion.com.
    """
    if _http is None:
        raise HTTPException(503, "HTTP client not ready")

    # Last message with role "user" wins.
    user_prompt = next((m.content for m in reversed(req.messages) if m.role == "user"), "")
    if not user_prompt:
        raise HTTPException(400, "User prompt missing")

    try:
        if not await is_safe(user_prompt):
            return JSONResponse({"error": "Your prompt is considered unsafe."}, status_code=400)
    except httpx.HTTPStatusError as exc:
        return JSONResponse({"error": f"Safety check failed: HTTP {exc.response.status_code}", "reason": str(exc)}, status_code=503)

    try:
        md = await blackbox(user_prompt)
    except httpx.HTTPStatusError as exc:
        return JSONResponse({"error": f"Image generation failed: HTTP {exc.response.status_code}", "reason": str(exc)}, status_code=503)
    except Exception as exc:
        return JSONResponse({"error": "Image generation failed after retries.", "reason": str(exc)}, status_code=503)

    # Rewrite GCS image links to the CDN.  Fix: the replacement is now a
    # callable, so backslashes or group references (\1, \g<...>) inside the
    # untrusted user_prompt can no longer be interpreted as part of the
    # re.sub replacement template (which raised re.error / corrupted output).
    md = re.sub(
        r"!\[[^\]]*\]\(https://storage\.googleapis\.com([^\)]*)\)",
        lambda m: f"![{user_prompt}](https://cdn.snapzion.com{m.group(1)})",
        md,
    )

    uid, ts = str(uuid.uuid4()), int(datetime.now().timestamp())

    if not req.stream:
        return {
            "id": uid,
            "object": "chat.completion",
            "created": ts,
            "model": "Image-Generator",
            "choices": [{
                "index": 0,
                "message": {"role": "assistant", "content": md},
                "finish_reason": "stop",
            }],
            "usage": None,
        }

    async def sse():
        # One content chunk with the full markdown, one empty stop chunk,
        # then the OpenAI-style [DONE] terminator.
        chunk1 = {"id": uid, "object": "chat.completion.chunk", "created": ts, "model": "Image-Generator",
                  "choices": [{"index": 0, "delta": {"role": "assistant", "content": md}, "finish_reason": None}], "usage": None}
        yield f"data: {json.dumps(chunk1)}\n\n"
        chunk2 = {"id": uid, "object": "chat.completion.chunk", "created": ts, "model": "Image-Generator",
                  "choices": [{"index": 0, "delta": {"role": "assistant", "content": ""}, "finish_reason": "stop"}], "usage": None}
        yield f"data: {json.dumps(chunk2)}\n\n"
        yield "data: [DONE]\n\n"

    return StreamingResponse(sse(), media_type="text/event-stream")
package.json DELETED
@@ -1,14 +0,0 @@
1
- {
2
- "name": "codesandbox-manager",
3
- "version": "1.0.0",
4
- "description": "Manage CodeSandbox via API",
5
- "main": "index.js",
6
- "scripts": {
7
- "start": "node index.js"
8
- },
9
- "dependencies": {
10
- "dotenv": "^16.0.3",
11
- "express": "^4.18.2",
12
- "node-fetch": "^2.6.7"
13
- }
14
- }