seawolf2357 committed on
Commit 8716617 · verified · 1 Parent(s): 9a83d45

Create app.py

Files changed (1)
  1. app.py +848 -0
app.py ADDED
@@ -0,0 +1,848 @@
1
+ import os
2
+ import spaces
3
+ import gradio as gr
4
+ import numpy as np
5
+ from PIL import Image
6
+ import random
7
+ from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline, EulerAncestralDiscreteScheduler
8
+ import torch
9
+ from transformers import pipeline as transformers_pipeline
10
+ import re
11
+ from cohere import ClientV2 # Changed from HuggingFace to Cohere
12
+
13
+ # ------------------------------------------------------------
14
+ # DEVICE SETUP
15
+ # ------------------------------------------------------------
16
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
17
+
18
+ # ------------------------------------------------------------
19
+ # STABLE DIFFUSION XL PIPELINE (Text-to-Image)
20
+ # ------------------------------------------------------------
21
+ pipe = StableDiffusionXLPipeline.from_pretrained(
22
+ "Heartsync/NSFW-Uncensored",
23
+ torch_dtype=torch.float16,
24
+ variant="fp16",
25
+ use_safetensors=True,
26
+ )
27
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
28
+ pipe.to(device)
29
+
30
+ # Force important sub-modules to fp16 for VRAM efficiency
31
+ for sub in (pipe.text_encoder, pipe.text_encoder_2, pipe.vae, pipe.unet):
32
+ sub.to(torch.float16)
33
+
34
+ # ------------------------------------------------------------
35
+ # STABLE DIFFUSION XL PIPELINE (Image-to-Image)
36
+ # ------------------------------------------------------------
37
+ img2img_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
38
+ "Heartsync/NSFW-Uncensored",
39
+ torch_dtype=torch.float16,
40
+ variant="fp16",
41
+ use_safetensors=True,
42
+ )
43
+ img2img_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(img2img_pipe.scheduler.config)
44
+ img2img_pipe.to(device)
45
+
46
+ # Force important sub-modules to fp16 for VRAM efficiency
47
+ for sub in (img2img_pipe.text_encoder, img2img_pipe.text_encoder_2, img2img_pipe.vae, img2img_pipe.unet):
48
+ sub.to(torch.float16)
49
+
50
+ # ------------------------------------------------------------
51
+ # INITIALIZE COHERE CLIENT FOR TRANSLATIONS AND PROMPT GENERATION
52
+ # ------------------------------------------------------------
53
+ coh_api_key = os.getenv("COH_API")
54
+ if not coh_api_key:
55
+ print("[WARNING] COH_API environment variable not found. LLM features will not work.")
56
+ coh_client = None
57
+ else:
58
+ try:
59
+ coh_client = ClientV2(api_key=coh_api_key)
60
+ print("[INFO] Cohere client initialized successfully.")
61
+ except Exception as e:
62
+ print(f"[ERROR] Failed to initialize Cohere client: {str(e)}")
63
+ coh_client = None
64
+
65
+
66
+ # 1. Regex for detecting non-English characters, made more explicit
67
+ # It explicitly covers Korean, Japanese, and Chinese character ranges
68
+ non_english_regex = re.compile(r'[\uac00-\ud7a3\u3040-\u30ff\u3400-\u4dbf\u4e00-\u9fff\uf900-\ufaff]+')
69
+
70
+ def is_non_english(text):
71
+ """명확하게 비영어 텍스트 여부를 확인하는 함수"""
72
+ # Check for Korean Hangul (AC00-D7A3)
73
+ if re.search(r'[\uac00-\ud7a3]', text):
74
+ print("[DETECT] Korean text detected")
75
+ return True
76
+
77
+ # Check for Japanese hiragana/katakana (3040-30FF)
78
+ if re.search(r'[\u3040-\u30ff]', text):
79
+ print("[DETECT] Japanese text detected")
80
+ return True
81
+
82
+ # Check for Chinese characters / kanji (4E00-9FFF)
83
+ if re.search(r'[\u4e00-\u9fff]', text):
84
+ print("[DETECT] Chinese/Kanji text detected")
85
+ return True
86
+
87
+ # Other non-English characters (outside the ASCII range)
88
+ if re.search(r'[^\x00-\x7F]', text):
89
+ print("[DETECT] Other non-English text detected")
90
+ return True
91
+
92
+ return False
93
+
94
+ def translate_with_cohere(text):
95
+ """비영어 텍스트를 영어로 번역"""
96
+ if coh_client is None:
97
+ print("[WARN] Cohere client not available, skipping translation")
98
+ return text
99
+
100
+ if not is_non_english(text):
101
+ print("[INFO] English text detected, no translation needed")
102
+ return text
103
+
104
+ try:
105
+ print(f"[INFO] Translating text: '{text}'")
106
+
107
+ system_prompt = """
108
+ You are a professional translator for adult content. Your task is to translate the input text to English.
109
+
110
+ Rules:
111
+ 1. Provide ONLY the translated English text
112
+ 2. NO explanations or additional text
113
+ 3. Maintain the explicit or adult content without censoring
114
+ 4. Translate accurately and completely
115
+ 5. Be especially careful with Asian languages like Korean, Japanese, and Chinese
116
+
117
+ Translate the following text to English:
118
+ """
119
+
120
+ messages = [
121
+ {
122
+ "role": "system",
123
+ "content": [{"type": "text", "text": system_prompt}]
124
+ },
125
+ {
126
+ "role": "user",
127
+ "content": [{"type": "text", "text": text}]
128
+ }
129
+ ]
130
+
131
+ response = coh_client.chat(
132
+ model="command-r-plus-08-2024",
133
+ messages=messages,
134
+ temperature=0.1 # Use a low temperature for accurate translation
135
+ )
136
+
137
+ translated_text = response.text.strip()
138
+
139
+ # Clean up the translation result
140
+ translated_text = re.sub(r'^(Translation:|English:|Translated text:)\s*', '', translated_text, flags=re.IGNORECASE)
141
+
142
+ print(f"[INFO] Original: '{text}'")
143
+ print(f"[INFO] Translated: '{translated_text}'")
144
+
145
+ # If the translation is very short or identical to the original, treat it as suspect
146
+ if len(translated_text) < 3 or translated_text == text:
147
+ print("[WARN] Translation may have failed, falling back to basic translation")
148
+ # Second attempt - retry with a simpler prompt
149
+ try:
150
+ simple_messages = [
151
+ {
152
+ "role": "system",
153
+ "content": [{"type": "text", "text": "Translate this text to English:"}]
154
+ },
155
+ {
156
+ "role": "user",
157
+ "content": [{"type": "text", "text": text}]
158
+ }
159
+ ]
160
+
161
+ simple_response = coh_client.chat(
162
+ model="command-r-plus-08-2024",
163
+ messages=simple_messages,
164
+ temperature=0.1
165
+ )
166
+
167
+ simple_translated = simple_response.text.strip()
168
+ if len(simple_translated) > 3 and simple_translated != text:
169
+ print(f"[INFO] Second attempt translation: '{simple_translated}'")
170
+ return simple_translated
171
+ except Exception as e:
172
+ print(f"[ERROR] Second translation attempt failed: {str(e)}")
173
+
174
+ return text
175
+
176
+ return translated_text
177
+ except Exception as e:
178
+ print(f"[ERROR] Translation failed: {str(e)}")
179
+ import traceback
180
+ traceback.print_exc()
181
+ return text # Return the original text if translation fails
182
+
183
+
184
+
185
+ # ------------------------------------------------------------
186
+ # EXAMPLES (Hidden from UI but used for RANDOM button)
187
+ # ------------------------------------------------------------
188
+ prompt_examples = [
189
+ "The shy college girl, with glasses and a tight plaid skirt, nervously approaches her professor",
190
+ "Her skirt rose a little higher with each gentle push, a soft blush of blush spreading across her cheeks as she felt the satisfying warmth of his breath on her cheek.",
191
+ "a girl in a school uniform having her skirt pulled up by a boy, and then being fucked",
192
+ "Moody mature anime scene of two lovers fuck under neon rain, sensual atmosphere",
193
+ "Moody mature anime scene of two lovers kissing under neon rain, sensual atmosphere",
194
+ "The girl sits on the boy's lap by the window, his hands resting on her waist. She is unbuttoning his shirt, her expression focused and intense.",
195
+ "A girl with long, black hair is sleeping on her desk in the classroom. Her skirt has ridden up, revealing her thighs, and a trail of drool escapes her slightly parted lips.",
196
+ "The waves rolled gently, a slow, sweet kiss of the lip, a slow, slow build of anticipation as their toes bumped gently – a slow, sweet kiss of the lip, a promise of more to come.",
197
+ "Her elegant silk gown swayed gracefully as she approached him, the delicate fabric brushing against her legs. A warm blush spread across her cheeks as she felt his breath on her face.",
198
+ "Her white blouse and light cotton skirt rose a little higher with each gentle push, a soft blush spreading across her cheeks as she felt the satisfying warmth of his breath on her cheek.",
199
+ "A woman in a business suit having her skirt lifted by a man, and then being sexually assaulted.",
200
+ "The older woman sits on the man's lap by the fireplace, his hands resting on her hips. She is unbuttoning his vest, her expression focused and intense. He takes control of the situation as she finishes unbuttoning his shirt, pushing her onto her back and begins to have sex with her.",
201
+ "There is a woman with long black hair. Her face features alluring eyes and full lips, with a slender figure adorned in black lace lingerie. She lies on the bed, loosening her lingerie strap with one hand while seductively glancing downward.",
202
+ "In a dimly lit room, the same woman teases with her dark, flowing hair, now covering her voluptuous breasts, while a black garter belt accentuates her thighs. She sits on the sofa, leaning back, lifting one leg to expose her most private areas through the sheer lingerie.",
203
+ "A woman with glasses, lying on the bed in just her bra, spreads her legs wide, revealing all! She wears a sultry expression, gazing directly at the viewer with her brown eyes, her short black hair cascading over the pillow. Her slim figure, accentuated by the lacy lingerie, exudes a seductive aura.",
204
+ "A soft focus on the girl's face, eyes closed, biting her lip, as her roommate performs oral pleasure, the experienced woman's hair cascading between her thighs.",
205
+ "A woman in a blue hanbok sits on a wooden floor, her legs folded beneath her, gazing out of a window, the sunlight highlighting the graceful lines of her clothing.",
206
+ "The couple, immersed in a wooden outdoor bath, share an intimate moment, her wet kimono clinging to her curves, his hands exploring her body beneath the water's surface.",
207
+ "A steamy shower scene, the twins embrace under the warm water, their soapy hands gliding over each other's curves, their passion intensifying as they explore uncharted territories.",
208
+ "The teacher, with a firm grip, pins the student against the blackboard, her skirt hiked up, exposing her delicate lace panties. Their heavy breathing echoes in the quiet room as they share an intense, intimate moment.",
209
+ "After hours, the girl sits on top of the teacher's lap, riding him on the classroom floor, her hair cascading over her face as she moves with increasing intensity, their bodies glistening with sweat.",
210
+ "In the dimly lit dorm room, the roommates lay entangled in a passionate embrace, their naked bodies glistening with sweat, as the experienced woman teaches her lover the art of kissing and touching.",
211
+ "The once-innocent student, now confident, takes charge, straddling her lover on the couch, their bare skin illuminated by the warm glow of the sunset through the window.",
212
+ "A close-up of the secretary's hand unzipping her boss's dress shirt, her fingers gently caressing his chest, their eyes locked in a heated embrace in the supply closet.",
213
+ "The secretary, in a tight pencil skirt and silk blouse, leans back on the boss's desk, her legs wrapped around his waist, her blouse unbuttoned, revealing her lace bra, as he passionately kisses her, his hands exploring her body.",
214
+ "On the living room couch, one twin sits astride her sister's lap, their lips locked in a passionate kiss, their hands tangled in each other's hair, unraveling a new level of intimacy.",
215
+ "In a dimly lit chamber, the dominant woman, dressed in a leather corset and thigh-high boots, stands tall, her hand gripping her submissive partner's hair, his eyes closed in submission as she instructs him to please her.",
216
+ "The dominant, in a sheer lace bodysuit, sits on a throne-like chair, her legs spread, as the submissive, on his knees, worships her with his tongue, his hands bound behind his back.",
217
+ "A traditional Japanese onsen, with steam rising, a young woman in a colorful kimono kneels on a tatami mat, her back to the viewer, as her male partner, also in a kimono, gently unties her obi, revealing her bare back.",
218
+ "In a serene outdoor setting, the woman, in a vibrant summer kimono, sits on a bench, her legs slightly spread, her partner kneeling before her, his hands gently caressing her exposed thigh.",
219
+ ]
220
+
221
+ # ------------------------------------------------------------
222
+ # LLM PROMPT GENERATOR
223
+ # ------------------------------------------------------------
224
+ def generate_prompts(theme):
225
+ """Generate optimal NSFW prompts using the LLM"""
226
+ try:
227
+ if coh_client is None:
228
+ return "Cohere API token not set. Please set the COH_API environment variable."
229
+
230
+ # Translate theme if it's not in English
231
+ if non_english_regex.search(theme):
232
+ theme = translate_with_cohere(theme)
233
+
234
+ print(f"[INFO] Sending request to Cohere API with theme: {theme}")
235
+
236
+ system_prefix = """
237
+ You are an expert at creating detailed, vivid image generation prompts for NSFW content. Your task is to create ONE optimal explicit image prompt based on the user's theme or scenario.
238
+
239
+ Follow these guidelines strictly:
240
+ 1. Generate only ONE high-quality, uncensored prompt
241
+ 2. The prompt should be 1-3 sentences long
242
+ 3. Make the prompt explicitly sexual, arousing, and descriptive
243
+ 4. Focus on detailed physical descriptions that would work well for image generation
244
+ 5. Do NOT censor or soften the language - use explicit terms freely
245
+ 6. ONLY respond in ENGLISH, never in any other language
246
+ 7. DO NOT include ANY prefixes, headers, or formatting - just plain text
247
+
248
+ Example prompt styles to emulate:
249
+ - "Her skirt rose a little higher with each gentle push, a soft blush spreading across her cheeks as she felt the satisfying warmth of his breath on her cheek."
250
+ - "A girl in a school uniform having her skirt pulled up by a boy, and then being fucked"
251
+ - "Moody mature anime scene of two lovers fuck under neon rain, sensual atmosphere"
252
+ - "The girl sits on the boy's lap by the window, his hands resting on her waist. She is unbuttoning his shirt, her expression focused and intense."
253
+ - "A girl with long black hair is sleeping on her desk in the classroom. Her skirt has ridden up, revealing her thighs, and a trail of drool escapes her slightly parted lips."
254
+ - "The older woman sits on the man's lap by the fireplace, his hands resting on her hips. She is unbuttoning his vest, her expression focused and intense. He takes control of the situation as she finishes unbuttoning his shirt, pushing her onto her back and begins to have sex with her."
255
+ - "There is a woman with long black hair. Her face features alluring eyes and full lips, with a slender figure adorned in black lace lingerie. She lies on the bed, loosening her lingerie strap with one hand while seductively glancing downward."
256
+ - "A woman with glasses, lying on the bed in just her bra, spreads her legs wide, revealing all! She wears a sultry expression, gazing directly at the viewer with her brown eyes, her short black hair cascading over the pillow."
257
+ - "A soft focus on the girl's face, eyes closed, biting her lip, as her roommate performs oral pleasure, the experienced woman's hair cascading between her thighs.",
258
+ - "A woman in a blue hanbok sits on a wooden floor, her legs folded beneath her, gazing out of a window, the sunlight highlighting the graceful lines of her clothing.",
259
+ - "The couple, immersed in a wooden outdoor bath, share an intimate moment, her wet kimono clinging to her curves, his hands exploring her body beneath the water's surface.",
260
+ - "A steamy shower scene, the twins embrace under the warm water, their soapy hands gliding over each other's curves, their passion intensifying as they explore uncharted territories.",
261
+ - "The teacher, with a firm grip, pins the student against the blackboard, her skirt hiked up, exposing her delicate lace panties. Their heavy breathing echoes in the quiet room as they share an intense, intimate moment.",
262
+ - "After hours, the girl sits on top of the teacher's lap, riding him on the classroom floor, her hair cascading over her face as she moves with increasing intensity, their bodies glistening with sweat.",
263
+ - "In the dimly lit dorm room, the roommates lay entangled in a passionate embrace, their naked bodies glistening with sweat, as the experienced woman teaches her lover the art of kissing and touching.",
264
+ - "The once-innocent student, now confident, takes charge, straddling her lover on the couch, their bare skin illuminated by the warm glow of the sunset through the window.",
265
+ - "A close-up of the secretary's hand unzipping her boss's dress shirt, her fingers gently caressing his chest, their eyes locked in a heated embrace in the supply closet.",
266
+ - "The secretary, in a tight pencil skirt and silk blouse, leans back on the boss's desk, her legs wrapped around his waist, her blouse unbuttoned, revealing her lace bra, as he passionately kisses her, his hands exploring her body.",
267
+ - "On the living room couch, one twin sits astride her sister's lap, their lips locked in a passionate kiss, their hands tangled in each other's hair, unraveling a new level of intimacy.",
268
+ - "In a dimly lit chamber, the dominant woman, dressed in a leather corset and thigh-high boots, stands tall, her hand gripping her submissive partner's hair, his eyes closed in submission as she instructs him to please her.",
269
+ - "The dominant, in a sheer lace bodysuit, sits on a throne-like chair, her legs spread, as the submissive, on his knees, worships her with his tongue, his hands bound behind his back.",
270
+ - "A traditional Japanese onsen, with steam rising, a young woman in a colorful kimono kneels on a tatami mat, her back to the viewer, as her male partner, also in a kimono, gently unties her obi, revealing her bare back.",
271
+ - "In a serene outdoor setting, the woman, in a vibrant summer kimono, sits on a bench, her legs slightly spread, her partner kneeling before her, his hands gently caressing her exposed thigh.",
272
+
273
+ Respond ONLY with the single prompt text in ENGLISH with NO PREFIXES of any kind.
274
+ """
275
+
276
+ # Format messages for Cohere API
277
+ messages = [
278
+ {
279
+ "role": "system",
280
+ "content": [{"type": "text", "text": system_prefix}]
281
+ },
282
+ {
283
+ "role": "user",
284
+ "content": [{"type": "text", "text": theme}]
285
+ }
286
+ ]
287
+
288
+ # Generate response using Cohere
289
+ response = coh_client.chat(
290
+ model="command-r-plus-08-2024",
291
+ messages=messages,
292
+ temperature=0.8
293
+ )
294
+
295
+ # Extract only the text content without any debug information
296
+ if hasattr(response, 'text'):
297
+ generated_prompt = response.text
298
+ else:
299
+ # Handle different response formats
300
+ try:
301
+ # Try to extract just the text content from the response
302
+ response_str = str(response)
303
+ # If it's a complex object with nested structure
304
+ if 'text=' in response_str:
305
+ text_match = re.search(r"text=['\"]([^'\"]+)['\"]", response_str)
306
+ if text_match:
307
+ generated_prompt = text_match.group(1)
308
+ else:
309
+ generated_prompt = response_str
310
+ else:
311
+ generated_prompt = response_str
312
+ except:
313
+ generated_prompt = str(response)
314
+
315
+ # FORCE translation to English if there's any non-English content
316
+ if non_english_regex.search(generated_prompt):
317
+ print("[INFO] Translating non-English prompt to English")
318
+ generated_prompt = translate_with_cohere(generated_prompt)
319
+
320
+ # Clean the prompt
321
+ generated_prompt = re.sub(r'^AI🐼:\s*', '', generated_prompt)
322
+ generated_prompt = re.sub(r'^\d+[\.\)]\s*', '', generated_prompt)
323
+ generated_prompt = re.sub(r'^(Prompt|Response|Result|Output):\s*', '', generated_prompt)
324
+ generated_prompt = re.sub(r'^["\']+|["\']+$', '', generated_prompt)
325
+ generated_prompt = generated_prompt.strip()
326
+ generated_prompt = re.sub(r'\s+', ' ', generated_prompt)
327
+
328
+ print(f"[INFO] Generated prompt: {generated_prompt}")
329
+
330
+ # Final verification - check length and ensure it's English
331
+ if len(generated_prompt) > 10:
332
+ return generated_prompt
333
+ else:
334
+ return "Failed to generate a valid prompt"
335
+
336
+ except Exception as e:
337
+ print(f"[ERROR] Prompt generation failed: {str(e)}")
338
+ import traceback
339
+ traceback.print_exc()
340
+ return f"Error generating prompt: {str(e)}"
341
+
342
+
343
+ # ------------------------------------------------------------
344
+ # SDXL INFERENCE WRAPPER (Text-to-Image)
345
+ # ------------------------------------------------------------
346
+ MAX_SEED = np.iinfo(np.int32).max
347
+ MAX_IMAGE_SIZE = 1216
348
+
349
+ @spaces.GPU
350
+ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
351
+ """
352
+ IMPORTANT: if the prompt text contains Korean or any other non-English characters, it must be translated to English.
353
+ """
354
+ print(f"[DEBUG] Original prompt received: '{prompt}'")
355
+ print(f"[DEBUG] Original negative prompt received: '{negative_prompt}'")
356
+
357
+ # Detect and translate Korean/non-English text (prompt)
358
+ has_korean = bool(re.search(r'[\uac00-\ud7a3]', prompt))
359
+ has_non_english = bool(re.search(r'[^\x00-\x7F]', prompt))
360
+
361
+ if has_korean or has_non_english:
362
+ print(f"[ALERT] 비영어 프롬프트 감지됨: '{prompt}'")
363
+
364
+ # Translate directly with Cohere
365
+ if coh_client:
366
+ try:
367
+ # System prompt for translation
368
+ trans_system = "You are a translator. Translate the following text to English accurately. Only provide the translation, no comments or explanations."
369
+
370
+ # Request the translation
371
+ trans_response = coh_client.chat(
372
+ model="command-r-plus-08-2024",
373
+ messages=[
374
+ {"role": "system", "content": [{"type": "text", "text": trans_system}]},
375
+ {"role": "user", "content": [{"type": "text", "text": prompt}]}
376
+ ],
377
+ temperature=0.1
378
+ )
379
+
380
+ # Handle the response - try several ways of accessing its attributes
381
+ translated_prompt = None
382
+
383
+ # Method 1: response.text
384
+ try:
385
+ if hasattr(trans_response, 'text'):
386
+ translated_prompt = trans_response.text
387
+ print("[DEBUG] 방법 1 (text 속성) 성공")
388
+ except:
389
+ pass
390
+
391
+ # Method 2: response.response
392
+ if translated_prompt is None:
393
+ try:
394
+ if hasattr(trans_response, 'response'):
395
+ translated_prompt = trans_response.response
396
+ print("[DEBUG] 방법 2 (response 속성) 성공")
397
+ except:
398
+ pass
399
+
400
+ # Method 3: dictionary access on the response
401
+ if translated_prompt is None:
402
+ try:
403
+ # If the response is a dictionary
404
+ if isinstance(trans_response, dict) and 'text' in trans_response:
405
+ translated_prompt = trans_response['text']
406
+ print("[DEBUG] 방법 3 (dictionary access) 성공")
407
+ except:
408
+ pass
409
+
410
+ # Method 4: convert the response to a string and parse it
411
+ if translated_prompt is None:
412
+ try:
413
+ response_str = str(trans_response)
414
+ print(f"[DEBUG] Response structure: {response_str[:200]}...")
415
+
416
+ # Look for a text= pattern
417
+ match = re.search(r"text=['\"](.*?)['\"]", response_str)
418
+ if match:
419
+ translated_prompt = match.group(1)
420
+ print("[DEBUG] 방법 4 (정규식 파싱) 성공")
421
+
422
+ # Look for a content= pattern
423
+ if not translated_prompt and 'content=' in response_str:
424
+ match = re.search(r"content=['\"](.*?)['\"]", response_str)
425
+ if match:
426
+ translated_prompt = match.group(1)
427
+ print("[DEBUG] 방법 4.1 (content 정규식) 성공")
428
+ except Exception as parse_err:
429
+ print(f"[DEBUG] 정규식 파싱 오류: {parse_err}")
430
+
431
+ # Check the final result
432
+ if translated_prompt:
433
+ translated_prompt = translated_prompt.strip()
434
+ print(f"[SUCCESS] 번역됨: '{prompt}' -> '{translated_prompt}'")
435
+ prompt = translated_prompt
436
+ else:
437
+ # Last resort: log the full response structure
438
+ print(f"[DEBUG] Full response type: {type(trans_response)}")
439
+ print(f"[DEBUG] Full response dir: {dir(trans_response)}")
440
+ print(f"[DEBUG] Could not extract translation, keeping original prompt")
441
+ except Exception as e:
442
+ print(f"[ERROR] 프롬프트 번역 실패: {str(e)}")
443
+ import traceback
444
+ traceback.print_exc()
445
+ # Keep the original if translation fails
446
+
447
+ # Detect and translate Korean/non-English text (negative_prompt)
448
+ has_korean = bool(re.search(r'[\uac00-\ud7a3]', negative_prompt))
449
+ has_non_english = bool(re.search(r'[^\x00-\x7F]', negative_prompt))
450
+
451
+ if has_korean or has_non_english:
452
+ print(f"[ALERT] 비영어 네거티브 프롬프트 감지됨: '{negative_prompt}'")
453
+
454
+ # Translate directly with Cohere (same approach as above)
455
+ if coh_client:
456
+ try:
457
+ trans_system = "You are a translator. Translate the following text to English accurately. Only provide the translation, no comments or explanations."
458
+
459
+ trans_response = coh_client.chat(
460
+ model="command-r-plus-08-2024",
461
+ messages=[
462
+ {"role": "system", "content": [{"type": "text", "text": trans_system}]},
463
+ {"role": "user", "content": [{"type": "text", "text": negative_prompt}]}
464
+ ],
465
+ temperature=0.1
466
+ )
467
+
468
+ # Handle the response in several ways (same as for the main prompt)
469
+ translated_negative = None
470
+
471
+ # Try the various access methods (same logic as above)
472
+ try:
473
+ if hasattr(trans_response, 'text'):
474
+ translated_negative = trans_response.text
475
+ elif hasattr(trans_response, 'response'):
476
+ translated_negative = trans_response.response
477
+ elif isinstance(trans_response, dict) and 'text' in trans_response:
478
+ translated_negative = trans_response['text']
479
+ else:
480
+ response_str = str(trans_response)
481
+ match = re.search(r"text=['\"](.*?)['\"]", response_str)
482
+ if match:
483
+ translated_negative = match.group(1)
484
+ elif 'content=' in response_str:
485
+ match = re.search(r"content=['\"](.*?)['\"]", response_str)
486
+ if match:
487
+ translated_negative = match.group(1)
488
+ except Exception as parse_err:
489
+ print(f"[DEBUG] 네거티브 파싱 오류: {parse_err}")
490
+
491
+ if translated_negative:
492
+ translated_negative = translated_negative.strip()
493
+ print(f"[SUCCESS] 네거티브 번역됨: '{negative_prompt}' -> '{translated_negative}'")
494
+ negative_prompt = translated_negative
495
+ except Exception as e:
496
+ print(f"[ERROR] 네거티브 프롬프트 번역 실패: {str(e)}")
497
+
498
+ print(f"[INFO] 최종 사용될 프롬프트: '{prompt}'")
499
+ print(f"[INFO] 최종 사용될 네거티브 프롬프트: '{negative_prompt}'")
500
+
501
+ if len(prompt.split()) > 60:
502
+ print("[WARN] Prompt >60 words — CLIP may truncate it.")
503
+
504
+ if randomize_seed:
505
+ seed = random.randint(0, MAX_SEED)
506
+
507
+ generator = torch.Generator(device=device).manual_seed(seed)
508
+
509
+ try:
510
+ output_image = pipe(
511
+ prompt=prompt,
512
+ negative_prompt=negative_prompt,
513
+ guidance_scale=guidance_scale,
514
+ num_inference_steps=num_inference_steps,
515
+ width=width,
516
+ height=height,
517
+ generator=generator,
518
+ ).images[0]
519
+ return output_image, seed
520
+ except RuntimeError as e:
521
+ print(f"[ERROR] Diffusion failed → {e}")
522
+ return Image.new("RGB", (width, height), color=(0, 0, 0)), seed
523
+
524
+ # ------------------------------------------------------------
525
+ # SDXL INFERENCE WRAPPER (Image-to-Image)
526
+ # ------------------------------------------------------------
527
+ @spaces.GPU
528
+ def img2img_infer(init_image, prompt, negative_prompt, strength, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
529
+ """
530
+ Image-to-Image generation function
531
+ """
532
+ if init_image is None:
533
+ return None, seed
534
+
535
+ print(f"[DEBUG] Image-to-Image prompt received: '{prompt}'")
536
+
537
+ # Detect and translate Korean/non-English text (prompt)
538
+ if is_non_english(prompt):
539
+ print(f"[ALERT] 비영어 프롬프트 감지됨: '{prompt}'")
540
+ prompt = translate_with_cohere(prompt)
541
+ print(f"[INFO] 번역된 프롬프트: '{prompt}'")
542
+
543
+ # Detect and translate Korean/non-English text (negative_prompt)
544
+ if is_non_english(negative_prompt):
545
+ print(f"[ALERT] 비영어 네거티브 프롬프트 감지됨: '{negative_prompt}'")
546
+ negative_prompt = translate_with_cohere(negative_prompt)
547
+ print(f"[INFO] 번역된 네거티브 프롬프트: '{negative_prompt}'")
548
+
549
+ if randomize_seed:
550
+ seed = random.randint(0, MAX_SEED)
551
+
552
+ generator = torch.Generator(device=device).manual_seed(seed)
553
+
554
+ # Preprocess the input image
555
+ init_image = init_image.convert("RGB")
556
+ init_image = init_image.resize((width, height), Image.Resampling.LANCZOS)
557
+
558
+ try:
559
+ output_image = img2img_pipe(
560
+ prompt=prompt,
561
+ negative_prompt=negative_prompt,
562
+ image=init_image,
563
+ strength=strength,
564
+ guidance_scale=guidance_scale,
565
+ num_inference_steps=num_inference_steps,
566
+ generator=generator,
567
+ ).images[0]
568
+ return output_image, seed
569
+ except RuntimeError as e:
570
+ print(f"[ERROR] Image-to-Image generation failed → {e}")
571
+ return None, seed
572
+
573
+ # Function to select a random example prompt
574
+ def get_random_prompt():
575
+ return random.choice(prompt_examples)
576
+
577
+ # ------------------------------------------------------------
578
+ # UI LAYOUT + THEME (Enhanced Visual Design)
579
+ # ------------------------------------------------------------
580
+ css = """
581
+ body {background: linear-gradient(135deg, #f2e6ff 0%, #e6f0ff 100%); color: #222; font-family: 'Noto Sans', sans-serif;}
582
+ #col-container {margin: 0 auto; max-width: 768px; padding: 15px; background: rgba(255, 255, 255, 0.8); border-radius: 15px; box-shadow: 0 8px 32px rgba(31, 38, 135, 0.2);}
583
+ .gr-button {background: #7fbdf6; color: #fff; border-radius: 8px; transition: all 0.3s ease; font-weight: bold;}
584
+ .gr-button:hover {background: #5a9ae6; transform: translateY(-2px); box-shadow: 0 5px 15px rgba(0,0,0,0.1);}
585
+ #prompt-box textarea {font-size: 1.1rem; height: 9rem !important; background: #fff; color: #222; border-radius: 10px; border: 1px solid #d1c1e0;}
586
+ .boost-btn {background: #ff7eb6; margin-top: 5px;}
587
+ .boost-btn:hover {background: #ff5aa5;}
588
+ .random-btn {background: #9966ff; margin-top: 5px;}
589
+ .random-btn:hover {background: #8040ff;}
590
+ .container {animation: fadeIn 0.5s ease-in-out;}
591
+ .title {color: #6600cc; text-shadow: 1px 1px 2px rgba(0,0,0,0.1);}
592
+ .gr-form {border: none !important; background: transparent !important;}
593
+ .gr-input {border-radius: 8px !important;}
594
+ .gr-slider {height: 12px !important;}
595
+ .gr-slider .handle {height: 20px !important; width: 20px !important;}
596
+ .panel {border-radius: 12px; overflow: hidden; box-shadow: 0 4px 15px rgba(0,0,0,0.1);}
597
+ .gr-image {border-radius: 12px; overflow: hidden; transition: all 0.3s ease;}
598
+ .gr-image:hover {transform: scale(1.02); box-shadow: 0 8px 25px rgba(0,0,0,0.15);}
599
+ @keyframes fadeIn {
600
+ from {opacity: 0; transform: translateY(20px);}
601
+ to {opacity: 1; transform: translateY(0);}
602
+ }
603
+ .gr-accordion {border-radius: 10px; overflow: hidden; transition: all 0.3s ease;}
604
+ .gr-accordion:hover {box-shadow: 0 5px 15px rgba(0,0,0,0.1);}
605
+ """
606
+
607
+
608
+ author_note = (
609
+ "**ℹ️ This code provides a test environment to experiment with the censorship limits and technical possibilities of AI-based adult content generation. It serves as an educational tool that offers researchers and creators the opportunity to test various prompt methods and understand the ethical boundaries of image generation technology. Uncensored, natural motion video generation service has also been developed but will be released in the future. You can enter prompts in any language - Korean, English, Japanese, Chinese, or any other language - and our AI will automatically translate and generate images based on your input.**"
610
+ )
611
+
612
+
613
+ # Function to boost prompt with LLM
614
+ def boost_prompt(keyword):
615
+ if not keyword or keyword.strip() == "":
616
+ return "Please enter a keyword or theme first"
617
+
618
+ if coh_client is None:
619
+ return "Cohere API token not set. Please set the COH_API environment variable."
620
+
621
+ print(f"[INFO] Generating boosted prompt for keyword: {keyword}")
622
+ prompt = generate_prompts(keyword)
623
+
624
+ # Final verification that we're only returning valid content
625
+ if isinstance(prompt, str) and len(prompt) > 10 and not prompt.startswith("Error") and not prompt.startswith("Failed"):
626
+ return prompt.strip()
627
+ else:
628
+ return "Failed to generate a suitable prompt. Please try again with a different keyword."
629
+
630
+
631
+ with gr.Blocks(
632
+ css=css,
633
+ theme=gr.themes.Soft(),
634
+ head="""
635
+ <!-- Google tag (gtag.js) -->
636
+ <script async src="https://www.googletagmanager.com/gtag/js?id=G-GTFK201G22"></script>
637
+ <script>
638
+ window.dataLayer = window.dataLayer || [];
639
+ function gtag(){dataLayer.push(arguments);}
640
+ gtag('js', new Date());
641
+ gtag('config', 'G-GTFK201G22');
642
+ </script>
643
+ """
644
+ ) as demo:
645
+ gr.Markdown(
646
+ f"""
647
+ ## 🖌️ NSFW Uncensored Text & Imagery: AI Limits Explorer
648
+
649
+ **New Update: Image-to-Image functionality has been added as a new tab! Upload your images and experiment with various transformations.**
650
+
651
+ {author_note}
652
+ """, elem_classes=["title"]
653
+ )
654
+
655
+
656
+ with gr.Group(elem_classes="model-description"):
657
+ gr.HTML("""
658
+ <p>
659
+ <strong>Adult AI Image & Video Generator: REAL</strong><br>
660
+ </p>
661
+ <div style="display: flex; justify-content: center; align-items: center; gap: 10px; flex-wrap: wrap; margin-top: 10px; margin-bottom: 20px;">
662
+
663
+ <a href="https://huggingface.co/spaces/Heartsync/FREE-NSFW-HUB" target="_blank">
664
+ <img src="https://img.shields.io/static/v1?label=huggingface&message=FREE%20NSFW%20HUB&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
665
+ </a>
666
+ <a href="https://huggingface.co/spaces/Heartsync/NSFW-Uncensored-Real" target="_blank">
667
+ <img src="https://img.shields.io/static/v1?label=Text%20to%20Image%28Real%29&message=NSFW%20Uncensored&color=%230000ff&labelColor=%23800080&logo=Huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
668
+ </a>
669
+ <a href="https://huggingface.co/spaces/Heartsync/Novel-NSFW" target="_blank">
670
+ <img src="https://img.shields.io/static/v1?label=NOVEL%20GENERATOR&message=NSFW%20Uncensored&color=%23ffc0cb&labelColor=%23ffff00&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
671
+ </a>
672
+ <a href="https://huggingface.co/spaces/Heartsync/NSFW-Uncensored" target="_blank">
673
+ <img src="https://img.shields.io/static/v1?label=Text%20to%20Image%28Anime%29&message=NSFW%20Uncensored&color=%230000ff&labelColor=%23800080&logo=Huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
674
+ </a>
675
+ <a href="https://huggingface.co/spaces/Heartsync/NSFW-Uncensored-video2" target="_blank">
676
+ <img src="https://img.shields.io/static/v1?label=Image%20to%20Video%282%29&message=NSFW%20Uncensored&color=%230000ff&labelColor=%23800080&logo=Huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
677
+ </a>
678
+ <a href="https://huggingface.co/spaces/Heartsync/adult" target="_blank">
679
+ <img src="https://img.shields.io/static/v1?label=Text%20to%20Image%20to%20Video&message=ADULT&color=%23ff00ff&labelColor=%23000080&logo=Huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
680
+ </a>
681
+ </div>
682
+ <p>
683
+ <small style="opacity: 0.8;">High-quality image generation powered by StableDiffusionXL with video generation capability. Supports long prompts and various artistic styles.</small>
684
+ </p>
685
+ """)
686
+
687
+ # Create state variables to store the current image
688
+ current_image = gr.State(None)
689
+ current_seed = gr.State(0)
690
+
691
+ # Tabs for Text-to-Image and Image-to-Image
692
+ with gr.Tabs():
693
+ # Text-to-Image Tab
694
+ with gr.TabItem("Text to Image"):
695
+ with gr.Column(elem_id="col-container", elem_classes=["container", "panel"]):
696
+ # Add keyword input and boost button
697
+ with gr.Row():
698
+ keyword_input = gr.Text(
699
+ label="Keyword Input",
700
+ show_label=True,
701
+ max_lines=1,
702
+ placeholder="Enter a keyword or theme in any language to generate an optimal prompt",
703
+ value="random",
704
+ )
705
+ boost_button = gr.Button("BOOST", elem_classes=["boost-btn"])
706
+ random_button = gr.Button("RANDOM", elem_classes=["random-btn"])
707
+
708
+ with gr.Row():
709
+ prompt = gr.Text(
710
+ label="Prompt",
711
+ elem_id="prompt-box",
712
+ show_label=True,
713
+ max_lines=3, # Increased to 3 lines (3x original)
714
+ placeholder="Enter your prompt in any language (Korean, English, Japanese, etc.)",
715
+ )
716
+ run_button = gr.Button("Generate", scale=0)
717
+
718
+ # Image output area
719
+ result = gr.Image(label="Generated Image", elem_classes=["gr-image"])
720
+
721
+ with gr.Accordion("Advanced Settings", open=False, elem_classes=["gr-accordion"]):
722
+ negative_prompt = gr.Text(
723
+ label="Negative prompt",
724
+ max_lines=1,
725
+ placeholder="Enter a negative prompt in any language",
726
+ value="text, talk bubble, low quality, watermark, signature",
727
+ )
728
+
729
+ seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
730
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
731
+
732
+ with gr.Row():
733
+ width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
734
+ height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
735
+
736
+ with gr.Row():
737
+ guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=20.0, step=0.1, value=7)
738
+ num_inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=28, step=1, value=28)
739
+
740
+ # Image-to-Image Tab
741
+ with gr.TabItem("Image to Image"):
742
+ with gr.Column(elem_id="col-container", elem_classes=["container", "panel"]):
743
+ # Input image
744
+ input_image = gr.Image(
745
+ label="Input Image",
746
+ type="pil",
747
+ elem_classes=["gr-image"]
748
+ )
749
+
750
+ # Prompt input
751
+ with gr.Row():
752
+ img2img_prompt = gr.Text(
753
+ label="Prompt",
754
+ show_label=True,
755
+ max_lines=3,
756
+ placeholder="Describe how you want to transform the image (any language)",
757
+ )
758
+ img2img_run_button = gr.Button("Transform", scale=0)
759
+
760
+ # Output image
761
+ img2img_result = gr.Image(label="Transformed Image", elem_classes=["gr-image"])
762
+
763
+ # Image-to-Image advanced settings
764
+ with gr.Accordion("Advanced Settings", open=False, elem_classes=["gr-accordion"]):
765
+ img2img_negative_prompt = gr.Text(
766
+ label="Negative prompt",
767
+ max_lines=1,
768
+ placeholder="What to avoid in the transformation",
769
+ value="low quality, watermark, signature",
770
+ )
771
+
772
+ strength = gr.Slider(
773
+ label="Transformation Strength",
774
+ minimum=0.0,
775
+ maximum=1.0,
776
+ step=0.01,
777
+ value=0.75,
778
+ info="Lower values preserve more of the original image"
779
+ )
780
+
781
+ img2img_seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
782
+ img2img_randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
783
+
784
+ with gr.Row():
785
+ img2img_width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
786
+ img2img_height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
787
+
788
+ with gr.Row():
789
+ img2img_guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=20.0, step=0.1, value=7.5)
790
+ img2img_num_inference_steps = gr.Slider(label="Inference steps", minimum=1, maximum=50, step=1, value=30)
791
+
792
+ # Define a function to store the generated image in state
793
+ def update_image_state(img, seed_val):
794
+ return img, seed_val
795
+
796
+ # Connect boost button to generate prompt
797
+ boost_button.click(
798
+ fn=boost_prompt,
799
+ inputs=[keyword_input],
800
+ outputs=[prompt]
801
+ )
802
+
803
+ # Connect random button to insert random example
804
+ random_button.click(
805
+ fn=get_random_prompt,
806
+ inputs=[],
807
+ outputs=[prompt]
808
+ )
809
+
810
+ # Connect image generation button
811
+ run_button.click(
812
+ fn=infer,
813
+ inputs=[
814
+ prompt,
815
+ negative_prompt,
816
+ seed,
817
+ randomize_seed,
818
+ width,
819
+ height,
820
+ guidance_scale,
821
+ num_inference_steps,
822
+ ],
823
+ outputs=[result, current_seed]
824
+ ).then(
825
+ fn=update_image_state,
826
+ inputs=[result, current_seed],
827
+ outputs=[current_image, current_seed]
828
+ )
829
+
830
+ # Connect Image-to-Image button
831
+ img2img_run_button.click(
832
+ fn=img2img_infer,
833
+ inputs=[
834
+ input_image,
835
+ img2img_prompt,
836
+ img2img_negative_prompt,
837
+ strength,
838
+ img2img_seed,
839
+ img2img_randomize_seed,
840
+ img2img_width,
841
+ img2img_height,
842
+ img2img_guidance_scale,
843
+ img2img_num_inference_steps
844
+ ],
845
+ outputs=[img2img_result, img2img_seed]
846
+ )
847
+
848
+ demo.queue().launch()