Update app.py
app.py CHANGED
@@ -2,6 +2,7 @@ from flask import Flask, request, Response
 import requests
 import json
 import time
+from openai import OpenAI
 
 app = Flask(__name__)
 
@@ -35,9 +36,23 @@ def req_flux(token, prompt_value, aspect_ratio="1:1", output_format="webp", num_
         print(f"Error making request: {e}")
         return None
 
-def generate_fake_stream(image_url):
+def generate_optimized_prompt(api_key, api_base, system_prompt, user_input):
+    client = OpenAI(api_key=api_key, base_url=api_base)
+
+    completion = client.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": user_input}
+        ]
+    )
+    return completion.choices[0].message.content
+
+def generate_fake_stream(image_url, optimized_prompt):
     chunks = [
-        "I'm generating an image based on your prompt...",
+        "I'm generating an optimized prompt based on your input...",
+        f"Optimized prompt: {optimized_prompt}",
+        "Now generating an image based on the optimized prompt...",
         "The image is being processed...",
         "Almost there...",
         f"Image generated successfully! Here it is:\n\n"
@@ -56,21 +71,31 @@ def chat_completions():
     stream = data.get('stream', False)
 
     # Extract the prompt from the last user message
-    prompt = next((msg['content'] for msg in reversed(messages) if msg['role'] == 'user'), None)
+    user_input = next((msg['content'] for msg in reversed(messages) if msg['role'] == 'user'), None)
+
+    if not user_input:
+        return Response(json.dumps({'error': 'No valid user input provided'}), status=400, mimetype='application/json')
 
-    if not prompt:
-        return Response(json.dumps({'error': 'No prompt provided'}), status=400, mimetype='application/json')
+    # Generate optimized prompt using GPT-4o-mini
+    '''
+    secret_variable:
+    api_key = "" # Replace with your actual API key
+    api_base = "" # Replace with your actual API base URL
+    '''
+    system_prompt = """作为 Stable Diffusion Prompt 提示词专家,您将从关键词中创建提示,通常来自 Danbooru 等数据库。提示通常描述图像,使用常见词汇,按重要性排列,并用逗号分隔。避免使用"-"或".",但可以接受空格和自然语言。避免词汇重复。为了强调关键词,请将其放在括号中以增加其权重。例如,"(flowers)"将'flowers'的权重增加1.1倍,而"(((flowers)))"将其增加1.331倍。使用"(flowers:1.5)"将'flowers'的权重增加1.5倍。只为重要的标签增加权重。提示包括三个部分:前缀(质量标签+风格词+效果器)+ 主题(图像的主要焦点)+ 场景(背景、环境)。前缀影响图像质量。像"masterpiece"、"best quality"、"ultra-detailed"、"high resolution"、"photorealistic" 这样的标签可以显著提高图像的细节和整体质量。像"illustration"、"lensflare"、"cinematic lighting" 这样的风格词定义图像的风格和光影效果。像"best lighting"、"volumetric lighting"、"depth of field" 这样的效果器会影响光照和深度。主题是图像的主要焦点,如角色或场景。对主题进行详细描述可以确保图像丰富而详细。增加主题的权重以增强其清晰度。对于角色,描述面部、头发、身体、服装、姿势等特征,同时加入细致的纹理和高光处理。场景描述环境。没有场景,图像的背景是平淡的,主题显得过大。某些主题本身包含场景(例如建筑物、风景)。像"lush greenery"、"golden sunlight"、"crystal clear river" 这样的环境词可以丰富场景,并增强其视觉吸引力。考虑添加天气效果,如"soft morning mist"、"sunset glow" 来进一步增强场景的氛围。你的任务是设计图像生成的提示。请按照以下步骤进行操作:我会发送给您一个图像场景。需要你生成详细的图像描述。图像描述必须是英文,输出为Positive Prompt。确保提示词仅用于描述图像内容,不包含会显示在图像中的文本。示例:我发送:二战时期的护士。您回复只回复:A WWII-era nurse in a German uniform, holding a wine bottle and stethoscope, sitting at a table in white attire, with a table in the background, masterpiece, ultra-detailed, high resolution, photorealistic, illustration style, best lighting, volumetric lighting, depth of field, sharp focus, detailed character, richly textured environment."""
+    optimized_prompt = generate_optimized_prompt(api_key, api_base, system_prompt, user_input)
 
+    # Generate image using the optimized prompt
     token = get_token()
     if not token:
         return Response(json.dumps({'error': 'Failed to get token'}), status=500, mimetype='application/json')
 
-    image_url = req_flux(token, prompt)
+    image_url = req_flux(token, optimized_prompt)
     if not image_url:
         return Response(json.dumps({'error': 'Failed to generate image'}), status=500, mimetype='application/json')
 
     if stream:
-        return Response(generate_fake_stream(image_url), mimetype='text/event-stream')
+        return Response(generate_fake_stream(image_url, optimized_prompt), mimetype='text/event-stream')
     else:
         response = {
             "id": f"chatcmpl-{int(time.time())}",
@@ -82,15 +107,15 @@ def chat_completions():
                 "index": 0,
                 "message": {
                     "role": "assistant",
-                    "content": f"I've generated an image based on your prompt. Here it is:\n\n"
+                    "content": f"I've generated an optimized prompt based on your input: '{optimized_prompt}'\n\nUsing this prompt, I've created an image. Here it is:\n\n"
                 },
                 "finish_reason": "stop"
             }
         ],
         "usage": {
-            "prompt_tokens": len(prompt.split()),
-            "completion_tokens": 20, # Approximate
-            "total_tokens": len(prompt.split()) + 20
+            "prompt_tokens": len(user_input.split()),
+            "completion_tokens": len(optimized_prompt.split()) + 20, # Approximate
+            "total_tokens": len(user_input.split()) + len(optimized_prompt.split()) + 20
         }
     }
     return Response(json.dumps(response), mimetype='application/json')
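
Note on the new hunk in chat_completions(): api_key and api_base are only mentioned inside the triple-quoted secret_variable: string, so the call to generate_optimized_prompt(api_key, api_base, system_prompt, user_input) will raise a NameError unless those names are defined elsewhere in app.py. A minimal sketch of one way to supply them, assuming the values are stored as Space secrets exposed as environment variables; the names OPENAI_API_KEY and OPENAI_API_BASE are illustrative, not part of this commit:

import os

# Hypothetical secret names; adjust to whatever the Space actually defines.
api_key = os.environ.get("OPENAI_API_KEY", "")
api_base = os.environ.get("OPENAI_API_BASE", "https://api.openai.com/v1")

if not api_key:
    # Fail early with a clear message instead of a NameError at request time.
    raise RuntimeError("OPENAI_API_KEY is not set")

With the secrets read at import time, the request handler can pass them to generate_optimized_prompt() unchanged.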