# --- Hugging Face Spaces page chrome captured by the scraper; commented out so the file parses ---
# Spaces: Running
# File size: 6,816 Bytes
# Blame gutter: 4d7cb47 7f3f885 d039d52 7f3f885 d039d52 c630ad1 d039d52 e04a756 7f3f885 4695331 e04a756 7f3f885 e04a756 7f3f885 d039d52 48b41e4 c630ad1 48b41e4 c630ad1 48b41e4 c630ad1 4d7cb47 5de94da 875de5d 5de94da 4d7cb47 875de5d 4d7cb47 d039d52 4d7cb47 d039d52 7f3f885 c630ad1 d039d52 7f3f885 b33840e adbb54f b33840e d039d52 7f3f885 d039d52 7f3f885 d039d52 7f3f885 d039d52 7f3f885 d039d52 4d7cb47 c630ad1 4d7cb47 c630ad1 4d7cb47 c630ad1 d039d52 4d7cb47 d039d52
# (line-number column from the web view removed)
from flask import Flask, request, Response
import os
import requests
import json
import time
from openai import OpenAI
# WSGI application instance; all routes below are registered against it.
app = Flask(__name__)
def get_token():
    """Fetch a short-lived access token from the fluxaiweb token endpoint.

    Returns:
        The token string, or None when the request fails, the endpoint
        returns a non-2xx status, or the response body is not valid JSON.
    """
    url = "https://fluxaiweb.com/flux/getToken"
    try:
        # Bounded timeout so a hung upstream cannot stall the request handler
        # forever (requests has no default timeout).
        response = requests.get(url, timeout=15)
        response.raise_for_status()
        return response.json().get("data", {}).get("token")
    except (requests.exceptions.RequestException, ValueError) as e:
        # ValueError covers a non-JSON body; preserve the original contract
        # of returning None on any failure.
        print(f"Error fetching token: {e}")
        return None
def req_flux(token, prompt_value, aspect_ratio="1:1", output_format="webp", num_outputs=1, output_quality=90):
    """Request image generation from the fluxaiweb API.

    Args:
        token: Access token obtained from get_token().
        prompt_value: Text prompt describing the image.
        aspect_ratio: Image aspect ratio, e.g. "1:1" or "16:9".
        output_format: Image file format ("webp", "png", ...).
        num_outputs: Number of images to request.
        output_quality: Compression quality, 0-100.

    Returns:
        The generated image URL (or None if the request fails or the
        response is malformed).
    """
    url = "https://fluxaiweb.com/flux/generateImage"
    payload = {
        "prompt": prompt_value,
        "aspectRatio": aspect_ratio,
        "outputFormat": output_format,
        "numOutputs": num_outputs,
        "outputQuality": output_quality
    }
    headers = {
        'Content-Type': 'application/json',
        'token': token
    }
    try:
        # Generation can be slow, but never wait unboundedly (requests has
        # no default timeout).
        response = requests.post(url, headers=headers, json=payload, timeout=120)
        response.raise_for_status()
        data = response.json()
        return data.get("data", {}).get("image")
    except (requests.exceptions.RequestException, ValueError) as e:
        # ValueError covers a body that is not valid JSON; keep the
        # original None-on-failure contract.
        print(f"Error making request: {e}")
        return None
def generate_optimized_prompt(api_key, api_base, user_input):
    """Ask the chat model to rewrite *user_input* as a Stable Diffusion prompt.

    Args:
        api_key: Key for the OpenAI-compatible endpoint.
        api_base: Base URL of the OpenAI-compatible endpoint.
        user_input: Raw user text describing the desired image.

    Returns:
        The model's reply: the optimized prompt, in English.
    """
    # Prompt-engineering guidelines appended verbatim after the task sentence.
    guideline_parts = (
        "into a proper Stable Diffusion prompt in English, formatted according to the guidelines provided: ",
        "1. Create prompts using common vocabulary, arranged by importance and separated by commas. Avoid using '-' or '.', ",
        "but spaces and natural language are acceptable. Avoid repeating words. ",
        "2. To emphasize keywords, enclose them in parentheses to increase their weight. For example, '(flowers)' increases ",
        "the weight of 'flowers' by 1.1 times, '(((flowers)))' increases it by 1.331 times, and '(flowers:1.5)' increases ",
        "the weight of 'flowers' by 1.5 times. Only increase the weight for important tags. ",
        "3. The prompt should consist of three parts: prefix (quality tags + style words + effectors), subject ",
        "(main focus of the image), and scene (background, environment). The prefix affects image quality; tags like ",
        "'masterpiece,' 'best quality,' and '4k' enhance image detail, style words like 'illustration' and 'lensflare' ",
        "define the style of the image, and effectors like 'best lighting,' 'lensflare,' and 'depth of field' influence ",
        "lighting and depth. The subject is the main focus of the image, such as a character or scene. Providing a detailed ",
        "description of the subject ensures a rich and detailed image, and increasing the subject's weight enhances its clarity. ",
        "For characters, describe features like the face, hair, body, clothing, and pose. The scene describes the environment; ",
        "without a scene, the image's background appears plain, making the subject seem overly large. Some subjects inherently ",
        "include scenes (such as buildings or landscapes), and environmental words like 'flower meadow,' 'sunlight,' and 'river' ",
        "can enrich the scene. ",
        "Return only the revised prompt in English.",
    )
    system_prompt = (
        f"As a Stable Diffusion prompt expert, your task is to directly modify the following sentence: '{user_input}'"
        + "".join(guideline_parts)
    )
    llm_client = OpenAI(api_key=api_key, base_url=api_base)
    chat = llm_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_input},
        ],
    )
    return chat.choices[0].message.content
def generate_fake_stream(image_url, optimized_prompt, delay=0.5):
    """Yield an OpenAI-style SSE stream narrating progress and ending with the image.

    Args:
        image_url: URL of the generated image, embedded in the final chunk.
        optimized_prompt: The LLM-optimized prompt echoed back to the user.
        delay: Seconds to sleep between chunks to simulate processing time
            (default 0.5 keeps the original pacing).

    Yields:
        "data: {json}\\n\\n" lines in chat.completion.chunk format, followed
        by the "data: [DONE]" sentinel.
    """
    chunks = [
        "✨ 开始生成...\n正在根据您的输入生成优化提示词...\n",
        f"🎨 优化后的提示词:\n{optimized_prompt}\n",
        "🖼️ 正在根据优化后的提示词生成图像...\n",
        "🔄 图像正在处理中...\n",
        "⏳ 即将完成...\n",
        # Bug fix: the original never used image_url, so the stream announced
        # a result ("以下是结果") without ever delivering the image.
        f"🌟 生成成功!\n图像生成完毕,以下是结果:\n\n![Generated Image]({image_url})"
    ]
    last = len(chunks) - 1
    for i, chunk in enumerate(chunks):
        event = {
            'id': f'chatcmpl-{int(time.time())}',
            'object': 'chat.completion.chunk',
            'created': int(time.time()),
            'model': 'flux-ai-image-generator',
            'choices': [{
                'index': 0,
                # The OpenAI chunk format sends the role only on the first delta.
                'delta': {'role': 'assistant' if i == 0 else None, 'content': chunk},
                'finish_reason': 'stop' if i == last else None,
            }],
        }
        yield f"data: {json.dumps(event)}\n\n"
        time.sleep(delay)  # simulate processing time
    yield "data: [DONE]\n\n"
@app.route('/hf/v1/chat/completions', methods=['POST'])
def chat_completions():
    """OpenAI-compatible chat endpoint: optimize the prompt, generate an image, reply.

    Reads an OpenAI chat-completions request body; the last user message is
    used as the image description. Returns either an SSE stream (when
    "stream" is true) or a single chat.completion JSON object.
    """
    # request.json is None when the body is absent/not JSON; guard it.
    data = request.json or {}
    messages = data.get('messages', [])
    stream = data.get('stream', False)
    # Use the most recent user message as the image description.
    # .get('role') avoids a KeyError (-> 500) on malformed message objects.
    user_input = next((msg['content'] for msg in reversed(messages) if msg.get('role') == 'user'), None)
    if not user_input:
        return Response(json.dumps({'error': 'No valid user input provided'}), status=400, mimetype='application/json')
    # Rewrite the raw input into a Stable-Diffusion-style prompt via the LLM.
    api_key = os.getenv('api_key')
    api_base = os.getenv('api_base')
    optimized_prompt = generate_optimized_prompt(api_key, api_base, user_input)
    # Generate the image using the optimized prompt.
    token = get_token()
    if not token:
        return Response(json.dumps({'error': 'Failed to get token'}), status=500, mimetype='application/json')
    image_url = req_flux(token, optimized_prompt)
    if not image_url:
        return Response(json.dumps({'error': 'Failed to generate image'}), status=500, mimetype='application/json')
    if stream:
        return Response(generate_fake_stream(image_url, optimized_prompt), mimetype='text/event-stream')
    else:
        response = {
            "id": f"chatcmpl-{int(time.time())}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": "flux-ai-image-generator",
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        # Bug fix: the original ended with "Here it is:" but
                        # never actually included the image URL.
                        "content": f"I've generated an optimized prompt based on your input: '{optimized_prompt}'\n\nUsing this prompt, I've created an image. Here it is:\n\n![Generated Image]({image_url})"
                    },
                    "finish_reason": "stop"
                }
            ],
            "usage": {
                # Whitespace word counts only approximate real tokenization.
                "prompt_tokens": len(user_input.split()),
                "completion_tokens": len(optimized_prompt.split()) + 20,  # Approximate
                "total_tokens": len(user_input.split()) + len(optimized_prompt.split()) + 20
            }
        }
        return Response(json.dumps(response), mimetype='application/json')
if __name__ == '__main__':
    # Bug fix: removed the trailing " |" scrape artifact that made this line a
    # syntax error. Bind on all interfaces; 7860 is the conventional
    # Hugging Face Spaces port.
    app.run(host='0.0.0.0', port=7860)