flux2api / app.py
import json
import os
import random
import time

import requests
from flask import Flask, request, Response
from openai import OpenAI

app = Flask(__name__)

# Parse the proxy list JSON and build a pool of proxies grouped by country.
with open('proxy_list.json', 'r') as f:
    proxy_data = json.load(f)

proxy_pool = {}
for country, proxies in proxy_data['proxy_list'].items():
    proxy_pool[country] = [
        {
            'url': f"{p['type'].lower()}://{p['host']}:{p['port']}",
            'type': p['type'].lower()
        } for p in proxies
    ]

def get_random_proxy(country=None):
    if country and country in proxy_pool:
        return random.choice(proxy_pool[country])
    else:
        all_proxies = [proxy for proxies in proxy_pool.values() for proxy in proxies]
        return random.choice(all_proxies)

def make_request_with_proxy(method, url, **kwargs):
    proxy = get_random_proxy()
    proxies = {proxy['type']: proxy['url']}
    try:
        response = requests.request(method, url, proxies=proxies, timeout=30, **kwargs)
        response.raise_for_status()
        return response
    except requests.exceptions.RequestException as e:
        print(f"Error with proxy {proxy['url']}: {e}")
        return None

def get_token():
    url = "https://fluxaiweb.com/flux/getToken"
    response = make_request_with_proxy('GET', url)
    if response and response.status_code == 200:
        response_json = response.json()
        return response_json.get("data", {}).get("token")
    return None

def req_flux(token, prompt_value, aspect_ratio="1:1", output_format="webp", num_outputs=1, output_quality=90):
    url = "https://fluxaiweb.com/flux/generateImage"
    payload = {
        "prompt": prompt_value,
        "aspectRatio": aspect_ratio,
        "outputFormat": output_format,
        "numOutputs": num_outputs,
        "outputQuality": output_quality
    }
    headers = {
        'Content-Type': 'application/json',
        'token': token
    }
    response = make_request_with_proxy('POST', url, headers=headers, json=payload)
    if response:
        data = response.json()
        return data.get("data", {}).get("image")
    return None

def generate_optimized_prompt(api_key, api_base, system_prompt, user_input):
    client = OpenAI(api_key=api_key, base_url=api_base)
    completion = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_input}
        ]
    )
    return completion.choices[0].message.content

def generate_fake_stream(image_url, optimized_prompt):
    chunks = [
        "I'm generating an optimized prompt based on your input...",
        f"Optimized prompt: {optimized_prompt}",
        "Now generating an image based on the optimized prompt...",
        "The image is being processed...",
        "Almost there...",
        f"Image generated successfully! Here it is:\n\n![Generated Image]({image_url})"
    ]
    for i, chunk in enumerate(chunks):
        chunk_payload = {
            'id': f'chatcmpl-{int(time.time())}',
            'object': 'chat.completion.chunk',
            'created': int(time.time()),
            'model': 'flux-ai-image-generator',
            'choices': [{
                'index': 0,
                'delta': {'role': 'assistant' if i == 0 else None, 'content': chunk},
                'finish_reason': None if i < len(chunks) - 1 else 'stop'
            }]
        }
        yield f"data: {json.dumps(chunk_payload)}\n\n"
        time.sleep(1)  # Simulate processing time
    yield "data: [DONE]\n\n"

@app.route('/hf/v1/chat/completions', methods=['POST'])
def chat_completions():
    data = request.json
    messages = data.get('messages', [])
    stream = data.get('stream', False)
    user_input = next((msg['content'] for msg in reversed(messages) if msg['role'] == 'user'), None)
    if not user_input:
        return Response(json.dumps({'error': 'No valid user input provided'}), status=400, mimetype='application/json')
    api_key = os.getenv('api_key')
    api_base = os.getenv('api_base')
    system_prompt = """As a Stable Diffusion prompt expert, you create prompts from keywords, typically drawn from databases such as Danbooru. Prompts usually describe the image with common vocabulary, ordered by importance and separated by commas. Avoid using "-" or "."; spaces and natural language are acceptable. Avoid repeating words. To emphasize a keyword, place it in parentheses to increase its weight. For example, "(flowers)" increases the weight of 'flowers' by 1.1x, while "(((flowers)))" increases it by 1.331x. Use "(flowers:1.5)" to increase the weight of 'flowers' by 1.5x. Only add weight to important tags. A prompt has three parts: prefix (quality tags + style words + effectors) + subject (the main focus of the image) + scene (background, environment). The prefix affects image quality. Tags like "masterpiece", "best quality", "ultra-detailed", "high resolution", "photorealistic" can significantly improve the detail and overall quality of the image. Style words like "illustration", "lensflare", "cinematic lighting" define the image's style and lighting. Effectors like "best lighting", "volumetric lighting", "depth of field" affect lighting and depth. The subject is the main focus of the image, such as a character or a scene. Describing the subject in detail ensures a rich, detailed image; increase the subject's weight to enhance its clarity. For characters, describe features such as the face, hair, body, clothing, and pose, and add fine textures and highlights. The scene describes the environment. Without a scene, the background is plain and the subject appears too large. Some subjects inherently include a scene (e.g. buildings, landscapes). Environment words like "lush greenery", "golden sunlight", "crystal clear river" enrich the scene and enhance its visual appeal. Consider adding weather effects such as "soft morning mist" or "sunset glow" to further enhance the atmosphere. Your task is to design prompts for image generation. Follow these steps: I will send you an image scene, and you generate a detailed image description. The description must be in English and output as a Positive Prompt. Make sure the prompt only describes the image content and contains no text that would appear in the image. Example: I send: a WWII-era nurse. You reply with only: A WWII-era nurse in a German uniform, holding a wine bottle and stethoscope, sitting at a table in white attire, with a table in the background, masterpiece, ultra-detailed, high resolution, photorealistic, illustration style, best lighting, volumetric lighting, depth of field, sharp focus, detailed character, richly textured environment."""
    optimized_prompt = generate_optimized_prompt(api_key, api_base, system_prompt, user_input)

    token = None
    for _ in range(3):  # Try up to 3 times
        token = get_token()
        if token:
            break
        time.sleep(1)  # Wait 1 second before retrying
    if not token:
        return Response(json.dumps({'error': 'Failed to get token after multiple attempts'}), status=500, mimetype='application/json')

    image_url = None
    for _ in range(3):  # Try up to 3 times
        image_url = req_flux(token, optimized_prompt)
        if image_url:
            break
        time.sleep(1)  # Wait 1 second before retrying
    if not image_url:
        return Response(json.dumps({'error': 'Failed to generate image after multiple attempts'}), status=500, mimetype='application/json')

    if stream:
        return Response(generate_fake_stream(image_url, optimized_prompt), mimetype='text/event-stream')
    else:
        response = {
            "id": f"chatcmpl-{int(time.time())}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": "flux-ai-image-generator",
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": f"I've generated an optimized prompt based on your input: '{optimized_prompt}'\n\nUsing this prompt, I've created an image. Here it is:\n\n![Generated Image]({image_url})"
                    },
                    "finish_reason": "stop"
                }
            ],
            "usage": {
                "prompt_tokens": len(user_input.split()),
                "completion_tokens": len(optimized_prompt.split()) + 20,  # Approximate
                "total_tokens": len(user_input.split()) + len(optimized_prompt.split()) + 20
            }
        }
        return Response(json.dumps(response), mimetype='application/json')
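
# A usage sketch, not part of the original app: assuming the server is running
# locally on port 7860 and the `api_key` / `api_base` environment variables
# point at an OpenAI-compatible endpoint, a request could look like this:
#
#   curl http://localhost:7860/hf/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"messages": [{"role": "user", "content": "a WWII-era nurse"}], "stream": false}'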

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860)