flux2api / app.py
from flask import Flask, request, Response
import requests
import json
import time
app = Flask(__name__)

def get_token():
    """Fetch a temporary access token from the Flux AI backend."""
    url = "https://fluxaiweb.com/flux/getToken"
    response = requests.get(url)
    if response.status_code == 200:
        response_json = response.json()
        return response_json.get("data", {}).get("token")
    return None

def req_flux(token, prompt_value, aspect_ratio="1:1", output_format="webp", num_outputs=1, output_quality=90):
    """Ask the Flux AI generateImage endpoint for an image and return its URL (or None on failure)."""
    url = "https://fluxaiweb.com/flux/generateImage"
    payload = {
        "prompt": prompt_value,
        "aspectRatio": aspect_ratio,
        "outputFormat": output_format,
        "numOutputs": num_outputs,
        "outputQuality": output_quality
    }
    headers = {
        'Content-Type': 'application/json',
        'token': token
    }
    try:
        response = requests.post(url, headers=headers, json=payload)
        response.raise_for_status()
        data = response.json()
        return data.get("data", {}).get("image")
    except requests.exceptions.RequestException as e:
        print(f"Error making request: {e}")
        return None
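
# For reference, req_flux() assumes the generateImage endpoint returns the image
# URL under data.image. The JSON below is an illustrative sketch of that shape,
# not a documented contract; any other fields in the real response are ignored.
#
#   {"data": {"image": "https://.../generated-image.webp"}}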

def generate_fake_stream(image_url):
    """Yield OpenAI-style chat.completion.chunk events, ending with the image link."""
    chunks = [
        "I'm generating an image based on your prompt...",
        "The image is being processed...",
        "Almost there...",
        f"Image generated successfully! Here it is:\n\n![Generated Image]({image_url})"
    ]
    for i, chunk in enumerate(chunks):
        chunk_data = {
            'id': f'chatcmpl-{int(time.time())}',
            'object': 'chat.completion.chunk',
            'created': int(time.time()),
            'model': 'flux-ai-image-generator',
            'choices': [{'index': 0,
                         'delta': {'role': 'assistant' if i == 0 else None, 'content': chunk},
                         'finish_reason': None if i < len(chunks) - 1 else 'stop'}]
        }
        yield f"data: {json.dumps(chunk_data)}\n\n"
        time.sleep(1)  # Simulate processing time
    yield "data: [DONE]\n\n"
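
# Each line yielded above is one Server-Sent Events message in the OpenAI
# chat.completion.chunk format; the values below are illustrative only:
#
#   data: {"id": "chatcmpl-1700000000", "object": "chat.completion.chunk",
#          "created": 1700000000, "model": "flux-ai-image-generator",
#          "choices": [{"index": 0, "delta": {"role": "assistant", "content": "..."},
#                       "finish_reason": null}]}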

@app.route('/hf/v1/chat/completions', methods=['POST'])
def chat_completions():
    """OpenAI-compatible chat endpoint: the last user message becomes the image prompt."""
    data = request.get_json(silent=True) or {}
    messages = data.get('messages', [])
    stream = data.get('stream', False)
    # Extract the prompt from the last user message
    prompt = next((msg.get('content') for msg in reversed(messages) if msg.get('role') == 'user'), None)
    if not prompt:
        return Response(json.dumps({'error': 'No valid prompt provided'}), status=400, mimetype='application/json')
    token = get_token()
    if not token:
        return Response(json.dumps({'error': 'Failed to get token'}), status=500, mimetype='application/json')
    image_url = req_flux(token, prompt)
    if not image_url:
        return Response(json.dumps({'error': 'Failed to generate image'}), status=500, mimetype='application/json')
    if stream:
        return Response(generate_fake_stream(image_url), mimetype='text/event-stream')
    else:
        response = {
            "id": f"chatcmpl-{int(time.time())}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": "flux-ai-image-generator",
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": f"I've generated an image based on your prompt. Here it is:\n\n![Generated Image]({image_url})"
                    },
                    "finish_reason": "stop"
                }
            ],
            "usage": {
                "prompt_tokens": len(prompt.split()),
                "completion_tokens": 20,  # Approximate
                "total_tokens": len(prompt.split()) + 20
            }
        }
        return Response(json.dumps(response), mimetype='application/json')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860)
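
# Minimal client sketch (not part of the app): assuming the server above is
# running locally on port 7860, a non-streaming request could look like this.
# Only the last user message is used as the image prompt; the model name is
# not interpreted by the server.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/hf/v1/chat/completions",
#       json={
#           "model": "flux-ai-image-generator",
#           "messages": [{"role": "user", "content": "a watercolor fox"}],
#           "stream": False,
#       },
#   )
#   print(resp.json()["choices"][0]["message"]["content"])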