Create app.py
app.py
ADDED
@@ -0,0 +1,82 @@
from flask import Flask, request, jsonify
import requests
import time

app = Flask(__name__)

def get_token():
    url = "https://fluxaiweb.com/flux/getToken"
    response = requests.get(url)
    if response.status_code == 200:
        response_json = response.json()
        return response_json.get("data", {}).get("token")
    return None

def req_flux(token, prompt_value, aspect_ratio="1:1", output_format="webp", num_outputs=1, output_quality=90):
    url = "https://fluxaiweb.com/flux/generateImage"
    payload = {
        "prompt": prompt_value,
        "aspectRatio": aspect_ratio,
        "outputFormat": output_format,
        "numOutputs": num_outputs,
        "outputQuality": output_quality
    }
    headers = {
        'Content-Type': 'application/json',
        'token': token
    }
    try:
        response = requests.post(url, headers=headers, json=payload)
        response.raise_for_status()
        data = response.json()
        return data.get("data", {}).get("image")
    except requests.exceptions.RequestException as e:
        print(f"Error making request: {e}")
        return None

@app.route('/hf/v1/chat/completions', methods=['POST'])
def chat_completions():
    data = request.json
    messages = data.get('messages', [])

    # Extract the prompt from the last user message
    prompt = next((msg['content'] for msg in reversed(messages) if msg['role'] == 'user'), None)

    if not prompt:
        return jsonify({'error': 'No valid prompt provided'}), 400

    token = get_token()
    if not token:
        return jsonify({'error': 'Failed to get token'}), 500

    image_url = req_flux(token, prompt)
    if not image_url:
        return jsonify({'error': 'Failed to generate image'}), 500

    # Construct response in ChatCompletion format
    response = {
        "id": f"chatcmpl-{int(time.time())}",
        "object": "chat.completion",
        "created": int(time.time()),
        "model": "flux-ai-image-generator",
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": f"I've generated an image based on your prompt. You can view it here: {image_url}"
                },
                "finish_reason": "stop"
            }
        ],
        "usage": {
            "prompt_tokens": len(prompt.split()),
            "completion_tokens": 20,  # Approximate
            "total_tokens": len(prompt.split()) + 20
        }
    }

    return jsonify(response)

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860)
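
For reference, a minimal client sketch (not part of the commit) showing how the /hf/v1/chat/completions endpoint above could be called. It assumes the app is reachable locally on port 7860, matching the app.run() call in this file; the prompt text is only illustrative.

import requests

payload = {
    "messages": [
        {"role": "user", "content": "A watercolor painting of a lighthouse at dusk"}
    ]
}

# POST a ChatCompletion-style request; the assistant message in the
# response contains the generated image URL.
resp = requests.post("http://localhost:7860/hf/v1/chat/completions", json=payload)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])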