Update app.py
app.py CHANGED
@@ -1,5 +1,6 @@
-from flask import Flask, request, jsonify
+from flask import Flask, request, Response
 import requests
+import json
 import time

 app = Flask(__name__)
@@ -34,49 +35,65 @@ def req_flux(token, prompt_value, aspect_ratio="1:1", output_format="webp", num_
         print(f"Error making request: {e}")
         return None

+def generate_fake_stream(image_url):
+    chunks = [
+        "I'm generating an image based on your prompt...",
+        "The image is being processed...",
+        "Almost there...",
+        f"Image generated successfully! You can view it here: {image_url}"
+    ]
+
+    for i, chunk in enumerate(chunks):
+        yield f"data: {json.dumps({'id': f'chatcmpl-{int(time.time())}', 'object': 'chat.completion.chunk', 'created': int(time.time()), 'model': 'flux-ai-image-generator', 'choices': [{'index': 0, 'delta': {'role': 'assistant' if i == 0 else None, 'content': chunk}, 'finish_reason': None if i < len(chunks) - 1 else 'stop'}]})}\n\n"
+        time.sleep(1) # Simulate processing time
+
+    yield "data: [DONE]\n\n"
+
 @app.route('/hf/v1/chat/completions', methods=['POST'])
 def chat_completions():
     data = request.json
     messages = data.get('messages', [])
+    stream = data.get('stream', False)

     # Extract the prompt from the last user message
     prompt = next((msg['content'] for msg in reversed(messages) if msg['role'] == 'user'), None)

     if not prompt:
-        return
+        return Response(json.dumps({'error': 'No valid prompt provided'}), status=400, mimetype='application/json')

     token = get_token()
     if not token:
-        return
+        return Response(json.dumps({'error': 'Failed to get token'}), status=500, mimetype='application/json')

     image_url = req_flux(token, prompt)
     if not image_url:
-        return
+        return Response(json.dumps({'error': 'Failed to generate image'}), status=500, mimetype='application/json')

-    response = {
-        ...
-        "choices": [
-            ...
-        ],
-        "usage": {
-            "prompt_tokens": len(prompt.split()),
-            "completion_tokens": 20, # Approximate
-            "total_tokens": len(prompt.split()) + 20
-        }
-    }
-
-    return jsonify(response)
+    if stream:
+        return Response(generate_fake_stream(image_url), mimetype='text/event-stream')
+    else:
+        response = {
+            "id": f"chatcmpl-{int(time.time())}",
+            "object": "chat.completion",
+            "created": int(time.time()),
+            "model": "flux-ai-image-generator",
+            "choices": [
+                {
+                    "index": 0,
+                    "message": {
+                        "role": "assistant",
+                        "content": f"I've generated an image based on your prompt. You can view it here: {image_url}"
+                    },
+                    "finish_reason": "stop"
+                }
+            ],
+            "usage": {
+                "prompt_tokens": len(prompt.split()),
+                "completion_tokens": 20, # Approximate
+                "total_tokens": len(prompt.split()) + 20
+            }
+        }
+        return Response(json.dumps(response), mimetype='application/json')

 if __name__ == '__main__':
     app.run(host='0.0.0.0', port=7860)
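
For context, a minimal client sketch for the new streaming path. It is an illustration only, not part of the commit: it assumes the Space is reachable at http://localhost:7860 (the port used in app.run) and the prompt text is a placeholder. It parses the "data:" lines produced by generate_fake_stream and stops at the [DONE] sentinel.

import json
import requests

BASE_URL = "http://localhost:7860"  # assumption: Space running locally on the app.run port

payload = {
    "messages": [{"role": "user", "content": "a watercolor fox in a snowy forest"}],  # placeholder prompt
    "stream": True,
}

# The endpoint replies with text/event-stream; each event is a line of the form
# "data: {...chat.completion.chunk...}" and the stream ends with "data: [DONE]".
with requests.post(f"{BASE_URL}/hf/v1/chat/completions", json=payload, stream=True) as resp:
    resp.raise_for_status()
    for line in resp.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        data = line[len("data: "):]
        if data == "[DONE]":
            break
        chunk = json.loads(data)
        delta = chunk["choices"][0]["delta"].get("content")
        if delta:
            print(delta)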
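
The non-streaming path (stream omitted or false) returns a single chat.completion JSON body, so a plain POST is enough; again a sketch with placeholder values:

import requests

resp = requests.post(
    "http://localhost:7860/hf/v1/chat/completions",  # assumption: local Space URL
    json={"messages": [{"role": "user", "content": "a watercolor fox in a snowy forest"}]},
)
resp.raise_for_status()
body = resp.json()
print(body["choices"][0]["message"]["content"])  # message text includes the generated image URL
print(body["usage"])                             # prompt_tokens / completion_tokens / total_tokens

Because both shapes mirror the OpenAI chat.completions wire format, an OpenAI-compatible client pointed at http://localhost:7860/hf/v1 should in principle be able to consume the same route, though that is not exercised in this commit.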