File size: 3,741 Bytes
4d7cb47
d039d52
4d7cb47
d039d52
 
 
 
e04a756
 
4695331
 
 
 
e04a756
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4695331
e04a756
 
 
 
4695331
e04a756
d039d52
4d7cb47
 
 
 
 
adc2bc0
4d7cb47
 
 
 
 
 
 
 
d039d52
 
 
 
4d7cb47
d039d52
4695331
d039d52
 
 
4d7cb47
d039d52
 
 
4d7cb47
d039d52
 
 
4d7cb47
d039d52
4d7cb47
 
 
 
 
 
 
 
 
 
 
 
 
adc2bc0
4d7cb47
 
 
 
 
 
4695331
4d7cb47
d039d52
 
4d7cb47
d039d52
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
from flask import Flask, request, Response
import requests
import json
import time

# Module-level WSGI application; routes are registered on it below and it is
# served by app.run() in the __main__ guard.
app = Flask(__name__)

def get_token():
    """Fetch a short-lived API token from the fluxaiweb token endpoint.

    Returns:
        The token string on success, or None if the request fails, times out,
        or the response body does not contain ``data.token``.
    """
    url = "https://fluxaiweb.com/flux/getToken"
    try:
        # Timeout keeps a dead upstream from hanging the request handler.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        # ValueError below also covers a non-JSON response body.
        return response.json().get("data", {}).get("token")
    except (requests.exceptions.RequestException, ValueError) as e:
        print(f"Error fetching token: {e}")
        return None

def req_flux(token, prompt_value, aspect_ratio="1:1", output_format="webp", num_outputs=1, output_quality=90):
    """Request an image generation from the fluxaiweb API.

    Args:
        token: Auth token obtained from get_token(), sent as a header.
        prompt_value: Text prompt describing the image.
        aspect_ratio: Image aspect ratio, e.g. "1:1" or "16:9".
        output_format: Image format requested from the API (e.g. "webp").
        num_outputs: Number of images to generate.
        output_quality: Quality setting in the 0-100 range.

    Returns:
        The generated image URL from ``data.image``, or None on any request,
        HTTP-status, or JSON-decoding failure.
    """
    url = "https://fluxaiweb.com/flux/generateImage"
    payload = {
        "prompt": prompt_value,
        "aspectRatio": aspect_ratio,
        "outputFormat": output_format,
        "numOutputs": num_outputs,
        "outputQuality": output_quality
    }
    headers = {
        'Content-Type': 'application/json',
        'token': token
    }
    try:
        # Generation is slow; a generous timeout still bounds the wait.
        response = requests.post(url, headers=headers, json=payload, timeout=120)
        response.raise_for_status()
        # ValueError covers a non-JSON body, which previously went uncaught.
        data = response.json()
        return data.get("data", {}).get("image")
    except (requests.exceptions.RequestException, ValueError) as e:
        print(f"Error making request: {e}")
        return None

def generate_fake_stream(image_url, delay=1):
    """Yield an OpenAI-compatible SSE stream announcing the generated image.

    Emits three progress chunks, a final chunk embedding *image_url* as a
    markdown image, and the ``data: [DONE]`` terminator.

    Args:
        image_url: URL of the generated image, interpolated into the last
            content chunk.
        delay: Seconds slept after each chunk to simulate processing time
            (default 1, matching the original pacing; pass 0 to disable).

    Yields:
        ``data: <json>\\n\\n`` lines in chat.completion.chunk format.
    """
    chunks = [
        "I'm generating an image based on your prompt...",
        "The image is being processed...",
        "Almost there...",
        f"Image generated successfully! Here it is:\n\n![Generated Image]({image_url})"
    ]

    last = len(chunks) - 1
    for i, chunk in enumerate(chunks):
        event = {
            'id': f'chatcmpl-{int(time.time())}',
            'object': 'chat.completion.chunk',
            'created': int(time.time()),
            'model': 'flux-ai-image-generator',
            'choices': [{
                'index': 0,
                # Per the OpenAI streaming format, only the first chunk
                # carries the role; the last chunk carries finish_reason.
                'delta': {'role': 'assistant' if i == 0 else None, 'content': chunk},
                'finish_reason': 'stop' if i == last else None,
            }],
        }
        yield f"data: {json.dumps(event)}\n\n"
        if delay:
            time.sleep(delay)  # Simulate processing time

    yield "data: [DONE]\n\n"

@app.route('/hf/v1/chat/completions', methods=['POST'])
def chat_completions():
    """OpenAI-compatible chat completions endpoint backed by image generation.

    Takes the last user message as the image prompt, generates an image via
    the fluxaiweb API, and returns it as a markdown image inside an
    OpenAI-style response (streamed as SSE when ``stream`` is true).

    Returns:
        A JSON error (400 for a missing prompt, 500 for upstream failures),
        an SSE stream, or a chat.completion JSON body.
    """
    # get_json(silent=True) returns None instead of raising on a missing or
    # malformed JSON body; `request.json` would have produced an opaque 500.
    data = request.get_json(silent=True) or {}
    messages = data.get('messages', [])
    stream = data.get('stream', False)

    # Extract the prompt from the last user message. Use .get() so a
    # malformed message dict yields our 400 below instead of a KeyError/500.
    prompt = next(
        (msg.get('content') for msg in reversed(messages)
         if isinstance(msg, dict) and msg.get('role') == 'user'),
        None,
    )

    if not prompt:
        return Response(json.dumps({'error': 'No valid prompt provided'}), status=400, mimetype='application/json')

    token = get_token()
    if not token:
        return Response(json.dumps({'error': 'Failed to get token'}), status=500, mimetype='application/json')

    image_url = req_flux(token, prompt)
    if not image_url:
        return Response(json.dumps({'error': 'Failed to generate image'}), status=500, mimetype='application/json')

    if stream:
        return Response(generate_fake_stream(image_url), mimetype='text/event-stream')
    else:
        response = {
            "id": f"chatcmpl-{int(time.time())}",
            "object": "chat.completion",
            "created": int(time.time()),
            "model": "flux-ai-image-generator",
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": f"I've generated an image based on your prompt. Here it is:\n\n![Generated Image]({image_url})"
                    },
                    "finish_reason": "stop"
                }
            ],
            "usage": {
                # Whitespace word count stands in for real tokenization.
                "prompt_tokens": len(prompt.split()),
                "completion_tokens": 20,  # Approximate
                "total_tokens": len(prompt.split()) + 20
            }
        }
        return Response(json.dumps(response), mimetype='application/json')

# Script entry point: serve on all interfaces at port 7860 using Flask's
# built-in development server (not intended for production traffic).
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860)