# app.py

import gradio as gr
from PIL import Image
import base64
import requests
import os
from io import BytesIO

from utils.planner import extract_scene_plan  # 🧠 Brain Layer

# πŸ” Hugging Face keys
HF_API_KEY = os.getenv("HF_API_KEY")
SDXL_MODEL_ID = "fal-ai/fast-sdxl/image-to-image"  # ⚠️ fal.ai endpoint path, not a Hugging Face org/name repo ID
SDXL_API_URL = f"https://api-inference.huggingface.co/models/{SDXL_MODEL_ID}"
HEADERS = {"Authorization": f"Bearer {HF_API_KEY}"}
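# Note: HF_API_KEY must be set in the environment (e.g. as a Space secret or
# exported shell variable); if it is missing, os.getenv() returns None and
# every request goes out with an invalid "Bearer None" token.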

# 🚀 Image generation (img2img)
def process_image(prompt, image, num_variations):
    try:
        print("🧠 Prompt received:", prompt)
        
        # Step 1: Brain Layer
        reasoning_json = extract_scene_plan(prompt)
        print("🧠 Scene plan extracted:", reasoning_json)

        # Step 2: Encode input image as a base64 JPEG
        buffered = BytesIO()
        image = image.convert("RGB")  # JPEG cannot store an alpha channel (e.g. RGBA PNG uploads)
        image.save(buffered, format="JPEG")
        img_bytes = buffered.getvalue()
        encoded_image = base64.b64encode(img_bytes).decode("utf-8")

        # Step 3: Send image + prompt to HF API
        outputs = []
        for i in range(int(num_variations)):  # the slider value can arrive as a float
            payload = {
                "image": encoded_image,
                "prompt": prompt,
                "negative_prompt": "blurry, deformed, cropped",
                "num_inference_steps": 25,
                "guidance_scale": 7.5
            }

            print(f"πŸ“€ Sending request to HF (variation {i+1})")
            response = requests.post(SDXL_API_URL, headers=HEADERS, json=payload)

            if response.status_code == 200:
                try:
                    result_json = response.json()
                    if "images" in result_json:
                        base64_img = result_json["images"][0]
                        result_image = Image.open(BytesIO(base64.b64decode(base64_img)))
                        outputs.append(result_image)
                        print(f"βœ… Decoded image variation {i+1} successfully")
                    else:
                        print(f"⚠️ No 'images' key found in response")
                        outputs.append("❌ No image in response.")
                except Exception as decode_err:
                    print("❌ Image decode error:", decode_err)
                    outputs.append("❌ Failed to decode image.")
            else:
                print(f"❌ HF API error: {response.status_code} - {response.text}")
                outputs.append(f"Error {response.status_code}: {response.text}")

        return outputs, reasoning_json

    except Exception as e:
        print("❌ General Exception in process_image:", e)
        return ["Processing error occurred"], {"error": str(e)}

# 🎨 Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("# 🧠 NewCrux AI Demo: Image-to-Image using Fast SDXL + Brain Layer")

    with gr.Row():
        with gr.Column():
            prompt_input = gr.Textbox(label="Enter Prompt")
            image_input = gr.Image(type="pil", label="Upload Product Image")
            variation_slider = gr.Slider(1, 4, step=1, value=1, label="Number of Variations")
            generate_btn = gr.Button("Generate")

        with gr.Column():
            output_gallery = gr.Gallery(
                label="Generated Image Variations",
                columns=2,
                rows=2,
                height="auto"
            )
            json_output = gr.JSON(label="Brain Layer Reasoning (Scene Plan)")

    generate_btn.click(
        fn=process_image,
        inputs=[prompt_input, image_input, variation_slider],
        outputs=[output_gallery, json_output]
    )

demo.launch(share=True)
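
# --------------------------------------------------------------------------
# For reference: a minimal sketch of the interface utils/planner.py is assumed
# to expose (the real module is not shown in this file). The only contract
# app.py relies on is that extract_scene_plan(prompt) returns a
# JSON-serializable dict, which is passed straight to the gr.JSON output.
#
#   def extract_scene_plan(prompt: str) -> dict:
#       """Placeholder: split a free-form prompt into subject + style hints."""
#       parts = [p.strip() for p in prompt.split(",") if p.strip()]
#       return {
#           "subject": parts[0] if parts else "",
#           "style_hints": parts[1:],
#           "raw_prompt": prompt,
#       }
# --------------------------------------------------------------------------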