Update app.py
app.py CHANGED
@@ -76,8 +76,8 @@ def chat_fn(message, chat_history, seed, randomize_seed, guidance_scale, steps,
     pipe = pipe.to("cpu")
     torch.cuda.empty_cache()
 
-    # Return the
-    return
+    # Return the PIL Image directly - ChatInterface will handle it properly
+    return image
 
 # --- UI Definition using gr.ChatInterface ---
 
@@ -102,6 +102,7 @@ demo = gr.ChatInterface(
         <br>
         Find the model on <a href='https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev' target='_blank'>Hugging Face</a>.
         </p>""",
+    multimodal=True,  # This is important for MultimodalTextbox to work
     textbox=gr.MultimodalTextbox(
         file_types=["image"],
         placeholder="Type a prompt and/or upload an image...",
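For context, here is a minimal sketch of how the two changed pieces fit together: the handler returns the generated PIL image, and `multimodal=True` is set so the custom `gr.MultimodalTextbox` payload actually reaches the handler. The `run_pipeline` helper and the simplified `chat_fn` signature below are placeholders for illustration, not the Space's actual code.

import gradio as gr

def chat_fn(message, chat_history):
    # With multimodal=True, `message` arrives as a dict: {"text": str, "files": [paths]}
    prompt = message["text"]
    files = message["files"]

    # Placeholder for the FLUX.1-Kontext generation step; the real app runs a
    # diffusers pipeline and offloads it to CPU afterwards, as in the diff above.
    image = run_pipeline(prompt, files)  # hypothetical helper returning a PIL image

    # Per the commit: return the PIL Image directly - ChatInterface will handle it properly
    return image

demo = gr.ChatInterface(
    chat_fn,
    multimodal=True,  # required so the MultimodalTextbox value is passed to chat_fn
    textbox=gr.MultimodalTextbox(
        file_types=["image"],
        placeholder="Type a prompt and/or upload an image...",
    ),
)

if __name__ == "__main__":
    demo.launch()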