Update app.py
app.py CHANGED

@@ -6,7 +6,6 @@ from google.genai import types
 import gradio as gr
 import io
 from PIL import Image
-import tempfile
 
 def save_binary_file(file_name, data):
     f = open(file_name, "wb")
@@ -63,7 +62,7 @@ def generate_image(prompt, output_filename="generated_image"):
         filename = f"{output_filename}{file_extension}"
         save_binary_file(filename, inline_data.data)
 
-        # Convert binary data to PIL Image
+        # Convert binary data to PIL Image for Gradio display
         img = Image.open(io.BytesIO(inline_data.data))
         return img, f"Image saved as {filename}"
     else:
@@ -72,7 +71,7 @@ def generate_image(prompt, output_filename="generated_image"):
     return None, "No image generated"
 
 # Function to handle chat interaction
-def chat_handler(user_input, chat_history):
+def chat_handler(user_input, chat_history, current_image):
     # Add user message to chat history
     chat_history.append({"role": "user", "content": user_input})
 
@@ -80,31 +79,29 @@ def chat_handler(user_input, chat_history):
     img, status = generate_image(user_input)
 
     # Add AI response to chat history
-    if img:
-        # Save the PIL Image to a temporary file so Gradio can display it
-        with tempfile.NamedTemporaryFile(delete=False, suffix=".png") as tmp_file:
-            img.save(tmp_file.name)
-            # Add the image as a file path that Gradio can serve
-            chat_history.append({"role": "assistant", "content": tmp_file.name})
-
-    # Add the status message
     chat_history.append({"role": "assistant", "content": status})
 
-    return chat_history, ""
+    return chat_history, img, ""
 
 # Create Gradio interface with chatbot layout
 with gr.Blocks(title="Image Editing Chatbot") as demo:
     gr.Markdown("# Image Editing Chatbot")
     gr.Markdown("Type a prompt to generate or edit an image using Google's Gemini model")
 
-    # Chatbot display area
+    # Chatbot display area for text conversation
     chatbot = gr.Chatbot(
         label="Chat",
-        height=
+        height=200,
         type="messages", # Explicitly set to 'messages' format
         avatar_images=(None, None) # No avatars for simplicity
     )
 
+    # Image display area
+    image_output = gr.Image(
+        label="Generated Image",
+        height=400
+    )
+
     # Input area
     with gr.Row():
         prompt_input = gr.Textbox(
@@ -118,19 +115,20 @@ with gr.Blocks(title="Image Editing Chatbot") as demo:
 
     # State to maintain chat history
     chat_state = gr.State([])
+    image_state = gr.State(None)
 
     # Connect the button to the chat handler
     run_btn.click(
         fn=chat_handler,
-        inputs=[prompt_input, chat_state],
-        outputs=[chatbot, prompt_input]
+        inputs=[prompt_input, chat_state, image_state],
+        outputs=[chatbot, image_output, prompt_input]
     )
 
     # Also allow Enter key to submit
     prompt_input.submit(
         fn=chat_handler,
-        inputs=[prompt_input, chat_state],
-        outputs=[chatbot, prompt_input]
+        inputs=[prompt_input, chat_state, image_state],
+        outputs=[chatbot, image_output, prompt_input]
     )
 
 if __name__ == "__main__":
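For context, a minimal, self-contained sketch of the wiring this commit ends up with: chat_handler keeps only text in the chatbot and returns the PIL image separately so it lands in a dedicated gr.Image component, which is why the tempfile workaround is removed. This is not the full app: generate_image is stubbed here with a placeholder solid-color image instead of the real google-genai call, and run_btn is assumed to be a gr.Button like in the surrounding layout.

import gradio as gr
from PIL import Image


def generate_image(prompt, output_filename="generated_image"):
    # Illustrative stub: return a solid-color image instead of calling Gemini.
    img = Image.new("RGB", (256, 256), color="steelblue")
    filename = f"{output_filename}.png"
    img.save(filename)
    return img, f"Image saved as {filename}"


def chat_handler(user_input, chat_history, current_image):
    # current_image mirrors the commit's new signature; this sketch doesn't use it.
    chat_history.append({"role": "user", "content": user_input})
    img, status = generate_image(user_input)
    # Only the status text goes into the chat; the image is returned separately.
    chat_history.append({"role": "assistant", "content": status})
    return chat_history, img, ""


with gr.Blocks(title="Image Editing Chatbot") as demo:
    chatbot = gr.Chatbot(label="Chat", type="messages", height=200)
    image_output = gr.Image(label="Generated Image", height=400)
    prompt_input = gr.Textbox(label="Prompt")
    run_btn = gr.Button("Run")

    chat_state = gr.State([])
    image_state = gr.State(None)

    # Both triggers route the image to image_output and clear the textbox.
    run_btn.click(
        fn=chat_handler,
        inputs=[prompt_input, chat_state, image_state],
        outputs=[chatbot, image_output, prompt_input],
    )
    prompt_input.submit(
        fn=chat_handler,
        inputs=[prompt_input, chat_state, image_state],
        outputs=[chatbot, image_output, prompt_input],
    )

if __name__ == "__main__":
    demo.launch()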