Spaces: lionelgarnier (Space status: Runtime error)

Commit 6c80f3e · committed by lionelgarnier
Parent(s): 67b0366

refactor prompt refinement and interface output handling for improved clarity

app.py: CHANGED

@@ -111,6 +111,11 @@ def refine_prompt(prompt, system_prompt=DEFAULT_SYSTEM_PROMPT, progress=gr.Progr
         if not assistant_messages:
             return "", "Error: No assistant response found"
         assistant_content = assistant_messages[-1]['content']
+
+        # Remove quotation marks at the beginning and end
+        if assistant_content.startswith('"') and assistant_content.endswith('"'):
+            assistant_content = assistant_content[1:-1]
+
         return assistant_content, "Prompt refined successfully!"
     except (KeyError, IndexError):
         return "", "Error: Unexpected response format from the model"

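The five added lines strip exactly one pair of surrounding double quotes from the model's reply before it is returned. A minimal standalone sketch of the same behaviour (the helper name strip_wrapping_quotes is illustrative, not part of app.py):

    def strip_wrapping_quotes(text: str) -> str:
        # Same check as the lines added to refine_prompt: drop one pair of
        # surrounding double quotes, leave everything else untouched.
        if text.startswith('"') and text.endswith('"'):
            return text[1:-1]
        return text

    assert strip_wrapping_quotes('"a sleek ceramic kettle"') == 'a sleek ceramic kettle'
    assert strip_wrapping_quotes('no quotes here') == 'no quotes here'
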
@@ -206,10 +211,10 @@ def process_example_pipeline(example_prompt, system_prompt=DEFAULT_SYSTEM_PROMPT
     refined, status = refine_prompt(example_prompt, system_prompt, progress)
 
     if not refined:
-        return
+        return "", "Failed to refine prompt: " + status
 
     # Return only the refined prompt and status - don't generate image
-    return
+    return refined, "Prompt refined successfully!"
 
 def create_interface():
     # Preload models if needed

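Both branches of process_example_pipeline now return a (text, status) pair instead of a bare return, which keeps the function's output shape aligned with the two Gradio components it later feeds (refined_prompt and message_box). A self-contained sketch of that contract, with a dummy refine_prompt standing in for the real Mistral call:

    def refine_prompt(prompt, system_prompt=None, progress=None):
        # Dummy stand-in: the real function queries Mistral-7B-Instruct-v0.3.
        if not prompt:
            return "", "Error: No assistant response found"
        return f"A detailed product description of {prompt}", "Prompt refined successfully!"

    def process_example_pipeline(example_prompt, system_prompt=None, progress=None):
        refined, status = refine_prompt(example_prompt, system_prompt, progress)
        if not refined:
            return "", "Failed to refine prompt: " + status
        # Return only the refined prompt and status - don't generate an image
        return refined, "Prompt refined successfully!"

    print(process_example_pipeline("a coffee mug"))  # ('A detailed product description of ...', 'Prompt refined successfully!')
    print(process_example_pipeline(""))              # ('', 'Failed to refine prompt: Error: ...')
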
@@ -225,15 +230,13 @@ def create_interface():
         with gr.Column(elem_id="col-container"):
             gr.Markdown("# Text to Product\nUsing Mistral-7B-Instruct-v0.3 + FLUX.1-dev + Trellis")
 
-            [seven removed lines whose content is not captured in this view]
-            )
-            prompt_button = gr.Button("Refine prompt with Mistral")
+            prompt = gr.Text(
+                show_label=False,
+                max_lines=1,
+                placeholder="Enter basic object prompt",
+                container=False,
+            )
+            prompt_button = gr.Button("Refine prompt with Mistral")
 
             refined_prompt = gr.Text(
                 show_label=False,

@@ -242,10 +245,12 @@ def create_interface():
                 container=False,
                 max_length=2048,
             )
-            [removed line not captured in this view]
             visual_button = gr.Button("Create visual with Flux")
+
             generated_image = gr.Image(show_label=False)
-            [removed line not captured in this view]
+            gen3d_button = gr.Button("Create 3D visual with Trellis")
+
+            message_box = gr.Textbox(
                 label="Status Messages",
                 interactive=False,
                 placeholder="Status messages will appear here",

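The two interface hunks above add a prompt box, a refine button, a Trellis button and a shared status textbox to the column. A stripped-down, runnable sketch of the resulting layout, with a placeholder callback in place of the real refine_prompt, infer and Trellis handlers:

    import gradio as gr

    def fake_refine(prompt_text):
        # Placeholder for refine_prompt: returns (refined_text, status_message).
        return f"refined: {prompt_text}", "Prompt refined successfully!"

    with gr.Blocks() as demo:
        with gr.Column(elem_id="col-container"):
            gr.Markdown("# Text to Product")
            prompt = gr.Text(show_label=False, max_lines=1,
                             placeholder="Enter basic object prompt", container=False)
            prompt_button = gr.Button("Refine prompt with Mistral")
            refined_prompt = gr.Text(show_label=False, container=False, max_length=2048)
            visual_button = gr.Button("Create visual with Flux")
            generated_image = gr.Image(show_label=False)
            gen3d_button = gr.Button("Create 3D visual with Trellis")
            message_box = gr.Textbox(label="Status Messages", interactive=False,
                                     placeholder="Status messages will appear here")

        prompt_button.click(fn=fake_refine, inputs=[prompt],
                            outputs=[refined_prompt, message_box])

    # demo.launch()  # uncomment to try the layout locally
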
@@ -293,7 +298,7 @@ def create_interface():
             examples=examples,  # Now just a list of prompts
             fn=process_example_pipeline,
             inputs=[prompt],  # Add system_prompt as input
-            outputs=[
+            outputs=[refined_prompt, message_box],  # Don't output image
             cache_examples=True,
         )
 

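Because cache_examples=True makes Gradio run the example function for every listed prompt when the demo is built and cache the results, the outputs list must have one slot per value returned by process_example_pipeline; that is what the added [refined_prompt, message_box] provides. A minimal sketch of the pattern (the example prompts below are made up):

    import gradio as gr

    def process_example_pipeline(example_prompt):
        # Stand-in returning (refined_text, status_message), like the real pipeline.
        return f"refined: {example_prompt}", "Prompt refined successfully!"

    with gr.Blocks() as demo:
        prompt = gr.Text(show_label=False, placeholder="Enter basic object prompt")
        refined_prompt = gr.Text(show_label=False)
        message_box = gr.Textbox(label="Status Messages", interactive=False)
        gr.Examples(
            examples=["a coffee mug", "a desk lamp"],  # illustrative prompts only
            fn=process_example_pipeline,
            inputs=[prompt],
            outputs=[refined_prompt, message_box],     # one component per returned value
            cache_examples=True,
        )

    # demo.launch()
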
@@ -302,14 +307,14 @@ def create_interface():
             triggers=[prompt_button.click, prompt.submit],
             fn=refine_prompt,
             inputs=[prompt, system_prompt],  # Add system_prompt as input
-            outputs=[refined_prompt,
+            outputs=[refined_prompt, message_box]
         )
 
         gr.on(
             triggers=[visual_button.click],
             fn=infer,
             inputs=[refined_prompt, seed, randomize_seed, width, height, num_inference_steps],
-            outputs=[generated_image,
+            outputs=[generated_image, message_box]
         )
 
     return demo

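gr.on attaches one handler to several triggers at once; here both prompt_button.click and prompt.submit call refine_prompt, and after this commit every handler also writes its status string into message_box. A runnable sketch of that wiring with a dummy handler (infer, seed and the other generation controls from app.py are left out):

    import gradio as gr

    def refine(prompt_text):
        # Dummy stand-in for refine_prompt: returns (refined_text, status_message).
        if not prompt_text:
            return "", "Error: empty prompt"
        return prompt_text.upper(), "Prompt refined successfully!"

    with gr.Blocks() as demo:
        prompt = gr.Text(show_label=False, max_lines=1, placeholder="Enter basic object prompt")
        prompt_button = gr.Button("Refine prompt with Mistral")
        refined_prompt = gr.Text(show_label=False)
        message_box = gr.Textbox(label="Status Messages", interactive=False)

        # One handler, two triggers: clicking the button or pressing Enter in the prompt box.
        gr.on(
            triggers=[prompt_button.click, prompt.submit],
            fn=refine,
            inputs=[prompt],
            outputs=[refined_prompt, message_box],
        )

    # demo.launch()
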