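"""
Gradio Space for batch image style transfer.

The UI collects one input image and one or more style images, then repeatedly calls a
backend "change-image-style" tool that extracts a style/edit prompt with the selected
model and returns a restyled image URL. Results stream back into the Markdown panel
while the batch runs. (Docstring added as a summary of the code below.)
"""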
import gradio as gr
import requests

# Gradio file-serving prefix: local file paths appended to it become URLs the backend can fetch.
prefix = "https://smartfeed-custom-tools.hf.space/gradio_api/file="

# Model tags shown in the dropdown, and their mapping to backend model identifiers.
extract_model_tag_list = [
    "o3(chat)",
    "o3(responses)",
    "o3(poe)",
    "gpt-5(chat)",
    "gpt-5(responses)",
    "o4-mini(openrouter)",
    "o4-mini-high(openrouter)",
]
extract_model_map = {
    "o3(chat)": "o3-2025-04-16",
    "o3(responses)": "o3-2025-04-16(responses)",
    "o3(poe)": "o3(poe)",
    "gpt-5(chat)": "gpt-5-2025-08-07",
    "gpt-5(responses)": "gpt-5-2025-08-07(responses)",
    "o4-mini(openrouter)": "openai/o4-mini",
    "o4-mini-high(openrouter)": "openai/o4-mini-high",
}
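
# For example, the dropdown tag "gpt-5(chat)" maps to the backend model id
# "gpt-5-2025-08-07"; unknown tags fall back to "o3" in change_image_style below.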

def change_image_style(
    image_url,
    style_image_url,
    edit_prompt,
    extract_model_tag,
    extract_prompt,
    prompt_prefix,
):
    """Call the backend style-transfer tool; return (result_image_url, edit_prompt, error_msg)."""
    extract_model = extract_model_map.get(extract_model_tag, "o3")
    data = {
        "image_url": image_url,
        "style_image_url": style_image_url,
        "edit_prompt": edit_prompt,
        "extract_model": extract_model,
        "extract_prompt": extract_prompt,
        "prompt_prefix": prompt_prefix,
    }
    try:
        response = requests.post(
            "http://3.20.241.172/tools/change-image-style",
            json=data,
            headers={"content-type": "application/json"},
            timeout=600,  # image generation can be slow; avoid hanging indefinitely
        )
    except Exception as e:
        error_msg = f"Error occurred while requesting backend tool: {e}"
        return "", "", error_msg
    if response.status_code == 200:
        data = response.json().get("data") or {}
        resultImageUrl = data.get("image_url", "")
        edit_prompt = data.get("edit_prompt") or ""
        # Replace newlines with <br> so the prompt renders inside the Markdown output.
        edit_prompt = edit_prompt.replace("\n", "<br>")
        return resultImageUrl, edit_prompt, ""
    else:
        error_msg = f"Error: {response.status_code} - {response.text}"
        return "", "", error_msg

def generate_image(
    input_image,
    style_images,
    prompt_count: int,
    image_count: int,
    extract_model,
    extract_prompt,
    prompt_prefix,
):
    if not input_image:
        raise gr.Error("Please upload an input image! Refer to step 1️⃣")
    if style_images is None:
        raise gr.Error("Cannot find any style image! Please refer to step 1️⃣")
    style_size = len(style_images)
    if style_size < 1:
        raise gr.Error("Please upload at least one style image! Refer to step 1️⃣")

    total_count = style_size * prompt_count * image_count
    completed_count = 0
    success_count = 0
    error_count = 0
    # Expose the local temp file through the Space's file endpoint so the backend can fetch it.
    inputImageUrl = prefix + input_image
    result_image_list = []
    error_list = []
    markdownStr = ""

    for style_index in range(style_size):
        style_image = style_images[style_index]
        if not style_image:
            error_list.append(f"Style image {style_index + 1} is empty.")
            continue
        for prompt_index in range(prompt_count):
            old_edit_prompt = ""
            for image_index in range(image_count):
                completed_count += 1
                # Gallery items are (filepath, caption) pairs; the path is the first element.
                styleImageUrl = prefix + style_image[0]
                result_image, edit_prompt, error_msg = change_image_style(
                    inputImageUrl, styleImageUrl, old_edit_prompt, extract_model, extract_prompt, prompt_prefix
                )
                if result_image:
                    success_count += 1
                    result_image_list.append(result_image)
                    if old_edit_prompt:
                        # Reusing a previously extracted prompt: append only the new image.
                        markdownStr += f"<img src='{result_image}' style='zoom: 33%;' />"
                    else:
                        # First image for this prompt: show the extracted prompt alongside the image.
                        markdownStr += f"{edit_prompt}<img src='{result_image}' style='zoom: 33%;' />"
                    old_edit_prompt = edit_prompt
                else:
                    error_count += 1
                    error_list.append(
                        f"Error processing style image {style_index + 1}, "
                        f"prompt {prompt_index + 1}, image {image_index + 1}: {error_msg}"
                    )
                status = "processing" if completed_count < total_count else "completed"
                status_message = f"Status: {status} ({completed_count}/{total_count}) success: {success_count}, error: {error_count}"
                yield status_message, "\n".join(error_list), markdownStr
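
# Note: generate_image above is a generator; each yield updates the status, error, and
# Markdown outputs in place, so Gradio streams progress to the UI after every backend call.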

def swap_to_gallery(images):
    # Show the uploaded style images in the gallery and hide the file uploader.
    return gr.update(value=images, visible=True), gr.update(visible=True), gr.update(visible=False)


def remove_back_to_files():
    # Hide the gallery and clear button, and show the file uploader again.
    return gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            files = gr.File(
                label="Drag (Select) 1 or more style images",
                file_types=["image"],
                file_count="multiple",
            )
            uploaded_files = gr.Gallery(label="Your images", visible=False, columns=5, rows=1, height=200)
            with gr.Column(visible=False) as clear_button:
                remove_and_reupload = gr.ClearButton(value="Remove and upload new ones", components=files, size="sm")
            input_image = gr.Image(label="Input Image", type="filepath", interactive=True)
            with gr.Row():
                with gr.Column():
                    prompt_count = gr.Number(label="Prompt Count", value=4, precision=0)
                with gr.Column():
                    image_count = gr.Number(label="Image Count", value=3, precision=0)
            extract_model = gr.Dropdown(choices=extract_model_tag_list, label="Extract Model", value="o3(chat)")
            extract_prompt = gr.Textbox(
                lines=2,
                label="Extracted Prompt",
                value="Accurately extract the hairstyle and makeup from this photo into a prompt especially excluding the backgrounds. And Turn to the side, wearing deep blue Off-shoulder Knitted top. I want to replicate it perfectly. need to be a complete paragraph",
            )
            prompt_prefix = gr.Textbox(lines=1, label="Prompt Prefix", value="change the backgrounds to match this:")
            submit = gr.Button("Submit")
        with gr.Column():
            status_text = gr.Textbox(label="Status", show_label=False, interactive=False)
            error_text = gr.Textbox(label="Error List", show_label=False, interactive=False)
            markdown = gr.Markdown(label="Generated Images")

    files.upload(fn=swap_to_gallery, inputs=files, outputs=[uploaded_files, clear_button, files])
    remove_and_reupload.click(fn=remove_back_to_files, outputs=[uploaded_files, clear_button, files])
    submit.click(
        fn=generate_image,
        inputs=[input_image, uploaded_files, prompt_count, image_count, extract_model, extract_prompt, prompt_prefix],
        outputs=[status_text, error_text, markdown],
    )

demo.launch()