# Copyright (2024) Bytedance Ltd. and/or its affiliates
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations

import random

import gradio as gr
from loguru import logger
from PIL import Image

from caller import (
    SeedT2ICaller,
    SeedEditCaller,
)
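
# Note: SeedT2ICaller and SeedEditCaller are thin client wrappers defined in
# caller.py. The interface this file relies on (inferred from the call sites
# below, not from caller.py itself) is:
#   SeedT2ICaller(cfg).generate(prompt, batch_size=..., cfg_scale=...) -> (image, success)
#   SeedEditCaller(cfg).edit(image, prompt, batch_size=..., cfg_scale=...) -> (image, success)
# where image is a PIL.Image (or None) and success is a bool indicating whether
# the underlying API call succeeded.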

help_text = """
## How to use this Demo

Step 1. Type in the caption/instruction text box, then click "Generate" to generate an initial image using Seed-T2I.

Step 2. Type in the caption/instruction text box, then click "Edit" to edit the current image using Seed-Edit.

This is a demo with limited QPS and a simple interface. \
For a better experience, please use the [Doubao](https://www.doubao.com/chat/) or [Dreamina](https://dreamina.capcut.com/ai-tool/image/generate) apps.

- The demo does not support multi-round editing; repeated rounds of downloading and re-uploading edits may lead to overexposure.
- Higher-quality input images produce higher-quality edited results. For low-quality images, unwanted changes (e.g. to facial identity) may occur.

<font size=2>Note: This demo is governed by the CC BY-NC license. \
We strongly advise users not to knowingly generate, or allow others to knowingly generate, harmful content, \
including hate speech, violence, pornography, deception, etc. \
(注:本演示受CC BY-NC的许可协议限制。我们强烈建议,用户不应传播及不应允许他人传播以下内容,\
包括但不限于仇恨言论、暴力、色情、欺诈相关的有害信息。)</font>
"""

example_instructions = [
    "Make it a Picasso painting",
    "close its eye",
    "convert to a bronze statue",
    "make it wearing a hat",
    "make it wearing a PhD suit",
    "Turn it into an anime.",
    "have it look like a graphic novel",
    "make it gain weight",
    "what would he look like bald?",
    "Have it smile",
    "Put in a cocktail party.",
    "move to the beach.",
    "add dramatic lighting",
    "Convert to black and white",
    "What if it were snowing?",
    "Give a leather jacket",
    "Turn into a cyborg!",
]
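
# "Load Example" picks one of these instructions at random and applies it to the
# bundled test image (see load_example below).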


def main():
    resolution = 1024
    # Model configuration; resolution is the generated/edited image size.
    cfg = {"resolution": resolution}
    model_t2i = SeedT2ICaller(cfg)
    cfg_edit = {}
    model_edit = SeedEditCaller(cfg_edit)
    logger.info("All models loaded")

    def load_example():
        # Pair the bundled test image with a randomly chosen example instruction,
        # then run one edit pass so the output panel is populated as well.
        example_image = Image.open("uni_test/test.jpg").convert("RGB")
        example_instruction = random.choice(example_instructions)
        edited_image, example_instruction = generate(example_image,
                                                     example_instruction,
                                                     cfg_scale=0.5)
        return example_image, example_instruction, edited_image

    def generate_t2i(instruction: str, cfg_scale: float = 0.5):
        if not instruction:
            return None, ""
        logger.info("Generating images ...")
        # Call the model and capture the success flag alongside the image.
        gen_image, success = model_t2i.generate(instruction, batch_size=1, cfg_scale=cfg_scale)
        if not success or gen_image is None:
            logger.error("Image generation failed or returned None; please retry.")
            return None, instruction
        return gen_image, instruction

    def generate(input_image: Image.Image, instruction: str | None = None, cfg_scale: float = 0.5):
        logger.info("Generating images ...")
        if not instruction or input_image is None:
            return input_image, ""
        logger.info("Running diffusion models ...")
        edited_image, success = model_edit.edit(input_image, instruction, batch_size=1, cfg_scale=cfg_scale)
        if not success or edited_image is None:
            logger.error("Image editing failed or returned None; please retry.")
            return None, instruction
        return edited_image, instruction

    def reset():
        # Clear both image panels and the instruction box.
        return None, None, ""

    with gr.Blocks(css="footer {visibility: hidden}") as demo:
        # Control buttons.
        with gr.Row():
            with gr.Column(scale=1, min_width=100):
                generate_button = gr.Button("Generate")
            with gr.Column(scale=1, min_width=100):
                edit_button = gr.Button("Edit")
            with gr.Column(scale=1, min_width=100):
                load_button = gr.Button("Load Example")
            with gr.Column(scale=1, min_width=100):
                reset_button = gr.Button("Reset")
        # Prompt box and guidance-strength slider (shared by generation and editing).
        with gr.Row():
            with gr.Column(scale=3):
                instruction = gr.Textbox(lines=1, label="Caption/Edit Instruction", interactive=True, value=None)
            with gr.Column(scale=1):
                cfg_scale = gr.Slider(value=0.5, minimum=0.0, maximum=1.0, step=0.1,
                                      label="Edit/Text Strength (CFG)", interactive=True)
        # Input and output image panels.
        with gr.Row():
            input_image = gr.Image(label="Input Image", type="pil", interactive=True,
                                   height=resolution, width=resolution)
            edited_image = gr.Image(label="Edited Image", type="pil", interactive=False,
                                    height=resolution, width=resolution)
        gr.Markdown(help_text)

        load_button.click(
            fn=load_example,
            inputs=[],
            outputs=[input_image, instruction, edited_image],
        )
        generate_button.click(
            fn=generate_t2i,
            inputs=[instruction, cfg_scale],
            outputs=[input_image, instruction],
        )
        edit_button.click(
            fn=generate,
            inputs=[input_image, instruction, cfg_scale],
            outputs=[edited_image, instruction],
        )
        reset_button.click(
            fn=reset,
            inputs=[],
            outputs=[input_image, edited_image, instruction],
        )

    # demo.launch(server_name="0.0.0.0", server_port=8024)
    demo.queue().launch(share=False)


if __name__ == "__main__":
    main()
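
# A minimal way to run this demo locally, assuming this file is saved as app.py
# and that gradio, loguru, Pillow, and the caller.py dependencies are installed:
#
#   python app.py
#
# Gradio then serves the UI at its default address (http://127.0.0.1:7860) unless
# server_name/server_port are passed to launch(), as in the commented-out line above.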