# Hugging Face Space (runs on ZeroGPU): Hunyuan3D-2mini image-to-3D shape generation demo.
import os
import random
import shutil
import uuid
import argparse
from glob import glob
from pathlib import Path

import spaces
import gradio as gr
import torch
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default='tencent/Hunyuan3D-2mini')
parser.add_argument("--subfolder", type=str, default='hunyuan3d-dit-v2-mini-turbo')
parser.add_argument("--texgen_model_path", type=str, default='tencent/Hunyuan3D-2')
parser.add_argument('--port', type=int, default=7860)
parser.add_argument('--host', type=str, default='0.0.0.0')
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--mc_algo', type=str, default='mc')
parser.add_argument('--cache_path', type=str, default='gradio_cache')
parser.add_argument('--enable_t23d', action='store_true')
parser.add_argument('--disable_tex', action='store_true')
parser.add_argument('--enable_flashvdm', action='store_true')
parser.add_argument('--compile', action='store_true')
parser.add_argument('--low_vram_mode', action='store_true')
args = parser.parse_args()
args.enable_flashvdm = True  # FlashVDM is always enabled in this demo, regardless of the CLI flag

SAVE_DIR = args.cache_path
os.makedirs(SAVE_DIR, exist_ok=True)
def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
def gen_save_folder(max_size=200):
    os.makedirs(SAVE_DIR, exist_ok=True)
    # Collect all existing sub-folders
    dirs = [f for f in Path(SAVE_DIR).iterdir() if f.is_dir()]
    # If the number of folders has reached max_size, delete the oldest one
    if len(dirs) >= max_size:
        # Pick the folder with the oldest creation time
        oldest_dir = min(dirs, key=lambda x: x.stat().st_ctime)
        shutil.rmtree(oldest_dir)
        print(f"Removed the oldest folder: {oldest_dir}")
    # Create a new folder named with a fresh uuid
    new_folder = os.path.join(SAVE_DIR, str(uuid.uuid4()))
    os.makedirs(new_folder, exist_ok=True)
    print(f"Created new folder: {new_folder}")
    return new_folder
from hy3dgen.shapegen import FaceReducer, FloaterRemover, DegenerateFaceRemover, MeshSimplifier, \
    Hunyuan3DDiTFlowMatchingPipeline
from hy3dgen.rembg import BackgroundRemover

# Background remover and image-to-3D shape generation pipeline
rmbg_worker = BackgroundRemover()
i23d_worker = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained(
    args.model_path,
    subfolder=args.subfolder,
    use_safetensors=True,
    device=args.device,
)
if args.enable_flashvdm:
    # Fall back to the plain 'mc' algorithm on CPU/MPS devices
    mc_algo = 'mc' if args.device in ['cpu', 'mps'] else args.mc_algo
    i23d_worker.enable_flashvdm(mc_algo=mc_algo)
if args.compile:
    i23d_worker.compile()
@spaces.GPU  # ZeroGPU: request a GPU for the duration of this call
def gen_shape(
    image=None,
    steps=50,
    guidance_scale=7.5,
    seed=1234,
    octree_resolution=256,
    num_chunks=200000,
    target_face_num=10000,
    randomize_seed: bool = False,
    progress=gr.Progress(),
):
    def callback(step_idx, timestep, outputs):
        progress_value = (step_idx + 1.0) / steps
        progress(progress_value, desc=f"Mesh generating, {step_idx + 1}/{steps} steps")

    if image is None:
        raise gr.Error("Please provide an image.")

    seed = int(randomize_seed_fn(seed, randomize_seed))
    octree_resolution = int(octree_resolution)
    save_folder = gen_save_folder()

    # Remove the background before feeding the image to the shape pipeline
    image = rmbg_worker(image.convert('RGB'))

    generator = torch.Generator()
    generator = generator.manual_seed(int(seed))
    outputs = i23d_worker(
        image=image,
        num_inference_steps=steps,
        guidance_scale=guidance_scale,
        generator=generator,
        octree_resolution=octree_resolution,
        num_chunks=num_chunks,
        output_type='mesh',
        callback=callback,
    )
    print(outputs)
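# --- Illustrative sketch (not part of the original app) ---------------------
# gen_shape only prints the pipeline output, so html_export_mesh is never
# updated after a click. One way to wire it up is to convert the output to a
# trimesh.Trimesh (hy3dgen ships export helpers for its mesh outputs; the
# exact conversion call is assumed here, not shown) and return HTML such as
# this hypothetical helper builds. Names and markup below are illustrative.
def export_mesh_html(mesh, save_folder, height=650):
    """Save `mesh` (a trimesh.Trimesh) as GLB and return simple status HTML."""
    glb_path = os.path.join(save_folder, 'mesh.glb')
    mesh.export(glb_path)  # trimesh infers the GLB format from the file suffix
    return (
        f"<div style='height: {height}px; display: flex; "
        f"justify-content: center; align-items: center;'>"
        f"<p>Mesh saved to {glb_path}</p></div>"
    )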
def get_example_img_list():
    print('Loading example img list ...')
    return sorted(glob('./assets/example_images/**/*.png', recursive=True))

example_imgs = get_example_img_list()
HTML_OUTPUT_PLACEHOLDER = """
<div style='height: 650px; width: 100%; border-radius: 8px; border-color: #e5e7eb; border-style: solid; border-width: 1px; display: flex; justify-content: center; align-items: center;'>
  <div style='text-align: center; font-size: 16px; color: #6b7280;'>
    <p style="color: #8d8d8d;">No mesh here.</p>
  </div>
</div>
"""
MAX_SEED = int(1e7)  # random.randint requires integer bounds

title = "## Image to 3D"
description = "A lightweight image-to-3D converter."
with gr.Blocks().queue() as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Row():
        with gr.Column(scale=3):
            gr.Markdown("#### Image Prompt")
            image = gr.Image(sources=["upload"], label='Image', type='pil', image_mode='RGBA', height=290)
            gen_button = gr.Button(value='Generate Shape', variant='primary')

            with gr.Accordion("Advanced Options", open=False):
                with gr.Column():
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=1234,
                        min_width=100,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                with gr.Column():
                    num_steps = gr.Slider(maximum=100, minimum=1, value=5, step=1, label='Inference Steps')
                    octree_resolution = gr.Slider(maximum=512, minimum=16, value=256, label='Octree Resolution')
                with gr.Column():
                    cfg_scale = gr.Slider(maximum=20.0, minimum=1.0, value=5.5, step=0.1, label='Guidance Scale')
                    num_chunks = gr.Slider(maximum=5000000, minimum=1000, value=8000, label='Number of Chunks')
                    target_face_num = gr.Slider(maximum=1000000, minimum=100, value=10000, label='Target Face Number')

        with gr.Column(scale=6):
            gr.Markdown("#### Generated Mesh")
            html_export_mesh = gr.HTML(HTML_OUTPUT_PLACEHOLDER, label='Output')

        with gr.Column(scale=3):
            gr.Markdown("#### Image Examples")
            gr.Examples(examples=example_imgs, inputs=[image],
                        label=None, examples_per_page=18)
    gen_button.click(
        fn=gen_shape,
        inputs=[image, num_steps, cfg_scale, seed, octree_resolution, num_chunks, target_face_num, randomize_seed],
        outputs=[html_export_mesh],
    )

demo.launch(server_name=args.host, server_port=args.port)
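# Example local invocation (assumes this file is saved as app.py and the
# hy3dgen package plus model weights are available):
#   python app.py --device cuda --compile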