# MIT License # (see original notice and terms) import os import types import zipfile import importlib from typing import * import gradio as gr import numpy as np import torch import tempfile # ---- Force CPU-only environment globally ---- os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # hide GPUs from torch os.environ.setdefault("ATTN_BACKEND", "sdpa") # avoid xformers path os.environ.setdefault("SPCONV_ALGO", "native") # safe sparseconv algo # --------------------------------------------- # --------------------------------------------------------------------------- # Ensure bundled hi3dgen sources are available (extracted from hi3dgen.zip) # --------------------------------------------------------------------------- def _ensure_hi3dgen_available(): pkg_name = 'hi3dgen' here = os.path.dirname(__file__) pkg_dir = os.path.join(here, pkg_name) if os.path.isdir(pkg_dir): return archive_path = os.path.join(here, f"{pkg_name}.zip") if not os.path.isfile(archive_path): raise FileNotFoundError( f"Required archive {archive_path} is missing. Upload hi3dgen.zip next to app.py." ) try: with zipfile.ZipFile(archive_path, 'r') as zf: zf.extractall(here) except Exception as e: raise RuntimeError(f"Failed to extract {archive_path}: {e}") _ensure_hi3dgen_available() # --------------------------------------------------------------------------- # xformers stub (CPU-friendly fallback for xformers.ops.memory_efficient_attention) # --------------------------------------------------------------------------- def _ensure_xformers_stub(): import sys if 'xformers.ops' in sys.modules: return import torch.nn.functional as F xf_mod = types.ModuleType('xformers') ops_mod = types.ModuleType('xformers.ops') def memory_efficient_attention(query, key, value, attn_bias=None): # SDPA fallback return F.scaled_dot_product_attention(query, key, value, attn_bias) ops_mod.memory_efficient_attention = memory_efficient_attention xf_mod.ops = ops_mod sys.modules['xformers'] = xf_mod sys.modules['xformers.ops'] = ops_mod _ensure_xformers_stub() # --------------------------------------------------------------------------- # Patch CUDA hotspots to CPU **BEFORE** importing the pipeline # --------------------------------------------------------------------------- print("[PATCH] Applying CPU monkey-patches to hi3dgen") # 1) utils_cube.construct_dense_grid(..., device=...) 
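# Optional smoke test for the stub above (an illustrative sketch, not part of
# the hi3dgen pipeline). One layout caveat worth knowing: SDPA expects
# (batch, heads, seq, head_dim), whereas real xformers takes
# (batch, seq, heads, head_dim), so the pass-through stub is only equivalent
# for callers that already use the SDPA layout.
def _smoke_test_xformers_stub():
    import xformers.ops  # resolves to the stub registered in sys.modules
    q = k = v = torch.randn(1, 2, 4, 8)  # (batch, heads, seq, head_dim)
    out = xformers.ops.memory_efficient_attention(q, k, v)
    assert out.shape == q.shape
# Call _smoke_test_xformers_stub() manually when debugging the fallback path.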
# ---------------------------------------------------------------------------
# Patch CUDA hotspots to CPU **BEFORE** importing the pipeline
# ---------------------------------------------------------------------------
print("[PATCH] Applying CPU monkey-patches to hi3dgen")

# 1) utils_cube.construct_dense_grid(..., device=...) -> force CPU
uc = importlib.import_module("hi3dgen.representations.mesh.utils_cube")
if not hasattr(uc, "_CPU_PATCHED"):
    _uc_orig_construct_dense_grid = uc.construct_dense_grid

    def _construct_dense_grid_cpu(res, device=None):
        # ignore any requested device, always CPU
        return _uc_orig_construct_dense_grid(res, device="cpu")

    uc.construct_dense_grid = _construct_dense_grid_cpu
    uc._CPU_PATCHED = True
    print("[PATCH] utils_cube.construct_dense_grid -> CPU")
# Re-read the (now patched) symbol so step 3 works even when the block above
# was skipped because a previous import already applied the patch.
_construct_dense_grid_cpu = uc.construct_dense_grid

# 2) cube2mesh.EnhancedMarchingCubes default device -> force CPU (flexible)
cm = importlib.import_module("hi3dgen.representations.mesh.cube2mesh")
M = cm.EnhancedMarchingCubes
if not hasattr(M, "_CPU_PATCHED"):
    _orig_init = M.__init__

    def _init_cpu(self, *args, **kwargs):
        # ensure device ends up on CPU however it is passed as a keyword
        kwargs["device"] = torch.device("cpu")
        return _orig_init(self, *args, **kwargs)

    M.__init__ = _init_cpu
    M._CPU_PATCHED = True
    print("[PATCH] cube2mesh.EnhancedMarchingCubes.__init__ -> CPU (flex)")

# 3) IMPORTANT: cube2mesh does "from .utils_cube import construct_dense_grid",
#    so we must override the BOUND symbol inside cube2mesh as well.
if getattr(cm, "construct_dense_grid", None) is not _construct_dense_grid_cpu:
    cm.construct_dense_grid = _construct_dense_grid_cpu
    print("[PATCH] cube2mesh.construct_dense_grid (bound name) -> CPU")

# 4) Belt & suspenders: coerce torch.arange(device='cuda') to CPU if anything
#    slips through
if not hasattr(torch, "_ARANGE_CPU_PATCHED"):
    _orig_arange = torch.arange

    def _arange_cpu(*args, **kwargs):
        dev = kwargs.get("device", None)
        if dev is not None and str(dev).startswith("cuda"):
            kwargs["device"] = "cpu"
        return _orig_arange(*args, **kwargs)

    torch.arange = _arange_cpu
    torch._ARANGE_CPU_PATCHED = True
    print("[PATCH] torch.arange(device='cuda') -> CPU")

# ---------------------------------------------------------------------------
# Now import pipeline (AFTER patches so bound names are already overridden)
# ---------------------------------------------------------------------------
from hi3dgen.pipelines import Hi3DGenPipeline
import trimesh

MAX_SEED = np.iinfo(np.int32).max
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
WEIGHTS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'weights')
os.makedirs(TMP_DIR, exist_ok=True)
os.makedirs(WEIGHTS_DIR, exist_ok=True)

# ---------------------------------------------------------------------------
# Weights caching
# ---------------------------------------------------------------------------
def cache_weights(weights_dir: str) -> dict:
    from huggingface_hub import snapshot_download
    os.makedirs(weights_dir, exist_ok=True)
    model_ids = [
        "Stable-X/trellis-normal-v0-1",
        "Stable-X/yoso-normal-v1-8-1",
        "ZhengPeng7/BiRefNet",
    ]
    cached_paths = {}
    for model_id in model_ids:
        print(f"Caching weights for: {model_id}")
        local_path = os.path.join(weights_dir, model_id.split("/")[-1])
        if os.path.exists(local_path):
            print(f"Already cached at: {local_path}")
            cached_paths[model_id] = local_path
            continue
        print(f"Downloading and caching model: {model_id}")
        local_path = snapshot_download(
            repo_id=model_id,
            local_dir=local_path,
            force_download=False,
        )
        cached_paths[model_id] = local_path
        print(f"Cached at: {local_path}")
    return cached_paths
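# Optional eager warm-up (an illustrative sketch): populating the cache at
# import time keeps the first generation request from stalling on downloads.
# The HI3DGEN_PRECACHE flag is hypothetical, not something the original app
# or huggingface_hub defines; the remainder of this file may already call
# cache_weights() at startup.
if os.environ.get("HI3DGEN_PRECACHE") == "1":
    WEIGHT_PATHS = cache_weights(WEIGHTS_DIR)
    # -> {"Stable-X/trellis-normal-v0-1": ".../weights/trellis-normal-v0-1", ...}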
# ---------------------------------------------------------------------------
# Pre/Post processing and generation
# ---------------------------------------------------------------------------
def preprocess_mesh(mesh_prompt):
    print("Processing mesh")
    trimesh_mesh = trimesh.load_mesh(mesh_prompt)
    out_path = mesh_prompt + '.glb'
    trimesh_mesh.export(out_path)
    return out_path

def preprocess_image(image):
    if image is None:
        return None
    return hi3dgen_pipeline.preprocess_image(image, resolution=1024)

def generate_3d(
    image,
    seed: int = -1,
    ss_guidance_strength: float = 3,
    ss_sampling_steps: int = 50,
    slat_guidance_strength: float = 3,
    slat_sampling_steps: int = 6,
):
    # NOTE: relies on the module-level globals `hi3dgen_pipeline` and
    # `normal_predictor` being initialized before the UI invokes this.
    if image is None:
        return None, None, None
    if seed == -1:
        seed = np.random.randint(0, MAX_SEED)
    image = hi3dgen_pipeline.preprocess_image(image, resolution=1024)
    normal_image = normal_predictor(
        image, resolution=768, match_input_resolution=True, data_type='object'
    )
    outputs = hi3dgen_pipeline.run(
        normal_image,
        seed=seed,
        formats=["mesh"],
        preprocess_image=False,
        sparse_structure_sampler_params={
            "steps": ss_sampling_steps,
            "cfg_strength": ss_guidance_strength,
        },
        slat_sampler_params={
            "steps": slat_sampling_steps,
            "cfg_strength": slat_guidance_strength,
        },
    )
    generated_mesh = outputs['mesh'][0]

    import datetime
    output_id = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    os.makedirs(os.path.join(TMP_DIR, output_id), exist_ok=True)
    mesh_path = os.path.join(TMP_DIR, output_id, "mesh.glb")
    trimesh_mesh = generated_mesh.to_trimesh(transform_pose=True)
    trimesh_mesh.export(mesh_path)

    return normal_image, mesh_path, mesh_path

def convert_mesh(mesh_path, export_format):
    if not mesh_path:
        return None
    temp_file = tempfile.NamedTemporaryFile(suffix=f".{export_format}", delete=False)
    temp_file_path = temp_file.name
    mesh = trimesh.load_mesh(mesh_path)
    mesh.export(temp_file_path)
    return temp_file_path
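# generate_3d() and preprocess_image() depend on two globals that must exist
# before demo.launch(). The sketch below shows one plausible CPU wiring; the
# checkpoint path and the torch.hub repo/arguments are taken from the upstream
# Hi3DGen demo as assumptions, and may differ from what the rest of this file
# actually does, so it is left commented out.
#
#   hi3dgen_pipeline = Hi3DGenPipeline.from_pretrained(
#       os.path.join(WEIGHTS_DIR, "trellis-normal-v0-1"))
#   normal_predictor = torch.hub.load(
#       "hugoycj/StableNormal", "StableNormal_turbo",
#       trust_repo=True, yoso_version="yoso-normal-v1-8-1")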
# ---------------------------------------------------------------------------
# UI
# ---------------------------------------------------------------------------
with gr.Blocks(css="footer {visibility: hidden}") as demo:
    gr.Markdown(
        """
        V0.1, introduced by GAP Lab (CUHKSZ) and Game-AIGC Team (ByteDance)
        """
    )
""" ) with gr.Row(): gr.Markdown(""" """) with gr.Row(): with gr.Column(scale=1): with gr.Tabs(): with gr.Tab("Single Image"): with gr.Row(): image_prompt = gr.Image(label="Image Prompt", image_mode="RGBA", type="pil") normal_output = gr.Image(label="Normal Bridge", image_mode="RGBA", type="pil") with gr.Tab("Multiple Images"): gr.Markdown( "