andrew3d committed
Commit 3a704bc · verified · 1 Parent(s): e57cb55

Update app.py

Files changed (1)
  1. app.py +35 -20
app.py CHANGED
@@ -1,8 +1,16 @@
 # MIT License
 # (see original notice and terms)
 
-import gradio as gr
 import os
+import types
+import zipfile
+import importlib
+from typing import *
+
+import gradio as gr
+import numpy as np
+import torch
+import tempfile
 
 # ---- Force CPU-only environment globally ----
 os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # hide GPUs from torch
@@ -10,14 +18,6 @@ os.environ.setdefault("ATTN_BACKEND", "sdpa") # avoid xformers path
 os.environ.setdefault("SPCONV_ALGO", "native") # safe sparseconv algo
 # ---------------------------------------------
 
-from typing import *
-import torch
-import numpy as np
-import tempfile
-import zipfile
-import types
-import importlib
-
 # ---------------------------------------------------------------------------
 # Ensure bundled hi3dgen sources are available (extracted from hi3dgen.zip)
 # ---------------------------------------------------------------------------
@@ -52,6 +52,7 @@ def _ensure_xformers_stub():
     ops_mod = types.ModuleType('xformers.ops')
 
     def memory_efficient_attention(query, key, value, attn_bias=None):
+        # SDPA fallback
         return F.scaled_dot_product_attention(query, key, value, attn_bias)
 
     ops_mod.memory_efficient_attention = memory_efficient_attention
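
This hunk only adds a comment inside `_ensure_xformers_stub()`; the rest of the helper sits outside the diff. For context, here is a hedged sketch of the pattern such a stub typically follows. The `sys.modules` registration details are an assumption, not taken from app.py:

```python
# Hypothetical sketch of an xformers stub; the real _ensure_xformers_stub()
# in app.py is only partially visible in this diff.
import sys
import types

import torch.nn.functional as F


def _ensure_xformers_stub():
    if "xformers" in sys.modules:  # real or already-stubbed: nothing to do
        return

    xformers_mod = types.ModuleType("xformers")
    ops_mod = types.ModuleType("xformers.ops")

    def memory_efficient_attention(query, key, value, attn_bias=None):
        # SDPA fallback: attn_bias is forwarded as SDPA's attn_mask
        return F.scaled_dot_product_attention(query, key, value, attn_bias)

    ops_mod.memory_efficient_attention = memory_efficient_attention
    xformers_mod.ops = ops_mod

    # Register the stubs so later "import xformers.ops" resolves to them.
    sys.modules["xformers"] = xformers_mod
    sys.modules["xformers.ops"] = ops_mod
```
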
@@ -62,20 +63,19 @@ def _ensure_xformers_stub():
 _ensure_xformers_stub()
 
 # ---------------------------------------------------------------------------
-# Import pipeline AFTER stubbing xformers, then patch CUDA-hotspots to CPU
+# Patch CUDA hotspots to CPU **BEFORE** importing the pipeline
 # ---------------------------------------------------------------------------
-from hi3dgen.pipelines import Hi3DGenPipeline
-import trimesh
-
-# ---- Force CPU inside hi3dgen (avoid any CUDA paths) ----
 print("[PATCH] Applying CPU monkey-patches to hi3dgen")
 
 # 1) utils_cube.construct_dense_grid(..., device=...) -> force CPU
 uc = importlib.import_module("hi3dgen.representations.mesh.utils_cube")
 if not hasattr(uc, "_CPU_PATCHED"):
-    _orig_construct_dense_grid = uc.construct_dense_grid
+    _uc_orig_construct_dense_grid = uc.construct_dense_grid
+
     def _construct_dense_grid_cpu(res, device=None):
-        return _orig_construct_dense_grid(res, device="cpu")
+        # ignore any requested device, always CPU
+        return _uc_orig_construct_dense_grid(res, device="cpu")
+
     uc.construct_dense_grid = _construct_dense_grid_cpu
     uc._CPU_PATCHED = True
     print("[PATCH] utils_cube.construct_dense_grid -> CPU")
@@ -85,29 +85,44 @@ cm = importlib.import_module("hi3dgen.representations.mesh.cube2mesh")
 M = cm.EnhancedMarchingCubes
 if not hasattr(M, "_CPU_PATCHED"):
     _orig_init = M.__init__
+
     def _init_cpu(self, *args, **kwargs):
-        # Ensure device is CPU regardless of how it's passed
+        # ensure device ends up on CPU regardless of how it's passed
        if "device" in kwargs:
             kwargs["device"] = torch.device("cpu")
         else:
             kwargs.setdefault("device", torch.device("cpu"))
         return _orig_init(self, *args, **kwargs)
+
     M.__init__ = _init_cpu
     M._CPU_PATCHED = True
     print("[PATCH] cube2mesh.EnhancedMarchingCubes.__init__ -> CPU (flex)")
 
-# 3) Belt & suspenders: coerce torch.arange(device='cuda') to CPU if any call slips through
+# 3) IMPORTANT: cube2mesh does "from .utils_cube import construct_dense_grid"
+#    so we must override the BOUND symbol inside cube2mesh as well.
+if getattr(cm, "construct_dense_grid", None) is not _construct_dense_grid_cpu:
+    cm.construct_dense_grid = _construct_dense_grid_cpu
+    print("[PATCH] cube2mesh.construct_dense_grid (bound name) -> CPU")
+
+# 4) Belt & suspenders: coerce torch.arange(device='cuda') to CPU if anything slips through
 if not hasattr(torch, "_ARANGE_CPU_PATCHED"):
     _orig_arange = torch.arange
+
     def _arange_cpu(*args, **kwargs):
         dev = kwargs.get("device", None)
         if dev is not None and str(dev).startswith("cuda"):
             kwargs["device"] = "cpu"
         return _orig_arange(*args, **kwargs)
+
     torch.arange = _arange_cpu
     torch._ARANGE_CPU_PATCHED = True
     print("[PATCH] torch.arange(device='cuda') -> CPU")
-# ----------------------------------------------------------
+
+# ---------------------------------------------------------------------------
+# Now import pipeline (AFTER patches so bound names are already overridden)
+# ---------------------------------------------------------------------------
+from hi3dgen.pipelines import Hi3DGenPipeline
+import trimesh
 
 MAX_SEED = np.iinfo(np.int32).max
 TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
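
Step 3 and the relocated pipeline import address the same pitfall: `from .utils_cube import construct_dense_grid` copies a reference into cube2mesh's namespace, so patching `uc.construct_dense_grid` alone leaves that copy untouched. A standalone toy (no hi3dgen needed) that reproduces the behaviour:

```python
# Standalone toy: why the bound name must be patched too.
import sys
import types

src = types.ModuleType("src")
exec("def f():\n    return 'original'", src.__dict__)
sys.modules["src"] = src

consumer = types.ModuleType("consumer")
exec("from src import f\n\ndef call():\n    return f()", consumer.__dict__)

src.f = lambda: "patched"   # patch only the defining module...
print(consumer.call())      # -> 'original': the bound name still points at the old f

consumer.f = src.f          # ...so the bound name must be overridden as well
print(consumer.call())      # -> 'patched'
```

The same reasoning is why `from hi3dgen.pipelines import Hi3DGenPipeline` now runs only after all patches are installed.
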
@@ -326,7 +341,7 @@ with gr.Blocks(css="footer {visibility: hidden}") as demo:
     gr.Markdown(
         """
         **Acknowledgments**: Hi3DGen is built on the shoulders of giants. We would like to express our gratitude to the open-source research community and the developers of these pioneering projects:
-        - **3D Modeling:** Our 3D Model is finetuned from the SOTA open-source 3D foundation model [Trellis](https://github.com/microsoft/TRELLIS); inspired by Rodin, Tripo, and Dora.
+        - **3D Modeling:** Finetuned from the SOTA open-source 3D foundation model [Trellis].
         - **Normal Estimation:** Builds on StableNormal and GenPercept.
         """
     )