Commit 3057b36
Parent(s): 4a3087a
Update
app.py
CHANGED
@@ -1,4 +1,5 @@
 import gradio as gr
+import spaces
 # from gradio_litmodel3d import LitModel3D
 
 import os
@@ -23,6 +24,7 @@ def preprocess_image(image: Image.Image) -> Image.Image:
     return pipeline.preprocess_image(image)
 
 
+@spaces.GPU
 def image_to_3d(image: Image.Image) -> Tuple[dict, str]:
     """
     Convert an image to a 3D model.
@@ -44,6 +46,7 @@ def image_to_3d(image: Image.Image) -> Tuple[dict, str]:
     return model, video_path
 
 
+@spaces.GPU
 def extract_glb(model: dict, mesh_simplify: float, texture_size: int) -> Tuple[str, str]:
     """
     Extract a GLB file from the 3D model.
trellis/models/structured_latent_vae/decoder_mesh.py
CHANGED
@@ -102,8 +102,8 @@ class SLatMeshDecoder(SparseTransformerBase):
         )
         self.resolution = resolution
         self.rep_config = representation_config
-        mesh_extractor = SparseFeatures2Mesh(res=self.resolution*4, use_color=self.rep_config.get('use_color', False))
-        self.out_channels = mesh_extractor.feats_channels
+        self.mesh_extractor = SparseFeatures2Mesh(res=self.resolution*4, use_color=self.rep_config.get('use_color', False))
+        self.out_channels = self.mesh_extractor.feats_channels
         self.upsample = nn.ModuleList([
             SparseSubdivideBlock3d(
                 channels=model_channels,
@@ -153,9 +153,8 @@ class SLatMeshDecoder(SparseTransformerBase):
             list of representations
         """
         ret = []
-        mesh_extractor = SparseFeatures2Mesh(x.device, res=self.resolution*4, use_color=self.rep_config.get('use_color', False))
         for i in range(x.shape[0]):
-            mesh = mesh_extractor(x[i], training=self.training)
+            mesh = self.mesh_extractor(x[i], training=self.training)
             ret.append(mesh)
         return ret
 
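
The decoder_mesh.py change hoists the SparseFeatures2Mesh construction out of to_representation and into __init__: the extractor is now built once per model and cached as self.mesh_extractor, replacing both the throwaway instance previously created in __init__ only to read feats_channels and the fresh instance previously rebuilt on every decode call (the per-call version also received x.device explicitly; the cached one is built with the constructor's default device). A minimal sketch of the same refactor, with hypothetical Extractor and Decoder classes standing in for SparseFeatures2Mesh and SLatMeshDecoder:

class Extractor:
    """Stand-in for SparseFeatures2Mesh; assume construction is costly."""
    def __init__(self, res: int):
        self.res = res
        self.feats_channels = 8  # illustrative value

    def __call__(self, feats, training: bool = False):
        # A real extractor would turn sparse features into a mesh here.
        return f"mesh(res={self.res}, training={training})"


class Decoder:
    """Stand-in for SLatMeshDecoder, showing the 'after' state of the diff."""
    def __init__(self, resolution: int):
        self.resolution = resolution
        self.training = False
        # Build the extractor once and cache it, instead of rebuilding it
        # on every to_representation call as the removed code did.
        self.mesh_extractor = Extractor(res=self.resolution * 4)
        self.out_channels = self.mesh_extractor.feats_channels

    def to_representation(self, batch):
        # Reuse the cached extractor for every item in the batch.
        return [self.mesh_extractor(item, training=self.training) for item in batch]


dec = Decoder(resolution=64)
print(dec.out_channels)                     # 8
print(dec.to_representation(["f0", "f1"]))  # two meshes, one extractor

Caching the extractor trades a small one-time construction cost at load time for none at decode time; per-call behavior is unchanged as long as the extractor keeps no state between calls.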