chore: remove try
app.py
CHANGED
@@ -178,58 +178,53 @@ def _default_render_cameras(batch_size: int = 1):
 
 @spaces.GPU
 def generate_mesh(image, source_size=512, render_size=384, mesh_size=512, export_mesh=False, export_video=False, fps=30):
-    [old lines 181-227 not rendered in this view]
-        return None, None
-    except Exception as e:
-        print(f"Error: {str(e)}")
-        return None, None
+    image = preprocess_image(image, source_size).to(model_wrapper.device)
+    source_camera = _default_source_camera(batch_size=1).to(model_wrapper.device)
+
+    with torch.no_grad():
+        planes = model_wrapper.forward(image, source_camera)
+
+        if export_mesh:
+            grid_out = model_wrapper.model.synthesizer.forward_grid(planes=planes, grid_size=mesh_size)
+            vtx, faces = mcubes.marching_cubes(grid_out['sigma'].float().squeeze(0).squeeze(-1).cpu().numpy(), 1.0)
+            vtx = vtx / (mesh_size - 1) * 2 - 1
+            vtx_tensor = torch.tensor(vtx, dtype=torch.float32, device=model_wrapper.device).unsqueeze(0)
+            vtx_colors = model_wrapper.model.synthesizer.forward_points(planes, vtx_tensor)['rgb'].float().squeeze(0).cpu().numpy()
+            vtx_colors = (vtx_colors * 255).astype(np.uint8)
+            mesh = trimesh.Trimesh(vertices=vtx, faces=faces, vertex_colors=vtx_colors)
+
+            mesh_path = "xiaoxis_mesh.obj"
+            mesh.export(mesh_path, 'obj')
+
+            return None, mesh_path
+
+        if export_video:
+            render_cameras = _default_render_cameras(batch_size=1).to(model_wrapper.device)
+            frames = []
+            chunk_size = 1
+            for i in range(0, render_cameras.shape[1], chunk_size):
+                frame_chunk = model_wrapper.model.synthesizer(
+                    planes,
+                    render_cameras[:, i:i + chunk_size],
+                    render_size,
+                    render_size,
+                    0,
+                    0
+                )
+                frames.append(frame_chunk['images_rgb'])
+
+            frames = torch.cat(frames, dim=1)
+            frames = frames.squeeze(0)
+            frames = (frames.permute(0, 2, 3, 1).cpu().numpy() * 255).astype(np.uint8)
+
+            video_path = "xiaoxis_video.mp4"
+            imageio.mimwrite(video_path, frames, fps=fps)
+
+            return None, video_path
+
+        return planes, None
+
+    return None, None
 
 def step_1_generate_planes(image):
     planes, _ = generate_mesh(image)