lionelgarnier committed on
Commit
99459b9
·
1 Parent(s): 6c80f3e

implement 3D model generation and extraction features with session management

Browse files
Files changed (1) hide show
  1. app.py +212 -0
app.py CHANGED
@@ -8,6 +8,16 @@ from diffusers import DiffusionPipeline
8
  from transformers import pipeline, AutoTokenizer
9
  from huggingface_hub import login
10
  from PIL import Image
 
 
 
 
 
 
 
 
 
 
11
 
12
  hf_token = os.getenv("hf_token")
13
  login(token=hf_token)
@@ -35,9 +45,23 @@ DEFAULT_NUM_INFERENCE_STEPS = 6
35
  DEFAULT_GUIDANCE_SCALE = 0.0
36
  DEFAULT_TEMPERATURE = 0.9
37
 
 
 
 
38
  _text_gen_pipeline = None
39
  _image_gen_pipeline = None
40
 
 
 
 
 
 
 
 
 
 
 
 
41
  @spaces.GPU()
42
  def get_image_gen_pipeline():
43
  global _image_gen_pipeline
@@ -199,6 +223,147 @@ def preload_models():
199
  print(status)
200
  return success
201
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
202
  # Create a combined function that handles the whole pipeline from example to image
203
  # This version gets the parameters from the UI components
204
  @spaces.GPU()
@@ -250,6 +415,13 @@ def create_interface():
250
  generated_image = gr.Image(show_label=False)
251
  gen3d_button = gr.Button("Create 3D visual with Trellis")
252
 
 
 
 
 
 
 
 
253
  message_box = gr.Textbox(
254
  label="Status Messages",
255
  interactive=False,
@@ -292,6 +464,23 @@ def create_interface():
292
  step=1,
293
  value=DEFAULT_NUM_INFERENCE_STEPS,
294
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
295
 
296
  # Examples section - simplified version that only updates the prompt fields
297
  gr.Examples(
@@ -317,8 +506,31 @@ def create_interface():
317
  outputs=[generated_image, message_box]
318
  )
319
 
 
 
 
 
 
 
 
320
  return demo
321
 
322
  if __name__ == "__main__":
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
323
  demo = create_interface()
324
  demo.launch()
 
8
  from transformers import pipeline, AutoTokenizer
9
  from huggingface_hub import login
10
  from PIL import Image
11
+ from gradio_litmodel3d import LitModel3D
12
+ import shutil
13
+ os.environ['SPCONV_ALGO'] = 'native'
14
+ from typing import *
15
+ import imageio
16
+ from easydict import EasyDict as edict
17
+ from trellis.pipelines import TrellisImageTo3DPipeline
18
+ from trellis.representations import Gaussian, MeshExtractResult
19
+ from trellis.utils import render_utils, postprocessing_utils
20
+
21
 
22
  hf_token = os.getenv("hf_token")
23
  login(token=hf_token)
 
45
  DEFAULT_GUIDANCE_SCALE = 0.0
46
  DEFAULT_TEMPERATURE = 0.9
47
 
48
# Root of the per-session scratch directories (one subdir per Gradio session
# hash); lives next to this file and is created eagerly at import time.
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
os.makedirs(TMP_DIR, exist_ok=True)
50
+
51
  _text_gen_pipeline = None
52
  _image_gen_pipeline = None
53
 
54
+
55
def start_session(req: gr.Request):
    """Create this session's scratch directory under TMP_DIR (keyed by session hash)."""
    session_dir = os.path.join(TMP_DIR, str(req.session_hash))
    os.makedirs(session_dir, exist_ok=True)
58
+
59
+
60
def end_session(req: gr.Request):
    """Delete this session's scratch directory when the browser session ends.

    ignore_errors=True because the directory may legitimately be absent (the
    load hook never fired, or cleanup already ran) and an unload callback
    must not raise.
    """
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    shutil.rmtree(user_dir, ignore_errors=True)
63
+
64
+
65
  @spaces.GPU()
66
  def get_image_gen_pipeline():
67
  global _image_gen_pipeline
 
223
  print(status)
224
  return success
225
 
226
+
227
def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
    """Serialize a Gaussian/mesh pair into a CPU-resident dict (safe to hold in UI state)."""
    # Raw parameter tensors moved off-GPU; these keys mirror the Gaussian attributes.
    tensor_fields = ('_xyz', '_features_dc', '_scaling', '_rotation', '_opacity')
    gaussian_state = dict(gs.init_params)
    for field in tensor_fields:
        gaussian_state[field] = getattr(gs, field).cpu().numpy()
    return {
        'gaussian': gaussian_state,
        'mesh': {
            'vertices': mesh.vertices.cpu().numpy(),
            'faces': mesh.faces.cpu().numpy(),
        },
    }
242
+
243
+
244
def unpack_state(state: dict) -> Tuple[Gaussian, edict]:
    """Rebuild GPU-resident Gaussian and mesh objects from a pack_state() dict.

    Args:
        state (dict): The dict produced by pack_state().

    Returns:
        Gaussian: The reconstructed Gaussian representation (tensors on CUDA).
        edict: The reconstructed mesh with 'vertices'/'faces' tensors on CUDA.

    Note: the original annotation claimed a 3-tuple ending in str, but the
    function has always returned exactly (gs, mesh); annotation corrected.
    """
    gs_state = state['gaussian']
    gs = Gaussian(
        aabb=gs_state['aabb'],
        sh_degree=gs_state['sh_degree'],
        mininum_kernel_size=gs_state['mininum_kernel_size'],
        scaling_bias=gs_state['scaling_bias'],
        opacity_bias=gs_state['opacity_bias'],
        scaling_activation=gs_state['scaling_activation'],
    )
    # Restore the raw tensors that pack_state() exported as CPU numpy arrays.
    gs._xyz = torch.tensor(gs_state['_xyz'], device='cuda')
    gs._features_dc = torch.tensor(gs_state['_features_dc'], device='cuda')
    gs._scaling = torch.tensor(gs_state['_scaling'], device='cuda')
    gs._rotation = torch.tensor(gs_state['_rotation'], device='cuda')
    gs._opacity = torch.tensor(gs_state['_opacity'], device='cuda')

    mesh = edict(
        vertices=torch.tensor(state['mesh']['vertices'], device='cuda'),
        faces=torch.tensor(state['mesh']['faces'], device='cuda'),
    )

    return gs, mesh
265
+
266
+
267
@spaces.GPU
def image_to_3d(
    image: Image.Image,
    seed: int,
    ss_guidance_strength: float,
    ss_sampling_steps: int,
    slat_guidance_strength: float,
    slat_sampling_steps: int,
    multiimage_algo: Literal["multidiffusion", "stochastic"],
    req: gr.Request,
) -> Tuple[dict, str]:
    """
    Convert an image to a 3D model.

    Args:
        image (Image.Image): The input image.
        seed (int): The random seed.
        ss_guidance_strength (float): The guidance strength for sparse structure generation.
        ss_sampling_steps (int): The number of sampling steps for sparse structure generation.
        slat_guidance_strength (float): The guidance strength for structured latent generation.
        slat_sampling_steps (int): The number of sampling steps for structured latent generation.
        multiimage_algo (Literal["multidiffusion", "stochastic"]): The algorithm for
            multi-image generation. NOTE(review): currently unused in the body —
            confirm whether it should be forwarded to pipeline.run.

    Returns:
        dict: The packed state of the generated 3D model (see pack_state).
        str: The path to the rendered turntable video of the 3D model.
    """
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    # Guard: the session dir may not exist yet (e.g. start_session never ran).
    os.makedirs(user_dir, exist_ok=True)

    # `pipeline` is the module-level TrellisImageTo3DPipeline set up at startup.
    outputs = pipeline.run(
        image,
        seed=seed,
        formats=["gaussian", "mesh"],
        preprocess_image=False,
        sparse_structure_sampler_params={
            "steps": ss_sampling_steps,
            "cfg_strength": ss_guidance_strength,
        },
        slat_sampler_params={
            "steps": slat_sampling_steps,
            "cfg_strength": slat_guidance_strength,
        },
    )

    # Side-by-side turntable: gaussian color render | mesh normal render.
    video = render_utils.render_video(outputs['gaussian'][0], num_frames=120)['color']
    video_geo = render_utils.render_video(outputs['mesh'][0], num_frames=120)['normal']
    video = [np.concatenate([video[i], video_geo[i]], axis=1) for i in range(len(video))]
    video_path = os.path.join(user_dir, 'sample.mp4')
    imageio.mimsave(video_path, video, fps=15)
    # Pack tensors to CPU so the result can sit in a Gradio state slot.
    state = pack_state(outputs['gaussian'][0], outputs['mesh'][0])
    torch.cuda.empty_cache()
    return state, video_path
319
+
320
+
321
@spaces.GPU(duration=90)
def extract_glb(
    state: dict,
    mesh_simplify: float,
    texture_size: int,
    req: gr.Request,
) -> Tuple[str, str]:
    """
    Extract a GLB file from the 3D model.

    Args:
        state (dict): The state of the generated 3D model (see pack_state).
        mesh_simplify (float): The mesh simplification factor.
        texture_size (int): The texture resolution.

    Returns:
        str: The path to the extracted GLB file (for the 3D viewer).
        str: The same path again (for the download button).
    """
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    # Guard: the session dir may not exist yet (e.g. start_session never ran).
    os.makedirs(user_dir, exist_ok=True)
    gs, mesh = unpack_state(state)
    glb = postprocessing_utils.to_glb(gs, mesh, simplify=mesh_simplify, texture_size=texture_size, verbose=False)
    glb_path = os.path.join(user_dir, 'sample.glb')
    glb.export(glb_path)
    torch.cuda.empty_cache()
    return glb_path, glb_path
346
+
347
+
348
@spaces.GPU
def extract_gaussian(state: dict, req: gr.Request) -> Tuple[str, str]:
    """
    Extract a Gaussian (.ply) file from the 3D model.

    Args:
        state (dict): The state of the generated 3D model (see pack_state).

    Returns:
        str: The path to the extracted Gaussian file (for the 3D viewer).
        str: The same path again (for the download button).
    """
    user_dir = os.path.join(TMP_DIR, str(req.session_hash))
    # Guard: the session dir may not exist yet (e.g. start_session never ran).
    os.makedirs(user_dir, exist_ok=True)
    gs, _ = unpack_state(state)
    gaussian_path = os.path.join(user_dir, 'sample.ply')
    gs.save_ply(gaussian_path)
    torch.cuda.empty_cache()
    return gaussian_path, gaussian_path
365
+
366
+
367
  # Create a combined function that handles the whole pipeline from example to image
368
  # This version gets the parameters from the UI components
369
  @spaces.GPU()
 
415
  generated_image = gr.Image(show_label=False)
416
  gen3d_button = gr.Button("Create 3D visual with Trellis")
417
 
418
+ video_output = gr.Video(label="Generated 3D Asset", autoplay=True, loop=True, height=300)
419
+ model_output = LitModel3D(label="Extracted GLB/Gaussian", exposure=10.0, height=300)
420
+
421
+ with gr.Row():
422
+ download_glb = gr.DownloadButton(label="Download GLB", interactive=False)
423
+ download_gs = gr.DownloadButton(label="Download Gaussian", interactive=False)
424
+
425
  message_box = gr.Textbox(
426
  label="Status Messages",
427
  interactive=False,
 
464
  step=1,
465
  value=DEFAULT_NUM_INFERENCE_STEPS,
466
  )
467
+
468
+ with gr.Tab("3D Generation Settings"):
469
+ seed = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1)
470
+ randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
471
+ gr.Markdown("Stage 1: Sparse Structure Generation")
472
+ with gr.Row():
473
+ ss_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=7.5, step=0.1)
474
+ ss_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
475
+ gr.Markdown("Stage 2: Structured Latent Generation")
476
+ with gr.Row():
477
+ slat_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=3.0, step=0.1)
478
+ slat_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
479
+
480
+ with gr.Tab("GLB Extraction Settings"):
481
+ mesh_simplify = gr.Slider(0.9, 0.98, label="Simplify", value=0.95, step=0.01)
482
+ texture_size = gr.Slider(512, 2048, label="Texture Size", value=1024, step=512)
483
+
484
 
485
  # Examples section - simplified version that only updates the prompt fields
486
  gr.Examples(
 
506
  outputs=[generated_image, message_box]
507
  )
508
 
509
+ gr.on(
510
+ triggers=[gen3d_button.click],
511
+ fn=image_to_3d,
512
+ inputs=[generated_image, seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps, multiimage_algo],
513
+ outputs=[output_buf, video_output],
514
+ )
515
+
516
  return demo
517
 
518
if __name__ == "__main__":
    # Initialize the Trellis pipeline before creating the interface so that
    # image_to_3d (which reads the module-level `pipeline`) can run.
    pipeline = TrellisImageTo3DPipeline.from_pretrained("JeffreyXiang/TRELLIS-image-large")
    pipeline.cuda()
    try:
        # Warm up rembg so the first user request does not pay the model-load cost.
        pipeline.preprocess_image(Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8)))
    except Exception as e:
        print(f"Warning when preloading rembg: {e}")

    demo = create_interface()
    # Register session lifecycle hooks BEFORE launch(): launch() blocks, so any
    # statements placed after it never execute, and load/unload handlers must
    # be attached before the app starts serving.
    demo.load(start_session)
    demo.unload(end_session)
    demo.launch()