hujiecpp committed on
Commit 607d75e · 1 Parent(s): 2da96e5

init project

Files changed (1)
  1. app.py +14 -6
app.py CHANGED
@@ -38,8 +38,8 @@ from modules.pe3r.models import Models
 import torchvision.transforms as tvf
 
 silent = False
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
-pe3r = Models(device) #
+# device = 'cuda' if torch.cuda.is_available() else 'cpu'
+pe3r = Models('cpu') #
 
 def _convert_scene_output_to_glb(outdir, imgs, pts3d, mask, focals, cams2world, cam_size=0.05,
                                  cam_color=None, as_pointcloud=False,
@@ -244,7 +244,9 @@ def slerp_multiple(vectors, t_values):
 @torch.no_grad
 def get_mask_from_img_sam1(sam1_image, yolov8_image, original_size, input_size, transform):
 
-    # device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    pe3r.yolov8.to(device)
+    pe3r.mobilesamv2.to(device)
 
     sam_mask=[]
     img_area = original_size[0] * original_size[1]
@@ -300,7 +302,10 @@ def get_mask_from_img_sam1(sam1_image, yolov8_image, original_size, input_size,
 @torch.no_grad
 def get_cog_feats(images):
 
-    # device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    pe3r.sam2.to(device)
+    pe3r.siglip_processor.to(device)
+    pe3r.siglip.to(device)
 
     cog_seg_maps = []
     rev_cog_seg_maps = []
@@ -446,7 +451,8 @@ def get_reconstructed_scene(outdir, filelist, schedule, niter, min_conf_thr,
     then run get_3D_model_from_scene
     """
 
-    # device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    pe3r.mast3r.to(device)
 
     if len(filelist) < 2:
         raise gradio.Error("Please input at least 2 images.")
@@ -511,7 +517,9 @@ def get_reconstructed_scene(outdir, filelist, schedule, niter, min_conf_thr,
 def get_3D_object_from_scene(outdir, text, threshold, scene, min_conf_thr, as_pointcloud,
                              mask_sky, clean_depth, transparent_cams, cam_size):
 
-    # device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    pe3r.siglip_tokenizer.to(device)
+
 
     texts = [text]
     inputs = pe3r.siglip_tokenizer(text=texts, padding="max_length", return_tensors="pt")
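
Taken together, the commit builds the models on the CPU at import time and moves only the models each handler needs onto the GPU when that handler is called. A minimal sketch of that lazy device-placement pattern, using illustrative names (DemoModels, backbone, run_inference) rather than the actual pe3r API:

import torch
import torch.nn as nn

class DemoModels:
    """Illustrative container of PyTorch modules; not the pe3r Models class."""
    def __init__(self, device: str):
        # Construct everything on the requested device; 'cpu' keeps start-up GPU-free.
        self.backbone = nn.Linear(512, 512).to(device)

models = DemoModels('cpu')  # built on CPU at import time, as in the commit

@torch.no_grad()
def run_inference(x: torch.Tensor) -> torch.Tensor:
    # Decide the device at call time and move only what this handler needs.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    models.backbone.to(device)
    return models.backbone(x.to(device))

if __name__ == '__main__':
    print(run_inference(torch.randn(1, 512)).shape)  # torch.Size([1, 512])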