add whl for kaolin

Files changed:
- app.py (+6 -6)
- apps/infer.py (+23 -24)
app.py CHANGED
@@ -10,11 +10,12 @@ import subprocess
 
 if os.getenv('SYSTEM') == 'spaces':
     subprocess.run('pip install pyembree'.split())
-    subprocess.run(
+    subprocess.run(
+        'pip install git+https://github.com/YuliangXiu/rembg.git@hf'.split())
     subprocess.run(
         'pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html'.split())
     subprocess.run(
-        'pip
+        'pip install https://download.is.tue.mpg.de/icon/HF/kaolin-0.11.0-cp38-cp38-linux_x86_64.whl'.split())
     subprocess.run('pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu113_pyt1110/download.html'.split())
     subprocess.run(
         'pip install git+https://github.com/Project-Splinter/human_det.git'.split())
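These startup installs pin everything to Python 3.8 / CUDA 11.3 wheels: torch 1.11.0+cu113, a kaolin 0.11.0 cp38 wheel, and a pytorch3d wheel built for py38/cu113/pyt1110, so the three must move in lockstep. A minimal sketch of a fail-fast wrapper for such pinned installs, assuming only the standard library; run_pip is an illustrative helper, not part of this commit:

import subprocess
import sys

def run_pip(args: str) -> None:
    # install with the running interpreter's pip; check=True aborts the
    # Space startup early if a pinned wheel fails to resolve
    subprocess.run([sys.executable, '-m', 'pip', 'install', *args.split()],
                   check=True)

run_pip('torch==1.11.0+cu113 torchvision==0.12.0+cu113 '
        '-f https://download.pytorch.org/whl/cu113/torch_stable.html')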
@@ -94,7 +95,8 @@ def generate_image(seed, psi):
 
 random.seed(2022)
 model_types = ['icon-filter', 'pifu', 'pamir']
-examples = [[item, random.choice(model_types)]
+examples = [[item, random.choice(model_types)]
+            for item in glob.glob('examples/*.png')]
 
 with gr.Blocks() as demo:
     gr.Markdown(description)
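The rebuilt examples list pairs every sample image under examples/ with a randomly chosen model type. A minimal sketch of how such a list is typically fed to a Blocks demo through gr.Examples; the component names mirror this diff, but the wiring shown here is an assumption, not a quote from app.py:

import glob
import random

import gradio as gr

model_types = ['icon-filter', 'pifu', 'pamir']
examples = [[item, random.choice(model_types)]
            for item in glob.glob('examples/*.png')]

with gr.Blocks() as demo:
    inp = gr.Image(type='filepath', label='Input image')
    radio_choice = gr.Radio(model_types, label='Model type')
    # each inner list fills the inputs in order: [image_path, model_type]
    gr.Examples(examples=examples, inputs=[inp, radio_choice])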
@@ -139,11 +141,9 @@ with gr.Blocks() as demo:
                 clear_color=[0.0, 0.0, 0.0, 0.0], label="Refined Recon")
             out_final_download = gr.File(
                 label="Download refined clothed human mesh")
-            out_kaolin_download = gr.File(
-                label="Kaolin")
 
     out_lst = [out_smpl, out_smpl_download, out_smpl_npy_download, out_recon, out_recon_download,
-               out_final, out_final_download, out_vid, out_vid_download, overlap_inp
+               out_final, out_final_download, out_vid, out_vid_download, overlap_inp]
 
     btn_submit.click(fn=generate_model, inputs=[
                      inp, radio_choice], outputs=out_lst)
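After this hunk, out_lst holds exactly ten components: the SMPL viewer, its OBJ and NPY downloads, the raw and refined reconstructions with their download files, the video twice (viewer and download), and the overlap image. That count matches the ten paths generate_model returns in apps/infer.py below; removing out_kaolin_download here is what forces kaolin_path out of that return list.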
apps/infer.py CHANGED
@@ -14,7 +14,8 @@
 #
 # Contact: [email protected]
 
-import os
+import os
+import gc
 
 import logging
 from lib.common.config import cfg
@@ -46,7 +47,7 @@ logging.getLogger("trimesh").setLevel(logging.ERROR)
 
 
 def generate_model(in_path, model_type):
-
+
     torch.cuda.empty_cache()
 
     config_dict = {'loop_smpl': 100,
@@ -59,7 +60,7 @@ def generate_model(in_path, model_type):
     # cfg read and merge
     cfg.merge_from_file(config_dict['config'])
     cfg.merge_from_file("./lib/pymaf/configs/pymaf_config.yaml")
-
+
     os.makedirs(config_dict['out_dir'], exist_ok=True)
 
     cfg_show_list = [
@@ -262,7 +263,6 @@ def generate_model(in_path, model_type):
     os.makedirs(os.path.join(config_dict['out_dir'],
                              cfg.name, "obj"), exist_ok=True)
 
-
     norm_pred = (
         ((in_tensor["normal_F"][0].permute(1, 2, 0) + 1.0) * 255.0 / 2.0)
         .detach()
@@ -334,7 +334,8 @@ def generate_model(in_path, model_type):
     recon_obj = trimesh.Trimesh(
         verts_pr, faces_pr, process=False, maintains_order=True
     )
-    recon_obj.visual.vertex_colors = (
+    recon_obj.visual.vertex_colors = (
+        recon_obj.vertex_normals+1.0)*255.0*0.5
     recon_obj.export(
         os.path.join(config_dict['out_dir'], cfg.name,
                      f"obj/{data['name']}_recon.obj")
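The added lines shade the reconstruction by its surface normals: unit normals lie in [-1, 1], so adding 1 and scaling by 255/2 maps each component into the RGB range [0, 255]. A self-contained sketch of the same mapping on a stand-in trimesh mesh (the icosphere is illustrative only):

import numpy as np
import trimesh

mesh = trimesh.creation.icosphere()  # stand-in for recon_obj

# shift normals from [-1, 1] into [0, 2], then scale into [0, 255]
colors = (mesh.vertex_normals + 1.0) * 255.0 * 0.5
mesh.visual.vertex_colors = colors.astype(np.uint8)
mesh.export('normal_shaded.obj')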
@@ -343,7 +344,7 @@ def generate_model(in_path, model_type):
         os.path.join(config_dict['out_dir'], cfg.name,
                      f"obj/{data['name']}_recon.glb")
     )
-
+
     # Isotropic Explicit Remeshing for better geometry topology
     verts_refine, faces_refine = remesh(os.path.join(config_dict['out_dir'], cfg.name,
                                                      f"obj/{data['name']}_recon.obj"), 0.5, device)
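remesh() itself is not shown in this diff; its comment names Isotropic Explicit Remeshing, a MeshLab filter. A sketch of what that step can look like via pymeshlab, assuming that library, assuming the 0.5 argument is a target edge length as a percentage of the bounding-box diagonal, and noting that filter and parameter names vary across pymeshlab versions:

import pymeshlab

def remesh_sketch(obj_path: str, target_len_pct: float) -> pymeshlab.MeshSet:
    ms = pymeshlab.MeshSet()
    ms.load_new_mesh(obj_path)
    # resample triangles toward a uniform edge length for cleaner topology
    ms.meshing_isotropic_explicit_remeshing(
        targetlen=pymeshlab.Percentage(target_len_pct))
    return ms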
@@ -411,23 +412,21 @@ def generate_model(in_path, model_type):
     optimizer_cloth.step()
     scheduler_cloth.step(cloth_loss)
 
-
     final = trimesh.Trimesh(
         mesh_pr.verts_packed().detach().squeeze(0).cpu(),
         mesh_pr.faces_packed().detach().squeeze(0).cpu(),
         process=False, maintains_order=True
     )
-
-
+
     # without front texture
-    final_colors = (mesh_pr.verts_normals_padded().squeeze(
+    final_colors = (mesh_pr.verts_normals_padded().squeeze(
+        0).detach().cpu() + 1.0) * 0.5 * 255.0
     final.visual.vertex_colors = final_colors
     final.export(
         f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.obj")
     final.export(
         f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.glb")
 
-
     # always export visualized video regardless of the cloth refinment
     if final is not None:
         verts_lst = [verts_pr, final.vertices]
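Note that both Trimesh constructions in this hunk pass process=False, which stops trimesh from merging duplicate vertices or reordering them; that is what keeps final_colors, computed per vertex from the PyTorch3D mesh's normals, aligned one-to-one with the vertices that end up in the exported OBJ and GLB.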
@@ -444,7 +443,7 @@ def generate_model(in_path, model_type):
         os.path.join(config_dict['out_dir'], cfg.name,
                      f"vid/{data['name']}_cloth.mp4"),
     )
-
+
     smpl_obj_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.obj"
     smpl_glb_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.glb"
     smpl_npy_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_smpl.npy"
@@ -452,21 +451,21 @@ def generate_model(in_path, model_type):
     recon_glb_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_recon.glb"
     refine_obj_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.obj"
     refine_glb_path = f"{config_dict['out_dir']}/{cfg.name}/obj/{data['name']}_refine.glb"
-
-    video_path = os.path.join(
-
-
+
+    video_path = os.path.join(
+        config_dict['out_dir'], cfg.name, f"vid/{data['name']}_cloth.mp4")
+    overlap_path = os.path.join(
+        config_dict['out_dir'], cfg.name, f"png/{data['name']}_overlap.png")
+
     # clean all the variables
     for element in dir():
         if 'path' not in element:
             del locals()[element]
     gc.collect()
     torch.cuda.empty_cache()
-
-
-
-
-
-
-    refine_glb_path, refine_obj_path,
-    video_path, video_path, overlap_path, kaolin_path]
+
+    return [smpl_glb_path, smpl_obj_path,
+            smpl_npy_path,
+            recon_glb_path, recon_obj_path,
+            refine_glb_path, refine_obj_path,
+            video_path, video_path, overlap_path]
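One caveat worth knowing when reading the cleanup loop above: in CPython, locals() called inside a function returns a fresh snapshot dict, so del locals()[element] removes the entry from that snapshot, not the local variable itself; the locals are really freed when the frame exits, with gc.collect() and torch.cuda.empty_cache() reclaiming cyclic garbage and cached GPU memory. A minimal standalone demonstration:

def demo():
    big = [0] * 10_000
    for name in list(locals()):
        del locals()[name]    # mutates a throwaway snapshot in CPython
    return 'big' in locals()  # still True: the local survives

print(demo())  # True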