Ali Mohsin committed
Commit be97fdc · Parent(s): bdd42db
more elegant fixes

Files changed:
- NeuralJacobianFields/PoissonSystem.py +5 -1
- app.py +25 -3
- loop.py +34 -1
- main.py +2 -2
- utilities/helpers.py +6 -3
NeuralJacobianFields/PoissonSystem.py  CHANGED
@@ -49,7 +49,11 @@ if USE_SCIPY:
 
 
 if USE_TORCH_SPARSE:
-    import torch_sparse
+    try:
+        import torch_sparse
+    except ImportError:
+        print("Warning: torch_sparse not available, falling back to built-in PyTorch sparse operations")
+        USE_TORCH_SPARSE = False
 
 
 USE_UGLY_PATCH_FOR_CUPY_ERROR = False
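Note: the guard above only makes the torch_sparse import optional; the diff does not show how the flag is consumed later in PoissonSystem.py. Below is a minimal sketch of the kind of branch it presumably feeds; the function name, arguments, and the fallback path are illustrative assumptions, not code from this file (and it assumes torch is already imported).

# Hypothetical helper: multiply a sparse COO matrix by a dense one, preferring torch_sparse.
# `indices` is a (2, nnz) LongTensor, `values` a (nnz,) tensor, `shape` = (m, n), `dense` an (n, k) tensor.
def sparse_dense_mm(indices, values, shape, dense):
    if USE_TORCH_SPARSE:
        # torch_sparse.spmm(index, value, m, n, matrix)
        return torch_sparse.spmm(indices, values, shape[0], shape[1], dense)
    coo = torch.sparse_coo_tensor(indices, values, shape)
    return torch.sparse.mm(coo, dense)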
app.py  CHANGED
@@ -461,8 +461,22 @@ def process_garment(input_type, text_prompt, base_text_prompt, mesh_target_image
         print(error_message)
         return error_message
 
+        # Check if loop is a dummy function (which raises RuntimeError)
+        if hasattr(loop, '__name__') and loop.__name__ == 'dummy_loop':
+            error_message = "Error: Processing engine not available. Please check dependencies."
+            print(error_message)
+            return error_message
+
         # Run the loop with error handling
-        loop(config)
+        try:
+            loop(config)
+        except RuntimeError as e:
+            if "Processing engine not available" in str(e):
+                error_message = "Error: Processing engine not available. Please check dependencies."
+                print(error_message)
+                return error_message
+            else:
+                raise e
     except RuntimeError as e:
         print(f"Runtime error during processing: {e}")
         if "operator torchvision::nms does not exist" in str(e):
@@ -534,7 +548,7 @@ def process_garment(input_type, text_prompt, base_text_prompt, mesh_target_image
             return image_files[0]  # Return an image if no mesh was found
         else:
             print("No output files found")
-            return
+            return None
 
     except Exception as e:
         import traceback
@@ -704,10 +718,18 @@ def create_interface():
         try:
            result = process_garment(*args)
            if result is None:
-                return None, "Processing
+                return None, "Processing completed but no output files were generated. Please check the logs for more details."
            elif isinstance(result, str) and result.startswith("Error:"):
+                # Return None for the file output and the error message for status
                return None, result
+            elif isinstance(result, str) and os.path.exists(result):
+                # Valid file path
+                return result, "Processing completed successfully! Download your 3D garment file below."
+            elif isinstance(result, str):
+                # Some other string that's not an error and not a file path
+                return None, f"Unexpected result: {result}"
            else:
+                # Should be a file path or None
                return result, "Processing completed successfully! Download your 3D garment file below."
         except Exception as e:
            import traceback
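Note: the `loop.__name__ == 'dummy_loop'` check implies app.py installs a placeholder when the real training loop cannot be imported. That fallback is not part of this diff; the following is a minimal sketch of the assumed pattern, where the function name matches the check above and the message matches the string the new except branch looks for.

# Assumed import guard near the top of app.py (illustrative, not shown in this commit):
try:
    from loop import loop
except Exception as import_error:
    def dummy_loop(cfg):
        # Raising here is what the "Processing engine not available" handler above catches.
        raise RuntimeError(f"Processing engine not available: {import_error}")
    loop = dummy_loop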
loop.py  CHANGED
@@ -3,12 +3,45 @@ import kornia
 import os
 import sys
 import pathlib
-import torchvision
 import logging
 import yaml
 import nvdiffrast.torch as dr
 from easydict import EasyDict
 
+# Apply torchvision compatibility fixes
+try:
+    import torchvision
+    print(f"torchvision {torchvision.__version__} imported successfully")
+except (RuntimeError, AttributeError) as e:
+    if "operator torchvision::nms does not exist" in str(e) or "extension" in str(e):
+        print("Applying torchvision compatibility fixes...")
+        # Apply the same fixes as in app.py
+        import types
+        if not hasattr(torch, 'ops'):
+            torch.ops = types.SimpleNamespace()
+        if not hasattr(torch.ops, 'torchvision'):
+            torch.ops.torchvision = types.SimpleNamespace()
+
+        # Create dummy functions for problematic operators
+        torchvision_ops = ['nms', 'roi_align', 'roi_pool', 'ps_roi_align', 'ps_roi_pool']
+        for op_name in torchvision_ops:
+            if not hasattr(torch.ops.torchvision, op_name):
+                if op_name == 'nms':
+                    setattr(torch.ops.torchvision, op_name, lambda *args, **kwargs: torch.zeros(0, dtype=torch.int64))
+                else:
+                    setattr(torch.ops.torchvision, op_name, lambda *args, **kwargs: torch.zeros(0))
+
+        # Try importing again
+        try:
+            import torchvision
+            print("torchvision imported successfully after fixes")
+        except Exception as e2:
+            print(f"torchvision still has issues, but continuing: {e2}")
+    else:
+        print(f"Other torchvision error: {e}")
+except ImportError:
+    print("torchvision not available, continuing without it")
+
 from NeuralJacobianFields import SourceMesh
 
 from nvdiffmodeling.src import render
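Note: the block above assumes `torch` is already imported earlier in loop.py (before the `import kornia` shown in the hunk header), and the stubs only unblock `import torchvision`; they do not make the NMS/ROI ops functional. A quick way to reproduce the failure the patch targets on a mismatched torch/torchvision pair (illustrative check, not part of the commit):

# Prints the installed versions, then exercises the compiled op that fails with
# "operator torchvision::nms does not exist" when the two builds do not match.
import torch, torchvision
print(torch.__version__, torchvision.__version__)
boxes = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
scores = torch.tensor([0.9])
print(torchvision.ops.nms(boxes, scores, iou_threshold=0.5))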
main.py  CHANGED
@@ -44,7 +44,7 @@ def main():
     ## Camera Parameters ##
     parser.add_argument('--fov_min', help='Minimum camera field of view angle during renders', type=float, default=argparse.SUPPRESS)
     parser.add_argument('--fov_max', help='Maximum camera field of view angle during renders', type=float, default=argparse.SUPPRESS)
-    parser.add_argument('--
+    parser.add_argument('--dist_min', help='Minimum distance of camera from mesh during renders', type=float, default=argparse.SUPPRESS)
     parser.add_argument('--dist_max', help='Maximum distance of camera from mesh during renders', type=float, default=argparse.SUPPRESS)
     parser.add_argument('--light_power', help='Light intensity', type=float, default=argparse.SUPPRESS)
     parser.add_argument('--elev_alpha', help='Alpha parameter for Beta distribution for elevation sampling', type=float, default=argparse.SUPPRESS)
@@ -58,7 +58,7 @@ def main():
     parser.add_argument('--adapt_dist', help='Adjust camera distance to account for scale of shape', type=int, default=argparse.SUPPRESS, choices=[0, 1])
 
     # Logging
-    parser.add_argument('--
+    parser.add_argument('--log_interval', help='Interval for logging, every X epochs', type=int, default=argparse.SUPPRESS)
     parser.add_argument('--log_interval_im', help='Interval for logging renders image, every X epochs', type=int, default=argparse.SUPPRESS)
     parser.add_argument('--log_elev', help='Logging elevation angle', type=float, default=argparse.SUPPRESS)
     parser.add_argument('--log_fov', help='Logging field of view', type=float, default=argparse.SUPPRESS)
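Note: because these arguments use default=argparse.SUPPRESS, a flag that is not passed on the command line is simply absent from the parsed namespace, so its value presumably falls back to whatever the YAML config provides. An illustrative invocation exercising the two restored flags (the values are made up):

python main.py --dist_min 2.0 --dist_max 3.5 --log_interval 10 --log_interval_im 50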
utilities/helpers.py  CHANGED
@@ -56,13 +56,16 @@ class Vgg19(torch.nn.Module):
 
 cosine_sim = torch.nn.CosineSimilarity()
 render_loss = torch.nn.L1Loss()
-…
-…
+# Use CUDA if available, otherwise CPU
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+mean = torch.tensor([0.485, 0.456, 0.406], device=device)
+std = torch.tensor([0.229, 0.224, 0.225], device=device)
 
 class VGGLoss(nn.Module):
     def __init__(self):
         super(VGGLoss, self).__init__()
-        …
+        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+        self.vgg = Vgg19().to(device)
         self.criterion = nn.L1Loss()
         self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]
 
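Note: the mean/std tensors are the standard ImageNet normalization statistics. The diff does not show where they are applied; below is a minimal sketch of the typical usage when feeding rendered images to Vgg19 — the helper name and the (N, 3, H, W) input layout in [0, 1] are assumptions, not code from this file.

# Hypothetical helper: normalize a batch of renders before VGG feature extraction.
def normalize_for_vgg(images):
    return (images.to(device) - mean.view(1, 3, 1, 1)) / std.view(1, 3, 1, 1)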