Update src/utils/dependencies/XPose/models/UniPose/ops/setup.py
src/utils/dependencies/XPose/models/UniPose/ops/setup.py
CHANGED
@@ -27,36 +27,25 @@ def get_extensions():
 
     main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
     source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
-    source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
+    # source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu")) # Don't include CUDA sources
 
+    # Force CPU Build Path
+    print("Forcing CPU build path for MultiScaleDeformableAttention.")
     sources = main_file + source_cpu
-    extension = CppExtension
+    extension = CppExtension # Force CppExtension
     extra_compile_args = {"cxx": []}
-    define_macros = []
+    define_macros = [] # No CUDA macros
 
-    #
-
-    if torch.cuda.is_available() and CUDA_HOME is not None:
-        extension = CUDAExtension
-        sources += source_cuda
-        define_macros += [("WITH_CUDA", None)]
-        extra_compile_args["nvcc"] = [
-            "-DCUDA_HAS_FP16=1",
-            "-D__CUDA_NO_HALF_OPERATORS__",
-            "-D__CUDA_NO_HALF_CONVERSIONS__",
-            "-D__CUDA_NO_HALF2_OPERATORS__",
-        ]
-    else:
-        # Allow build to continue even if CUDA not detected initially
-        print("Warning: CUDA not found during build setup for MultiScaleDeformableAttention. Attempting build anyway, may fall back to CPU.")
-        pass # Avoid raising error
+    # Remove the CUDA check block entirely
+    # if torch.cuda.is_available() and CUDA_HOME is not None:
+    #     ...
+    # else:
+    #     ...
 
     sources = [os.path.join(extensions_dir, s) for s in sources]
     include_dirs = [extensions_dir]
-    # Ensure extension type is set correctly based on initial check
-    extension_type = CUDAExtension if extension == CUDAExtension else CppExtension
     ext_modules = [
-        extension_type(
+        extension( # Use the forced CppExtension type
             "MultiScaleDeformableAttention",
             sources,
             include_dirs=include_dirs,
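For reference, here is a sketch of how get_extensions() reads after this commit. It is reconstructed from the hunk above; the function header, the extensions_dir setup, the imports, and the trailing define_macros/extra_compile_args arguments fall outside the hunk and are assumed to match the stock Deformable-DETR ops setup.py that this file derives from.

    # Sketch of the patched function; imports and lines outside the hunk are assumed.
    import glob
    import os

    from torch.utils.cpp_extension import CppExtension


    def get_extensions():
        this_dir = os.path.dirname(os.path.abspath(__file__))
        extensions_dir = os.path.join(this_dir, "src")  # assumed, outside the hunk

        main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
        source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
        # CUDA sources are deliberately no longer collected.

        # Force CPU Build Path
        print("Forcing CPU build path for MultiScaleDeformableAttention.")
        sources = main_file + source_cpu
        extension = CppExtension  # always build a plain C++ extension
        extra_compile_args = {"cxx": []}
        define_macros = []  # no WITH_CUDA, so #ifdef WITH_CUDA branches compile out

        sources = [os.path.join(extensions_dir, s) for s in sources]
        include_dirs = [extensions_dir]
        ext_modules = [
            extension(
                "MultiScaleDeformableAttention",
                sources,
                include_dirs=include_dirs,
                define_macros=define_macros,            # assumed, outside the hunk
                extra_compile_args=extra_compile_args,  # assumed, outside the hunk
            )
        ]
        return ext_modules

The package is typically built with "python setup.py build install" from this ops directory; afterwards the extension should be importable under the name declared in ext_modules:

    # Hypothetical smoke test after building and installing the extension.
    import MultiScaleDeformableAttention as MSDA

    print(MSDA.__file__)  # path of the CPU-only compiled module

Note that in the Deformable-DETR lineage the cpu/*.cpp sources are runtime stubs, so a CPU-only build mainly lets the package compile and install without CUDA; actual CPU inference relies on the pure-PyTorch fallback in the calling code, as the removed warning message also suggested.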