Commit 85dc93e
Parent(s): ea8057a

server compatibility

Files changed:
- app.py +1 -1
- app_texnet.py +28 -28
- model.py +20 -23
- settings.py +2 -2
app.py
CHANGED
@@ -12,7 +12,7 @@ from settings import ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID, SHOW_DUPLICATE
 DESCRIPTION = "# Material Authoring Demo v0.1. Under Construction"
 
 if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p> Check whether 'CUDA_VISIBLE_DEVICES' is set incorrectly in settings.py."
 
 # model = Model(base_model_id=DEFAULT_MODEL_ID, task_name="Canny")
 model = Model(base_model_id=DEFAULT_MODEL_ID, task_name="texnet")
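Note: the new message tells users to check CUDA_VISIBLE_DEVICES in settings.py when no GPU is found. A small diagnostic helper in this spirit (hypothetical, not part of the commit) could pinpoint the cause:

import os
import torch

def cuda_diagnostic() -> str:
    # Distinguish the common reasons torch.cuda.is_available() returns False.
    visible = os.getenv("CUDA_VISIBLE_DEVICES")
    if visible == "":
        return "CUDA_VISIBLE_DEVICES is an empty string, which hides every GPU."
    if visible is not None and torch.cuda.device_count() == 0:
        return f"CUDA_VISIBLE_DEVICES={visible!r} matches no usable GPU; check settings.py."
    return "No CUDA runtime or GPU detected on this machine."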
app_texnet.py
CHANGED
@@ -174,52 +174,52 @@ def create_demo(process):
                 "examples/bunny/frame_0001.png",  # /dgxusers/Users/jyang/project/ObjectReal/data/control/preprocess/bunny/uv_normal/fused.png
                 "examples/bunny/uv_normal.png",  # /dgxusers/Users/jyang/project/ObjectReal/data/control/preprocess/bunny/uv_normal/fused.png
                 "feather",
-
-
-
-
-
-
-
+                a_prompt.value,
+                n_prompt.value,
+                num_samples.value,
+                image_resolution.value,
+                num_steps.value,
+                guidance_scale.value,
+                seed.value,
             ],
             [
                 "monkey",
                 "examples/monkey/frame_0001.png",  # /dgxusers/Users/jyang/project/ObjectReal/data/control/preprocess/monkey/uv_normal/fused.png
                 "examples/monkey/uv_normal.png",  # /dgxusers/Users/jyang/project/ObjectReal/data/control/preprocess/monkey/uv_normal/fused.png
                 "wood",
-
-
-
-
-
-
-
+                a_prompt.value,
+                n_prompt.value,
+                num_samples.value,
+                image_resolution.value,
+                num_steps.value,
+                guidance_scale.value,
+                seed.value,
             ],
             [
                 "tshirt",
                 "examples/tshirt/frame_0001.png",  # /dgxusers/Users/jyang/project/ObjectReal/data/control/preprocess/monkey/uv_normal/fused.png
                 "examples/tshirt/uv_normal.png",  # /dgxusers/Users/jyang/project/ObjectReal/data/control/preprocess/monkey/uv_normal/fused.png
                 "wood",
-
-
-
-
-
-
-
+                a_prompt.value,
+                n_prompt.value,
+                num_samples.value,
+                image_resolution.value,
+                num_steps.value,
+                guidance_scale.value,
+                seed.value,
             ],
             [
                 "highheel",
                 "examples/highheel/frame_0001.png",  # /dgxusers/Users/jyang/project/ObjectReal/data/control/preprocess/monkey/uv_normal/fused.png
                 "examples/highheel/uv_normal.png",  # /dgxusers/Users/jyang/project/ObjectReal/data/control/preprocess/monkey/uv_normal/fused.png
                 "wood",
-
-
-
-
-
-
-
+                a_prompt.value,
+                n_prompt.value,
+                num_samples.value,
+                image_resolution.value,
+                num_steps.value,
+                guidance_scale.value,
+                seed.value,
             ],
         ],
     )
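Note: gr.Examples requires every example row to carry exactly one value per component in its inputs list; the rows above grow by seven entries because the advanced settings are evidently part of that list. A minimal sketch of the assumed wiring (component names and defaults are illustrative, not taken from the repo):

import gradio as gr

with gr.Blocks() as demo:
    name = gr.Textbox(label="object")
    image = gr.Image(label="frame", type="filepath")
    normal = gr.Image(label="uv normal", type="filepath")
    material = gr.Textbox(label="material")
    a_prompt = gr.Textbox(value="best quality", label="added prompt")
    n_prompt = gr.Textbox(value="lowres, bad anatomy", label="negative prompt")
    num_samples = gr.Slider(1, 4, value=1, step=1, label="samples")
    image_resolution = gr.Slider(256, 1024, value=512, step=64, label="resolution")
    num_steps = gr.Slider(1, 100, value=20, step=1, label="steps")
    guidance_scale = gr.Slider(0.1, 30.0, value=9.0, label="guidance scale")
    seed = gr.Slider(0, 2**31 - 1, value=0, step=1, label="seed")

    gr.Examples(
        examples=[[
            "bunny",
            "examples/bunny/frame_0001.png",
            "examples/bunny/uv_normal.png",
            "feather",
            # one value per remaining input component, mirroring the commit:
            a_prompt.value, n_prompt.value, num_samples.value, image_resolution.value,
            num_steps.value, guidance_scale.value, seed.value,
        ]],
        inputs=[name, image, normal, material, a_prompt, n_prompt, num_samples,
                image_resolution, num_steps, guidance_scale, seed],
    )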
model.py
CHANGED
@@ -66,6 +66,24 @@ class Model:
         ).to(self.device)
         self.preprocessor = Preprocessor()
 
+        # set up pipe_rgb2x
+        self.pipe_rgb2x = StableDiffusionAOVMatEstPipeline.from_pretrained(
+            "zheng95z/rgb-to-x",
+            torch_dtype=torch.float16,
+        ).to(self.device)
+        self.pipe_rgb2x.scheduler = DDIMScheduler.from_config(
+            self.pipe_rgb2x.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing"
+        )
+        self.pipe_rgb2x.set_progress_bar_config(disable=True)
+
+        # setup blender
+        self.blender_path = '/tmp/blender-3.2.2-linux-x64/blender'
+        if not os.path.exists(self.blender_path):
+            print("Downloading Blender...")
+            subprocess.run(["wget", "https://download.blender.org/release/Blender3.2/blender-3.2.2-linux-x64.tar.xz", "-O", "/tmp/blender-3.2.2-linux-x64.tar.xz"], check=True)
+            subprocess.run(["tar", "-xf", "/tmp/blender-3.2.2-linux-x64.tar.xz", "-C", "/tmp"], check=True)
+            print("Blender downloaded and extracted.")
+
     def load_pipe(self, base_model_id: str, task_name: str) -> DiffusionPipeline:
         if (
             base_model_id == self.base_model_id
@@ -293,18 +311,7 @@ class Model:
         return photo, return_list, prompts
 
         # Load rgb2x pipeline
-        pipe = StableDiffusionAOVMatEstPipeline.from_pretrained(
-            "zheng95z/rgb-to-x",
-            torch_dtype=torch.float16,
-            # cache_dir=os.path.join(current_directory, "model_cache"),
-        ).to(self.pipe.device)
-        pipe.scheduler = DDIMScheduler.from_config(
-            pipe.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing"
-        )
-        pipe.set_progress_bar_config(disable=True)
-        pipe.to(self.pipe.device)
-
-        _, preds, prompts = rgb2x(pipe, torchvision.transforms.PILToTensor()(tex_fine[0]).to(self.pipe.device), inference_step=num_steps, num_samples=num_images)
+        _, preds, prompts = rgb2x(self.pipe_rgb2x, torchvision.transforms.PILToTensor()(tex_fine[0]).to(self.pipe.device), inference_step=num_steps, num_samples=num_images)
 
         base_color_path = image_to_temp_path(tex_fine[0].rotate(90), "base_color")
         normal_map_path = image_to_temp_path(preds[0].rotate(90), "normal_map")
@@ -313,7 +320,6 @@ class Model:
         output_blend_path = os.path.join(os.getcwd(), "output", f"{obj_name}.blend")  # replace with desired output path
         os.makedirs(os.path.dirname(output_blend_path), exist_ok=True)
 
-        #
         def run_blend_generation(
             blender_path,
             generate_script_path,
@@ -331,15 +337,8 @@ class Model:
             subprocess.run(cmd, check=True)
 
         # check if the blender_path exists, if not download
-        blender_path = '/tmp/blender-3.2.2-linux-x64/blender'
-        if not os.path.exists(blender_path):
-            print("Downloading Blender...")
-            subprocess.run(["wget", "https://download.blender.org/release/Blender3.2/blender-3.2.2-linux-x64.tar.xz", "-O", "/tmp/blender-3.2.2-linux-x64.tar.xz"], check=True)
-            subprocess.run(["tar", "-xf", "/tmp/blender-3.2.2-linux-x64.tar.xz", "-C", "/tmp"], check=True)
-            print("Blender downloaded and extracted.")
-
         run_blend_generation(
-            blender_path=blender_path,
+            blender_path=self.blender_path,
             generate_script_path="rgb2x/generate_blend.py",
             obj_path=f"examples/{obj_name}/mesh.obj",  # replace with actual mesh path
             base_color_path=base_color_path,
@@ -351,8 +350,6 @@ class Model:
 
         # gallery
         return [*tex_fine], [preds[1]], [preds[2]], [preds[3]], [output_blend_path]
-        # image
-        # return tex_fine[0], preds[1], preds[2], preds[3], output_blend_path
 
         # @spaces.GPU #[uncomment to use ZeroGPU]
         @torch.inference_mode()
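Note: the commit moves the rgb2x pipeline construction and the Blender bootstrap out of the per-request path and into Model.__init__, so both costs are paid once per process instead of on every generation. A lazy variant of the same one-time setup (a sketch; StableDiffusionAOVMatEstPipeline must be imported from wherever model.py gets it, which this diff does not show):

from functools import lru_cache

import torch
from diffusers import DDIMScheduler

@lru_cache(maxsize=1)
def get_rgb2x_pipe(device: str):
    # First call pays the multi-second weight load; later calls hit the cache.
    pipe = StableDiffusionAOVMatEstPipeline.from_pretrained(
        "zheng95z/rgb-to-x", torch_dtype=torch.float16
    ).to(device)
    pipe.scheduler = DDIMScheduler.from_config(
        pipe.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing"
    )
    pipe.set_progress_bar_config(disable=True)
    return pipe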
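Note: the Blender bootstrap shells out to wget and tar, which can fail on minimal container images. A standard-library-only sketch of the same download-and-unpack step (same URL and paths as the commit):

import os
import tarfile
import urllib.request

BLENDER_URL = "https://download.blender.org/release/Blender3.2/blender-3.2.2-linux-x64.tar.xz"
BLENDER_PATH = "/tmp/blender-3.2.2-linux-x64/blender"

def ensure_blender() -> str:
    # Download and extract Blender once; later calls are no-ops.
    if not os.path.exists(BLENDER_PATH):
        archive = "/tmp/blender-3.2.2-linux-x64.tar.xz"
        urllib.request.urlretrieve(BLENDER_URL, archive)
        with tarfile.open(archive, mode="r:xz") as tar:
            tar.extractall("/tmp")
    return BLENDER_PATH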
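Note: run_blend_generation ends in subprocess.run(cmd, check=True), but these hunks do not show how cmd is assembled. A headless Blender invocation generally takes this shape; the flags after the `--` separator are assumptions about generate_blend.py's CLI, not taken from the repo:

import subprocess

def build_blender_cmd(blender_path, script_path, obj_path, base_color_path, out_path):
    # --background runs Blender without a UI, --python executes the script,
    # and everything after `--` is passed through to the script's own parser.
    return [
        blender_path, "--background",
        "--python", script_path,
        "--",
        "--obj", obj_path,
        "--base-color", base_color_path,
        "--out", out_path,
    ]

cmd = build_blender_cmd(
    "/tmp/blender-3.2.2-linux-x64/blender",
    "rgb2x/generate_blend.py",
    "examples/bunny/mesh.obj",
    "/tmp/base_color.png",
    "output/bunny.blend",
)
subprocess.run(cmd, check=True)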
settings.py
CHANGED
@@ -18,5 +18,5 @@ MAX_SEED = np.iinfo(np.int32).max
 
 # setup CUDA
 # disable the following when deploying to hugging face
-if os.getenv("CUDA_VISIBLE_DEVICES") is None:
-    os.environ["CUDA_VISIBLE_DEVICES"] = "7"
+# if os.getenv("CUDA_VISIBLE_DEVICES") is None:
+#     os.environ["CUDA_VISIBLE_DEVICES"] = "7"
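Note: commenting out the override keeps settings.py from pinning GPU index 7, which does not exist on Hugging Face Spaces hardware. If a local-only default is still wanted, gating it on an explicit flag is one option (IS_LOCAL_DEV is a hypothetical variable, not in the repo):

import os

# Pin a specific GPU only on the local dev box; on Hugging Face Spaces
# the platform manages device visibility itself.
if os.getenv("IS_LOCAL_DEV") == "1" and os.getenv("CUDA_VISIBLE_DEVICES") is None:
    os.environ["CUDA_VISIBLE_DEVICES"] = "7"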