Commit a7b40a9
Parent(s): cc054b5
bug fixes

Files changed:
- app.py (+21 -19)
- requirements.txt (+1 -1)
app.py CHANGED
@@ -108,6 +108,26 @@ def gen_image(input_image, seed, scale, step):
     return Image.fromarray(np_imgs), Image.fromarray(np_xyzs), glb_path#, obj_path
 
 
+def process_and_generate(input_image, background_choice, foreground_ratio, backgroud_color, seed, scale, step):
+    """Process the input image and generate the 3D model in a single function"""
+    if input_image is None:
+        raise gr.Error("No image uploaded!")
+
+    # Preprocess the image
+    processed = preprocess_image(input_image, background_choice, foreground_ratio, backgroud_color)
+
+    # Generate the 3D model
+    pipeline.set_seed(seed)
+    rt_dict = pipeline(processed, scale=scale, step=step)
+    stage1_images = rt_dict["stage1_images"]
+    stage2_images = rt_dict["stage2_images"]
+    np_imgs = np.concatenate(stage1_images, 1)
+    np_xyzs = np.concatenate(stage2_images, 1)
+
+    glb_path = generate3d(model, np_imgs, np_xyzs, args.device)
+    return Image.fromarray(np_imgs), Image.fromarray(np_xyzs), glb_path
+
+# Model initialization code
 parser = argparse.ArgumentParser()
 parser.add_argument(
     "--stage1_config",
@@ -159,6 +179,7 @@ _DESCRIPTION = '''
 * If you find the output unsatisfying, try using different seeds:)
 '''
 
+# Gradio interface
 with gr.Blocks() as demo:
     gr.Markdown("# CRM: Single Image to 3D Textured Mesh with Convolutional Reconstruction Model")
     gr.Markdown(_DESCRIPTION)
@@ -233,22 +254,3 @@ with gr.Blocks() as demo:
     )
 
 demo.queue().launch()
-
-def process_and_generate(input_image, background_choice, foreground_ratio, backgroud_color, seed, scale, step):
-    """Process the input image and generate the 3D model in a single function"""
-    if input_image is None:
-        raise gr.Error("No image uploaded!")
-
-    # Preprocess the image
-    processed = preprocess_image(input_image, background_choice, foreground_ratio, backgroud_color)
-
-    # Generate the 3D model
-    pipeline.set_seed(seed)
-    rt_dict = pipeline(processed, scale=scale, step=step)
-    stage1_images = rt_dict["stage1_images"]
-    stage2_images = rt_dict["stage2_images"]
-    np_imgs = np.concatenate(stage1_images, 1)
-    np_xyzs = np.concatenate(stage2_images, 1)
-
-    glb_path = generate3d(model, np_imgs, np_xyzs, args.device)
-    return Image.fromarray(np_imgs), Image.fromarray(np_xyzs), glb_path
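Note (not part of the commit): the diff shows where process_and_generate now lives, but not how the Blocks UI calls it. Below is a minimal sketch of how such a helper is typically wired up in Gradio; every component name, default value, and the stand-in generate() callback are assumptions, not code from app.py.

# Sketch only: component names/defaults are assumed; generate() stands in for
# the real process_and_generate defined in app.py.
import gradio as gr

def generate(input_image, background_choice, foreground_ratio, backgroud_color, seed, scale, step):
    # Placeholder body; the real app would run preprocessing, the diffusion
    # pipeline, and generate3d(), returning two preview images and a .glb path.
    return input_image, input_image, None

with gr.Blocks() as demo:
    input_image = gr.Image(type="pil", label="Input image")
    background_choice = gr.Radio(["Alpha as mask", "Auto Remove background"],
                                 value="Auto Remove background", label="Background choice")
    foreground_ratio = gr.Slider(0.5, 1.0, value=1.0, label="Foreground ratio")
    backgroud_color = gr.ColorPicker(value="#7F7F7F", label="Background color")
    seed = gr.Number(value=1234, precision=0, label="Seed")
    scale = gr.Slider(1.0, 10.0, value=5.5, label="Guidance scale")
    step = gr.Slider(10, 100, value=50, step=1, label="Sample steps")
    run = gr.Button("Generate")
    stage1_out = gr.Image(label="Stage 1 images", interactive=False)
    stage2_out = gr.Image(label="Stage 2 xyz images", interactive=False)
    mesh_out = gr.Model3D(label="Output mesh (GLB)")
    run.click(
        generate,
        inputs=[input_image, background_choice, foreground_ratio, backgroud_color, seed, scale, step],
        outputs=[stage1_out, stage2_out, mesh_out],
    )

if __name__ == "__main__":
    demo.queue().launch()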
requirements.txt CHANGED
@@ -8,7 +8,7 @@ opencv-contrib-python-headless==4.9.0.80
 opencv-python-headless==4.9.0.80
 xformers
 omegaconf
-gradio==
+gradio==4.44.1
 rembg
 git+https://github.com/NVlabs/nvdiffrast
 pygltflib
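The only requirements.txt change pins Gradio to an exact release (4.44.1) so the Space builds against a known version. A purely illustrative runtime check that the installed package matches the pin:

# Illustrative only: fail fast if the environment does not match the pinned Gradio.
import gradio as gr

if gr.__version__ != "4.44.1":
    raise RuntimeError(f"Expected gradio 4.44.1, found {gr.__version__}")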