lionelgarnier committed
Commit 862266f · Parent: 9e3e77c
debug

app.py CHANGED
@@ -22,8 +22,10 @@ os.makedirs(TMP_DIR, exist_ok=True)
 
 
 def start_session(req: gr.Request):
+    gr.Warning('start start session')
     user_dir = os.path.join(TMP_DIR, str(req.session_hash))
     os.makedirs(user_dir, exist_ok=True)
+    gr.Warning('end start session')
 
 
 def end_session(req: gr.Request):
@@ -45,21 +47,6 @@ def preprocess_image(image: Image.Image) -> Image.Image:
     return processed_image
 
 
-def preprocess_images(images: List[Tuple[Image.Image, str]]) -> List[Image.Image]:
-    """
-    Preprocess a list of input images.
-
-    Args:
-        images (List[Tuple[Image.Image, str]]): The input images.
-
-    Returns:
-        List[Image.Image]: The preprocessed images.
-    """
-    images = [image[0] for image in images]
-    processed_images = [pipeline.preprocess_image(image) for image in images]
-    return processed_images
-
-
 def pack_state(gs: Gaussian, mesh: MeshExtractResult) -> dict:
     return {
         'gaussian': {
@@ -115,7 +102,6 @@ def image_to_3d(
     ss_sampling_steps: int,
     slat_guidance_strength: float,
     slat_sampling_steps: int,
-    multiimage_algo: Literal["multidiffusion", "stochastic"],
     req: gr.Request,
 ) -> Tuple[dict, str]:
     """
@@ -128,7 +114,6 @@ def image_to_3d(
         ss_sampling_steps (int): The number of sampling steps for sparse structure generation.
         slat_guidance_strength (float): The guidance strength for structured latent generation.
         slat_sampling_steps (int): The number of sampling steps for structured latent generation.
-        multiimage_algo (Literal["multidiffusion", "stochastic"]): The algorithm for multi-image generation.
 
     Returns:
         dict: The information of the generated 3D model.
@@ -222,11 +207,8 @@ def split_image(image: Image.Image) -> List[Image.Image]:
 
 with gr.Blocks(delete_cache=(600, 600)) as demo:
     gr.Markdown("""
-    ##
+    ## Text to 3D Asset with Mistral + Flux + Trellis (https://trellis3d.github.io/)
     * Upload an image and click "Generate" to create a 3D asset. If the image has alpha channel, it be used as the mask. Otherwise, we use `rembg` to remove the background.
-    * If you find the generated 3D asset satisfactory, click "Extract GLB" to extract the GLB file and download it.
-
-    ✨New: 1) Experimental multi-image support. 2) Gaussian file extraction.
     """)
 
     with gr.Row():
@@ -244,7 +226,6 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
                 with gr.Row():
                     slat_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=3.0, step=0.1)
                     slat_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
-                multiimage_algo = gr.Radio(["stochastic", "multidiffusion"], label="Multi-image Algorithm", value="stochastic")
 
             generate_btn = gr.Button("Generate")
 
@@ -255,6 +236,7 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
             with gr.Row():
                 extract_glb_btn = gr.Button("Extract GLB", interactive=False)
                 extract_gs_btn = gr.Button("Extract Gaussian", interactive=False)
+
             gr.Markdown("""
                         *NOTE: Gaussian file can be very large (~50MB), it will take a while to display and download.*
                         """)
@@ -287,7 +269,7 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
         outputs=[seed],
     ).then(
         image_to_3d,
-        inputs=[image_prompt, seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps
+        inputs=[image_prompt, seed, ss_guidance_strength, ss_sampling_steps, slat_guidance_strength, slat_sampling_steps],
         outputs=[output_buf, video_output],
     ).then(
         lambda: tuple([gr.Button(interactive=True), gr.Button(interactive=True)]),