Update app.py
app.py
CHANGED
@@ -203,8 +203,6 @@ def clear_fields():
 
 
 if __name__ == "__main__":
-    server_name = "127.0.0.1"
-    server_port = None
     share = True
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
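The retained lines pick the compute device once at startup and hard-code `share = True`; the removed `server_name` and `server_port` are not used by the launch call kept at the bottom of the diff. A minimal, self-contained sketch of the device-selection pattern, where the tensor is purely illustrative and not code from app.py:

import torch

# Prefer a CUDA GPU when one is visible, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Objects created later are moved onto the chosen device, for example:
x = torch.zeros(3, 3).to(device)  # illustrative tensor, not from the app
print(f"running on {device}")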
@@ -267,40 +265,13 @@ if __name__ == "__main__":
         """
     )
 
-        […6 removed lines, content not captured…]
-        </a>
-        <a title="arXiv" href="https://arxiv.org/pdf/2505.23716" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
-            <img src="https://www.obukhov.ai/img/badges/badge-pdf.svg">
-        </a>
-        <a title="Github" href="https://github.com/OpenRobotLab/AnySplat" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
-            <img src="https://img.shields.io/badge/Github-Page-black" alt="badge-github-stars">
-        </a>
-
-        </p>
-        """
-    )
-    with gr.Row():
-        gr.Markdown(
-            """
-            ### Getting Started:
-
-            1. Upload Your Data: Use the "Upload Video" or "Upload Images" buttons on the left to provide your input. Videos will be automatically split into individual frames (one frame per second).
-
-            2. Preview: Your uploaded images will appear in the gallery on the left.
-
-            3. Reconstruct: Click the "Reconstruct" button to start the 3D reconstruction process.
-
-            4. Visualize: The reconstructed 3D Gaussian Splat will appear in the viewer on the right, along with the rendered RGB and depth videos. The trajectory of the rendered video is obtained by interpolating the estimated input image poses.
+    gr.Markdown(
+        """ # AnySplat – Feed-forward 3D Gaussian Splatting from Unconstrained Views
+
+        • Source: [Github](https://github.com/OpenRobotLab/AnySplat)
+        • HF Space by : [@alexandernasa](https://twitter.com/alexandernasa/) """
+    )
 
-            <strong style="color: #0ea5e9;">Please note:</strong> <span style="color: #0ea5e9; font-weight: bold;">The generated splats are large in size, so they may not load successfully in the Hugging Face demo. You can download the .ply file and render it using other viewers, such as [SuperSplat](https://playcanvas.com/supersplat/editor).</span>
-            """
-        )
-
     target_dir_output = gr.Textbox(label="Target Dir", visible=False, value="None")
     is_example = gr.Textbox(label="is_example", visible=False, value="None")
     num_images = gr.Textbox(label="num_images", visible=False, value="None")
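The new header is a single `gr.Markdown` call whose triple-quoted string is plain Markdown, so the `#` heading and the `[text](url)` links render at the top of the page. A stripped-down sketch of just that pattern, mirroring the string added above and assuming nothing else from the app:

import gradio as gr

with gr.Blocks() as demo:
    # The heading and links are ordinary Markdown inside the string.
    gr.Markdown(
        """ # AnySplat – Feed-forward 3D Gaussian Splatting from Unconstrained Views

        • Source: [Github](https://github.com/OpenRobotLab/AnySplat)
        • HF Space by : [@alexandernasa](https://twitter.com/alexandernasa/) """
    )

if __name__ == "__main__":
    demo.launch()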
@@ -309,64 +280,61 @@ if __name__ == "__main__":
     image_type = gr.Textbox(label="image_type", visible=False, value="None")
 
     with gr.Row():
-        with gr.Column(
-        […8 removed lines, content not captured…]
+        with gr.Column():
+            input_video = gr.Video(label="Upload Video", interactive=True)
+            input_images = gr.File(
+                file_count="multiple",
+                label="Upload Images",
+                interactive=True,
+            )
+
+            image_gallery = gr.Gallery(
+                label="Preview",
+                columns=4,
+                height="300px",
+                show_download_button=True,
+                object_fit="contain",
+                preview=True,
+            )
 
-            image_gallery = gr.Gallery(
-                label="Preview",
-                columns=4,
-                height="300px",
-                show_download_button=True,
-                object_fit="contain",
-                preview=True,
-            )
 
-        with gr.Column(
-            with gr.
-        […35 removed lines, content not captured…]
-                scale=1,
-            )
+        with gr.Column():
+            with gr.Column():
+                reconstruction_output = gr.Model3D(
+                    label="3D Reconstructed Gaussian Splat",
+                    height=540,
+                    zoom_speed=0.5,
+                    pan_speed=0.5,
+                    camera_position=[20, 20, 20],
+                )
+
+            with gr.Row():
+                with gr.Row():
+                    rgb_video = gr.Video(
+                        label="RGB Video", interactive=False, autoplay=True
+                    )
+                    depth_video = gr.Video(
+                        label="Depth Video",
+                        interactive=False,
+                        autoplay=True,
+                    )
+
+            with gr.Row():
+                submit_btn = gr.Button(
+                    "Reconstruct", scale=1, variant="primary"
+                )
+                clear_btn = gr.ClearButton(
+                    [
+                        input_video,
+                        input_images,
+                        reconstruction_output,
+                        target_dir_output,
+                        image_gallery,
+                        rgb_video,
+                        depth_video,
+                    ],
+                    scale=1,
+                )
 
     # ---------------------- Examples section ----------------------
 
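Taken together, the added block builds a two-column layout: video/image uploads and a preview gallery on the left; the 3D Gaussian Splat viewer, rendered RGB/depth videos, and the Reconstruct/Clear buttons on the right. Below is a condensed, self-contained sketch of that structure with a stub `reconstruct` handler standing in for the real AnySplat pipeline; the stub, its outputs, and the click wiring are illustrative assumptions, not the repository's code:

import gradio as gr

def reconstruct(video, images):
    # Stub handler: the real app runs AnySplat here and returns a .ply path
    # plus rendered RGB/depth videos; this placeholder returns empty outputs.
    return None, None, None

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():  # left column: inputs and preview
            input_video = gr.Video(label="Upload Video", interactive=True)
            input_images = gr.File(file_count="multiple", label="Upload Images", interactive=True)
            image_gallery = gr.Gallery(
                label="Preview", columns=4, height="300px", object_fit="contain", preview=True
            )

        with gr.Column():  # right column: results and controls
            reconstruction_output = gr.Model3D(label="3D Reconstructed Gaussian Splat", height=540)
            with gr.Row():
                rgb_video = gr.Video(label="RGB Video", interactive=False, autoplay=True)
                depth_video = gr.Video(label="Depth Video", interactive=False, autoplay=True)
            with gr.Row():
                submit_btn = gr.Button("Reconstruct", variant="primary")
                clear_btn = gr.ClearButton(
                    [input_video, input_images, reconstruction_output,
                     image_gallery, rgb_video, depth_video]
                )

    # Wire the button to the stub; the app's own wiring also involves the
    # hidden state textboxes (target_dir_output, is_example, ...) defined above.
    submit_btn.click(
        fn=reconstruct,
        inputs=[input_video, input_images],
        outputs=[reconstruction_output, rgb_video, depth_video],
    )

Appending demo.queue().launch(show_error=True, share=True), as in the final hunk below, would serve this sketch the same way the Space serves the real app.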
@@ -458,8 +426,6 @@ if __name__ == "__main__":
         inputs=[input_video, input_images],
         outputs=[reconstruction_output, target_dir_output, image_gallery],
     )
-
-    # demo.launch(share=share, server_name=server_name, server_port=server_port)
     demo.queue().launch(show_error=True, share=True)
 
 # We thank VGGT for their excellent gradio implementation
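With the commented-out, parameterized `demo.launch(...)` gone, startup reduces to one chained call. A minimal sketch of what that line does, assuming only an empty placeholder app:

import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")  # stand-in content, not from app.py

if __name__ == "__main__":
    # queue() enables request queuing, which helps with long-running GPU jobs;
    # show_error=True surfaces Python exceptions in the browser;
    # share=True asks Gradio for a temporary public *.gradio.live link.
    demo.queue().launch(show_error=True, share=True)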