import streamlit as st
import os
VIDEO_FOLDER = "./src/synthda_falling_realreal/"
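# The folder is expected to hold one pre-generated clip per interpolation weight,
# named videos_generated_<weight>.mp4 (e.g. videos_generated_0.5.mp4)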
# Page config optimized for screen recording
st.set_page_config(layout="wide", initial_sidebar_state="collapsed")
# Custom CSS to center and reduce spacing
st.markdown("""
<style>
    html, body, [class*="css"] {
        padding: 0;
        margin: 0;
        font-size: 14px;
    }
    .block-container {
        padding-top: 1rem;
        padding-bottom: 0rem;
    }
    .video-wrapper {
        display: flex;
        justify-content: center;
    }
    .video-caption {
        text-align: center;
        font-weight: bold;
        margin-bottom: 0.2rem;
    }
</style>
""", unsafe_allow_html=True)
# Compact title and description
st.markdown("""
<h2 style='text-align: center;'>Project SynthDa</h2>
<p style='text-align: center;'>
AutoSynthDa generates synthetic action videos by interpolating between two input motions.<br>
Move the slider to explore the effect of blending.
</p>
<p style='text-align: center;'>
<a href="https://github.com/nvidia/synthda" target="_blank">github.com/nvidia/synthda</a>
</p>
""", unsafe_allow_html=True)
# Slider spanning the full interpolation range, default at the midpoint
weight = st.slider("Interpolation Weight (0 = Left Video, 1 = Right Video)", 0.0, 1.0, 0.5, step=0.1)
# Round so the endpoint comparisons and filename lookup below are exact despite float steps
weight = round(weight, 1)
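# The 0.1 step mirrors the filename granularity of the pre-generated clips,
# so every slider position maps to an existing videos_generated_<weight>.mp4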
# Interpolation text
if weight == 0.0:
    interp_text = "Showing Input Video 1 (no interpolation)"
elif weight == 1.0:
    interp_text = "Showing Input Video 2 (no interpolation)"
else:
    # weight is the blend fraction toward Input Video 2 (0 = Input 1, 1 = Input 2)
    w2 = round(1.0 - weight, 1)
    interp_text = f"{w2:.1f} from Input 1 + {weight:.1f} from Input 2"
st.markdown(f"<p style='text-align:center;'><strong>{interp_text}</strong></p>", unsafe_allow_html=True)
# File paths
filename_interp = f"videos_generated_{weight:.1f}.mp4"
video_interp = os.path.join(VIDEO_FOLDER, filename_interp)
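# The endpoint clips (weights 0.0 and 1.0) double as the two fixed input videos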
video_input1 = os.path.join(VIDEO_FOLDER, "videos_generated_0.0.mp4")
video_input2 = os.path.join(VIDEO_FOLDER, "videos_generated_1.0.mp4")
# 2 columns only
col1, col2 = st.columns([1, 1])
with col1:
    st.markdown("<div class='video-caption'>Input Video 1</div>", unsafe_allow_html=True)
    if os.path.exists(video_input1):
        st.video(video_input1)
    else:
        st.error("Video 1 not found")
with col2:
    st.markdown("<div class='video-caption'>Input Video 2</div>", unsafe_allow_html=True)
    if os.path.exists(video_input2):
        st.video(video_input2)
    else:
        st.error("Video 2 not found")
# Below the two input videos, show the interpolated output
st.markdown("<div class='video-caption'>Interpolated Output</div>", unsafe_allow_html=True)
if os.path.exists(video_interp):
    st.video(video_interp)
else:
    st.error("Interpolated video not found")