import os
from io import BytesIO

import replicate
import requests
import streamlit as st

# Set the Replicate API token. The real token has been redacted here; never
# commit a live token to source control -- prefer exporting it in your shell.
os.environ["REPLICATE_API_TOKEN"] = "<your-replicate-api-token>"


def run_replicate_model(input_params):
    """Run the Stable Video Diffusion model on Replicate and return its output."""
    output = replicate.run(
        "stability-ai/stable-video-diffusion:3f0457e4619daac51203dedb472816fd4af51f3149fa7a9e0b5ffcf1b8172438",
        input=input_params,
    )
    return output


st.title("Stable Video Diffusion")
st.write("Fill in the input parameters and click the button to run the Stable Video Diffusion model.")

# Allow users to upload an image file or provide a URL.
uploaded_file = st.file_uploader("Upload Image File", type=["jpg", "png", "jpeg"])
if uploaded_file is not None:
    input_image = uploaded_file
else:
    input_image = st.text_input("Input Image URL", "https://example.com/s-widua-icjKmXvsO7U-unsplash.jpg")

# Additional parameters. st.beta_expander was removed in Streamlit 1.x;
# st.expander is the current API.
with st.expander("Additional Parameters"):
    cond_aug = st.number_input("Conditioning Augmentation", value=0.02, step=0.01)
    # decoding_t controls how many frames are decoded at a time, not a duration.
    decoding_t = st.number_input("Frames Decoded at a Time", value=7)
    # Strategies listed in the model's input schema on Replicate.
    sizing_strategy = st.selectbox(
        "Sizing Strategy",
        ["maintain_aspect_ratio", "crop_to_16_9", "use_image_dimensions"],
    )
    motion_bucket_id = st.number_input("Motion Bucket ID", value=127)
    frames_per_second = st.number_input("Frames Per Second", value=6)

input_params = {
    "cond_aug": cond_aug,
    "decoding_t": decoding_t,
    "input_image": input_image,
    "video_length": "14_frames_with_svd",
    "sizing_strategy": sizing_strategy,
    "motion_bucket_id": motion_bucket_id,
    "frames_per_second": frames_per_second,
}

if st.button("Run Model"):
    output_result = run_replicate_model(input_params)
    st.write("Output Video:")
    # replicate.run may return a single URL or a list of URLs; normalize to one.
    if isinstance(output_result, list):
        output_result = output_result[0]
    try:
        response = requests.get(output_result)
        response.raise_for_status()
        video_bytes = BytesIO(response.content)
        st.video(video_bytes)
    except Exception:
        st.write("Unable to display the video.")
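
# Usage sketch (assumes this file is saved as app.py and the imported packages
# are installed, e.g. `pip install streamlit replicate requests`):
#
#   export REPLICATE_API_TOKEN=r8_...   # your own token
#   streamlit run app.py
#
# Generation usually takes on the order of a minute; Replicate returns a URL
# to the rendered MP4, which the app downloads and plays back with st.video.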