# COP-GEN-Beta / app.py
import gradio as gr
from src.utils import *
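# NOTE (assumption): the wildcard import is expected to provide the two
# callbacks wired up below (defined in src/utils.py, which is not shown here):
#   sample_shuffle() -> eight values, an (image, active-flag) pair per
#       modality, drawn from a random Major TOM sample;
#   generate_output(s2l1c, s2l2a, s1rtc, dem, num_inference_steps,
#       guidance_scale, seed, use_random_seed) -> four generated thumbnails,
#       one per modality.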
theme = gr.themes.Soft(
    primary_hue="amber",
    secondary_hue="orange",
    font=[gr.themes.GoogleFont("Source Sans 3", weights=(400, 600)), "arial"],
)
with gr.Blocks(theme=theme) as demo:
    with gr.Column(elem_classes="header"):
        gr.Markdown("# 🗾 COP-GEN-Beta: Unified Generative Modelling of COPernicus Imagery Thumbnails")
        gr.Markdown("### Miguel Espinosa, Valerio Marsocci, Yuru Jia, Elliot J. Crowley, Mikolaj Czerkawski")
        gr.Markdown('[[Website](https://miquel-espinosa.github.io/cop-gen-beta/)] [[GitHub](https://github.com/miquel-espinosa/COP-GEN-Beta)] [[Model](https://huggingface.co/mespinosami/COP-GEN-Beta)] [[Dataset](https://huggingface.co/Major-TOM)]')

    with gr.Column(elem_classes="abstract"):
        with gr.Accordion("Abstract", open=False) as abstract:
            gr.Markdown("In remote sensing, multi-modal data from various sensors capturing the same scene offers rich opportunities, but learning a unified representation across these modalities remains a significant challenge. Traditional methods have often been limited to single or dual-modality approaches. In this paper, we introduce COP-GEN-Beta, a generative diffusion model trained on optical, radar, and elevation data from the Major TOM dataset. What sets COP-GEN-Beta apart is its ability to map any subset of modalities to any other, enabling zero-shot modality translation after training. This is achieved through a sequence-based diffusion transformer, where each modality is controlled by its own timestep embedding. We extensively evaluate COP-GEN-Beta on thumbnail images from the Major TOM dataset, demonstrating its effectiveness in generating high-quality samples. Qualitative and quantitative evaluations validate the model's performance, highlighting its potential as a powerful pre-trained model for future remote sensing tasks.")
        with gr.Accordion("Instructions", open=False) as instructions:
            gr.Markdown("1. **Define input**: You can upload your thumbnails manually, or you can get a random sample from Major TOM by clicking the button.")
            gr.Markdown("2. **Select conditions**: Each input image can be used as **conditioning** by selecting its `Active` checkbox. If no checkbox is selected, you will observe **unconditional generation**.")
            gr.Markdown("3. **Generate**: Click the `Generate` button to synthesize the output. The outputs will be shown below.")
    with gr.Column():
        with gr.Row():
            gr.Markdown("## Inputs (Optional)")
            load_button = gr.Button("Load a random sample from Major TOM 🗺", variant="secondary")
        with gr.Row():
            with gr.Column():
                s2l1c_input = gr.Image(label="S2 L1C (Optical - Top of Atmosphere)", interactive=True)
                s2l1c_active = gr.Checkbox(value=False, label="Active", interactive=True)
            with gr.Column():
                s2l2a_input = gr.Image(label="S2 L2A (Optical - Bottom of Atmosphere)", interactive=True)
                s2l2a_active = gr.Checkbox(value=False, label="Active", interactive=True)
            with gr.Column():
                s1rtc_input = gr.Image(label="S1 RTC (SAR)", interactive=True)
                s1rtc_active = gr.Checkbox(value=False, label="Active", interactive=True)
            with gr.Column():
                dem_input = gr.Image(label="DEM (Elevation)", interactive=True)
                dem_active = gr.Checkbox(value=False, label="Active", interactive=True)
        generate_button = gr.Button("Generate", variant="primary")
gr.Markdown("## Outputs")
with gr.Row():
s2l1c_output = gr.Image(label="S2 L1C (Optical - Top of Atmosphere)", interactive=False)
s2l2a_output = gr.Image(label="S2 L2A (Optical - Bottom of Atmosphere)", interactive=False)
s1rtc_output = gr.Image(label="S1 RTC (SAR)", interactive=False)
dem_output = gr.Image(label="DEM (Elevation)", interactive=False)
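        # Diffusion sampling controls: more inference steps trade speed for
        # fidelity, and the guidance scale weights the conditioning signal.
        # When "Random" is checked, the backend presumably draws a fresh seed
        # per run and ignores the "Seed" field.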
with gr.Accordion("Advanced Options", open=False) as advanced_options:
num_inference_steps_slider = gr.Slider(minimum=10, maximum=1000, step=10, value=50, label="Inference Steps")
guidance_scale_slider = gr.Slider(minimum=1.0, maximum=15.0, step=0.5, value=7.5, label="Guidance Scale")
with gr.Row():
seed_number = gr.Number(value=6378, label="Seed")
seed_checkbox = gr.Checkbox(value=True, label="Random")
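    # Event wiring. `sample_shuffle` must return one value per output
    # component below: an image and an "Active" flag for each of the four
    # modalities.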
    load_button.click(
        fn=sample_shuffle,
        outputs=[s2l1c_input, s2l1c_active, s2l2a_input, s2l2a_active,
                 s1rtc_input, s1rtc_active, dem_input, dem_active],
    )
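    # `generate_output` is assumed to accept the four (possibly empty) input
    # images plus the sampling controls, in the order of the `inputs` list,
    # and to return one generated thumbnail per modality.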
    generate_button.click(
        fn=generate_output,
        inputs=[s2l1c_input, s2l2a_input, s1rtc_input, dem_input,
                num_inference_steps_slider, guidance_scale_slider, seed_number, seed_checkbox],
        outputs=[s2l1c_output, s2l2a_output, s1rtc_output, dem_output],
    )
demo.launch()