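"""Gradio demo for wireless model tasks.

Tabs: beam prediction, LoS/NLoS classification, and a side-by-side comparison
of inference results on raw channels vs. embeddings, selected via two sliders
(percentage of training data and task complexity).
"""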
import gradio as gr
import os
from PIL import Image

# Paths to the image folders
RAW_PATH = os.path.join("images", "raw")
EMBEDDINGS_PATH = os.path.join("images", "embeddings")

# Discrete values selectable via the sliders
percentage_values = [10, 30, 50, 70, 100]
complexity_values = [16, 32]
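# Expected on-disk layout (inferred from the path construction below; adjust
# if your asset names differ):
#   images/raw/percentage_{p}_complexity_{c}.png
#   images/embeddings/percentage_{p}_complexity_{c}.png
# for every p in percentage_values and c in complexity_values.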
# Load and display the pair of result images matching the slider selection
def display_images(percentage_idx, complexity_idx):
    # Map the slider indices to the actual values (cast to int in case the
    # slider delivers a float)
    percentage = percentage_values[int(percentage_idx)]
    complexity = complexity_values[int(complexity_idx)]

    # Generate the paths to the images
    raw_image_path = os.path.join(RAW_PATH, f"percentage_{percentage}_complexity_{complexity}.png")
    embeddings_image_path = os.path.join(EMBEDDINGS_PATH, f"percentage_{percentage}_complexity_{complexity}.png")

    # Load the images using PIL
    raw_image = Image.open(raw_image_path)
    embeddings_image = Image.open(embeddings_image_path)

    # Return the loaded images
    return raw_image, embeddings_image
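# Example: percentage_idx=1, complexity_idx=0 resolves to
#   images/raw/percentage_30_complexity_16.png
#   images/embeddings/percentage_30_complexity_16.png
# Image.open raises FileNotFoundError if a selected combination has no file.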
# Placeholder for the beam prediction model
def beam_prediction(input_data):
    # Add your beam prediction logic here
    return {"Prediction": "Beam X", "Confidence": "95%"}


# Placeholder for the LoS/NLoS classification model
def los_nlos_classification(input_data):
    # Add your LoS/NLoS classification logic here
    return {"Classification": "LoS", "Confidence": "98%"}
# Define the Gradio interface
with gr.Blocks(css="""
    .vertical-slider input[type=range] {
        writing-mode: bt-lr;                  /* IE */
        -webkit-appearance: slider-vertical;  /* WebKit */
        width: 8px;
        height: 200px;
    }
    .slider-container {
        display: inline-block;
        margin-right: 50px;
        text-align: center;
    }
""") as demo:
gr.Markdown("# Wireless Model Tasks") | |
# Tabs for Beam Prediction and LoS/NLoS Classification | |
with gr.Tab("Beam Prediction Task"): | |
gr.Markdown("### Beam Prediction Task") | |
beam_input = gr.Textbox(label="Enter Input Data for Beam Prediction", placeholder="Enter data here...") | |
beam_button = gr.Button("Predict Beam") | |
beam_output = gr.JSON(label="Beam Prediction Result") | |
beam_button.click(beam_prediction, inputs=beam_input, outputs=beam_output) | |
with gr.Tab("LoS/NLoS Classification Task"): | |
gr.Markdown("### LoS/NLoS Classification Task") | |
los_input = gr.Textbox(label="Enter Input Data for LoS/NLoS Classification", placeholder="Enter data here...") | |
los_button = gr.Button("Classify") | |
los_output = gr.JSON(label="LoS/NLoS Classification Result") | |
los_button.click(los_nlos_classification, inputs=los_input, outputs=los_output) | |
with gr.Tab("Raw vs. Embeddings Inference Results"): | |
gr.Markdown("Use the sliders to adjust the percentage of data for training and task complexity.") | |
# Layout for vertical side-by-side sliders (using CSS to rotate sliders) | |
with gr.Row(): | |
# Column for percentage slider | |
with gr.Column(elem_id="slider-container"): | |
gr.Markdown("Percentage of Data for Training") | |
percentage_slider = gr.Slider(minimum=0, maximum=4, step=1, value=0, interactive=True, elem_id="vertical-slider") | |
# Column for complexity slider | |
with gr.Column(elem_id="slider-container"): | |
gr.Markdown("Task Complexity") | |
complexity_slider = gr.Slider(minimum=0, maximum=1, step=1, value=0, interactive=True, elem_id="vertical-slider") | |
# Outputs (display the images side by side and set a smaller size for the images) | |
with gr.Row(): | |
raw_img = gr.Image(label="Raw Channels", type="pil", width=300, height=300, interactive=False) # Smaller image size | |
embeddings_img = gr.Image(label="Embeddings", type="pil", width=300, height=300, interactive=False) # Smaller image size | |
# Trigger image updates when sliders change | |
percentage_slider.change(fn=display_images, inputs=[percentage_slider, complexity_slider], outputs=[raw_img, embeddings_img]) | |
complexity_slider.change(fn=display_images, inputs=[percentage_slider, complexity_slider], outputs=[raw_img, embeddings_img]) | |
# Launch the app
if __name__ == "__main__":
    demo.launch()