thay thế gr.Box() bằng gr.Row() và gr.Column() (replace gr.Box() with gr.Row() and gr.Column())
Browse files
app.py
CHANGED
|
@@ -20,7 +20,6 @@ from transforms import (
 20  import gradio as gr
 21  from huggingface_hub import hf_hub_download
 22
 23 -                                  (removed: blank line)
 24  # Device on which to run the model
 25  # Set to cuda to load on GPU
 26  device = "cpu"
|
@@ -47,9 +46,7 @@ for k, v in imagenet_classnames.items():
 47  def get_index(num_frames, num_segments=8):
 48      seg_size = float(num_frames - 1) / num_segments
 49      start = int(seg_size / 2)
 50 -    offsets = np.array([
 51 -        start + int(np.round(seg_size * idx)) for idx in range(num_segments)
 52 -    ])
 53      return offsets
 54
 55
|
@@ -134,28 +131,26 @@ with demo:
 134      )
 135
 136      with gr.Tab("Video"):
 137 -        with gr.Box()              [rest of line truncated in extraction]
 138 -        with gr.                   [line truncated in extraction]
 139 -        with gr.                   [line truncated in extraction]
 140 -                                   [removed line, content lost in extraction]
 141 -                                   [removed line, content lost in extraction]
 142 -                                   [removed line, content lost in extraction]
 143 -                                   [removed line, content lost in extraction]
 144 -                                   [removed line, content lost in extraction]
 145 -        label_video = gr.Label(num_top_classes=5)
 146      with gr.Row():
 147          example_videos = gr.Dataset(components=[input_video], samples=[['./videos/hitting_baseball.mp4'], ['./videos/hoverboarding.mp4'], ['./videos/yoga.mp4']])
 148
 149      with gr.Tab("Image"):
 150 -        with gr.Box()              [rest of line truncated in extraction]
 151 -        with gr.                   [line truncated in extraction]
 152 -        with gr.                   [line truncated in extraction]
 153 -                                   [removed line, content lost in extraction]
 154 -                                   [removed line, content lost in extraction]
 155 -                                   [removed line, content lost in extraction]
 156 -                                   [removed line, content lost in extraction]
 157 -                                   [removed line, content lost in extraction]
 158 -        label_image = gr.Label(num_top_classes=5)
 159      with gr.Row():
 160          example_images = gr.Dataset(components=[input_image], samples=[['./images/cat.png'], ['./images/dog.png'], ['./images/panda.png']])
 161
@@ -170,4 +165,4 @@ with demo:
 170      submit_image_button.click(fn=inference_image, inputs=input_image, outputs=label_image)
 171      example_images.click(fn=set_example_image, inputs=example_images, outputs=example_images.components)
 172
 173 -    demo.launch(enable_queue=True)
|
|
|
(new file, lines 20-25 — unchanged context after the removed blank line)
 20  import gradio as gr
 21  from huggingface_hub import hf_hub_download
 22
 23  # Device on which to run the model
 24  # Set to cuda to load on GPU
 25  device = "cpu"
|
|
|
(new file, lines 46-52 — three-line np.array call collapsed to one line)
 46  def get_index(num_frames, num_segments=8):
 47      seg_size = float(num_frames - 1) / num_segments
 48      start = int(seg_size / 2)
 49 +    offsets = np.array([start + int(np.round(seg_size * idx)) for idx in range(num_segments)])
 50      return offsets
 51
 52
|
|
|
|
(new file, lines 131-144 — gr.Box() replaced by nested gr.Row()/gr.Column(); indentation reconstructed from Gradio `with`-block semantics, lost in extraction)
 131      )
 132
 133      with gr.Tab("Video"):
 134 +        with gr.Row():  # Replace gr.Box() with gr.Row()
 135 +            with gr.Column():  # Inside a column layout
 136 +                with gr.Row():  # For individual row layout within a column
 137 +                    input_video = gr.Video(label='Input Video').style(height=360)
 138 +                with gr.Row():
 139 +                    submit_video_button = gr.Button('Submit')
 140 +            with gr.Column():
 141 +                label_video = gr.Label(num_top_classes=5)
 142      with gr.Row():
 143          example_videos = gr.Dataset(components=[input_video], samples=[['./videos/hitting_baseball.mp4'], ['./videos/hoverboarding.mp4'], ['./videos/yoga.mp4']])
 144
|
| 145 |
with gr.Tab("Image"):
|
| 146 |
+
with gr.Row(): # Replace gr.Box() with gr.Row()
|
| 147 |
+
with gr.Column(): # Inside a column layout
|
| 148 |
+
with gr.Row():
|
| 149 |
+
input_image = gr.Image(label='Input Image', type='pil').style(height=360)
|
| 150 |
+
with gr.Row():
|
| 151 |
+
submit_image_button = gr.Button('Submit')
|
| 152 |
+
with gr.Column():
|
| 153 |
+
label_image = gr.Label(num_top_classes=5)
|
|
|
|
| 154 |
with gr.Row():
|
| 155 |
example_images = gr.Dataset(components=[input_image], samples=[['./images/cat.png'], ['./images/dog.png'], ['./images/panda.png']])
|
| 156 |
|
|
|
|
(new file, lines 165-168 — demo.launch re-added, presumably with changed indentation)
 165  submit_image_button.click(fn=inference_image, inputs=input_image, outputs=label_image)
 166  example_images.click(fn=set_example_image, inputs=example_images, outputs=example_images.components)
 167
 168 +    demo.launch(enable_queue=True)