Upload app.py
Browse files
app.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import numpy as np
|
| 4 |
+
import gradio as gr
|
| 5 |
+
import random
|
| 6 |
+
|
| 7 |
+
def flip_text(x):
    """Return *x* reversed.

    Works on any sliceable sequence; in this app it is the callback that
    reverses the contents of a Textbox.
    """
    return x[slice(None, None, -1)]
|
| 9 |
+
|
| 10 |
+
def flip_image(x):
    """Mirror an image left-to-right.

    Equivalent to ``np.fliplr``: reverses the order of columns (axis 1) of a
    2-D+ array. Used as the image-flip callback.
    """
    return np.flip(x, axis=1)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Synthetic 25-row demo dataset feeding the scatter and line plots below.
# Columns are drawn from the global RNG streams, so values differ per run.
_ETHNICITIES = ("white", "black", "asian")
df = pd.DataFrame(
    {
        "Year": np.random.randint(2000, 2024, size=25),
        "Reviews": np.random.randint(120, 320, size=25),
        "age": np.random.randint(18, 30, size=25),
        "ethnicity": random.choices(_ETHNICITIES, k=25),
    }
)
|
| 20 |
+
|
| 21 |
+
# Warm yellow/amber Soft theme with compact spacing and large corner radii.
_THEME_OPTIONS = {
    "primary_hue": "yellow",
    "secondary_hue": "amber",
    "spacing_size": "sm",
    "radius_size": "lg",
}
theme = gr.themes.Soft(**_THEME_OPTIONS)
|
| 28 |
+
|
| 29 |
+
with gr.Blocks(theme=theme) as demo:
    # Overview plots built from the module-level demo dataframe.
    gr.ScatterPlot(df, x="Reviews", y="age", color="age")
    gr.LinePlot(df, x="Year", y="Reviews")
    # FIX: the original statement ended with a stray comma, wrapping the
    # Slider in an accidental one-element tuple. The component is created
    # either way (Blocks registers it on construction), so behavior is
    # unchanged, but the tuple served no purpose.
    gr.Slider(2000, 2024, value=2024, label="Count", info="Choose between 2000 and 2024")
    gr.Markdown("Flip text or image files using this demo.")

    with gr.Tab("User Interface"):
        # NOTE(review): these three names are re-assigned twice more below, so
        # this tab's widgets are never wired to any callback — presumably
        # placeholder UI. Confirm before removing or renaming.
        text_input = gr.Textbox()
        text_output = gr.Textbox()
        text_button = gr.Button("Flip")

    with gr.Tab("Testing Area"):
        with gr.Row():
            image_input = gr.Image()
            image_output = gr.Image()
        image_button = gr.Button("Flip")

    # NOTE(review): gr.Row takes no positional title argument in current
    # Gradio releases — "Flip Text" may be ignored or raise depending on the
    # installed version. Confirm the intended container/label.
    with gr.Row("Flip Text"):
        text_input = gr.Textbox()
        text_output = gr.Textbox()
        text_button = gr.Button("Flip")

    with gr.Column(visible=False) as output_col:
        # These final assignments win: the click wiring below binds the
        # widgets inside this (initially hidden) column.
        text_input = gr.Textbox()
        text_output = gr.Textbox()
        text_button = gr.Button("Flip")

    with gr.Accordion("Open for More!", open=False):
        gr.Markdown("Look at me...")
        temp_slider = gr.Slider(
            0, 1,
            value=0.1,
            step=0.1,
            interactive=True,
            label="Slide me",
        )

    # Wire the (last-assigned) flip widgets to their callbacks.
    text_button.click(flip_text, inputs=text_input, outputs=text_output)
    image_button.click(flip_image, inputs=image_input, outputs=image_output)

    # Dynamic track list: track_count drives the @gr.render block below; the
    # button bumps the count, which triggers a re-render.
    track_count = gr.State(1)
    add_track_btn = gr.Button("Add Track")

    add_track_btn.click(lambda count: count + 1, track_count, track_count)

    @gr.render(inputs=track_count)
    def render_tracks(count):
        """Render one name/audio/volume panel per track whenever *count* changes."""
        audios = []
        volumes = []
        with gr.Row():
            for i in range(count):
                with gr.Column(variant="panel", min_width=200):
                    gr.Textbox(placeholder="Data Name", key=f"name-{i}", show_label=False)
                    track_audio = gr.Audio(label=f"Data {i}", key=f"track-{i}")
                    track_volume = gr.Slider(0, 100, value=100, label="Volume", key=f"volume-{i}")
                    audios.append(track_audio)
                    volumes.append(track_volume)

        def merge(data):
            """Mix all rendered tracks, scaling each by its volume slider.

            *data* maps each component to its current value; audio values are
            ``(sample_rate, ndarray)`` pairs. Tracks of unequal length are
            trimmed to the shortest before summing. Returns ``(sr, mix)``;
            ``sr`` is the sample rate of the last track (assumes all tracks
            share one rate — TODO confirm).
            """
            sr, output = None, None
            for audio, volume in zip(audios, volumes):
                sr, audio_val = data[audio]
                volume_val = data[volume]
                final_track = audio_val * (volume_val / 100)
                if output is None:
                    output = final_track
                else:
                    # Trim both signals to a common shape before summing.
                    min_shape = tuple(min(s1, s2) for s1, s2 in zip(output.shape, final_track.shape))
                    trimmed_output = output[:min_shape[0], ...][:, :min_shape[1], ...] if output.ndim > 1 else output[:min_shape[0]]
                    trimmed_final = final_track[:min_shape[0], ...][:, :min_shape[1], ...] if final_track.ndim > 1 else final_track[:min_shape[0]]
                    # FIX: was `output += trimmed_output + trimmed_final`, which
                    # double-counted the running mix (output + output + track)
                    # and put the *untrimmed* output on the left, crashing on
                    # length mismatches. The new running mix is the trimmed sum.
                    output = trimmed_output + trimmed_final
            return (sr, output)

        # merge_btn/output_audio are created after this function in source
        # order, but @gr.render bodies execute at app load — after the Blocks
        # context has finished building — so both exist by the time this runs.
        merge_btn.click(merge, set(audios + volumes), output_audio)

    merge_btn = gr.Button("Merge Tracks")
    output_audio = gr.Audio(label="Output", interactive=False)


demo.launch()
|