# AudioFusion / app.py
import os
import secrets
import threading

import gradio as gr
from telethon.sync import TelegramClient, events, Button

from AudioFusion import Fusion
# Telegram bot credentials, supplied via environment variables
# (Space secrets); each is None when unset.
API_ID = os.environ.get("API_ID")
API_HASH = os.environ.get("API_HASH")
BOT_TOKEN = os.environ.get("BOT_TOKEN")
def process_audio(input_file,
                  effect_8d, pan_boundary, jump_percentage, time_l_to_r, volume_multiplier,
                  effect_slowed, speed_multiplier,
                  effect_reverb, room_size, damping, width, wet_level, dry_level):
    """Apply the selected AudioFusion effects to an uploaded audio file.

    Parameters mirror the Gradio controls: ``effect_8d``, ``effect_slowed``
    and ``effect_reverb`` are booleans selecting which effects run; the
    remaining values are the corresponding tuning knobs.

    Returns whatever ``Fusion.saveSound`` produces for the processed sound
    (the output file handed back to the Gradio Audio component).
    """
    sound = Fusion.loadSound(input_file)
    # NOTE(review): the upload is deleted before processing finishes; if an
    # effect raises, the input is already gone. Behavior preserved as-is.
    os.remove(os.path.abspath(input_file))

    applied = []
    if effect_8d:
        # The UI exposes time_l_to_r in seconds; effect8D expects milliseconds.
        sound = Fusion.effect8D(sound, pan_boundary, jump_percentage,
                                time_l_to_r * 1000, volume_multiplier)
        applied.append("8d")
    if effect_slowed:
        sound = Fusion.effectSlowed(sound, speed_multiplier)
        applied.append("Slowed")
    if effect_reverb:
        # Random hex token keeps Fusion's intermediate file name unique per request.
        sound = Fusion.effectReverb(sound, room_size, damping, width,
                                    wet_level, dry_level, str(secrets.token_hex(5)))
        applied.append("Reverb")

    # e.g. "/tmp/song.mp3 8d + Reverb - By AudioFusion"
    output_file = f"{input_file} {' + '.join(applied)} - By AudioFusion"
    return Fusion.saveSound(sound, output_file)
# Markdown/HTML banner rendered above the controls in the Gradio UI.
before_text = """<div align="center">
<h1>AudioFusion</h1>
<i>Add a touch of uniqueness with various customizable effects like slowed and reverb.</i>
</div>
<hr>"""

# Markdown footer rendered below the controls (todo list and credits).
after_text = """<hr>
PR in [github](https://github.com/MineisZarox/AudioFusion) repository beta branch are always welcome.
<h3>Todo</h3>
\# Acapella Extractor<br>
\# Karoke Maker<br>
\# Bass Booster<br>
\# Volume Booster<br>
<h3>Inspiration & Credits</h3>
- Special thanks to [Jiaaro](https://github.com/jiaaro) for pydub. AudioFusion is mainly wrapped around pydub
- My Soundscapes of Serenity - [Because](https://t.me/bcuzwhynot)
"""
# Gradio UI: one tab per effect, each with a checkbox that reveals the
# effect's tuning sliders when ticked.
with gr.Blocks(title="Audio Fusion") as iface:
    gr.Markdown(before_text)
    input_audio = gr.Audio(label="Upload your music file", type="filepath")

    # Slowed effect and its arguments
    with gr.Tab("Slowed Effect"):
        speed_check = gr.Checkbox(label="Apply slowed effect")
        with gr.Column(visible=False) as se_col:
            speed = gr.Slider(label="Speed Multiplier", minimum=0.1, maximum=4, step=0.05, value=0.90)

    # Reverb effect and its arguments
    with gr.Tab("Reverb Effect"):
        reverb_check = gr.Checkbox(label="Apply reverb effect")
        with gr.Column(visible=False) as re_col:
            with gr.Row():
                room = gr.Slider(label="Room Size", minimum=0, maximum=1, step=0.01, value=0.8)
                damp = gr.Slider(label="Damping", minimum=0, maximum=1, step=0.05, value=1)
                width = gr.Slider(label="Width", minimum=0, maximum=1, step=0.05, value=0.5)
            with gr.Row():
                wet = gr.Slider(label="Wet Level", minimum=0, maximum=1, step=0.05, value=0.3)
                dry = gr.Slider(label="Dry Level", minimum=0, maximum=1, step=0.05, value=0.8)

    # 8D effect and its arguments
    with gr.Tab("8d Effect"):
        dimension_check = gr.Checkbox(label="Apply 8D effect")
        with gr.Column(visible=False) as di_col:
            with gr.Row():
                pan = gr.Slider(label="Pan Boundary", minimum=0, maximum=100, value=90)
                jump = gr.Slider(label="Jump Percentage", minimum=1, maximum=100, value=5)
            with gr.Row():
                time = gr.Slider(label="Time L to R (s)", minimum=1, maximum=30, value=10)
                volx = gr.Slider(label="Volume Multiplier", minimum=1, maximum=20, value=6)

    # =====================================================
    def _visibility(col):
        """Build a change-handler that shows/hides `col` to mirror a checkbox.

        Replaces three copy-pasted callbacks (di_v/se_v/re_v) with one
        factory; each returned handler behaves identically to the original.
        """
        def _toggle(checked):
            return {col: gr.Column(visible=bool(checked))}
        return _toggle

    dimension_check.change(_visibility(di_col), inputs=[dimension_check], outputs=[di_col])
    speed_check.change(_visibility(se_col), inputs=[speed_check], outputs=[se_col])
    reverb_check.change(_visibility(re_col), inputs=[reverb_check], outputs=[re_col])
    # =====================================================

    with gr.Row():
        btnClear = gr.ClearButton(components=[dimension_check, speed_check, reverb_check])
        btnRun = gr.Button("Run", size="sm", variant="primary")

    # Argument order must match process_audio's signature.
    inputs = [input_audio,
              dimension_check, pan, jump, time, volx,
              speed_check, speed,
              reverb_check, room, damp, width, wet, dry]
    output = [gr.Audio(label="Download processed music", type="filepath")]
    gr.Markdown(after_text)

    btnClear.add(components=output)
    btnRun.click(fn=process_audio, inputs=inputs, outputs=output, api_name="AudioFusion")
# Telethon client; the session is persisted to ./session_name.session.
client = TelegramClient('session_name', API_ID, API_HASH)


@client.on(events.NewMessage(pattern='/start'))
async def start_handler(event):
    # Reply to /start with a static greeting.
    await event.respond("Welcome to the bot!")


@client.on(events.NewMessage(pattern='/broadcast'))
async def broadcast_handler(event):
    # NOTE(review): despite the name, this only replies in the originating
    # chat with a fixed message — it does not fan out to other chats.
    message_to_broadcast = "This is a broadcast message!"
    await event.respond(message_to_broadcast)
async def initiation():
    # Announce startup in a hard-coded control channel with a URL button.
    await client.send_message(-1001662130485, "**Hugging is Running.**", buttons=[(Button.url("Execal", "https://t.me/execal"),)],)
if __name__ == '__main__':
    # Authenticate the Telethon client and post the startup announcement.
    client.start(bot_token=BOT_TOKEN)
    client.loop.run_until_complete(initiation())
    print("Bot started successfully")  # fixed typo: "succefully"

    # Run the Telegram bot and the Gradio UI concurrently; both calls block.
    thread_one = threading.Thread(target=client.run_until_disconnected)
    thread_two = threading.Thread(target=iface.launch, kwargs={"share": False})

    # Start the threads (stray `(share=False)` trailer removed — it was a
    # SyntaxError in the original file).
    thread_one.start()
    thread_two.start()