import os
import secrets
import threading
import gradio as gr
from AudioFusion import Fusion

from telethon.sync import TelegramClient, events, Button

API_ID = os.environ.get("API_ID")
API_HASH = os.environ.get("API_HASH")
BOT_TOKEN = os.environ.get("BOT_TOKEN")
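# These credentials are assumed to be supplied through the host environment
# (e.g. Space/VM secrets); the bot cannot start without them, and API_ID is
# expected to be the numeric app ID from my.telegram.org.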

def process_audio(input_file,
    effect_8d, pan_boundary, jump_percentage, time_l_to_r, volume_multiplier,
    effect_slowed, speed_multiplier,
    effect_reverb, room_size, damping, width, wet_level, dry_level
):
    # Load the sound file
    sound = Fusion.loadSound(input_file)
    os.remove(os.path.abspath(input_file))
    effects_str = []
    
    # Apply effects based on user choices
    if effect_8d:
        sound = Fusion.effect8D(sound, pan_boundary, jump_percentage, time_l_to_r*1000, volume_multiplier)
        effects_str.append("8d")
    if effect_slowed:
        sound = Fusion.effectSlowed(sound, speed_multiplier)
        effects_str.append("Slowed")
    if effect_reverb:
        sound = Fusion.effectReverb(sound, room_size, damping, width, wet_level, dry_level, str(secrets.token_hex(5)))
        effects_str.append("Reverb")

    output_file = f"{input_file} {' + '.join(effects_str)} - By AudioFusion"
    
    # Save the processed sound and return the output file
    output = Fusion.saveSound(sound, output_file)
    return output
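# Sketch of calling process_audio directly (outside the UI), assuming a local
# "song.mp3" and the default slider values used in the Blocks layout below:
#
#   output_path = process_audio(
#       "song.mp3",
#       True, 90, 5, 10, 6,             # 8D: pan boundary, jump %, L-to-R time (s), volume
#       True, 0.90,                     # slowed: speed multiplier
#       True, 0.8, 1, 0.5, 0.3, 0.8,    # reverb: room, damp, width, wet, dry
#   )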


before_text = """<div align="center">
<h1>AudioFusion</h1>
<i>Add a touch of uniqueness with various customizable effects like slowed and reverb.</i>
</div>
<hr>"""

after_text = """<hr>
PRs to the beta branch of the [github](https://github.com/MineisZarox/AudioFusion) repository are always welcome.

<h3>Todo</h3>

\# Acapella Extractor<br>
\# Karaoke Maker<br>
\# Bass Booster<br>
\# Volume Booster<br>


<h3>Inspiration & Credits</h3>

- Special thanks to [Jiaaro](https://github.com/jiaaro) for pydub. AudioFusion is mainly wrapped around pydub.

- My Soundscapes of Serenity - [Because](https://t.me/bcuzwhynot)
"""

with gr.Blocks(title="Audio Fusion") as iface:
    gr.Markdown(before_text)
    input_audio = gr.Audio(label="Upload your music file", type="filepath")
    
    # Slowed Effect and its arguments
    with gr.Tab("Slowed Effect"):
        speed_check = gr.Checkbox(label="Apply slowed effect")
        with gr.Column(visible=False) as se_col:
            speed = gr.Slider(label="Speed Multiplier", minimum=0.1, maximum=4, step=0.05, value=0.90)
    
    # Reverb Effect and its arguments
    with gr.Tab("Reverb Effect"):
        reverb_check = gr.Checkbox(label="Apply reverb effect")
        with gr.Column(visible=False) as re_col:
            with gr.Row():
                room = gr.Slider(label="Room Size", minimum=0, maximum=1, step=0.01, value=0.8)
                damp = gr.Slider(label="Damping", minimum=0, maximum=1, step=0.05, value=1)
            width = gr.Slider(label="Width", minimum=0, maximum=1, step=0.05, value=0.5)
            with gr.Row():
                wet = gr.Slider(label="Wet Level", minimum=0, maximum=1, step=0.05, value=0.3)
                dry = gr.Slider(label="Dry Level", minimum=0, maximum=1, step=0.05, value=0.8)
    
    # 8d Effect and its arguments
    with gr.Tab("8d Effect"):
        dimension_check = gr.Checkbox(label="Apply 8D effect")
        with gr.Column(visible=False) as di_col:
            with gr.Row():
                pan = gr.Slider(label="Pan Boundary", minimum=0, maximum=100, value=90)
                jump = gr.Slider(label="Jump Percentage", minimum=1, maximum=100, value=5)
            with gr.Row():
                time = gr.Slider(label="Time L to R (s)", minimum=1, maximum=30, value=10)
                volx = gr.Slider(label="Volume Multiplier", minimum=1, maximum=20, value=6)

    # ==== Show each effect's settings column only while its checkbox is ticked ====
    def di_v(check):
        return {di_col: gr.Column(visible=check)}

    def se_v(check):
        return {se_col: gr.Column(visible=check)}

    def re_v(check):
        return {re_col: gr.Column(visible=check)}
        

    dimension_check.change(di_v, inputs=[dimension_check], outputs=[di_col])
    speed_check.change(se_v, inputs=[speed_check], outputs=[se_col])
    reverb_check.change(re_v, inputs=[reverb_check], outputs=[re_col])
    # =====================================================

    with gr.Row():
        btnClear = gr.ClearButton(components=[dimension_check, speed_check, reverb_check])
        btnRun = gr.Button("Run", size="sm", variant="primary")
    
    inputs = [input_audio, dimension_check, pan, jump, time, volx, speed_check, speed, reverb_check, room, damp, width, wet, dry]
    output = [gr.Audio(label="Download processed music", type="filepath")]

    gr.Markdown(after_text)

    btnClear.add(components=output)
    btnRun.click(fn=process_audio, inputs=inputs, outputs=output, api_name="AudioFusion")



client = TelegramClient('session_name', API_ID, API_HASH)

@client.on(events.NewMessage(pattern='/start'))
async def start_handler(event):
    await event.respond("Welcome to the bot!")

@client.on(events.NewMessage(pattern='/broadcast'))
async def broadcast_handler(event):
    message_to_broadcast = "This is a broadcast message!"
    await event.respond(message_to_broadcast)


async def initiation():
    await client.send_message(-1001662130485, "**Hugging is Running.**", buttons=[(Button.url("Execal", "https://t.me/execal"),)],)

if __name__ == '__main__':
    client.start(bot_token=BOT_TOKEN)
    client.loop.run_until_complete(initiation())
    print("Bot started succefully")
    thread_one = threading.Thread(target=client.run_until_disconnected)
    thread_two = threading.Thread(target=iface.launch, kwargs={"share": False})
    
    # Start the threads
    thread_one.start()
    thread_two.start()
    # Keep the main thread alive until both workers finish
    thread_one.join()
    thread_two.join()