import os
import re
import shutil

import torch
import gradio as gr
from pydub import AudioSegment
from openvoice import se_extractor
from openvoice.api import ToneColorConverter
from elevenlabs import voices, generate, set_api_key, UnauthenticatedRateLimitError

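# Load the OpenVoice V2 tone-color converter (GPU if available, otherwise CPU).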
ckpt_converter = 'checkpoints_v2/converter'
device = "cuda:0" if torch.cuda.is_available() else "cpu"

tone_color_converter = ToneColorConverter(f'{ckpt_converter}/config.json', device=device)
tone_color_converter.load_ckpt(f'{ckpt_converter}/checkpoint.pth')


# Extract the source speaker embedding once from the reference clip "11labs.mp3";
# it is reused as the source tone color for every conversion below.
base_speaker = "11labs.mp3"
source_se, audio_name = se_extractor.get_se(base_speaker, tone_color_converter, vad=True)



def generate_voice(text, voice_name):
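    """Synthesize `text` (capped at 1,000 characters) with the chosen ElevenLabs voice and save it to output.mp3."""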
    try:
        audio = generate(
            text[:1000], # Limit to 1000 characters
            voice=voice_name, 
            model="eleven_multilingual_v2"
        )
        with open("output.mp3", mode='wb') as f:
            f.write(audio)
        return "output.mp3"

    except UnauthenticatedRateLimitError:
        raise Exception("Thanks for trying out ElevenLabs TTS! You've reached the free-tier limit. Please provide an API key to continue.")
    except Exception as e:
        # Surface the original error message in the Gradio UI.
        raise Exception(e) from e


def convert(api_key, text, tgt, voice, save_path):
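    """Generate ElevenLabs speech for `text` and convert its tone color to match the reference audio `tgt`."""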
    # Register the ElevenLabs API key for this session.
    set_api_key(api_key)
    src_path = generate_voice(text, voice)
    reference_speaker = tgt
    target_se, audio_name = se_extractor.get_se(reference_speaker, tone_color_converter, vad=True)

    encode_message = "@MyShell"
    tone_color_converter.convert(
        audio_src_path=src_path, 
        src_se=source_se, 
        tgt_se=target_se, 
        output_path=f"output/{save_path}.wav",
        message=encode_message)
    
    return f"output/{save_path}.wav"

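# A single SRT cue: numeric index, start/end times, and subtitle text.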
class subtitle:
    def __init__(self, index: int, start_time, end_time, text: str):
        self.index = int(index)
        self.start_time = start_time
        self.end_time = end_time
        self.text = text.strip()

    def normalize(self, ntype: str, fps=30):
        # Convert "HH:MM:SS" style timestamps into seconds (float).
        if ntype == "prcsv":
            h, m, s, fs = (self.start_time.replace(';', ':')).split(":")  # last field is frames
            self.start_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 2)
            h, m, s, fs = (self.end_time.replace(';', ':')).split(":")
            self.end_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 2)
        elif ntype == "srt":
            h, m, s = self.start_time.split(":")
            s = s.replace(",", ".")
            self.start_time = int(h) * 3600 + int(m) * 60 + round(float(s), 2)
            h, m, s = self.end_time.split(":")
            s = s.replace(",", ".")
            self.end_time = int(h) * 3600 + int(m) * 60 + round(float(s), 2)
        else:
            raise ValueError(f"Unknown normalization type: {ntype}")

    def add_offset(self, offset=0):
        self.start_time += offset
        if self.start_time < 0:
            self.start_time = 0
        self.end_time += offset
        if self.end_time < 0:
            self.end_time = 0

    def __str__(self) -> str:
        return f'id:{self.index},start:{self.start_time},end:{self.end_time},text:{self.text}'

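# Parse an uploaded SRT file into a list of subtitle objects with times in seconds.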
def read_srt(uploaded_file):
    offset = 0
    with open(uploaded_file.name, "r", encoding="utf-8") as f:
        file = f.readlines()
    subtitle_list = []
    indexlist = []
    filelength = len(file)
    # A timestamp line contains " --> " and is preceded by a purely numeric index line.
    for i in range(0, filelength):
        if " --> " in file[i]:
            is_st = True
            for char in file[i - 1].strip().replace("\ufeff", ""):
                if char not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                    is_st = False
                    break
            if is_st:
                indexlist.append(i)  # line number of each timestamp line
    listlength = len(indexlist)
    # All entries except the last: text runs until the blank line before the next index line.
    for i in range(0, listlength - 1):
        st, et = file[indexlist[i]].split(" --> ")
        id = int(file[indexlist[i] - 1].strip().replace("\ufeff", ""))
        text = ""
        for x in range(indexlist[i] + 1, indexlist[i + 1] - 2):
            text += file[x]
        st = subtitle(id, st, et, text)
        st.normalize(ntype="srt")
        st.add_offset(offset=offset)
        subtitle_list.append(st)
    # Last entry: text runs to the end of the file.
    st, et = file[indexlist[-1]].split(" --> ")
    id = int(file[indexlist[-1] - 1].strip().replace("\ufeff", ""))
    text = ""
    for x in range(indexlist[-1] + 1, filelength):
        text += file[x]
    st = subtitle(id, st, et, text)
    st.normalize(ntype="srt")
    st.add_offset(offset=offset)
    subtitle_list.append(st)
    return subtitle_list


def trim_audio(intervals, input_file_path, output_file_path):
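    """Cut each [start, end] interval (in seconds) from input_file_path and export it as a WAV.

    Clips shorter than 5 seconds are looped until they reach at least 5 seconds.
    """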
    # load the audio file
    audio = AudioSegment.from_file(input_file_path)

    # iterate over the list of time intervals
    for i, (start_time, end_time) in enumerate(intervals):
        # extract the segment of the audio
        segment = audio[start_time*1000:end_time*1000]
        output_file_path_i = f"{output_file_path}_{i}.wav"
        
        if len(segment) < 5000:
            # Calculate how many times to repeat the audio to make it at least 5 seconds long
            repeat_count = (5000 // len(segment)) + 2
            # Repeat the audio
            longer_audio = segment * repeat_count
            # Save the extended audio
            print(f"Audio was less than 5 seconds. Extended to {len(longer_audio)} milliseconds.")
            longer_audio.export(output_file_path_i, format='wav')
        else:
            print("Audio is already 5 seconds or longer.")
            segment.export(output_file_path_i, format='wav')


def sort_key(file_name):
    """Extract the last number in the file name for sorting."""
    numbers = re.findall(r'\d+', file_name)
    if numbers:
        return int(numbers[-1])
    return -1  # In case there's no number, this ensures it goes to the start.


def merge_audios(folder_path):
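    """Concatenate every WAV in folder_path, ordered by the number in each filename, into AI配音版.wav."""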
    output_file = "AI配音版.wav"  # "AI-dubbed version" (final merged output)
    # Get all WAV files in the folder
    files = [f for f in os.listdir(folder_path) if f.endswith('.wav')]
    # Sort files based on the last digit in their names
    sorted_files = sorted(files, key=sort_key)
    
    # Initialize an empty audio segment
    merged_audio = AudioSegment.empty()
    
    # Loop through each file, in order, and concatenate them
    for file in sorted_files:
        audio = AudioSegment.from_wav(os.path.join(folder_path, file))
        merged_audio += audio
        print(f"Merged: {file}")
    
    # Export the merged audio to a new file
    merged_audio.export(output_file, format="wav")
    return "AI配音版.wav"


def convert_from_srt(apikey, filename, audio_full, voice, multilingual):
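    """Dub an episode end-to-end: slice the original audio per subtitle, synthesize each line with
    ElevenLabs, transfer the original speaker's tone color with OpenVoice, and merge the results.
    """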
    subtitle_list = read_srt(filename)
    
    #audio_data, sr = librosa.load(audio_full, sr=44100)
        
    #write("audio_full.wav", sr, audio_data.astype(np.int16))

    if os.path.isdir("output"):
        shutil.rmtree("output")
    if not multilingual:
        for i in subtitle_list:
            os.makedirs("output", exist_ok=True)
            trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
            print(f"正在合成第{i.index}条语音")  # "Synthesizing subtitle line {index}"
            print(f"语音内容:{i.text}")  # "Text to speak: ..."
            convert(apikey, i.text, f"sliced_audio_{i.index}_0.wav", voice, i.text + " " + str(i.index))
    else:
        # Bilingual SRT: each entry has the Chinese line first and the English line second,
        # so only the second line is synthesized.
        for i in subtitle_list:
            os.makedirs("output", exist_ok=True)
            trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
            print(f"正在合成第{i.index}条语音")  # "Synthesizing subtitle line {index}"
            print(f"语音内容:{i.text.splitlines()[1]}")  # "Text to speak: ..."
            convert(apikey, i.text.splitlines()[1], f"sliced_audio_{i.index}_0.wav", voice, i.text.splitlines()[1] + " " + str(i.index))

    # Stitch all per-line outputs together into the final dubbed track.
    return merge_audios("output")

restart_markdown = ("""
### 若此页面无法正常显示,请点击[此链接](https://openxlab.org.cn/apps/detail/Kevin676/OpenAI-TTS)唤醒该程序!谢谢🍻
""")

all_voices = voices()  # fetch the available ElevenLabs voices for the base-voice dropdown

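# Gradio UI: upload an SRT file and the original dubbing audio, pick an ElevenLabs base voice,
# and get back a complete AI-dubbed track for the episode.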
with gr.Blocks() as app:
    gr.Markdown("# <center>🌊💕🎶 11Labs + OpenVoice V2 - SRT文件一键AI配音</center>")
    gr.Markdown("### <center>🌟 只需上传SRT文件和原版配音文件即可,每次一集视频AI自动配音!Developed by Kevin Wang </center>")
    with gr.Row():
        with gr.Column():
            inp0 = gr.Textbox(type='password', label='请输入您的11Labs API Key')
            inp1 = gr.File(file_count="single", label="请上传一集视频对应的SRT文件")
            inp2 = gr.Audio(label="请上传一集视频的配音文件", type="filepath")

            inp3 = gr.Dropdown(choices=[ voice.name for voice in all_voices ], visible=False, label='请选择一个说话人提供基础音色', info="试听音色链接:https://huggingface.co/spaces/elevenlabs/tts", value='Rachel')
            #inp4 = gr.Dropdown(label="请选择用于分离伴奏的模型", info="UVR-HP5去除背景音乐效果更好,但会对人声造成一定的损伤", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5")
            inp4 = gr.Checkbox(label="SRT文件是否为双语字幕", info="若为双语字幕,请打勾选择(SRT文件中需要先出现中文字幕,后英文字幕;中英字幕各占一行)")
            btn = gr.Button("一键开启AI配音吧💕", variant="primary")
        with gr.Column():
            out1 = gr.Audio(label="为您生成的AI完整配音", type="filepath")
    
    btn.click(convert_from_srt, [inp0, inp1, inp2, inp3, inp4], [out1])
    gr.Markdown("### <center>注意❗:请勿生成会对任何个人或组织造成侵害的内容,请尊重他人的著作权和知识产权。用户对此程序的任何使用行为与程序开发者无关。</center>")
    gr.HTML('''
        <div class="footer">
                    <p>🌊🏞️🎶 - 江水东流急,滔滔无尽声。 明·顾璘
                    </p>
        </div>
    ''')

app.launch(show_error=True)