kevinwang676 committed
Commit 7056d79 · verified · 1 Parent(s): ebfdb6b

Create app.py

Files changed (1):
app.py +230 -0
app.py ADDED
@@ -0,0 +1,230 @@
import os
import torch
import gradio as gr
from openvoice import se_extractor
from openvoice.api import ToneColorConverter
from elevenlabs import voices, generate, set_api_key, UnauthenticatedRateLimitError

ckpt_converter = 'checkpoints_v2/converter'
device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Load the OpenVoice V2 tone color converter from the local checkpoint directory.
tone_color_converter = ToneColorConverter(f'{ckpt_converter}/config.json', device=device)
tone_color_converter.load_ckpt(f'{ckpt_converter}/checkpoint.pth')

# Reference clip of the ElevenLabs base speaker; its speaker embedding is used as
# the conversion source for every generated line.
base_speaker = "11labs.mp3"
source_se, audio_name = se_extractor.get_se(base_speaker, tone_color_converter, vad=True)

def generate_voice(text, voice_name):
    """Synthesize `text` with ElevenLabs TTS and return the path of the generated MP3."""
    try:
        audio = generate(
            text[:1000],  # Limit to 1000 characters
            voice=voice_name,
            model="eleven_multilingual_v2"
        )
        with open("output.mp3", mode='wb') as f:
            f.write(audio)
        return "output.mp3"

    except UnauthenticatedRateLimitError:
        raise Exception("Thanks for trying out ElevenLabs TTS! You've reached the free tier limit. Please provide an API key to continue.")
    except Exception as e:
        raise Exception(e)

def convert(api_key, text, tgt, voice, save_path):
    """Generate one line with ElevenLabs, then convert its tone color to the target speaker."""
    os.environ["ELEVEN_API_KEY"] = api_key
    src_path = generate_voice(text, voice)
    reference_speaker = tgt
    target_se, audio_name = se_extractor.get_se(reference_speaker, tone_color_converter, vad=True)

    # "@MyShell" is the watermark message encoded into the converted audio.
    encode_message = "@MyShell"
    tone_color_converter.convert(
        audio_src_path=src_path,
        src_se=source_se,
        tgt_se=target_se,
        output_path=f"output/{save_path}.wav",
        message=encode_message)

    return f"output/{save_path}.wav"

class subtitle:
    """A single subtitle cue: index, start/end time (in seconds after normalize), and text."""
    def __init__(self, index: int, start_time, end_time, text: str):
        self.index = int(index)
        self.start_time = start_time
        self.end_time = end_time
        self.text = text.strip()

    def normalize(self, ntype: str, fps=30):
        # Convert the raw timestamp strings into seconds.
        if ntype == "prcsv":
            # Frame-based timestamps (HH:MM:SS;FF), converted using the given fps.
            h, m, s, fs = (self.start_time.replace(';', ':')).split(":")
            self.start_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 2)
            h, m, s, fs = (self.end_time.replace(';', ':')).split(":")
            self.end_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 2)
        elif ntype == "srt":
            # SRT timestamps (HH:MM:SS,mmm).
            h, m, s = self.start_time.split(":")
            s = s.replace(",", ".")
            self.start_time = int(h) * 3600 + int(m) * 60 + round(float(s), 2)
            h, m, s = self.end_time.split(":")
            s = s.replace(",", ".")
            self.end_time = int(h) * 3600 + int(m) * 60 + round(float(s), 2)
        else:
            raise ValueError(f"unsupported timestamp type: {ntype}")

    def add_offset(self, offset=0):
        self.start_time += offset
        if self.start_time < 0:
            self.start_time = 0
        self.end_time += offset
        if self.end_time < 0:
            self.end_time = 0

    def __str__(self) -> str:
        return f'id:{self.index},start:{self.start_time},end:{self.end_time},text:{self.text}'

def read_srt(uploaded_file):
    """Parse an uploaded SRT file into a list of subtitle objects with times in seconds."""
    offset = 0
    with open(uploaded_file.name, "r", encoding="utf-8") as f:
        file = f.readlines()
    subtitle_list = []
    indexlist = []
    filelength = len(file)
    # Collect the line numbers of every "HH:MM:SS,mmm --> HH:MM:SS,mmm" timing line
    # whose preceding line is a purely numeric cue index.
    for i in range(0, filelength):
        if " --> " in file[i]:
            is_st = True
            for char in file[i - 1].strip().replace("\ufeff", ""):
                if char not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                    is_st = False
                    break
            if is_st:
                indexlist.append(i)  # line number of the timing line
    listlength = len(indexlist)
    # All cues except the last: text runs from the timing line to just before the next cue.
    for i in range(0, listlength - 1):
        st, et = file[indexlist[i]].split(" --> ")
        idx = int(file[indexlist[i] - 1].strip().replace("\ufeff", ""))
        text = ""
        for x in range(indexlist[i] + 1, indexlist[i + 1] - 2):
            text += file[x]
        st = subtitle(idx, st, et, text)
        st.normalize(ntype="srt")
        st.add_offset(offset=offset)
        subtitle_list.append(st)
    # Last cue: text runs to the end of the file.
    st, et = file[indexlist[-1]].split(" --> ")
    idx = file[indexlist[-1] - 1]
    text = ""
    for x in range(indexlist[-1] + 1, filelength):
        text += file[x]
    st = subtitle(idx, st, et, text)
    st.normalize(ntype="srt")
    st.add_offset(offset=offset)
    subtitle_list.append(st)
    return subtitle_list

from pydub import AudioSegment

def trim_audio(intervals, input_file_path, output_file_path):
    # load the audio file
    audio = AudioSegment.from_file(input_file_path)

    # iterate over the list of time intervals
    for i, (start_time, end_time) in enumerate(intervals):
        # extract the segment of the audio (pydub slices in milliseconds)
        segment = audio[start_time * 1000:end_time * 1000]

        # construct the output file path
        output_file_path_i = f"{output_file_path}_{i}.wav"

        # export the segment to a file
        segment.export(output_file_path_i, format='wav')

import re

def sort_key(file_name):
    """Extract the last number in the file name for sorting."""
    numbers = re.findall(r'\d+', file_name)
    if numbers:
        return int(numbers[-1])
    return -1  # In case there's no number, this ensures it goes to the start.

def merge_audios(folder_path):
    # Final output file; the Chinese name means "AI-dubbed version".
    output_file = "AI配音版.wav"
    # Get all WAV files in the folder
    files = [f for f in os.listdir(folder_path) if f.endswith('.wav')]
    # Sort files based on the last number in their names
    sorted_files = sorted(files, key=sort_key)

    # Initialize an empty audio segment
    merged_audio = AudioSegment.empty()

    # Loop through each file, in order, and concatenate them
    for file in sorted_files:
        audio = AudioSegment.from_wav(os.path.join(folder_path, file))
        merged_audio += audio
        print(f"Merged: {file}")

    # Export the merged audio to a new file
    merged_audio.export(output_file, format="wav")
    return "AI配音版.wav"

import shutil

def convert_from_srt(apikey, filename, audio_full, voice, multilingual):
    """Slice the original dub along the SRT cues, re-voice each line, and merge the result."""
    subtitle_list = read_srt(filename)

    #audio_data, sr = librosa.load(audio_full, sr=44100)

    #write("audio_full.wav", sr, audio_data.astype(np.int16))

    # Start from a clean output directory for each run.
    if os.path.isdir("output"):
        shutil.rmtree("output")
    if not multilingual:
        for i in subtitle_list:
            os.makedirs("output", exist_ok=True)
            trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
            print(f"正在合成第{i.index}条语音")  # "Synthesizing line {index}"
            print(f"语音内容:{i.text}")  # "Text: {text}"
            convert(apikey, i.text, f"sliced_audio_{i.index}_0.wav", voice, i.text + " " + str(i.index))
    else:
        # Bilingual SRT: the second line of each cue holds the text to synthesize.
        for i in subtitle_list:
            os.makedirs("output", exist_ok=True)
            trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
            print(f"正在合成第{i.index}条语音")  # "Synthesizing line {index}"
            print(f"语音内容:{i.text.splitlines()[1]}")  # "Text: {second line of the cue}"
            convert(apikey, i.text.splitlines()[1], f"sliced_audio_{i.index}_0.wav", voice, i.text.splitlines()[1] + " " + str(i.index))

    merge_audios("output")

    return "AI配音版.wav"

# Restart notice shown on OpenXLab (currently unused):
# "If this page does not display properly, please click the link to wake the app up. Thanks!"
restart_markdown = ("""
### 若此页面无法正常显示,请点击[此链接](https://openxlab.org.cn/apps/detail/Kevin676/OpenAI-TTS)唤醒该程序!谢谢🍻
""")

# Fetch the list of available ElevenLabs voices for the dropdown.
all_voices = voices()

with gr.Blocks() as app:
    # Title: "11Labs + OpenVoice V2 - one-click AI dubbing from an SRT file"
    gr.Markdown("# <center>🌊💕🎶 11Labs + OpenVoice V2 - SRT文件一键AI配音</center>")
    # Subtitle: "Just upload an SRT file and the original dub; one episode is dubbed automatically per run."
    gr.Markdown("### <center>🌟 只需上传SRT文件和原版配音文件即可,每次一集视频AI自动配音!Developed by Kevin Wang </center>")
    with gr.Row():
        with gr.Column():
            inp0 = gr.Textbox(type='password', label='请输入您的11Labs API Key')  # "Enter your 11Labs API key"
            inp1 = gr.File(file_count="single", label="请上传一集视频对应的SRT文件")  # "Upload the SRT file for one episode"
            inp2 = gr.Audio(label="请上传一集视频的配音文件", type="filepath")  # "Upload the original dub audio for the episode"

            # "Pick a speaker for the base voice"; voice preview link: https://huggingface.co/spaces/elevenlabs/tts
            inp3 = gr.Dropdown(choices=[voice.name for voice in all_voices], label='请选择一个说话人提供基础音色', info="试听音色链接:https://huggingface.co/spaces/elevenlabs/tts", value='Rachel')
            #inp4 = gr.Dropdown(label="请选择用于分离伴奏的模型", info="UVR-HP5去除背景音乐效果更好,但会对人声造成一定的损伤", choices=["UVR-HP2", "UVR-HP5"], value="UVR-HP5")
            # "Is the SRT bilingual?" - check it if each cue has a Chinese line first and an English line second, one line each.
            inp4 = gr.Checkbox(label="SRT文件是否为双语字幕", info="若为双语字幕,请打勾选择(SRT文件中需要先出现中文字幕,后英文字幕;中英字幕各占一行)")
            btn = gr.Button("一键开启AI配音吧💕", variant="primary")  # "Start AI dubbing"
        with gr.Column():
            out1 = gr.Audio(label="为您生成的AI完整配音", type="filepath")  # "Your complete AI dub"

    btn.click(convert_from_srt, [inp0, inp1, inp2, inp3, inp4], [out1])
    # Disclaimer: do not generate content that harms any person or organization; respect copyright and IP.
    gr.Markdown("### <center>注意❗:请勿生成会对任何个人或组织造成侵害的内容,请尊重他人的著作权和知识产权。用户对此程序的任何使用行为与程序开发者无关。</center>")
    # Decorative footer (a line of Ming-dynasty poetry by Gu Lin).
    gr.HTML('''
        <div class="footer">
            <p>🌊🏞️🎶 - 江水东流急,滔滔无尽声。 明·顾璘
            </p>
        </div>
    ''')

app.launch(show_error=True)