kevinwang676 committed on
Commit f4eb0ae · verified · Parent(s): d3b67ac

Create app_srt.py

Files changed (1)
  1. app_srt.py +566 -0
app_srt.py ADDED
@@ -0,0 +1,566 @@
import sys
import io, os, stat
import subprocess
import random
from zipfile import ZipFile
import uuid
import time
import torch
import torchaudio
import gradio as gr

# download the unidic dictionary for MeCab (Japanese tokenization)
os.system('python -m unidic download')

# By using XTTS you agree to the CPML license: https://coqui.ai/cpml
os.environ["COQUI_TOS_AGREED"] = "1"

# langid is used to detect the language of longer text.
# Most users expect the text to be in their own language; a checkbox can disable detection.
import langid
import base64
import csv
from io import StringIO
import datetime
import re

from scipy.io.wavfile import write
from pydub import AudioSegment

from TTS.api import TTS
from TTS.tts.configs.xtts_config import XttsConfig
from TTS.tts.models.xtts import Xtts
from TTS.utils.generic_utils import get_user_data_dir

HF_TOKEN = os.environ.get("HF_TOKEN")

from huggingface_hub import HfApi

# the API is used to restart the Space on an unrecoverable error
api = HfApi(token=HF_TOKEN)
repo_id = "coqui/xtts"

# Use the newer ffmpeg binary on Ubuntu 20 so the denoise filter is available for microphone input
print("Export newer ffmpeg binary for denoise filter")
ZipFile("ffmpeg.zip").extractall()
print("Make ffmpeg binary executable")
st = os.stat("ffmpeg")
os.chmod("ffmpeg", st.st_mode | stat.S_IEXEC)

# This will trigger downloading the model
print("Downloading Coqui XTTS V2 if not already downloaded")
from TTS.utils.manage import ModelManager

model_name = "tts_models/multilingual/multi-dataset/xtts_v2"
ModelManager().download_model(model_name)
model_path = os.path.join(get_user_data_dir("tts"), model_name.replace("/", "--"))
print("XTTS downloaded")

config = XttsConfig()
config.load_json(os.path.join(model_path, "config.json"))

model = Xtts.init_from_config(config)
model.load_checkpoint(
    config,
    checkpoint_path=os.path.join(model_path, "model.pth"),
    vocab_path=os.path.join(model_path, "vocab.json"),
    eval=True,
    use_deepspeed=True,
)
model.cuda()

# These globals are for debugging purposes only
DEVICE_ASSERT_DETECTED = 0
DEVICE_ASSERT_PROMPT = None
DEVICE_ASSERT_LANG = None

supported_languages = config.languages

def predict(
    prompt,
    language,
    audio_file_pth,
    save_path,
):
    # options from the original demo UI, hard-coded here (no microphone, no cleanup, terms accepted)
    voice_cleanup = False
    mic_file_path = None
    use_mic = False
    agree = True
    no_lang_auto_detect = False
    if agree == True:
        if language not in supported_languages:
            gr.Warning(
                f"The language you entered ({language}) is not in our supported languages; please choose one from the dropdown"
            )
            return (None, None, None, None)

        language_predicted = langid.classify(prompt)[
            0
        ].strip()  # strip needed as there is a space at the end!

        # tts expects chinese as zh-cn
        if language_predicted == "zh":
            # we use zh-cn
            language_predicted = "zh-cn"

        print(f"Detected language:{language_predicted}, Chosen language:{language}")

        # Trigger language detection only for text longer than 15 characters
        if len(prompt) > 15:
            # allow any language for short text, as some words are common across languages
            # If the user unchecks language auto-detection it will not trigger
            # You may remove this check completely for your own use
            if language_predicted != language and not no_lang_auto_detect:
                # Please duplicate this Space and remove this check if you really want this,
                # or if the auto-detector fails to identify the language (which it can on short or mixed text)
                gr.Warning(
                    "It looks like your text is not in the language you chose. If you are sure it is, please disable the language auto-detection checkbox"
                )
                return (None, None, None, None)

        if use_mic == True:
            if mic_file_path is not None:
                speaker_wav = mic_file_path
            else:
                gr.Warning(
                    "Please record your voice with the microphone, or uncheck Use Microphone to use reference audios"
                )
                return (None, None, None, None)
        else:
            speaker_wav = audio_file_pth

        # Filtering for microphone input, as it has background noise and possibly
        # silence at the beginning and end. This is fast filtering, not perfect.

        # Apply all on demand
        lowpassfilter = denoise = trim = loudness = True

        if lowpassfilter:
            lowpass_highpass = "lowpass=8000,highpass=75,"
        else:
            lowpass_highpass = ""

        if trim:
            # better to remove silence at the beginning and end for microphone input
            trim_silence = "areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02,"
        else:
            trim_silence = ""

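        # For reference, with both toggles enabled the -af chain passed to ffmpeg below is:
        #   lowpass=8000,highpass=75,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02,areverse,silenceremove=start_periods=1:start_silence=0:start_threshold=0.02,
        # i.e. a band-pass to cut rumble and hiss, then a reversed/forward silenceremove
        # pair that trims silence from both ends. Note that voice_cleanup is hard-coded
        # to False above, so this cleanup path never runs in this app.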
        if voice_cleanup:
            try:
                out_filename = (
                    speaker_wav + str(uuid.uuid4()) + ".wav"
                )  # ffmpeg needs the extension to know the output format

                # we use the newer ffmpeg binary as it has the afftdn denoise filter
                shell_command = f"./ffmpeg -y -i {speaker_wav} -af {lowpass_highpass}{trim_silence} {out_filename}".split(
                    " "
                )

                subprocess.run(
                    shell_command,
                    capture_output=False,
                    text=True,
                    check=True,
                )
                speaker_wav = out_filename
                print("Filtered microphone input")
            except subprocess.CalledProcessError:
                # The command exited with a non-zero code
                print("Error: failed filtering, using original microphone input")

        if len(prompt) < 2:
            gr.Warning("Please give a longer prompt text")
            return (None, None, None, None)
        if len(prompt) > 500:
            gr.Warning(
                "Text length is limited to 500 characters for this demo; please try shorter text. You can clone this Space and edit the code for your own usage"
            )
            return (None, None, None, None)
        global DEVICE_ASSERT_DETECTED
        if DEVICE_ASSERT_DETECTED:
            global DEVICE_ASSERT_PROMPT
            global DEVICE_ASSERT_LANG
            # It will likely never get here, as we now restart the Space on the first unrecoverable error
            print(
                f"Unrecoverable exception caused by language:{DEVICE_ASSERT_LANG} prompt:{DEVICE_ASSERT_PROMPT}"
            )

            # HF Space specific: this error is unrecoverable, the Space needs to restart
            space = api.get_space_runtime(repo_id=repo_id)
            if space.stage != "BUILDING":
                api.restart_space(repo_id=repo_id)
            else:
                print("TRIED TO RESTART but space is building")

        try:
            metrics_text = ""
            t_latent = time.time()

            # note: diffusion_conditioning is not used on hifigan (the default mode); it will be empty, but it still needs to be passed to model.inference
            try:
                (
                    gpt_cond_latent,
                    speaker_embedding,
                ) = model.get_conditioning_latents(audio_path=speaker_wav, gpt_cond_len=30, gpt_cond_chunk_len=4, max_ref_length=60)
            except Exception as e:
                print("Speaker encoding error", str(e))
                gr.Warning(
                    "It appears something is wrong with the reference audio. Did you unmute your microphone?"
                )
                return (None, None, None, None)

            latent_calculation_time = time.time() - t_latent
            # metrics_text = f"Embedding calculation time: {latent_calculation_time:.2f} seconds\n"

            # temporary comma fix: double sentence-final punctuation so the model pauses there
            prompt = re.sub(r"([^\x00-\x7F]|\w)(\.|。|\?)", r"\1 \2\2", prompt)

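            # For example, the substitution above turns "Hello. How are you?" into
            # "Hello .. How are you ??" and "你好。" into "你好 。。", which nudges
            # XTTS to pause at sentence boundaries.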
            wav_chunks = []
            ## Direct mode

            print("I: Generating new audio...")
            t0 = time.time()
            out = model.inference(
                prompt,
                language,
                gpt_cond_latent,
                speaker_embedding,
                repetition_penalty=5.0,
                temperature=0.75,
            )
            inference_time = time.time() - t0
            print(f"I: Time to generate audio: {round(inference_time*1000)} milliseconds")
            metrics_text += f"Time to generate audio: {round(inference_time*1000)} milliseconds\n"
            real_time_factor = (time.time() - t0) / out['wav'].shape[-1] * 24000
            print(f"Real-time factor (RTF): {real_time_factor}")
            metrics_text += f"Real-time factor (RTF): {real_time_factor:.2f}\n"
            torchaudio.save(f"output/{save_path}.wav", torch.tensor(out["wav"]).unsqueeze(0), 24000)

            """
            print("I: Generating new audio in streaming mode...")
            t0 = time.time()
            chunks = model.inference_stream(
                prompt,
                language,
                gpt_cond_latent,
                speaker_embedding,
                repetition_penalty=7.0,
                temperature=0.85,
            )
            first_chunk = True
            for i, chunk in enumerate(chunks):
                if first_chunk:
                    first_chunk_time = time.time() - t0
                    metrics_text += f"Latency to first audio chunk: {round(first_chunk_time*1000)} milliseconds\n"
                    first_chunk = False
                wav_chunks.append(chunk)
                print(f"Received chunk {i} of audio length {chunk.shape[-1]}")
            inference_time = time.time() - t0
            print(
                f"I: Time to generate audio: {round(inference_time*1000)} milliseconds"
            )
            #metrics_text += (
            #    f"Time to generate audio: {round(inference_time*1000)} milliseconds\n"
            #)
            wav = torch.cat(wav_chunks, dim=0)
            print(wav.shape)
            real_time_factor = (time.time() - t0) / wav.shape[0] * 24000
            print(f"Real-time factor (RTF): {real_time_factor}")
            metrics_text += f"Real-time factor (RTF): {real_time_factor:.2f}\n"
            torchaudio.save("output.wav", wav.squeeze().unsqueeze(0).cpu(), 24000)
            """

        except RuntimeError as e:
            if "device-side assert" in str(e):
                # nothing can be done about a CUDA device-side assert; the Space needs to restart
                print(
                    f"Exit due to: Unrecoverable exception caused by language:{language} prompt:{prompt}",
                    flush=True,
                )
                gr.Warning("Unhandled exception encountered, please retry in a minute")
                print("CUDA device-side assert encountered, need to restart")
                if not DEVICE_ASSERT_DETECTED:
                    DEVICE_ASSERT_DETECTED = 1
                    DEVICE_ASSERT_PROMPT = prompt
                    DEVICE_ASSERT_LANG = language

                # just before restarting, save what caused the issue so we can handle it in the future
                # Uploading error data only happens for unrecoverable errors
                error_time = datetime.datetime.now().strftime("%d-%m-%Y-%H:%M:%S")
                error_data = [
                    error_time,
                    prompt,
                    language,
                    audio_file_pth,
                    mic_file_path,
                    use_mic,
                    voice_cleanup,
                    no_lang_auto_detect,
                    agree,
                ]
                error_data = [str(elem) if type(elem) != str else elem for elem in error_data]
                print(error_data)
                print(speaker_wav)
                write_io = StringIO()
                csv.writer(write_io).writerows([error_data])
                csv_upload = write_io.getvalue().encode()

                filename = error_time + "_" + str(uuid.uuid4()) + ".csv"
                print("Writing error csv")
                error_api = HfApi()
                error_api.upload_file(
                    path_or_fileobj=csv_upload,
                    path_in_repo=filename,
                    repo_id="coqui/xtts-flagged-dataset",
                    repo_type="dataset",
                )

                # speaker_wav
                print("Writing error reference audio")
                speaker_filename = (
                    error_time + "_reference_" + str(uuid.uuid4()) + ".wav"
                )
                error_api = HfApi()
                error_api.upload_file(
                    path_or_fileobj=speaker_wav,
                    path_in_repo=speaker_filename,
                    repo_id="coqui/xtts-flagged-dataset",
                    repo_type="dataset",
                )

                # HF Space specific: this error is unrecoverable, the Space needs to restart
                space = api.get_space_runtime(repo_id=repo_id)
                if space.stage != "BUILDING":
                    api.restart_space(repo_id=repo_id)
                else:
                    print("TRIED TO RESTART but space is building")

            else:
                if "Failed to decode" in str(e):
                    print("Speaker encoding error", str(e))
                    gr.Warning(
                        "It appears something is wrong with the reference audio. Did you unmute your microphone?"
                    )
                else:
                    print("RuntimeError: non device-side assert error:", str(e))
                    gr.Warning("Something unexpected happened, please retry.")
                return (None, None, None, None)
        return f"output/{save_path}.wav"
    else:
        gr.Warning("Please accept the Terms & Conditions!")
        return None

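# Minimal usage sketch for predict (hypothetical file names): synthesize "Hello there"
# in English, cloning the voice in ref.wav and writing output/demo.wav:
#
#   os.makedirs("output", exist_ok=True)
#   predict("Hello there", "en", "ref.wav", "demo")
#
# On success predict returns the path "output/demo.wav"; on failure it returns None
# values (a leftover of the four-output layout of the upstream demo).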
class subtitle:
    def __init__(self, index: int, start_time, end_time, text: str):
        self.index = int(index)
        self.start_time = start_time
        self.end_time = end_time
        self.text = text.strip()

    def normalize(self, ntype: str, fps=30):
        if ntype == "prcsv":
            # Premiere CSV style: hours:minutes:seconds:frames (frames converted via fps)
            h, m, s, fs = (self.start_time.replace(';', ':')).split(":")
            self.start_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 2)
            h, m, s, fs = (self.end_time.replace(';', ':')).split(":")
            self.end_time = int(h) * 3600 + int(m) * 60 + int(s) + round(int(fs) / fps, 2)
        elif ntype == "srt":
            # SRT style: hours:minutes:seconds,milliseconds
            h, m, s = self.start_time.split(":")
            s = s.replace(",", ".")
            self.start_time = int(h) * 3600 + int(m) * 60 + round(float(s), 2)
            h, m, s = self.end_time.split(":")
            s = s.replace(",", ".")
            self.end_time = int(h) * 3600 + int(m) * 60 + round(float(s), 2)
        else:
            raise ValueError

    def add_offset(self, offset=0):
        self.start_time += offset
        if self.start_time < 0:
            self.start_time = 0
        self.end_time += offset
        if self.end_time < 0:
            self.end_time = 0

    def __str__(self) -> str:
        return f'id:{self.index},start:{self.start_time},end:{self.end_time},text:{self.text}'

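# For example, normalize(ntype="srt") converts the SRT timestamp "00:01:02,500"
# into 0*3600 + 1*60 + 2.5 = 62.5 seconds, so start_time and end_time become
# plain float seconds that trim_audio below can slice with directly.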
def read_srt(filename):
    offset = 0
    with open(filename, "r", encoding="utf-8") as f:
        file = f.readlines()
    subtitle_list = []
    indexlist = []
    filelength = len(file)
    for i in range(0, filelength):
        if " --> " in file[i]:
            is_st = True
            for char in file[i - 1].strip().replace("\ufeff", ""):
                if char not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
                    is_st = False
                    break
            if is_st:
                indexlist.append(i)  # record the line number of each timestamp line
    listlength = len(indexlist)
    for i in range(0, listlength - 1):
        st, et = file[indexlist[i]].split(" --> ")
        id = int(file[indexlist[i] - 1].strip().replace("\ufeff", ""))
        text = ""
        for x in range(indexlist[i] + 1, indexlist[i + 1] - 2):
            text += file[x]
        st = subtitle(id, st, et, text)
        st.normalize(ntype="srt")
        st.add_offset(offset=offset)
        subtitle_list.append(st)
    st, et = file[indexlist[-1]].split(" --> ")
    id = file[indexlist[-1] - 1]
    text = ""
    for x in range(indexlist[-1] + 1, filelength):
        text += file[x]
    st = subtitle(id, st, et, text)
    st.normalize(ntype="srt")
    st.add_offset(offset=offset)
    subtitle_list.append(st)
    return subtitle_list

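# read_srt expects standard SRT blocks, e.g.:
#
#   1
#   00:00:01,000 --> 00:00:03,500
#   Hello there.
#
#   2
#   00:00:04,000 --> 00:00:06,200
#   How are you?
#
# Each block becomes a subtitle object with start/end times in seconds
# (1.0-3.5 and 4.0-6.2 here) and the text stripped of surrounding whitespace.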

def trim_audio(intervals, input_file_path, output_file_path):
    # load the audio file
    audio = AudioSegment.from_file(input_file_path)

    # iterate over the list of time intervals
    for i, (start_time, end_time) in enumerate(intervals):
        # extract the segment of the audio (pydub slices in milliseconds)
        segment = audio[start_time * 1000:end_time * 1000]

        # construct the output file path
        output_file_path_i = f"{output_file_path}_{i}.wav"

        # export the segment to a file
        segment.export(output_file_path_i, format='wav')

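# For example, trim_audio([[5.0, 9.5]], "episode.wav", "sliced_audio_3") writes the
# 5s-9.5s span of episode.wav to "sliced_audio_3_0.wav" (the _0 suffix comes from
# the interval's position in the list).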

def merge_audios(input_dir):
    output_file = "AI配音版.wav"  # "AI配音版" means "AI-dubbed version"
    # List all .wav files in the directory
    files = [f for f in os.listdir(input_dir) if f.endswith('.wav')]

    # Sort files by the first number that appears in their names
    sorted_files = sorted(files, key=lambda x: int(re.search(r'(\d+)', x).group()))

    # Initialize an empty audio segment
    combined = AudioSegment.empty()

    # Loop through the sorted list and concatenate the clips
    for file in sorted_files:
        path = os.path.join(input_dir, file)
        audio = AudioSegment.from_wav(path)
        combined += audio
        print(f"Merged: {file}")

    # Export the combined audio
    combined.export(output_file, format="wav")
    return output_file

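# Note the sort key above is the first run of digits in each file name, so clips must
# be named with their subtitle index up front ("1.wav", "2.wav", ..., "10.wav") to
# merge in subtitle order; a plain lexicographic sort would put "10" before "2".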
def convert_from_srt(filename, audio_full, language):
    subtitle_list = read_srt(filename)
    os.makedirs("output", exist_ok=True)
    for i in subtitle_list:
        print(f"Synthesizing clip {i.index}")
        print(f"Text: {i.text}")
        trim_audio([[i.start_time, i.end_time]], audio_full, f"sliced_audio_{i.index}")
        # Name the output after the subtitle index only, so merge_audios sorts clips
        # correctly (the subtitle text itself may contain digits or characters that
        # are unsafe in file names)
        predict(i.text, language, f"sliced_audio_{i.index}_0.wav", str(i.index))
    return merge_audios("output")

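# End-to-end sketch (hypothetical file names): dub one episode from its subtitles,
# assuming episode.srt and episode.wav are in the working directory:
#
#   convert_from_srt("episode.srt", "episode.wav", "zh-cn")
#
# Each subtitle's span of the original dub is sliced out as the voice reference,
# the line is synthesized with XTTS, and all clips are merged in index order.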
with gr.Blocks() as app:
    gr.Markdown("# <center>🌊💕🎶 XTTS - One-click AI dubbing from an SRT file</center>")
    gr.Markdown("### <center>🌟 Just upload an SRT file and the original dub audio; one episode is dubbed automatically per run! Developed by Kevin Wang </center>")
    with gr.Row():
        with gr.Column():
            inp1 = gr.File(file_count="single", label="Upload the SRT file for one episode")
            # gr.Audio has no info= parameter, so the format hint lives in the label
            inp2 = gr.Audio(label="Upload the episode's original dub audio (a .wav file)", type="filepath")
            inp3 = gr.Dropdown(
                label="Language of the SRT file",
                info="See [this site](https://www.science.co.il/language/Codes.php) for the short code of each language",
                choices=[
                    "en",
                    "es",
                    "fr",
                    "de",
                    "it",
                    "pt",
                    "pl",
                    "tr",
                    "ru",
                    "nl",
                    "cs",
                    "ar",
                    "zh-cn",
                    "ja",
                    "ko",
                    "hu",
                    "hi",
                ],
                value="en",
            )
            btn = gr.Button("Start AI dubbing 💕", variant="primary")
        with gr.Column():
            out1 = gr.Audio(label="Your complete AI-generated dub")

    btn.click(convert_from_srt, [inp1, inp2, inp3], [out1])

    gr.Markdown("### <center>Note: please do not generate any content that could harm a person or organization.</center>")
    gr.HTML('''
        <div class="footer">
            <p>🌊🏞️🎶 - The river rushes eastward, its surging voice without end. (Gu Lin, Ming dynasty)
            </p>
        </div>
    ''')

app.launch(share=True, show_error=True)