jpgallegoar2 commited on
Commit
3d2e8fd
·
1 Parent(s): 9d2b8cb

Added multiple speech types generation

Browse files
Files changed (1) hide show
  1. gradio_app.py +265 -1
gradio_app.py CHANGED
@@ -380,6 +380,30 @@ def generate_podcast(script, speaker1_name, ref_audio1, ref_text1, speaker2_name
380
 
381
  return podcast_path
382
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
383
  with gr.Blocks() as app:
384
  gr.Markdown(
385
  """
@@ -484,6 +508,246 @@ Supported by [RootingInLoad](https://github.com/RootingInLoad)
484
  ],
485
  outputs=podcast_output,
486
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
487
 
488
  @click.command()
489
  @click.option("--port", "-p", default=None, type=int, help="Port to run the app on")
@@ -505,4 +769,4 @@ def main(port, host, share, api):
505
 
506
 
507
  if __name__ == "__main__":
508
- main()
 
380
 
381
  return podcast_path
382
 
383
def parse_speechtypes_text(gen_text):
    """Split *gen_text* into segments tagged with a speech type.

    A ``(TypeName)`` marker switches the active speech type for all text
    that follows it; text before any marker uses ``'Regular'``.

    Returns a list of ``{'emotion': <type>, 'text': <chunk>}`` dicts,
    skipping chunks that are empty after stripping.
    """
    # re.split with a capturing group yields alternating tokens:
    # even indices are text chunks, odd indices are the captured labels.
    pieces = re.split(r'\((.*?)\)', gen_text)

    active_type = 'Regular'
    parsed = []
    for idx, token in enumerate(pieces):
        if idx % 2:
            # Odd position: this token is a (SpeechType) label.
            active_type = token.strip()
        else:
            chunk = token.strip()
            if chunk:
                parsed.append({'emotion': active_type, 'text': chunk})

    return parsed
406
+
407
  with gr.Blocks() as app:
408
  gr.Markdown(
409
  """
 
508
  ],
509
  outputs=podcast_output,
510
  )
511
+
512
+ # New section for emotional generation
513
+ gr.Markdown(
514
+ """
515
+ # Multiple Speech-Type Generation
516
+
517
+ This section allows you to upload different audio clips for each speech type. 'Regular' emotion is mandatory. You can add additional speech types by clicking the "Add Speech Type" button. Enter your text in the format shown below, and the system will generate speech using the appropriate emotions. If unspecified, the model will use the regular speech type. The current speech type will be used until the next speech type is specified.
518
+
519
+ **Example Input:**
520
+
521
+ (Regular) Hello, I'd like to order a sandwich please. (Surprised) What do you mean you're out of bread? (Sad) I really wanted a sandwich though... (Angry) You know what, darn you and your little shop, you suck! (Whisper) I'll just go back home and cry now. (Shouting) Why me?!
522
+ """
523
+ )
524
+
525
+ with gr.Tab("Multiple Speech-Type Generation"):
526
+ gr.Markdown("Upload different audio clips for each speech type. 'Regular' emotion is mandatory. You can add additional speech types by clicking the 'Add Speech Type' button.")
527
+
528
+ # Regular speech type (mandatory)
529
+ with gr.Row():
530
+ regular_name = gr.Textbox(value='Regular', label='Speech Type Name', interactive=False)
531
+ regular_audio = gr.Audio(label='Regular Reference Audio', type='filepath')
532
+ regular_ref_text = gr.Textbox(label='Reference Text (Regular)', lines=2)
533
+
534
+ # Additional speech types (up to 9 more)
535
+ max_speech_types = 10
536
+ speech_type_names = []
537
+ speech_type_audios = []
538
+ speech_type_ref_texts = []
539
+ speech_type_delete_btns = []
540
+
541
+ for i in range(max_speech_types - 1):
542
+ with gr.Row():
543
+ name_input = gr.Textbox(label='Speech Type Name', visible=False)
544
+ audio_input = gr.Audio(label='Reference Audio', type='filepath', visible=False)
545
+ ref_text_input = gr.Textbox(label='Reference Text', lines=2, visible=False)
546
+ delete_btn = gr.Button("Delete", variant="secondary", visible=False)
547
+ speech_type_names.append(name_input)
548
+ speech_type_audios.append(audio_input)
549
+ speech_type_ref_texts.append(ref_text_input)
550
+ speech_type_delete_btns.append(delete_btn)
551
+
552
+ # Button to add speech type
553
+ add_speech_type_btn = gr.Button("Add Speech Type")
554
+
555
+ # Keep track of current number of speech types
556
+ speech_type_count = gr.State(value=0)
557
+
558
+ # Function to add a speech type
559
def add_speech_type_fn(speech_type_count):
    """Reveal one more speech-type input row (capped at max_speech_types - 1).

    Returns ``[new_count]`` followed by gr.update objects for every
    name, audio, ref-text and delete-button component, in that order,
    matching the outputs wired to the Add button.
    """
    slots = max_speech_types - 1
    if speech_type_count < slots:
        speech_type_count += 1
        # Rows with index < count become visible; later rows are untouched.
        name_updates = [gr.update(visible=True) if row < speech_type_count else gr.update()
                        for row in range(slots)]
        audio_updates = [gr.update(visible=True) if row < speech_type_count else gr.update()
                         for row in range(slots)]
        ref_text_updates = [gr.update(visible=True) if row < speech_type_count else gr.update()
                            for row in range(slots)]
        delete_btn_updates = [gr.update(visible=True) if row < speech_type_count else gr.update()
                              for row in range(slots)]
    else:
        # Capacity reached: emit no-op updates for every component.
        # (Could surface gr.Warning("Maximum number of speech types reached.") here.)
        name_updates = [gr.update() for _ in range(slots)]
        audio_updates = [gr.update() for _ in range(slots)]
        ref_text_updates = [gr.update() for _ in range(slots)]
        delete_btn_updates = [gr.update() for _ in range(slots)]
    return [speech_type_count] + name_updates + audio_updates + ref_text_updates + delete_btn_updates
586
+
587
+ add_speech_type_btn.click(
588
+ add_speech_type_fn,
589
+ inputs=speech_type_count,
590
+ outputs=[speech_type_count] + speech_type_names + speech_type_audios + speech_type_ref_texts + speech_type_delete_btns
591
+ )
592
+
593
+ # Function to delete a speech type
594
def make_delete_speech_type_fn(index):
    """Build a delete callback bound to the speech-type row at *index*.

    A factory is needed so each Delete button captures its own row index
    rather than sharing a late-bound loop variable.
    """
    def delete_speech_type_fn(speech_type_count):
        slots = max_speech_types - 1
        name_updates = []
        audio_updates = []
        ref_text_updates = []
        delete_btn_updates = []

        for row in range(slots):
            if row == index:
                # Hide the targeted row and wipe its contents.
                name_updates.append(gr.update(visible=False, value=''))
                audio_updates.append(gr.update(visible=False, value=None))
                ref_text_updates.append(gr.update(visible=False, value=''))
                delete_btn_updates.append(gr.update(visible=False))
            else:
                # Leave every other row untouched.
                name_updates.append(gr.update())
                audio_updates.append(gr.update())
                ref_text_updates.append(gr.update())
                delete_btn_updates.append(gr.update())

        # Decrement the counter, never below zero.
        speech_type_count = max(0, speech_type_count - 1)

        return [speech_type_count] + name_updates + audio_updates + ref_text_updates + delete_btn_updates

    return delete_speech_type_fn
619
+
620
+ for i, delete_btn in enumerate(speech_type_delete_btns):
621
+ delete_fn = make_delete_speech_type_fn(i)
622
+ delete_btn.click(
623
+ delete_fn,
624
+ inputs=speech_type_count,
625
+ outputs=[speech_type_count] + speech_type_names + speech_type_audios + speech_type_ref_texts + speech_type_delete_btns
626
+ )
627
+
628
+ # Text input for the prompt
629
+ gen_text_input_emotional = gr.Textbox(label="Text to Generate", lines=10)
630
+
631
+ # Model choice
632
+ model_choice_emotional = gr.Radio(
633
+ choices=["F5-TTS", "E2-TTS"], label="Choose TTS Model", value="F5-TTS"
634
+ )
635
+
636
+ with gr.Accordion("Advanced Settings", open=False):
637
+ remove_silence_emotional = gr.Checkbox(
638
+ label="Remove Silences",
639
+ value=True,
640
+ )
641
+
642
+ # Generate button
643
+ generate_emotional_btn = gr.Button("Generate Emotional Speech", variant="primary")
644
+
645
+ # Output audio
646
+ audio_output_emotional = gr.Audio(label="Synthesized Audio")
647
+
648
def generate_emotional_speech(
    regular_audio,
    regular_ref_text,
    gen_text,
    *args,
):
    """Synthesize *gen_text* segment by segment using per-speech-type references.

    *args layout (N = max_speech_types - 1): N speech-type names,
    N reference audios, N reference texts, then the model choice and
    the remove-silence flag.

    Returns ``(sample_rate, audio_array)`` or ``None`` (with a warning)
    when nothing was generated.
    """
    n = max_speech_types - 1
    type_names = args[:n]
    type_audios = args[n:2 * n]
    type_ref_texts = args[2 * n:3 * n]
    model_choice = args[3 * n]
    remove_silence = args[3 * n + 1]

    # Map each fully configured speech type (name + audio present) to its refs.
    speech_types = {'Regular': {'audio': regular_audio, 'ref_text': regular_ref_text}}
    for type_name, type_audio, type_ref in zip(type_names, type_audios, type_ref_texts):
        if type_name and type_audio:
            speech_types[type_name] = {'audio': type_audio, 'ref_text': type_ref}

    generated_audio_segments = []
    current_emotion = 'Regular'

    for segment in parse_speechtypes_text(gen_text):
        # Unknown speech types fall back to 'Regular'.
        current_emotion = segment['emotion'] if segment['emotion'] in speech_types else 'Regular'
        ref = speech_types[current_emotion]

        # Generate speech for this segment with the active reference pair.
        audio, _ = infer(ref['audio'], ref.get('ref_text', ''), segment['text'],
                         model_choice, remove_silence, "")
        sr, audio_data = audio
        generated_audio_segments.append(audio_data)

    if not generated_audio_segments:
        gr.Warning("No audio generated.")
        return None

    # Stitch all segments together; sr comes from the last infer() call.
    return (sr, np.concatenate(generated_audio_segments))
701
+
702
+ generate_emotional_btn.click(
703
+ generate_emotional_speech,
704
+ inputs=[
705
+ regular_audio,
706
+ regular_ref_text,
707
+ gen_text_input_emotional,
708
+ ] + speech_type_names + speech_type_audios + speech_type_ref_texts + [
709
+ model_choice_emotional,
710
+ remove_silence_emotional,
711
+ ],
712
+ outputs=audio_output_emotional,
713
+ )
714
+
715
+ # Validation function to disable Generate button if speech types are missing
716
def validate_speech_types(
    gen_text,
    regular_name,
    *args
):
    """Enable the Generate button only when every speech type referenced
    in *gen_text* has a configured row.

    *args carries the (max_speech_types - 1) speech-type name textbox
    values. Returns a gr.update toggling the button's interactivity.
    """
    num_additional_speech_types = max_speech_types - 1
    speech_type_names_list = args[:num_additional_speech_types]

    # Collect the names of all configured speech types (Regular + extras).
    speech_types_available = set()
    if regular_name:
        speech_types_available.add(regular_name)
    for name_input in speech_type_names_list:
        if name_input:
            speech_types_available.add(name_input)

    # BUG FIX: this previously called parse_emotional_text(), which is not
    # defined anywhere — every text-change event raised NameError. The
    # parser defined in this module is parse_speechtypes_text().
    segments = parse_speechtypes_text(gen_text)
    speech_types_in_text = set(segment['emotion'] for segment in segments)

    # Any speech type used in the text but not configured blocks generation.
    missing_speech_types = speech_types_in_text - speech_types_available

    if missing_speech_types:
        # Disable the generate button
        return gr.update(interactive=False)
    else:
        # Enable the generate button
        return gr.update(interactive=True)
745
+
746
+ gen_text_input_emotional.change(
747
+ validate_speech_types,
748
+ inputs=[gen_text_input_emotional, regular_name] + speech_type_names,
749
+ outputs=generate_emotional_btn
750
+ )
751
 
752
  @click.command()
753
  @click.option("--port", "-p", default=None, type=int, help="Port to run the app on")
 
769
 
770
 
771
  if __name__ == "__main__":
772
+ main()