import spaces
import gradio as gr
import edge_tts
import asyncio
import tempfile
import os
import re
from pathlib import Path
from pydub import AudioSegment


def get_silence(duration_ms=1000):
    # Create silent audio segment with specified parameters
    silent_audio = AudioSegment.silent(
        duration=duration_ms,
        frame_rate=24000  # 24kHz sampling rate
    )
    # Set audio parameters
    silent_audio = silent_audio.set_channels(1)      # Mono
    silent_audio = silent_audio.set_sample_width(4)  # 32-bit (4 bytes per sample)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        # Export with specific bitrate and codec parameters
        silent_audio.export(
            tmp_file.name,
            format="mp3",
            bitrate="48k",
            parameters=[
                "-ac", "1",               # Mono
                "-ar", "24000",           # Sample rate
                "-sample_fmt", "s32",     # 32-bit samples
                "-codec:a", "libmp3lame"  # MP3 codec
            ]
        )
        return tmp_file.name


# Get all available voices
async def get_voices():
    voices = await edge_tts.list_voices()
    return {f"{v['ShortName']} - {v['Locale']} ({v['Gender']})": v['ShortName'] for v in voices}


async def text_to_speech_segment(text_segment, voice, rate, pitch):
    """Processes a single text segment for voice commands and generates audio."""
    current_voice_full = voice
    current_voice_short = current_voice_full.split(" - ")[0] if current_voice_full else ""
    current_rate = rate
    current_pitch = pitch
    processed_text = text_segment

    voice1_full = "en-AU-WilliamNeural - en-AU (Male)"
    voice1_short = voice1_full.split(" - ")[0]
    voice1F_full = "en-GB-SoniaNeural - en-GB (Female)"
    voice1F_short = voice1F_full.split(" - ")[0]
    voice2_full = "en-GB-RyanNeural - en-GB (Male)"
    voice2_short = voice2_full.split(" - ")[0]
    voice2F_full = "en-US-JennyNeural - en-US (Female)"
    voice2F_short = voice2F_full.split(" - ")[0]
    voice3_full = "en-US-BrianMultilingualNeural - en-US (Male)"  # good for reading
    voice3_short = voice3_full.split(" - ")[0]
    voice3F_full = "en-HK-YanNeural - en-HK (Female)"
    voice3F_short = voice3F_full.split(" - ")[0]
    voice4_full = "en-GB-ThomasNeural - en-GB (Male)"
    voice4_short = voice4_full.split(" - ")[0]
    voice4F_full = "en-US-EmmaNeural - en-US (Female)"
    voice4F_short = voice4F_full.split(" - ")[0]
    voice5_full = "en-GB-RyanNeural - en-GB (Male)"  # Old man
    voice5_short = voice5_full.split(" - ")[0]
    voice6_full = "en-GB-MaisieNeural - en-GB (Female)"  # Child
    voice6_short = voice6_full.split(" - ")[0]

    if text_segment.startswith("1F"):
        current_voice_short = voice1F_short
        current_pitch = 25
        processed_text = text_segment[2:].strip()
    elif text_segment.startswith("2F"):
        current_voice_short = voice2F_short
        processed_text = text_segment[2:].strip()
    elif text_segment.startswith("3F"):
        current_voice_short = voice3F_short
        processed_text = text_segment[2:].strip()
    elif text_segment.startswith("4F"):
        current_voice_short = voice4F_short
        processed_text = text_segment[2:].strip()
    elif text_segment.startswith("1M"):
        current_voice_short = voice1_short
        processed_text = text_segment[2:].strip()
    elif text_segment.startswith("2M"):
        current_voice_short = voice2_short
        processed_text = text_segment[2:].strip()
    elif text_segment.startswith("3M"):
        current_voice_short = voice3_short
        processed_text = text_segment[2:].strip()
    elif text_segment.startswith("4M"):
        current_voice_short = voice4_short
        processed_text = text_segment[2:].strip()
    elif text_segment.startswith("1O"):  # Old man voice
        current_voice_short = voice5_short
        current_pitch = -20
        current_rate = -10
        processed_text = text_segment[2:].strip()
    elif text_segment.startswith("1C"):  # Child voice
        current_voice_short = voice6_short
        processed_text = text_segment[2:].strip()

    rate_str = f"{current_rate:+d}%"
    pitch_str = f"{current_pitch:+d}Hz"
    communicate = edge_tts.Communicate(processed_text, current_voice_short, rate=rate_str, pitch=pitch_str)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        audio_path = tmp_file.name
    await communicate.save(audio_path)
    return audio_path
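
# Illustrative aside (not part of the original flow): the prefix table above binds
# two-character markers to fixed voices, so a segment like "1O Hello there" is
# spoken by en-GB-RyanNeural at pitch -20 Hz and rate -10%. A one-off check,
# assuming network access to the edge-tts service, might look like:
#
#     path = asyncio.run(text_to_speech_segment("1O Hello there", "", 0, 0))
#     print(path)  # temporary .mp3 containing the "old man" rendition
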
async def transcript_to_speech(transcript_text, voice, rate, pitch):
    if not transcript_text.strip():
        return None, gr.Warning("Please enter transcript text.")
    if not voice:
        return None, gr.Warning("Please select a voice.")

    # Split on straight or curly double quotes so each quoted segment becomes its own chunk.
    segments = re.split(r'[“”"]', transcript_text)
    audio_paths = []
    for segment in segments:
        segment = segment.strip()
        if segment:
            # Check if the segment starts with a timestamp
            timestamp_match = re.match(r'(\d+):(\d+)(?:\.(\d+))?\s+(.*)', segment)
            if timestamp_match:
                minutes, seconds, fraction_str, text_with_commands = timestamp_match.groups()
                # Treat the optional fraction as a decimal fraction of a second,
                # so ".5", ".50", and ".500" all mean 500 ms.
                fraction_ms = round(float(f"0.{fraction_str}") * 1000) if fraction_str else 0
                start_time_ms = int(minutes) * 60000 + int(seconds) * 1000 + fraction_ms
                audio_path = await text_to_speech_segment(text_with_commands, voice, rate, pitch)
                audio_paths.append({'start': start_time_ms, 'path': audio_path})
            else:
                # Process segments without timestamps (for voice switching)
                audio_path = await text_to_speech_segment(segment, voice, rate, pitch)
                if audio_path:
                    audio_paths.append({'start': None, 'path': audio_path})  # No specific start time

    if not audio_paths:
        return None, "No audio segments generated."

    # Handle combining audio with timestamps
    timed_segments = [item for item in audio_paths if item['start'] is not None]
    non_timed_segments = [item for item in audio_paths if item['start'] is None and item['path']]

    if timed_segments:
        max_end_time_ms = 0
        processed_timed_segments = []
        for item in timed_segments:
            audio = AudioSegment.from_mp3(item['path'])
            processed_timed_segments.append({'start': item['start'], 'audio': audio, 'path': item['path']})
            max_end_time_ms = max(max_end_time_ms, item['start'] + len(audio))

        final_audio = AudioSegment.silent(duration=max_end_time_ms, frame_rate=24000)
        for segment in processed_timed_segments:
            final_audio = final_audio.overlay(segment['audio'], position=segment['start'])
            os.remove(segment['path'])

        # Append non-timed segments sequentially
        for item in non_timed_segments:
            audio = AudioSegment.from_mp3(item['path'])
            final_audio += audio
            os.remove(item['path'])

        # NamedTemporaryFile replaces the deprecated, race-prone tempfile.mktemp.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
            combined_audio_path = tmp_file.name
        final_audio.export(combined_audio_path, format="mp3")
        return combined_audio_path, None
    elif non_timed_segments:
        # Combine non-timed segments sequentially if no timestamps are found
        combined_audio = AudioSegment.empty()
        for item in non_timed_segments:
            audio = AudioSegment.from_mp3(item['path'])
            combined_audio += audio
            os.remove(item['path'])

        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
            combined_audio_path = tmp_file.name
        combined_audio.export(combined_audio_path, format="mp3")
        return combined_audio_path, None

    return None, "No processable audio segments found."
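
# Parsing example (illustrative): for the quoted segment '1:02.250 1F Hello',
# the timestamp regex above captures minutes='1', seconds='02', fraction='250',
# and text_with_commands='1F Hello', so the clip is overlaid at
# 1*60000 + 2*1000 + 250 = 62250 ms and rendered with the 1F voice.
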
@spaces.GPU
def tts_interface(transcript, voice, rate, pitch):
    audio, warning = asyncio.run(transcript_to_speech(transcript, voice, rate, pitch))
    return audio, warning


async def create_demo():
    voices = await get_voices()
    default_voice = "en-US-AndrewMultilingualNeural - en-US (Male)"
    description = """
Process text, handling both timestamped transcripts and voice switching using quote marks and prefixes.
Separate segments with double quote marks ("). For timestamped segments, use the format: `minutes:seconds[.milliseconds] text`.
Voice prefixes (e.g., 1F, 1C) can be used at the beginning of a quoted segment to switch voices.
Example:
```
0:00 "This"
"0:14 is the story of little Red Riding Hood"
"0:38 1F Grandma isn’t feeling very well."
"0:48 1C Yes, said Little Red Riding Hood."
"and then the default voice continues"
```
"""
    demo = gr.Interface(
        fn=tts_interface,
        inputs=[
            gr.Textbox(label="Input Text / Transcript", lines=10,
                       placeholder='0:00 "This"\n"0:14 is the story..."\n"1F Hello"'),
            gr.Dropdown(choices=[""] + list(voices.keys()), label="Select Voice", value=default_voice),
            gr.Slider(minimum=-50, maximum=50, value=0, label="Speech Rate Adjustment (%)", step=1),
            gr.Slider(minimum=-50, maximum=50, value=0, label="Pitch Adjustment (Hz)", step=1)
        ],
        outputs=[
            gr.Audio(label="Generated Audio", type="filepath"),
            gr.Markdown(label="Warning", visible=False)
        ],
        title="Combined TTS: Timestamps and Voice Switching",
        description=description,
        analytics_enabled=False,
        allow_flagging="never"
    )
    return demo


if __name__ == "__main__":
    demo = asyncio.run(create_demo())
    demo.launch()
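
# Assumed dependencies: pip install gradio edge-tts pydub spaces
# pydub shells out to ffmpeg for MP3 encode/decode, so ffmpeg must be on PATH.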