import gradio as gr
import numpy as np
import os
import requests
import soundfile as sf
import tempfile
from bs4 import BeautifulSoup
from moviepy.editor import VideoFileClip
from spellchecker import SpellChecker
from transformers import pipeline
from urllib.parse import urljoin

# Load the transcription model and the spell checker
transcription_pipeline = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
spell = SpellChecker()


def download_audio_from_url(url):
    """Download the raw video bytes from a direct or shareable URL."""
    try:
        if "share" in url:
            # Shareable links point at an HTML page; scrape the <video> tag for the real source.
            print("Processing shareable link...")
            response = requests.get(url)
            response.raise_for_status()
            soup = BeautifulSoup(response.content, 'html.parser')
            video_tag = soup.find('video')
            if video_tag and 'src' in video_tag.attrs:
                # urljoin resolves a relative src attribute against the page URL.
                video_url = urljoin(url, video_tag['src'])
                print(f"Extracted video URL: {video_url}")
            else:
                raise ValueError("Direct video URL not found in the shareable link.")
        else:
            video_url = url

        print(f"Downloading video from URL: {video_url}")
        response = requests.get(video_url)
        response.raise_for_status()
        video_bytes = response.content
        print(f"Successfully downloaded {len(video_bytes)} bytes of data")
        return video_bytes
    except Exception as e:
        print(f"Error in download_audio_from_url: {e}")
        raise


def correct_spelling(text):
    """Run each word through the spell checker, keeping the original word if no correction is found."""
    words = text.split()
    corrected_words = [spell.correction(word) or word for word in words]
    return ' '.join(corrected_words)


def format_transcript(transcript):
    """Split the transcript into sentences and start a new paragraph whenever a 'Speaker:' label changes."""
    sentences = transcript.split('.')
    formatted_transcript = []
    current_speaker = None
    for sentence in sentences:
        if ':' in sentence:
            speaker, content = sentence.split(':', 1)
            if speaker != current_speaker:
                formatted_transcript.append(f"\n\n{speaker.strip()}:{content.strip()}.")
                current_speaker = speaker
            else:
                formatted_transcript.append(f"{content.strip()}.")
        else:
            formatted_transcript.append(sentence.strip() + '.')
    return ' '.join(formatted_transcript)


def transcribe_audio(video_bytes):
    """Extract the audio track from the downloaded video and transcribe it."""
    try:
        # Write the video to disk so moviepy can open it, then export 16 kHz mono 16-bit PCM audio.
        with open("temp_video.mp4", "wb") as f:
            f.write(video_bytes)
        video = VideoFileClip("temp_video.mp4")
        video.audio.write_audiofile("temp_audio.wav", fps=16000, nbytes=2, codec='pcm_s16le')
        video.close()

        audio_data, sample_rate = sf.read("temp_audio.wav")
        if audio_data.ndim > 1:
            # Downmix stereo to mono.
            audio_data = audio_data.mean(axis=1)
        # Peak-normalize, guarding against division by zero on a silent clip.
        peak = np.max(np.abs(audio_data))
        if peak > 0:
            audio_data = audio_data / peak
        audio_data = audio_data.astype(np.float32)

        result = transcription_pipeline({"raw": audio_data, "sampling_rate": sample_rate})
        transcript = result['text']
        transcript = correct_spelling(transcript)
        transcript = format_transcript(transcript)

        os.remove("temp_video.mp4")
        os.remove("temp_audio.wav")
        return transcript
    except Exception as e:
        print(f"Error in transcribe_audio: {e}")
        raise


def transcribe_video(url):
    """Gradio callback: download the video at `url` and return its transcript (or an error message)."""
    try:
        print(f"Attempting to download audio from URL: {url}")
        video_bytes = download_audio_from_url(url)
        print(f"Successfully downloaded {len(video_bytes)} bytes of data")
        print("Starting audio transcription...")
        transcript = transcribe_audio(video_bytes)
        print("Transcription completed successfully")
        return transcript
    except Exception as e:
        error_message = f"An error occurred: {e}"
        print(error_message)
        return error_message


def download_transcript(transcript):
    """Write the transcript to a temporary .txt file and return its path for the File component."""
    with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt') as temp_file:
        temp_file.write(transcript)
        temp_file_path = temp_file.name
    return temp_file_path


# Create the Gradio interface
with gr.Blocks(title="Video Transcription") as demo:
    gr.Markdown("# Video Transcription")
    video_url = gr.Textbox(label="Video URL")
    transcribe_button = gr.Button("Transcribe")
    transcript_output = gr.Textbox(label="Transcript", lines=20)
    download_button = gr.Button("Download Transcript")
    download_link = gr.File(label="Download Transcript")

    transcribe_button.click(fn=transcribe_video, inputs=video_url, outputs=transcript_output)
    download_button.click(fn=download_transcript, inputs=transcript_output, outputs=download_link)

demo.launch()