import gradio as gr
import torchaudio
import torchaudio.transforms as T
from transformers import pipeline
import requests
from urllib.parse import urljoin
from pydub import AudioSegment
from pydub.silence import split_on_silence
import io
import os
from bs4 import BeautifulSoup

# Load the transcription model
transcription_pipeline = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
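# Note: wav2vec2-base-960h is an English-only model trained on 16 kHz audio,
# so every clip is downmixed to mono and resampled to 16 kHz before it is
# handed to the pipeline (see transcribe_audio below).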

def download_audio_from_url(url):
    if "share" in url:
        # Shareable link: scrape the page for the direct video URL
        response = requests.get(url)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')
        video_tag = soup.find('video')
        if video_tag and 'src' in video_tag.attrs:
            # The src attribute may be relative, so resolve it against the page URL
            video_url = urljoin(url, video_tag['src'])
        else:
            raise ValueError("Direct video URL not found in the shareable link.")
    else:
        video_url = url

    response = requests.get(video_url)
    response.raise_for_status()
    return response.content

def transcribe_audio(audio_bytes):
    audio = AudioSegment.from_file(io.BytesIO(audio_bytes))

    # Split the audio on silence and transcribe each chunk separately;
    # split_on_silence only returns non-silent chunks, so each silence
    # gap becomes a paragraph boundary in the final transcript.
    chunks = split_on_silence(audio, min_silence_len=500, silence_thresh=-40)

    paragraphs = []
    for chunk in chunks:
        chunk.export("temp_chunk.wav", format="wav")
        waveform, sample_rate = torchaudio.load("temp_chunk.wav")
        os.remove("temp_chunk.wav")

        # The pipeline expects a mono numpy array plus its sampling rate,
        # not a torch tensor, so downmix and resample to 16 kHz first.
        if sample_rate != 16000:
            waveform = T.Resample(sample_rate, 16000)(waveform)
        mono = waveform.mean(dim=0).numpy()

        chunk_result = transcription_pipeline(
            {"raw": mono, "sampling_rate": 16000}, chunk_length_s=30
        )
        chunk_transcript = chunk_result['text'].strip()
        if chunk_transcript:
            paragraphs.append(chunk_transcript)

    formatted_transcript = "\n\n".join(paragraphs)
    return formatted_transcript

def transcribe_video(url):
    audio_bytes = download_audio_from_url(url)
    transcript = transcribe_audio(audio_bytes)
    return transcript
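
# Quick smoke test without launching the UI (the URL below is just a
# placeholder, not a real endpoint):
#   print(transcribe_video("https://example.com/video.mp4"))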

def download_transcript(transcript):
    # gr.File needs a path on disk, so write the transcript to a file
    # and return the path rather than the raw text
    path = "transcript.txt"
    with open(path, "w") as f:
        f.write(transcript)
    return path

# Create the Gradio interface
with gr.Blocks(title="Video Transcription") as demo:
    gr.Markdown("# Video Transcription")
    video_url = gr.Textbox(label="Video URL")
    transcribe_button = gr.Button("Transcribe")
    transcript_output = gr.Textbox(label="Transcript", lines=20)
    download_button = gr.Button("Download Transcript")
    download_link = gr.File(label="Download Transcript")
    
    transcribe_button.click(fn=transcribe_video, inputs=video_url, outputs=transcript_output)
    download_button.click(fn=download_transcript, inputs=transcript_output, outputs=download_link)

demo.launch()
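
# Assumption: this script runs as a Hugging Face Space or local app. When
# running locally, Gradio can also expose a temporary public link via
# demo.launch(share=True).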