Update app.py
app.py CHANGED

@@ -10,11 +10,14 @@ import os
 from bs4 import BeautifulSoup
 import re
 import numpy as np
-from moviepy import VideoFileClip
+from moviepy.editor import VideoFileClip
 import soundfile as sf
+from spellchecker import SpellChecker
+import tempfile
 
 # Load the transcription model
 transcription_pipeline = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
+spell = SpellChecker()
 
 def download_audio_from_url(url):
     try:
@@ -40,34 +43,50 @@ def download_audio_from_url(url):
         print(f"Error in download_audio_from_url: {str(e)}")
         raise
 
+def correct_spelling(text):
+    words = text.split()
+    corrected_words = [spell.correction(word) or word for word in words]
+    return ' '.join(corrected_words)
+
+def format_transcript(transcript):
+    sentences = transcript.split('.')
+    formatted_transcript = []
+    current_speaker = None
+    for sentence in sentences:
+        if ':' in sentence:
+            speaker, content = sentence.split(':', 1)
+            if speaker != current_speaker:
+                formatted_transcript.append(f"\n\n{speaker.strip()}:{content.strip()}.")
+                current_speaker = speaker
+            else:
+                formatted_transcript.append(f"{content.strip()}.")
+        else:
+            formatted_transcript.append(sentence.strip() + '.')
+    return ' '.join(formatted_transcript)
+
 def transcribe_audio(video_bytes):
     try:
-        # Save the video bytes to a temporary file
         with open("temp_video.mp4", "wb") as f:
             f.write(video_bytes)
 
-        # Extract audio from video
         video = VideoFileClip("temp_video.mp4")
         audio = video.audio
 
-        # Export audio as mono WAV
         audio.write_audiofile("temp_audio.wav", fps=16000, nbytes=2, codec='pcm_s16le')
 
-        # Load the audio file
         audio_data, sample_rate = sf.read("temp_audio.wav")
 
-        # Ensure audio is mono
         if len(audio_data.shape) > 1:
             audio_data = audio_data.mean(axis=1)
 
-        # Normalize the audio data
         audio_data = audio_data.astype(np.float32) / np.max(np.abs(audio_data))
 
-        # Transcribe the audio
         result = transcription_pipeline(audio_data)
         transcript = result['text']
 
-
+        transcript = correct_spelling(transcript)
+        transcript = format_transcript(transcript)
+
         os.remove("temp_video.mp4")
         os.remove("temp_audio.wav")
 
@@ -91,9 +110,12 @@ def transcribe_video(url):
         error_message = f"An error occurred: {str(e)}"
         print(error_message)
         return error_message
-
+
 def download_transcript(transcript):
-
+    with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt') as temp_file:
+        temp_file.write(transcript)
+        temp_file_path = temp_file.name
+    return temp_file_path
 
 # Create the Gradio interface
 with gr.Blocks(title="Video Transcription") as demo:
@@ -105,6 +127,6 @@ with gr.Blocks(title="Video Transcription") as demo:
     download_link = gr.File(label="Download Transcript")
 
     transcribe_button.click(fn=transcribe_video, inputs=video_url, outputs=transcript_output)
-    download_button.click(fn=download_transcript, inputs=transcript_output, outputs=
+    download_button.click(fn=download_transcript, inputs=transcript_output, outputs=download_link)
 
 demo.launch()
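For reference, a minimal standalone sketch of the word-by-word spell-correction step this commit adds (it assumes the pyspellchecker package is installed; the sample string and the standalone usage are illustrative only, not part of app.py):

from spellchecker import SpellChecker

spell = SpellChecker()

# wav2vec2-base-960h emits uppercase text without punctuation, so the
# cleanup is applied word by word on a whitespace split, as in correct_spelling().
raw = "THE QICK BROWN FOX JUMPD OVER THE LAZY DOG"
corrected = ' '.join(spell.correction(word) or word for word in raw.split())
print(corrected)

SpellChecker.correction() returns None when it has no suggestion for a word, which is why the new code keeps the original word via the "or word" fallback.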