bluenevus committed
Commit b09f327 · verified · 1 Parent(s): 249a3c0

Update app.py

Files changed (1)
  1. app.py +80 -3
app.py CHANGED
@@ -1,15 +1,69 @@
+import gradio as gr
 import torch
 from transformers import WhisperProcessor, WhisperForConditionalGeneration
+import requests
+from bs4 import BeautifulSoup
+import tempfile
+import os
+import soundfile as sf
+from spellchecker import SpellChecker
 
 # Check if CUDA is available and set the device
 device = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"Using device: {device}")
 
 # Load the Whisper model and processor
-model_name = "openai/whisper-base"
+model_name = "openai/whisper-small"
 processor = WhisperProcessor.from_pretrained(model_name)
 model = WhisperForConditionalGeneration.from_pretrained(model_name).to(device)
 
+spell = SpellChecker()
+
+def download_audio_from_url(url):
+    try:
+        if "share" in url:
+            print("Processing shareable link...")
+            response = requests.get(url)
+            soup = BeautifulSoup(response.content, 'html.parser')
+            video_tag = soup.find('video')
+            if video_tag and 'src' in video_tag.attrs:
+                video_url = video_tag['src']
+                print(f"Extracted video URL: {video_url}")
+            else:
+                raise ValueError("Direct video URL not found in the shareable link.")
+        else:
+            video_url = url
+
+        print(f"Downloading video from URL: {video_url}")
+        response = requests.get(video_url)
+        audio_bytes = response.content
+        print(f"Successfully downloaded {len(audio_bytes)} bytes of data")
+        return audio_bytes
+    except Exception as e:
+        print(f"Error in download_audio_from_url: {str(e)}")
+        raise
+
+def correct_spelling(text):
+    words = text.split()
+    corrected_words = [spell.correction(word) or word for word in words]
+    return ' '.join(corrected_words)
+
+def format_transcript(transcript):
+    sentences = transcript.split('.')
+    formatted_transcript = []
+    current_speaker = None
+    for sentence in sentences:
+        if ':' in sentence:
+            speaker, content = sentence.split(':', 1)
+            if speaker != current_speaker:
+                formatted_transcript.append(f"\n\n{speaker.strip()}:{content.strip()}.")
+                current_speaker = speaker
+            else:
+                formatted_transcript.append(f"{content.strip()}.")
+        else:
+            formatted_transcript.append(sentence.strip() + '.')
+    return ' '.join(formatted_transcript)
+
 def transcribe_audio(audio_file):
     try:
         # Load and preprocess the audio
@@ -27,7 +81,6 @@ def transcribe_audio(audio_file):
         print(f"Error in transcribe_audio: {str(e)}")
         raise
 
-# Update the transcribe_video function to use the new transcribe_audio function
 def transcribe_video(url):
     try:
         print(f"Attempting to download audio from URL: {url}")
@@ -46,8 +99,32 @@ def transcribe_video(url):
         # Clean up the temporary file
         os.unlink(temp_audio_path)
 
+        # Apply spelling correction and formatting
+        transcript = correct_spelling(transcript)
+        transcript = format_transcript(transcript)
+
         return transcript
     except Exception as e:
         error_message = f"An error occurred: {str(e)}"
         print(error_message)
-        return error_message
+        return error_message
+
+def download_transcript(transcript):
+    with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.txt') as temp_file:
+        temp_file.write(transcript)
+        temp_file_path = temp_file.name
+    return temp_file_path
+
+# Create the Gradio interface
+with gr.Blocks(title="Video Transcription") as demo:
+    gr.Markdown("# Video Transcription")
+    video_url = gr.Textbox(label="Video URL")
+    transcribe_button = gr.Button("Transcribe")
+    transcript_output = gr.Textbox(label="Transcript", lines=20)
+    download_button = gr.Button("Download Transcript")
+    download_link = gr.File(label="Download Transcript")
+
+    transcribe_button.click(fn=transcribe_video, inputs=video_url, outputs=transcript_output)
+    download_button.click(fn=download_transcript, inputs=transcript_output, outputs=download_link)
+
+    demo.launch()
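The hunk at @@ -1,15 +1,69 @@ shows only the signature and first comment of transcribe_audio, and the hunk at @@ -27,7 +81,6 @@ shows only its error handling; the body itself sits outside the diff context. For reference, a minimal Whisper transcription helper built on the same processor/model pair generally looks like the sketch below. This is an illustration, not the code from this commit: the name transcribe_audio_sketch is hypothetical, and the input is assumed to already be 16 kHz mono audio (resampling is omitted).

import soundfile as sf
import torch
from transformers import WhisperProcessor, WhisperForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"
processor = WhisperProcessor.from_pretrained("openai/whisper-small")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)

def transcribe_audio_sketch(audio_file):
    # Load the samples; Whisper's feature extractor expects 16 kHz mono input.
    audio, sample_rate = sf.read(audio_file)
    # Convert the waveform to log-mel input features for the model.
    input_features = processor(audio, sampling_rate=sample_rate, return_tensors="pt").input_features.to(device)
    # Autoregressive decoding, then strip special tokens from the result.
    predicted_ids = model.generate(input_features)
    return processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]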
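The two new post-processing helpers are plain string functions, so they can be tried in isolation. The snippet below runs a made-up transcript through them; it assumes correct_spelling and format_transcript from the updated app.py have been pasted into the session (importing app.py directly would also call demo.launch()), and the sample text is illustrative rather than real model output.

# Hypothetical sample; real input would come from transcribe_audio().
sample = "Speaker 1: helo and welcom to the meeting. Speaker 2: thnak you for joining."

# correct_spelling() replaces each whitespace-separated token with
# pyspellchecker's best guess, keeping the original token when no guess exists.
print(correct_spelling(sample))

# format_transcript() splits on '.', treats the text before the first ':' in a
# sentence as a speaker label, and starts a new paragraph when that label
# differs from the previous one.
print(format_transcript(sample))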
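With this commit, app.py becomes a self-contained Gradio app: demo.launch() runs at import time, so starting it should just be a matter of running app.py (or letting the Space runtime do so). The new imports imply extra dependencies beyond torch and transformers, presumably gradio, requests, beautifulsoup4, soundfile, and pyspellchecker (imported as spellchecker), which would need to be listed in the Space's requirements.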