monitkorn committed
Commit 7f17e42 · 1 Parent: 8f51d07

remove token

Files changed (2)
  1. app.py +117 -0
  2. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,117 @@
+ import os
+ import sys
+ import subprocess
+ import tempfile
+ import requests
+ from moviepy.editor import VideoFileClip
+
+ # Ensure the official OpenAI Whisper package is installed (supports load_model)
+ try:
+     import whisper
+     if not hasattr(whisper, 'load_model'):
+         raise ImportError
+ except ImportError:
+     subprocess.run([sys.executable, "-m", "pip", "install", "--upgrade", "openai-whisper"], check=True)
+     import whisper
+
+ import torch
+ import librosa
+ import pandas as pd
+ from transformers import Wav2Vec2Processor, Wav2Vec2ForSequenceClassification
+ from huggingface_hub import login
+ import gradio as gr
+
+ # Authenticate with Hugging Face (token via HF_TOKEN env var; skipped if unset)
+ if os.environ.get('HF_TOKEN'):
+     login(token=os.environ['HF_TOKEN'])
+
+ # Device setup (GPU if available)
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+ def load_models():
+     # Load Whisper directly on the target device
+     whisper_model = whisper.load_model('base', device=device)
+     processor = Wav2Vec2Processor.from_pretrained(
+         'jonatasgrosman/wav2vec2-large-xlsr-53-english'
+     )
+     accent_model = Wav2Vec2ForSequenceClassification.from_pretrained(
+         'jonatasgrosman/wav2vec2-large-xlsr-53-english'
+     ).to(device)
+     return whisper_model, processor, accent_model
+
+ whisper_model, processor, accent_model = load_models()
+
+ # Main analysis function
+ def analyze(video_url: str):
+     # Download video to temp file
+     with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as tmp_vid:
+         response = requests.get(video_url, stream=True)
+         response.raise_for_status()
+         for chunk in response.iter_content(chunk_size=1024 * 1024):
+             if chunk:
+                 tmp_vid.write(chunk)
+         video_path = tmp_vid.name
+
+     # Extract audio
+     audio_path = video_path.replace('.mp4', '.wav')
+     clip = VideoFileClip(video_path)
+     clip.audio.write_audiofile(audio_path, verbose=False, logger=None)
+     clip.close()
+
+     # Load audio waveform
+     speech, sr = librosa.load(audio_path, sr=16000)
+
+     # Transcribe with Whisper (model on correct device)
+     result = whisper_model.transcribe(speech)
+     transcript = result.get('text', '')
+     lang = result.get('language', 'unknown')
+     if lang != 'en':
+         transcript = f"[Non-English detected: {lang}]\n" + transcript
+
+     # Accent classification
+     inputs = processor(speech, sampling_rate=sr, return_tensors='pt', padding=True)
+     input_values = inputs.input_values.to(device)
+     attention_mask = inputs.attention_mask.to(device)
+     with torch.no_grad():
+         logits = accent_model(input_values=input_values, attention_mask=attention_mask).logits
+     probs = torch.softmax(logits, dim=-1).squeeze().cpu().tolist()
+
+     # Map default LABEL_x to human-readable accents
+     accent_labels = [
+         'American', 'Australian', 'British', 'Canadian', 'Indian',
+         'Irish', 'New Zealander', 'South African', 'Welsh'
+     ]  # ensure this matches the model's output order and label count
+     # zip guards against an IndexError if the head emits a different label count
+     accent_probs = [(label, p * 100) for label, p in zip(accent_labels, probs)]
+     accent_probs.sort(key=lambda x: x[1], reverse=True)
+     top_accent, top_conf = accent_probs[0]
+
+     # Prepare DataFrame (one row per accent, sorted by confidence)
+     df = pd.DataFrame(accent_probs, columns=['Accent', 'Confidence (%)'])
+
+     # Cleanup temp files
+     try:
+         os.remove(video_path)
+         os.remove(audio_path)
+     except OSError:
+         pass
+
+     return top_accent, f"{top_conf:.2f}%", df
+
+ # Gradio interface
+ interface = gr.Interface(
+     fn=analyze,
+     inputs=gr.Textbox(label='Video URL', placeholder='Enter public MP4 URL'),
+     outputs=[
+         # gr.Textbox(label='Transcript'),
+         gr.Textbox(label='Predicted Accent'),
+         gr.Textbox(label='Accent Confidence'),
+         gr.Dataframe(label='All Accent Probabilities')
+     ],
+     title='English Accent Detector',
+     description='Paste a Loom or direct MP4 URL to extract, transcribe, and classify English accents (uses GPU if available).',
+     allow_flagging='never'
+ )
+
+ if __name__ == '__main__':
+     interface.launch()
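
For a quick local check of the new analyze() function outside the Gradio UI, a minimal sketch like the following works. The MP4 URL is a placeholder, not a tested endpoint, and importing app triggers the module-level model loading:

    # smoke_test.py -- hypothetical helper, not part of this commit
    from app import analyze  # loads Whisper and the wav2vec2 model on import

    # Placeholder URL: substitute any publicly reachable MP4
    accent, confidence, table = analyze('https://example.com/sample.mp4')
    print(f'Top accent: {accent} ({confidence})')
    print(table.to_string(index=False))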
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ openai-whisper
+ gradio
+ requests
+ moviepy<2  # moviepy 2.x removed moviepy.editor, which app.py imports
+ torch
+ librosa
+ pandas
+ transformers
+ huggingface-hub
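
Note that several of these PyPI names differ from their import names (openai-whisper installs whisper, huggingface-hub installs huggingface_hub). An optional sanity check along these lines can confirm the environment before launching the Space; it is a sketch, not part of the commit:

    # check_env.py -- optional sanity check for the installed stack
    import importlib

    for module in ('whisper', 'gradio', 'requests', 'moviepy.editor',
                   'torch', 'librosa', 'pandas', 'transformers', 'huggingface_hub'):
        importlib.import_module(module)
        print(f'{module}: OK')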