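"""Gradio Space: English accent classifier.

Takes a video URL or an uploaded MP4, extracts the audio track, checks that
the speech is English with openai/whisper-tiny, then predicts the speaker's
accent with SpeechBrain's CommonAccent classifier.
"""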
import gradio as gr
import torch
import tempfile
import os
import requests
from moviepy import VideoFileClip
from transformers import pipeline
import torchaudio
from speechbrain.pretrained.interfaces import foreign_class
from transformers import WhisperForConditionalGeneration, WhisperProcessor
# Whisper (tiny) pipeline, used to transcribe the audio and confirm the speech is English
whisper_pipe = pipeline("automatic-speech-recognition", model="openai/whisper-tiny", device="cpu")

# SpeechBrain's CommonAccent classifier for English accents
classifier = foreign_class(source="Jzuluaga/accent-id-commonaccent_xlsr-en-english", pymodule_file="custom_interface.py", classname="CustomEncoderWav2vec2Classifier")

# Fallback model/processor, used when the transformers whisper-tiny pipeline does not return a detected language
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")
processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
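# Map the classifier's raw labels to readable accent names. Keys must match
# the model's output labels exactly (including the "southatlandtic" spelling).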
ACCENT_LABELS = {
    "us": "American Accent",
    "england": "British Accent",
    "australia": "Australian Accent",
    "indian": "Indian Accent",
    "canada": "Canadian Accent",
    "bermuda": "Bermudian Accent",
    "scotland": "Scottish Accent",
    "african": "African Accent",
    "ireland": "Irish Accent",
    "newzealand": "New Zealand Accent",
    "wales": "Welsh Accent",
    "malaysia": "Malaysian Accent",
    "philippines": "Philippine Accent",
    "singapore": "Singaporean Accent",
    "hongkong": "Hong Kong Accent",
    "southatlandtic": "South Atlantic Accent",
}
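# classify_batch returns (posterior probabilities, best score, best index,
# label list); illustrative values only, e.g. text_lab == ['us'] with
# score[0].item() around 0.97 for a confident American-accent prediction.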
def classify_accent(audio_tensor, sample_rate):
    # The classifier expects mono 16 kHz audio shaped (batch, time).
    if audio_tensor.shape[0] > 1:
        audio_tensor = audio_tensor.mean(dim=0, keepdim=True)
    if sample_rate != 16000:
        resampler = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)
        audio_tensor = resampler(audio_tensor)
    out_prob, score, index, text_lab = classifier.classify_batch(audio_tensor)
    print(out_prob, score, index, text_lab)
    accent_label = text_lab[0]
    readable_accent = ACCENT_LABELS.get(accent_label, accent_label.title() + " Accent")
    confidence = round(score[0].item() * 100, 2)
    return {
        "accent": readable_accent,
        "confidence": confidence,
        "summary": f"The speaker is predicted to have a {readable_accent} with {confidence}% confidence."
    }
def download_video(url):
    # Stream the remote MP4 into a temp file in 1 MiB chunks.
    video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    response = requests.get(url, stream=True)
    response.raise_for_status()
    with open(video_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=1024 * 1024):
            if chunk:
                f.write(chunk)
    return video_path
def extract_audio(video_path):
    # Write the video's audio track out as 16-bit PCM WAV.
    audio_path = tempfile.NamedTemporaryFile(suffix=".wav", delete=False).name
    clip = VideoFileClip(video_path)
    clip.audio.write_audiofile(audio_path, codec='pcm_s16le')
    clip.close()
    return audio_path
def detect_language(audio_path):
    # Fallback language detection: run Whisper's own generation step and read
    # the language token (e.g. "<|en|>") it emits after <|startoftranscript|>.
    audio, sr = torchaudio.load(audio_path)
    if sr != 16000:
        audio = torchaudio.transforms.Resample(orig_freq=sr, new_freq=16000)(audio)
    inputs = processor(audio[0], sampling_rate=16000, return_tensors="pt")
    predicted_ids = model.generate(inputs.input_features, max_new_tokens=5)
    tokens = processor.tokenizer.convert_ids_to_tokens(predicted_ids[0].tolist())
    # Language tokens are two- or three-letter codes wrapped in <|...|>.
    for tok in tokens:
        if tok.startswith("<|") and tok.endswith("|>") and tok[2:-2].isalpha() and len(tok) <= 7:
            return tok[2:-2]
    return "unknown"
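# Note: with return_language=True the ASR pipeline reports a per-chunk language;
# an illustrative (not captured) result looks like
#   {'text': ' Hello there.', 'chunks': [{'text': ' Hello there.', 'language': 'english'}]}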
def transcribe(audio_path):
    result = whisper_pipe(audio_path, return_language=True)
    print(result)
    chunks = result.get("chunks") or [{}]
    lang = chunks[0].get("language")
    if lang is None:
        lang = detect_language(audio_path)
    return result["text"], lang
def analyze_accent(url_or_file):
    try:
        # Accept either a URL or a local file path.
        if url_or_file.startswith("http"):
            video_path = download_video(url_or_file)
        else:
            video_path = url_or_file
        print("Video path:", video_path)

        audio_path = extract_audio(video_path)
        print("Audio path:", audio_path)

        # Load audio with torchaudio for the accent classifier
        waveform, sample_rate = torchaudio.load(audio_path)

        # Transcribe to verify the speech is English
        text, language = transcribe(audio_path)
        if len(text.strip()) < 3:
            return "Could not understand speech. Please try another video."
        print("Transcript:", text)
        if language.lower() not in ("en", "english"):
            return "The video is not in English. Please provide an English video."

        # Accent classification
        result = classify_accent(waveform, sample_rate)
        output = f"**Language**: {language}\n\n"
        output += f"**Accent**: {result['accent']}\n\n"
        output += f"**Confidence**: {result['confidence']}%\n\n"
        output += f"**Explanation**: {result['summary']}\n\n"
        output += f"**Transcript** (first 200 chars): {text[:200]}..."

        # Clean up temp files (only delete the video if we downloaded it)
        if url_or_file.startswith("http"):
            os.remove(video_path)
        os.remove(audio_path)
        return output
    except Exception as e:
        return f"❌ Error: {str(e)}"
with gr.Blocks() as demo:
gr.Markdown("""
# English Accent Classifier!
### How it works?
- Takes video URL or video file
- Converts it into audio
- Uses `Whisper-tiny` to detect which language is being spoken
- If the detected language is English, it uses SpeechBrain's Accent ID classifier to show the speaker's accent along with a confidence score.
**Q: What if my transformers version doesn't expose `return_language` for `whisper-tiny`?**
A: Then it will approximate the language by counting which language's tokens it is using the most.
""")
with gr.Tab("From URL"):
url_input = gr.Textbox(label="Video URL (MP4)")
url_output = gr.Markdown("""### Output will be shown here!""", elem_classes="output-box")
gr.Button("Analyze").click(fn=analyze_accent, inputs=url_input, outputs=url_output)
gr.Examples(
examples=[["https://huggingface.co/spaces/fahadqazi/accent-classifier/resolve/main/examples/american.mp4"], ["https://huggingface.co/spaces/fahadqazi/accent-classifier/resolve/main/examples/british.mp4"]],
inputs=[url_input],
outputs=[url_output],
label="Example MP4 Video URLs",
examples_per_page=5
)
with gr.Tab("From File"):
file_input = gr.File(label="Upload MP4 Video", file_types=[".mp4"])
file_output = gr.Markdown("""### Output will be shown here!""", elem_classes="output-box")
gr.Button("Analyze").click(fn=analyze_accent, inputs=file_input, outputs=file_output)
gr.Examples(
examples=[[os.getcwd() + "/examples/american.mp4"], [os.getcwd() + "/examples/british.mp4"]],
inputs=[file_input],
outputs=[file_output],
label="Example MP4 Videos",
examples_per_page=5
)
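# Inline CSS for the output boxes; assigning demo.css before launch() (rather
# than passing css= to gr.Blocks) still takes effect when the page is served.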
demo.css = """
.output-box {
min-height: 70px;
overflow-y: auto;
padding: 10px;
}
"""
demo.launch()