Update app.py
app.py
CHANGED
```diff
@@ -67,11 +67,11 @@ def transcribe_audio(audio_file):
     print("Loading audio file...")
     audio = AudioSegment.from_file(audio_file)
     audio = audio.set_channels(1).set_frame_rate(16000)
-    audio_array = audio.get_array_of_samples()
+    audio_array = torch.tensor(audio.get_array_of_samples()).float()
 
     print("Starting transcription...")
-    input_features = whisper_processor(audio_array, sampling_rate=16000, return_tensors="pt").input_features.to(device)
-    predicted_ids = whisper_model.generate(input_features)
+    input_features = whisper_processor(audio_array, sampling_rate=16000, return_tensors="pt").input_features.to(device).to(torch.float16)
+    predicted_ids = whisper_model.generate(input_features, language='en', task='translate')
     transcription = whisper_processor.batch_decode(predicted_ids, skip_special_tokens=True)
 
     print(f"Transcription complete. Length: {len(transcription[0])} characters")
```
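For context, a minimal sketch of the setup this hunk appears to assume is shown below. The names `whisper_processor`, `whisper_model`, and `device` are taken from the diff; the checkpoint ID and the float16 model loading are assumptions inferred from the `.to(torch.float16)` cast on the input features, not something this commit confirms.

```python
# Hypothetical setup sketch -- not part of this commit.
# whisper_processor, whisper_model, and device match the names used in the diff;
# the checkpoint ID and float16 loading are assumptions.
import torch
from transformers import WhisperProcessor, WhisperForConditionalGeneration

device = "cuda" if torch.cuda.is_available() else "cpu"

whisper_processor = WhisperProcessor.from_pretrained("openai/whisper-large-v3")
whisper_model = WhisperForConditionalGeneration.from_pretrained(
    "openai/whisper-large-v3",
    torch_dtype=torch.float16,  # would explain why the diff casts input_features to float16
).to(device)
```

Two things worth noting about the new lines: `task='translate'` makes Whisper emit English regardless of the spoken language, whereas `task='transcribe'` would keep the source language; and `AudioSegment.get_array_of_samples()` yields raw integer PCM values, so the float tensor here is unnormalized, which may be worth checking against what the rest of app.py expects.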