Dominik Macháček committed on
Commit 6b968c6 · Parents: 6fa0080, b66c61c

Merge branch 'main' into vad-streaming

Files changed (2)
  1. README.md +1 -1
  2. whisper_online.py +6 -5
README.md CHANGED
@@ -86,7 +86,7 @@ options:
   --model_dir MODEL_DIR
                         Dir where Whisper model.bin and other files are saved. This option overrides --model and --model_cache_dir parameter.
   --lan LAN, --language LAN
-                        Language code for transcription, e.g. en,de,cs.
+                        Source language code, e.g. en,de,cs, or 'auto' for language detection.
   --task {transcribe,translate}
                         Transcribe or translate.
   --backend {faster-whisper,whisper_timestamped}
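(For illustration only, not part of the commit: with this change the option accepts the special value 'auto', e.g. `python3 whisper_online.py audio.wav --lan auto --backend faster-whisper`. The audio file name is a placeholder and the positional audio-path argument is assumed from the surrounding README, which is not shown in this hunk.)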
whisper_online.py CHANGED
@@ -30,7 +30,10 @@ class ASRBase:
         self.logfile = logfile
 
         self.transcribe_kargs = {}
-        self.original_language = lan
+        if lan == "auto":
+            self.original_language = None
+        else:
+            self.original_language = lan
 
         self.model = self.load_model(modelsize, cache_dir, model_dir)
 
@@ -119,11 +122,9 @@ class FasterWhisperASR(ASRBase):
 
     def transcribe(self, audio, init_prompt=""):
 
-        # tiempo_inicio = datetime.datetime.now()
         # tested: beam_size=5 is faster and better than 1 (on one 200 second document from En ESIC, min chunk 0.01)
         segments, info = self.model.transcribe(audio, language=self.original_language, initial_prompt=init_prompt, beam_size=5, word_timestamps=True, condition_on_previous_text=True, **self.transcribe_kargs)
-
-        # print(f'({datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")})----------r> whisper transcribe take { (datetime.datetime.now() -tiempo_inicio) } ms.')
+        #print(info) # info contains language detection result
 
         return list(segments)
 
@@ -458,7 +459,7 @@ def add_shared_args(parser):
     parser.add_argument('--model', type=str, default='large-v2', choices="tiny.en,tiny,base.en,base,small.en,small,medium.en,medium,large-v1,large-v2,large-v3,large".split(","),help="Name size of the Whisper model to use (default: large-v2). The model is automatically downloaded from the model hub if not present in model cache dir.")
     parser.add_argument('--model_cache_dir', type=str, default=None, help="Overriding the default model cache dir where models downloaded from the hub are saved")
     parser.add_argument('--model_dir', type=str, default=None, help="Dir where Whisper model.bin and other files are saved. This option overrides --model and --model_cache_dir parameter.")
-    parser.add_argument('--lan', '--language', type=str, default='en', help="Language code for transcription, e.g. en,de,cs.")
+    parser.add_argument('--lan', '--language', type=str, default='en', help="Source language code, e.g. en,de,cs, or 'auto' for language detection.")
     parser.add_argument('--task', type=str, default='transcribe', choices=["transcribe","translate"],help="Transcribe or translate.")
     parser.add_argument('--backend', type=str, default="faster-whisper", choices=["faster-whisper", "whisper_timestamped"],help='Load only this backend for Whisper processing.')
     parser.add_argument('--vad', action="store_true", default=False, help='Use VAD = voice activity detection, with the default parameters.')
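For illustration only (not part of the commit): a minimal sketch of how the new 'auto' value is expected to reach faster-whisper. It assumes the faster-whisper API, where WhisperModel.transcribe called with language=None runs the library's built-in language detection and reports the result on the returned info object, which is what the new #print(info) comment refers to. The model size and audio file name below are placeholders.

# Sketch, not part of whisper_online.py
from faster_whisper import WhisperModel

lan = "auto"
# mirrors the new branch in ASRBase.__init__: "auto" is passed to the backend as None
original_language = None if lan == "auto" else lan

model = WhisperModel("large-v2")          # placeholder model size (the --model default)
segments, info = model.transcribe(
    "audio.wav",                          # placeholder audio file
    language=original_language,           # None -> faster-whisper detects the language
    beam_size=5,
    word_timestamps=True,
    condition_on_previous_text=True,
)
print(info.language, info.language_probability)   # detected language and its probability
for s in segments:
    print(s.start, s.end, s.text)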