Dominik Macháček committed · Commit e0f5d42
1 Parent(s): 8883397

better documentation, help message and logging prints

Files changed:
- README.md +1 -1
- whisper_online.py +1 -1
- whisper_online_server.py +13 -14
README.md CHANGED
@@ -183,7 +183,7 @@ online.init() # refresh if you're going to re-use the object for the next audio
 
 ### Server -- real-time from mic
 
-`whisper_online_server.py` has the same model options as `whisper_online.py`, plus `--host` and `--port` of the TCP connection
+`whisper_online_server.py` has the same model options as `whisper_online.py`, plus `--host` and `--port` of the TCP connection and the `--warmup-file`. See the help message (`-h` option).
 
 Client example:
 
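The hunk above references the server's TCP `--host`/`--port` options; the README's own client command is outside this hunk, so the sketch below is a hedged illustration only of what a minimal Python client might look like. It assumes the server accepts raw 16 kHz, 16-bit mono PCM over the socket and replies with plain-text transcription lines; `speech.wav` is a placeholder file name.

```python
# Hypothetical client sketch, not the repository's official example.
# Assumes the server accepts raw 16 kHz, 16-bit mono PCM over TCP and
# replies with plain-text transcription lines.
import socket
import soundfile as sf

HOST, PORT = "localhost", 43007                   # defaults from whisper_online_server.py
audio, sr = sf.read("speech.wav", dtype="int16")  # placeholder file; must be 16 kHz mono

with socket.create_connection((HOST, PORT)) as sock:
    sock.sendall(audio.tobytes())                 # stream the raw PCM bytes
    sock.shutdown(socket.SHUT_WR)                 # tell the server the audio is finished
    for line in sock.makefile("r"):               # read transcription lines as they arrive
        print(line, end="")
```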
whisper_online.py CHANGED
@@ -625,7 +625,7 @@ if __name__ == "__main__":
     # load the audio into the LRU cache before we start the timer
     a = load_audio_chunk(audio_path,0,1)
 
-    # warm up the ASR
+    # warm up the ASR because the very first transcribe takes much more time than the other
     asr.transcribe(a)
 
     beg = args.start_at
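The expanded comment states the motivation: the very first `transcribe` call pays one-off costs (model and cache initialization), so it is run once before the timer starts. A minimal timing sketch of that idea, assuming `asr`, `load_audio_chunk` and `audio_path` are the objects already set up in the `__main__` block shown above:

```python
# Sketch only: shows why the warm-up call is excluded from timing.
# `asr`, `load_audio_chunk` and `audio_path` are assumed to be the objects
# already constructed in whisper_online.py's __main__ block.
import time

a = load_audio_chunk(audio_path, 0, 1)   # 1 second of audio, already in the LRU cache

t0 = time.time()
asr.transcribe(a)                        # cold call: includes one-off initialization
print(f"cold transcribe: {time.time() - t0:.2f}s")

t0 = time.time()
asr.transcribe(a)                        # warm call: steady-state latency only
print(f"warm transcribe: {time.time() - t0:.2f}s")
```

Only the second call reflects the per-chunk latency that the timed run starting at `beg = args.start_at` is meant to measure.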
whisper_online_server.py CHANGED
@@ -10,8 +10,8 @@ parser = argparse.ArgumentParser()
 # server options
 parser.add_argument("--host", type=str, default='localhost')
 parser.add_argument("--port", type=int, default=43007)
-
-
+parser.add_argument("--warmup-file", type=str, dest="warmup_file",
+        help="The path to a speech audio wav file to warm up Whisper so that the very first chunk processing is fast. It can be e.g. https://github.com/ggerganov/whisper.cpp/raw/master/samples/jfk.wav .")
 
 
 # options from whisper_online
@@ -41,19 +41,18 @@ else:
     tokenizer = None
 online = OnlineASRProcessor(asr,tokenizer,buffer_trimming=(args.buffer_trimming, args.buffer_trimming_sec))
 
-
-
-
-
-
-
-
-
-
+# warm up the ASR because the very first transcribe takes more time than the others.
+# Test results in https://github.com/ufal/whisper_streaming/pull/81
+msg = "Whisper is not warmed up. The first chunk processing may take longer."
+if args.warmup_file:
+    if os.path.isfile(args.warmup_file):
+        a = load_audio_chunk(args.warmup_file,0,1)
+        asr.transcribe(a)
+        print("INFO: Whisper is warmed up.",file=sys.stderr)
+    else:
+        print("WARNING: The warm up file is not available. "+msg,file=sys.stderr)
 else:
-    print("
-
-
+    print("WARNING: " + msg, file=sys.stderr)
 
 
 ######### Server objects
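To show how the new option fits together end to end, here is a hedged launcher sketch, not part of the repository: it fetches the sample file suggested in the `--warmup-file` help text and starts the server with the default `--host`/`--port` values from the diff.

```python
# Illustrative launcher, not part of the repository: fetch the warm-up sample
# suggested in the --warmup-file help text and start the server with it.
import os
import subprocess
import urllib.request

WARMUP_URL = "https://github.com/ggerganov/whisper.cpp/raw/master/samples/jfk.wav"
WARMUP_FILE = "jfk.wav"

if not os.path.isfile(WARMUP_FILE):
    urllib.request.urlretrieve(WARMUP_URL, WARMUP_FILE)

# Model options follow whisper_online.py; see the help message (-h).
subprocess.run([
    "python3", "whisper_online_server.py",
    "--host", "localhost",
    "--port", "43007",
    "--warmup-file", WARMUP_FILE,
])
```

With the file in place the server prints `INFO: Whisper is warmed up.` to stderr; without it, it falls back to the `WARNING: ...` messages added in the second hunk.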