istupakov committed · verified
Commit 873f3d3 · 1 Parent(s): d1326a6

Update app.py

Files changed (1)
  1. app.py +7 -8
app.py CHANGED
@@ -32,7 +32,6 @@ models_en = {
     name: onnx_asr.load_model(name)
     for name in [
         "nemo-parakeet-ctc-0.6b",
-        "nemo-parakeet-rnnt-0.6b",
         "nemo-parakeet-tdt-0.6b-v2",
     ]
 }
@@ -45,7 +44,7 @@ def recognize(audio: tuple[int, np.ndarray], models, language):
         return None
 
     sample_rate, waveform = audio
-    length = waveform.shape[-1] / sample_rate
+    length = waveform.shape[0] / sample_rate
     logger.debug("recognize: length %.3f, sample_rate %s, waveform.shape %s.", length, sample_rate, waveform.shape)
     try:
         waveform = waveform.astype(np.float32) / 2 ** (8 * waveform.itemsize - 1)
@@ -60,7 +59,7 @@ def recognize(audio: tuple[int, np.ndarray], models, language):
             result = model.recognize(waveform, sample_rate=sample_rate, language=language)
             time = timer() - start
             logger.debug("recognized by %s: result '%s', time %.3f s.", name, result, time)
-            results.append([name, result, f"{time:.3f} s."])
+            results.append([name, result])
 
     except Exception as e:
         raise gr.Error(f"{e} Audio: sample_rate: {sample_rate}, waveform.shape: {waveform.shape}.") from e
@@ -81,7 +80,8 @@ def recognize_with_vad(audio: tuple[int, np.ndarray], name: str):
         return None
 
     sample_rate, waveform = audio
-    logger.debug("recognize: sample_rate %s, waveform.shape %s.", sample_rate, waveform.shape)
+    length = waveform.shape[0] / sample_rate
+    logger.debug("recognize: length %.3f, sample_rate %s, waveform.shape %s.", length, sample_rate, waveform.shape)
     try:
         waveform = waveform.astype(np.float32) / 2 ** (8 * waveform.itemsize - 1)
         if waveform.ndim == 2:
@@ -104,14 +104,14 @@ with gr.Blocks() as recognize_short:
         gr.ClearButton(audio)
         btn_ru = gr.Button("Recognize (ru)", variant="primary")
         btn_en = gr.Button("Recognize (en)", variant="primary")
-    output = gr.Dataframe(headers=["model", "result", "time"], wrap=True)
+    output = gr.Dataframe(headers=["model", "result"], wrap=True)
     btn_ru.click(fn=recognize_ru, inputs=audio, outputs=output)
     btn_en.click(fn=recognize_en, inputs=audio, outputs=output)
 
 
 with gr.Blocks() as recognize_long:
+    gr.Markdown("For better results, you need to adjust the VAD parameters.")
     name = gr.Dropdown(models_vad.keys(), label="Model")
-    # lang = gr.Label()
     audio = gr.Audio(min_length=1, max_length=300)
     with gr.Row():
         gr.ClearButton(audio)
@@ -130,7 +130,7 @@ with gr.Blocks() as recognize_long:
 
     name.change(on_model_change, inputs=name, outputs=audio)
 
-with gr.Blocks() as demo:
+with gr.Blocks(title="onnx-asr demo") as demo:
     gr.Markdown("""
 # ASR demo using onnx-asr
 **[onnx-asr](https://github.com/istupakov/onnx-asr)** is a Python package for Automatic Speech Recognition using ONNX models.
@@ -155,7 +155,6 @@ with gr.Blocks() as demo:
 * `alphacep/vosk-model-small-ru` - Alpha Cephei Vosk 0.52-small-ru ([origin](https://huggingface.co/alphacep/vosk-model-small-ru))
 ## English ASR models
 * `nemo-parakeet-ctc-0.6b` - Nvidia Parakeet CTC 0.6B (en) ([origin](https://huggingface.co/nvidia/parakeet-ctc-0.6b), [onnx](https://huggingface.co/istupakov/parakeet-ctc-0.6b-onnx))
-* `nemo-parakeet-rnnt-0.6b` - Nvidia Parakeet RNNT 0.6B (en) ([origin](https://huggingface.co/nvidia/parakeet-rnnt-0.6b), [onnx](https://huggingface.co/istupakov/parakeet-rnnt-0.6b-onnx))
 * `nemo-parakeet-tdt-0.6b-v2` - Nvidia Parakeet TDT 0.6B V2 (en) ([origin](https://huggingface.co/nvidia/parakeet-tdt-0.6b-v2), [onnx](https://huggingface.co/istupakov/parakeet-tdt-0.6b-v2-onnx))
 * `whisper-base` - OpenAI Whisper Base exported with onnxruntime ([origin](https://huggingface.co/openai/whisper-base), [onnx](https://huggingface.co/istupakov/whisper-base-onnx))
 ## VAD models
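
For context, a minimal sketch of the recognition path as it looks after this commit: Gradio delivers audio as a (sample_rate, integer PCM ndarray) tuple, the app scales it to float32, derives the clip length from waveform.shape[0] (samples are on axis 0), and passes the waveform to an onnx-asr model. The sketch only reuses calls visible in the diff (onnx_asr.load_model, model.recognize); the stereo downmix, the chosen model name, and dropping the language argument (which the app passes for multilingual models) are illustrative assumptions, not part of the commit.

# Sketch only; assumptions are marked in the comments.
import numpy as np
import onnx_asr

model = onnx_asr.load_model("nemo-parakeet-tdt-0.6b-v2")  # model name chosen for illustration

def recognize(audio: tuple[int, np.ndarray]) -> str:
    sample_rate, waveform = audio              # Gradio audio tuple: (rate, int PCM samples)
    length = waveform.shape[0] / sample_rate   # duration in seconds; samples on axis 0
    # Scale signed integer PCM to [-1, 1); itemsize is the sample width in bytes.
    waveform = waveform.astype(np.float32) / 2 ** (8 * waveform.itemsize - 1)
    if waveform.ndim == 2:                     # assumption: downmix stereo to mono
        waveform = waveform.mean(axis=1)
    print(f"clip length: {length:.3f} s")
    # language kwarg omitted here; the app passes it when the model is multilingual.
    return model.recognize(waveform, sample_rate=sample_rate)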