Spaces:
Sleeping
Sleeping
Upload 2 files
Browse files- app.py +110 -0
- requirements.txt +6 -0
app.py
ADDED
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os

from huggingface_hub import hf_hub_download
import gradio as gr
import json
import pandas as pd
import collections
import scipy.signal
import numpy as np
from functools import partial
from openwakeword.model import Model

from openwakeword.utils import download_models
# Fetch openWakeWord's bundled feature/preprocessing models (melspec, embedding).
download_models()

# Download the private wakeword model from the HF Model Hub using the
# Space's secret token (HF_TOKEN environment variable).
hf_token = os.environ.get("HF_TOKEN")
model_path = hf_hub_download(
    repo_id="JTBTechnology/kmu_wakeword",
    filename="hi_kmu_0721.onnx",  # change to the correct filename inside your model repo
    token=hf_token,
    repo_type="model"
)

# Load the wakeword model directly from the downloaded local path.
model = Model(wakeword_models=[model_path], inference_framework="onnx")
|
26 |
+
|
27 |
+
def process_audio(audio, state=None):
    """Score one streamed audio chunk against the wakeword model(s).

    Gradio streaming callback: receives the microphone audio, runs the
    openWakeWord model over 80 ms frames, and returns a line plot of the
    rolling per-model scores plus the (JSON-serializable) score history.

    Args:
        audio: Gradio ``type="numpy"`` tuple ``(sample_rate, samples)``.
        state: rolling score history. ``None`` on the first call; on later
            calls Gradio hands back the plain dict of lists we returned.

    Returns:
        tuple: ``(gr.LinePlot, dict[str, list[float]])``.
    """
    if state is None:
        state = collections.defaultdict(partial(collections.deque, maxlen=60))
    elif not isinstance(state, collections.defaultdict):
        # BUG FIX: Gradio round-trips the state as a plain dict of lists,
        # which loses the maxlen=60 cap and grows without bound. Restore
        # the bounded-deque structure on every call.
        restored = collections.defaultdict(partial(collections.deque, maxlen=60))
        for key, scores in state.items():
            restored[key].extend(scores)
        state = restored

    sample_rate, samples = audio
    # openWakeWord expects 16 kHz input; resample only when needed.
    # BUG FIX: the original assigned `data` only inside the resample branch,
    # so audio already at 16 kHz raised NameError.
    if sample_rate != 16000:
        data = scipy.signal.resample(samples, int(float(samples.shape[0]) / sample_rate * 16000))
    else:
        data = samples

    # Feed the model 80 ms (1280-sample) frames.
    for i in range(0, data.shape[0], 1280):
        if len(data.shape) == 2 or data.shape[-1] == 2:
            chunk = data[i:i + 1280][:, 0]  # stereo: keep one channel only
        else:
            chunk = data[i:i + 1280]

        # Skip the trailing partial frame.
        if chunk.shape[0] == 1280:
            prediction = model.predict(chunk)
            for key in prediction:
                # Seed the deque with zeros so the plot starts full-width.
                if len(state[key]) == 0:
                    state[key].extend(np.zeros(60))
                state[key].append(prediction[key])

    # Build one long-format DataFrame (x, y, Model) for the line plot.
    dfs = [
        pd.DataFrame({"x": np.arange(len(scores)), "y": scores, "Model": key})
        for key, scores in state.items()
    ]
    if dfs:
        df = pd.concat(dfs)
    else:
        # BUG FIX: pd.concat([]) raises; happens when the chunk was shorter
        # than one 1280-sample frame, so no prediction was made yet.
        df = pd.DataFrame({"x": [], "y": [], "Model": []})

    plot = gr.LinePlot(
        value=df,
        x='x',
        y='y',
        color="Model",
        y_lim=(0, 1),
        tooltip="Model",
        width=600,
        height=300,
        x_title="Time (frames)",
        y_title="Model Score",
        color_legend_position="bottom",
    )

    # Gradio state must be JSON-serializable: deques and numpy scalars are not.
    serializable_state = {k: [float(x) for x in v] for k, v in state.items()}
    return plot, serializable_state
|
79 |
+
|
80 |
+
# Create Gradio interface and launch
|
81 |
+
|
82 |
+
# Build the Gradio interface and launch it.
# NOTE: `desc` and `title` are user-facing UI text (Traditional Chinese)
# and must be kept verbatim.

desc = """
這是 [openWakeWord](https://github.com/dscripka/openWakeWord) 最新版本預設模型的小工具示範。
請點一下下面的「開始錄音」按鈕,就能直接用麥克風測試。
系統會即時把每個模型的分數用折線圖秀出來,你也可以把滑鼠移到線上看是哪一個模型。

每一個模型都有自己專屬的喚醒詞或指令句(更多可以參考 [模型說明](https://github.com/dscripka/openWakeWord/tree/main/docs/models))。
如果偵測到你講了對的關鍵詞,圖上對應模型的分數會突然變高。你可以試著講下面的範例語句試試看:

| 模型名稱 | 建議語句 |
| ------------- | ------ |
| hi\_kmu\_0721 | 「嗨,高醫」 |
"""

gr_int = gr.Interface(
    title = "語音喚醒展示",
    description = desc,
    css = ".flex {flex-direction: column} .gr-panel {width: 100%}",
    fn=process_audio,
    inputs=[
        # Streaming microphone input delivered as (sample_rate, np.ndarray).
        gr.Audio(sources=["microphone"], type="numpy", streaming=True, show_label=False),
        "state"
    ],
    outputs=[
        gr.LinePlot(show_label=False),
        "state"
    ],
    live=True)

gr_int.launch()
|
requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
gradio
|
2 |
+
pandas
|
3 |
+
numpy
|
4 |
+
scipy
|
5 |
+
huggingface_hub
|
6 |
+
openwakeword
|