import os
import torch
import shutil
import librosa
import warnings
import numpy as np
import gradio as gr
import librosa.display
import matplotlib.pyplot as plt
from model import EvalNet
from utils import get_modelist, find_wav_files, embed_img
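
# Map each playing-technique class name to its pinyin (romanized Chinese) display label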
TRANSLATE = {
    "vibrato": "Rou xian",
    "trill": "Chan yin",
    "tremolo": "Chan gong",
    "staccato": "Dun gong",
    "ricochet": "Pao gong",
    "pizzicato": "Bo xian",
    "percussive": "Ji gong",
    "legato_slide_glissando": "Lian hua yin",
    "harmonic": "Fan yin",
    "diangong": "Dian gong",
    "detache": "Fen gong",
}
CLASSES = list(TRANSLATE.keys())
TEMP_DIR = "./__pycache__/tmp"
SAMPLE_RATE = 44100


def circular_padding(y: np.ndarray, sr: int, dur=3):
    """Loop the waveform until it spans `dur` seconds, then truncate to that length."""
    target_len = sr * dur
    if len(y) >= target_len:
        return y[:target_len]

    # Number of copies needed to reach the target length (ceiling division)
    repeats = target_len // len(y) + int(target_len % len(y) > 0)
    return np.hstack([y] * repeats)[:target_len]


def wav2mel(audio_path: str):
    """Save a log-Mel spectrogram of the padded audio as TEMP_DIR/output.jpg."""
    y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
    y = circular_padding(y, sr)
    mel_spec = librosa.feature.melspectrogram(y=y, sr=sr)
    log_mel_spec = librosa.power_to_db(mel_spec, ref=np.max)
    librosa.display.specshow(log_mel_spec)
    plt.axis("off")
    plt.savefig(
        f"{TEMP_DIR}/output.jpg",
        bbox_inches="tight",
        pad_inches=0.0,
    )
    plt.close()


def wav2cqt(audio_path: str):
    """Save a log-power CQT spectrogram of the padded audio as TEMP_DIR/output.jpg."""
    y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
    y = circular_padding(y, sr)
    cqt_spec = librosa.cqt(y=y, sr=sr)
    log_cqt_spec = librosa.power_to_db(np.abs(cqt_spec) ** 2, ref=np.max)
    librosa.display.specshow(log_cqt_spec)
    plt.axis("off")
    plt.savefig(
        f"{TEMP_DIR}/output.jpg",
        bbox_inches="tight",
        pad_inches=0.0,
    )
    plt.close()


def wav2chroma(audio_path: str):
    """Save a log-power chromagram of the padded audio as TEMP_DIR/output.jpg."""
    y, sr = librosa.load(audio_path, sr=SAMPLE_RATE)
    y = circular_padding(y, sr)
    chroma_spec = librosa.feature.chroma_stft(y=y, sr=sr)
    log_chroma_spec = librosa.power_to_db(np.abs(chroma_spec) ** 2, ref=np.max)
    librosa.display.specshow(log_chroma_spec)
    plt.axis("off")
    plt.savefig(
        f"{TEMP_DIR}/output.jpg",
        bbox_inches="tight",
        pad_inches=0.0,
    )
    plt.close()


def infer(wav_path: str, log_name: str, folder_path=TEMP_DIR):
    """Classify the playing technique in a recording using the selected model."""
    # Start from a clean temp folder so stale spectrograms are never reused
    if os.path.exists(folder_path):
        shutil.rmtree(folder_path)

    if not wav_path:
        return None, "Please input an audio!"

    # The spectral representation (mel / cqt / chroma) is encoded in the log name
    spec = log_name.split("_")[-3]
    os.makedirs(folder_path, exist_ok=True)
    try:
        model = EvalNet(log_name, len(TRANSLATE)).model
        # Dispatch to the matching wav2* helper, e.g. wav2mel
        globals()[f"wav2{spec}"](wav_path)
    except Exception as e:
        return None, f"{e}"

    # Embed the saved spectrogram image and take the top-1 predicted class
    input_tensor = embed_img(f"{folder_path}/output.jpg")
    output: torch.Tensor = model(input_tensor)
    pred_id = torch.max(output.data, 1)[1]
    return (
        os.path.basename(wav_path),
        f"{TRANSLATE[CLASSES[pred_id]]} ({CLASSES[pred_id].capitalize()})",
    )
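

# Build and launch the Gradio demo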
if __name__ == "__main__":
    warnings.filterwarnings("ignore")
    models = get_modelist(assign_model="Swin_T_mel")
    examples = []
    example_wavs = find_wav_files()
    for wav in example_wavs:
        examples.append([wav, models[0]])

    with gr.Blocks() as demo:
        gr.Interface(
            fn=infer,
            inputs=[
                gr.Audio(label="Upload a recording", type="filepath"),
                gr.Dropdown(choices=models, label="Select a model", value=models[0]),
            ],
            outputs=[
                gr.Textbox(label="Audio filename", show_copy_button=True),
                gr.Textbox(label="Playing tech recognition", show_copy_button=True),
            ],
            examples=examples,
            cache_examples=False,
            allow_flagging="never",
            title="It is recommended to keep the recording length around 3s.",
        )
        gr.Markdown(
            """
# Cite
```bibtex
@article{Zhou-2025,
  title   = {CCMusic: an Open and Diverse Database for Chinese Music Information Retrieval Research},
  author  = {Monan Zhou, Shenyang Xu, Zhaorui Liu, Zhaowen Wang, Feng Yu, Wei Li and Baoqiang Han},
  journal = {Transactions of the International Society for Music Information Retrieval},
  year    = {2025}
}
```"""
        )

    demo.launch()