Update app.py
app.py CHANGED
@@ -1,54 +1,53 @@
+# 1. Install the necessary libraries first:
+# pip install gradio optimum[onnxruntime] transformers
+
 import gradio as gr
-from transformers import pipeline
-import torch
+from optimum.pipelines import pipeline # Use the pipeline from 'optimum'
 import os
 
 # --- Performance Improvement ---
-#
+# Configure ONNX Runtime to use all available CPU cores.
+# This is done by setting the OMP_NUM_THREADS environment variable.
 num_cpu_cores = os.cpu_count()
-
-# 2. Configure PyTorch to use all available CPU cores for its operations.
-# This is crucial for speeding up model inference on a CPU.
 if num_cpu_cores is not None:
-
-    print(f"✅
+    os.environ["OMP_NUM_THREADS"] = str(num_cpu_cores)
+    print(f"✅ ONNX Runtime configured to use {num_cpu_cores} CPU cores.")
 else:
     print("Could not determine the number of CPU cores. Using default settings.")
 
 
-# Initialize the
-#
+# 2. Initialize the pipeline using the ONNX model from the Hub.
+# 'optimum' handles downloading the model and running it with the specified accelerator.
 pipe = pipeline(
-    "audio-classification",
-    model="
-
+    task="audio-classification",
+    model="onnx-community/ast-finetuned-audioset-10-10-0.4593-ONNX",
+    accelerator="ort", # Specifies to use ONNX Runtime ('ort')
+    device="cpu" # Explicitly run on the CPU
 )
 
 # Define the function to classify an audio file
-def classify_audio(
+def classify_audio(audio_filepath):
     """
-    Takes an audio file path, classifies it using the pipeline,
+    Takes an audio file path, classifies it using the ONNX pipeline,
     and returns a dictionary of top labels and their scores.
     """
-    if
+    if audio_filepath is None:
         return "Please upload an audio file first."
 
-    # The pipeline
-    result = pipe(
-    # The output is formatted for the Gradio Label component.
+    # The 'optimum' pipeline works just like the 'transformers' one
+    result = pipe(audio_filepath)
     return {label['label']: label['score'] for label in result}
 
 # Set up the Gradio interface
 app = gr.Interface(
-    fn=classify_audio,
-    inputs=gr.Audio(type="filepath", label="Upload Audio"),
-    outputs=gr.Label(num_top_classes=3, label="Top 3 Predictions"),
-    title="Audio Classification",
-    description="Upload an audio file to classify it. This app
+    fn=classify_audio,
+    inputs=gr.Audio(type="filepath", label="Upload Audio"),
+    outputs=gr.Label(num_top_classes=3, label="Top 3 Predictions"),
+    title="High-Performance Audio Classification with ONNX",
+    description="Upload an audio file to classify it. This app uses a pre-optimized ONNX model and runs on all available CPU cores for maximum speed.",
     examples=[
-        # You can add example audio files here if you have them
-        # ["path/to/
-        # ["path/to/your/example_audio_2.mp3"],
+        # You can add local example audio files here if you have them
+        # ["path/to/example_cat_purr.wav"],
     ]
 )
 
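For local testing outside of Spaces, the pipeline defined above can be exercised directly and the interface launched by hand; the audio-classification pipeline returns a list of {'label': ..., 'score': ...} dicts, which is exactly what classify_audio reformats for the Gradio Label component. A minimal sketch, assuming the new file is saved as app.py; the helper script name local_check.py and the audio path sample.wav are hypothetical:

# local_check.py -- hypothetical helper script, not part of the Space
from app import pipe, app  # reuses the objects defined in app.py above

# Call the pipeline directly on an audio file; the result is a list of
# {'label': str, 'score': float} dicts, highest-scoring predictions first.
preds = pipe("sample.wav")  # hypothetical local audio file
for p in preds:
    print(f"{p['label']}: {p['score']:.3f}")

# Serve the Gradio UI locally; launch() is needed when this is run
# as a plain Python script.
app.launch()

Since launch() blocks until the server is stopped, the direct pipeline check runs first.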