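"""Gradio demo for hypernasality detection in speech.

A Whisper backbone feeds an MLP classification head (SpeechClassifier);
audio arrives either as an uploaded file or from the microphone, and the
app reports whether hypernasality is detected.
"""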
# Import necessary libraries
import gradio as gr
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from transformers import WhisperModel, WhisperFeatureExtractor
import datasets
from datasets import Audio
from huggingface_hub import PyTorchModelHubMixin

# Select the compute device (CUDA if available, otherwise CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Dataset wrapping audio examples for inference; extracts Whisper input features
class SpeechInferenceDataset(Dataset):
    def __init__(self, audio_data, feature_extractor):
        self.audio_data = audio_data
        self.feature_extractor = feature_extractor

    def __len__(self):
        return len(self.audio_data)

    def __getitem__(self, index):
        inputs = self.feature_extractor(self.audio_data[index]["audio"]["array"],
                                        return_tensors="pt",
                                        sampling_rate=self.audio_data[index]["audio"]["sampling_rate"])
        input_features = inputs.input_features
        decoder_input_ids = torch.tensor([[1, 1]])  # Fixed decoder input ids; Whisper's decoder needs some input (adjust to match how the classifier was trained)
        return input_features, decoder_input_ids

# Define model class
class SpeechClassifier(nn.Module, PyTorchModelHubMixin):
    def __init__(self, config):
        super(SpeechClassifier, self).__init__()
        self.encoder = WhisperModel.from_pretrained(config["encoder"])
        self.classifier = nn.Sequential(
            nn.Linear(self.encoder.config.hidden_size, 4096),
            nn.ReLU(),
            nn.Linear(4096, 2048),
            nn.ReLU(),
            nn.Linear(2048, 1024),
            nn.ReLU(),
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, config["num_labels"])
        )

    def forward(self, input_features, decoder_input_ids):
        outputs = self.encoder(input_features, decoder_input_ids=decoder_input_ids)
        pooled_output = outputs['last_hidden_state'][:, 0, :]
        logits = self.classifier(pooled_output)
        return logits

# Prepare data function
def prepare_data(audio_file_path, model_checkpoint="openai/whisper-base"):
    feature_extractor = WhisperFeatureExtractor.from_pretrained(model_checkpoint)
    inference_data = datasets.Dataset.from_dict(
        {"path": [audio_file_path], "audio": [audio_file_path]}
    ).cast_column("audio", Audio(sampling_rate=16_000))
    inference_dataset = SpeechInferenceDataset(inference_data, feature_extractor)
    inference_loader = DataLoader(inference_dataset, batch_size=1, shuffle=False)
    input_features, decoder_input_ids = next(iter(inference_loader))
    input_features = input_features.squeeze(1).to(device)
    decoder_input_ids = decoder_input_ids.squeeze(1).to(device)
    return input_features, decoder_input_ids

# Prediction function
def predict(audio_file_path, config={"encoder": "openai/whisper-base", "num_labels": 2}):
    input_features, decoder_input_ids = prepare_data(audio_file_path)
    
    # Load the fine-tuned classifier from the Hugging Face Hub.
    # PyTorchModelHubMixin provides from_pretrained for this (push_to_hub is the
    # upload direction; there is no push_from_hub method).
    model = SpeechClassifier.from_pretrained("jcho02/whisper_cleft", config=config)
    model.to(device)
    model.eval()

    with torch.no_grad():
        logits = model(input_features, decoder_input_ids)
        predicted_ids = int(torch.argmax(logits, dim=-1))
    return predicted_ids

# Gradio interface function for uploaded files; receives a filepath string
# because the Audio component below uses type="filepath"
def gradio_file_interface(uploaded_file):
    prediction = predict(uploaded_file)
    label = "Hypernasality Detected" if prediction == 1 else "No Hypernasality Detected"
    return label

# Gradio interface function for microphone input; also receives a filepath string
def gradio_mic_interface(mic_input):
    prediction = predict(mic_input)
    label = "Hypernasality Detected" if prediction == 1 else "No Hypernasality Detected"
    return label

# Initialize Blocks
demo = gr.Blocks()

# Define the interfaces inside the Blocks context
with demo:
    mic_transcribe = gr.Interface(
        fn=gradio_mic_interface,
        inputs=gr.Audio(type="filepath"),  # filepath input; enable the microphone source here if your Gradio version requires it
        outputs=gr.Textbox(label="Prediction")
    )

    file_transcribe = gr.Interface(
        fn=gradio_file_interface,
        inputs=gr.Audio(type="filepath"),  # Specify filepath for file upload
        outputs=gr.Textbox(label="Prediction")
    )
    
    # Use a tabbed interface to switch between the microphone and file upload interfaces
    gr.TabbedInterface([mic_transcribe, file_transcribe], ["Transcribe Microphone", "Transcribe Audio File"])

# Launch the demo with debugging enabled to catch any potential errors early on
demo.launch(debug=True)