tzu committed on
Commit
eb6ada7
·
1 Parent(s): 1041028

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -6,7 +6,7 @@ import transformers
6
 
7
  from datasets import load_dataset
8
  import torch
9
- from transformers import pipeline
10
 
11
  def predict(speech):
12
  # load model and tokenizer
@@ -14,9 +14,9 @@ def predict(speech):
14
  ds = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
15
  audio_file = ds[0]["audio"]["path"]
16
  audio_classifier = pipeline(
17
- task="audio-classification", model="ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition"
18
- preds = audio_classifier(audio_file)
19
- preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
20
  return preds
21
 
22
  demo = gr.Interface(fn=predict, inputs='texts' outputs="texts")
 
6
 
7
  from datasets import load_dataset
8
  import torch
9
+ from transformers import pipeline
10
 
11
  def predict(speech):
12
  # load model and tokenizer
 
14
  ds = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
15
  audio_file = ds[0]["audio"]["path"]
16
  audio_classifier = pipeline(
17
+ task="audio-classification", model="ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition"
18
+ preds = audio_classifier(audio_file)
19
+ preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
20
  return preds
21
 
22
  demo = gr.Interface(fn=predict, inputs='texts' outputs="texts")