usamaijaz-ai committed
Commit 55f0f0a · 1 parent: b5e4ecb

model update

Files changed (2)
  1. app.py +5 -5
  2. extracted_audio.wav +3 -0
app.py CHANGED
@@ -16,14 +16,14 @@ CONVERTED_AUDIO = "converted_audio.wav"
 MODEL_REPO = "ylacombe/accent-classifier"
 
 # === load local model
-MODEL_DIR = "model"
-model = Wav2Vec2ForSequenceClassification.from_pretrained(MODEL_DIR, local_files_only=True)
-feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(MODEL_DIR)
+# MODEL_DIR = "model"
+# model = Wav2Vec2ForSequenceClassification.from_pretrained(MODEL_DIR, local_files_only=True)
+# feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(MODEL_DIR)
 
 
 # # === Load models ===
-# model = Wav2Vec2ForSequenceClassification.from_pretrained(MODEL_REPO)
-# feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(MODEL_REPO)
+model = Wav2Vec2ForSequenceClassification.from_pretrained(MODEL_REPO)
+feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(MODEL_REPO)
 whisper = pipeline("automatic-speech-recognition", model="openai/whisper-tiny")
 
 LABELS = [model.config.id2label[i] for i in range(len(model.config.id2label))]
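
For context, a minimal sketch of the inference path this commit switches to: the classifier is now loaded from the ylacombe/accent-classifier Hub repo instead of the commented-out local model directory. The 16 kHz sampling rate and the placeholder waveform below are illustrative assumptions, not part of the commit.

import torch
import numpy as np
from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor

MODEL_REPO = "ylacombe/accent-classifier"
model = Wav2Vec2ForSequenceClassification.from_pretrained(MODEL_REPO)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(MODEL_REPO)

# Placeholder: one second of 16 kHz silence; the app feeds real extracted audio here.
waveform = np.zeros(16000, dtype=np.float32)
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(dim=-1))])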
extracted_audio.wav ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:31ec206ac9720d356425512609abbd2b39af0757a149c4b23d7e94c1213de8e8
+size 7826946