storresbusquets committed
Commit 82898ce · 1 Parent(s): 0905a09

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -10,7 +10,7 @@ class GradioInference():
         self.current_size = "base"
         self.loaded_model = whisper.load_model(self.current_size)
         self.yt = None
-        # self.summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
+        self.summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
 
         self.tokenizer_model = AutoTokenizer.from_pretrained("google/pegasus-large")
         self.summarizer_model = AutoModelForSeq2SeqLM.from_pretrained("google/pegasus-large")
@@ -36,7 +36,7 @@ class GradioInference():
 
         results = self.loaded_model.transcribe(path, language=lang)
 
-        inputs = tokenizer(results["text"], max_length=1024, truncation=True, return_tensors="pt")
+        inputs = self.tokenizer_model(results["text"], max_length=1024, truncation=True, return_tensors="pt")
 
         summary_ids = self.keyword_model.generate(inputs["input_ids"])
         summary = self.keyword_tokenizer.batch_decode(summary_ids,
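
For context, both changed lines feed the standard Hugging Face seq2seq summarization flow: tokenize the Whisper transcript, call generate() on the model, then batch_decode() the resulting ids back into text. Below is a minimal, self-contained sketch of that flow, assuming the transformers library and the google/pegasus-large checkpoint loaded in __init__; the input text is a placeholder, not taken from the app.

# Minimal sketch of the tokenize -> generate -> decode pattern shown in the diff.
# Assumes the Hugging Face transformers library; the input text is illustrative only.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/pegasus-large")
model = AutoModelForSeq2SeqLM.from_pretrained("google/pegasus-large")

text = "Transcribed speech from the video would go here."
inputs = tokenizer(text, max_length=1024, truncation=True, return_tensors="pt")

# generate() returns summary token ids; batch_decode() turns them back into text.
summary_ids = model.generate(inputs["input_ids"])
summary = tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0]
print(summary)

The self.summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6") line uncommented in the first hunk is the higher-level pipeline() equivalent of this same tokenize/generate/decode sequence.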