storresbusquets committed
Commit 6100c94 · Parent: 65b4025

Update app.py

Files changed (1): app.py (+7 −5)
app.py CHANGED
@@ -36,7 +36,7 @@ class GradioInference:
         )
 
         # Sentiment Classifier
-        self.classifier = pipeline("text-classification")
+        self.classifier = pipeline("text-classification", model="lxyuan/distilbert-base-multilingual-cased-sentiments-student", return_all_scores=False)
 
 
         self.tokenizer = AutoTokenizer.from_pretrained("csebuetnlp/mT5_multilingual_XLSum")
@@ -116,6 +116,7 @@ class GradioInference:
         )
         predicted = self.keyword_tokenizer.decode(output[0], skip_special_tokens=True)
         keywords = [x.strip() for x in predicted.split(",") if x.strip()]
+        formatted_keywords = "\n".join([f"• {keyword}" for keyword in keywords])
 
         progress(0.80, desc="Extracting Sentiment")
         # Sentiment label
@@ -132,7 +133,7 @@ class GradioInference:
             return (
                 results["text"],
                 transcription_summary[0]["summary_text"],
-                keywords,
+                formatted_keywords,
                 label,
                 wordcloud_image,
             )
@@ -140,7 +141,7 @@ class GradioInference:
             return (
                 results["text"],
                 summary,
-                keywords,
+                formatted_keywords,
                 label,
                 wordcloud_image,
             )
@@ -219,6 +220,7 @@ class GradioInference:
         )
         predicted = self.keyword_tokenizer.decode(output[0], skip_special_tokens=True)
         keywords = [x.strip() for x in predicted.split(",") if x.strip()]
+        formatted_keywords = "\n".join([f"• {keyword}" for keyword in keywords])
 
         progress(0.80, desc="Extracting Sentiment")
         # Sentiment label
@@ -235,7 +237,7 @@ class GradioInference:
             return (
                 results["text"],
                 transcription_summary[0]["summary_text"],
-                keywords,
+                formatted_keywords,
                 label,
                 wordcloud_image,
             )
@@ -243,7 +245,7 @@ class GradioInference:
            return (
                results["text"],
                summary,
-                keywords,
+                formatted_keywords,
                label,
                wordcloud_image,
            )
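
The first change pins the sentiment classifier to an explicit model instead of the pipeline's default, and return_all_scores=False keeps the output to a single top label per input. A minimal sketch of how the reconfigured pipeline behaves in isolation — the model ID comes from the diff above; the sample text and printed score are purely illustrative:

# Sketch of the classifier configured in this commit (model ID taken from the diff).
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="lxyuan/distilbert-base-multilingual-cased-sentiments-student",
    return_all_scores=False,  # one top label per input, as in the updated app.py
)

print(classifier("Me encantó este video, muy claro y útil."))
# Illustrative output: [{'label': 'positive', 'score': 0.98}]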
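
The second change joins the decoded keyword list into a bullet-per-line string before it is returned to the Gradio outputs, in both return paths. A standalone illustration of the same expression, using made-up keyword values:

# Same formatting expression as in the diff; the keyword values here are hypothetical.
keywords = ["machine learning", "speech recognition", "summarization"]
formatted_keywords = "\n".join([f"• {keyword}" for keyword in keywords])
print(formatted_keywords)
# • machine learning
# • speech recognition
# • summarization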