Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -93,42 +93,50 @@ def classify_toxicity(audio_file, classify_anxiety, emo_class, explitive_selecti

Old version (lines 93-134):

 93       classify_emotion(audio_file)
 94
 95       #### Text classification #####
 96  -
 97  -
 98  -
 99  -    text_classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
100  -
101  -    sequence_to_classify = transcribed_text
102  -    print(classify_anxiety, class_options)
103  -    candidate_labels = class_options.get(classify_anxiety, [])
104  -    # classification_output = classifier(sequence_to_classify, candidate_labels, multi_label=False)
105  -    classification_output = text_classifier(sequence_to_classify, candidate_labels, multi_label=True)
106  -    print("class output ", type(classification_output))
107  -    # classification_df = pd.DataFrame.from_dict(classification_output)
108  -    print("keys ", classification_output.keys())
109
110  -
111  -
112  -
113  -
114  -
115  -
116  -
117  -
118  -
119  -
120       else:
121  -
122  -
123  -
124       else:
125  -    if
126       affirm = positive_affirmations()
127       topScore = toxicity_score
128       else:
129  -    print("Not Toxic")
130       affirm = ""
131       topScore = toxicity_score
132
133       return transcribed_text, topScore, label_score_dict, affirm
134       # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
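Both the old and the new version call the same zero-shot classifier, and the print(type(...)) / print(... .keys()) lines are inspecting its output. For a single input string, pipeline("zero-shot-classification", model="facebook/bart-large-mnli") returns a dict with 'sequence', 'labels', and 'scores' keys, labels sorted by descending score; under multi_label=True the scores are computed independently per label rather than normalized across labels. A minimal, self-contained sketch of that output handling follows (the example text and candidate labels are illustrative stand-ins for transcribed_text and class_options, and the device= argument is an assumption: the new version builds a torch.device at line 97 but never passes it to the pipeline anywhere in this hunk):

    from transformers import pipeline
    import torch

    # Zero-shot classifier on GPU if available (device=0), otherwise CPU (device=-1).
    text_classifier = pipeline(
        "zero-shot-classification",
        model="facebook/bart-large-mnli",
        device=0 if torch.cuda.is_available() else -1,
    )

    # Illustrative stand-ins for transcribed_text and the selected class_options entry.
    sequence_to_classify = "I can't stop worrying that everything will go wrong."
    candidate_labels = ["anxiety", "calm", "anger"]

    output = text_classifier(sequence_to_classify, candidate_labels=candidate_labels, multi_label=True)
    # output is a dict: {'sequence': str, 'labels': [...], 'scores': [...]}
    label_score_dict = {label: score for label, score in zip(output["labels"], output["scores"])}
    top_label = max(label_score_dict, key=label_score_dict.get)
    print(top_label, label_score_dict[top_label])

With multi_label=False (the commented-out call kept at line 104) the scores would instead form a softmax over the candidate labels and sum to 1.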
New version (lines 93-142):

 93       classify_emotion(audio_file)
 94
 95       #### Text classification #####
 96  +    if classify_anxiety != None:
 97  +        device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
 98
 99  +        text_classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
100  +
101  +        sequence_to_classify = transcribed_text
102  +        print(classify_anxiety, class_options)
103  +        candidate_labels = class_options.get(classify_anxiety, [])
104  +        # classification_output = classifier(sequence_to_classify, candidate_labels, multi_label=False)
105  +        classification_output = text_classifier(sequence_to_classify, candidate_labels, multi_label=True)
106  +        print("class output ", type(classification_output))
107  +        # classification_df = pd.DataFrame.from_dict(classification_output)
108  +        print("keys ", classification_output.keys())
109  +
110  +        # formatted_classification_output = "\n".join([f"{key}: {value}" for key, value in classification_output.items()])
111  +        # label_score_pairs = [(label, score) for label, score in zip(classification_output['labels'], classification_output['scores'])]
112  +        label_score_dict = {label: score for label, score in zip(classification_output['labels'], classification_output['scores'])}
113  +        k = max(label_score_dict, key=label_score_dict.get)
114  +        maxval = label_score_dict[k]
115  +        if maxval > toxicity_score:
116  +            if maxval > threshold:
117  +                print("Toxic")
118  +                affirm = positive_affirmations()
119  +                topScore = maxval
120  +            else:
121  +                print("Not Toxic")
122  +                affirm = ""
123  +                topScore = maxval
124           else:
125  +            if toxicity_score > threshold:
126  +                affirm = positive_affirmations()
127  +                topScore = toxicity_score
128  +            else:
129  +                print("Not Toxic")
130  +                affirm = ""
131  +                topScore = toxicity_score
132       else:
133  +        if toxixity_score > threshold:
134           affirm = positive_affirmations()
135           topScore = toxicity_score
136       else:
137           affirm = ""
138           topScore = toxicity_score
139  +        label_score_dict = {"toxicity" : toxicity_score}
140
141       return transcribed_text, topScore, label_score_dict, affirm
142       # return f"Toxicity Score ({available_models[selected_model]}): {toxicity_score:.4f}"
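One detail worth flagging in the added lines: new line 133 reads toxixity_score while every other line in the hunk uses toxicity_score, so the branch taken when classify_anxiety is None will raise NameError: name 'toxixity_score' is not defined, which is plausibly what the Space's "Runtime error" status reflects. A small, self-contained sketch of that fallback branch (new lines 132-139) with the name corrected; positive_affirmations() is stubbed purely for illustration, and toxicity_score and threshold are assumed to be computed earlier in classify_toxicity, as the surrounding code implies:

    def positive_affirmations() -> str:
        # Stand-in for the app's helper of the same name.
        return "placeholder affirmation"

    def fallback_branch(toxicity_score: float, threshold: float):
        # Mirrors new lines 133-139 with `toxixity_score` corrected to `toxicity_score`.
        if toxicity_score > threshold:
            affirm = positive_affirmations()
        else:
            affirm = ""
        topScore = toxicity_score
        label_score_dict = {"toxicity": toxicity_score}
        return topScore, label_score_dict, affirm

    print(fallback_branch(0.8, 0.5))

Behavior is otherwise unchanged: topScore falls back to the raw toxicity score and label_score_dict carries only the single "toxicity" entry, matching new line 139.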