Update app.py
app.py CHANGED
@@ -37,9 +37,8 @@ class ONNXInferencePipeline:
         self.session.set_providers(self.providers)

     def load_banned_keywords(self):
-        #
-        #
-        # For this example, I'm using a shortened list from your document
+        # Load your comprehensive list of banned keywords
+        # In a production environment, this would be your full list of 500+ keywords
         return [
             # Original banned keywords
             "fuck", "shit", "bitch", "cunt", "asshole", "faggot", "nigger", "bastard", "damn", "crap",
@@ -837,9 +836,7 @@ class ONNXInferencePipeline:
         if self.contains_banned_keyword(text):
             return {
                 'label': 'Inappropriate Content',
-                '
-                'probabilities': [1.0, 0.0], # Assuming [inappropriate, appropriate]
-                'keyword_matched': True # Flag to indicate keyword match
+                'probabilities': [1.0, 0.0] # Assuming [inappropriate, appropriate]
             }

         # If no banned keywords, proceed with model prediction
@@ -861,9 +858,7 @@ class ONNXInferencePipeline:
         class_labels = ['Inappropriate Content', 'Appropriate']
         return {
             'label': class_labels[predicted_class],
-            '
-            'probabilities': probabilities[0].tolist(),
-            'keyword_matched': False # Flag to indicate model prediction
+            'probabilities': probabilities[0].tolist()
         }

 # Example usage
@@ -873,27 +868,23 @@ if __name__ == "__main__":

     # Example texts for testing
     example_texts = [
-        "You're a worthless piece of garbage who should die"
-        "Hello, how are you doing today?"
+        "You're a worthless piece of garbage who should die"
     ]

     for text in example_texts:
         result = pipeline.predict(text)
         print(f"Input: {text}")
-        print(f"Prediction: {result['label']}")
-
-            print("(Detected by keyword filter)")
-        else:
-            print(f"Confidence: {result['confidence']:.2%}")
+        print(f"Prediction: {result['label']} ")
+        #print(f"Probabilities: Inappropriate={result['probabilities'][0]:.2%}, Not Inappropriate={result['probabilities'][1]:.2%}")
         print("-" * 80)

     # Define a function for Gradio to use
     def gradio_predict(text):
         result = pipeline.predict(text)
-
-
-
-
+        return (
+            f"Prediction: {result['label']} \n"
+            #f"Probabilities: Inappropriate={result['probabilities'][0]:.2%}, Not Inappropriate={result['probabilities'][1]:.2%}"
+        )

     # Create a Gradio interface
     iface = gr.Interface(