llepogam committed on
Commit
58ef32f
·
1 Parent(s): 945f037

app improvement

Browse files
Files changed (1) hide show
  1. app.py +204 -22
app.py CHANGED
@@ -4,6 +4,8 @@ import plotly.express as px
4
  import plotly.graph_objects as go
5
  import numpy as np
6
  import requests
 
 
7
 
8
  ### Config
9
  st.set_page_config(
@@ -12,44 +14,224 @@ st.set_page_config(
12
  layout="wide"
13
  )
14
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
15
 
16
  def hate_speech_detection(text):
 
17
  url = "https://llepogam-hate-speech-detection-api.hf.space/predict"
18
  headers = {
19
  "accept": "application/json",
20
  "Content-Type": "application/json"
21
  }
22
 
23
- # Define the payload
24
- Text_to_predict = {
25
- "Text": text
26
- }
 
 
 
 
 
 
 
 
 
 
 
27
 
28
- # Make the POST request
29
- response = requests.post(url, headers=headers, json=Text_to_predict)
 
 
 
 
 
30
 
31
- # Process the response
32
- if response.status_code == 200:
33
- return response.json()
34
- else:
35
- return f"Failed to get a response. Status code: {response.status_code}, Response: {response.text}"
36
 
 
 
 
 
 
37
 
 
 
 
 
 
 
 
 
 
38
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
- st.title("Offensive Speech Detection")
41
- user_input = st.text_area("Enter a text:")
 
 
 
42
 
43
- if user_input:
44
- prediction = hate_speech_detection(user_input)
45
- st.write(f"Prediction: {prediction['prediction']}")
46
- st.write(f"Probability: {prediction['probability']}")
47
-
 
 
 
 
 
 
 
 
 
 
 
48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
 
50
- ### Footer
51
- empty_space, footer = st.columns([1, 2])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
 
53
- with empty_space:
54
- st.write("")
 
 
 
 
 
55
 
 
4
  import plotly.graph_objects as go
5
  import numpy as np
6
  import requests
7
+ from datetime import datetime
8
+ import time
9
 
10
  ### Config
11
  st.set_page_config(
 
14
  layout="wide"
15
  )
16
 
17
# --- Session state & page styling ---

# Per-session defaults: prediction history and the last API health probe.
for _key, _default in (("history", []), ("api_health", None)):
    if _key not in st.session_state:
        st.session_state[_key] = _default

# CSS for the result boxes; the severity class controls tint and border.
_CUSTOM_CSS = """
<style>
.prediction-box {
    padding: 20px;
    border-radius: 5px;
    margin: 10px 0;
}
.high-severity {
    background-color: rgba(255, 0, 0, 0.1);
    border: 1px solid red;
}
.medium-severity {
    background-color: rgba(255, 165, 0, 0.1);
    border: 1px solid orange;
}
.low-severity {
    background-color: rgba(0, 255, 0, 0.1);
    border: 1px solid green;
}
</style>
"""
st.markdown(_CUSTOM_CSS, unsafe_allow_html=True)
45
+
46
def check_api_health():
    """Return True when the prediction API's /health endpoint answers 200.

    A short timeout keeps the UI responsive when the remote Space is
    asleep; any network-level failure simply reports the API as down.
    """
    try:
        response = requests.get(
            "https://llepogam-hate-speech-detection-api.hf.space/health",
            timeout=5,  # don't hang the Streamlit run on a sleeping Space
        )
        return response.status_code == 200
    except requests.exceptions.RequestException:
        # Only network/HTTP errors mean "unhealthy".  The original bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit and would
        # hide real programming bugs.
        return False
53
 
54
def hate_speech_detection(text):
    """Send *text* to the prediction endpoint.

    Returns a ``(payload, error)`` pair: on success ``payload`` is the
    decoded JSON response and ``error`` is ``None``; on failure
    ``payload`` is ``None`` and ``error`` is a human-readable message.
    """
    endpoint = "https://llepogam-hate-speech-detection-api.hf.space/predict"
    request_headers = {
        "accept": "application/json",
        "Content-Type": "application/json",
    }

    try:
        reply = requests.post(
            endpoint,
            headers=request_headers,
            json={"Text": text},
            timeout=10,
        )
        reply.raise_for_status()
        # .json() stays inside the try so a malformed body is reported
        # through the generic error path rather than crashing the app.
        return reply.json(), None
    except requests.exceptions.Timeout:
        return None, "API request timed out. Please try again."
    except requests.exceptions.RequestException as exc:
        return None, f"API error: {str(exc)}"
    except Exception as exc:
        return None, f"Unexpected error: {str(exc)}"
77
 
78
def get_severity_class(probability):
    """Map a model confidence score to the CSS class used for the result box.

    Strictly above 0.7 is high severity, strictly above 0.4 is medium,
    anything else is low.
    """
    severity_cutoffs = (
        (0.7, "high-severity"),
        (0.4, "medium-severity"),
    )
    for cutoff, css_class in severity_cutoffs:
        if probability > cutoff:
            return css_class
    return "low-severity"
85
 
86
# --- Header ---
st.title("🚫 Offensive Speech Detection")

_INTRO = """
This application helps identify potentially offensive or harmful content in text.
It uses a machine learning model to analyze text and determine if it contains offensive speech.

**How it works:**
1. Enter your text in the input box below
2. The model will analyze the content and provide a prediction
3. Results show both the classification and confidence level
"""
st.markdown(_INTRO)
97
 
98
# --- API status check ---
if st.button("Check API Status"):
    with st.spinner("Checking API health..."):
        st.session_state.api_health = check_api_health()

# None means "never checked yet"; render nothing in that case.
health = st.session_state.api_health
if health is not None:
    if health:
        st.markdown("API Status: :green[Online]")
    else:
        st.markdown("API Status: :red[Offline]")
107
 
108
# --- Example inputs ---
with st.expander("📝 Example Inputs"):
    st.markdown("""
    Try these example texts to test the model:
    1. "Have a great day!"
    2. "I disagree with your opinion."
    3. "You're amazing!"

    Click on any example to copy it to the input box.
    """)
    _samples = (
        "Have a great day!",
        "I disagree with your opinion.",
        "You're amazing!",
    )
    # These buttons run *before* the text_area below is instantiated, so
    # writing to st.session_state.user_input is legal for this run.
    for _number, _sample in enumerate(_samples, start=1):
        if st.button(f"Use Example {_number}"):
            st.session_state.user_input = _sample
124
 
125
# --- FAQ ---
_FAQ_TEXT = """
**Q: What is considered offensive speech?**
- A: The model identifies content that could be harmful, insulting, or discriminatory.

**Q: How accurate is the detection?**
- A: The model provides a confidence score with each prediction. Higher scores indicate greater confidence.

**Q: What happens to my input data?**
- A: Your text is only used for prediction and temporarily stored in your session history.
"""

with st.expander("❓ Frequently Asked Questions"):
    st.markdown(_FAQ_TEXT)
137
+
138
# --- Text input ---
max_chars = 500

user_input = st.text_area(
    "Enter text to analyze:",
    height=100,
    key="user_input",
    help="Enter the text you want to analyze for offensive content. Maximum 500 characters.",
    max_chars=max_chars,
)

# Live character budget shown under the box.
st.caption(f"Characters remaining: {max_chars - len(user_input)}")
151
+
152
# Clear button.
# NOTE: assigning to st.session_state.user_input *after* the text_area
# widget with key="user_input" has been instantiated raises
# StreamlitAPIException, and st.experimental_rerun() is deprecated.
# An on_click callback runs at the start of the next script run, before
# the widget exists, so the reset is legal and no explicit rerun is needed.
def _clear_input():
    """Reset the analysis text box (runs before widgets are rebuilt)."""
    st.session_state.user_input = ""

st.button("Clear Input", on_click=_clear_input)
156
+
157
# --- Process input ---
if user_input:
    # Truthy but whitespace-only input (e.g. "   ") still needs rejecting.
    if not user_input.strip():
        st.warning("Please enter some text to analyze.")
    else:
        with st.spinner("Analyzing text..."):
            result, error = hate_speech_detection(user_input)

        if error:
            st.error(f"Error: {error}")
        else:
            # Express the model confidence as a percentage for display.
            probability_pct = result['probability'] * 100

            # Tint the result box according to severity.
            severity_class = get_severity_class(result['probability'])

            st.markdown(f"""
            <div class="prediction-box {severity_class}">
                <h3>Analysis Results</h3>
                <p><strong>Prediction:</strong> {result['prediction']}</p>
                <p><strong>Confidence:</strong> {probability_pct:.1f}%</p>
            </div>
            """, unsafe_allow_html=True)

            # Confidence gauge (green / orange / red bands match the
            # severity thresholds in get_severity_class).
            fig = go.Figure(go.Indicator(
                mode="gauge+number",
                value=probability_pct,
                title={'text': "Confidence Level"},
                gauge={
                    'axis': {'range': [0, 100]},
                    'bar': {'color': "darkblue"},
                    'steps': [
                        {'range': [0, 40], 'color': "lightgreen"},
                        {'range': [40, 70], 'color': "orange"},
                        {'range': [70, 100], 'color': "red"}
                    ]
                }
            ))
            fig.update_layout(height=300)
            st.plotly_chart(fig, use_container_width=True)

            # Record the analysis -- but only once per text.  Streamlit
            # reruns this whole script on every widget interaction, and
            # unconditionally appending here duplicated the same entry
            # (one per rerun) in the history table.
            history = st.session_state.history
            if not history or history[-1]['text'] != user_input:
                history.append({
                    'timestamp': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                    'text': user_input,
                    'prediction': result['prediction'],
                    'confidence': probability_pct
                })
207
 
208
# --- Analysis history ---
if st.session_state.history:
    with st.expander("📜 Analysis History"):
        # NOTE(review): assumes `pandas` is imported as `pd` near the top
        # of the file -- the import block is outside this chunk; confirm.
        history_df = pd.DataFrame(st.session_state.history)
        st.dataframe(
            history_df,
            column_config={
                "timestamp": "Time",
                "text": "Input Text",
                "prediction": "Prediction",
                "confidence": st.column_config.NumberColumn(
                    "Confidence",
                    format="%.1f%%"
                )
            },
            hide_index=True
        )

        # Clearing via on_click mutates the list before the automatic
        # rerun, so the table disappears immediately without the
        # deprecated st.experimental_rerun() call.
        st.button("Clear History", on_click=st.session_state.history.clear)
229
 
230
# --- Footer ---
st.markdown("---")
st.markdown(
    """
    <div style='text-align: center'>
        <p>Developed with ❤️ for safer online communication</p>
    </div>
    """,
    unsafe_allow_html=True,
)
237