JohanBeytell committed
Commit 20628b5 · verified · 1 Parent(s): b16c88e

Update app.py

Files changed (1)
  1. app.py +34 -2
app.py CHANGED
@@ -38,6 +38,24 @@ def generate_random_name(interpreter, vocab_size, sp, max_length=10, temperature
     decoded_name = ''
 
     if seed_text:
+        hate_speech = detect_hate_speech(seed_text)
+        profanity = detect_profanity([seed_text], language='All')
+        output = ''
+
+        if profanity > 0:
+            gr.Warning("Profanity Detected in the seed text, using an empty seed text.")
+            seed_text = ''
+        else:
+            if hate_speech == ['Hate Speech']:
+                gr.Warning('Hate Speech Detected in the seed text, using an empty seed text.')
+                seed_text = ''
+            elif hate_speech == ['Offensive Speech']:
+                gr.Warning('Offensive Speech Detected in the seed text, using an empty seed text.')
+                seed_text = ''
+            # elif hate_speech == ['No Hate and Offensive Speech']:
+
+        names.append(name)
+        return pd.DataFrame(names, columns=['Names'])
         generated_name = seed_text
     else:
         random_index = np.random.randint(1, vocab_size)
@@ -132,7 +150,7 @@ def generateNames(type, amount, max_length=30, temperature=0.5, seed_text=""):
         elif hate_speech == ['Offensive Speech']:
             name = 'Offensive Speech Detected'
         elif hate_speech == ['No Hate and Offensive Speech']:
-            name = generated_name
+            name = stripped
         names.append(name)
     return pd.DataFrame(names, columns=['Names'])
 
@@ -155,7 +173,21 @@ def generateNames(type, amount, max_length=30, temperature=0.5, seed_text=""):
     # Use the function to generate a name
     for _ in range(amount):
         generated_name = generate_random_name(interpreter, vocab_size, sp, seed_text=seed_text, max_length=max_length, temperature=temperature, max_seq_len=max_seq_len)
-        names.append(generated_name)
+        stripped = generated_name.strip()
+        hate_speech = detect_hate_speech(stripped)
+        profanity = detect_profanity([stripped], language='All')
+        name = ''
+
+        if profanity > 0:
+            name = "Profanity Detected"
+        else:
+            if hate_speech == ['Hate Speech']:
+                name = 'Hate Speech Detected'
+            elif hate_speech == ['Offensive Speech']:
+                name = 'Offensive Speech Detected'
+            elif hate_speech == ['No Hate and Offensive Speech']:
+                name = stripped
+        names.append(name)
     return pd.DataFrame(names, columns=['Names'])
 
 demo = gr.Interface(
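Note: the block below is a minimal, self-contained sketch of the seed-text moderation gate this commit introduces, pulled out of generate_random_name() for readability. The moderate_seed_text wrapper and the two stub detectors are hypothetical stand-ins; in app.py the real detect_hate_speech / detect_profanity helpers are called inline and the warnings are surfaced through Gradio's gr.Warning while the app is running.

import gradio as gr

def detect_profanity(texts, language='All'):
    # Stub stand-in: the real helper in app.py returns a count, where > 0 means profanity was found.
    return 0

def detect_hate_speech(text):
    # Stub stand-in: the real helper in app.py returns a one-element label list.
    return ['No Hate and Offensive Speech']

def moderate_seed_text(seed_text):
    # Return seed_text if it passes moderation, otherwise '' (after raising a Gradio warning),
    # mirroring the checks added to generate_random_name() in this commit.
    if not seed_text:
        return seed_text

    if detect_profanity([seed_text], language='All') > 0:
        gr.Warning("Profanity Detected in the seed text, using an empty seed text.")
        return ''

    label = detect_hate_speech(seed_text)
    if label == ['Hate Speech']:
        gr.Warning('Hate Speech Detected in the seed text, using an empty seed text.')
        return ''
    if label == ['Offensive Speech']:
        gr.Warning('Offensive Speech Detected in the seed text, using an empty seed text.')
        return ''

    return seed_text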