JohanBeytell committed
Commit 16bacdc · verified · 1 Parent(s): db955af

Update app.py

Files changed (1): app.py (+217 -1)
app.py CHANGED
@@ -1051,6 +1051,222 @@ def generateNames(type, amount, max_length=30, temperature=0.5, seed_text=""):
                     name = stripped
             names.append(name)
         return pd.DataFrame(names, columns=['Names'])
+
+    elif type == "Warframe":
+        max_seq_len = 13 # For Warframe, 13
+        sp = spm.SentencePieceProcessor()
+        sp.load("models/warframe_names.model")
+        amount = int(amount)
+        max_length = int(max_length)
+
+        names = []
+
+        # Define necessary variables
+        vocab_size = sp.GetPieceSize()
+
+        # Load TFLite model
+        interpreter = tf.lite.Interpreter(model_path="models/dungen_warframe_model.tflite")
+        interpreter.allocate_tensors()
+
+        # Use the function to generate a name
+        for _ in range(amount):
+            generated_name = generate_random_name(interpreter, vocab_size, sp, seed_text=seed_text, max_length=max_length, temperature=temperature, max_seq_len=max_seq_len)
+            stripped = generated_name.strip()
+            hate_speech = detect_hate_speech(stripped)
+            profanity = detect_profanity([stripped], language='All')
+            name = ''
+
+            if len(profanity) > 0:
+                name = "Profanity Detected"
+            else:
+                if hate_speech == ['Hate Speech']:
+                    name = 'Hate Speech Detected'
+                elif hate_speech == ['Offensive Speech']:
+                    name = 'Offensive Speech Detected'
+                elif hate_speech == ['No Hate and Offensive Speech']:
+                    name = stripped
+            names.append(name)
+        return pd.DataFrame(names, columns=['Names'])
+
+    elif type == "Call of Duty":
+        max_seq_len = 11 # For Call of Duty, 11
+        sp = spm.SentencePieceProcessor()
+        sp.load("models/call_of_duty_names.model")
+        amount = int(amount)
+        max_length = int(max_length)
+
+        names = []
+
+        # Define necessary variables
+        vocab_size = sp.GetPieceSize()
+
+        # Load TFLite model
+        interpreter = tf.lite.Interpreter(model_path="models/dungen_call_of_duty_model.tflite")
+        interpreter.allocate_tensors()
+
+        # Use the function to generate a name
+        for _ in range(amount):
+            generated_name = generate_random_name(interpreter, vocab_size, sp, seed_text=seed_text, max_length=max_length, temperature=temperature, max_seq_len=max_seq_len)
+            stripped = generated_name.strip()
+            hate_speech = detect_hate_speech(stripped)
+            profanity = detect_profanity([stripped], language='All')
+            name = ''
+
+            if len(profanity) > 0:
+                name = "Profanity Detected"
+            else:
+                if hate_speech == ['Hate Speech']:
+                    name = 'Hate Speech Detected'
+                elif hate_speech == ['Offensive Speech']:
+                    name = 'Offensive Speech Detected'
+                elif hate_speech == ['No Hate and Offensive Speech']:
+                    name = stripped
+            names.append(name)
+        return pd.DataFrame(names, columns=['Names'])
+
+    elif type == "Forza Horizon":
+        max_seq_len = 10 # For Forza Horizon, 10
+        sp = spm.SentencePieceProcessor()
+        sp.load("models/forza_horizon_names.model")
+        amount = int(amount)
+        max_length = int(max_length)
+
+        names = []
+
+        # Define necessary variables
+        vocab_size = sp.GetPieceSize()
+
+        # Load TFLite model
+        interpreter = tf.lite.Interpreter(model_path="models/dungen_forza_horizon_model.tflite")
+        interpreter.allocate_tensors()
+
+        # Use the function to generate a name
+        for _ in range(amount):
+            generated_name = generate_random_name(interpreter, vocab_size, sp, seed_text=seed_text, max_length=max_length, temperature=temperature, max_seq_len=max_seq_len)
+            stripped = generated_name.strip()
+            hate_speech = detect_hate_speech(stripped)
+            profanity = detect_profanity([stripped], language='All')
+            name = ''
+
+            if len(profanity) > 0:
+                name = "Profanity Detected"
+            else:
+                if hate_speech == ['Hate Speech']:
+                    name = 'Hate Speech Detected'
+                elif hate_speech == ['Offensive Speech']:
+                    name = 'Offensive Speech Detected'
+                elif hate_speech == ['No Hate and Offensive Speech']:
+                    name = stripped
+            names.append(name)
+        return pd.DataFrame(names, columns=['Names'])
+
+    elif type == "Halo":
+        max_seq_len = 14 # For Halo, 14
+        sp = spm.SentencePieceProcessor()
+        sp.load("models/halo_names.model")
+        amount = int(amount)
+        max_length = int(max_length)
+
+        names = []
+
+        # Define necessary variables
+        vocab_size = sp.GetPieceSize()
+
+        # Load TFLite model
+        interpreter = tf.lite.Interpreter(model_path="models/dungen_halo_model.tflite")
+        interpreter.allocate_tensors()
+
+        # Use the function to generate a name
+        for _ in range(amount):
+            generated_name = generate_random_name(interpreter, vocab_size, sp, seed_text=seed_text, max_length=max_length, temperature=temperature, max_seq_len=max_seq_len)
+            stripped = generated_name.strip()
+            hate_speech = detect_hate_speech(stripped)
+            profanity = detect_profanity([stripped], language='All')
+            name = ''
+
+            if len(profanity) > 0:
+                name = "Profanity Detected"
+            else:
+                if hate_speech == ['Hate Speech']:
+                    name = 'Hate Speech Detected'
+                elif hate_speech == ['Offensive Speech']:
+                    name = 'Offensive Speech Detected'
+                elif hate_speech == ['No Hate and Offensive Speech']:
+                    name = stripped
+            names.append(name)
+        return pd.DataFrame(names, columns=['Names'])
+
+    elif type == "Overwatch":
+        max_seq_len = 9 # For Overwatch, 9
+        sp = spm.SentencePieceProcessor()
+        sp.load("models/overwatch_names.model")
+        amount = int(amount)
+        max_length = int(max_length)
+
+        names = []
+
+        # Define necessary variables
+        vocab_size = sp.GetPieceSize()
+
+        # Load TFLite model
+        interpreter = tf.lite.Interpreter(model_path="models/dungen_overwatch_model.tflite")
+        interpreter.allocate_tensors()
+
+        # Use the function to generate a name
+        for _ in range(amount):
+            generated_name = generate_random_name(interpreter, vocab_size, sp, seed_text=seed_text, max_length=max_length, temperature=temperature, max_seq_len=max_seq_len)
+            stripped = generated_name.strip()
+            hate_speech = detect_hate_speech(stripped)
+            profanity = detect_profanity([stripped], language='All')
+            name = ''
+
+            if len(profanity) > 0:
+                name = "Profanity Detected"
+            else:
+                if hate_speech == ['Hate Speech']:
+                    name = 'Hate Speech Detected'
+                elif hate_speech == ['Offensive Speech']:
+                    name = 'Offensive Speech Detected'
+                elif hate_speech == ['No Hate and Offensive Speech']:
+                    name = stripped
+            names.append(name)
+        return pd.DataFrame(names, columns=['Names'])
+
+    elif type == "Subnautica":
+        max_seq_len = 14 # For Subnautica, 14
+        sp = spm.SentencePieceProcessor()
+        sp.load("models/subnautica_names.model")
+        amount = int(amount)
+        max_length = int(max_length)
+
+        names = []
+
+        # Define necessary variables
+        vocab_size = sp.GetPieceSize()
+
+        # Load TFLite model
+        interpreter = tf.lite.Interpreter(model_path="models/dungen_subnautica_model.tflite")
+        interpreter.allocate_tensors()
+
+        # Use the function to generate a name
+        for _ in range(amount):
+            generated_name = generate_random_name(interpreter, vocab_size, sp, seed_text=seed_text, max_length=max_length, temperature=temperature, max_seq_len=max_seq_len)
+            stripped = generated_name.strip()
+            hate_speech = detect_hate_speech(stripped)
+            profanity = detect_profanity([stripped], language='All')
+            name = ''
+
+            if len(profanity) > 0:
+                name = "Profanity Detected"
+            else:
+                if hate_speech == ['Hate Speech']:
+                    name = 'Hate Speech Detected'
+                elif hate_speech == ['Offensive Speech']:
+                    name = 'Offensive Speech Detected'
+                elif hate_speech == ['No Hate and Offensive Speech']:
+                    name = stripped
+            names.append(name)
+        return pd.DataFrame(names, columns=['Names'])
 
     elif type == "Fantasy":
         max_seq_len = 16 # For fantasy, 16
@@ -1090,7 +1306,7 @@ def generateNames(type, amount, max_length=30, temperature=0.5, seed_text=""):
 
 demo = gr.Interface(
     fn=generateNames,
-    inputs=[gr.Radio(choices=["Terraria", "Skyrim", "Witcher", "WOW", "Minecraft", "Dark Souls", "Final Fantasy", "Elden Ring", "Zelda", "Dragon Age", "Fallout", "Darkest Dungeon", "Monster Hunter", "Bloodborne", "Hollow Knight", "Assassin's Creed", "Baldur's Gate", "Cyberpunk", "Mass Effect", "God Of War", "Last Of Us", "Factorio", "The Sims", "Fortnite", "League Of Legends", "Among Us", "Fantasy"], label="Choose a model for your request", value="Terraria"), gr.Slider(1,100, step=1, label='Amount of Names', info='How many names to generate, must be greater than 0'), gr.Slider(10, 60, value=30, step=1, label='Max Length', info='Max length of the generated word'), gr.Slider(0.1, 1, value=0.5, label='Temperature', info='Controls randomness of generation, higher values = more creative, lower values = more probalistic'), gr.Textbox('', label='Seed text (optional)', info='The starting text to begin with', max_lines=1, )],
+    inputs=[gr.Radio(choices=["Terraria", "Skyrim", "Witcher", "WOW", "Minecraft", "Dark Souls", "Final Fantasy", "Elden Ring", "Zelda", "Dragon Age", "Fallout", "Darkest Dungeon", "Monster Hunter", "Bloodborne", "Hollow Knight", "Assassin's Creed", "Baldur's Gate", "Cyberpunk", "Mass Effect", "God Of War", "Last Of Us", "Factorio", "The Sims", "Fortnite", "League Of Legends", "Among Us", "Warframe", "Call of Duty", "Forza Horizon", "Halo", "Overwatch", "Subnautica", "Fantasy"], label="Choose a model for your request", value="Terraria"), gr.Slider(1,100, step=1, label='Amount of Names', info='How many names to generate, must be greater than 0'), gr.Slider(10, 60, value=30, step=1, label='Max Length', info='Max length of the generated word'), gr.Slider(0.1, 1, value=0.5, label='Temperature', info='Controls randomness of generation, higher values = more creative, lower values = more probabilistic'), gr.Textbox('', label='Seed text (optional)', info='The starting text to begin with', max_lines=1, )],
     outputs=[gr.Dataframe(row_count = (2, "dynamic"), col_count=(1, "fixed"), label="Generated Names", headers=["Names"])],
     title='Dungen - Name Generator',
     description='A fun game-inspired name generator. For an example of how to create, and train your model, like this one, head over to: https://github.com/Infinitode/OPEN-ARC/tree/main/Project-5-TWNG. There you will find our base model, the dataset we used, and implementation code in the form of a Jupyter Notebook (exported from Kaggle).'
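
The six new branches differ only in three values: the maximum sequence length, the SentencePiece tokenizer file, and the TFLite model file; the generation loop and the profanity/hate-speech filtering are identical in each. As a rough illustration (not the code in this commit), the same behaviour could be driven from a single lookup table. The table MODEL_CONFIGS and the helper generate_for_game below are hypothetical names, and the sketch assumes the helpers generate_random_name, detect_hate_speech, and detect_profanity already defined in app.py behave exactly as shown in the diff.

import pandas as pd
import sentencepiece as spm
import tensorflow as tf

# Hypothetical lookup table: game type -> (max_seq_len, SentencePiece model, TFLite model).
# The values mirror the six branches added in this commit.
MODEL_CONFIGS = {
    "Warframe":      (13, "models/warframe_names.model",      "models/dungen_warframe_model.tflite"),
    "Call of Duty":  (11, "models/call_of_duty_names.model",  "models/dungen_call_of_duty_model.tflite"),
    "Forza Horizon": (10, "models/forza_horizon_names.model", "models/dungen_forza_horizon_model.tflite"),
    "Halo":          (14, "models/halo_names.model",          "models/dungen_halo_model.tflite"),
    "Overwatch":     (9,  "models/overwatch_names.model",     "models/dungen_overwatch_model.tflite"),
    "Subnautica":    (14, "models/subnautica_names.model",    "models/dungen_subnautica_model.tflite"),
}

def generate_for_game(type, amount, max_length, temperature, seed_text):
    """Illustrative equivalent of one elif branch; relies on the app's existing helpers."""
    max_seq_len, sp_path, tflite_path = MODEL_CONFIGS[type]

    # Load the tokenizer for this game.
    sp = spm.SentencePieceProcessor()
    sp.load(sp_path)
    vocab_size = sp.GetPieceSize()

    # Load the matching TFLite model.
    interpreter = tf.lite.Interpreter(model_path=tflite_path)
    interpreter.allocate_tensors()

    names = []
    for _ in range(int(amount)):
        # generate_random_name, detect_hate_speech and detect_profanity are the helpers
        # already defined elsewhere in app.py.
        generated_name = generate_random_name(interpreter, vocab_size, sp, seed_text=seed_text, max_length=int(max_length), temperature=temperature, max_seq_len=max_seq_len)
        stripped = generated_name.strip()
        hate_speech = detect_hate_speech(stripped)
        profanity = detect_profanity([stripped], language='All')
        name = ''

        # Same moderation gate as the committed branches.
        if len(profanity) > 0:
            name = "Profanity Detected"
        else:
            if hate_speech == ['Hate Speech']:
                name = 'Hate Speech Detected'
            elif hate_speech == ['Offensive Speech']:
                name = 'Offensive Speech Detected'
            elif hate_speech == ['No Hate and Offensive Speech']:
                name = stripped
        names.append(name)
    return pd.DataFrame(names, columns=['Names'])

With this layout, the gr.Radio choices list could also be assembled from list(MODEL_CONFIGS) plus the remaining game names, keeping the UI list and the model table in sync.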
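
Each branch passes the interpreter, vocabulary size, and tokenizer to generate_random_name, whose definition sits outside these hunks. For readers unfamiliar with running a TFLite sequence model, below is a generic, hypothetical sketch of what one temperature-controlled decoding step of such a helper typically looks like; the input shape [1, max_seq_len], the left-padding scheme, and the output shape [1, vocab_size] are assumptions for illustration, not details taken from app.py.

import numpy as np
import sentencepiece as spm
import tensorflow as tf

def sample_next_id(interpreter, token_ids, max_seq_len, temperature):
    """Hypothetical single decoding step for a TFLite next-token model."""
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # Left-pad/truncate the id sequence to the model's fixed input length
    # (assumed input shape: [1, max_seq_len]).
    ids = list(token_ids)[-max_seq_len:]
    padded = np.zeros((1, max_seq_len), dtype=input_details[0]['dtype'])
    if ids:
        padded[0, -len(ids):] = ids

    interpreter.set_tensor(input_details[0]['index'], padded)
    interpreter.invoke()

    # Assumed output: a probability distribution over the vocabulary, shape [1, vocab_size].
    probs = interpreter.get_tensor(output_details[0]['index'])[0]

    # Temperature sampling: rescale in log space, renormalise, then draw one id.
    logits = np.log(np.clip(probs, 1e-9, 1.0)) / temperature
    probs = np.exp(logits - np.max(logits))
    probs = probs / probs.sum()
    return int(np.random.choice(len(probs), p=probs))

# Usage sketch (paths and lengths are only examples):
# sp = spm.SentencePieceProcessor(); sp.load("models/halo_names.model")
# interpreter = tf.lite.Interpreter(model_path="models/dungen_halo_model.tflite")
# interpreter.allocate_tensors()
# ids = sp.encode_as_ids(seed_text)
# ids.append(sample_next_id(interpreter, ids, max_seq_len=14, temperature=0.5))
# text = sp.decode_ids(ids)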