import gradio as gr
import sentencepiece as spm
import numpy as np
import pandas as pd
import tensorflow as tf
from valx import detect_profanity, detect_hate_speech

# Per-model settings: the input sequence length expected by the TFLite model,
# the SentencePiece tokenizer file, and the TFLite model file.
MODEL_CONFIGS = {
    "Terraria": {"max_seq_len": 12, "sp_model": "models/terraria_names.model", "tflite_model": "models/dungen_terraria_model.tflite"},
    "Skyrim": {"max_seq_len": 13, "sp_model": "models/skyrim_names.model", "tflite_model": "models/dungen_skyrim_model.tflite"},
    "Witcher": {"max_seq_len": 20, "sp_model": "models/witcher_names.model", "tflite_model": "models/dungen_witcher_model.tflite"},
    "Fantasy": {"max_seq_len": 16, "sp_model": "models/fantasy_names.model", "tflite_model": "models/dungen_fantasy_model.tflite"},
}


def custom_pad_sequences(sequences, maxlen, padding='pre', value=0):
    """
    Pad sequences to the same length.

    :param sequences: List of lists, where each element is a sequence.
    :param maxlen: Maximum length of all sequences.
    :param padding: 'pre' or 'post', pad either before or after each sequence.
    :param value: Padding value.
    :return: Numpy array with dimensions (number_of_sequences, maxlen).
    """
    padded_sequences = np.full((len(sequences), maxlen), value)
    for i, seq in enumerate(sequences):
        if padding == 'pre':
            if len(seq) <= maxlen:
                padded_sequences[i, -len(seq):] = seq
            else:
                padded_sequences[i, :] = seq[-maxlen:]
        elif padding == 'post':
            if len(seq) <= maxlen:
                padded_sequences[i, :len(seq)] = seq
            else:
                padded_sequences[i, :] = seq[:maxlen]
    return padded_sequences


def generate_random_name(interpreter, vocab_size, sp, max_length=10, temperature=0.5, seed_text="", max_seq_len=12):
    """Generate a single name with the given TFLite interpreter and SentencePiece tokenizer."""
    # Get input and output tensor details
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    decoded_name = ''

    # Start from the seed text, or from a random token if no seed was given
    if seed_text:
        generated_name = seed_text
    else:
        random_index = np.random.randint(1, vocab_size)
        generated_name = sp.id_to_piece(random_index)

    for _ in range(max_length - 1):
        token_list = sp.encode_as_ids(generated_name)

        # Skip the current iteration if the token list is empty
        if len(token_list) == 0:
            continue

        # Pad to the input length expected by the model
        token_list = custom_pad_sequences([token_list], maxlen=max_seq_len, padding='pre')

        # Convert to float32 before setting the input tensor
        token_list = token_list.astype(np.float32)

        # Run inference
        interpreter.set_tensor(input_details[0]['index'], token_list)
        interpreter.invoke()
        predicted = interpreter.get_tensor(output_details[0]['index'])[0]

        # Apply temperature to the predictions and renormalize
        predicted = np.log(predicted + 1e-8) / temperature
        predicted = np.exp(predicted) / np.sum(np.exp(predicted))

        # Sample the next token from the distribution
        next_index = int(np.random.choice(range(vocab_size), p=predicted))
        next_token = sp.id_to_piece(next_index)
        generated_name += next_token

        # Decode the generated subword tokens into a string
        decoded_name = sp.decode_pieces(generated_name.split())

        # Stop if the end-of-sequence token is predicted or the name is long enough
        if next_token == '</s>' or len(decoded_name) > max_length:
            break

    # Clean up SentencePiece whitespace markers and special tokens
    decoded_name = decoded_name.replace("▁", " ")
    decoded_name = decoded_name.replace("<unk>", "")
    decoded_name = decoded_name.replace("<s>", "")
    decoded_name = decoded_name.replace("</s>", "")

    # Drop the last (possibly incomplete) word
    generated_name = decoded_name.rsplit(' ', 1)[0]

    # Capitalize the first letter
    if generated_name:
        generated_name = generated_name[0].upper() + generated_name[1:]

    # Drop a trailing fragment that is too short to be a word
    parts = generated_name.split()
    if parts and len(parts[-1]) < 3:
        generated_name = " ".join(parts[:-1])

    return generated_name.strip()


def generateNames(model_type, amount, max_length=30, temperature=0.5, seed_text=""):
    """Generate `amount` names with the selected model and filter out unsafe outputs."""
    # Moderate the seed text; fall back to an empty seed if it is flagged
    hate_speech = detect_hate_speech(seed_text)
    profanity = detect_profanity([seed_text], language='All')

    if profanity > 0:
        gr.Warning("Profanity detected in the seed text, using an empty seed text.")
        seed_text = ''
    elif hate_speech == ['Hate Speech']:
        gr.Warning('Hate speech detected in the seed text, using an empty seed text.')
        seed_text = ''
    elif hate_speech == ['Offensive Speech']:
        gr.Warning('Offensive speech detected in the seed text, using an empty seed text.')
        seed_text = ''

    if model_type not in MODEL_CONFIGS:
        return pd.DataFrame([], columns=['Names'])

    config = MODEL_CONFIGS[model_type]
    max_seq_len = config["max_seq_len"]

    # Load the SentencePiece tokenizer
    sp = spm.SentencePieceProcessor()
    sp.load(config["sp_model"])
    vocab_size = sp.GetPieceSize()

    # Load the TFLite model
    interpreter = tf.lite.Interpreter(model_path=config["tflite_model"])
    interpreter.allocate_tensors()

    amount = int(amount)
    max_length = int(max_length)
    names = []

    for _ in range(amount):
        generated_name = generate_random_name(interpreter, vocab_size, sp, seed_text=seed_text, max_length=max_length, temperature=temperature, max_seq_len=max_seq_len)
        stripped = generated_name.strip()

        # Moderate each generated name before returning it
        hate_speech = detect_hate_speech(stripped)
        profanity = detect_profanity([stripped], language='All')
        name = ''
        if profanity > 0:
            name = "Profanity Detected"
        elif hate_speech == ['Hate Speech']:
            name = 'Hate Speech Detected'
        elif hate_speech == ['Offensive Speech']:
            name = 'Offensive Speech Detected'
        elif hate_speech == ['No Hate and Offensive Speech']:
            name = stripped
        names.append(name)

    return pd.DataFrame(names, columns=['Names'])


demo = gr.Interface(
    fn=generateNames,
    inputs=[
        gr.Radio(choices=["Terraria", "Skyrim", "Witcher", "Fantasy"], label="Choose a model for your request", value="Terraria"),
        gr.Slider(1, 100, step=1, label='Amount of Names', info='How many names to generate; must be greater than 0'),
        gr.Slider(10, 60, value=30, step=1, label='Max Length', info='Maximum length of each generated name'),
        gr.Slider(0.1, 1, value=0.5, label='Temperature', info='Controls the randomness of generation: higher values are more creative, lower values more predictable'),
        gr.Textbox('', label='Seed text (optional)', info='The starting text to begin with', max_lines=1),
    ],
    outputs=[gr.Dataframe(row_count=(2, "dynamic"), col_count=(1, "fixed"), label="Generated Names", headers=["Names"])],
    title='Dungen - Name Generator',
    description='A fun game-inspired name generator. For an example of how to create and train a model similar to this one, head over to: https://github.com/Infinitode/OPEN-ARC/tree/main/Project-5-TWNG. There you will find our base model, the dataset we used, and implementation code in the form of a Jupyter Notebook (exported from Kaggle).'
)

if __name__ == "__main__":
    demo.launch()
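
# Optional: a minimal smoke test, assuming the SentencePiece and TFLite files
# referenced in MODEL_CONFIGS are present under models/. Uncomment to generate a
# few names from a plain Python session instead of the Gradio UI (left commented
# out because demo.launch() blocks until the server is stopped).
#
# names_df = generateNames("Terraria", amount=5, max_length=30, temperature=0.5, seed_text="")
# print(names_df)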