import json
import random

import gradio as gr
import nltk
import numpy as np
import tensorflow as tf
import tflearn
from nltk.stem.lancaster import LancasterStemmer

# Tokenizer models required by nltk.word_tokenize
nltk.download('punkt')
stemmer = LancasterStemmer()
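
# Expected shape of intents.json, inferred from the fields accessed below
# (the real file may carry additional keys; this sketch is illustrative):
# {
#   "intents": [
#     {
#       "tag": "greeting",
#       "patterns": ["Hi", "Hello", "How are you?"],
#       "responses": ["Hello!", "Hi there, how can I help?"]
#     }
#   ]
# }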
# Load intents file
with open("intents.json") as file:
    data = json.load(file)
# Data preprocessing: tokenize every pattern and record which tag it belongs to
words, labels, docs_x, docs_y = [], [], [], []
for intent in data["intents"]:
    for pattern in intent["patterns"]:
        wrds = nltk.word_tokenize(pattern)
        words.extend(wrds)
        docs_x.append(wrds)
        docs_y.append(intent["tag"])
    if intent["tag"] not in labels:
        labels.append(intent["tag"])
# Stem and lowercase the vocabulary, dropping punctuation and duplicates
words = sorted(set(stemmer.stem(w.lower()) for w in words if w not in ["?", ".", ",", "!"]))
labels = sorted(labels)
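
# Illustrative effect (assumed, not verified against this vocabulary):
# the Lancaster stemmer maps variants such as "running" to a common
# stem like "run", so different surface forms share one vocabulary slot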
# Create training data: one bag-of-words vector and one one-hot label row per pattern
training, output = [], []
out_empty = [0] * len(labels)
for x, doc in enumerate(docs_x):
    # Normalize this pattern's tokens exactly as the vocabulary was built
    # (stem + lowercase), so membership checks actually match
    doc_stems = [stemmer.stem(word.lower()) for word in doc]
    bag = [1 if w in doc_stems else 0 for w in words]
    output_row = out_empty[:]
    output_row[labels.index(docs_y[x])] = 1
    training.append(bag)
    output.append(output_row)
training, output = np.array(training), np.array(output)
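
# Resulting shapes: training is (num_patterns, len(words)) and
# output is (num_patterns, len(labels)); every row is a 0/1 vector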
# Build the network: two 8-unit hidden layers and a softmax output
# with one unit per intent tag
tf.compat.v1.reset_default_graph()
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
model = tflearn.DNN(net)
# Load a previously trained model if one exists; otherwise train and save.
# A missing checkpoint may surface as TensorFlow's NotFoundError rather
# than the builtin FileNotFoundError, so catch broadly and retrain.
try:
    model.load("MentalHealthChatBotmodel.tflearn")
except Exception:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("MentalHealthChatBotmodel.tflearn")
# Preprocess user input into the same bag-of-words encoding used in training
def bag_of_words(s, words):
    bag = [0] * len(words)
    s_words = [stemmer.stem(word.lower()) for word in nltk.word_tokenize(s)]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
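
# Illustrative call (the exact vector depends on the learned vocabulary):
#   bag_of_words("Hello there", words)  # -> array([0, 1, 0, ..., 0])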
# Chat function: classify the message, then answer with a random
# response drawn from the predicted intent
def chat(message, history=None):
    history = history or []
    try:
        bag = bag_of_words(message, words)
        results = model.predict([bag])
        results_index = np.argmax(results)
        tag = labels[results_index]
        for tg in data["intents"]:
            if tg['tag'] == tag:
                response = random.choice(tg['responses'])
                break
        else:
            response = "I'm sorry, I don't have a response for that."
    except Exception:
        response = "I'm sorry, I couldn't understand your message."
    history.append((message, response))
    return history, history
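
# Illustrative turn, assuming a "greeting" intent like the sketch above
# (responses are chosen at random, so the exact reply varies):
#   history, _ = chat("Hi")  # -> [("Hi", "Hello!")]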
# Gradio interface: message textbox plus session state in,
# chatbot transcript plus updated state out
demo = gr.Interface(
    fn=chat,
    inputs=[gr.Textbox(lines=1, label="Message"), gr.State()],
    outputs=[gr.Chatbot(label="Chat"), gr.State()],
    allow_flagging="never",
    title="Wellbeing for All | Generative AI Enthusiasts"
)
if __name__ == "__main__":
    demo.launch()