import json
import random

import gradio as gr
import nltk
import numpy as np
import tensorflow as tf
import tflearn
from nltk.stem.lancaster import LancasterStemmer
nltk.download('punkt')  # tokenizer models; newer NLTK releases may also need 'punkt_tab'
stemmer = LancasterStemmer()
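# The Lancaster stemmer aggressively reduces inflected forms to a common
# root, so related words in the training patterns share one vocabulary slot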
# Load intents file
with open("intents.json") as file:
    data = json.load(file)
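# Assumed intents.json layout, based on the keys this script reads below:
#   {"intents": [{"tag": "greeting",
#                 "patterns": ["Hi", "How are you?"],
#                 "responses": ["Hello!", "Hi there."]}]}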
# Data preprocessing
words, labels, docs_x, docs_y = [], [], [], []
for intent in data["intents"]:
    for pattern in intent["patterns"]:
        wrds = nltk.word_tokenize(pattern)
        words.extend(wrds)
        docs_x.append(wrds)
        docs_y.append(intent["tag"])
    if intent["tag"] not in labels:
        labels.append(intent["tag"])
# Stem and sort words
words = sorted(set(stemmer.stem(w.lower()) for w in words if w not in ["?", ".", ",", "!"]))
labels = sorted(labels)
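# `words` is now the stemmed, deduplicated vocabulary and `labels` the sorted
# intent tags; their orderings fix the layout of the vectors built below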
# Create training data
training, output = [], []
out_empty = [0] * len(labels)
for x, doc in enumerate(docs_x):
    # Lowercase before stemming so pattern tokens match the lowercased,
    # already-stemmed vocabulary built above
    doc_stems = [stemmer.stem(word.lower()) for word in doc]
    bag = [1 if w in doc_stems else 0 for w in words]
    output_row = out_empty[:]
    output_row[labels.index(docs_y[x])] = 1
    training.append(bag)
    output.append(output_row)
training, output = np.array(training), np.array(output)
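# Shapes: training is (num_patterns, len(words)) bag-of-words rows,
# output is (num_patterns, len(labels)) one-hot intent labels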
# Build and train the model
tf.compat.v1.reset_default_graph()
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
model = tflearn.DNN(net)
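# Reuse a saved checkpoint when one exists; otherwise train from scratch
# and save the weights for the next run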
try:
    model.load("MentalHealthChatBotmodel.tflearn")
except Exception:
    # tflearn/TensorFlow raise their own NotFoundError (not FileNotFoundError)
    # when the checkpoint is missing, so catch broadly here
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("MentalHealthChatBotmodel.tflearn")
# Function to preprocess user input
def bag_of_words(s, words):
    """Convert a sentence into a binary bag-of-words vector over `words`."""
    bag = [0] * len(words)
    s_words = [stemmer.stem(word.lower()) for word in nltk.word_tokenize(s)]
    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
    return np.array(bag)
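# Example: bag_of_words("I feel sad", words) returns a len(words)-long vector
# with 1s at the positions of any vocabulary stems found in the sentence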
# Chat function
def chat(message, history=None):
    history = history or []
    try:
        bag = bag_of_words(message, words)
        results = model.predict([bag])
        results_index = np.argmax(results)
        tag = labels[results_index]
        for tg in data["intents"]:
            if tg["tag"] == tag:
                response = random.choice(tg["responses"])
                break
        else:  # no intent matched the predicted tag
            response = "I'm sorry, I don't have a response for that."
    except Exception:
        response = "I'm sorry, I couldn't understand your message."
    history.append((message, response))
    return history, history
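# `chat` returns the updated history twice: once for the Chatbot display and
# once for the gr.State that is fed back in on the next turn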
# Gradio Interface
demo = gr.Interface(
    fn=chat,
    inputs=[gr.Textbox(lines=1, label="Message"), gr.State()],
    outputs=[gr.Chatbot(label="Chat"), gr.State()],
    allow_flagging="never",
    title="Wellbeing for All | Generative AI Enthusiasts",
)
if __name__ == "__main__":
    demo.launch()