Update app.py
app.py
CHANGED
@@ -1,18 +1,18 @@
 import gradio as gr
 from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 
-# Load chatbot model
+# Load chatbot model and move it to CPU explicitly
 chatbot_model = "microsoft/DialoGPT-medium"
 tokenizer = AutoTokenizer.from_pretrained(chatbot_model)
-model = AutoModelForCausalLM.from_pretrained(chatbot_model)
+model = AutoModelForCausalLM.from_pretrained(chatbot_model).to('cpu')  # Ensure model is on CPU
 
-# Load emotion detection model
-emotion_pipeline = pipeline("text-classification", model="bhadresh-savani/distilbert-base-uncased-emotion")
+# Load emotion detection model and move it to CPU explicitly
+emotion_pipeline = pipeline("text-classification", model="bhadresh-savani/distilbert-base-uncased-emotion", device=-1)  # -1 means CPU
 
 # Function to generate chatbot response and emotion analysis
 def generate_response(user_input):
     # Generate chatbot response
-    input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
+    input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt").to('cpu')  # Ensure input tensor is on CPU
     output = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
     response = tokenizer.decode(output[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
 
@@ -25,4 +25,11 @@ def generate_response(user_input):
 
 # Gradio interface setup
 iface = gr.Interface(
-    fn
+    fn=generate_response,
+    inputs=gr.Textbox(label="You:", placeholder="Enter your message here..."),
+    outputs=[gr.Textbox(label="Bot Response"), gr.Textbox(label="Emotion Detected")],
+    live=True
+)
+
+# Launch the app
+iface.launch()
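
For reference, the two device-related idioms introduced by this change can be sanity-checked in isolation: .to('cpu') places the DialoGPT weights on the CPU, and device=-1 is the transformers pipeline argument for CPU execution. Below is a minimal sketch of such a check; the sample sentence and the print statements are illustrative additions and are not part of the Space's app.py.

# Minimal sketch: confirm CPU placement and inspect the emotion pipeline's output shape.
# The sample text below is illustrative only; it does not appear in app.py.
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer

chatbot_model = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(chatbot_model)
model = AutoModelForCausalLM.from_pretrained(chatbot_model).to("cpu")

# Every parameter should now report device "cpu"
print(next(model.parameters()).device)  # cpu

# device=-1 keeps the pipeline on CPU; a non-negative integer would select a GPU index
emotion_pipeline = pipeline(
    "text-classification",
    model="bhadresh-savani/distilbert-base-uncased-emotion",
    device=-1,
)

# text-classification pipelines return a list of dicts with 'label' and 'score' keys
result = emotion_pipeline("I am really glad this finally works!")
print(result)  # e.g. [{'label': 'joy', 'score': 0.98}]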