safwansajad committed
Commit 4e108a8 · verified · 1 Parent(s): ffa3f44

Update app.py

Files changed (1)
  1. app.py +62 -37
app.py CHANGED
@@ -1,40 +1,65 @@
  import gradio as gr
- import requests
-
- # Define the sentiment analysis function that communicates with the Hugging Face model API
- def analyze_sentiment(message: str):
-     try:
-         # Send request to the Hugging Face Space for sentiment analysis
-         response = requests.post(
-             'https://safwansajad-emotion-detection-gpt.hf.space/predict',
-             json={'text': message},
-             headers={'Content-Type': 'application/json'}
-         )
-
-         # Extract the sentiment and score from the response
-         data = response.json()
-
-         # Return the label and score in the format expected by your app
-         if 'emotion' in data and 'score' in data:
-             return [{"label": data['emotion'], "score": data['score']}]
-         else:
-             return [{"label": "Unknown", "score": 0}]
-
-     except Exception as e:
-         print(f"Error during sentiment analysis: {e}")
-         return [{"label": "Error", "score": 0}]
-
- # Set up Gradio interface
- iface = gr.Interface(
-     fn=analyze_sentiment,  # Function to be called
-     inputs=gr.Textbox(label="Enter your message", placeholder="How are you feeling today?", lines=2),  # User input
-     outputs=gr.JSON(),  # Output in JSON format (label and score)
-     live=True,  # Enables live input processing
-     title="Sentiment Analysis with SerenityAI",  # Title of the interface
-     description="Enter a message and get feedback about your emotional state. Your feelings matter!",
-     theme="huggingface",  # Optionally set the theme to Hugging Face's style
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+
+ # Load models
+ chatbot_model = "microsoft/DialoGPT-medium"
+ tokenizer = AutoTokenizer.from_pretrained(chatbot_model)
+ model = AutoModelForCausalLM.from_pretrained(chatbot_model)
+ emotion_pipeline = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base")
+
+ # Store chat histories
+ chat_histories = {}
+
+ def chatbot_response(message, session_id="default"):
+     if session_id not in chat_histories:
+         chat_histories[session_id] = []
+
+     # Generate response
+     input_ids = tokenizer.encode(message + tokenizer.eos_token, return_tensors="pt")
+     output = model.generate(input_ids, max_length=200, pad_token_id=tokenizer.eos_token_id)
+     response = tokenizer.decode(output[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
+
+     # Detect emotion
+     emotion_result = emotion_pipeline(message)
+     emotion = emotion_result[0]["label"]
+     score = float(emotion_result[0]["score"])
+
+     # Store history
+     chat_histories[session_id].append((message, response))
+     return response, emotion, score
+
+ # ------------------ Web Interface ------------------
+ with gr.Blocks() as demo:
+     gr.Markdown("# 🤖 Mental Health Chatbot")
+     with gr.Row():
+         with gr.Column():
+             chatbot = gr.Chatbot()
+             msg = gr.Textbox(label="Your Message")
+             session_id = gr.Textbox(label="Session ID", value="default")
+             btn = gr.Button("Send")
+             clear_btn = gr.Button("Clear History")
+         with gr.Column():
+             emotion_out = gr.Textbox(label="Detected Emotion")
+             score_out = gr.Number(label="Confidence Score")
+
+     def respond(message, chat_history, session_id):
+         response, emotion, score = chatbot_response(message, session_id)
+         chat_history.append((message, response))
+         return "", chat_history, emotion, score
+
+     btn.click(respond, [msg, chatbot, session_id], [msg, chatbot, emotion_out, score_out])
+     msg.submit(respond, [msg, chatbot, session_id], [msg, chatbot, emotion_out, score_out])
+     clear_btn.click(lambda s_id: ([], "", 0.0) if s_id in chat_histories else ([], "", 0.0),
+                     inputs=[session_id],
+                     outputs=[chatbot, emotion_out, score_out])
+
+ # ------------------ API Endpoint for /api/predict ------------------
+ predict_api = gr.Interface(
+     fn=chatbot_response,
+     inputs=[gr.Textbox(label="Message"), gr.Textbox(label="Session ID")],
+     outputs=[gr.Textbox(label="Response"), gr.Textbox(label="Emotion"), gr.Number(label="Score")]
  )

- # Launch the Gradio app
- if __name__ == "__main__":
-     iface.launch(share=True)  # share=True gives you a public link
+ # ------------------ Launch for Gradio Spaces ------------------
+ demo.launch()
+ predict_api.launch(inline=False)
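
Two details of the new script are worth flagging when reading this diff. demo.launch() normally blocks the script, so the second launch for predict_api will typically never run on a Space (and only one port is exposed anyway), and the Clear History lambda returns the same tuple on both branches, so it resets the widgets but never removes the stored entry from chat_histories. A minimal sketch of one way to handle both is below; it is not what the commit does, and the tabbed layout is an assumption.

# Sketch only, not part of this commit.
# The clear handler would replace the clear_btn lambda inside the existing
# `with gr.Blocks() as demo:` block.
def clear_history(s_id):
    chat_histories.pop(s_id, None)  # also drop the stored transcript for this session
    return [], "", 0.0              # reset the chatbot, emotion, and score widgets

clear_btn.click(clear_history, inputs=[session_id], outputs=[chatbot, emotion_out, score_out])

# At module level, a single launch can serve the chat UI and the plain
# prediction interface together instead of two separate launch() calls.
gr.TabbedInterface([demo, predict_api], ["Chat", "API"]).launch()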
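
For callers migrating from the removed requests.post() approach, the gradio_client package is the usual way to call a Space's endpoints. The sketch below is an assumption rather than part of this commit: the Space id is a placeholder, and it presumes the (message, session_id) -> (response, emotion, score) signature of chatbot_response is what ends up exposed; client.view_api() lists the endpoints the deployed app actually has.

# Hypothetical client-side usage, assuming the chatbot_response signature is exposed.
from gradio_client import Client

client = Client("your-username/your-space")  # placeholder Space id
client.view_api()                            # list the endpoints actually available

# For a served gr.Interface, the default endpoint name is "/predict".
response, emotion, score = client.predict(
    "I feel a bit anxious today",  # message
    "default",                     # session_id
    api_name="/predict",
)
print(response, emotion, score)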