Shabi23 committed on
Commit
8110039
·
verified ·
1 Parent(s): 1593a5d

Upload application.py

Browse files
Files changed (1) hide show
  1. application.py +101 -99
application.py CHANGED
@@ -1,99 +1,101 @@
1
- import os
2
- from flask import Flask, request, jsonify, render_template, send_from_directory
3
- from huggingface_hub import InferenceClient
4
- from datasets import load_dataset
5
- import markdown2
6
- import signal
7
-
8
- application = Flask(__name__, static_folder='static', template_folder='templates')
9
-
10
- hf_token = os.getenv("HF_TOKEN")
11
-
12
- chat_doctor_dataset = load_dataset("avaliev/chat_doctor")
13
- mental_health_dataset = load_dataset("Amod/mental_health_counseling_conversations")
14
-
15
- client = InferenceClient(
16
- "meta-llama/Meta-Llama-3-8B-Instruct",
17
- token=hf_token,
18
- )
19
-
20
- def select_relevant_context(user_input):
21
- mental_health_keywords = [
22
- "anxious", "depressed", "stress", "mental health", "counseling",
23
- "therapy", "feelings", "worthless", "suicidal", "panic", "anxiety"
24
- ]
25
- medical_keywords = [
26
- "symptoms", "diagnosis", "treatment", "doctor", "prescription", "medication",
27
- "pain", "illness", "disease", "infection", "surgery"
28
- ]
29
-
30
- # Check if the input contains any mental health-related keywords
31
- if any(keyword in user_input.lower() for keyword in mental_health_keywords):
32
- example = mental_health_dataset['train'][0]
33
- context = f"Counselor: {example['Response']}\nUser: {example['Context']}"
34
- # Check if the input contains any medical-related keywords
35
- elif any(keyword in user_input.lower() for keyword in medical_keywords):
36
- example = chat_doctor_dataset['train'][0]
37
- context = f"Doctor: {example['input']}\nPatient: {example['output']}"
38
- else:
39
- # If no specific keywords are found, provide a general response
40
- context = "You are a general assistant. Respond to the user's query in a helpful manner."
41
-
42
- return context
43
-
44
- def create_prompt(context, user_input):
45
- prompt = (
46
- f"{context}\n\n"
47
- f"User: {user_input}\nAssistant:"
48
- )
49
- return prompt
50
-
51
- # Function to render Markdown into HTML
52
- def render_markdown(text):
53
- return markdown2.markdown(text)
54
-
55
- @application.route('/')
56
- def index():
57
- return render_template('index.html')
58
-
59
- @application.route('/static/<path:path>')
60
- def send_static(path):
61
- return send_from_directory('static', path)
62
-
63
- @application.route('/chat', methods=['POST'])
64
- def chat():
65
- user_input = request.json['message']
66
-
67
- context = select_relevant_context(user_input)
68
-
69
- prompt = create_prompt(context, user_input)
70
-
71
- response = ""
72
- for message in client.chat_completion(
73
- messages=[{"role": "user", "content": prompt}],
74
- max_tokens=500,
75
- stream=True,
76
- ):
77
- response += message.choices[0].delta.content
78
-
79
- formatted_response = render_markdown(response)
80
-
81
- return jsonify({"response": formatted_response})
82
-
83
- @application.route('/shutdown', methods=['POST'])
84
- def shutdown():
85
- if request.environ.get('werkzeug.server.shutdown'):
86
- shutdown_server()
87
- else:
88
- os.kill(os.getpid(), signal.SIGINT)
89
- return jsonify({"message": "Server is shutting down..."})
90
-
91
- def shutdown_server():
92
- func = request.environ.get('werkzeug.server.shutdown')
93
- if func is None:
94
- os.kill(os.getpid(), signal.SIGINT) # Kill the process if Werkzeug is not available
95
- else:
96
- func()
97
-
98
- if __name__ == '__main__':
99
- application.run(debug=False)
 
 
 
1
import os

# NOTE: HF_HOME must be exported *before* huggingface_hub/datasets are
# imported — those libraries read it when initialising their cache paths, so
# setting it after the imports (as the previous revision did) likely has no
# effect on where data is cached.
os.environ["HF_HOME"] = "/app/.cache"

from flask import Flask, request, jsonify, render_template, send_from_directory
from huggingface_hub import InferenceClient
from datasets import load_dataset
import markdown2
import signal

application = Flask(__name__, static_folder='static', template_folder='templates')

# Inference API token comes from the environment so it is never hard-coded.
hf_token = os.getenv("HF_TOKEN")

# Example conversations used to prime the model with domain-relevant context.
chat_doctor_dataset = load_dataset("avaliev/chat_doctor")
mental_health_dataset = load_dataset("Amod/mental_health_counseling_conversations")

client = InferenceClient(
    "meta-llama/Meta-Llama-3-8B-Instruct",
    token=hf_token,
)
21
+
22
def select_relevant_context(user_input):
    """Pick a few-shot context string based on keywords in ``user_input``.

    Returns a mental-health counseling example, a doctor/patient example, or
    a generic assistant instruction when no domain keyword matches.
    """
    mental_health_keywords = [
        "anxious", "depressed", "stress", "mental health", "counseling",
        "therapy", "feelings", "worthless", "suicidal", "panic", "anxiety"
    ]
    medical_keywords = [
        "symptoms", "diagnosis", "treatment", "doctor", "prescription", "medication",
        "pain", "illness", "disease", "infection", "surgery"
    ]

    # Lowercase once instead of once per membership test.
    text = user_input.lower()

    if any(keyword in text for keyword in mental_health_keywords):
        # NOTE(review): the labels look reversed — 'Response' is rendered as
        # the counselor turn and 'Context' as the user turn; verify against
        # the dataset schema before "fixing".
        example = mental_health_dataset['train'][0]
        context = f"Counselor: {example['Response']}\nUser: {example['Context']}"
    elif any(keyword in text for keyword in medical_keywords):
        # Same caveat: 'input'/'output' roles are assumed, not verified here.
        example = chat_doctor_dataset['train'][0]
        context = f"Doctor: {example['input']}\nPatient: {example['output']}"
    else:
        # No domain keywords found: fall back to a generic instruction.
        context = "You are a general assistant. Respond to the user's query in a helpful manner."

    return context
45
+
46
def create_prompt(context, user_input):
    """Assemble the full prompt: context block, blank line, then the user turn."""
    return f"{context}\n\nUser: {user_input}\nAssistant:"
52
+
53
# Convert model output (Markdown) into HTML for the front-end.
def render_markdown(text):
    """Return ``text`` rendered from Markdown to HTML via markdown2."""
    html = markdown2.markdown(text)
    return html
56
+
57
@application.route('/')
def index():
    """Serve the single-page chat UI from the templates directory."""
    return render_template('index.html')
60
+
61
@application.route('/static/<path:path>')
def send_static(path):
    """Serve files from the local ``static`` directory."""
    return send_from_directory('static', path)
64
+
65
@application.route('/chat', methods=['POST'])
def chat():
    """Handle one chat turn: build a prompt, stream the model reply, return HTML.

    Expects JSON ``{"message": <str>}``; responds with
    ``{"response": <HTML-rendered reply>}``, or HTTP 400 on a bad request.
    """
    # request.json['message'] raised KeyError (-> HTTP 500) on malformed
    # bodies; validate explicitly and fail with a clear 400 instead.
    payload = request.get_json(silent=True) or {}
    user_input = payload.get('message')
    if not user_input:
        return jsonify({"error": "Request JSON must include a non-empty 'message'."}), 400

    context = select_relevant_context(user_input)
    prompt = create_prompt(context, user_input)

    # Accumulate streamed chunks in a list and join once: the final delta's
    # content can be None (which crashed the old `+=`), and repeated string
    # concatenation is quadratic.
    chunks = []
    for message in client.chat_completion(
        messages=[{"role": "user", "content": prompt}],
        max_tokens=500,
        stream=True,
    ):
        content = message.choices[0].delta.content
        if content:
            chunks.append(content)
    response = "".join(chunks)

    formatted_response = render_markdown(response)

    return jsonify({"response": formatted_response})
84
+
85
@application.route('/shutdown', methods=['POST'])
def shutdown():
    """Stop the server.

    Delegates to shutdown_server(), which already handles both the Werkzeug
    dev-server hook and the SIGINT fallback — the old inline environ check
    duplicated that logic.

    NOTE(review): when the SIGINT fallback fires, the process may die before
    this response is flushed to the client.
    """
    shutdown_server()
    return jsonify({"message": "Server is shutting down..."})
92
+
93
def shutdown_server():
    """Shut down the running server.

    Uses Werkzeug's shutdown hook when present in the request environ;
    otherwise interrupts our own process with SIGINT.
    """
    werkzeug_stop = request.environ.get('werkzeug.server.shutdown')
    if werkzeug_stop is not None:
        werkzeug_stop()
    else:
        # Not running under the Werkzeug dev server: interrupt the process.
        os.kill(os.getpid(), signal.SIGINT)
99
+
100
# Local entry point; a production deployment would serve `application` via a
# WSGI server instead. Debug mode is off so the Werkzeug debugger/reloader
# never runs in a deployed container.
if __name__ == '__main__':
    application.run(debug=False)