Shabi23 commited on
Commit
17db205
·
verified ·
1 Parent(s): 4d76d22

Rename app.py to application.py

Browse files
Files changed (1) hide show
  1. app.py → application.py +101 -101
app.py → application.py RENAMED
@@ -1,101 +1,101 @@
1
- import os
2
- from flask import Flask, request, jsonify, render_template, send_from_directory
3
- from huggingface_hub import InferenceClient
4
- from datasets import load_dataset
5
- import markdown2
6
- import signal
7
-
8
- os.environ["HF_HOME"] = "/app/.cache"
9
-
10
- application = Flask(__name__, static_folder='static', template_folder='templates')
11
-
12
- hf_token = os.getenv("HF_TOKEN")
13
-
14
- chat_doctor_dataset = load_dataset("avaliev/chat_doctor")
15
- mental_health_dataset = load_dataset("Amod/mental_health_counseling_conversations")
16
-
17
- client = InferenceClient(
18
- "meta-llama/Meta-Llama-3-8B-Instruct",
19
- token=hf_token,
20
- )
21
-
22
- def select_relevant_context(user_input):
23
- mental_health_keywords = [
24
- "anxious", "depressed", "stress", "mental health", "counseling",
25
- "therapy", "feelings", "worthless", "suicidal", "panic", "anxiety"
26
- ]
27
- medical_keywords = [
28
- "symptoms", "diagnosis", "treatment", "doctor", "prescription", "medication",
29
- "pain", "illness", "disease", "infection", "surgery"
30
- ]
31
-
32
- # Check if the input contains any mental health-related keywords
33
- if any(keyword in user_input.lower() for keyword in mental_health_keywords):
34
- example = mental_health_dataset['train'][0]
35
- context = f"Counselor: {example['Response']}\nUser: {example['Context']}"
36
- # Check if the input contains any medical-related keywords
37
- elif any(keyword in user_input.lower() for keyword in medical_keywords):
38
- example = chat_doctor_dataset['train'][0]
39
- context = f"Doctor: {example['input']}\nPatient: {example['output']}"
40
- else:
41
- # If no specific keywords are found, provide a general response
42
- context = "You are a general assistant. Respond to the user's query in a helpful manner."
43
-
44
- return context
45
-
46
- def create_prompt(context, user_input):
47
- prompt = (
48
- f"{context}\n\n"
49
- f"User: {user_input}\nAssistant:"
50
- )
51
- return prompt
52
-
53
- # Function to render Markdown into HTML
54
- def render_markdown(text):
55
- return markdown2.markdown(text)
56
-
57
- @application.route('/')
58
- def index():
59
- return render_template('index.html')
60
-
61
- @application.route('/static/<path:path>')
62
- def send_static(path):
63
- return send_from_directory('static', path)
64
-
65
- @application.route('/chat', methods=['POST'])
66
- def chat():
67
- user_input = request.json['message']
68
-
69
- context = select_relevant_context(user_input)
70
-
71
- prompt = create_prompt(context, user_input)
72
-
73
- response = ""
74
- for message in client.chat_completion(
75
- messages=[{"role": "user", "content": prompt}],
76
- max_tokens=500,
77
- stream=True,
78
- ):
79
- response += message.choices[0].delta.content
80
-
81
- formatted_response = render_markdown(response)
82
-
83
- return jsonify({"response": formatted_response})
84
-
85
- @application.route('/shutdown', methods=['POST'])
86
- def shutdown():
87
- if request.environ.get('werkzeug.server.shutdown'):
88
- shutdown_server()
89
- else:
90
- os.kill(os.getpid(), signal.SIGINT)
91
- return jsonify({"message": "Server is shutting down..."})
92
-
93
- def shutdown_server():
94
- func = request.environ.get('werkzeug.server.shutdown')
95
- if func is None:
96
- os.kill(os.getpid(), signal.SIGINT) # Kill the process if Werkzeug is not available
97
- else:
98
- func()
99
-
100
- if __name__ == '__main__':
101
- application.run(host='0.0.0.0', port=8080)
 
1
+ import os
2
+ from flask import Flask, request, jsonify, render_template, send_from_directory
3
+ from huggingface_hub import InferenceClient
4
+ from datasets import load_dataset
5
+ import markdown2
6
+ import signal
7
+
8
# Cache directory for Hugging Face model/dataset downloads inside the container.
# NOTE(review): huggingface_hub/datasets are imported above this line and may
# read HF_HOME at import time — setting it here could be too late; confirm.
os.environ["HF_HOME"] = "/app/.cache"

# WSGI entry point; `application` is the name many hosts look for by default.
application = Flask(__name__, static_folder='static', template_folder='templates')

# NOTE(review): may be None when HF_TOKEN is unset — the InferenceClient would
# then make unauthenticated (possibly rejected) calls; verify deployment config.
hf_token = os.getenv("HF_TOKEN")

# Example conversations used to seed prompt context (loaded eagerly at import).
chat_doctor_dataset = load_dataset("avaliev/chat_doctor")
mental_health_dataset = load_dataset("Amod/mental_health_counseling_conversations")

# Hosted inference client for the Llama 3 8B instruct model.
client = InferenceClient(
    "meta-llama/Meta-Llama-3-8B-Instruct",
    token=hf_token,
)
21
+
22
def select_relevant_context(user_input):
    """Pick a seed conversation context based on keywords in *user_input*.

    Returns a string holding one example exchange from the matching dataset
    (mental-health keywords take priority over medical ones), or a generic
    assistant instruction when no keyword matches.
    """
    mental_health_keywords = [
        "anxious", "depressed", "stress", "mental health", "counseling",
        "therapy", "feelings", "worthless", "suicidal", "panic", "anxiety",
    ]
    medical_keywords = [
        "symptoms", "diagnosis", "treatment", "doctor", "prescription", "medication",
        "pain", "illness", "disease", "infection", "surgery",
    ]

    # Lowercase once instead of once per branch.
    lowered = user_input.lower()

    # Check if the input contains any mental health-related keywords
    if any(keyword in lowered for keyword in mental_health_keywords):
        example = mental_health_dataset['train'][0]
        context = f"Counselor: {example['Response']}\nUser: {example['Context']}"
    # Check if the input contains any medical-related keywords
    elif any(keyword in lowered for keyword in medical_keywords):
        example = chat_doctor_dataset['train'][0]
        context = f"Doctor: {example['input']}\nPatient: {example['output']}"
    else:
        # If no specific keywords are found, provide a general response
        context = "You are a general assistant. Respond to the user's query in a helpful manner."

    return context
45
+
46
def create_prompt(context, user_input):
    """Assemble the final LLM prompt from a context block and the user's message."""
    return f"{context}\n\nUser: {user_input}\nAssistant:"
52
+
53
# Convert model output (Markdown) into HTML for the web UI.
def render_markdown(text):
    """Return *text* rendered from Markdown to HTML."""
    html = markdown2.markdown(text)
    return html
56
+
57
@application.route('/')
def index():
    """Serve the chat UI's main page (templates/index.html)."""
    return render_template('index.html')
60
+
61
@application.route('/static/<path:path>')
def send_static(path):
    """Serve a file from the local static/ directory by relative path."""
    return send_from_directory('static', path)
64
+
65
@application.route('/chat', methods=['POST'])
def chat():
    """Handle one chat turn: build a prompt, stream the LLM reply, return HTML.

    Expects a JSON body of the form {"message": "..."} and responds with
    {"response": "<rendered html>"}.
    """
    user_input = request.json['message']

    # Choose a dataset-seeded (or generic) context for the prompt.
    context = select_relevant_context(user_input)

    prompt = create_prompt(context, user_input)

    # Accumulate the streamed completion. The terminal stream chunk can carry
    # a None delta.content, so guard the concatenation against TypeError.
    response = ""
    for message in client.chat_completion(
        messages=[{"role": "user", "content": prompt}],
        max_tokens=500,
        stream=True,
    ):
        response += message.choices[0].delta.content or ""

    formatted_response = render_markdown(response)

    return jsonify({"response": formatted_response})
84
+
85
@application.route('/shutdown', methods=['POST'])
def shutdown():
    """Stop the server on request.

    Delegates to shutdown_server(), which already prefers Werkzeug's shutdown
    hook and falls back to SIGINT — the original inline environ check
    duplicated that logic.
    """
    shutdown_server()
    return jsonify({"message": "Server is shutting down..."})
92
+
93
def shutdown_server():
    """Shut down the running server.

    Prefers the Werkzeug dev-server shutdown callable exposed via the request
    environ; when absent (non-Werkzeug server), sends SIGINT to this process.
    Must be called inside a request context, since it reads `request.environ`.
    """
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        os.kill(os.getpid(), signal.SIGINT)  # Kill the process if Werkzeug is not available
    else:
        func()
99
+
100
if __name__ == '__main__':
    # NOTE(review): Hugging Face Spaces expects port 7860 by default — 7680
    # looks like a digit-transposition typo (previous revision used 8080);
    # confirm the intended port before deploying.
    application.run(host='0.0.0.0', port=7680)