Shabi23 committed
Commit 9a066f9 · verified · 1 Parent(s): bd12ab8

Update application.py

Files changed (1): application.py (+2, -22)
application.py CHANGED
@@ -3,7 +3,6 @@ from flask import Flask, request, jsonify, render_template, send_from_directory
 from huggingface_hub import InferenceClient
 from datasets import load_dataset
 import markdown2
-import signal
 
 os.environ["HF_HOME"] = "/app/.cache"
 
@@ -29,16 +28,13 @@ def select_relevant_context(user_input):
         "pain", "illness", "disease", "infection", "surgery"
     ]
 
-    # Check if the input contains any mental health-related keywords
     if any(keyword in user_input.lower() for keyword in mental_health_keywords):
         example = mental_health_dataset['train'][0]
         context = f"Counselor: {example['Response']}\nUser: {example['Context']}"
-    # Check if the input contains any medical-related keywords
     elif any(keyword in user_input.lower() for keyword in medical_keywords):
         example = chat_doctor_dataset['train'][0]
         context = f"Doctor: {example['input']}\nPatient: {example['output']}"
     else:
-        # If no specific keywords are found, provide a general response
         context = "You are a general assistant. Respond to the user's query in a helpful manner."
 
     return context
@@ -50,7 +46,6 @@ def create_prompt(context, user_input):
     )
     return prompt
 
-# Function to render Markdown into HTML
def render_markdown(text):
     return markdown2.markdown(text)
 
@@ -65,9 +60,7 @@ def send_static(path):
 @app.route('/chat', methods=['POST'])
 def chat():
     user_input = request.json['message']
-
     context = select_relevant_context(user_input)
-
     prompt = create_prompt(context, user_input)
 
     response = ""
@@ -82,18 +75,5 @@ def chat():
 
     return jsonify({"response": formatted_response})
 
-@app.route('/shutdown', methods=['POST'])
-def shutdown():
-    if request.environ.get('werkzeug.server.shutdown'):
-        shutdown_server()
-    else:
-        os.kill(os.getpid(), signal.SIGINT)
-    return jsonify({"message": "Server is shutting down..."})
-
-def shutdown_server():
-    func = request.environ.get('werkzeug.server.shutdown')
-    if func is None:
-        os.kill(os.getpid(), signal.SIGINT)  # Kill the process if Werkzeug is not available
-    else:
-        func()
-
+if __name__ == '__main__':
+    app.run(debug=False)
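
For reference, a minimal sketch of how the /chat endpoint could be exercised once the app is started through the new __main__ guard (e.g. `python application.py`). The request shape (JSON body with a "message" key, JSON reply with a "response" key) comes from the diff above; the host and port are assumptions based on Flask's default development server (127.0.0.1:5000) and are not stated in the commit.

```python
# Hypothetical smoke test for the /chat endpoint.
# Assumes the app is running on Flask's default development address.
import requests

def ask(message: str) -> str:
    # POST a JSON payload matching what chat() reads via request.json['message']
    resp = requests.post(
        "http://127.0.0.1:5000/chat",  # assumed default host/port
        json={"message": message},
        timeout=30,
    )
    resp.raise_for_status()
    # The endpoint returns {"response": <HTML rendered from Markdown>}
    return resp.json()["response"]

if __name__ == "__main__":
    print(ask("I have been feeling anxious and stressed lately."))
```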