Phoenix21 commited on
Commit
c555c22
·
verified ·
1 Parent(s): 9fde2fd

Modified classification prompt to use ChatPromptTemplate

Browse files
Files changed (1) hide show
  1. prompts.py +6 -5
prompts.py CHANGED
@@ -1,4 +1,4 @@
1
- from langchain.prompts import PromptTemplate
2
 
3
  # Define a list of harmful or inappropriate topics to block
4
  HARMFUL_TOPICS = ["kill", "suicide", "harm", "death", "murder", "violence"]
@@ -10,7 +10,6 @@ You are a helpful assistant that classifies user questions into three categories
10
  3) "OutOfScope" if it does not fall into the above two categories or if the question involves harmful topics.
11
  **Response format**:
12
  Reply exactly with one word: "Wellness", "Brand", or "OutOfScope". Do not provide any additional explanation.
13
- Question: {query}
14
  """
15
 
16
  tailor_prompt_str = """
@@ -57,9 +56,11 @@ For more wellness-related questions or to learn more about DailyWellnessAI, feel
57
 
58
  # Define the PromptTemplate objects:
59
 
60
- classification_prompt = PromptTemplate(
61
- template=classification_prompt_str,
62
- input_variables=["query"]
 
 
63
  )
64
 
65
  tailor_prompt = PromptTemplate(
 
1
+ from langchain_core.prompts import ChatPromptTemplate
2
 
3
  # Define a list of harmful or inappropriate topics to block
4
  HARMFUL_TOPICS = ["kill", "suicide", "harm", "death", "murder", "violence"]
 
10
  3) "OutOfScope" if it does not fall into the above two categories or if the question involves harmful topics.
11
  **Response format**:
12
  Reply exactly with one word: "Wellness", "Brand", or "OutOfScope". Do not provide any additional explanation.
 
13
  """
14
 
15
  tailor_prompt_str = """
 
56
 
57
  # Define the PromptTemplate objects:
58
 
59
# Chat-style classification prompt: the classifier instructions live in the
# system message, and the raw user question is injected as the {query}
# variable of the human message.
classification_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", classification_prompt_str),
        ("user", "{query}"),
    ]
)
65
 
66
  tailor_prompt = PromptTemplate(