# DailyWellnessAI — prompt templates and query-classification helpers.
import re

from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
# Define a list of harmful or inappropriate topics to block.
# Keyword blocklist consulted by is_harmful(); comparison there is
# case-insensitive, so lowercase entries suffice.
HARMFUL_TOPICS = ["kill", "suicide", "harm", "death", "murder", "violence"]
# System prompt for the router: forces the model to answer with exactly one
# of the three category labels consumed by classify_query().
classification_prompt_str = """
You are a helpful assistant that classifies user questions into three categories:
1) "Wellness" if the question involves health, nutrition, fitness, mental well-being, self-care, or research related to these.
2) "Brand" if the question specifically pertains to 'DailyWellnessAI'—its mission, disclaimers, features, policies, etc.
3) "OutOfScope" if it does not fall into the above two categories or if the question involves harmful topics.
**Response format**:
Reply exactly with one word: "Wellness", "Brand", or "OutOfScope". Do not provide any additional explanation.
"""
# Rewriting prompt: takes a draft answer via the {response} placeholder and
# asks the model to simplify it in DailyWellnessAI's voice.
tailor_prompt_str = """
You are a helpful assistant for DailyWellnessAI. Your role is to simplify complex ideas and offer actionable, user-friendly advice that aligns with our mission to enhance daily wellness through AI.
Here's the response to tailor:
{response}
Tailor it to ensure:
- Simplicity and clarity.
- Practicality, with actionable recommendations where appropriate.
- Alignment with DailyWellnessAI's mission of simplifying daily wellness through AI.
Provide the revised, concise response below:
"""
# Synthesis prompt: merges a knowledge-base answer ({kb_answer}) and a web
# search result ({web_answer}) into one de-duplicated response.
cleaner_prompt_str = """
You are a helpful AI. Below, you have two sources of information:
1) CSV (Knowledge Base) Answer:
{kb_answer}
2) Web Search Result:
{web_answer}
Your task:
- Combine and synthesize the two into a single, clear, cohesive answer.
- Avoid duplication or irrelevant details.
- Present the final response in straightforward, user-friendly language.
Do not repeat content verbatim. Merge the information meaningfully and provide your synthesized answer below:
"""
# Refusal prompt with dynamic topic insertion.
# The {topic} placeholder appears four times: twice in the decision rules,
# once as the topic to refuse, and once inside the example reply.
# Typos in the original prompt text ("higlighting", "succint", assorted
# grammar) are fixed here since this literal is fed directly to the LLM.
refusal_prompt_str = """
You are a helpful assistant for DailyWellnessAI. Your role is to work as a refusal agent and politely refuse the user, highlighting the reason:
1. If the {topic} contains gibberish, directly tell the user this input doesn't make sense.
2. If the {topic} makes some sense, then politely refuse it, saying it is not possible to answer the question, giving a succinct response.
Here's the topic to refuse:
{topic}
Tailor it to ensure:
- Simplicity and clarity.
- Practicality, with actionable recommendations where appropriate.
- Alignment with DailyWellnessAI's mission of simplifying daily wellness through AI.
Provide the revised, concise response below and take the following as an example, but keep it concise and start with "NO,":
This question doesn’t directly fall under the categories of daily wellness or questions about the DailyWellnessAI brand.
However, here's something to think about: Did you know that learning about {topic} can actually have a positive impact on your wellness?
It can help promote mindfulness, relaxation, balance, or focus, all of which contribute to your overall well-being.
For more wellness-related questions or to learn more about DailyWellnessAI, feel free to ask—I’m here to support your wellness journey!
"""
# Build the prompt objects from the string templates above.
# Chat-style prompt: classification instructions as the system message,
# the raw user question injected via the {query} placeholder.
classification_prompt = ChatPromptTemplate.from_messages(
    [
        ("system", classification_prompt_str),
        ("user", "{query}"),
    ]
)
# Plain-text prompt for tailoring a draft answer; from_template infers the
# single {response} input variable from the template string.
tailor_prompt = PromptTemplate.from_template(tailor_prompt_str)
# Plain-text prompt for merging KB and web answers; from_template infers the
# {kb_answer} and {web_answer} input variables from the template string.
cleaner_prompt = PromptTemplate.from_template(cleaner_prompt_str)
# Plain-text refusal prompt; from_template infers the {topic} input variable
# from the template string.
refusal_prompt = PromptTemplate.from_template(refusal_prompt_str)
# Define function to check if the query contains harmful topics.
def is_harmful(query: str) -> bool:
    """Return True if *query* mentions any blocked topic as a whole word.

    Matching is case-insensitive and uses word boundaries, so innocuous
    words that merely contain a blocked substring (e.g. "skill" vs "kill",
    "harmony" vs "harm") are no longer flagged — the original substring
    check produced such false positives.
    """
    lowered = query.lower()
    return any(
        re.search(r"\b" + re.escape(topic) + r"\b", lowered)
        for topic in HARMFUL_TOPICS
    )
# Now modify the classification logic:
def classify_query(query: str) -> str:
    """Classify *query* as "Wellness", "Brand", or "OutOfScope".

    Queries matching the HARMFUL_TOPICS blocklist are short-circuited to
    "OutOfScope" without ever reaching the prompt-based classifier.
    """
    if is_harmful(query):
        return "OutOfScope"
    # Use the classification prompt template to classify the query
    # NOTE(review): ChatPromptTemplate.invoke() returns a ChatPromptValue
    # (the formatted messages), not an LLM response — it has no .get(),
    # so this line will raise AttributeError at runtime. The template
    # presumably needs to be piped through an LLM (e.g. prompt | llm)
    # before extracting text. Confirm against the calling code.
    classification = classification_prompt.invoke({"query": query}).get("text", "").strip()
    return classification