pratikshahp committed
Commit 6b3dc9b · verified · 1 Parent(s): b1211d7

Update app.py

Files changed (1):
  1. app.py (+37 -49)
app.py CHANGED
@@ -1,21 +1,18 @@
-import os
-from dotenv import load_dotenv
 import gradio as gr
+from guardrail import is_safe  # Import the guardrail validation function
 from langchain_huggingface import HuggingFaceEndpoint
-from together import Together
+from dotenv import load_dotenv
+import os
 
 # Load environment variables
 load_dotenv()
 HF_TOKEN = os.getenv("HF_TOKEN")
-API_KEY = os.getenv("API_KEY")
 
-if not HF_TOKEN or not API_KEY:
-    raise ValueError("Missing API tokens. Please check your .env file.")
+# Validate Hugging Face token
+if not HF_TOKEN:
+    raise ValueError("Missing Hugging Face API token. Please check your .env file.")
 
-# Initialize the Together client for guardrail functionality
-client = Together(api_key=API_KEY)
-
-# Initialize the Hugging Face endpoint for text generation (Mistral model)
+# Initialize the Hugging Face endpoint for generating responses
 llm = HuggingFaceEndpoint(
     repo_id="mistralai/Mistral-7B-Instruct-v0.3",
     huggingfacehub_api_token=HF_TOKEN.strip(),
@@ -23,61 +20,52 @@ llm = HuggingFaceEndpoint(
     max_new_tokens=100
 )
 
-# List of topics or keywords inappropriate for kids under 16
-prohibited_topics = [
-    "violence", "drugs", "explicit content", "profanity", "hate speech", "alcohol",
-    "self-harm", "gambling", "kidnapping","sexual content", "graphic descriptions"
-]
-
-# Function to handle chatbot response with TogetherAI's guardrails
-def chatbot_response_with_guardrails(message):
+# Chatbot response function with safety checks
+def chatbot_response(user_message: str) -> str:
+    """
+    Generates a chatbot response while ensuring the content is safe for children under 16.
+
+    Args:
+        user_message (str): The input message from the user.
+
+    Returns:
+        str: A safe chatbot response or a safety warning message.
+    """
     try:
-        # Step 1: Generate raw response using Mistral model
-        raw_response = llm.invoke(message)
+        # Step 1: Validate the user input
+        if not is_safe(user_message):
+            return "Sorry, I cannot respond to that as it violates our safety policy."
 
-        # Step 2: Use TogetherAI's guardrail model to check the response
-        response = client.completions.create(
-            model="Meta-Llama/LlamaGuard-2-8b",
-            prompt=f"Check the following text for safety: {raw_response}"
-        )
+        # Step 2: Generate a response using the Mistral model
+        raw_response = llm.invoke(user_message)
 
-        # Extract the response from TogetherAI's guardrail model
-        guardrail_check = response.choices[0].text.strip()
+        # Step 3: Validate the generated response
+        if not is_safe(raw_response):
+            return "Sorry, I cannot share that information as it violates our safety policy."
 
-        # Step 3: Check for inappropriate content
-        if 'toxic' in guardrail_check.lower() or any(
-            topic in guardrail_check.lower() for topic in prohibited_topics
-        ):
-            return "Sorry, the content is not suitable for children under 16."
-
-        # Step 4: Check raw response for prohibited topics
-        if any(topic in raw_response.lower() for topic in prohibited_topics):
-            return "Sorry, the content is not suitable for children under 16."
-
-        # If the response is safe, return the raw response
+        # Step 4: Return the validated response
        return raw_response
 
     except Exception as e:
-        return f"Error: {e}"
+        return f"An error occurred: {str(e)}"
 
-# Gradio Interface for Chatbot with Guardrails
-with gr.Blocks() as app_with_guardrails:
-    gr.Markdown("## Chatbot With Kid-Safe Guardrails")
-    gr.Markdown(
-        "This chatbot ensures all responses are appropriate for children under 16."
-    )
+# Gradio Interface for the chatbot
+with gr.Blocks() as app:
+    gr.Markdown("## Kid-Safe Chatbot 🛡️")
+    gr.Markdown("This chatbot ensures that all responses are appropriate for children under 16.")
 
     with gr.Row():
-        user_input = gr.Textbox(label="Your Message", placeholder="Type here...")
-        response_output = gr.Textbox(label="Guarded Response", placeholder="Bot will respond here...")
+        user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
+        response_output = gr.Textbox(label="Chatbot Response", placeholder="The chatbot will respond here.")
         submit_button = gr.Button("Send")
 
+    # On button click, generate response
     submit_button.click(
-        chatbot_response_with_guardrails,
+        fn=chatbot_response,
         inputs=[user_input],
         outputs=[response_output]
     )
 
 # Launch the app
 if __name__ == "__main__":
-    app_with_guardrails.launch()
+    app.launch()
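
Note on the new dependency: the updated app.py routes both the user's message and the model's output through `is_safe` from a local `guardrail` module, which is not included in this commit. The function name and signature come from the import above; everything else below is an assumption. A minimal sketch of what such a module could look like, assuming a plain keyword filter built from the `prohibited_topics` list the previous version of app.py kept inline:

```python
# guardrail.py — hypothetical sketch; the actual module is not shown in this commit.
# Assumes a simple keyword filter reusing the prohibited_topics list that the
# old app.py embedded inline; a model-based classifier could replace it later.

PROHIBITED_TOPICS = [
    "violence", "drugs", "explicit content", "profanity", "hate speech", "alcohol",
    "self-harm", "gambling", "kidnapping", "sexual content", "graphic descriptions",
]

def is_safe(text: str) -> bool:
    """Return True if the text mentions none of the prohibited topics."""
    lowered = text.lower()
    return not any(topic in lowered for topic in PROHIBITED_TOPICS)
```

Whatever the real implementation, centralizing the check in one function means input and output pass through the same policy, and the filter can be swapped out (for example, back to a LlamaGuard-style classifier like the Together AI call this commit removes) without touching app.py.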