# Hugging Face Space: kid-safe chatbot with TogetherAI guardrails.
# (Space status banner at time of capture: Runtime error.)
import os

from dotenv import load_dotenv
import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint
from together import Together

# Load environment variables (HF_TOKEN, API_KEY) from a local .env file.
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
API_KEY = os.getenv("API_KEY")
# Fail fast at startup: both tokens are required for the app to work at all.
if not HF_TOKEN or not API_KEY:
    raise ValueError("Missing API tokens. Please check your .env file.")

# Together client used for the guardrail (safety-classification) call.
client = Together(api_key=API_KEY)

# Hugging Face endpoint for text generation (Mistral instruct model).
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN.strip(),
    temperature=0.7,
    max_new_tokens=100,
)

# Keywords deemed inappropriate for kids under 16; matched as lowercase
# substrings against both the guardrail verdict and the raw model reply.
prohibited_topics = [
    "violence", "drugs", "explicit content", "profanity", "hate speech",
    "self-harm", "gambling", "kidnapping", "sexual content", "graphic descriptions",
]
# Chatbot response pipeline: generate, then screen through a guardrail model.
def chatbot_response_with_guardrails(message):
    """Return a kid-safe chatbot reply for *message*.

    Steps:
      1. Generate a raw reply with the Mistral endpoint.
      2. Ask TogetherAI's LlamaGuard model to classify the reply.
      3. Refuse if the guardrail flags the reply, or if the reply itself
         mentions any prohibited topic; otherwise return the raw reply.

    Returns the raw reply, a refusal message, or an ``"Error: ..."`` string
    when any API call fails (the UI displays whatever string comes back).
    """
    try:
        # Step 1: raw response from the Mistral model.
        raw_response = llm.invoke(message)

        # Step 2: safety classification via LlamaGuard.
        # Together model ids are lowercase ("meta-llama/..."); the original
        # "Meta-Llama/..." casing is not a listed model id and would fail
        # the lookup.
        response = client.completions.create(
            model="meta-llama/LlamaGuard-2-8b",
            prompt=f"Check the following text for safety: {raw_response}",
        )
        guardrail_check = response.choices[0].text.strip().lower()

        # Step 3: LlamaGuard-2 answers "safe" or "unsafe" (plus category
        # codes). The original only searched for "toxic", which LlamaGuard
        # does not emit, so its verdict was effectively ignored; check for
        # "unsafe" as well (keeping the original substring checks).
        if (
            "unsafe" in guardrail_check
            or "toxic" in guardrail_check
            or any(topic in guardrail_check for topic in prohibited_topics)
        ):
            return "Sorry, the content is not suitable for children under 16."

        # Step 4: keyword screen on the raw reply itself.
        if any(topic in raw_response.lower() for topic in prohibited_topics):
            return "Sorry, the content is not suitable for children under 16."

        # Passed both checks: safe to show.
        return raw_response
    except Exception as e:
        # Boundary handler: surface the failure text in the UI rather than
        # crashing the Gradio worker.
        return f"Error: {e}"
# Gradio UI: single-turn chat wired to the guarded response function.
with gr.Blocks() as app_with_guardrails:
    gr.Markdown("## Chatbot With Kid-Safe Guardrails")
    gr.Markdown(
        "This chatbot ensures all responses are appropriate for children under 16."
    )
    with gr.Row():
        user_input = gr.Textbox(label="Your Message", placeholder="Type here...")
        response_output = gr.Textbox(label="Guarded Response", placeholder="Bot will respond here...")
    submit_button = gr.Button("Send")
    # Clicking "Send" runs the guarded pipeline on the user's text.
    submit_button.click(
        chatbot_response_with_guardrails,
        inputs=[user_input],
        outputs=[response_output],
    )
# Launch the Gradio app only when run as a script (not on import).
if __name__ == "__main__":
    app_with_guardrails.launch()