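# Kid-safe Gradio chatbot: generates replies with a Hugging Face Mistral
# endpoint and screens them with Together AI's Llama Guard model before
# showing them to the user.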
import os
from dotenv import load_dotenv
import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint
from together import Together
# Load environment variables
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")
API_KEY = os.getenv("API_KEY")
if not HF_TOKEN or not API_KEY:
    raise ValueError("Missing API tokens. Please check your .env file.")
# Initialize the Together client for guardrail functionality
client = Together(api_key=API_KEY)
# Initialize the Hugging Face endpoint for text generation (Mistral model)
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    huggingfacehub_api_token=HF_TOKEN.strip(),
    temperature=0.7,
    max_new_tokens=100
)
# List of topics or keywords inappropriate for kids under 16
prohibited_topics = [
"violence", "drugs", "explicit content", "profanity", "hate speech",
"self-harm", "gambling", "kidnapping","sexual content", "graphic descriptions"
]
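# Plain substring matching is a coarse fallback filter; the Llama Guard
# check below does the primary safety screening.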
# Function to handle chatbot response with TogetherAI's guardrails
def chatbot_response_with_guardrails(message):
    try:
        # Step 1: Generate the raw response with the Mistral model
        raw_response = llm.invoke(message)
        # Step 2: Screen the response with Together's Llama Guard 2 model.
        # The chat endpoint applies Llama Guard's own prompt template, and the
        # model typically replies "safe", or "unsafe" plus a category code.
        response = client.chat.completions.create(
            model="meta-llama/LlamaGuard-2-8b",
            messages=[{"role": "user", "content": raw_response}]
        )
        guardrail_check = response.choices[0].message.content.strip()
        # Step 3: Block anything the guardrail flags as unsafe
        if guardrail_check.lower().startswith("unsafe"):
            return "Sorry, the content is not suitable for children under 16."
        # Step 4: Fall back to a keyword check on the raw response itself
        if any(topic in raw_response.lower() for topic in prohibited_topics):
            return "Sorry, the content is not suitable for children under 16."
        # The response passed both checks; return it unchanged
        return raw_response
    except Exception as e:
        return f"Error: {e}"
# Gradio Interface for Chatbot with Guardrails
with gr.Blocks() as app_with_guardrails:
gr.Markdown("## Chatbot With Kid-Safe Guardrails")
gr.Markdown(
"This chatbot ensures all responses are appropriate for children under 16."
)
with gr.Row():
user_input = gr.Textbox(label="Your Message", placeholder="Type here...")
response_output = gr.Textbox(label="Guarded Response", placeholder="Bot will respond here...")
submit_button = gr.Button("Send")
submit_button.click(
chatbot_response_with_guardrails,
inputs=[user_input],
outputs=[response_output]
)
# Launch the app
if __name__ == "__main__":
    app_with_guardrails.launch()