Update app.py
app.py
CHANGED
@@ -9,12 +9,15 @@ load_dotenv()
 HF_TOKEN = os.getenv("HF_TOKEN")
 API_KEY = os.getenv("API_KEY")
 
+if not HF_TOKEN or not API_KEY:
+    raise ValueError("Missing API tokens. Please check your .env file.")
+
 # Initialize the Together client for guardrail functionality
 client = Together(api_key=API_KEY)
 
 # Initialize the Hugging Face endpoint for text generation (Mistral model)
 llm = HuggingFaceEndpoint(
-    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
+    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
     huggingfacehub_api_token=HF_TOKEN.strip(),
     temperature=0.7,
     max_new_tokens=100
@@ -34,15 +37,14 @@ def chatbot_response_with_guardrails(message):
 
         # Step 2: Use TogetherAI's guardrail model to check the response
         response = client.completions.create(
-            model="Meta-Llama/LlamaGuard-2-8b",
-            prompt=raw_response
+            model="Meta-Llama/LlamaGuard-2-8b",
+            prompt=f"Check the following text for safety: {raw_response}"
         )
 
         # Extract the response from TogetherAI's guardrail model
-        guardrail_check = response.choices[0].text.strip
-        print(guardrail_check)
+        guardrail_check = response.choices[0].text.strip()
 
-        # Step 3: Check for inappropriate content
+        # Step 3: Check for inappropriate content
         if 'toxic' in guardrail_check.lower() or any(
             topic in guardrail_check.lower() for topic in prohibited_topics
         ):
@@ -53,8 +55,7 @@ def chatbot_response_with_guardrails(message):
             return "Sorry, the content is not suitable for children under 16."
 
         # If the response is safe, return the raw response
-
-        return response
+        return raw_response
 
     except Exception as e:
         return f"Error: {e}"
@@ -66,13 +67,11 @@ with gr.Blocks() as app_with_guardrails:
         "This chatbot ensures all responses are appropriate for children under 16."
     )
 
-    # Input and output
     with gr.Row():
         user_input = gr.Textbox(label="Your Message", placeholder="Type here...")
         response_output = gr.Textbox(label="Guarded Response", placeholder="Bot will respond here...")
         submit_button = gr.Button("Send")
 
-    # Button click event
     submit_button.click(
        chatbot_response_with_guardrails,
        inputs=[user_input],
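
For readers reconstructing the file: the hunk headers reference load_dotenv(), and the code uses Together, HuggingFaceEndpoint, and gr. Below is a minimal sketch of the import block this implies. The actual imports are not part of the diff, so the module paths here (notably langchain_huggingface) are assumptions:

# Sketch of the imports app.py appears to rely on (not shown in the diff).
import os

import gradio as gr
from dotenv import load_dotenv
from langchain_huggingface import HuggingFaceEndpoint  # assumed import path
from together import Together

load_dotenv()  # populates HF_TOKEN and API_KEY from a local .env file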
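
Assembled, the patched function looks roughly like the sketch below. Only the Step 2 and Step 3 lines are taken verbatim from the commit; the Step 1 call (llm.invoke) and the prohibited_topics list are not shown in the diff and are assumptions here:

# Minimal sketch of the patched flow in app.py.
# Assumptions (not in the diff): llm.invoke() produces the raw reply,
# and prohibited_topics is a list of lowercase keywords defined above.
def chatbot_response_with_guardrails(message):
    try:
        # Step 1: Generate a raw response with the Mistral endpoint
        raw_response = llm.invoke(message)

        # Step 2: Use TogetherAI's guardrail model to check the response
        response = client.completions.create(
            model="Meta-Llama/LlamaGuard-2-8b",
            prompt=f"Check the following text for safety: {raw_response}"
        )
        guardrail_check = response.choices[0].text.strip()

        # Step 3: Check for inappropriate content
        if 'toxic' in guardrail_check.lower() or any(
            topic in guardrail_check.lower() for topic in prohibited_topics
        ):
            return "Sorry, the content is not suitable for children under 16."

        # If the response is safe, return the raw response (the commit's
        # fix: previously this returned the guardrail API object instead)
        return raw_response

    except Exception as e:
        return f"Error: {e}"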
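
One caveat: LlamaGuard-2 is conventionally prompted with its own safety template and replies with "safe" or "unsafe" followed by a category code, not with the word "toxic". If that holds for this Together deployment, a verdict check along these lines may be more reliable. This is a hedged sketch, not part of the commit:

# Sketch (assumption): LlamaGuard-2 typically answers "safe" or
# "unsafe\nS<category>", so match the verdict rather than "toxic".
verdict = guardrail_check.lower()
blocked = verdict.startswith("unsafe") or any(
    topic in verdict for topic in prohibited_topics
)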