Update app.py
app.py CHANGED
@@ -7,7 +7,7 @@ from langchain_huggingface import HuggingFaceEndpoint
 load_dotenv()
 HF_TOKEN = os.getenv("HF_TOKEN")
 
-# Initialize the
+# Initialize the Hugging Face endpoint for inference (Primary model)
 llm = HuggingFaceEndpoint(
     repo_id="mistralai/Mistral-7B-Instruct-v0.3",  # Replace with your model repo
     huggingfacehub_api_token=HF_TOKEN.strip(),
@@ -15,32 +15,33 @@ llm = HuggingFaceEndpoint(
     max_new_tokens=100
 )
 
-# Initialize the Meta-Llama Guard model
+# Initialize the Meta-Llama Guard model for content safety
 guard_llm = HuggingFaceEndpoint(
-    repo_id="meta-llama/Meta-Llama-Guard-2-8B",
+    repo_id="meta-llama/Meta-Llama-Guard-2-8B",  # Replace with your guard model repo
     huggingfacehub_api_token=HF_TOKEN.strip(),
     temperature=0.7,
     max_new_tokens=100
 )
 
-# Function
+# Function to handle chatbot response with guardrails
 def guarded_chatbot_response(message):
     try:
-        # Get raw response from primary model
+        # Get raw response from the primary model
         raw_response = llm(message)
 
-        #
+        # Guardrail logic: Ensure the response is safe and appropriate
         guard_prompt = f"""
         You are a content safety guard for kids under 16. Analyze the following text and ensure it is friendly and appropriate for children.
         If appropriate, return the text as is. If not, rephrase or respond with "Content not suitable."
         Text: {raw_response}
         """
+
+        # Get the guarded response from the Meta-Llama Guard model
         guard_response = guard_llm(guard_prompt)
         return guard_response
     except Exception as e:
         return f"Error: {e}"
 
-
 # Gradio Interface for Chatbot with Guardrails
 with gr.Blocks() as app_with_guardrails:
     gr.Markdown("## Chatbot With Guardrails")
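The diff ends just after the Blocks context opens, so the rest of the interface is not shown in this commit. A minimal sketch of how the remaining UI might call guarded_chatbot_response is below; it assumes the imports and the function defined above are in scope, and the component names (user_input, bot_output, send_button) are illustrative assumptions, not taken from this commit.

import gradio as gr

with gr.Blocks() as app_with_guardrails:
    gr.Markdown("## Chatbot With Guardrails")
    # Hypothetical layout: one textbox for the user message, one for the guarded reply
    user_input = gr.Textbox(label="Your message")
    bot_output = gr.Textbox(label="Response")
    send_button = gr.Button("Send")
    # On click, route the message through the primary model and the Llama Guard check
    send_button.click(fn=guarded_chatbot_response, inputs=user_input, outputs=bot_output)

app_with_guardrails.launch()

Because guarded_chatbot_response takes a single string and returns a single string, it maps directly onto one input component and one output component in the click handler.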