# Source: Hugging Face Space file app.py by Ali231a
# Commit: "Create app.py" (c035ad5, verified) — original size 1.83 kB
# (The lines above were web-UI chrome captured during extraction, converted
#  to comments so the module remains valid Python.)
import gradio as gr
import requests
import os
# Model settings
# Repo id of the text-generation model queried via the HF Inference API.
MODEL_NAME = "Canstralian/pentest_ai"
# API token read from the environment; None if unset, in which case the
# Authorization header below becomes the literal "Bearer None".
HF_API_TOKEN = os.getenv("HF_API_TOKEN")
# Function to query the Hugging Face model
def query_hf(prompt: str) -> str:
    """Send *prompt* to the HF Inference API and return the generated text.

    Returns the model's ``generated_text`` when present, a string dump of
    the raw JSON payload for unrecognized shapes, or an
    ``"Error querying model: ..."`` message on any failure (network error,
    non-2xx status, bad JSON). Never raises — callers always get a string.
    """
    headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 300}}
    try:
        response = requests.post(
            f"https://api-inference.huggingface.co/models/{MODEL_NAME}",
            headers=headers,
            json=payload,
            # Without a timeout a stalled API call would hang this worker
            # indefinitely; 60s accommodates cold model loads.
            timeout=60,
        )
        response.raise_for_status()  # Raise an error for bad responses
        data = response.json()
        # Handle different response formats.
        # List shape: [{"generated_text": ...}] — guard against an empty
        # list or non-dict first element, which previously raised inside
        # the membership test and surfaced as a misleading error string.
        if (
            isinstance(data, list)
            and data
            and isinstance(data[0], dict)
            and "generated_text" in data[0]
        ):
            return data[0]["generated_text"]
        # Dict shape: {"generated_text": ...}
        if isinstance(data, dict) and "generated_text" in data:
            return data["generated_text"]
        return str(data)  # Fallback to string representation
    except Exception as e:
        # Boundary handler: degrade to an error message the UI can display.
        return f"Error querying model: {str(e)}"
# Chat function for Gradio
def chat_fn(message, history):
    """Build a conversation prompt from *history* plus *message* and query the model.

    With ``gr.Chatbot(type="messages")`` Gradio passes *history* as a list of
    ``{"role": ..., "content": ...}`` dicts. The previous implementation
    unpacked each dict as a tuple, which yields the dict's KEYS
    ("role"/"content") instead of the texts — producing garbage prompts.

    Returns the assistant's reply as a plain string; Gradio appends it (and
    the user's message) to the chat itself, so returning the user turn again
    would duplicate it in the UI.
    """
    # Replay prior turns in "User:/Assistant:" form to give the model context.
    prompt = ""
    for msg in history:
        speaker = "User" if msg.get("role") == "user" else "Assistant"
        prompt += f"{speaker}: {msg.get('content', '')}\n"
    prompt += f"User: {message}\nAssistant: "
    # Get response from the model
    return query_hf(prompt)
# Create Gradio interface
# ChatInterface wires chat_fn to a chat UI; the explicit Chatbot with
# type="messages" makes history arrive as openai-style role/content dicts.
demo = gr.ChatInterface(
    fn=chat_fn,
    chatbot=gr.Chatbot(type="messages"),  # Use messages format
    title="Pentest Assistant",
    description="Your AI-powered assistant for penetration testing and cybersecurity tasks.",
    theme="soft"
)
# Launch the app (blocking call; serves the UI for the Space).
demo.launch()