Delete app.py
app.py
DELETED
@@ -1,30 +0,0 @@
-import gradio as gr
-import requests
-import os
-
-# Model settings
-MODEL_NAME = "Canstralian/pentest_ai"
-HF_API_TOKEN = os.getenv("HF_API_TOKEN")
-
-# Function to talk to the model
-def query_hf(prompt):
-    headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
-    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 300}}
-    response = requests.post(
-        f"https://api-inference.huggingface.co/models/{MODEL_NAME}",
-        headers=headers,
-        json=payload
-    )
-    if response.status_code != 200:
-        return f"Error: {response.text}"
-    return response.json()[0]['generated_text']
-
-# Chat interface function
-def chat_fn(message, history):
-    answer = query_hf(message)
-    history.append((message, answer))
-    return history, history
-
-# Launch Gradio app
-demo = gr.ChatInterface(chat_fn)
-demo.launch()