Update app.py
app.py CHANGED
@@ -1,5 +1,5 @@
 import os
-from flask import Flask, request, jsonify
+from flask import Flask, request, jsonify, send_from_directory
 import requests
 
 # 🔧 Hugging Face inference endpoint and token
@@ -20,18 +20,36 @@ SYSTEM_PROMPT = (
 app = Flask(__name__)
 chat_history = []
 
+@app.route("/")
+def index():
+    return send_from_directory(".", "index.html")
+
+@app.route("/script.js")
+def serve_script():
+    return send_from_directory(".", "script.js")
+
+@app.route("/style.css")
+def serve_css():
+    return send_from_directory(".", "style.css")
+
 @app.route("/chat", methods=["POST"])
 def chat():
     global chat_history
     user_input = request.json.get("message", "")
+    if not user_input:
+        return jsonify({"error": "Empty message"}), 400
+
     chat_history.append({"role": "user", "content": user_input})
 
     payload = {
         "inputs": [
             {"role": "system", "content": SYSTEM_PROMPT},
-            *chat_history[-10:]
+            *chat_history[-10:]
         ],
-        "parameters": {
+        "parameters": {
+            "max_new_tokens": 250,
+            "temperature": 0.7
+        }
     }
 
     headers = {
@@ -44,16 +62,14 @@ def chat():
     if response.status_code != 200:
         return jsonify({"error": "Model error", "details": response.text}), 500
 
-
-
-
+    try:
+        result = response.json()
+        reply = result.get("generated_text", "") or result[0]["generated_text"]
+    except Exception as e:
+        return jsonify({"error": "Unexpected model response", "details": str(e)}), 500
 
+    chat_history.append({"role": "assistant", "content": reply})
     return jsonify({"response": reply})
 
-@app.route("/")
-def index():
-    return "SHODAN backend is running."
-
 if __name__ == "__main__":
     app.run(host="0.0.0.0", port=7860)
-
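For reference, a minimal local smoke test of the updated /chat endpoint might look like the sketch below. It assumes the Space is running locally on port 7860 (as configured in app.run); the base URL and the sample message are illustrative, not part of the commit.

# smoke_test.py — hypothetical test script, not part of this commit
import requests

BASE_URL = "http://localhost:7860"  # assumption: local run; a deployed Space has its own URL

# Normal request: should return {"response": "..."} on success,
# or {"error": ..., "details": ...} with status 500 if the model call fails.
resp = requests.post(f"{BASE_URL}/chat", json={"message": "Hello, SHODAN."})
print(resp.status_code, resp.json())

# Empty message: should hit the new validation branch and return 400 {"error": "Empty message"}.
resp = requests.post(f"{BASE_URL}/chat", json={"message": ""})
print(resp.status_code, resp.json())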