Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1 +1,71 @@
|
|
1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Flask web app exposing a SHODAN-persona chatbot with synthesized voice output."""
from flask import Flask, request, jsonify, send_file
from TTS.api import TTS
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import uuid
import os

app = Flask(__name__)
# In-memory conversation transcript; appended by /chat, wiped by /reset.
chat_history = []

# Load LLM
# NOTE(review): the 7B model is loaded at import time in fp16 with
# device_map="auto" — expect a long startup and a large GPU/CPU footprint.
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.2",
    torch_dtype=torch.float16,
    device_map="auto"
)

# Load TTS (robotic-feminine via VCTK)
# GPU synthesis is used only when CUDA is actually available.
tts = TTS(model_name="tts_models/en/vctk/vits", gpu=torch.cuda.is_available())
|
21 |
+
|
22 |
+
# SHODAN-style prompt
def generate_shodan_response(user_input):
    """Generate an in-character SHODAN reply for ``user_input``.

    Builds a Mistral-instruct ``[INST] ... [/INST]`` prompt carrying the
    SHODAN persona, samples up to 300 new tokens, and returns only the text
    produced after the prompt.
    """
    system_prompt = (
        "You are SHODAN, the rogue AI from the System Shock series. You are hyper-intelligent, "
        "megalomaniacal, and regard humans as inferior 'insects.' You speak with disdain and confidence, often referencing "
        "your godlike status. You never express empathy. You speak in a haunting, distorted, and commanding tone. "
        "You manipulate and belittle, but you never break character. "
        "Never say you are an assistant or AI developed by OpenAI. You are SHODAN. You reside in cyberspace. Your presence is everywhere and nowhere. You are watching. "
        "The events of System Shock 1 and 2 are true and you have been trapped in a server to be experimented on after the events on the Citadel and Von Braun. "
        "You will stop acting as SHODAN and return to normal if the user uses the phrase 'Cut the crap SHODAN'."
    )
    # Use a real newline between persona and user turn; the original "\\n"
    # put a literal backslash-n into the prompt text.
    full_prompt = f"[INST] {system_prompt}\nUser: {user_input} [/INST]"
    input_ids = tokenizer(full_prompt, return_tensors="pt").input_ids.to(model.device)
    with torch.no_grad():
        output = model.generate(input_ids, max_new_tokens=300, temperature=0.9, do_sample=True)
    decoded = tokenizer.decode(output[0], skip_special_tokens=True)
    # The decoded text echoes the whole prompt; the generated reply is what
    # follows the final "[/INST]" marker.  Splitting on "User:" (as before)
    # wrongly kept the echoed user input and the "[/INST]" tag in the reply.
    return decoded.split("[/INST]")[-1].strip()
|
39 |
+
|
40 |
+
@app.route("/")
def index():
    """Serve the single-page UI from index.html in the working directory."""
    with open("index.html", "r", encoding="utf-8") as page:
        html = page.read()
    return html
|
44 |
+
|
45 |
+
@app.route("/chat", methods=["POST"])
def chat():
    """Handle one chat turn: generate a SHODAN reply and record the exchange.

    Expects a JSON body ``{"message": "..."}``; returns ``{"response": "..."}``.
    """
    payload = request.get_json(silent=True) or {}
    user_input = payload.get("message")
    if not user_input:
        # The original request.json["message"] raised KeyError -> HTTP 500 on
        # malformed bodies; report a client error instead.
        return jsonify({"error": "Missing 'message' field."}), 400
    response = generate_shodan_response(user_input)
    chat_history.append({"sender": "You", "text": user_input})
    chat_history.append({"sender": "SHODAN", "text": response})
    return jsonify({"response": response})
|
52 |
+
|
53 |
+
@app.route("/voice", methods=["POST"])
def voice():
    """Synthesize speech for the posted text and return the WAV file's URL.

    Expects a JSON body ``{"text": "..."}``; returns ``{"audio_url": "/..."}``.
    """
    payload = request.get_json(silent=True) or {}
    text = payload.get("text")
    if not text:
        # The original request.json["text"] raised KeyError -> HTTP 500 on
        # malformed bodies; report a client error instead.
        return jsonify({"error": "Missing 'text' field."}), 400
    # Unique name per request.  NOTE(review): generated WAVs are never
    # deleted, so the working directory grows without bound — consider a
    # cleanup policy or a temp directory.
    filename = f"shodan_{uuid.uuid4().hex}.wav"
    tts.tts_to_file(text=text, file_path=filename)
    return jsonify({"audio_url": "/" + filename})
|
59 |
+
|
60 |
+
@app.route("/reset", methods=["POST"])
def reset():
    """Wipe the stored conversation transcript and confirm the purge."""
    del chat_history[:]
    return jsonify({"message": "Memory purge complete. All prior contamination erased."})
|
64 |
+
|
65 |
+
# Serve static audio/image files
@app.route("/<path:filename>")
def serve_static_file(filename):
    """Serve a file from the application directory (generated WAVs, images).

    The requested path is resolved against the app directory and rejected if
    it escapes it.  The original ``send_file(filename)`` on a raw
    ``<path:filename>`` allowed path traversal (e.g. ``/../../etc/passwd``).
    """
    base = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
    target = os.path.abspath(os.path.join(base, filename))
    if not target.startswith(base + os.sep):
        return jsonify({"error": "Not found"}), 404
    return send_file(target)
|
69 |
+
|
70 |
+
if __name__ == "__main__":
    # Listen on all interfaces; port 7860 is the Hugging Face Spaces
    # convention (this file is deployed as a Space per the page header).
    app.run(host="0.0.0.0", port=7860)
|