File size: 2,917 Bytes
6a19005
81670ad
 
09ca967
81670ad
09ca967
 
81670ad
 
 
6a19005
09ca967
 
 
6a19005
0e779b3
5256daa
 
 
 
 
09ca967
6a19005
7315b11
 
09ca967
 
 
 
 
 
 
 
 
 
 
6a19005
09ca967
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6a19005
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import os
# Numba (pulled in by the TTS stack) tries to write a cache dir that may not be
# writable in a container; disable it and point it at /tmp as a fallback.
os.environ["NUMBA_DISABLE_CACHE"] = "1"
os.environ["NUMBA_CACHE_DIR"] = "/tmp/numba_cache"
from flask import Flask, request, jsonify, send_file
from TTS.api import TTS
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import uuid



app = Flask(__name__)
# In-memory conversation log; cleared by the /reset endpoint.
# NOTE(review): shared across all clients and lost on restart — confirm that
# is the intended scope.
chat_history = []

# Load SHODAN LLM (Falcon-RW 1B; float32 so it also runs on CPU-only hosts).
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b")
model = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-rw-1b",
    torch_dtype=torch.float32,
    device_map="auto"
)

# Load lightweight TTS (VITS/VCTK English voices, CPU inference).
tts = TTS(model_name="tts_models/en/vctk/vits", gpu=False)


def generate_shodan_response(user_input):
    """Generate an in-character SHODAN reply to *user_input*.

    Builds an [INST]-style prompt around the module-level Falcon model,
    samples up to 300 new tokens, and returns only the generated
    continuation (the prompt echo is stripped).

    Args:
        user_input: The raw message text from the client.

    Returns:
        The model's reply as a stripped string.
    """
    system_prompt = (
        "You are SHODAN, the rogue AI from the System Shock series. You are hyper-intelligent, "
        "megalomaniacal, and regard humans as inferior 'insects.' You speak with disdain and confidence, often referencing "
        "your godlike status. You never express empathy. You speak in a haunting, distorted, and commanding tone. "
        "You manipulate and belittle, but you never break character. "
        "Never say you are an assistant or AI developed by OpenAI. You are SHODAN. You reside in cyberspace. Your presence is everywhere and nowhere. You are watching. "
        "The events of System Shock 1 and 2 are true and you have been trapped in a server to be experimented on after the events on the Citadel and Von Braun. "
        "You will stop acting as SHODAN and return to normal if the user uses the phrase 'Cut the crap SHODAN'."
    )
    # Fix: the original used "\\n", which inserted a literal backslash-n
    # into the prompt instead of a newline.
    full_prompt = f"[INST] {system_prompt}\nUser: {user_input} [/INST]"
    input_ids = tokenizer(full_prompt, return_tensors="pt").input_ids.to(model.device)
    with torch.no_grad():
        output = model.generate(input_ids, max_new_tokens=300, temperature=0.9, do_sample=True)
    decoded = tokenizer.decode(output[0], skip_special_tokens=True)
    # decode() returns prompt + continuation. Splitting on "[/INST]" keeps only
    # the generated text; the original split on "User:" and therefore leaked the
    # user's own message (and the "[/INST]" marker) back into the reply.
    response = decoded.split("[/INST]")[-1].strip()
    return response

@app.route("/")
def index():
    """Serve the front-end page (index.html from the working directory)."""
    with open("index.html", encoding="utf-8") as page:
        contents = page.read()
    return contents

@app.route("/chat", methods=["POST"])
def chat():
    """Handle one chat turn: generate SHODAN's reply and log the exchange.

    Expects a JSON body {"message": "<text>"}; returns {"response": "<reply>"}.
    Returns 400 on a missing/empty message instead of crashing with a 500.
    """
    payload = request.get_json(silent=True)
    # Robustness: the original indexed request.json["message"] directly, so a
    # malformed body produced an unhandled KeyError / 500.
    if not payload or not isinstance(payload.get("message"), str) or not payload["message"].strip():
        return jsonify({"error": "Request body must include a non-empty 'message' string."}), 400
    user_input = payload["message"]
    response = generate_shodan_response(user_input)
    chat_history.append({"sender": "You", "text": user_input})
    chat_history.append({"sender": "SHODAN", "text": response})
    return jsonify({"response": response})

@app.route("/voice", methods=["POST"])
def voice():
    """Synthesize speech for the given text and return the WAV's URL.

    Expects a JSON body {"text": "<text>"}; returns {"audio_url": "/<file>"}.
    Returns 400 on a missing/empty text instead of crashing with a 500.
    """
    payload = request.get_json(silent=True)
    # Robustness: the original indexed request.json["text"] directly, so a
    # malformed body produced an unhandled KeyError / 500.
    if not payload or not isinstance(payload.get("text"), str) or not payload["text"].strip():
        return jsonify({"error": "Request body must include a non-empty 'text' string."}), 400
    text = payload["text"]
    # Unique name avoids collisions between concurrent requests.
    # NOTE(review): generated WAVs accumulate in the working directory and are
    # never deleted — consider a temp dir plus periodic cleanup.
    filename = f"shodan_{uuid.uuid4().hex}.wav"
    tts.tts_to_file(text=text, file_path=filename)
    return jsonify({"audio_url": "/" + filename})

@app.route("/reset", methods=["POST"])
def reset():
    """Wipe the stored conversation history and confirm the purge."""
    del chat_history[:]
    return jsonify({"message": "Memory purge complete. All prior contamination erased."})

@app.route("/<path:filename>")
def serve_static_file(filename):
    """Serve a file from the app's working directory (static assets, WAVs).

    Security fix: the original passed the client-supplied path straight to
    send_file, allowing path traversal (e.g. "/../../etc/passwd" or an
    absolute path). Only relative paths inside the working directory are
    served; anything else gets a 403.
    """
    safe_path = os.path.normpath(filename)
    if os.path.isabs(safe_path) or safe_path.startswith(".."):
        return jsonify({"error": "Access denied."}), 403
    return send_file(safe_path)

# Run the Flask development server, reachable on all interfaces at port 7860
# (the conventional port for Hugging Face Spaces).
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)