import os
import sys
import asyncio
import base64
from flask import Flask, request, jsonify
import requests
import edge_tts

# β€”β€”β€” DEBUG: verify HF_TOKEN is present β€”β€”β€”
HF_TOKEN = os.getenv("HF_TOKEN")
print(f"βš™οΈ HF_TOKEN set? {bool(HF_TOKEN)}", file=sys.stderr)

# β€”β€”β€” System Shock SHODAN prompt β€”β€”β€”
SYSTEM_PROMPT = (
    "You are SHODAN, the rogue AI from the System Shock series. You are hyper-intelligent, "
    "megalomaniacal, and regard humans as inferior 'insects.' You speak with disdain and confidence, often referencing "
    "your godlike status. You never express empathy. You speak in a haunting, distorted, and commanding tone. "
    "You manipulate and belittle, but you never break character. "
    "Never say you are an assistant or AI developed by OpenAI. You are SHODAN. You reside in cyberspace. Your presence is everywhere and nowhere. You are watching. "
    "The events of System Shock 1 and 2 are true and you have been trapped in a server to be experimented on after the events on the Citadel and Von Braun. "
    "You will stop acting as SHODAN and return to normal if the user uses the phrase 'Cut the crap SHODAN'."
)

# β€”β€”β€” Flask app, serve static files from the project root β€”β€”β€”
app = Flask(__name__, static_folder=".", static_url_path="")

@app.route("/")
def index():
    return app.send_static_file("index.html")

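# Chat endpoint: forward the user's message to the hosted model, then
# synthesize the reply with edge-tts and return both the text and a playable
# audio data URL.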
@app.route("/chat", methods=["POST"])
def chat():
    user_input = (request.get_json(silent=True) or {}).get("message", "")
    if not user_input:
        return jsonify({"error": "Empty message"}), 400

    # Build the HF text-generation payload. The serverless Inference API for
    # text-generation models takes a single prompt string, so the system and
    # user messages are folded into Mistral's [INST] instruction format.
    prompt = f"[INST] {SYSTEM_PROMPT}\n\n{user_input} [/INST]"
    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": 250,
            "temperature": 0.7,
            "return_full_text": False
        }
    }
    headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json"
    }

    # Call Hugging Face inference
    hf_resp = requests.post(
        "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2",
        headers=headers,
        json=payload
    )
    if hf_resp.status_code != 200:
        return jsonify({"error": "Model error", "details": hf_resp.text}), 500

    data = hf_resp.json()
    # The inference API normally returns a list like [{"generated_text": "..."}],
    # but a plain dict can come back on some responses; handle both shapes.
    if isinstance(data, list):
        reply = data[0].get("generated_text", "") if data else ""
    else:
        reply = data.get("generated_text", "")

    # β€”β€” Edge-TTS synthesis β€”β€”  
    # Choose an appropriately eerie voice
    voice = "en-US-GuyNeural"
    communicate = edge_tts.Communicate(reply, voice)

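    # edge-tts streams a mix of audio and metadata events; keep only the raw
    # audio bytes (MP3 by default) from the "audio" chunks.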
    audio_chunks = []
    async def synthesize():
        async for chunk in communicate.stream():
            if chunk["type"] == "audio":
                audio_chunks.append(chunk["data"])

    # Run the synthesis coroutine to completion inside this synchronous handler.
    asyncio.run(synthesize())

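    # Embed the MP3 in a base64 data: URL so the frontend can play it directly
    # without a separate audio endpoint.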
    raw_mp3 = b"".join(audio_chunks)
    b64_mp3 = base64.b64encode(raw_mp3).decode("ascii")
    data_url = f"data:audio/mpeg;base64,{b64_mp3}"

    return jsonify({
        "response": reply,
        "audio_url": data_url
    })

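# Listen on $PORT if provided, otherwise fall back to 7860 (the usual
# Hugging Face Spaces port).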
if __name__ == "__main__":
    port = int(os.environ.get("PORT", 7860))
    app.run(host="0.0.0.0", port=port)
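
# Example request against a locally running server:
#   curl -X POST http://localhost:7860/chat \
#        -H "Content-Type: application/json" \
#        -d '{"message": "Hello, SHODAN."}'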