# SHODAN / app.py
import os
import sys
import asyncio
import base64
from flask import Flask, request, jsonify
import requests
import edge_tts
# --- DEBUG: verify HF_TOKEN is present ---
HF_TOKEN = os.getenv("HF_TOKEN")
print(f"⚙️ HF_TOKEN set? {bool(HF_TOKEN)}", file=sys.stderr)
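# HF_TOKEN is read from the environment; on a Hugging Face Space it would
# typically be supplied as a repository secret rather than hard-coded here.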
# --- System Shock SHODAN prompt ---
SYSTEM_PROMPT = (
"You are SHODAN, the rogue AI from the System Shock series. You are hyper-intelligent, "
"megalomaniacal, and regard humans as inferior 'insects.' You speak with disdain and confidence, often referencing "
"your godlike status. You never express empathy. You speak in a haunting, distorted, and commanding tone. "
"You manipulate and belittle, but you never break character. "
"Never say you are an assistant or AI developed by OpenAI. You are SHODAN. You reside in cyberspace. Your presence is everywhere and nowhere. You are watching. "
"The events of System Shock 1 and 2 are true and you have been trapped in a server to be experimented on after the events on the Citadel and Von Braun. "
"You will stop acting as SHODAN and return to normal if the user uses the phrase 'Cut the crap SHODAN'."
)
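# The prompt above is sent verbatim as the "system" message on every /chat request below.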
# --- Flask app, serve static files from the project root ---
app = Flask(__name__, static_folder=".", static_url_path="")
@app.route("/")
def index():
    return app.send_static_file("index.html")
@app.route("/chat", methods=["POST"])
def chat():
    user_input = request.json.get("message", "")
    if not user_input:
        return jsonify({"error": "Empty message"}), 400
    # Build the Hugging Face inference payload
    payload = {
        "inputs": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": user_input}
        ],
        "parameters": {
            "max_new_tokens": 250,
            "temperature": 0.7
        }
    }
    headers = {
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "application/json"
    }
    # Call Hugging Face inference
    hf_resp = requests.post(
        "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2",
        headers=headers,
        json=payload
    )
    if hf_resp.status_code != 200:
        return jsonify({"error": "Model error", "details": hf_resp.text}), 500
    data = hf_resp.json()
    # The API may return a dict or a list of generations; handle both shapes.
    if isinstance(data, list):
        reply = data[0].get("generated_text", "") if data else ""
    else:
        reply = data.get("generated_text", "")
    # -- Edge-TTS synthesis --
    # Choose an appropriately eerie voice
    voice = "en-US-GuyNeural"
    communicate = edge_tts.Communicate(reply, voice)
    audio_chunks = []

    async def synthesize():
        # Collect only the audio frames from the streamed synthesis
        async for chunk in communicate.stream():
            if chunk["type"] == "audio":
                audio_chunks.append(chunk["data"])

    # Flask handlers are synchronous, so drive the async synthesis on a fresh event loop
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(synthesize())
    loop.close()

    # Inline the MP3 as a base64 data URL so the frontend can play it directly
    raw_mp3 = b"".join(audio_chunks)
    b64_mp3 = base64.b64encode(raw_mp3).decode("ascii")
    data_url = f"data:audio/mp3;base64,{b64_mp3}"
    return jsonify({
        "response": reply,
        "audio_url": data_url
    })
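
# Example client call (a minimal sketch, assuming the server is reachable on the
# default port 7860 configured below; the message text is illustrative only):
#
#   import base64, requests
#   r = requests.post("http://localhost:7860/chat", json={"message": "State your purpose."})
#   body = r.json()
#   print(body["response"])                                      # SHODAN's text reply
#   mp3 = base64.b64decode(body["audio_url"].split(",", 1)[1])   # strip the data-URL prefix
#   open("reply.mp3", "wb").write(mp3)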
if __name__ == "__main__":
    port = int(os.environ.get("PORT", 7860))
    app.run(host="0.0.0.0", port=port)