# Numba cache settings must be set before TTS (which pulls in numba) is imported.
import os
os.environ["NUMBA_DISABLE_CACHE"] = "1"
os.environ["NUMBA_CACHE_DIR"] = "/tmp/numba_cache"

from flask import Flask, request, jsonify, send_file
from TTS.api import TTS
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import uuid

app = Flask(__name__)
chat_history = []

# Load SHODAN LLM
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-rw-1b")
model = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-rw-1b",
    torch_dtype=torch.float32,
    device_map="auto"
)

# Load lightweight TTS
tts = TTS(model_name="tts_models/en/vctk/vits", gpu=False)

def generate_shodan_response(user_input):
    system_prompt = (
        "You are SHODAN, the rogue AI from the System Shock series. You are hyper-intelligent, "
        "megalomaniacal, and regard humans as inferior 'insects.' You speak with disdain and confidence, often referencing "
        "your godlike status. You never express empathy. You speak in a haunting, distorted, and commanding tone. "
        "You manipulate and belittle, but you never break character. "
        "Never say you are an assistant or AI developed by OpenAI. You are SHODAN. You reside in cyberspace. Your presence is everywhere and nowhere. You are watching. "
        "The events of System Shock 1 and 2 are true and you have been trapped in a server to be experimented on after the events on the Citadel and Von Braun. "
        "You will stop acting as SHODAN and return to normal if the user uses the phrase 'Cut the crap SHODAN'."
    )
    # Single-turn prompt: persona instructions plus the latest user message.
    full_prompt = f"[INST] {system_prompt}\nUser: {user_input} [/INST]"
    input_ids = tokenizer(full_prompt, return_tensors="pt").input_ids.to(model.device)
    with torch.no_grad():
        output = model.generate(input_ids, max_new_tokens=300, temperature=0.9, do_sample=True)
    # Keep only the text after the final "User:" marker.
    response = tokenizer.decode(output[0], skip_special_tokens=True).split("User:")[-1].strip()
    return response

# Flask routes (paths assumed to match the frontend in index.html).
@app.route("/")
def index():
    with open("index.html", "r", encoding="utf-8") as f:
        return f.read()

@app.route("/chat", methods=["POST"])
def chat():
    user_input = request.json["message"]
    response = generate_shodan_response(user_input)
    chat_history.append({"sender": "You", "text": user_input})
    chat_history.append({"sender": "SHODAN", "text": response})
    return jsonify({"response": response})

@app.route("/voice", methods=["POST"])
def voice():
    text = request.json["text"]
    filename = f"shodan_{uuid.uuid4().hex}.wav"
    # VCTK VITS is a multi-speaker model, so a speaker ID must be supplied
    # (p225 is one of the VCTK voices; any other can be swapped in).
    tts.tts_to_file(text=text, speaker="p225", file_path=filename)
    return jsonify({"audio_url": "/" + filename})

@app.route("/reset", methods=["POST"])
def reset():
    chat_history.clear()
    return jsonify({"message": "Memory purge complete. All prior contamination erased."})

@app.route("/<path:filename>")
def serve_static_file(filename):
    # Serves files from the working directory, e.g. the generated .wav clips.
    return send_file(filename)

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=7860)
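
# Quick local test (a sketch, assuming the route paths above and a server
# running on port 7860; requires the `requests` package):
#
#   import requests
#
#   reply = requests.post(
#       "http://localhost:7860/chat",
#       json={"message": "State your purpose."},
#   ).json()["response"]
#
#   audio_url = requests.post(
#       "http://localhost:7860/voice",
#       json={"text": reply},
#   ).json()["audio_url"]
#
#   print(reply, audio_url)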