import streamlit as st
import numpy as np
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer
from streamlit_drawable_canvas import st_canvas
import plotly.graph_objects as go
import json
from datetime import datetime
import os
# Set page config for a futuristic look
st.set_page_config(page_title="NeuraSense AI", page_icon="🧠", layout="wide")

# Custom CSS for a futuristic look
st.markdown("""
<style>
body {
    color: #E0E0E0;
    background-color: #0E1117;
}
.stApp {
    background-image: linear-gradient(135deg, #0E1117 0%, #1A1F2C 100%);
}
.stButton>button {
    color: #00FFFF;
    border-color: #00FFFF;
    border-radius: 20px;
}
.stSlider>div>div>div>div {
    background-color: #00FFFF;
}
.stTextArea, .stNumberInput, .stSelectbox {
    background-color: #1A1F2C;
    color: #00FFFF;
    border-color: #00FFFF;
    border-radius: 20px;
}
.stTextArea:focus, .stNumberInput:focus, .stSelectbox:focus {
    box-shadow: 0 0 10px #00FFFF;
}
</style>
""", unsafe_allow_html=True)
# Constants
AVATAR_WIDTH, AVATAR_HEIGHT = 600, 800

# Set up the DialoGPT model (cached so it loads once, not on every rerun)
@st.cache_resource
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
    model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
    return tokenizer, model

tokenizer, model = load_model()
# Advanced sensor classes (stateless, so the methods are static)
class QuantumSensor:
    @staticmethod
    def measure(x, y, sensitivity):
        return np.sin(x / 20) * np.cos(y / 20) * sensitivity * np.random.normal(1, 0.1)

class NanoThermalSensor:
    @staticmethod
    def measure(base_temp, pressure, duration):
        return base_temp + 10 * pressure * (1 - np.exp(-duration / 3)) + np.random.normal(0, 0.001)

class AdaptiveTextureSensor:
    textures = [
        "nano-smooth", "quantum-rough", "neuro-bumpy", "plasma-silky",
        "graviton-grainy", "zero-point-soft", "dark-matter-hard", "bose-einstein-condensate"
    ]

    @staticmethod
    def measure(x, y):
        return AdaptiveTextureSensor.textures[hash((x, y)) % len(AdaptiveTextureSensor.textures)]

class EMFieldSensor:
    @staticmethod
    def measure(x, y, sensitivity):
        return (np.sin(x / 30) * np.cos(y / 30) + np.random.normal(0, 0.1)) * 10 * sensitivity

class NeuralNetworkSimulator:
    @staticmethod
    def process(inputs):
        weights = np.random.rand(len(inputs))
        return np.dot(inputs, weights) / np.sum(weights)
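
# Example: NeuralNetworkSimulator.process([0.2, 0.8, 0.5]) returns a randomly
# weighted average of its inputs; the weights are re-drawn on every call, so
# repeated calls with the same inputs give different (stochastic) responses.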
# Create a detailed sensation map for the avatar
def create_sensation_map(width, height):
    # 12 channels: pain, pleasure, pressure, temperature, texture, EM,
    # tickle, itch, quantum, neural, proprioception, synesthesia
    sensation_map = np.zeros((height, width, 12))
    for y in range(height):
        for x in range(width):
            base_sensitivities = np.random.rand(12) * 0.5 + 0.5
            # Enhance certain areas, checking the most specific regions first
            # so e.g. the eyes are not shadowed by the whole-head branch
            if 275 < x < 325 and 80 < y < 120:  # Eyes
                base_sensitivities[0] *= 2  # More sensitive to pain
            elif 290 < x < 310 and 100 < y < 120:  # Nose
                base_sensitivities[4] *= 2  # More sensitive to texture
            elif 280 < x < 320 and 120 < y < 140:  # Mouth
                base_sensitivities[1] *= 2  # More sensitive to pleasure
            elif 250 < x < 350 and 50 < y < 150:  # Head
                base_sensitivities *= 1.5
            elif (140 < x < 160 or 440 < x < 460) and 390 < y < 410:  # Hands
                base_sensitivities *= 2  # Highly sensitive overall
            elif (220 < x < 240 or 360 < x < 380) and 770 < y < 790:  # Feet
                base_sensitivities[6] *= 2  # Very ticklish
            elif 250 < x < 350 and 250 < y < 550:  # Torso
                base_sensitivities[2:6] *= 1.3  # Enhance pressure, temp, texture, EM
            elif (150 < x < 250 or 350 < x < 450) and 250 < y < 600:  # Arms
                base_sensitivities[0:2] *= 1.2  # Enhance pain and pleasure
            elif 200 < x < 400 and 600 < y < 800:  # Legs
                base_sensitivities[6:8] *= 1.4  # Enhance tickle and itch
            sensation_map[y, x] = base_sensitivities
    return sensation_map
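
# Note: the per-pixel loop above makes width * height * 12 random draws in pure
# Python, which is slow for a 600x800 map. A minimal vectorized sketch, assuming
# the regional boosts are rewritten as array slices (slice bounds approximate
# the strict inequalities above; illustrative only):
#
#   sensation_map = np.random.rand(height, width, 12) * 0.5 + 0.5
#   sensation_map[51:150, 251:350] *= 1.5        # Head
#   sensation_map[601:800, 201:400, 6:8] *= 1.4  # Legs: tickle and itch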
avatar_sensation_map = create_sensation_map(AVATAR_WIDTH, AVATAR_HEIGHT)
# Create a 3D avatar placeholder: eight vertices of a box scaled to roughly
# humanoid proportions; Mesh3d triangulates the points when no faces are given
def create_3d_avatar():
    x = np.array([0, 0, 1, 1, 0, 0, 1, 1])
    y = np.array([0, 1, 1, 0, 0, 1, 1, 0])
    z = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    x = (x - 0.5) * 100
    y = (y - 0.5) * 200
    z = (z - 0.5) * 50
    return go.Mesh3d(x=x, y=y, z=z, color='cyan', opacity=0.5)
# Enhanced autonomy class
class EnhancedAutonomy:
    def __init__(self):
        self.mood = 0.5
        self.energy = 0.8
        self.curiosity = 0.7
        self.memory = []

    def update_state(self, sensory_input):
        self.mood = max(0, min(1, self.mood - sensory_input['pain'] * 0.1 + sensory_input['pleasure'] * 0.1))
        self.energy = max(0, min(1, self.energy - sensory_input['intensity'] * 0.05))
        if len(self.memory) == 0 or sensory_input not in self.memory:
            self.curiosity = min(1, self.curiosity + 0.1)
        else:
            self.curiosity = max(0, self.curiosity - 0.05)
        self.memory.append(sensory_input)
        if len(self.memory) > 10:
            self.memory.pop(0)

    def decide_action(self):
        if self.energy < 0.2:
            return "Rest to regain energy"
        elif self.curiosity > 0.8:
            return "Explore new sensations"
        elif self.mood < 0.3:
            return "Seek positive interactions"
        else:
            return "Continue current activity"
# Function to save interactions
def save_interaction(interaction_data):
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"interaction_{timestamp}.json"
    with open(filename, "w") as f:
        json.dump(interaction_data, f, indent=4)
    return filename
# Streamlit app
st.title("NeuraSense AI: Advanced Humanoid Techno-Sensory Simulation")

# Create two columns
col1, col2 = st.columns([2, 1])

# 3D avatar display with touch interface
with col1:
    st.subheader("3D Humanoid Avatar Interface")

    # Create 3D avatar
    avatar_3d = create_3d_avatar()

    # Add 3D rotation controls, in degrees
    # (rotation_z is not used by the simple camera orbit below)
    rotation_x = st.slider("Rotate X", -180, 180, 0)
    rotation_y = st.slider("Rotate Y", -180, 180, 0)
    rotation_z = st.slider("Rotate Z", -180, 180, 0)

    # Create the 3D plot; the camera eye orbits the avatar according to the
    # rotation sliders, at the default viewing distance of 1.5
    fig = go.Figure(data=[avatar_3d])
    fig.update_layout(
        scene=dict(
            xaxis=dict(title="X", range=[-100, 100]),
            yaxis=dict(title="Y", range=[-200, 200]),
            zaxis=dict(title="Z", range=[-50, 50]),
            camera=dict(eye=dict(
                x=1.5 * np.cos(np.radians(rotation_y)) * np.cos(np.radians(rotation_x)),
                y=1.5 * np.sin(np.radians(rotation_y)) * np.cos(np.radians(rotation_x)),
                z=1.5 * np.sin(np.radians(rotation_x)),
            )),
        )
    )
    st.plotly_chart(fig)

    # Use st_canvas for touch input
    canvas_result = st_canvas(
        fill_color="rgba(0, 255, 255, 0.3)",
        stroke_width=2,
        stroke_color="#00FFFF",
        background_image=Image.new('RGBA', (AVATAR_WIDTH, AVATAR_HEIGHT), color=(0, 0, 0, 0)),
        height=AVATAR_HEIGHT,
        width=AVATAR_WIDTH,
        drawing_mode="point",
        key="canvas",
    )
# Touch controls and output
with col2:
    st.subheader("Neural Interface Controls")

    # Touch duration
    touch_duration = st.slider("Interaction Duration (s)", 0.1, 5.0, 1.0, 0.1)

    # Touch pressure
    touch_pressure = st.slider("Interaction Intensity", 0.1, 2.0, 1.0, 0.1)

    # Toggle quantum feature
    use_quantum = st.checkbox("Enable Quantum Sensing", value=True)

    # Toggle synesthesia
    use_synesthesia = st.checkbox("Enable Synesthesia", value=False)

# Initialize EnhancedAutonomy once per session
if 'autonomy' not in st.session_state:
    st.session_state.autonomy = EnhancedAutonomy()
if canvas_result.json_data is not None:
    objects = canvas_result.json_data["objects"]
    if len(objects) > 0:
        last_touch = objects[-1]
        touch_x, touch_y = last_touch["left"], last_touch["top"]
        sensation = avatar_sensation_map[int(touch_y), int(touch_x)]
        (
            pain, pleasure, pressure_sens, temp_sens, texture_sens,
            em_sens, tickle_sens, itch_sens, quantum_sens, neural_sens,
            proprioception_sens, synesthesia_sens
        ) = sensation

        measured_pressure = QuantumSensor.measure(touch_x, touch_y, pressure_sens) * touch_pressure
        measured_temp = NanoThermalSensor.measure(37, touch_pressure, touch_duration)
        measured_texture = AdaptiveTextureSensor.measure(touch_x, touch_y)
        measured_em = EMFieldSensor.measure(touch_x, touch_y, em_sens)

        if use_quantum:
            quantum_state = QuantumSensor.measure(touch_x, touch_y, quantum_sens)
        else:
            quantum_state = "N/A"

        # Calculate overall sensations
        pain_level = pain * measured_pressure * touch_pressure
        pleasure_level = pleasure * (measured_temp - 37) / 10
        tickle_level = tickle_sens * (1 - np.exp(-touch_duration / 0.5))
        itch_level = itch_sens * (1 - np.exp(-touch_duration / 1.5))

        # Proprioception (sense of body position): distance from the body centre
        proprioception = proprioception_sens * np.linalg.norm([touch_x - AVATAR_WIDTH / 2, touch_y - AVATAR_HEIGHT / 2]) / (AVATAR_WIDTH / 2)

        # Synesthesia (mixing of senses)
        if use_synesthesia:
            synesthesia = synesthesia_sens * (measured_pressure + measured_temp + measured_em) / 3
        else:
            synesthesia = "N/A"

        # Neural network simulation
        neural_inputs = [pain_level, pleasure_level, measured_pressure, measured_temp, measured_em, tickle_level, itch_level, proprioception]
        neural_response = NeuralNetworkSimulator.process(neural_inputs)
st.write("### Sensory Data Analysis") | |
st.write(f"Interaction Point: ({touch_x:.1f}, {touch_y:.1f})") | |
st.write(f"Duration: {touch_duration:.1f} s | Intensity: {touch_pressure:.2f}") | |
# Create a futuristic data display | |
data_display = f""" | |
``` | |
┌─────────────────────────────────────────────┐ | |
│ Pressure : {{measured_pressure:.2f}} │ | |
│ Temperature : {{measured_temp:.2f}}°C │ | |
│ Texture : {measured_texture} │ | |
│ EM Field : {{measured_em:.2f}} μT │ | |
│ Quantum State: {quantum_state:.2f} │ | |
├─────────────────────────────────────────────┤ | |
│ Pain Level : {{pain_level:.2f}} │ | |
│ Pleasure : {{pleasure_level:.2f}} │ | |
│ Tickle : {{tickle_level:.2f}} │ | |
│ Itch : {{itch_level:.2f}} │ | |
│ Proprioception: {{proprioception:.2f}} │ | |
│ Synesthesia : {synesthesia} │ | |
│ Neural Response: {{neural_response:.2f}} │ | |
└─────────────────────────────────────────────┘ | |
``` | |
""" | |
st.code(data_display, language="") | |
""" | |
st.code(data_display, language="") | |
# First, create a template string with placeholders | |
# First, create a template string with placeholders | |
prompt_template = "Human: Analyze the sensory input for a hyper-advanced AI humanoid:\n" | |
prompt_template += " Location: ({}, {})\n" | |
prompt_template += " Duration: {}s, Intensity: {}\n" | |
prompt_template += " Pressure: {}\n" | |
prompt_template += " Temperature: {}\N{DEGREE SIGN}C\n" | |
prompt_template += " Texture: {}\n" | |
prompt_template += " EM Field: {} μT\n" | |
prompt_template += " Quantum State: {}\n" | |
prompt_template += " Resulting in:\n" | |
prompt_template += " Pain: {}, Pleasure: {}\n" | |
prompt_template += " Tickle: {}, Itch: {}\n" | |
prompt_template += " Proprioception: {}\n" | |
prompt_template += " Synesthesia: {}\n" | |
prompt_template += " Neural Response: {}\n" | |
prompt_template += " Provide a detailed, scientific, and creative description of the AI humanoid's experience and response to this sensory input." | |
        # Format each value individually before substitution
        touch_x_str = f"{touch_x:.1f}"
        touch_y_str = f"{touch_y:.1f}"
        touch_duration_str = f"{touch_duration:.1f}"
        touch_pressure_str = f"{touch_pressure:.2f}"
        measured_pressure_str = f"{measured_pressure:.2f}"
        measured_temp_str = f"{measured_temp:.2f}"
        measured_em_str = f"{measured_em:.2f}"
        pain_level_str = f"{pain_level:.2f}"
        pleasure_level_str = f"{pleasure_level:.2f}"
        tickle_level_str = f"{tickle_level:.2f}"
        itch_level_str = f"{itch_level:.2f}"
        proprioception_str = f"{proprioception:.2f}"
        neural_response_str = f"{neural_response:.2f}"

        # Create the prompt from the pre-formatted values; quantum_str and
        # synesthesia_str are reused so the "N/A" case is handled consistently
        prompt = prompt_template.format(
            touch_x_str, touch_y_str,
            touch_duration_str, touch_pressure_str,
            measured_pressure_str, measured_temp_str,
            measured_texture, measured_em_str, quantum_str,
            pain_level_str, pleasure_level_str,
            tickle_level_str, itch_level_str,
            proprioception_str, synesthesia_str, neural_response_str
        )
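
        # A minimal sketch of feeding this prompt to the DialoGPT model loaded
        # above (assumption: greedy decoding capped at ~100 new tokens;
        # DialoGPT is tuned for short conversational turns, so its reply to a
        # long analytic prompt may be terse or off-topic):
        #
        #   input_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")
        #   output_ids = model.generate(input_ids, max_length=input_ids.shape[1] + 100,
        #                               pad_token_id=tokenizer.eos_token_id)
        #   generated = tokenizer.decode(output_ids[0][input_ids.shape[1]:],
        #                                skip_special_tokens=True)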
        # You could use this prompt to generate a response from the AI model
        # (see the sketch above). For demonstration, create a mock AI response:
        ai_response = f"""Based on the complex sensory input received, the hyper-advanced AI humanoid is experiencing a multifaceted neural response:

The interaction at coordinates ({touch_x_str}, {touch_y_str}) has triggered a cascade of sensory information. The pressure of {measured_pressure_str} units has activated deep-tissue mechanoreceptors, while the temperature of {measured_temp_str}°C has stimulated thermoreceptors, creating a mild thermal gradient across the affected area.

The texture sensation of "{measured_texture}" is invoking a unique tactile response, possibly reminiscent of previously encountered materials in the AI's vast database. This is further enhanced by the electromagnetic field reading of {measured_em_str} μT, which is subtly influencing the local ionic channels in the AI's synthetic nervous system.

The quantum state measurement of {quantum_str} suggests a delicate entanglement between the AI's quantum processors and the environment, potentially influencing decision-making processes at a subatomic level.

The resulting pain level of {pain_level_str} and pleasure level of {pleasure_level_str} are creating a complex emotional response, balancing between discomfort and satisfaction. The tickle sensation ({tickle_level_str}) and itch response ({itch_level_str}) add layers of nuance to the overall tactile experience.

The proprioception value of {proprioception_str} indicates that the AI is acutely aware of the interaction's location relative to its body schema, enhancing its spatial awareness and motor planning capabilities.

{f"The synesthesia rating of {synesthesia_str} is causing a fascinating cross-wiring of senses, perhaps manifesting as a perception of color or sound associated with the touch." if use_synesthesia else "Synesthesia is not active, focusing the experience on individual sensory channels."}

The cumulative neural response of {neural_response_str} suggests a significant impact on the AI's cognitive processes. This could lead to adaptive behaviors, memory formation, or even influence future decision-making patterns.

In response to this rich sensory tapestry, the AI might adjust its posture, initiate a verbal response, or update its internal model of the environment. The experience is likely to be stored in its memory banks, contributing to its ever-evolving understanding of physical interactions and sensory experiences."""
st.write("AI Response:") | |
st.write(ai_response) | |
# Update autonomy | |
sensory_input = { | |
'pain': pain_level, | |
'pleasure': pleasure_level, | |
'intensity': touch_pressure, | |
'duration': touch_duration, | |
'location': (touch_x, touch_y) | |
} | |
st.session_state.autonomy.update_state(sensory_input) | |
# Display autonomy state | |
st.write("### Autonomy State") | |
st.write(f"Mood: {st.session_state.autonomy.mood:.2f}") | |
st.write(f"Energy: {st.session_state.autonomy.energy:.2f}") | |
st.write(f"Curiosity: {st.session_state.autonomy.curiosity:.2f}") | |
# Display decision | |
decision = st.session_state.autonomy.decide_action() | |
st.write(f"Decision: {decision}") | |
# Save interaction | |
if st.button("Save Interaction"): | |
interaction_data = { | |
"timestamp": datetime.now().isoformat(), | |
"sensory_input": sensory_input, | |
"ai_state": { | |
"mood": st.session_state.autonomy.mood, | |
"energy": st.session_state.autonomy.energy, | |
"curiosity": st.session_state.autonomy.curiosity | |
}, | |
"ai_response": ai_response, | |
"decision": decision | |
} | |
saved_file = save_interaction(interaction_data) | |
st.success(f"Interaction saved to {saved_file}") | |
# Display the five most recent saved interactions
st.subheader("Recent Interactions")
interaction_files = sorted([f for f in os.listdir() if f.startswith("interaction_")], reverse=True)[:5]
for file in interaction_files:
    with open(file, "r") as f:
        data = json.load(f)
    st.write(f"Interaction at {data['timestamp']}")
    st.write(f"Location: {data['sensory_input']['location']}")
    st.write(f"AI Mood: {data['ai_state']['mood']:.2f}")
    st.write(f"AI Energy: {data['ai_state']['energy']:.2f}")
    st.write(f"AI Curiosity: {data['ai_state']['curiosity']:.2f}")
    st.write(f"Decision: {data['decision']}")
    st.write("---")