ThreadAbort commited on
Commit
8431c3b
Β·
1 Parent(s): 607becf

[Update]: Major enhancements to app.py and requirements.txt 🌊✨

Browse files

- Added: Comprehensive documentation and comments throughout app.py to improve understanding and maintainability.
- Implemented: New classes and methods for wave memory operations, emotional context management, and visualization features.
- Updated: Gradio interface for a more interactive user experience, including advanced settings for memory operations.
- Enhanced: Requirements.txt to include necessary libraries for visualization and memory processing.
- Pro Tip of the Commit: A well-documented code is like a lighthouse guiding ships through the fog! πŸ›³οΈπŸ’‘
Aye, Aye! 🚒

Files changed (2) hide show
  1. app.py +882 -468
  2. requirements.txt +6 -4
app.py CHANGED
@@ -1,531 +1,945 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import torch
3
  import numpy as np
4
  import matplotlib.pyplot as plt
5
  from matplotlib import cm
6
- import plotly.graph_objects as go
7
- from plotly.subplots import make_subplots
8
  import random
9
- from typing import Tuple, List, Dict, Any, Optional
10
  import time
 
 
 
 
 
 
 
11
  import colorsys
12
- import math
13
- from PIL import Image, ImageDraw, ImageFilter
14
 
15
- # Try importing Stable Diffusion dependencies
16
- try:
17
- from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
18
- STABLE_DIFFUSION_AVAILABLE = True
19
- except ImportError:
20
- print("Warning: diffusers package not available. Artistic visualization will be disabled.")
21
- STABLE_DIFFUSION_AVAILABLE = False
22
 
23
- # Try importing 3D visualization dependencies
24
- try:
25
- import plotly.express as px
26
- PLOTLY_3D_AVAILABLE = True
27
- except ImportError:
28
- print("Warning: plotly.express not available. 3D visualization will be limited.")
29
- PLOTLY_3D_AVAILABLE = False
30
 
31
- # Initialize Stable Diffusion only if available
 
32
  pipe = None
33
- if STABLE_DIFFUSION_AVAILABLE:
34
- device = "cuda" if torch.cuda.is_available() else "cpu"
35
- model_repo_id = "tensorart/stable-diffusion-3.5-large-TurboX"
36
- torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
37
 
 
38
  try:
39
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
40
- pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(model_repo_id, subfolder="scheduler", shift=5)
41
- pipe = pipe.to(device)
42
- print(f"βœ… Stable Diffusion initialized on {device}")
 
 
 
 
 
 
43
  except Exception as e:
44
- print(f"⚠️ Could not initialize Stable Diffusion: {e}")
45
- STABLE_DIFFUSION_AVAILABLE = False
46
-
47
- # Constants
48
- MAX_SEED = np.iinfo(np.int32).max
49
- DEFAULT_GRID_SIZE = 64
50
- WAVE_TYPES = ["sine", "cosine", "gaussian", "square"]
51
- MEMORY_OPERATIONS = [
52
- "wave_memory",
53
- "interference",
54
- "resonance",
55
- "hot_tub_mode",
56
- "emotional_resonance",
57
- "pattern_completion"
58
- ]
59
 
60
- # Color palettes for different emotional states
61
- COLOR_PALETTES = {
62
- "positive": ["#FF5E5B", "#D8D8F6", "#E8AA14", "#32E875", "#3C91E6"],
63
- "neutral": ["#FAFFFD", "#A1CDF4", "#7D83FF", "#3A3042", "#080708"],
64
- "negative": ["#1B1B1E", "#373F51", "#58A4B0", "#A9BCD0", "#D8DBE2"]
65
- }
66
 
67
  class EmotionalContext:
68
- """Implements Mem|8's emotional context structure"""
 
 
 
 
 
 
 
 
69
  def __init__(self, device="cuda" if torch.cuda.is_available() else "cpu"):
70
  self.device = device
71
  self.valence = torch.zeros(1).to(device) # -128 to 127: negative to positive
72
  self.arousal = torch.zeros(1).to(device) # 0 to 255: intensity level
73
  self.context = torch.zeros(1).to(device) # Contextual flags
74
- self.safety = torch.ones(1).to(device) * 100 # Safety level (0-100)
75
-
76
- # Memory blanket parameters
77
- self.resonance_freq = torch.tensor(1.0).to(device)
78
- self.filter_strength = torch.tensor(0.5).to(device)
79
-
80
- # Hot tub mode parameters
81
- self.hot_tub_active = False
82
- self.hot_tub_temperature = torch.tensor(37.0).to(device) # Default comfortable temperature
83
- self.hot_tub_participants = []
84
 
 
 
 
 
 
 
 
85
  def update(self, valence: float, arousal: Optional[float] = None):
86
- """Update emotional context based on valence and arousal"""
87
- self.valence = torch.tensor([valence]).to(self.device)
88
 
89
- # If arousal not provided, calculate it based on valence intensity
90
  if arousal is None:
91
- self.arousal = torch.abs(torch.tensor([valence * 2])).to(self.device)
92
  else:
93
- self.arousal = torch.tensor([arousal]).to(self.device)
94
 
95
- # Update resonance frequency based on emotional state
96
- self.resonance_freq = 1.0 + torch.sigmoid(self.valence/128)
 
 
97
 
98
- # Update filter strength based on arousal
99
- self.filter_strength = torch.sigmoid(self.arousal/128)
 
 
 
 
 
 
 
100
 
101
- return self
102
-
103
- def get_color_palette(self):
104
- """Get color palette based on emotional valence"""
105
- if self.valence.item() > 20:
106
- return COLOR_PALETTES["positive"]
107
- elif self.valence.item() < -20:
108
- return COLOR_PALETTES["negative"]
109
- else:
110
- return COLOR_PALETTES["neutral"]
111
-
112
- def activate_hot_tub(self, temperature: float = 37.0):
113
- """Activate hot tub mode with specified temperature"""
114
- self.hot_tub_active = True
115
- self.hot_tub_temperature = torch.tensor(temperature).to(self.device)
116
- return self
117
-
118
- def deactivate_hot_tub(self):
119
- """Deactivate hot tub mode"""
120
- self.hot_tub_active = False
121
- self.hot_tub_participants = []
122
- return self
123
-
124
- def add_hot_tub_participant(self, participant: str):
125
- """Add participant to hot tub session"""
126
- if self.hot_tub_active and participant not in self.hot_tub_participants:
127
- self.hot_tub_participants.append(participant)
128
- return self
129
 
130
- def get_state_dict(self) -> Dict[str, Any]:
131
- """Get emotional context as dictionary for display"""
132
- return {
133
- "valence": self.valence.item(),
134
- "arousal": self.arousal.item(),
135
- "resonance_frequency": self.resonance_freq.item(),
136
- "filter_strength": self.filter_strength.item(),
137
- "hot_tub_active": self.hot_tub_active,
138
- "hot_tub_temperature": self.hot_tub_temperature.item() if self.hot_tub_active else None,
139
- "hot_tub_participants": self.hot_tub_participants if self.hot_tub_active else [],
140
- "safety_level": self.safety.item()
141
- }
142
 
143
- class WaveProcessor:
144
- """Processes wave-based memory patterns"""
145
- def __init__(self, device="cuda" if torch.cuda.is_available() else "cpu"):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
146
  self.device = device
 
 
 
 
 
 
 
 
 
 
 
 
 
147
 
148
- def create_wave_pattern(self,
149
- size: int,
150
- frequency: float,
151
- amplitude: float,
152
- wave_type: str = "sine") -> torch.Tensor:
153
- """Create a wave pattern as described in Mem|8 paper"""
154
- t = torch.linspace(0, 2*np.pi, size).to(self.device)
155
- x = torch.linspace(0, 2*np.pi, size).to(self.device)
156
- T, X = torch.meshgrid(t, x, indexing='ij')
157
-
158
- if wave_type == "sine":
159
- return amplitude * torch.sin(frequency * T + X)
160
- elif wave_type == "cosine":
161
- return amplitude * torch.cos(frequency * T + X)
162
- elif wave_type == "gaussian":
163
- # Create a Gaussian wave pattern
164
- sigma = size / (4 * frequency)
165
- mu_t = size / 2
166
- mu_x = size / 2
167
- gauss_t = torch.exp(-((t - mu_t) ** 2) / (2 * sigma ** 2))
168
- gauss_x = torch.exp(-((x - mu_x) ** 2) / (2 * sigma ** 2))
169
- G_T, G_X = torch.meshgrid(gauss_t, gauss_x, indexing='ij')
170
- return amplitude * G_T * G_X
171
- elif wave_type == "square":
172
- # Create a square wave pattern
173
- square_t = torch.sign(torch.sin(frequency * t))
174
- square_x = torch.sign(torch.sin(frequency * x))
175
- S_T, S_X = torch.meshgrid(square_t, square_x, indexing='ij')
176
- return amplitude * S_T * S_X
 
 
 
 
 
 
 
 
 
177
  else:
178
- # Default to sine wave
179
- return amplitude * torch.sin(frequency * T + X)
180
-
181
- def apply_emotional_modulation(self,
182
- wave: torch.Tensor,
183
- emotion: EmotionalContext) -> torch.Tensor:
184
- """Apply emotional modulation to wave pattern"""
185
- # Modulate wave based on emotional valence
186
- emotional_mod = torch.exp(emotion.valence/128 * wave)
187
- return wave * emotional_mod
188
 
189
- def create_interference_pattern(self,
190
- wave1: torch.Tensor,
191
- wave2: torch.Tensor,
192
- emotion: EmotionalContext) -> torch.Tensor:
193
- """Create interference between two wave patterns"""
194
- interference = wave1 + wave2
195
- # Weight by emotional valence
196
- emotional_weight = torch.sigmoid(emotion.valence/128) * interference
197
- return emotional_weight
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
198
 
199
- def create_resonance_pattern(self,
200
- base_wave: torch.Tensor,
201
- emotion: EmotionalContext) -> torch.Tensor:
202
- """Create resonance pattern based on emotional state"""
203
- resonant_wave = self.create_wave_pattern(
204
- base_wave.shape[0],
205
- emotion.resonance_freq.item(),
206
- 1.0
207
- )
208
- resonance = base_wave * resonant_wave
209
- return resonance
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
210
 
211
- def apply_memory_blanket(self,
212
- wave: torch.Tensor,
213
- emotion: EmotionalContext) -> torch.Tensor:
214
- """Apply memory blanket filtering as described in the paper"""
215
- # Create a filter based on wave amplitude and emotional state
216
- wave_amplitude = torch.abs(wave)
217
- importance_threshold = emotion.filter_strength * wave_amplitude.mean()
218
-
219
- # Apply the filter - keep only significant waves
220
- filtered_wave = wave * (wave_amplitude > importance_threshold).float()
 
 
 
 
 
 
 
 
 
 
 
221
  return filtered_wave
222
 
223
- def create_hot_tub_pattern(self,
224
- size: int,
225
- emotion: EmotionalContext) -> torch.Tensor:
226
- """Create a hot tub pattern for safe exploration"""
227
- if not emotion.hot_tub_active:
228
- return torch.zeros((size, size)).to(self.device)
229
 
230
- # Create base wave pattern
231
- base_wave = self.create_wave_pattern(size, 1.0, 1.0, "sine")
232
-
233
- # Modulate based on hot tub temperature
234
- temp_factor = emotion.hot_tub_temperature / 50.0 # Normalize to 0-1 range
235
- temp_wave = self.create_wave_pattern(size, temp_factor.item(), 0.5, "gaussian")
236
-
237
- # Add ripples for each participant
238
- participant_count = len(emotion.hot_tub_participants)
239
- if participant_count > 0:
240
- ripple_wave = self.create_wave_pattern(
241
- size,
242
- 2.0 + participant_count * 0.5,
243
- 0.3,
244
- "gaussian"
245
- )
246
- hot_tub_pattern = base_wave + temp_wave + ripple_wave
247
- else:
248
- hot_tub_pattern = base_wave + temp_wave
249
 
250
- # Apply safety modulation
251
- safety_factor = emotion.safety / 100.0
252
- return hot_tub_pattern * safety_factor
253
-
254
- def create_pattern_completion(self,
255
- size: int,
256
- emotion: EmotionalContext,
257
- completion_ratio: float = 0.5) -> Tuple[torch.Tensor, torch.Tensor]:
258
- """Create a pattern completion demonstration"""
259
- # Create original pattern
260
- original = self.create_wave_pattern(size, 2.0, 1.0)
261
-
262
- # Create mask for incomplete pattern (randomly remove portions)
263
- mask = torch.rand(size, size).to(self.device) > completion_ratio
264
- incomplete = original * mask
265
-
266
- # Apply emotional context to reconstruction
267
- emotional_weight = torch.sigmoid(emotion.valence/128)
268
-
269
- # Simple reconstruction algorithm (in real system would be more sophisticated)
270
- # Here we're just doing a simple interpolation
271
- kernel_size = 3
272
- padding = kernel_size // 2
273
-
274
- # Create a kernel for interpolation
275
- kernel = torch.ones(1, 1, kernel_size, kernel_size).to(self.device) / (kernel_size ** 2)
276
-
277
- # Reshape for convolution
278
- incomplete_reshaped = incomplete.reshape(1, 1, size, size)
279
-
280
- # Apply convolution for interpolation
281
- with torch.no_grad():
282
- reconstructed = torch.nn.functional.conv2d(
283
- incomplete_reshaped,
284
- kernel,
285
- padding=padding
286
- ).reshape(size, size)
287
-
288
- # Blend original where mask exists
289
- reconstructed = torch.where(mask, reconstructed, original)
290
 
291
  # Apply emotional modulation
292
- reconstructed = reconstructed * (0.5 + emotional_weight * 0.5)
 
293
 
294
- return incomplete, reconstructed
295
-
296
- def generate_memory_prompt(operation: str, emotion_valence: float) -> str:
297
- """Generate artistic prompts based on memory operation and emotional state"""
298
- base_prompts = {
299
- "wave_memory": "memories flowing like waves in an infinite ocean, ",
300
- "interference": "two waves of memory intersecting and creating patterns, ",
301
- "resonance": "resonating waves of consciousness forming harmonious patterns, ",
302
- "hot_tub_mode": "a safe space for exploring memories, like a warm therapeutic pool, ",
303
- "emotional_resonance": "emotions as colorful waves interacting with memory patterns, ",
304
- "pattern_completion": "fragmented memories being reconstructed into complete patterns, "
305
- }
 
 
 
 
 
 
 
 
 
306
 
307
- emotion_desc = "serene and peaceful" if -20 <= emotion_valence <= 20 else \
308
- "joyful and vibrant" if emotion_valence > 20 else \
309
- "dark and introspective"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
310
 
311
- style = "digital art, abstract, flowing, wave patterns, "
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
312
 
313
- # Add more specific styling based on operation
314
- if operation == "hot_tub_mode":
315
- style += "warm colors, therapeutic atmosphere, "
316
- elif operation == "emotional_resonance":
317
- style += "vibrant colors, emotional energy visualization, "
318
- elif operation == "pattern_completion":
319
- style += "fragmented to whole transition, reconstruction, "
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
320
 
321
- prompt = f"{base_prompts[operation]}{emotion_desc}, {style} ethereal, dreamlike quality"
322
- return prompt
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
323
 
324
- def create_wave_visualization(wave_data: np.ndarray, emotion: EmotionalContext) -> go.Figure:
325
- """Create an interactive 3D visualization of wave data"""
326
- # Get dimensions
327
- n, m = wave_data.shape
328
-
329
- # Create coordinate grids
330
- x = np.linspace(0, 1, m)
331
- y = np.linspace(0, 1, n)
332
- X, Y = np.meshgrid(x, y)
 
 
 
 
 
 
 
 
 
 
 
 
 
333
 
334
- # Get color palette based on emotional state
335
- colors = emotion.get_color_palette()
336
- colorscale = [[0, colors[0]],
337
- [0.25, colors[1]],
338
- [0.5, colors[2]],
339
- [0.75, colors[3]],
340
- [1, colors[4]]]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
341
 
342
- # Create 3D surface plot
343
- fig = go.Figure(data=[go.Surface(
344
- z=wave_data,
345
- x=X,
346
- y=Y,
347
- colorscale=colorscale,
348
- lighting=dict(
349
- ambient=0.6,
350
- diffuse=0.8,
351
- fresnel=0.2,
352
- roughness=0.5,
353
- specular=1.0
354
- ),
355
- contours={
356
- "z": {"show": True, "start": -2, "end": 2, "size": 0.1, "color":"white"}
 
 
 
357
  }
358
- )])
359
-
360
- # Update layout
361
- fig.update_layout(
362
- title=dict(
363
- text="Memory Wave Visualization",
364
- font=dict(size=24, color="#333333")
365
- ),
366
- scene=dict(
367
- xaxis_title="Space",
368
- yaxis_title="Time",
369
- zaxis_title="Amplitude",
370
- aspectratio=dict(x=1, y=1, z=0.8),
371
- camera=dict(
372
- eye=dict(x=1.5, y=1.5, z=1.2)
373
- )
374
- ),
375
- margin=dict(l=0, r=0, b=0, t=30),
376
- template="plotly_white"
377
- )
378
-
379
- return fig
380
 
381
- def create_2d_comparison(wave1: np.ndarray, wave2: np.ndarray,
382
- title1: str, title2: str,
383
- emotion: EmotionalContext) -> go.Figure:
384
- """Create a side-by-side comparison of two wave patterns"""
385
- # Get color palette
386
- colors = emotion.get_color_palette()
387
-
388
- # Create subplots
389
- fig = make_subplots(
390
- rows=1, cols=2,
391
- subplot_titles=(title1, title2),
392
- specs=[[{"type": "heatmap"}, {"type": "heatmap"}]]
393
- )
394
-
395
- # Add heatmaps
396
- fig.add_trace(
397
- go.Heatmap(
398
- z=wave1,
399
- colorscale=[[0, colors[0]], [1, colors[-1]]],
400
- showscale=False
401
- ),
402
- row=1, col=1
403
- )
404
 
405
- fig.add_trace(
406
- go.Heatmap(
407
- z=wave2,
408
- colorscale=[[0, colors[0]], [1, colors[-1]]],
409
- showscale=True
410
- ),
411
- row=1, col=2
 
 
 
 
 
 
 
 
 
 
 
412
  )
413
 
414
- # Update layout
415
- fig.update_layout(
416
- title_text="Memory Pattern Comparison",
417
- height=500,
418
- template="plotly_white"
419
- )
420
 
421
- return fig
422
 
423
- def create_artistic_visualization(prompt: str, seed: int) -> Optional[Image.Image]:
424
- """Create artistic visualization using Stable Diffusion"""
425
- if not STABLE_DIFFUSION_AVAILABLE or pipe is None:
426
- return None
427
-
428
- try:
429
- generator = torch.Generator().manual_seed(seed)
430
- image = pipe(
431
- prompt=prompt,
432
- negative_prompt="text, watermark, signature, blurry, distorted",
433
- guidance_scale=1.5,
434
- num_inference_steps=8,
435
- width=768,
436
- height=768,
437
- generator=generator,
438
- ).images[0]
439
-
440
- return image
441
- except Exception as e:
442
- print(f"Error generating artistic visualization: {e}")
443
- return None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
444
 
445
- def create_emotional_wave_animation(size: int, emotion: EmotionalContext) -> Image.Image:
446
- """Create an animated-like visualization of emotional waves"""
447
- # Create a blank image
448
- width, height = size * 10, size * 10
449
- image = Image.new('RGBA', (width, height), (255, 255, 255, 0))
450
- draw = ImageDraw.Draw(image)
451
-
452
- # Get color palette
453
- colors = emotion.get_color_palette()
454
-
455
- # Calculate wave parameters based on emotional state
456
- valence = emotion.valence.item()
457
- arousal = emotion.arousal.item()
458
-
459
- # Normalize to 0-1 range
460
- valence_norm = (valence + 128) / 255
461
- arousal_norm = arousal / 255
462
-
463
- # Create multiple wave layers
464
- for i in range(5):
465
- # Calculate wave parameters
466
- amplitude = 50 + i * 20 * arousal_norm
467
- frequency = 0.01 + i * 0.005 * (1 + valence_norm)
468
- phase = i * math.pi / 5
469
-
470
- # Select color
471
- color = colors[i % len(colors)]
472
-
473
- # Draw wave
474
- points = []
475
- for x in range(width):
476
- # Calculate y position with multiple sine waves
477
- y = height/2 + amplitude * math.sin(frequency * x + phase)
478
- y += amplitude/2 * math.sin(frequency * 2 * x + phase)
479
- points.append((x, y))
480
-
481
- # Draw wave with varying thickness
482
- for j in range(3):
483
- thickness = 5 - j
484
- draw.line(points, fill=color, width=thickness)
485
-
486
- # Apply blur for smoother appearance
487
- image = image.filter(ImageFilter.GaussianBlur(radius=3))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
488
 
489
- return image
490
 
491
- def quantum_memory_ops(
492
- input_size: int,
493
- operation: str,
494
- emotion_valence: float,
495
- emotion_arousal: float = None,
496
- wave_type: str = "sine",
497
- hot_tub_temp: float = 37.0,
498
- hot_tub_participants: str = "",
499
- generate_art: bool = True,
500
- seed: int = 42
501
- ) -> Tuple[str, go.Figure, go.Figure, Image.Image]:
502
- """Perform quantum-inspired memory operations using Mem|8 concepts."""
503
- # Initialize components
504
- device = "cuda" if torch.cuda.is_available() else "cpu"
505
- emotion = EmotionalContext(device)
506
- emotion.update(emotion_valence, emotion_arousal)
507
-
508
- wave_processor = WaveProcessor(device)
509
-
510
- # Process hot tub participants if provided
511
- if hot_tub_participants:
512
- participants = [p.strip() for p in hot_tub_participants.split(',')]
513
- emotion.activate_hot_tub(hot_tub_temp)
514
- for participant in participants:
515
- emotion.add_hot_tub_participant(participant)
516
-
517
- results = []
518
- wave_viz = None
519
- comparison_viz = None
520
- art_viz = None
521
-
522
- # Add header with emotional context
523
- results.append(f"🌊 Mem|8 Wave Memory Analysis 🌊")
524
- results.append(f"Operation: {operation}")
525
- results.append(f"Wave Type: {wave_type}")
526
- results.append(f"Grid Size: {input_size}x{input_size}")
527
- results.append("")
528
-
529
- if operation == "wave_memory":
530
- # Create memory wave pattern (M = A·exp(iωt-kx)·D·E)
531
- wave = wave_processor.create_wave_pattern(input_size, 2.0, 1.0, wave_type)
 
1
+ #!/usr/bin/env python3
2
+ # -*- coding: utf-8 -*-
3
+ """
4
+ 🌊 Mem|8 OceanMind Visualizer 🧠
5
+ =================================
6
+
7
+ A visually stunning implementation of the Mem|8 wave-based memory architecture.
8
+ This application creates an immersive experience to explore how memories propagate
9
+ and interact like waves in an ocean of consciousness.
10
+
11
+ Created by: Aye & Hue (with Trisha from Accounting keeping the numbers flowing)
12
+ """
13
+
14
  import gradio as gr
15
  import torch
16
  import numpy as np
17
  import matplotlib.pyplot as plt
18
  from matplotlib import cm
 
 
19
  import random
 
20
  import time
21
+ from typing import Tuple, List, Dict, Optional, Union
22
+ import os
23
+ import json
24
+ from datetime import datetime
25
+ import plotly.graph_objects as go
26
+ import plotly.express as px
27
+ from plotly.subplots import make_subplots
28
  import colorsys
 
 
29
 
30
+ # Set seeds for reproducibility (but we'll allow for randomness too!)
31
+ RANDOM_SEED = 42
32
+ torch.manual_seed(RANDOM_SEED)
33
+ np.random.seed(RANDOM_SEED)
34
+ random.seed(RANDOM_SEED)
 
 
35
 
36
+ # Constants
37
+ DEFAULT_GRID_SIZE = 64
38
+ EMOTION_RANGE = (-5, 5) # Range for emotional valence
39
+ MAX_SEED = 999999999 # Maximum seed value for art generation
 
 
 
40
 
41
+ # Try to import Stable Diffusion components
42
+ STABLE_DIFFUSION_AVAILABLE = False
43
  pipe = None
44
+ try:
45
+ from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
46
+ STABLE_DIFFUSION_AVAILABLE = True
 
47
 
48
+ # Initialize Stable Diffusion pipeline
49
  try:
50
+ pipe = DiffusionPipeline.from_pretrained(
51
+ "stabilityai/stable-diffusion-xl-base-1.0",
52
+ torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
53
+ use_safetensors=True,
54
+ variant="fp16" if torch.cuda.is_available() else None
55
+ )
56
+ pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config)
57
+ pipe.to("cuda" if torch.cuda.is_available() else "cpu")
58
+ pipe.enable_model_cpu_offload()
59
+ pipe.enable_vae_slicing()
60
  except Exception as e:
61
+ print(f"Warning: Failed to initialize Stable Diffusion: {e}")
62
+ pipe = None
63
+ except ImportError:
64
+ print("Warning: diffusers package not available. Artistic visualization will be disabled.")
 
 
 
 
 
 
 
 
 
 
 
65
 
66
+ # Create a directory for memory snapshots if it doesn't exist
67
+ MEMORY_DIR = "memory_snapshots"
68
+ os.makedirs(MEMORY_DIR, exist_ok=True)
 
 
 
69
 
70
  class EmotionalContext:
71
+ """
72
+ Implements Mem|8's emotional context structure as described in the paper.
73
+
74
+ Attributes:
75
+ valence (torch.Tensor): Emotional valence (-128 to 127: negative to positive)
76
+ arousal (torch.Tensor): Emotional arousal (0 to 255: intensity level)
77
+ context (torch.Tensor): Contextual flags (16-bit in paper)
78
+ safety (torch.Tensor): Psychological safety indicator
79
+ """
80
  def __init__(self, device="cuda" if torch.cuda.is_available() else "cpu"):
81
  self.device = device
82
  self.valence = torch.zeros(1).to(device) # -128 to 127: negative to positive
83
  self.arousal = torch.zeros(1).to(device) # 0 to 255: intensity level
84
  self.context = torch.zeros(1).to(device) # Contextual flags
85
+ self.safety = torch.ones(1).to(device) # Psychological safety indicator
 
 
 
 
 
 
 
 
 
86
 
87
+ # Track emotional history for visualization
88
+ self.history = {
89
+ 'valence': [],
90
+ 'arousal': [],
91
+ 'timestamps': []
92
+ }
93
+
94
  def update(self, valence: float, arousal: Optional[float] = None):
95
+ """Update emotional context with new values and record in history."""
96
+ self.valence = torch.tensor([valence], device=self.device)
97
 
98
+ # If arousal not provided, calculate based on valence as in the paper
99
  if arousal is None:
100
+ self.arousal = torch.abs(torch.tensor([valence * 2], device=self.device))
101
  else:
102
+ self.arousal = torch.tensor([arousal], device=self.device)
103
 
104
+ # Update history
105
+ self.history['valence'].append(float(self.valence.item()))
106
+ self.history['arousal'].append(float(self.arousal.item()))
107
+ self.history['timestamps'].append(time.time())
108
 
109
+ # Keep history at a reasonable size
110
+ if len(self.history['valence']) > 100:
111
+ self.history['valence'] = self.history['valence'][-100:]
112
+ self.history['arousal'] = self.history['arousal'][-100:]
113
+ self.history['timestamps'] = self.history['timestamps'][-100:]
114
+
115
+ def get_color_mapping(self) -> Tuple[float, float, float]:
116
+ """
117
+ Maps emotional state to RGB color values.
118
 
119
+ Returns:
120
+ Tuple[float, float, float]: RGB color values (0-1 range)
121
+ """
122
+ # Normalize valence to 0-1 range for hue
123
+ norm_valence = (self.valence.item() - EMOTION_RANGE[0]) / (EMOTION_RANGE[1] - EMOTION_RANGE[0])
124
+
125
+ # Normalize arousal to 0-1 range for saturation
126
+ norm_arousal = self.arousal.item() / AROUSAL_RANGE[1]
127
+
128
+ # Convert HSV to RGB (hue from valence, saturation from arousal, value=1)
129
+ rgb = colorsys.hsv_to_rgb(norm_valence, norm_arousal, 1.0)
130
+ return rgb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
131
 
132
+ def __str__(self) -> str:
133
+ """String representation of emotional context."""
134
+ return f"EmotionalContext(valence={self.valence.item():.2f}, arousal={self.arousal.item():.2f})"
 
 
 
 
 
 
 
 
 
135
 
136
+
137
+ class MemoryWave:
138
+ """
139
+ Implements the wave-based memory patterns from Mem|8 paper.
140
+
141
+ This class creates and manipulates wave patterns that represent memories,
142
+ allowing them to propagate, interfere, and resonate as described in the paper.
143
+ """
144
+ def __init__(self,
145
+ size: int = DEFAULT_GRID_SIZE,
146
+ device: str = "cuda" if torch.cuda.is_available() else "cpu"):
147
+ """
148
+ Initialize a memory wave system.
149
+
150
+ Args:
151
+ size: Size of the memory grid (NxN)
152
+ device: Device to use for computations
153
+ """
154
+ self.size = size
155
  self.device = device
156
+ self.grid = torch.zeros((size, size), device=device)
157
+ self.emotion = EmotionalContext(device)
158
+
159
+ # Initialize coordinates for wave calculations
160
+ self.x = torch.linspace(0, 2*np.pi, size, device=device)
161
+ self.y = torch.linspace(0, 2*np.pi, size, device=device)
162
+ self.X, self.Y = torch.meshgrid(self.x, self.y, indexing='ij')
163
+
164
+ # Memory storage for different types
165
+ self.memory_types = {i: torch.zeros((size, size), device=device) for i in range(6)}
166
+
167
+ # History of wave states for animation
168
+ self.history = []
169
 
170
+ def create_wave(self,
171
+ frequency: float,
172
+ amplitude: float,
173
+ phase: float = 0.0,
174
+ direction: str = "radial") -> torch.Tensor:
175
+ """
176
+ Create a wave pattern as described in Mem|8 paper.
177
+
178
+ Args:
179
+ frequency: Wave frequency (Ο‰ in the paper)
180
+ amplitude: Wave amplitude (A in the paper)
181
+ phase: Initial phase offset
182
+ direction: Wave direction pattern ("radial", "linear_x", "linear_y", or "spiral")
183
+
184
+ Returns:
185
+ torch.Tensor: The generated wave pattern
186
+ """
187
+ if direction == "radial":
188
+ # Radial waves emanating from center (like dropping a stone in water)
189
+ center_x, center_y = self.size/2, self.size/2
190
+ distance = torch.sqrt((self.X - center_x)**2 + (self.Y - center_y)**2)
191
+ wave = amplitude * torch.sin(frequency * distance + phase)
192
+
193
+ elif direction == "linear_x":
194
+ # Waves moving along x-axis
195
+ wave = amplitude * torch.sin(frequency * self.X + phase)
196
+
197
+ elif direction == "linear_y":
198
+ # Waves moving along y-axis
199
+ wave = amplitude * torch.sin(frequency * self.Y + phase)
200
+
201
+ elif direction == "spiral":
202
+ # Spiral wave pattern
203
+ center_x, center_y = self.size/2, self.size/2
204
+ distance = torch.sqrt((self.X - center_x)**2 + (self.Y - center_y)**2)
205
+ angle = torch.atan2(self.Y - center_y, self.X - center_x)
206
+ wave = amplitude * torch.sin(frequency * distance + 5 * angle + phase)
207
+
208
  else:
209
+ raise ValueError(f"Unknown direction: {direction}")
210
+
211
+ return wave
 
 
 
 
 
 
 
212
 
213
+ def apply_emotional_modulation(self, wave: torch.Tensor) -> torch.Tensor:
214
+ """
215
+ Apply emotional modulation to a wave pattern as described in the paper.
216
+
217
+ Args:
218
+ wave: The input wave pattern
219
+
220
+ Returns:
221
+ torch.Tensor: Emotionally modulated wave
222
+ """
223
+ # Emotional modulation formula from paper: M = A·exp(iωt-kx)·D·E
224
+ # We implement a simplified version where E is based on valence
225
+ valence_factor = self.emotion.valence / 128 # Normalize to -1 to 1 range
226
+
227
+ # Different modulation based on valence sign
228
+ if valence_factor > 0:
229
+ # Positive emotions enhance wave (amplify)
230
+ emotional_mod = torch.exp(valence_factor * wave)
231
+ else:
232
+ # Negative emotions suppress wave (dampen)
233
+ emotional_mod = 1 / torch.exp(torch.abs(valence_factor) * wave)
234
+
235
+ # Apply modulation
236
+ modulated_wave = wave * emotional_mod
237
+
238
+ return modulated_wave
239
 
240
+ def create_interference(self, wave1: torch.Tensor, wave2: torch.Tensor,
241
+ interference_type: str = "constructive") -> torch.Tensor:
242
+ """
243
+ Create interference between two memory waves.
244
+
245
+ Args:
246
+ wave1: First wave pattern
247
+ wave2: Second wave pattern
248
+ interference_type: Type of interference ("constructive", "destructive", or "resonance")
249
+
250
+ Returns:
251
+ torch.Tensor: The resulting interference pattern
252
+ """
253
+ if interference_type == "constructive":
254
+ # Simple addition for constructive interference
255
+ return wave1 + wave2
256
+
257
+ elif interference_type == "destructive":
258
+ # Subtraction for destructive interference
259
+ return wave1 - wave2
260
+
261
+ elif interference_type == "resonance":
262
+ # Multiplication for resonance
263
+ return wave1 * wave2
264
+
265
+ else:
266
+ raise ValueError(f"Unknown interference type: {interference_type}")
267
 
268
+ def apply_memory_blanket(self, wave: torch.Tensor, threshold: float = 0.5) -> torch.Tensor:
269
+ """
270
+ Apply the memory blanket concept from the paper.
271
+
272
+ The memory blanket acts as an adaptive filter that:
273
+ 1. Catches significant waves (important memories)
274
+ 2. Allows insignificant ripples to fade
275
+
276
+ Args:
277
+ wave: Input wave pattern
278
+ threshold: Importance threshold
279
+
280
+ Returns:
281
+ torch.Tensor: Filtered wave pattern
282
+ """
283
+ # Calculate wave importance (amplitude)
284
+ importance = torch.abs(wave)
285
+
286
+ # Apply threshold filter (memory blanket)
287
+ filtered_wave = wave * (importance > threshold).float()
288
+
289
  return filtered_wave
290
 
291
+ def store_memory(self, wave: torch.Tensor, memory_type: int = 0) -> None:
292
+ """
293
+ Store a wave pattern in the specified memory type.
 
 
 
294
 
295
+ Args:
296
+ wave: Wave pattern to store
297
+ memory_type: Memory type (0-5) as described in the paper
298
+ """
299
+ if memory_type not in self.memory_types:
300
+ raise ValueError(f"Invalid memory type: {memory_type}")
301
+
302
+ # Store the wave pattern
303
+ self.memory_types[memory_type] = wave
 
 
 
 
 
 
 
 
 
 
304
 
305
+ # Add to history for animation
306
+ self.history.append(wave.clone().cpu().numpy())
307
+
308
+ # Keep history at a reasonable size
309
+ if len(self.history) > 100:
310
+ self.history = self.history[-100:]
311
+
312
+ def generate_wave_memory(self,
313
+ emotion_valence: float,
314
+ wave_type: str = "radial",
315
+ frequency: float = 2.0,
316
+ amplitude: float = 1.0) -> Dict:
317
+ """
318
+ Generate a wave memory pattern with emotional context.
319
+
320
+ Args:
321
+ emotion_valence: Emotional valence value
322
+ wave_type: Type of wave pattern
323
+ frequency: Wave frequency
324
+ amplitude: Wave amplitude
325
+
326
+ Returns:
327
+ Dict: Results including wave pattern and metrics
328
+ """
329
+ # Update emotional context
330
+ self.emotion.update(emotion_valence)
331
+
332
+ # Create base wave pattern
333
+ wave = self.create_wave(frequency, amplitude, direction=wave_type)
 
 
 
 
 
 
 
 
 
 
 
334
 
335
  # Apply emotional modulation
336
+ emotional_mod = self.apply_emotional_modulation(wave)
337
+ memory_state = wave * emotional_mod
338
 
339
+ # Store in memory
340
+ self.store_memory(memory_state, memory_type=0)
341
+
342
+ # Calculate metrics
343
+ metrics = {
344
+ "shape": memory_state.shape,
345
+ "emotional_modulation": emotional_mod.mean().item(),
346
+ "memory_coherence": torch.linalg.norm(memory_state).item(),
347
+ "max_amplitude": memory_state.max().item(),
348
+ "min_amplitude": memory_state.min().item(),
349
+ "mean_amplitude": memory_state.mean().item(),
350
+ }
351
+
352
+ return {
353
+ "wave": memory_state.cpu().numpy(),
354
+ "metrics": metrics,
355
+ "emotion": {
356
+ "valence": self.emotion.valence.item(),
357
+ "arousal": self.emotion.arousal.item(),
358
+ }
359
+ }
360
 
361
+ def generate_interference_pattern(self,
362
+ emotion_valence: float,
363
+ interference_type: str = "constructive",
364
+ freq1: float = 2.0,
365
+ freq2: float = 3.0,
366
+ amp1: float = 1.0,
367
+ amp2: float = 0.5) -> Dict:
368
+ """
369
+ Generate interference between two memory waves.
370
+
371
+ Args:
372
+ emotion_valence: Emotional valence value
373
+ interference_type: Type of interference
374
+ freq1: Frequency of first wave
375
+ freq2: Frequency of second wave
376
+ amp1: Amplitude of first wave
377
+ amp2: Amplitude of second wave
378
+
379
+ Returns:
380
+ Dict: Results including interference pattern and metrics
381
+ """
382
+ # Update emotional context
383
+ self.emotion.update(emotion_valence)
384
+
385
+ # Create two wave patterns
386
+ wave1 = self.create_wave(freq1, amp1, direction="radial")
387
+ wave2 = self.create_wave(freq2, amp2, direction="spiral")
388
+
389
+ # Create interference pattern
390
+ interference = self.create_interference(wave1, wave2, interference_type)
391
+
392
+ # Apply emotional weighting
393
+ emotional_weight = torch.sigmoid(self.emotion.valence/128) * interference
394
+
395
+ # Store in memory
396
+ self.store_memory(emotional_weight, memory_type=1)
397
+
398
+ # Calculate metrics
399
+ metrics = {
400
+ "pattern_strength": torch.max(emotional_weight).item(),
401
+ "emotional_weight": self.emotion.valence.item()/128,
402
+ "interference_type": interference_type,
403
+ "wave1_freq": freq1,
404
+ "wave2_freq": freq2,
405
+ }
406
+
407
+ return {
408
+ "wave": emotional_weight.cpu().numpy(),
409
+ "metrics": metrics,
410
+ "emotion": {
411
+ "valence": self.emotion.valence.item(),
412
+ "arousal": self.emotion.arousal.item(),
413
+ }
414
+ }
415
 
416
+ def generate_resonance_pattern(self,
417
+ emotion_valence: float,
418
+ base_freq: float = 2.0,
419
+ resonance_strength: float = 0.5) -> Dict:
420
+ """
421
+ Generate emotional resonance patterns as described in the paper.
422
+
423
+ Args:
424
+ emotion_valence: Emotional valence value
425
+ base_freq: Base frequency
426
+ resonance_strength: Strength of resonance effect
427
+
428
+ Returns:
429
+ Dict: Results including resonance pattern and metrics
430
+ """
431
+ # Update emotional context
432
+ self.emotion.update(emotion_valence)
433
+
434
+ # Calculate resonance frequency based on emotional state
435
+ resonance_freq = 1.0 + torch.sigmoid(self.emotion.valence/128)
436
+
437
+ # Create wave patterns
438
+ base_wave = self.create_wave(base_freq, 1.0, direction="radial")
439
+ resonant_wave = self.create_wave(resonance_freq.item(), 1.0, direction="spiral")
440
+
441
+ # Create resonance
442
+ resonance = base_wave * resonant_wave * resonance_strength
443
+
444
+ # Store in memory
445
+ self.store_memory(resonance, memory_type=2)
446
+
447
+ # Calculate metrics
448
+ metrics = {
449
+ "resonance_frequency": resonance_freq.item(),
450
+ "pattern_energy": torch.sum(resonance**2).item(),
451
+ "base_frequency": base_freq,
452
+ "resonance_strength": resonance_strength,
453
+ }
454
+
455
+ return {
456
+ "wave": resonance.cpu().numpy(),
457
+ "metrics": metrics,
458
+ "emotion": {
459
+ "valence": self.emotion.valence.item(),
460
+ "arousal": self.emotion.arousal.item(),
461
+ }
462
+ }
463
 
464
+ def generate_memory_reconstruction(self,
465
+ emotion_valence: float,
466
+ corruption_level: float = 0.3) -> Dict:
467
+ """
468
+ Generate memory reconstruction as described in the paper.
469
+
470
+ This simulates how Mem|8 reconstructs complete memories from partial patterns,
471
+ similar to how digital cameras reconstruct full-color images from partial sensor data.
472
+
473
+ Args:
474
+ emotion_valence: Emotional valence value
475
+ corruption_level: Level of corruption in the original memory (0-1)
476
+
477
+ Returns:
478
+ Dict: Results including original, corrupted and reconstructed patterns
479
+ """
480
+ # Update emotional context
481
+ self.emotion.update(emotion_valence)
482
+
483
+ # Create an original "memory" pattern
484
+ original = self.create_wave(2.0, 1.0, direction="radial")
485
+
486
+ # Create a corruption mask (1 = keep, 0 = corrupt)
487
+ mask = torch.rand_like(original) > corruption_level
488
+
489
+ # Apply corruption
490
+ corrupted = original * mask
491
+
492
+ # Reconstruct using a simple interpolation
493
+ # In a real implementation, this would use more sophisticated algorithms
494
+ reconstructed = torch.zeros_like(corrupted)
495
+
496
+ # Simple 3x3 kernel averaging for missing values
497
+ for i in range(1, self.size-1):
498
+ for j in range(1, self.size-1):
499
+ if not mask[i, j]:
500
+ # If this point is corrupted, reconstruct it
501
+ neighbors = [
502
+ original[i-1, j-1] if mask[i-1, j-1] else 0,
503
+ original[i-1, j] if mask[i-1, j] else 0,
504
+ original[i-1, j+1] if mask[i-1, j+1] else 0,
505
+ original[i, j-1] if mask[i, j-1] else 0,
506
+ original[i, j+1] if mask[i, j+1] else 0,
507
+ original[i+1, j-1] if mask[i+1, j-1] else 0,
508
+ original[i+1, j] if mask[i+1, j] else 0,
509
+ original[i+1, j+1] if mask[i+1, j+1] else 0,
510
+ ]
511
+ valid_neighbors = [n for n in neighbors if n != 0]
512
+ if valid_neighbors:
513
+ reconstructed[i, j] = sum(valid_neighbors) / len(valid_neighbors)
514
+ else:
515
+ # If this point is not corrupted, keep original value
516
+ reconstructed[i, j] = original[i, j]
517
+
518
+ # Apply emotional coloring to reconstruction
519
+ emotional_factor = torch.sigmoid(self.emotion.valence/64)
520
+ colored_reconstruction = reconstructed * emotional_factor
521
+
522
+ # Store in memory
523
+ self.store_memory(colored_reconstruction, memory_type=3)
524
+
525
+ # Calculate metrics
526
+ reconstruction_error = torch.mean((original - reconstructed)**2).item()
527
+ emotional_influence = emotional_factor.item()
528
+
529
+ metrics = {
530
+ "corruption_level": corruption_level,
531
+ "reconstruction_error": reconstruction_error,
532
+ "emotional_influence": emotional_influence,
533
+ "reconstruction_fidelity": 1.0 - reconstruction_error,
534
+ }
535
+
536
+ return {
537
+ "original": original.cpu().numpy(),
538
+ "corrupted": corrupted.cpu().numpy(),
539
+ "reconstructed": reconstructed.cpu().numpy(),
540
+ "colored": colored_reconstruction.cpu().numpy(),
541
+ "metrics": metrics,
542
+ "emotion": {
543
+ "valence": self.emotion.valence.item(),
544
+ "arousal": self.emotion.arousal.item(),
545
+ }
546
+ }
547
 
548
+ def generate_hot_tub_simulation(self,
549
+ emotion_valence: float,
550
+ comfort_level: float = 0.8,
551
+ exploration_depth: float = 0.5) -> Dict:
552
+ """
553
+ Simulate the Hot Tub Mode concept from the paper.
554
+
555
+ Hot Tub Mode provides a safe space for exploring alternate paths and difficult scenarios
556
+ without judgment or permanent consequence.
557
+
558
+ Args:
559
+ emotion_valence: Emotional valence value
560
+ comfort_level: Safety threshold (0-1)
561
+ exploration_depth: How deep to explore alternate patterns (0-1)
562
+
563
+ Returns:
564
+ Dict: Results including safe exploration patterns and metrics
565
+ """
566
+ # Update emotional context
567
+ self.emotion.update(emotion_valence)
568
+
569
+ # Create base safe space wave (calm, regular pattern)
570
+ safe_space = self.create_wave(1.0, 0.5, direction="radial")
571
+
572
+ # Create exploration waves with increasing complexity
573
+ exploration_waves = []
574
+ for i in range(3): # Three levels of exploration
575
+ freq = 1.0 + (i + 1) * exploration_depth
576
+ wave = self.create_wave(freq, 0.5 * (1 - i * 0.2), direction="spiral")
577
+ exploration_waves.append(wave)
578
+
579
+ # Combine waves based on comfort level
580
+ combined = safe_space * comfort_level
581
+ for i, wave in enumerate(exploration_waves):
582
+ # Reduce influence of more complex patterns based on comfort
583
+ influence = comfort_level * (1 - i * 0.3)
584
+ combined += wave * influence
585
+
586
+ # Apply emotional safety modulation (S = Ξ±C + Ξ²E + Ξ³D + Ξ΄L from paper)
587
+ alpha = 0.4 # Comfort weight
588
+ beta = 0.3 # Emotional weight
589
+ gamma = 0.2 # Divergence weight
590
+ delta = 0.1 # Lifeguard weight
591
+
592
+ comfort_factor = torch.sigmoid(torch.tensor(comfort_level * 5))
593
+ emotional_factor = torch.sigmoid(self.emotion.valence/128 + 0.5)
594
+ divergence = torch.abs(combined - safe_space).mean()
595
+ lifeguard_signal = torch.sigmoid(-divergence + comfort_level)
596
+
597
+ safety_score = (alpha * comfort_factor +
598
+ beta * emotional_factor +
599
+ gamma * (1 - divergence) +
600
+ delta * lifeguard_signal)
601
+
602
+ # Apply safety modulation
603
+ safe_exploration = combined * safety_score
604
+
605
+ # Store in memory (if safe enough)
606
+ if safety_score > 0.7:
607
+ self.store_memory(safe_exploration, memory_type=4)
608
+
609
+ metrics = {
610
+ "safety_score": safety_score.item(),
611
+ "comfort_level": comfort_level,
612
+ "emotional_safety": emotional_factor.item(),
613
+ "divergence": divergence.item(),
614
+ "lifeguard_signal": lifeguard_signal.item(),
615
+ }
616
+
617
+ return {
618
+ "safe_space": safe_space.cpu().numpy(),
619
+ "exploration": combined.cpu().numpy(),
620
+ "safe_result": safe_exploration.cpu().numpy(),
621
+ "metrics": metrics,
622
+ "emotion": {
623
+ "valence": self.emotion.valence.item(),
624
+ "arousal": self.emotion.arousal.item(),
625
+ }
626
+ }
627
 
628
+ def visualize_wave_pattern(self, wave: np.ndarray, title: str = "Wave Pattern") -> go.Figure:
629
+ """Create an interactive 3D visualization of a wave pattern."""
630
+ fig = go.Figure(data=[
631
+ go.Surface(
632
+ z=wave,
633
+ colorscale='viridis',
634
+ showscale=True
635
+ )
636
+ ])
637
+
638
+ fig.update_layout(
639
+ title=title,
640
+ scene=dict(
641
+ xaxis_title="X",
642
+ yaxis_title="Y",
643
+ zaxis_title="Amplitude"
644
+ ),
645
+ width=600,
646
+ height=600
647
+ )
648
+
649
+ return fig
650
 
651
+ def visualize_emotional_history(self) -> go.Figure:
652
+ """Create a visualization of emotional history."""
653
+ fig = make_subplots(rows=2, cols=1,
654
+ subplot_titles=("Emotional Valence", "Emotional Arousal"))
655
+
656
+ # Convert timestamps to relative time
657
+ start_time = min(self.emotion.history['timestamps'])
658
+ times = [(t - start_time) for t in self.emotion.history['timestamps']]
659
+
660
+ # Plot valence
661
+ fig.add_trace(
662
+ go.Scatter(x=times, y=self.emotion.history['valence'],
663
+ mode='lines+markers',
664
+ name='Valence'),
665
+ row=1, col=1
666
+ )
667
+
668
+ # Plot arousal
669
+ fig.add_trace(
670
+ go.Scatter(x=times, y=self.emotion.history['arousal'],
671
+ mode='lines+markers',
672
+ name='Arousal'),
673
+ row=2, col=1
674
+ )
675
+
676
+ fig.update_layout(
677
+ height=800,
678
+ showlegend=True,
679
+ title_text="Emotional History"
680
+ )
681
+
682
+ return fig
683
 
684
+ def save_memory_snapshot(self, operation: str) -> str:
685
+ """Save current memory state to disk."""
686
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
687
+ filename = f"memory_{operation}_{timestamp}.json"
688
+ filepath = os.path.join(MEMORY_DIR, filename)
689
+
690
+ # Prepare data for saving
691
+ data = {
692
+ 'operation': operation,
693
+ 'timestamp': timestamp,
694
+ 'emotion': {
695
+ 'valence': float(self.emotion.valence.item()),
696
+ 'arousal': float(self.emotion.arousal.item())
697
+ },
698
+ 'memory_types': {
699
+ str(k): v.cpu().numpy().tolist()
700
+ for k, v in self.memory_types.items()
701
+ }
702
  }
703
+
704
+ # Save to file
705
+ with open(filepath, 'w') as f:
706
+ json.dump(data, f)
707
+
708
+ return filepath
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
709
 
710
+ def generate_memory_prompt(operation: str, emotion_valence: float) -> str:
711
+ """Generate an artistic prompt based on the memory operation and emotional context."""
712
+
713
+ # Base prompts for each operation type
714
+ operation_prompts = {
715
+ "wave_memory": "A serene ocean of consciousness with rippling waves of memory, ",
716
+ "interference": "Multiple waves of thought intersecting and creating intricate patterns, ",
717
+ "resonance": "Harmonious waves of memory resonating with emotional energy, ",
718
+ "reconstruction": "Fragments of memory waves reforming into a complete pattern, ",
719
+ "hot_tub": "A safe sanctuary of gentle memory waves with healing energy, "
720
+ }
 
 
 
 
 
 
 
 
 
 
 
 
721
 
722
+ # Emotional modifiers based on valence
723
+ if emotion_valence < -3:
724
+ emotion_desc = "dark and turbulent, with deep indigo and violet hues, expressing profound melancholy"
725
+ elif emotion_valence < -1:
726
+ emotion_desc = "muted and somber, with cool blues and grays, showing gentle sadness"
727
+ elif emotion_valence < 1:
728
+ emotion_desc = "balanced and neutral, with soft pastels, reflecting calm contemplation"
729
+ elif emotion_valence < 3:
730
+ emotion_desc = "warm and uplifting, with golden yellows and soft oranges, radiating joy"
731
+ else:
732
+ emotion_desc = "brilliant and ecstatic, with vibrant rainbow colors, bursting with happiness"
733
+
734
+ # Artistic style modifiers
735
+ style = (
736
+ "digital art in the style of a quantum visualization, "
737
+ "highly detailed, smooth gradients, "
738
+ "abstract yet meaningful, "
739
+ "inspired by neural networks and consciousness"
740
  )
741
 
742
+ # Combine all elements
743
+ base_prompt = operation_prompts.get(operation, operation_prompts["wave_memory"])
744
+ prompt = f"{base_prompt}{emotion_desc}, {style}"
 
 
 
745
 
746
+ return prompt
747
 
748
+ def create_interface():
749
+ """Create the Gradio interface for the Mem|8 Wave Memory Explorer."""
750
+ memory_wave = MemoryWave()
751
+
752
+ def process_memory_operation(
753
+ operation: str,
754
+ emotion_valence: float,
755
+ grid_size: int = DEFAULT_GRID_SIZE,
756
+ comfort_level: float = 0.8,
757
+ exploration_depth: float = 0.5,
758
+ generate_art: bool = True,
759
+ seed: int = 42
760
+ ) -> Tuple[str, go.Figure, go.Figure, Optional[np.ndarray]]:
761
+ """Process a memory operation and return visualizations."""
762
+
763
+ # Resize grid if needed
764
+ if grid_size != memory_wave.size:
765
+ memory_wave.__init__(size=grid_size)
766
+
767
+ # Process based on operation type
768
+ if operation == "wave_memory":
769
+ result = memory_wave.generate_wave_memory(emotion_valence)
770
+ wave_title = "Wave Memory Pattern"
771
+ wave_data = result["wave"]
772
+
773
+ elif operation == "interference":
774
+ result = memory_wave.generate_interference_pattern(emotion_valence)
775
+ wave_title = "Interference Pattern"
776
+ wave_data = result["wave"]
777
+
778
+ elif operation == "resonance":
779
+ result = memory_wave.generate_resonance_pattern(emotion_valence)
780
+ wave_title = "Resonance Pattern"
781
+ wave_data = result["wave"]
782
+
783
+ elif operation == "reconstruction":
784
+ result = memory_wave.generate_memory_reconstruction(emotion_valence)
785
+ wave_title = "Memory Reconstruction"
786
+ wave_data = result["reconstructed"]
787
+
788
+ elif operation == "hot_tub":
789
+ result = memory_wave.generate_hot_tub_simulation(
790
+ emotion_valence, comfort_level, exploration_depth
791
+ )
792
+ wave_title = "Hot Tub Exploration"
793
+ wave_data = result["safe_result"]
794
+
795
+ # Create visualizations
796
+ wave_plot = memory_wave.visualize_wave_pattern(wave_data, wave_title)
797
+ emotion_plot = memory_wave.visualize_emotional_history()
798
+
799
+ # Generate artistic visualization if requested
800
+ art_output = None
801
+ if generate_art and STABLE_DIFFUSION_AVAILABLE and pipe is not None:
802
+ prompt = generate_memory_prompt(operation, emotion_valence)
803
+ generator = torch.Generator().manual_seed(seed)
804
+ art_output = pipe(
805
+ prompt=prompt,
806
+ negative_prompt="text, watermark, signature, blurry, distorted",
807
+ guidance_scale=1.5,
808
+ num_inference_steps=8,
809
+ width=768,
810
+ height=768,
811
+ generator=generator,
812
+ ).images[0]
813
+
814
+ # Format metrics for display
815
+ metrics = result["metrics"]
816
+ metrics_str = "πŸ“Š Analysis Results:\n\n"
817
+ for key, value in metrics.items():
818
+ metrics_str += f"β€’ {key.replace('_', ' ').title()}: {value:.4f}\n"
819
+
820
+ metrics_str += f"\n🎭 Emotional Context:\n"
821
+ metrics_str += f"β€’ Valence: {result['emotion']['valence']:.2f}\n"
822
+ metrics_str += f"β€’ Arousal: {result['emotion']['arousal']:.2f}\n"
823
+
824
+ # Save memory snapshot
825
+ snapshot_path = memory_wave.save_memory_snapshot(operation)
826
+ metrics_str += f"\nπŸ’Ύ Memory snapshot saved: {snapshot_path}"
827
+
828
+ return metrics_str, wave_plot, emotion_plot, art_output
829
 
830
+ # Create the interface
831
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue="purple", secondary_hue="blue")) as demo:
832
+ gr.Markdown("""
833
+ # 🌊 Mem|8 Wave Memory Explorer
834
+
835
+ Welcome to 8b.is's memory ocean demonstration! This showcase implements concepts from our Mem|8
836
+ wave-based memory architecture paper, visualizing how memories propagate and interact like waves
837
+ in an ocean of consciousness.
838
+
839
+ > "Memory is not a storage unit, but a living ocean of waves" - Mem|8 Paper
840
+ """)
841
+
842
+ with gr.Row():
843
+ with gr.Column(scale=1):
844
+ operation_input = gr.Radio(
845
+ ["wave_memory", "interference", "resonance", "reconstruction", "hot_tub"],
846
+ label="Memory Operation",
847
+ value="wave_memory",
848
+ info="Select the type of memory operation to visualize"
849
+ )
850
+
851
+ emotion_input = gr.Slider(
852
+ minimum=EMOTION_RANGE[0],
853
+ maximum=EMOTION_RANGE[1],
854
+ value=0,
855
+ step=1,
856
+ label="Emotional Valence",
857
+ info="Emotional context from negative to positive"
858
+ )
859
+
860
+ grid_size = gr.Slider(
861
+ minimum=16,
862
+ maximum=128,
863
+ value=DEFAULT_GRID_SIZE,
864
+ step=16,
865
+ label="Memory Grid Size"
866
+ )
867
+
868
+ with gr.Accordion("Advanced Settings", open=False):
869
+ comfort_level = gr.Slider(
870
+ minimum=0.0,
871
+ maximum=1.0,
872
+ value=0.8,
873
+ label="Comfort Level",
874
+ info="Safety threshold for Hot Tub Mode"
875
+ )
876
+
877
+ exploration_depth = gr.Slider(
878
+ minimum=0.0,
879
+ maximum=1.0,
880
+ value=0.5,
881
+ label="Exploration Depth",
882
+ info="How deep to explore in Hot Tub Mode"
883
+ )
884
+
885
+ generate_art = gr.Checkbox(
886
+ label="Generate Artistic Visualization",
887
+ value=True,
888
+ info="Use Stable Diffusion to create artistic representations"
889
+ )
890
+
891
+ seed = gr.Slider(
892
+ label="Art Generation Seed",
893
+ minimum=0,
894
+ maximum=MAX_SEED,
895
+ step=1,
896
+ value=42
897
+ )
898
+
899
+ run_btn = gr.Button("Generate Memory Wave", variant="primary")
900
+
901
+ with gr.Column(scale=2):
902
+ output_text = gr.Textbox(label="Analysis Results", lines=10)
903
+
904
+ with gr.Row():
905
+ wave_plot = gr.Plot(label="Wave Pattern")
906
+ emotion_plot = gr.Plot(label="Emotional History")
907
+
908
+ art_output = gr.Image(label="Artistic Visualization", visible=STABLE_DIFFUSION_AVAILABLE)
909
+
910
+ # Set up event handlers
911
+ run_btn.click(
912
+ process_memory_operation,
913
+ inputs=[
914
+ operation_input,
915
+ emotion_input,
916
+ grid_size,
917
+ comfort_level,
918
+ exploration_depth,
919
+ generate_art,
920
+ seed
921
+ ],
922
+ outputs=[output_text, wave_plot, emotion_plot, art_output]
923
+ )
924
+
925
+ gr.Markdown("""
926
+ ### 🧠 Understanding Wave Memory
927
+
928
+ This demo visualizes key concepts from our Mem|8 paper:
929
+ 1. **Wave Memory**: Memories as propagating waves with emotional modulation
930
+ 2. **Interference**: How different memories interact and combine
931
+ 3. **Resonance**: Emotional resonance patterns in memory formation
932
+ 4. **Reconstruction**: How memories are rebuilt from partial patterns
933
+ 5. **Hot Tub Mode**: Safe exploration of memory patterns
934
+
935
+ The visualization shows mathematical wave patterns, emotional history, and artistic
936
+ interpretations of how memories flow through our consciousness.
937
+
938
+ All computations are accelerated using Hugging Face's Zero GPU technology!
939
+ """)
940
 
941
+ return demo
942
 
943
+ if __name__ == "__main__":
944
+ demo = create_interface()
945
+ demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,7 +1,9 @@
1
- gradio>=4.19.2
2
- torch>=2.2.0
3
  numpy>=1.24.0
4
- spaces>=0.19.4
 
 
 
5
  diffusers>=0.25.0
6
  transformers>=4.37.0
7
- accelerate>=0.27.0
 
 
 
 
1
  numpy>=1.24.0
2
+ torch>=2.0.0
3
+ gradio>=4.0.0
4
+ plotly>=5.18.0
5
+ matplotlib>=3.8.0
6
  diffusers>=0.25.0
7
  transformers>=4.37.0
8
+ accelerate>=0.27.0
9
+ scipy>=1.11.0