GenAIDevTOProd committed
Commit 59d1fd6 · verified · 1 Parent(s): 05d32d7

Upload app.py

Files changed (1)
  1. app.py +520 -0
app.py ADDED
@@ -0,0 +1,520 @@
# -*- coding: utf-8 -*-
"""app.py

Automatically generated by Colab.

Original file is located at
https://colab.research.google.com/drive/1QIEwA7FDPNIgdUKfLyRF4K3Im9CjkadN

Logistic Map Equation: x_{n+1} = r * x_n * (1 - x_n)

- x_n is the current state (a number between 0 and 1).
- x_{n+1} is the next value in the sequence.
- r is the growth rate parameter.

This block:

- Introduces the logistic map function
- Lets us generate sequences with different r values
- Plots them to visually understand convergence, cycles, and chaos
"""

import numpy as np
import matplotlib.pyplot as plt
import random

# Define the logistic map function
def logistic_map(x0: float, r: float, n: int = 100) -> np.ndarray:
    """
    Generates a logistic map sequence.

    Args:
        x0 (float): Initial value (between 0 and 1).
        r (float): Growth rate parameter (between 0 and 4).
        n (int): Number of time steps.

    Returns:
        np.ndarray: Sequence of logistic map values.
    """
    seq = np.zeros(n)
    seq[0] = x0
    for i in range(1, n):
        seq[i] = r * seq[i - 1] * (1 - seq[i - 1])
    return seq

# Plot logistic map sequences for different r values
def plot_logistic_map_examples(x0: float = 0.51, n: int = 100):
    """
    Plots logistic map sequences for several r values to visualize behavior.

    Args:
        x0 (float): Initial value.
        n (int): Number of iterations.
    """
    r_values = [2.5, 3.2, 3.5, 3.9, 4.0]
    plt.figure(figsize=(12, 8))

    for i, r in enumerate(r_values, 1):
        seq = logistic_map(x0, r, n)
        plt.subplot(3, 2, i)
        plt.plot(seq, label=f"r = {r}")
        plt.title(f"Logistic Map (r = {r})")
        plt.xlabel("Time Step")
        plt.ylabel("x")
        plt.grid(True)
        plt.legend()

    plt.tight_layout()
    plt.show()

# 🔍 Run the plot function to see different behaviors
plot_logistic_map_examples()

"""- Low r (e.g., 2.5) = stable
- Mid r (e.g., 3.3) = periodic
- High r (e.g., 3.8 – 4.0) = chaotic

Next, we:

- Generate synthetic sequences using random r values
- Label each sequence as:
  - 0 = stable (low r)
  - 1 = periodic (mid r)
  - 2 = chaotic (high r)
- Create a full dataset we can later feed into a classifier
"""

from typing import Tuple

# Label assignment based on r value
def label_from_r(r: float) -> int:
    """
    Assigns a regime label based on the value of r.

    Args:
        r (float): Growth rate.

    Returns:
        int: Label (0 = stable, 1 = periodic, 2 = chaotic)
    """
    if r < 3.0:
        return 0  # Stable regime
    elif 3.0 <= r < 3.57:
        return 1  # Periodic regime
    else:
        return 2  # Chaotic regime

# Create one labeled sequence
def generate_labeled_sequence(n: int = 100) -> Tuple[np.ndarray, int]:
    """
    Generates a single logistic map sequence and its regime label.

    Args:
        n (int): Sequence length.

    Returns:
        Tuple: (sequence, label)
    """
    r = round(random.uniform(2.5, 4.0), 4)
    x0 = random.uniform(0.1, 0.9)
    sequence = logistic_map(x0, r, n)
    label = label_from_r(r)
    return sequence, label

# Generate a full dataset
def generate_dataset(num_samples: int = 1000, n: int = 100) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generates a dataset of logistic sequences with regime labels.

    Args:
        num_samples (int): Number of sequences to generate.
        n (int): Length of each sequence.

    Returns:
        Tuple[np.ndarray, np.ndarray]: X (sequences), y (labels)
    """
    X, y = [], []

    for _ in range(num_samples):
        sequence, label = generate_labeled_sequence(n)
        X.append(sequence)
        y.append(label)

    return np.array(X), np.array(y)

# Example: Generate a small dataset and view label counts
X, y = generate_dataset(num_samples=500, n=100)

# Check class distribution
import collections
print("Label distribution:", collections.Counter(y))

"""Used controlled r ranges to simulate different market regimes

Created 500 synthetic sequences (X) and regime labels (y)

Now we can visualize, split, and train on this dataset

Visualize:

- Randomly sample from X, y
- Plot sequences grouped by class (0 = stable, 1 = periodic, 2 = chaotic)

This helps us verify that the labels match the visual behavior
"""

# Helper: Plot N random sequences for a given class
def plot_class_samples(X: np.ndarray, y: np.ndarray, target_label: int, n_samples: int = 5):
    """
    Plots sample sequences from a specified class.

    Args:
        X (np.ndarray): Dataset of sequences.
        y (np.ndarray): Labels (0=stable, 1=periodic, 2=chaotic).
        target_label (int): Class to visualize.
        n_samples (int): Number of sequences to plot.
    """
    indices = np.where(y == target_label)[0]
    # Guard against classes with fewer than n_samples members
    chosen = np.random.choice(indices, min(n_samples, len(indices)), replace=False)

    plt.figure(figsize=(12, 6))
    for i, idx in enumerate(chosen):
        plt.plot(X[idx], label=f"Sample {i+1}")

    regime_name = ["Stable", "Periodic", "Chaotic"][target_label]
    plt.title(f"{regime_name} Regime Samples (Label = {target_label})")
    plt.xlabel("Time Step")
    plt.ylabel("x")
    plt.grid(True)
    plt.legend()
    plt.show()

# View class 0 (stable)
plot_class_samples(X, y, target_label=0)

# View class 1 (periodic)
plot_class_samples(X, y, target_label=1)

# View class 2 (chaotic)
plot_class_samples(X, y, target_label=2)

"""Stable: sequences that flatten out

Periodic: repeating waveforms (2-, 4-, or 8-point cycles)

Chaotic: no repeating pattern, jittery

Each of these sequences looks completely different, even though they are all
generated by the same equation. No fixed pattern. No periodic rhythm. Just
deterministic unpredictability.

But it's not random; it's chaotic: sensitive to initial conditions and governed
by internal structure (nonlinear dynamics). The sketch below illustrates that
sensitivity.
"""
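
# Added illustration (not in the original notebook): two chaotic runs whose
# starting points differ by only 1e-6 track each other briefly, then diverge.
seq_a = logistic_map(0.4, 3.95, 60)
seq_b = logistic_map(0.4 + 1e-6, 3.95, 60)

plt.figure(figsize=(10, 3))
plt.plot(seq_a, label="x0 = 0.4")
plt.plot(seq_b, label="x0 = 0.4 + 1e-6", linestyle="--")
plt.title("Sensitivity to Initial Conditions (r = 3.95)")
plt.xlabel("Time Step")
plt.ylabel("x")
plt.legend()
plt.show()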

"""Next, prepare the data for training:

- Split X, y into training and testing sets
- Normalize (optional, but improves convergence)
- Convert to PyTorch tensors
- Create DataLoaders for training
"""

import torch
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

# Step 1: Split the dataset
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, stratify=y, random_state=42
)

# Step 2: Normalize sequences (standardization: mean=0, std=1 per time step)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)  # Fit only on train
X_test_scaled = scaler.transform(X_test)

# Step 3: Convert to PyTorch tensors
X_train_tensor = torch.tensor(X_train_scaled, dtype=torch.float32)
y_train_tensor = torch.tensor(y_train, dtype=torch.long)

X_test_tensor = torch.tensor(X_test_scaled, dtype=torch.float32)
y_test_tensor = torch.tensor(y_test, dtype=torch.long)

# Step 4: Create TensorDatasets and DataLoaders
batch_size = 64

train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
test_dataset = TensorDataset(X_test_tensor, y_test_tensor)

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size)
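
# Added shape check: 500 samples with test_size=0.2 gives 400 train / 100 test
# sequences, each of length 100.
print(X_train_tensor.shape, y_train_tensor.shape)  # torch.Size([400, 100]) torch.Size([400])
print(X_test_tensor.shape, y_test_tensor.shape)    # torch.Size([100, 100]) torch.Size([100])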

"""This CNN will:

- Take a 1D time series (length 100)
- Apply temporal convolutions to learn patterns
- Use global pooling to summarize features
- Output one of 3 regime classes
"""

import torch.nn as nn
import torch.nn.functional as F

# 1D CNN model for sequence classification
class ChaosCNN(nn.Module):
    def __init__(self, input_length=100, num_classes=3):
        super(ChaosCNN, self).__init__()

        # Feature extractors
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=32, kernel_size=5, padding=2)
        self.bn1 = nn.BatchNorm1d(32)

        self.conv2 = nn.Conv1d(in_channels=32, out_channels=64, kernel_size=5, padding=2)
        self.bn2 = nn.BatchNorm1d(64)

        # Global average pooling; this makes the model length-agnostic, so
        # input_length is kept only for documentation
        self.global_pool = nn.AdaptiveAvgPool1d(1)  # Output shape: (batch_size, channels, 1)

        # Final classifier
        self.fc = nn.Linear(64, num_classes)

    def forward(self, x):
        # x shape: (batch_size, sequence_length)
        x = x.unsqueeze(1)  # Add channel dim: (batch_size, 1, sequence_length)

        x = F.relu(self.bn1(self.conv1(x)))  # (batch_size, 32, seq_len)
        x = F.relu(self.bn2(self.conv2(x)))  # (batch_size, 64, seq_len)

        x = self.global_pool(x).squeeze(2)  # (batch_size, 64)
        out = self.fc(x)                    # (batch_size, num_classes)
        return out

"""Conv1d: extracts local patterns across the time dimension

BatchNorm1d: stabilizes training and speeds up convergence

AdaptiveAvgPool1d: summarizes the sequence into global stats

Linear: final decision layer for 3-class classification

A quick shape check on a dummy batch follows below.
"""

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = ChaosCNN().to(device)

# Define loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import seaborn as sns

# Training function
def train_model(model, train_loader, test_loader, criterion, optimizer, device, epochs=15):
    train_losses, test_accuracies = [], []

    for epoch in range(epochs):
        model.train()
        running_loss = 0.0

        for X_batch, y_batch in train_loader:
            X_batch, y_batch = X_batch.to(device), y_batch.to(device)

            optimizer.zero_grad()
            outputs = model(X_batch)
            loss = criterion(outputs, y_batch)
            loss.backward()
            optimizer.step()

            running_loss += loss.item() * X_batch.size(0)

        avg_loss = running_loss / len(train_loader.dataset)
        train_losses.append(avg_loss)

        # Evaluation after each epoch
        model.eval()
        all_preds, all_labels = [], []

        with torch.no_grad():
            for X_batch, y_batch in test_loader:
                X_batch = X_batch.to(device)
                outputs = model(X_batch)
                preds = outputs.argmax(dim=1).cpu().numpy()
                all_preds.extend(preds)
                all_labels.extend(y_batch.numpy())

        acc = accuracy_score(all_labels, all_preds)
        test_accuracies.append(acc)

        print(f"Epoch {epoch+1}/{epochs} - Loss: {avg_loss:.4f} - Test Accuracy: {acc:.4f}")

    return train_losses, test_accuracies

# Train the model
train_losses, test_accuracies = train_model(
    model, train_loader, test_loader, criterion, optimizer, device, epochs=15
)

plt.figure(figsize=(12, 4))

plt.subplot(1, 2, 1)
plt.plot(train_losses, label="Train Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Training Loss Over Time")
plt.grid(True)

plt.subplot(1, 2, 2)
plt.plot(test_accuracies, label="Test Accuracy", color="green")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.title("Test Accuracy Over Time")
plt.grid(True)

plt.tight_layout()
plt.show()
405
+ # Final performance evaluation
406
+ model.eval()
407
+ y_true, y_pred = [], []
408
+
409
+ with torch.no_grad():
410
+ for X_batch, y_batch in test_loader:
411
+ X_batch = X_batch.to(device)
412
+ outputs = model(X_batch)
413
+ preds = outputs.argmax(dim=1).cpu().numpy()
414
+ y_pred.extend(preds)
415
+ y_true.extend(y_batch.numpy())
416
+
417
+ # Confusion matrix
418
+ cm = confusion_matrix(y_true, y_pred)
419
+ labels = ["Stable", "Periodic", "Chaotic"]
420
+
421
+ plt.figure(figsize=(6, 5))
422
+ sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", xticklabels=labels, yticklabels=labels)
423
+ plt.title("Confusion Matrix")
424
+ plt.xlabel("Predicted")
425
+ plt.ylabel("Actual")
426
+ plt.show()
427
+
428
+ # Classification report
429
+ print(classification_report(y_true, y_pred, target_names=labels))

"""To test the model end to end:

- Input an r value (between 2.5 and 4.0)
- Generate a logistic map sequence
- Feed it to the trained model
- Predict the regime
- Plot the sequence and overlay the prediction
"""

# Label map for decoding
label_map = {0: "Stable", 1: "Periodic", 2: "Chaotic"}

def predict_regime(r_value: float, model, scaler, device, sequence_length=100):
    """
    Generates a logistic sequence for a given r, feeds it to the model, and predicts the regime.
    """
    assert 2.5 <= r_value <= 4.0, "r must be between 2.5 and 4.0"

    # Generate sequence
    x0 = np.random.uniform(0.1, 0.9)
    sequence = logistic_map(x0, r_value, sequence_length).reshape(1, -1)

    # Standardize using the training scaler
    sequence_scaled = scaler.transform(sequence)

    # Convert to tensor
    sequence_tensor = torch.tensor(sequence_scaled, dtype=torch.float32).to(device)

    # Model inference
    model.eval()
    with torch.no_grad():
        output = model(sequence_tensor)
        pred_class = torch.argmax(output, dim=1).item()

    # Plot
    plt.figure(figsize=(10, 4))
    plt.plot(sequence.flatten(), label=f"r = {r_value}")
    plt.title(f"Predicted Regime: {label_map[pred_class]} (Class {pred_class})")
    plt.xlabel("Time Step")
    plt.ylabel("x")
    plt.grid(True)
    plt.legend()
    plt.show()

    return label_map[pred_class]

predict_regime(2.6, model, scaler, device)
predict_regime(3.3, model, scaler, device)
predict_regime(3.95, model, scaler, device)

import gradio as gr

# Prediction function for Gradio
def classify_sequence(r_value):
    x0 = np.random.uniform(0.1, 0.9)
    sequence = logistic_map(x0, r_value, 100).reshape(1, -1)
    sequence_scaled = scaler.transform(sequence)
    sequence_tensor = torch.tensor(sequence_scaled, dtype=torch.float32).to(device)

    model.eval()
    with torch.no_grad():
        output = model(sequence_tensor)
        pred_class = torch.argmax(output, dim=1).item()

    # Plot the sequence
    fig, ax = plt.subplots(figsize=(6, 3))
    ax.plot(sequence.flatten())
    ax.set_title(f"Logistic Map Sequence (r = {r_value})")
    ax.set_xlabel("Time Step")
    ax.set_ylabel("x")
    ax.grid(True)

    return fig, label_map[pred_class]

# Gradio UI
interface = gr.Interface(
    fn=classify_sequence,
    inputs=gr.Slider(2.5, 4.0, step=0.01, label="r (growth parameter)"),
    outputs=[
        gr.Plot(label="Sequence Plot"),
        gr.Label(label="Predicted Regime")
    ],
    title="🌀 Chaos Classifier: Logistic Map Regime Detector",
    description="Move the slider to choose an r-value and visualize the predicted regime: Stable, Periodic, or Chaotic."
)

# Launch locally or in an HF Space (Spaces ignores share=True and serves the app directly)
interface.launch(share=True)