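"""Demo script: encrypt and decrypt wave data with Fernet, then train a small
PyTorch autoencoder on "normal" wave data and flag anomalies by reconstruction
error."""
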
import json

from cryptography.fernet import Fernet

# Generate a key for encryption
key = Fernet.generate_key()
cipher = Fernet(key)
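# NOTE: the same key is needed for decryption, so it must be stored securely.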

# Example wave data (as a list of floats)
wave_data = [0.1, 0.5, 0.3, 0.4, 0.9]

# Serialize wave data to JSON bytes and encrypt
wave_data_bytes = json.dumps(wave_data).encode('utf-8')
encrypted_wave_data = cipher.encrypt(wave_data_bytes)

# Decrypt and deserialize the wave data (json.loads avoids the unsafe eval())
decrypted_wave_data_bytes = cipher.decrypt(encrypted_wave_data)
decrypted_wave_data = json.loads(decrypted_wave_data_bytes.decode('utf-8'))

print("Original Wave Data:", wave_data)
print("Encrypted Wave Data:", encrypted_wave_data)
print("Decrypted Wave Data:", decrypted_wave_data)
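
# --- Autoencoder-based anomaly detection ---
# A small autoencoder is trained to reconstruct "normal" wave patterns;
# inputs with a high reconstruction error are flagged as anomalies.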

import torch
import torch.nn as nn
import torch.optim as optim

class Autoencoder(nn.Module):
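    """Small autoencoder: compresses a 5-sample wave window into 2 latent
    features and reconstructs it (5 -> 3 -> 2 -> 3 -> 5)."""
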
    def __init__(self):
        super(Autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Linear(5, 3),
            nn.ReLU(),
            nn.Linear(3, 2),
            nn.ReLU()
        )
        self.decoder = nn.Sequential(
            nn.Linear(2, 3),
            nn.ReLU(),
            nn.Linear(3, 5),
            nn.Sigmoid()
        )

    def forward(self, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x

# Initialize the model, loss function, and optimizer
model = Autoencoder()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

# Example "normal" wave data; the autoencoder learns to reconstruct these patterns
normal_wave_data = torch.tensor([
    [0.1, 0.2, 0.3, 0.4, 0.5],
    [0.2, 0.3, 0.4, 0.5, 0.6],
    [0.3, 0.4, 0.5, 0.6, 0.7]
], dtype=torch.float32)

# Training the model
for epoch in range(1000):  # Example training loop
    optimizer.zero_grad()
    outputs = model(normal_wave_data)
    loss = criterion(outputs, normal_wave_data)
    loss.backward()
    optimizer.step()

    if (epoch+1) % 100 == 0:
        print(f'Epoch [{epoch+1}/1000], Loss: {loss.item():.4f}')

# New wave data to check for anomalies
new_wave_data = torch.tensor([0.9, 0.8, 0.7, 0.6, 0.5], dtype=torch.float32)

# Reshape for single input
new_wave_data = new_wave_data.unsqueeze(0)

# Pass through the model in inference mode (no gradients needed)
model.eval()
with torch.no_grad():
    reconstructed_data = model(new_wave_data)
    loss = criterion(reconstructed_data, new_wave_data)

# Set a threshold for anomaly detection
anomaly_threshold = 0.01
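# NOTE: 0.01 is an illustrative value; in practice the threshold would typically
# be derived from reconstruction errors on held-out normal data
# (e.g., mean plus a few standard deviations).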

if loss.item() > anomaly_threshold:
    print("Anomaly detected!")
else:
    print("Data is normal.")