SecureCypher.space / securecypher.space.py
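"""Demo script: a small PyTorch network (SecureWaveformProcessor) transforms a
batch of synthetic waveforms, and WaveformVisualizer renders static plots,
spectrograms, and an animation comparing the original and processed signals."""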
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import seaborn as sns
class SecureWaveformProcessor(nn.Module):
    """Two-layer fully connected network that transforms a batch of waveforms."""

    def __init__(self, input_size, hidden_size, sampling_rate=1000):
        super(SecureWaveformProcessor, self).__init__()
        self.layer1 = nn.Linear(input_size, hidden_size)
        self.layer2 = nn.Linear(hidden_size, input_size)
        self.sampling_rate = sampling_rate

    def forward(self, x):
        x = torch.relu(self.layer1(x))
        x = self.layer2(x)
        return x


class WaveformVisualizer:
    """Plots original vs. processed waveforms and their spectrograms."""

    def __init__(self, processor, input_data, sampling_rate=1000):
        self.processor = processor
        self.input_data = input_data
        self.sampling_rate = sampling_rate
        self.time = np.arange(input_data.shape[1]) / sampling_rate

    def plot_waveforms(self):
        """Build a static 2x2 figure comparing original and processed signals."""
        processed_data = self.processor(self.input_data)
fig = plt.figure(figsize=(15, 10))
gs = fig.add_gridspec(2, 2, hspace=0.3, wspace=0.3)
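        # Top row: raw vs. processed waveforms; bottom row: their spectrograms.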
ax1 = fig.add_subplot(gs[0, 0])
self._plot_waveform(self.input_data[0], ax1, "Original Data")
ax2 = fig.add_subplot(gs[0, 1])
        self._plot_waveform(processed_data[0], ax2, "Processed Data")
ax3 = fig.add_subplot(gs[1, 0])
self._plot_spectrogram(self.input_data[0], ax3, "Original Visual")
ax4 = fig.add_subplot(gs[1, 1])
        self._plot_spectrogram(processed_data[0], ax4, "Processed Visual")
plt.tight_layout()
return fig
    def _plot_waveform(self, data, ax, title):
data_np = data.detach().numpy()
ax.plot(self.time, data_np, 'b-', linewidth=1)
ax.set_title(title)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Amplitude')
ax.grid(True)
def _plot_spectrogram(self, data, ax, title):
        data_np = data.detach().numpy()
        ax.specgram(data_np, Fs=self.sampling_rate, cmap='viridis')
ax.set_title(title)
ax.set_xlabel('Time (s)')
ax.set_ylabel('Frequency (Hz)')
    def animate_processing(self, frames=50):
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(10, 8))
        processed_data = self.processor(self.input_data)
data_original = self.input_data[0].detach().numpy()
data_processed = processed_data[0].detach().numpy()
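        # Start from empty line artists; animate() fills them in frame by frame.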
line1, = ax1.plot([], [], 'b-', label='Original')
line2, = ax2.plot([], [], 'r-', label='Processed')
def init():
ax1.set_xlim(0, self.time[-1])
ax1.set_ylim(data_original.min()*1.2, data_original.max()*1.2)
ax2.set_xlim(0, self.time[-1])
ax2.set_ylim(data_processed.min()*1.2, data_processed.max()*1.2)
ax1.set_title('Original Data')
            ax2.set_title('Processed Data')
ax1.grid(True)
ax2.grid(True)
ax1.legend()
ax2.legend()
return line1, line2
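        # Each frame reveals a longer prefix of both signals.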
def animate(frame):
idx = int((frame / frames) * len(self.time))
line1.set_data(self.time[:idx], data_original[:idx])
line2.set_data(self.time[:idx], data_processed[:idx])
return line1, line2
anim = FuncAnimation(fig, animate, frames=frames,
init_func=init, blit=True,
interval=50)
plt.tight_layout()
return anim
if __name__ == "__main__":
input_size = 1000
batch_size = 32
sampling_rate = 1000
processor = SecureWaveformProcessor(input_size=input_size, hidden_size=64, sampling_rate=sampling_rate)
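    # Synthetic test signal: 1 Hz and 2 Hz sinusoids plus Gaussian noise,
    # tiled across the batch dimension.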
t = np.linspace(0, 10, input_size)
base_signal = np.sin(2 * np.pi * 1 * t) + 0.5 * np.sin(2 * np.pi * 2 * t)
noise = np.random.normal(0, 0.1, input_size)
signal = base_signal + noise
input_data = torch.tensor(np.tile(signal, (batch_size, 1)), dtype=torch.float32)
    visualizer = WaveformVisualizer(processor, input_data, sampling_rate=sampling_rate)
    fig_static = visualizer.plot_waveforms()
plt.show()
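    # Keep a reference to the animation so it is not garbage collected before display.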
    anim = visualizer.animate_processing()
plt.show()