"""model.py module."""
from typing import Dict, List
import tensorflow as tf
import numpy as np
import logging

class FederatedClient:
    def __init__(self, client_id: int, config: Dict):
        """Initialize the federated client."""
        self.client_id = client_id
        self.config = config.get('client', {})
        self.model = self._build_model()

    def start(self):
        """Start the federated client process."""
        logger = logging.getLogger(__name__)
        logger.info(f"Client {self.client_id} started")
        logger.info(f"Client config: {self.config}")
        try:
            # Simulate some data
            logger.info("Generating training data...")
            X, y = self._generate_dummy_data()
            logger.info(f"Generated data shapes - X: {X.shape}, y: {y.shape}")

            # Train locally
            logger.info("Starting local training...")
            history = self.train_local((X, y))

            # Log training metrics
            losses = history.get('loss', [])
            logger.info("\nTraining Progress Summary:")
            logger.info("-" * 30)
            for epoch, loss in enumerate(losses, 1):
                logger.info(f"Epoch {epoch:2d}/{len(losses)}: loss = {loss:.4f}")
            if losses:
                logger.info(f"\nTraining completed - Final loss: {losses[-1]:.4f}")

            # Log model summary in a simpler format
            logger.info("\nModel Architecture:")
            logger.info("-" * 30)
            logger.info("Layer (Output Shape) -> Params")
            total_params = 0
            for layer in self.model.layers:
                params = layer.count_params()
                total_params += params
                logger.info(f"{layer.name} {layer.output.shape} -> {params:,} params")
            logger.info(f"Total Parameters: {total_params:,}")
        except Exception as e:
            logger.error(f"Error during client execution: {e}")
            raise

    def _generate_dummy_data(self):
        """Generate dummy data for testing."""
        num_samples = 100
        input_dim = 32  # Must match the model's input dimension

        # Generate random input features
        X = tf.random.normal((num_samples, input_dim))
        # Generate targets (for this example, the label is the sum of the inputs)
        y = tf.reduce_sum(X, axis=1, keepdims=True)
        return X, y

    def _build_model(self):
        """Build the initial model architecture."""
        input_dim = 32  # Must match the data generation
        model = tf.keras.Sequential([
            tf.keras.layers.Input(shape=(input_dim,)),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(1)  # Output layer for regression
        ])
        model.compile(
            optimizer=tf.keras.optimizers.Adam(
                learning_rate=self.config.get('training', {}).get('learning_rate', 0.001)
            ),
            loss='mse'
        )
        return model

    def train_local(self, data):
        """Train the model on local data."""
        logger = logging.getLogger(__name__)
        X, y = data

        # Log training parameters
        logger.info("\nTraining Parameters:")
        logger.info("-" * 50)
        logger.info(f"Input shape: {X.shape}")
        logger.info(f"Output shape: {y.shape}")
        logger.info(f"Batch size: {self.config.get('data', {}).get('batch_size', 32)}")
        logger.info(f"Epochs: {self.config.get('training', {}).get('local_epochs', 5)}")
        logger.info(f"Learning rate: {self.config.get('training', {}).get('learning_rate', 0.001)}")
        logger.info("-" * 50)

        class LogCallback(tf.keras.callbacks.Callback):
            """Route per-epoch metrics to the module logger."""

            def on_epoch_end(self, epoch, logs=None):
                logs = logs or {}
                logger.info(f"Epoch {epoch + 1} - loss: {logs.get('loss', float('nan')):.4f}")

        # Train with the custom logging callback instead of Keras' default output
        history = self.model.fit(
            X, y,
            batch_size=self.config.get('data', {}).get('batch_size', 32),
            epochs=self.config.get('training', {}).get('local_epochs', 5),
            verbose=0,  # Suppress the default progress bar
            callbacks=[LogCallback()]  # Use the custom callback above
        )
        return history.history

    def get_weights(self) -> List:
        """Get the model weights."""
        return self.model.get_weights()

    def set_weights(self, weights: List):
        """Update local model with global weights."""
        self.model.set_weights(weights)
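
# Example usage: a minimal sketch. The config schema below is an assumption
# inferred from the keys this class reads ('client' -> 'training'/'data'),
# and the two-client averaging step illustrates one FedAvg-style round via
# get_weights()/set_weights(); neither is prescribed by the class itself.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    example_config = {
        'client': {
            'training': {'learning_rate': 0.001, 'local_epochs': 5},
            'data': {'batch_size': 32},
        }
    }
    clients = [FederatedClient(client_id=i, config=example_config) for i in range(2)]
    for client in clients:
        client.start()

    # Average the clients' weights layer by layer (unweighted FedAvg) and
    # push the aggregated weights back to every client.
    averaged = [
        np.mean(layer_weights, axis=0)
        for layer_weights in zip(*(client.get_weights() for client in clients))
    ]
    for client in clients:
        client.set_weights(averaged)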