import os

import numpy as np
import matplotlib.pyplot as plt
from PIL import UnidentifiedImageError
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import AveragePooling2D, Dropout, Flatten, Dense, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import EarlyStopping
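
# Dataset directory with one sub-folder per class (with_mask, without_mask)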
data_dir = "dataset"
categories = ["with_mask", "without_mask"]
data = []
labels = []

print("Loading and preprocessing images for final model training...")
for category in categories:
    path = os.path.join(data_dir, category)
    for img_name in os.listdir(path):
        img_path = os.path.join(path, img_name)
        try:
            image = load_img(img_path, target_size=(224, 224))
            image = img_to_array(image)
            image = preprocess_input(image)
            data.append(image)
            labels.append(0 if category == "with_mask" else 1)
        except UnidentifiedImageError:
            print(f"Skipped invalid image file: {img_path}")
        except Exception as e:
            print(f"Error loading image {img_path}: {e}")

print(f"Loaded {len(data)} images.")
data = np.array(data, dtype="float32")
labels = to_categorical(labels)

x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, stratify=labels, random_state=42)
print(f"Training samples: {len(x_train)}, Validation samples: {len(x_test)}")
aug = ImageDataGenerator(
    rotation_range=30,
    zoom_range=0.2,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    horizontal_flip=True,
    brightness_range=[0.7, 1.3],
    channel_shift_range=50,
    fill_mode="nearest"
)
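
# Hyperparameter values selected during tuning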
OPTIMAL_UNITS = 128
OPTIMAL_L2_REGULARIZER = 0.0001
OPTIMAL_DROPOUT_RATE = 0.3
OPTIMAL_LEARNING_RATE = 0.0001
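
# MobileNetV2 backbone pretrained on ImageNet, without its classification head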
base_model = MobileNetV2(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)))
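
# Freeze all but the last 20 layers so only the top of the backbone is fine-tuned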
for layer in base_model.layers[:-20]:
    layer.trainable = False
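
# Custom classification head on top of the backbone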
head_model = base_model.output
head_model = AveragePooling2D(pool_size=(7, 7))(head_model)
head_model = Flatten()(head_model)
head_model = Dense(OPTIMAL_UNITS, activation="relu", kernel_regularizer=l2(OPTIMAL_L2_REGULARIZER))(head_model)
head_model = Dropout(OPTIMAL_DROPOUT_RATE)(head_model)
head_model = Dense(2, activation="softmax")(head_model)

model = Model(inputs=base_model.input, outputs=head_model)
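
# Two-class softmax output, trained with categorical cross-entropy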
model.compile(optimizer=Adam(learning_rate=OPTIMAL_LEARNING_RATE),
              loss="categorical_crossentropy",
              metrics=["accuracy"])

print("\nModel compiled with optimal hyperparameters. Starting final training...")
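
# Stop when validation loss has not improved for 10 epochs and keep the best weights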
early_stopping_final = EarlyStopping(
    monitor='val_loss',
    patience=10,
    restore_best_weights=True
)
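
# Train on augmented batches; the held-out set is used (un-augmented) for validation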
history = model.fit(
    aug.flow(x_train, y_train, batch_size=32),
    validation_data=(x_test, y_test),
    steps_per_epoch=len(x_train) // 32,
    epochs=100,
    callbacks=[early_stopping_final]
)

print("\nFinal training complete.")
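
# Metrics from the last completed epoch (with restore_best_weights=True the final
# model weights come from the best epoch, so these figures can differ slightly)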
final_train_loss = history.history['loss'][-1]
final_train_accuracy = history.history['accuracy'][-1]
final_val_loss = history.history['val_loss'][-1]
final_val_accuracy = history.history['val_accuracy'][-1]

print("\nFinal Training Metrics:")
print(f"Training Loss: {final_train_loss:.4f}")
print(f"Training Accuracy: {final_train_accuracy:.4f}")
print("\nFinal Validation Metrics:")
print(f"Validation Loss: {final_val_loss:.4f}")
print(f"Validation Accuracy: {final_val_accuracy:.4f}")
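
# Plot training and validation loss and accuracy curves side by side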
plt.figure(figsize=(12, 5))

plt.subplot(1, 2, 1)
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)

plt.subplot(1, 2, 2)
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)

plt.tight_layout()
plt.show()
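
# Persist the fine-tuned model in the native Keras format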
model.save("mask_detector_final_model.keras")
print("Final model saved as mask_detector_final_model.keras")