import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import AveragePooling2D, Dropout, Flatten, Dense, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import EarlyStopping
from sklearn.model_selection import train_test_split
import os
from PIL import UnidentifiedImageError
import matplotlib.pyplot as plt
# --- Data Loading and Preprocessing ---
data_dir = "dataset"
categories = ["with_mask", "without_mask"]
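# Expected directory layout: dataset/with_mask/ and dataset/without_mask/,
# each containing the raw image files for that class.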
data = []
labels = []
print("Loading and preprocessing images for final model training...")
for category in categories:
    path = os.path.join(data_dir, category)
    for img_name in os.listdir(path):
        img_path = os.path.join(path, img_name)
        try:
            image = load_img(img_path, target_size=(224, 224))
            image = img_to_array(image)
            image = preprocess_input(image)
            data.append(image)
            labels.append(0 if category == "with_mask" else 1)
        except UnidentifiedImageError:
            print(f"Skipped invalid image file: {img_path}")
        except Exception as e:
            print(f"Error loading image {img_path}: {e}")
print(f"Loaded {len(data)} images.")
data = np.array(data, dtype="float32")
labels = to_categorical(labels)
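# to_categorical turns the integer labels into one-hot vectors:
# with_mask -> [1, 0], without_mask -> [0, 1]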
x_train, x_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, stratify=labels, random_state=42)
print(f"Training samples: {len(x_train)}, Validation samples: {len(x_test)}")
# --- Data Augmentation Configuration ---
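# Augmentation is applied on the fly to training batches via aug.flow() below;
# the validation split is passed to model.fit() unaugmented. Note that
# preprocess_input() has already scaled pixel values to [-1, 1], so
# brightness_range and channel_shift_range operate on those scaled values.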
aug = ImageDataGenerator(
    rotation_range=30,
    zoom_range=0.2,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    horizontal_flip=True,
    brightness_range=[0.7, 1.3],
    channel_shift_range=50,
    fill_mode="nearest"
)
# --- Define Optimal Hyperparameters (THESE NEED TO BE FILLED IN MANUALLY) ---
OPTIMAL_UNITS = 128 # <--- Update this value
OPTIMAL_L2_REGULARIZER = 0.0001 # <--- Update this value
OPTIMAL_DROPOUT_RATE = 0.3 # <--- Update this value
OPTIMAL_LEARNING_RATE = 0.0001 # <--- Update this value
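# A minimal sketch of how these constants could be populated automatically if
# the values came from a KerasTuner search (an assumption: the tuning script is
# not part of this file, and the hyperparameter names below are illustrative):
#
#   best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
#   OPTIMAL_UNITS = best_hps.get("units")
#   OPTIMAL_L2_REGULARIZER = best_hps.get("l2")
#   OPTIMAL_DROPOUT_RATE = best_hps.get("dropout")
#   OPTIMAL_LEARNING_RATE = best_hps.get("learning_rate")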
# --- Model Building with Optimal Hyperparameters ---
base_model = MobileNetV2(weights="imagenet", include_top=False, input_tensor=Input(shape=(224, 224, 3)))
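# Freeze everything except the last 20 layers of MobileNetV2, so only the top
# convolutional blocks are fine-tuned along with the new classification head.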
for layer in base_model.layers[:-20]:
    layer.trainable = False
head_model = base_model.output
head_model = AveragePooling2D(pool_size=(7, 7))(head_model)
head_model = Flatten()(head_model)
head_model = Dense(OPTIMAL_UNITS, activation="relu", kernel_regularizer=l2(OPTIMAL_L2_REGULARIZER))(head_model)
head_model = Dropout(OPTIMAL_DROPOUT_RATE)(head_model)
head_model = Dense(2, activation="softmax")(head_model)
model = Model(inputs=base_model.input, outputs=head_model)
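# categorical_crossentropy matches the one-hot labels and the 2-unit softmax head.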
model.compile(optimizer=Adam(learning_rate=OPTIMAL_LEARNING_RATE),
              loss="categorical_crossentropy",
              metrics=["accuracy"])
print("\nModel compiled with optimal hyperparameters. Starting final training...")
# --- Training the Final Model ---
early_stopping_final = EarlyStopping(
    monitor='val_loss',
    patience=10,
    restore_best_weights=True
)
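# One epoch consumes len(x_train) // 32 augmented batches; validation is run on
# the held-out (x_test, y_test) split after each epoch.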
history = model.fit(
    aug.flow(x_train, y_train, batch_size=32),
    validation_data=(x_test, y_test),
    steps_per_epoch=len(x_train) // 32,
    epochs=100,
    callbacks=[early_stopping_final]
)
print("\nFinal training complete.")
# --- Evaluation and Plotting ---
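# Note: the values printed below come from the last epoch that ran. Because
# restore_best_weights=True, the weights kept in `model` (and saved further
# down) correspond to the epoch with the lowest validation loss, which is not
# necessarily the last one.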
final_train_loss = history.history['loss'][-1]
final_train_accuracy = history.history['accuracy'][-1]
final_val_loss = history.history['val_loss'][-1]
final_val_accuracy = history.history['val_accuracy'][-1]
print("\nFinal Training Metrics:")
print(f"Training Loss: {final_train_loss:.4f}")
print(f"Training Accuracy: {final_train_accuracy:.4f}")
print("\nFinal Validation Metrics:")
print(f"Validation Loss: {final_val_loss:.4f}")
print(f"Validation Accuracy: {final_val_accuracy:.4f}")
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.grid(True)
plt.subplot(1, 2, 2)
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()
# --- Save the Final Trained Model ---
model.save("mask_detector_final_model.keras")
print("Final model saved as mask_detector_final_model.keras") |