import os
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
from torchvision import models
from tqdm import tqdm  # ✅ Progress bar

# ✅ Define dataset paths
train_dir = "train"
test_dir = "dataset/test"

# ✅ Optimized transformations (smaller image size)
transform = transforms.Compose([
    transforms.Resize((128, 128)),  # Reduce size for faster training
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize([0.5], [0.5])  # Single mean/std broadcasts across all three RGB channels
])

# ✅ Load datasets
train_dataset = datasets.ImageFolder(root=train_dir, transform=transform)
test_dataset = datasets.ImageFolder(root=test_dir, transform=transform)

# ✅ Get class names
class_names = train_dataset.classes
print(f"Class Names: {class_names}")

# ✅ Save class names for later use in `app.py`
with open("class_names.txt", "w") as f:
    for name in class_names:
        f.write(name + "\n")

# ✅ Optimized DataLoaders (smaller batch size)
batch_size = 16  # Reduce batch size for speed
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# ✅ Use a faster model (MobileNetV2)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Note: `pretrained=True` is deprecated in newer torchvision releases;
# the equivalent is weights=models.MobileNet_V2_Weights.IMAGENET1K_V1.
model = models.mobilenet_v2(pretrained=True)
model.classifier[1] = nn.Linear(model.classifier[1].in_features, len(class_names))
model = model.to(device)

# ✅ Define loss function & optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# ✅ Train the model with a progress bar
num_epochs = 3  # Reduce epochs for faster training

for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    progress_bar = tqdm(train_loader, desc=f"Epoch {epoch+1}/{num_epochs}", leave=False)

    for batch_idx, (images, labels) in enumerate(progress_bar, start=1):
        images, labels = images.to(device), labels.to(device)

        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # Average over the batches processed so far, not the full epoch
        progress_bar.set_postfix(loss=f"{running_loss/batch_idx:.4f}")

    print(f"✅ Epoch [{epoch+1}/{num_epochs}] - Loss: {running_loss/len(train_loader):.4f}")

# ✅ Save the trained model
torch.save(model.state_dict(), "plant_disease_model.pth")
print("✅ Model training complete and saved as plant_disease_model.pth")

# ✅ Generate and save y_true.pth and y_pred.pth
print("🔍 Running model on test dataset to save predictions...")

y_true = []
y_pred = []

model.eval()
with torch.no_grad():
    for images, labels in tqdm(test_loader, desc="Evaluating"):
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        preds = torch.argmax(outputs, dim=1)

        y_true.extend(labels.cpu().numpy())  # Move to CPU before saving
        y_pred.extend(preds.cpu().numpy())

# ✅ Save the true labels and predictions
torch.save(y_true, "y_true.pth")
torch.save(y_pred, "y_pred.pth")
print("✅ Test labels (y_true.pth) and predictions (y_pred.pth) saved successfully.")
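
# ----------------------------------------------------------------------------
# Optional follow-up: a minimal sketch of how the saved artifacts could be
# consumed, e.g. by `app.py` or a separate evaluation script. This is not part
# of the original pipeline; it assumes scikit-learn is installed and reuses the
# file names and variables defined above.
try:
    from sklearn.metrics import accuracy_score, classification_report

    # Summarize test performance from the predictions collected above
    print(f"✅ Test accuracy: {accuracy_score(y_true, y_pred):.4f}")
    print(classification_report(
        y_true, y_pred,
        labels=list(range(len(class_names))),
        target_names=class_names,
        zero_division=0,
    ))
except ImportError:
    print("ℹ️ scikit-learn not installed; skipping the metrics summary.")

# Rebuilding the model for inference would follow the same pattern: recreate
# the MobileNetV2 head with the saved class count, then load the state dict.
inference_model = models.mobilenet_v2(pretrained=False)
inference_model.classifier[1] = nn.Linear(inference_model.classifier[1].in_features, len(class_names))
inference_model.load_state_dict(torch.load("plant_disease_model.pth", map_location=device))
inference_model.eval()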