MoinulwithAI committed
Commit a631cc3 · verified · 1 Parent(s): 757a038

Update app.py

Files changed (1):
  1. app.py +75 -93
app.py CHANGED
@@ -6,10 +6,12 @@ from torchvision import transforms
  from PIL import Image
  import xml.etree.ElementTree as ET
  import torch.optim as optim
- from torch import nn
+ import zipfile

- # Your model training and evaluation functions (already defined in your previous code)
- # Define the custom dataset
+ # Device config
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Custom Dataset
  class FaceMaskDataset(Dataset):
      def __init__(self, images_dir, annotations_dir, transform=None, resize=(800, 800)):
          self.images_dir = images_dir
@@ -26,18 +28,19 @@ class FaceMaskDataset(Dataset):
          image = Image.open(image_path).convert("RGB")
          image = image.resize(self.resize)

-         annotation_path = os.path.join(self.annotations_dir, self.image_files[idx].replace(".jpg", ".xml").replace(".png", ".xml"))
-
+         annotation_path = os.path.join(
+             self.annotations_dir,
+             self.image_files[idx].replace(".jpg", ".xml").replace(".png", ".xml")
+         )
          if not os.path.exists(annotation_path):
-             print(f"Warning: Annotation file {annotation_path} does not exist. Skipping image {self.image_files[idx]}.")
-             return None, None # Return None if annotation is missing
-
+             print(f"Warning: Annotation file {annotation_path} not found.")
+             return None, None
+
          boxes, labels = self.load_annotations(annotation_path)
          if boxes is None or labels is None:
-             return None, None # Skip if annotations are invalid
+             return None, None

          target = {'boxes': boxes, 'labels': labels}
-
          if self.transform:
              image = self.transform(image)

@@ -57,119 +60,98 @@ class FaceMaskDataset(Dataset):
              xmax = float(bndbox.find('xmax').text)
              ymax = float(bndbox.find('ymax').text)
              boxes.append([xmin, ymin, xmax, ymax])
-             labels.append(1 if label == "mask" else 0) # "mask" = 1, "no_mask" = 0
+             labels.append(1 if label == "mask" else 0)

-         if len(boxes) == 0 or len(labels) == 0:
-             return None, None # If no boxes or labels, return None
+         if not boxes or not labels:
+             return None, None

-         boxes = torch.as_tensor(boxes, dtype=torch.float32)
-         labels = torch.tensor(labels, dtype=torch.int64)
-
-         return boxes, labels
+         return torch.as_tensor(boxes, dtype=torch.float32), torch.tensor(labels, dtype=torch.int64)

- # Model Training Loop (referred to from previous code)
+ # Placeholder collate function
+ def collate_fn(batch):
+     batch = list(filter(lambda x: x[0] is not None, batch))
+     images, targets = zip(*batch)
+     return images, targets
+
+ # Dummy get_model function (replace with real model)
+ def get_model(num_classes):
+     import torchvision
+     model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
+     in_features = model.roi_heads.box_predictor.cls_score.in_features
+     model.roi_heads.box_predictor = torchvision.models.detection.faster_rcnn.FastRCNNPredictor(in_features, num_classes)
+     return model
+
+ # Validation Function
+ def evaluate_model(model, val_loader):
+     model.eval()
+     running_loss = 0.0
+     with torch.no_grad():
+         for images, targets in val_loader:
+             if images is None or targets is None:
+                 continue
+             images = [img.to(device) for img in images]
+             targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
+             loss_dict = model(images, targets)
+             total_loss = sum(loss for loss in loss_dict.values())
+             running_loss += total_loss.item()
+     return running_loss / len(val_loader)
+
+ # Training Function
  def train_model(model, train_loader, val_loader, optimizer, num_epochs=10):
      for epoch in range(num_epochs):
-         # Training loop
          running_loss = 0.0
          model.train()
          for images, targets in train_loader:
              if images is None or targets is None:
-                 continue # Skip invalid images/annotations
-
-             # Move data to device
-             images = [image.to(device) for image in images]
+                 continue
+             images = [img.to(device) for img in images]
              targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
-
              optimizer.zero_grad()
             loss_dict = model(images, targets)
-
-             # Calculate total loss
             total_loss = sum(loss for loss in loss_dict.values())
             total_loss.backward()
             optimizer.step()
-
             running_loss += total_loss.item()

-         print(f"Epoch {epoch+1}/{num_epochs}, Loss: {running_loss / len(train_loader)}")
-
-         # Evaluate after every epoch
+         print(f"[Epoch {epoch+1}] Train Loss: {running_loss / len(train_loader):.4f}")
          val_loss = evaluate_model(model, val_loader)
-         print(f"Validation Loss: {val_loss}")
+         print(f"[Epoch {epoch+1}] Validation Loss: {val_loss:.4f}")

- # Validation function
- def evaluate_model(model, val_loader):
-     model.eval()
-     running_loss = 0.0
-     with torch.no_grad():
-         for images, targets in val_loader:
-             if images is None or targets is None:
-                 continue # Skip invalid data
-
-             # Move data to device
-             images = [image.to(device) for image in images]
-             targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
-
-             loss_dict = model(images, targets)
-
-             # Calculate total loss
-             total_loss = sum(loss for loss in loss_dict.values())
-             running_loss += total_loss.item()
-
-     return running_loss / len(val_loader)
+     torch.save(model.state_dict(), "facemask_detector.pth")
+
+ # Main Training Trigger
+ def train_from_files_tab():
+     train_zip_path = "train.zip"
+     val_zip_path = "val.zip"
+
+     if not os.path.exists(train_zip_path) or not os.path.exists(val_zip_path):
+         return "❌ 'train.zip' or 'val.zip' not found in the Files section."
+
+     # Extract
+     for zip_path, folder in [(train_zip_path, "train"), (val_zip_path, "val")]:
+         with zipfile.ZipFile(zip_path, 'r') as zip_ref:
+             zip_ref.extractall(folder)
+
+     transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
+     train_dataset = FaceMaskDataset("train/images", "train/annotations", transform=transform)
+     val_dataset = FaceMaskDataset("val/images", "val/annotations", transform=transform)

- # Function to upload dataset and start training
- def train_on_uploaded_data(train_data, val_data):
-     # Save the uploaded dataset (files)
-     train_data_path = "train_data.zip"
-     val_data_path = "val_data.zip"
-
-     # Unzip and prepare directories (assuming you upload zip files for simplicity)
-     with open(train_data.name, 'wb') as f:
-         f.write(train_data.read())
-     with open(val_data.name, 'wb') as f:
-         f.write(val_data.read())
-
-     # Extract zip files
-     os.system(f"unzip {train_data_path} -d ./train/")
-     os.system(f"unzip {val_data_path} -d ./val/")
-
-     # Load datasets
-     train_dataset = FaceMaskDataset(
-         images_dir="train/images",
-         annotations_dir="train/annotations",
-         transform=transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
-     )
-     val_dataset = FaceMaskDataset(
-         images_dir="val/images",
-         annotations_dir="val/annotations",
-         transform=transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
-     )
-
-     # Dataloaders
-     train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, collate_fn=collate_fn)
-     val_loader = DataLoader(val_dataset, batch_size=32, shuffle=False, collate_fn=collate_fn)
-
-     # Train the model
-     model = get_model(num_classes=2) # Assuming you have a model function
+     train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, collate_fn=collate_fn)
+     val_loader = DataLoader(val_dataset, batch_size=4, shuffle=False, collate_fn=collate_fn)
+
+     model = get_model(num_classes=2)
      model.to(device)
      optimizer = optim.SGD(model.parameters(), lr=0.005, momentum=0.9, weight_decay=0.0005)

-     # Train the model and return feedback
-     train_model(model, train_loader, val_loader, optimizer, num_epochs=10)
-
-     return "Training completed and model saved."
+     train_model(model, train_loader, val_loader, optimizer, num_epochs=5)
+     return "✅ Training complete. Model saved as 'facemask_detector.pth'."

- # Create Gradio Interface
+ # Gradio UI
  iface = gr.Interface(
-     fn=train_on_uploaded_data,
-     inputs=[
-         gr.File(label="Upload Train Dataset (ZIP)"),
-         gr.File(label="Upload Validation Dataset (ZIP)")
-     ],
-     outputs=gr.Textbox(label="Training Status"),
-     live=True
+     fn=train_from_files_tab,
+     inputs=[],
+     outputs=gr.Textbox(label="Training Output"),
+     title="Face Mask Detector Trainer (from Files Tab)"
  )

- # Launch Gradio interface
  iface.launch()
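
With this change the app no longer takes uploads: train.zip and val.zip must already sit in the Space's Files section, and each must unpack into images/ and annotations/ subfolders, since the datasets are pointed at train/images, train/annotations, val/images, and val/annotations. A minimal packaging sketch under those assumptions (make_split_zip and the my_*_split directory names are illustrative, not part of the commit):

    import os
    import zipfile

    def make_split_zip(src_dir, zip_path):
        # src_dir must contain images/ and annotations/ subfolders,
        # mirroring the paths hard-coded in app.py.
        with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
            for sub in ("images", "annotations"):
                folder = os.path.join(src_dir, sub)
                for name in sorted(os.listdir(folder)):
                    zf.write(os.path.join(folder, name), arcname=f"{sub}/{name}")

    make_split_zip("my_train_split", "train.zip")
    make_split_zip("my_val_split", "val.zip")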
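
One edge case in the new collate_fn: if every sample in a batch was filtered out (all annotations missing), zip(*batch) raises a ValueError. A slightly more defensive variant, sketched on the assumption that callers keep the existing `if images is None or targets is None: continue` guard:

    def collate_fn(batch):
        # Drop samples whose __getitem__ returned (None, None).
        batch = [item for item in batch if item[0] is not None]
        if not batch:
            return None, None  # the train/eval loops already skip such batches
        images, targets = zip(*batch)
        return images, targets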
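
A further caveat: torchvision detection models such as fasterrcnn_resnet50_fpn return a loss dict only in training mode; under model.eval() they return per-image predictions instead, so evaluate_model as committed has no losses to sum. One common workaround, shown here as a sketch of the apparent intent rather than as part of this commit, keeps the model in train mode while disabling gradients (no optimizer step is taken, and the pretrained detection backbone uses frozen BatchNorm, so weights and running statistics are unaffected):

    def evaluate_model(model, val_loader):
        model.train()  # keep the loss branch of the detection model active
        running_loss = 0.0
        with torch.no_grad():
            for images, targets in val_loader:
                if images is None or targets is None:
                    continue
                images = [img.to(device) for img in images]
                targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
                loss_dict = model(images, targets)
                running_loss += sum(loss for loss in loss_dict.values()).item()
        return running_loss / len(val_loader)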