MoinulwithAI committed
Commit c4bd279 · verified · Parent: a51d4e2

Update app.py

Files changed (1)
  1. app.py +114 -98
app.py CHANGED
@@ -1,124 +1,140 @@
- import os
- import zipfile
- from PIL import Image
- import torch
- import torch.nn as nn
- from torchvision import transforms, models
- from torch.utils.data import Dataset, DataLoader
- import gradio as gr
-
- # ----------- SETUP -----------
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- print("Using device:", device)
-
- # ----------- UNZIP DATA -----------
-
- def unzip_file(zip_path, extract_to):
-     if not os.path.exists(extract_to):
-         os.makedirs(extract_to)
-     with zipfile.ZipFile(zip_path, 'r') as zip_ref:
-         zip_ref.extractall(extract_to)
-     print(f"Extracted {zip_path} to {extract_to}")
-
- unzip_file("train.zip", "./data/train")
- unzip_file("val.zip", "./data/val")
-
- # ----------- DATASET -----------
-
- class FaceMaskDataset(Dataset):
-     def __init__(self, root_dir, transform=None):
-         self.image_paths = []
-         self.labels = []
-         self.transform = transform
-         for label_name in ['mask', 'no_mask']:
-             class_path = os.path.join(root_dir, label_name)
-             for img_name in os.listdir(class_path):
-                 if img_name.endswith(".jpg") or img_name.endswith(".png"):
-                     self.image_paths.append(os.path.join(class_path, img_name))
-                     self.labels.append(0 if label_name == 'mask' else 1)
-
-     def __len__(self):
-         return len(self.image_paths)
-
-     def __getitem__(self, idx):
-         image = Image.open(self.image_paths[idx]).convert("RGB")
-         if self.transform:
-             image = self.transform(image)
-         return image, self.labels[idx]
-
- transform = transforms.Compose([
-     transforms.Resize((224, 224)),
-     transforms.ToTensor(),
- ])
-
- train_dataset = FaceMaskDataset("./data/train", transform)
- val_dataset = FaceMaskDataset("./data/val", transform)
- train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
- val_loader = DataLoader(val_dataset, batch_size=16)
-
- # ----------- MODEL -----------
-
- model = models.mobilenet_v2(pretrained=True)
- model.classifier[1] = nn.Linear(model.last_channel, 2)
- model = model.to(device)
-
- criterion = nn.CrossEntropyLoss()
- optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
-
- # ----------- TRAINING -----------
-
- def train_model(model, epochs=2):  # keep epochs small for HF Spaces
-     for epoch in range(epochs):
-         model.train()
-         running_loss = 0.0
-         for imgs, labels in train_loader:
-             imgs, labels = imgs.to(device), labels.to(device)
-             optimizer.zero_grad()
-             outputs = model(imgs)
-             loss = criterion(outputs, labels)
-             loss.backward()
-             optimizer.step()
-             running_loss += loss.item()
-
-         print(f"Epoch {epoch+1}, Loss: {running_loss/len(train_loader):.4f}")
-
-         # Validation Accuracy
-         correct = 0
-         total = 0
-         model.eval()
-         with torch.no_grad():
-             for imgs, labels in val_loader:
-                 imgs, labels = imgs.to(device), labels.to(device)
-                 outputs = model(imgs)
-                 _, predicted = torch.max(outputs.data, 1)
-                 total += labels.size(0)
-                 correct += (predicted == labels).sum().item()
-         acc = 100 * correct / total
-         print(f"Validation Accuracy: {acc:.2f}%")
-
- train_model(model)
- torch.save(model.state_dict(), "face_mask_model.pth")
-
- # ----------- INFERENCE -----------
-
- def predict(image):
-     model.eval()
-     img = image.convert("RGB")
-     img = transform(img).unsqueeze(0).to(device)
-     with torch.no_grad():
-         outputs = model(img)
-         _, predicted = torch.max(outputs.data, 1)
-     return "Mask" if predicted.item() == 0 else "No Mask"
-
- # ----------- GRADIO APP -----------
-
- iface = gr.Interface(
-     fn=predict,
-     inputs=gr.Image(source="webcam", tool="editor", type="pil", label="Upload or Webcam"),
-     outputs=gr.Label(label="Prediction"),
-     live=True,
-     title="Face Mask Detection",
-     description="Upload or use webcam to detect if a person is wearing a face mask."
- )
-
- iface.launch()
+ import gradio as gr
+ import os
+ import zipfile
+ import torch
+ from torch.utils.data import Dataset, DataLoader
+ from torchvision import transforms
+ from PIL import Image
+ import xml.etree.ElementTree as ET
+ import torchvision.models.detection
+ from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print(f"Using device: {device}")
+
+ # Dataset class
+ class FaceMaskDataset(Dataset):
+     def __init__(self, images_dir, annotations_dir, transform=None, resize=(800, 800)):
+         self.images_dir = images_dir
+         self.annotations_dir = annotations_dir
+         self.transform = transform
+         self.resize = resize
+         self.image_files = [f for f in os.listdir(images_dir) if f.endswith(('.jpg', '.png'))]
+
+     def __len__(self):
+         return len(self.image_files)
+
+     def __getitem__(self, idx):
+         image_path = os.path.join(self.images_dir, self.image_files[idx])
+         image = Image.open(image_path).convert("RGB")
+         image = image.resize(self.resize)
+
+         annotation_path = os.path.join(self.annotations_dir, self.image_files[idx].replace(".jpg", ".xml").replace(".png", ".xml"))
+         if not os.path.exists(annotation_path):
+             return None, None
+
+         boxes, labels = self.load_annotations(annotation_path)
+         if boxes is None or labels is None:
+             return None, None
+
+         target = {'boxes': boxes, 'labels': labels}
+
+         if self.transform:
+             image = self.transform(image)
+
+         return image, target
+
+     def load_annotations(self, annotation_path):
+         tree = ET.parse(annotation_path)
+         root = tree.getroot()
+
+         boxes = []
+         labels = []
+         for obj in root.iter('object'):
+             label = obj.find('name').text
+             bndbox = obj.find('bndbox')
+             xmin = float(bndbox.find('xmin').text)
+             ymin = float(bndbox.find('ymin').text)
+             xmax = float(bndbox.find('xmax').text)
+             ymax = float(bndbox.find('ymax').text)
+             boxes.append([xmin, ymin, xmax, ymax])
+             labels.append(1 if label == "mask" else 0)
+
+         if not boxes or not labels:
+             return None, None
+
+         boxes = torch.tensor(boxes, dtype=torch.float32)
+         labels = torch.tensor(labels, dtype=torch.int64)
+
+         return boxes, labels
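+         # Note: torchvision detection models treat label 0 as background, so
+         # the 0 = "no mask" mapping above effectively trains unmasked faces
+         # as background; the boxes also stay in the original image's pixel
+         # coordinates even though __getitem__ resizes the image to 800x800.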
+
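+ # Detection batches cannot be stacked into a single tensor (each image has a
+ # different number of boxes), so a list-based collate is used; it also drops
+ # the (None, None) pairs emitted for missing annotations.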
+ def collate_fn(batch):
+     batch = [b for b in batch if b[0] is not None and b[1] is not None]
+     images, targets = zip(*batch)
+     return list(images), list(targets)
+
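+ # Start from a COCO-pretrained Faster R-CNN and swap in a fresh box-predictor
+ # head sized for num_classes outputs (num_classes includes the background class).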
+ def get_model(num_classes):
+     model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
+     in_features = model.roi_heads.box_predictor.cls_score.in_features
+     model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
+     return model
+
+ def extract_zip(zip_file, extract_to):
+     with zipfile.ZipFile(zip_file, 'r') as zip_ref:
+         zip_ref.extractall(extract_to)
+
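+ # Extract the uploaded ZIPs, then fine-tune the detector. The hard-coded
+ # paths assume each archive unpacks to an inner train/ (or val/) folder
+ # containing images/ and annotations/; val_loader is created below but not
+ # yet used for evaluation.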
+ def train_model(train_zip, val_zip):
+     extract_zip(train_zip, './data/train')
+     extract_zip(val_zip, './data/val')
+
+     transform = transforms.Compose([transforms.ToTensor()])
+
+     train_dataset = FaceMaskDataset(
+         images_dir='./data/train/train/images',
+         annotations_dir='./data/train/train/annotations',
+         transform=transform
+     )
+     val_dataset = FaceMaskDataset(
+         images_dir='./data/val/val/images',
+         annotations_dir='./data/val/val/annotations',
+         transform=transform
+     )
+
+     train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True, collate_fn=collate_fn)
+     val_loader = DataLoader(val_dataset, batch_size=4, shuffle=False, collate_fn=collate_fn)
+
+     model = get_model(num_classes=2).to(device)
+     optimizer = torch.optim.SGD(model.parameters(), lr=0.005, momentum=0.9, weight_decay=0.0005)
+
+     for epoch in range(3):  # Reduce for demo
+         model.train()
+         total_loss = 0
+         for images, targets in train_loader:
+             images = [img.to(device) for img in images]
+             targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
+
+             optimizer.zero_grad()
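+             # In training mode the detector returns a dict of partial losses
+             # (RPN and ROI-head terms) rather than predictions; their sum is
+             # the total loss to optimize.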
+             loss_dict = model(images, targets)
+             loss = sum(loss for loss in loss_dict.values())
+             loss.backward()
+             optimizer.step()
+
+             total_loss += loss.item()
+
+         print(f"Epoch {epoch+1}, Loss: {total_loss / len(train_loader)}")
+
+     torch.save(model.state_dict(), "model.pth")
+     return "Training completed. Model saved as model.pth"
+
+ # Gradio upload interface
+ iface = gr.Interface(
+     fn=train_model,
+     inputs=[
+         gr.File(label="Upload Train ZIP"),
+         gr.File(label="Upload Val ZIP")
+     ],
+     outputs="text"
+ )
+
+ if __name__ == "__main__":
+     iface.launch()
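
Note: the updated app only trains and saves model.pth; nothing in this commit loads the weights back for prediction. A minimal inference sketch under that assumption, reusing get_model, device, and the num_classes=2 setup from the code above (detect_masks and the 0.5 score threshold are illustrative choices, not part of the commit):

import torch
from PIL import Image
from torchvision import transforms

# Rebuild the same architecture, then load the weights saved by train_model.
model = get_model(num_classes=2)
model.load_state_dict(torch.load("model.pth", map_location=device))
model.to(device).eval()

def detect_masks(image: Image.Image, score_threshold=0.5):
    # In eval mode, torchvision detection models take a list of image tensors
    # and return one dict of 'boxes', 'labels', and 'scores' per image.
    tensor = transforms.ToTensor()(image.convert("RGB")).to(device)
    with torch.no_grad():
        output = model([tensor])[0]
    keep = output['scores'] >= score_threshold
    return output['boxes'][keep], output['labels'][keep], output['scores'][keep]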