	Update app.py
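Summary of the change (as read from the diff below): the unused sklearn KMeans import is dropped; the active detect_multiple_dogs now comes first and raises its defaults from conf_threshold=0.35 / iou_threshold=0.5 to 0.4 / 0.55, expanding each NMS-surviving box by 5% of its width and height (clamped to the image bounds) before cropping; the previous implementation, including the area-ratio filter_detections pass, is kept as commented-out code.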
app.py CHANGED
@@ -8,7 +8,6 @@ from torchvision.ops import nms, box_iou
 import torch.nn.functional as F
 from torchvision import transforms
 from PIL import Image, ImageDraw, ImageFont, ImageFilter
-from sklearn.cluster import KMeans
 from data_manager import get_dog_description
 from urllib.parse import quote
 from ultralytics import YOLO
@@ -168,39 +167,10 @@ async def predict_single_dog(image):
     return top1_prob, topk_breeds, topk_probs_percent
 
 
-
-#     results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
-#     dogs = []
-#     boxes = []
-#     for box in results.boxes:
-#         if box.cls == 16:  # COCO dataset class for dog is 16
-#             xyxy = box.xyxy[0].tolist()
-#             confidence = box.conf.item()
-#             boxes.append((xyxy, confidence))
-
-#     if not boxes:
-#         dogs.append((image, 1.0, [0, 0, image.width, image.height]))
-#     else:
-#         nms_boxes = non_max_suppression(boxes, iou_threshold)
-
-#         for box, confidence in nms_boxes:
-#             x1, y1, x2, y2 = box
-#             w, h = x2 - x1, y2 - y1
-#             x1 = max(0, x1 - w * 0.05)
-#             y1 = max(0, y1 - h * 0.05)
-#             x2 = min(image.width, x2 + w * 0.05)
-#             y2 = min(image.height, y2 + h * 0.05)
-#             cropped_image = image.crop((x1, y1, x2, y2))
-#             dogs.append((cropped_image, confidence, [x1, y1, x2, y2]))
-
-#     return dogs
-
-
-async def detect_multiple_dogs(image, conf_threshold=0.35, iou_threshold=0.5):
+async def detect_multiple_dogs(image, conf_threshold=0.4, iou_threshold=0.55):
     results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
     dogs = []
     boxes = []
-
     for box in results.boxes:
         if box.cls == 16:  # COCO dataset class for dog is 16
             xyxy = box.xyxy[0].tolist()
@@ -213,37 +183,66 @@ async def detect_multiple_dogs(image, conf_threshold=0.35, iou_threshold=0.5):
         nms_boxes = non_max_suppression(boxes, iou_threshold)
 
         for box, confidence in nms_boxes:
-            x1, y1, x2, y2 = [int(coord) for coord in box]
+            x1, y1, x2, y2 = box
+            w, h = x2 - x1, y2 - y1
+            x1 = max(0, x1 - w * 0.05)
+            y1 = max(0, y1 - h * 0.05)
+            x2 = min(image.width, x2 + w * 0.05)
+            y2 = min(image.height, y2 + h * 0.05)
             cropped_image = image.crop((x1, y1, x2, y2))
             dogs.append((cropped_image, confidence, [x1, y1, x2, y2]))
 
-    # Apply a filter to remove likely false detections
-    dogs = filter_detections(dogs, (image.width, image.height))
-
     return dogs
 
-def filter_detections(dogs, image_size):
-    filtered_dogs = []
-    image_area = image_size[0] * image_size[1]
-    num_dogs = len(dogs)
 
-    # Dynamically adjust thresholds based on the number of detected dogs
-    if num_dogs > 5:
-        min_ratio, max_ratio = 0.003, 0.5
-    elif num_dogs > 2:
-        min_ratio, max_ratio = 0.005, 0.6
-    else:
-        min_ratio, max_ratio = 0.01, 0.7
+# async def detect_multiple_dogs(image, conf_threshold=0.35, iou_threshold=0.5):
+#     results = model_yolo(image, conf=conf_threshold, iou=iou_threshold)[0]
+#     dogs = []
+#     boxes = []
+
+#     for box in results.boxes:
+#         if box.cls == 16:  # COCO dataset class for dog is 16
+#             xyxy = box.xyxy[0].tolist()
+#             confidence = box.conf.item()
+#             boxes.append((xyxy, confidence))
+
+#     if not boxes:
+#         dogs.append((image, 1.0, [0, 0, image.width, image.height]))
+#     else:
+#         nms_boxes = non_max_suppression(boxes, iou_threshold)
+
+#         for box, confidence in nms_boxes:
+#             x1, y1, x2, y2 = [int(coord) for coord in box]
+#             cropped_image = image.crop((x1, y1, x2, y2))
+#             dogs.append((cropped_image, confidence, [x1, y1, x2, y2]))
+
+#     # Apply a filter to remove likely false detections
+#     dogs = filter_detections(dogs, (image.width, image.height))
+
+#     return dogs
+
+# def filter_detections(dogs, image_size):
+#     filtered_dogs = []
+#     image_area = image_size[0] * image_size[1]
+#     num_dogs = len(dogs)
+
+#     # Dynamically adjust thresholds based on the number of detected dogs
+#     if num_dogs > 5:
+#         min_ratio, max_ratio = 0.003, 0.5
+#     elif num_dogs > 2:
+#         min_ratio, max_ratio = 0.005, 0.6
+#     else:
+#         min_ratio, max_ratio = 0.01, 0.7
 
-    for dog in dogs:
-        _, confidence, box = dog
-        dog_area = (box[2] - box[0]) * (box[3] - box[1])
-        area_ratio = dog_area / image_area
+#     for dog in dogs:
+#         _, confidence, box = dog
+#         dog_area = (box[2] - box[0]) * (box[3] - box[1])
+#         area_ratio = dog_area / image_area
 
-        if min_ratio < area_ratio < max_ratio:
-            filtered_dogs.append(dog)
+#         if min_ratio < area_ratio < max_ratio:
+#             filtered_dogs.append(dog)
 
-    return filtered_dogs
+#     return filtered_dogs
 
 
 def non_max_suppression(boxes, iou_threshold):
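For reference, a minimal standalone sketch (not part of this commit) of the cropping behaviour introduced above: each box surviving NMS is padded by 5% of its width and height and clamped to the image bounds before cropping. The names expand_box and greedy_nms are illustrative only; greedy_nms is just one plausible shape for the non_max_suppression helper that app.py calls (its real implementation is not shown in this diff), built on torchvision.ops.box_iou, which app.py already imports.

import torch
from torchvision.ops import box_iou
from PIL import Image


def expand_box(box, image_size, margin=0.05):
    # Pad an (x1, y1, x2, y2) box by `margin` of its width/height,
    # clamped to the image bounds (mirrors the new lines in the diff).
    x1, y1, x2, y2 = box
    img_w, img_h = image_size
    w, h = x2 - x1, y2 - y1
    return (max(0, x1 - w * margin),
            max(0, y1 - h * margin),
            min(img_w, x2 + w * margin),
            min(img_h, y2 + h * margin))


def greedy_nms(boxes, iou_threshold=0.5):
    # Greedy NMS over [(xyxy, confidence), ...]; a hypothetical stand-in for
    # the non_max_suppression helper referenced in the diff.
    boxes = sorted(boxes, key=lambda b: b[1], reverse=True)
    kept = []
    while boxes:
        best = boxes.pop(0)
        kept.append(best)
        if not boxes:
            break
        best_t = torch.tensor([best[0]], dtype=torch.float32)
        rest_t = torch.tensor([b[0] for b in boxes], dtype=torch.float32)
        ious = box_iou(best_t, rest_t)[0]
        boxes = [b for b, iou in zip(boxes, ious.tolist()) if iou < iou_threshold]
    return kept


if __name__ == "__main__":
    # Two overlapping fake detections; the lower-confidence one is suppressed,
    # and the survivor is cropped with the 5% padding applied.
    image = Image.new("RGB", (640, 480))
    detections = [([100, 100, 300, 300], 0.9), ([110, 105, 310, 305], 0.8)]
    for xyxy, conf in greedy_nms(detections, iou_threshold=0.5):
        x1, y1, x2, y2 = expand_box(xyxy, image.size)
        crop = image.crop((x1, y1, x2, y2))
        print(round(conf, 2), crop.size)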
