Keshabwi66 committed on
Commit f958125 · verified · 1 Parent(s): 3a5cd6f

Update Self-Correction-Human-Parsing/datasets/simple_extractor_dataset.py

Self-Correction-Human-Parsing/datasets/simple_extractor_dataset.py CHANGED
@@ -1,36 +1,18 @@
-#!/usr/bin/env python
-# -*- encoding: utf-8 -*-
-
-"""
-@Author  : Peike Li
-@Contact : [email protected]
-@File    : dataset.py
-@Time    : 8/30/19 9:12 PM
-@Desc    : Dataset Definition
-@License : This source code is licensed under the license found in the
-           LICENSE file in the root directory of this source tree.
-"""
-
-import os
 import cv2
 import numpy as np
-
 from torch.utils import data
 from utils.transforms import get_affine_transform
 
-
-class SimpleFolderDataset(data.Dataset):
-    def __init__(self, root, input_size=[512, 512], transform=None):
-        self.root = root
+class SimpleFileDataset(data.Dataset):
+    def __init__(self, img_path, input_size=[512, 512], transform=None):
+        self.img_path = img_path  # A single file path
         self.input_size = input_size
         self.transform = transform
         self.aspect_ratio = input_size[1] * 1.0 / input_size[0]
         self.input_size = np.asarray(input_size)
 
-        self.file_list = os.listdir(self.root)
-
     def __len__(self):
-        return len(self.file_list)
+        return 1  # Only one image, so the length is 1
 
     def _box2cs(self, box):
         x, y, w, h = box[:4]
@@ -48,16 +30,14 @@ class SimpleFolderDataset(data.Dataset):
         return center, scale
 
     def __getitem__(self, index):
-        img_name = self.file_list[index]
-        img_path = os.path.join(self.root, img_name)
-        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
+        img = cv2.imread(self.img_path, cv2.IMREAD_COLOR)
         h, w, _ = img.shape
 
         # Get person center and scale
         person_center, s = self._box2cs([0, 0, w - 1, h - 1])
         r = 0
         trans = get_affine_transform(person_center, s, r, self.input_size)
-        input = cv2.warpAffine(
+        input_img = cv2.warpAffine(
             img,
             trans,
             (int(self.input_size[1]), int(self.input_size[0])),
@@ -65,9 +45,9 @@ class SimpleFolderDataset(data.Dataset):
             borderMode=cv2.BORDER_CONSTANT,
             borderValue=(0, 0, 0))
 
-        input = self.transform(input)
+        input_img = self.transform(input_img)
         meta = {
-            'name': img_name,
+            'name': self.img_path,
             'center': person_center,
             'height': h,
             'width': w,
@@ -75,4 +55,4 @@ class SimpleFolderDataset(data.Dataset):
             'rotation': r
         }
 
-        return input, meta
+        return input_img, meta
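
For orientation, a minimal usage sketch of the new SimpleFileDataset for single-image inference. Everything outside the class itself is an assumption: the import path is inferred from the file location, the image path is hypothetical, and the normalization transform is just one plausible choice, not something this commit defines.

import torchvision.transforms as transforms
from torch.utils.data import DataLoader

from datasets.simple_extractor_dataset import SimpleFileDataset

# Illustrative transform only; the normalization values are an assumption,
# not taken from this commit.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229]),
])

# Hypothetical image path; the dataset now wraps exactly this one file,
# so the loader yields a single batch.
dataset = SimpleFileDataset(img_path='inputs/person.jpg', transform=transform)
loader = DataLoader(dataset, batch_size=1, shuffle=False)

for image, meta in loader:
    # image: (1, 3, 512, 512) tensor after the affine warp and transform
    # meta: the dict built in __getitem__ ('name', 'center', 'height', 'width', 'rotation', ...)
    print(image.shape, meta['name'])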