admin committed
Commit 2f2017c · 1 Parent(s): 06806f7

Update vi_backbones.py

Files changed (1):
  1. vi_backbones.py +13 -43
vi_backbones.py CHANGED
@@ -1,6 +1,5 @@
 import os
 import re
-# import hashlib
 import requests
 import datasets
 from bs4 import BeautifulSoup
@@ -17,12 +16,9 @@ class vi_backbones(datasets.GeneratorBasedBuilder):
             features=datasets.Features(
                 {
                     "ver": datasets.Value("string"),
-                    "name": datasets.Value("string"),
                     "type": datasets.Value("string"),
                     "input_size": datasets.Value("int16"),
-                    # "output_size": datasets.Value("int64"),
                     "url": datasets.Value("string"),
-                    # "md5": datasets.Value("string"),
                 }
             ),
             supervised_keys=("ver", "type"),
@@ -30,27 +26,6 @@ class vi_backbones(datasets.GeneratorBasedBuilder):
             license="mit"
         )
 
-    # def _get_file_md5(self, url):
-    #     """
-    #     Calculate the MD5 hash value of a file using its URL
-
-    #     :param url: the URL address of the file
-    #     :return: the MD5 hash value in string format
-    #     """
-    #     try:
-    #         response = requests.get(url, stream=True)
-    #         if response.status_code == 200:
-    #             md5obj = hashlib.md5()
-    #             for chunk in response.iter_content(chunk_size=1024*1024):
-    #                 md5obj.update(chunk)
-    #             return md5obj.hexdigest()
-    #         else:
-    #             raise ValueError(
-    #                 f"Error downloading file from {url}. Status code: {response.status_code}")
-    #     except Exception as e:
-    #         raise ValueError(
-    #             f"Error calculating MD5 of file at {url}: {str(e)}")
-
     def _parse_url(self, url):
         response = requests.get(url)
         html = response.text
@@ -58,31 +33,25 @@ class vi_backbones(datasets.GeneratorBasedBuilder):
 
     def _special_type(self, m_ver):
         m_type = re.search('[a-zA-Z]+', m_ver).group(0)
-        m_name = m_ver
 
         if m_type == 'wide' or m_type == 'resnext':
-            m_type = 'resnet'
+            return 'resnet'
 
         elif m_type == 'swin':
-            m_type = 'swin_transformer'
+            return 'swin_transformer'
 
         elif m_type == 'inception':
-            m_type = 'googlenet'
-
-        pattern = r'_v\d+_'
-        if re.search(pattern, m_name):
-            m_name = re.sub(pattern, '_', m_name)
+            return 'googlenet'
 
-        return m_type, m_name
+        return m_type
 
-    def _info_on_dataset(self, m_ver, m_name, m_type, in1k_span):
+    def _info_on_dataset(self, m_ver, m_type, in1k_span):
         url_span = in1k_span.find_next_sibling('span', {'class': 's2'})
         size_span = url_span.find_next_sibling('span', {'class': 'mi'})
         m_url = str(url_span.text[1:-1])
         input_size = int(size_span.text)
         m_dict = {
             'ver': m_ver,
-            'name': m_name,
             'type': m_type,
             'input_size': input_size,
             'url': m_url
@@ -99,7 +68,9 @@ class vi_backbones(datasets.GeneratorBasedBuilder):
             name = str(li.text)
             if name.__contains__('torchvision.models.') and len(name.split('.')) == 3:
 
-                if name.__contains__('_api') or name.__contains__('feature_extraction') or name.__contains__('maxvit'):
+                if name.__contains__('_api') or \
+                   name.__contains__('feature_extraction') or \
+                   name.__contains__('maxvit'):
                     continue
 
                 href = li.find('a').get('href')
@@ -110,7 +81,11 @@ class vi_backbones(datasets.GeneratorBasedBuilder):
             div_id = str(div['id'])
             if div_id.__contains__('_Weights'):
                 m_ver = div_id.split('_Weight')[0].lower()
-                m_type, m_name = self._special_type(m_ver)
+
+                if m_ver.__contains__('swin_v2_'):
+                    continue
+
+                m_type = self._special_type(m_ver)
 
                 in1k_v1_span = div.find(
                     name='span',
@@ -123,7 +98,6 @@ class vi_backbones(datasets.GeneratorBasedBuilder):
 
                 m_dict, size_span = self._info_on_dataset(
                     m_ver,
-                    m_name,
                     m_type,
                     in1k_v1_span
                 )
@@ -138,7 +112,6 @@ class vi_backbones(datasets.GeneratorBasedBuilder):
             if in1k_v2_span != None:
                 m_dict, _ = self._info_on_dataset(
                     m_ver,
-                    m_name,
                     m_type,
                     in1k_v2_span
                 )
@@ -168,10 +141,7 @@ class vi_backbones(datasets.GeneratorBasedBuilder):
             for i, model in enumerate(subset):
                 yield i, {
                     "ver": model['ver'],
-                    "name": model['name'],
                     "type": model['type'],
                     "input_size": model['input_size'],
-                    # "output_size": 1234,
                     "url": model['url'],
-                    # "md5": self._get_file_md5(model['url']),
                 }
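
For illustration, a minimal standalone sketch of the simplified _special_type mapping after this commit: it now returns a single type string instead of the old (m_type, m_name) tuple. The function body mirrors the diff above; the example inputs are hypothetical weight-class ids.

    import re

    def special_type(m_ver):
        # Leading alphabetic run of the version id, e.g. 'wide' from 'wide_resnet50_2'.
        m_type = re.search('[a-zA-Z]+', m_ver).group(0)
        if m_type == 'wide' or m_type == 'resnext':
            return 'resnet'
        elif m_type == 'swin':
            return 'swin_transformer'
        elif m_type == 'inception':
            return 'googlenet'
        return m_type

    print(special_type('wide_resnet50_2'))  # resnet
    print(special_type('swin_t'))           # swin_transformer
    print(special_type('vgg16'))            # vgg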
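The _info_on_dataset helper walks sibling <span> nodes of a syntax-highlighted code block: an 's2' span holding the quoted checkpoint URL, followed by an 'mi' span holding the input size. A minimal reproduction of that walk, with the HTML fragment assumed purely for illustration:

    from bs4 import BeautifulSoup

    # Assumed fragment mimicking the highlighted source the scraper walks.
    html = (
        '<span class="n">IMAGENET1K_V1</span>'
        '<span class="s2">"https://download.pytorch.org/models/resnet50.pth"</span>'
        '<span class="mi">224</span>'
    )
    soup = BeautifulSoup(html, 'html.parser')

    in1k_span = soup.find('span', {'class': 'n'})
    url_span = in1k_span.find_next_sibling('span', {'class': 's2'})
    size_span = url_span.find_next_sibling('span', {'class': 'mi'})

    m_url = str(url_span.text[1:-1])  # strip the surrounding quotes
    input_size = int(size_span.text)
    print(m_url, input_size)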
 
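As a usage sketch, the updated script can be exercised through the datasets library. The local path, the trust_remote_code flag, and the iteration below are assumptions (the diff does not show _split_generators), but the printed fields match the reduced schema:

    import datasets

    # Hypothetical local path to the script; split names depend on _split_generators.
    dsets = datasets.load_dataset('./vi_backbones.py', trust_remote_code=True)

    for split_name, split in dsets.items():
        for record in split:
            # Records now carry only the four retained fields.
            print(record['ver'], record['type'], record['input_size'], record['url'])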