import os
import re
import hashlib
import requests
import datasets
from bs4 import BeautifulSoup

# import subprocess
# subprocess.call(['pip', 'install', 'torchvision'])

# The dataset name is derived from this script's file name, e.g. "vi_backbones.py" -> "vi_backbones".
_DBNAME = os.path.basename(__file__).split('.')[0]

_HOMEPAGE = "https://huggingface.co/datasets/george-chou/" + _DBNAME

# Root of the torchvision model source listing that is scraped for backbone metadata.
_URL = 'https://pytorch.org/vision/main/_modules/'


class vi_backbones(datasets.GeneratorBasedBuilder):
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "ver": datasets.Value("string"),
                    "name": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "input_size": datasets.Value("int16"),
                    "output_size": datasets.Value("int64"),
                    "url": datasets.Value("string"),
                    # "md5": datasets.Value("string"),
                }
            ),
            supervised_keys=("ver", "type"),
            homepage=_HOMEPAGE,
            license="mit",
        )

    def _get_file_md5(self, url):
        """
        Calculate the MD5 hash of a remote file by streaming it from its URL.
        :param url: the URL address of the file
        :return: the MD5 hash value as a hex string
        """
        try:
            response = requests.get(url, stream=True)
            if response.status_code == 200:
                md5obj = hashlib.md5()
                # Hash the file in 1 MiB chunks to avoid loading it fully into memory.
                for chunk in response.iter_content(chunk_size=1024 * 1024):
                    md5obj.update(chunk)
                return md5obj.hexdigest()
            else:
                raise ValueError(
                    f"Error downloading file from {url}. Status code: {response.status_code}")
        except Exception as e:
            raise ValueError(
                f"Error calculating MD5 of file at {url}: {str(e)}")

    def _parse_url(self, url):
        # Fetch a page and return its parsed BeautifulSoup tree.
        response = requests.get(url)
        html = response.text
        return BeautifulSoup(html, 'html.parser')

    def _special_type(self, m_ver):
        # Map a model version string (e.g. "wide_resnet50_2") to its architecture family,
        # normalizing aliases such as wide/resnext -> resnet and inception -> googlenet.
        m_type = re.search('[a-zA-Z]+', m_ver).group(0)
        m_name = m_ver
        if m_type == 'wide' or m_type == 'resnext':
            m_type = 'resnet'
        elif m_type == 'swin':
            m_type = 'swin_transformer'
        elif m_type == 'inception':
            m_type = 'googlenet'

        pattern = r'_v\d+_'
        if re.search(pattern, m_name):
            # Strip an internal "_v<N>_" marker, e.g. "mobilenet_v3_large" -> "mobilenet_large".
            m_name = re.sub(pattern, '_', m_name)

        return m_type, m_name

    def _info_on_dataset(self, m_ver, m_name, m_type, in1k_span):
        # Starting from the IMAGENET1K_* weight tag, the next 's2' span holds the
        # checkpoint URL and the following 'mi' span holds the input (crop) size.
        url_span = in1k_span.find_next_sibling('span', {'class': 's2'})
        size_span = url_span.find_next_sibling('span', {'class': 'mi'})
        m_url = str(url_span.text[1:-1])  # strip the surrounding quotes
        input_size = int(size_span.text)
        m_dict = {
            'ver': m_ver,
            'name': m_name,
            'type': m_type,
            'input_size': input_size,
            'url': m_url
        }
        return m_dict, size_span

    def _generate_dataset(self, url):
        torch_page = self._parse_url(url)
        article = torch_page.find('article', {'id': 'pytorch-article'})
        ul = article.find('ul').find('ul')
        in1k_v1, in1k_v2 = [], []
        # import torchvision.models as models

        for li in ul.find_all('li'):
            name = str(li.text)
            # Only keep top-level model modules like "torchvision.models.<arch>".
            if 'torchvision.models.' in name and len(name.split('.')) == 3:
                if '_api' in name or 'feature_extraction' in name:
                    continue

                href = li.find('a').get('href')
                model_page = self._parse_url(url + href)
                divs = model_page.select('div.viewcode-block')

                for div in divs:
                    div_id = str(div['id'])
                    if '_Weights' in div_id:
                        m_ver = div_id.split('_Weight')[0].lower()
                        m_type, m_name = self._special_type(m_ver)

                        # IMAGENET1K_V1 weights are expected for every backbone.
                        in1k_v1_span = div.find(
                            name='span',
                            attrs={'class': 'n'},
                            string='IMAGENET1K_V1'
                        )
                        if in1k_v1_span is None:
                            continue

                        m_dict, size_span = self._info_on_dataset(
                            m_ver,
                            m_name,
                            m_type,
                            in1k_v1_span
                        )
                        in1k_v1.append(m_dict)

                        # IMAGENET1K_V2 weights exist only for some backbones.
                        in1k_v2_span = size_span.find_next_sibling(
                            name='span',
                            attrs={'class': 'n'},
                            string='IMAGENET1K_V2'
                        )
                        if in1k_v2_span is not None:
                            m_dict, _ = self._info_on_dataset(
                                m_ver,
                                m_name,
                                m_type,
                                in1k_v2_span
                            )
                            in1k_v2.append(m_dict)

        return in1k_v1, in1k_v2

    def _split_generators(self, _):
        # Splits are built directly from the scraped torchvision pages, so the usual
        # dl_manager argument is ignored.
        in1k_v1, in1k_v2 = self._generate_dataset(_URL)
        return [
            datasets.SplitGenerator(
                name="IMAGENET1K_V1",
                gen_kwargs={
                    "subset": in1k_v1,
                },
            ),
            datasets.SplitGenerator(
                name="IMAGENET1K_V2",
                gen_kwargs={
                    "subset": in1k_v2,
                },
            ),
        ]

    def _generate_examples(self, subset):
        for i, model in enumerate(subset):
            yield i, {
                "ver": model['ver'],
                "name": model['name'],
                "type": model['type'],
                "input_size": model['input_size'],
                "output_size": 1234,  # fixed constant; not scraped from the source pages
                "url": model['url'],
                # "md5": self._get_file_md5(model['url']),
            }
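
# A minimal usage sketch (assumption: this script is hosted on the Hub as
# "george-chou/vi_backbones", matching _HOMEPAGE above, and is loaded as a
# community dataset script with remote code enabled):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("george-chou/vi_backbones", trust_remote_code=True)
#   print(ds["IMAGENET1K_V1"][0])  # {'ver': ..., 'name': ..., 'type': ..., 'url': ...}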