# NOTE: removed non-Python scrape artifacts (file-size banner, git-blame hashes,
# and gutter line numbers) that were pasted above the imports and broke parsing.
import os
import re
import requests
import datasets
from bs4 import BeautifulSoup
# Dataset name: this script's filename, truncated at the first '.'.
_DBNAME = os.path.basename(__file__).split('.')[0]
# Hugging Face Hub page for this dataset.
_HOMEPAGE = "https://huggingface.co/datasets/george-chou/" + _DBNAME
# Root of the rendered torchvision source pages that get scraped below.
_URL = 'https://pytorch.org/vision/main/_modules/'
class vi_backbones(datasets.GeneratorBasedBuilder):
    """Scrape torchvision backbone-weight metadata from the PyTorch docs site.

    Crawls the rendered ``_modules`` pages, finds every ``*_Weights`` class,
    and yields one row per weight set with its version string, architecture
    type, expected input size, and download URL. Two splits are produced:
    ``IMAGENET1K_V1`` and ``IMAGENET1K_V2``.
    """

    # Seconds before an HTTP request is abandoned instead of hanging forever.
    _HTTP_TIMEOUT = 30

    def _info(self):
        """Declare the dataset schema, homepage, and license."""
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "ver": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "input_size": datasets.Value("int16"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=("ver", "type"),
            homepage=_HOMEPAGE,
            license="mit",
        )

    def _parse_url(self, url):
        """Fetch ``url`` and return its HTML as a BeautifulSoup tree.

        Raises ``requests.HTTPError`` on a non-2xx response rather than
        silently parsing an error page, and times out instead of blocking
        indefinitely (the original did neither).
        """
        response = requests.get(url, timeout=self._HTTP_TIMEOUT)
        response.raise_for_status()
        return BeautifulSoup(response.text, 'html.parser')

    def _info_on_dataset(self, m_ver, m_type, in1k_span):
        """Build one metadata row from the spans following ``in1k_span``.

        ``in1k_span`` anchors an ``IMAGENET1K_V*`` label; the next ``s2`` span
        holds the quoted weight URL and the next ``mi`` span the input size.
        Returns ``(row_dict, size_span)`` — the caller reuses ``size_span`` as
        the anchor when searching for a subsequent ``IMAGENET1K_V2`` label.
        """
        url_span = in1k_span.find_next_sibling('span', {'class': 's2'})
        size_span = url_span.find_next_sibling('span', {'class': 'mi'})
        m_dict = {
            'ver': m_ver,
            'type': m_type,
            'input_size': int(size_span.text),
            # [1:-2] strips the surrounding quote characters of the literal.
            'url': str(url_span.text[1:-2]),
        }
        return m_dict, size_span

    def _generate_dataset(self, url):
        """Crawl the torchvision models index and collect weight metadata.

        Returns ``(in1k_v1, in1k_v2)``: lists of row dicts for the V1 and V2
        ImageNet weight sets respectively.
        """
        torch_page = self._parse_url(url)
        article = torch_page.find('article', {'id': 'pytorch-article'})
        ul = article.find('ul').find('ul')
        in1k_v1, in1k_v2 = [], []
        for li in ul.find_all('li'):
            name = str(li.text)
            # Keep only leaf modules like 'torchvision.models.resnet'.
            if 'torchvision.models.' not in name or len(name.split('.')) != 3:
                continue
            # Skip utility modules that define no weight classes.
            if '_api' in name or 'feature_extraction' in name:
                continue
            href = li.find('a').get('href')
            model_page = self._parse_url(url + href)
            for div in model_page.select('div.viewcode-block'):
                div_id = str(div['id'])
                if '_Weights' not in div_id:
                    continue
                m_ver = div_id.split('_Weight')[0].lower()
                # Leading alphabetic run is the architecture family name.
                m_type = re.search(r'[a-zA-Z]+', m_ver).group(0)
                in1k_v1_span = div.find('span', string='IMAGENET1K_V1')
                if in1k_v1_span is None:
                    # Weight class with no V1 entry — the original crashed
                    # with AttributeError here; skip it instead.
                    continue
                m_dict, size_span = self._info_on_dataset(
                    m_ver, m_type, in1k_v1_span)
                in1k_v1.append(m_dict)
                in1k_v2_span = size_span.find_next_sibling(
                    'span', string='IMAGENET1K_V2')
                if in1k_v2_span is not None:
                    m_dict, _ = self._info_on_dataset(
                        m_ver, m_type, in1k_v2_span)
                    in1k_v2.append(m_dict)
        return in1k_v1, in1k_v2

    def _split_generators(self, dl_manager):
        """Scrape once and expose the two weight sets as named splits."""
        in1k_v1, in1k_v2 = self._generate_dataset(_URL)
        return [
            datasets.SplitGenerator(
                name="IMAGENET1K_V1",
                gen_kwargs={
                    "files": in1k_v1,
                },
            ),
            datasets.SplitGenerator(
                name="IMAGENET1K_V2",
                gen_kwargs={
                    "files": in1k_v2,
                },
            ),
        ]

    def _generate_examples(self, files):
        """Yield (index, row) pairs from the pre-scraped list of dicts."""
        for i, model in enumerate(files):
            yield i, {
                "ver": model['ver'],
                "type": model['type'],
                "input_size": model['input_size'],
                "url": model['url'],
            }