admin committed on
Commit 2334ed3 · 1 Parent(s): 5782f7d

try using a Python script to auto-create the dataset

backbone.csv → data/backbone.csv RENAMED
File without changes
vi_backbones.py ADDED
@@ -0,0 +1,103 @@
+ import os
+ import re
+ import requests
+ from bs4 import BeautifulSoup
+ import pandas as pd
+ # import random
+ import datasets
+
+
+ # Derive the dataset name from this script's filename.
+ _DBNAME = os.path.basename(__file__).split('.')[0]
+
+ _HOMEPAGE = "https://huggingface.co/datasets/george-chou/" + _DBNAME
+
+ # Index of torchvision model source modules to scrape.
+ _URL = 'https://pytorch.org/vision/main/_modules/'
+
+ # Map each backbone version to its architecture type, read from data/backbone.csv.
+ _TYPES = pd.read_csv(_HOMEPAGE + '/resolve/main/data/backbone.csv',
+                      index_col='ver').to_dict()['type']
+
+ _NAMES = sorted(list(set(_TYPES.values())))
+
+
+ class vi_backbones(datasets.GeneratorBasedBuilder):
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             features=datasets.Features(
+                 {
+                     "ver": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     # "type": datasets.features.ClassLabel(names=_NAMES),
+                     "input_size": datasets.Value("int32"),
+                     "output_size": datasets.Value("int32"),
+                     "url": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=("ver", "type"),
+             homepage=_HOMEPAGE,
+             license="mit"
+         )
+
+     def _parse_url(self, url):
+         # Fetch a page and return its parsed DOM.
+         response = requests.get(url)
+         html = response.text
+         return BeautifulSoup(html, 'html.parser')
+
+     def _generate_dataset(self, url):
+         # Walk the torchvision module index and collect one record
+         # per *_Weights enum found in the model source pages.
+         torch_page = self._parse_url(url)
+         article = torch_page.find('article', {'id': 'pytorch-article'})
+         ul = article.find('ul').find('ul')
+         dataset = []
+         for li in ul.find_all('li'):
+             name = str(li.text)
+             if 'torchvision.models.' in name and len(name.split('.')) == 3:
+                 if '_api' in name or 'feature_extraction' in name:
+                     continue
+                 href = li.find('a').get('href')
+                 model_page = self._parse_url(url + href)
+                 divs = model_page.select('div.viewcode-block')
+                 for div in divs:
+                     div_id = str(div['id'])
+                     if '_Weights' in div_id:
+                         m_ver = div_id.split('_Weight')[0].lower()
+                         # The leading alphabetic run of the version string is the model type.
+                         m_type = re.search('[a-zA-Z]+', m_ver).group(0)
+                         input_size = int(
+                             div.find('span', {'class': 'mi'}).text)
+                         m_url = str(div.find('span', {'class': 's2'}).text)
+                         m_dict = {
+                             'ver': m_ver,
+                             'type': m_type,
+                             'input_size': input_size,
+                             'url': m_url
+                         }
+                         print('Adding ' + m_ver)
+                         dataset.append(m_dict)
+         return dataset
+
+     def _split_generators(self, dl_manager):
+         dataset = self._generate_dataset(_URL)
+
+         # Both named splits currently share the same scraped records.
+         return [
+             datasets.SplitGenerator(
+                 name="IMAGENET1K_V1",
+                 gen_kwargs={
+                     "files": dataset,
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name="IMAGENET1K_V2",
+                 gen_kwargs={
+                     "files": dataset,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, files):
+         for i, model in enumerate(files):
+             yield i, {
+                 "ver": model['ver'],
+                 "type": model['type'],
+                 "input_size": model['input_size'],
+                 "output_size": 9216,  # fixed value; matches the "output_size" feature above
+                 "url": model['url'],
+             }
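
Once this script is on the Hub, the splits defined in _split_generators can be loaded through the standard datasets API. A minimal usage sketch, assuming the repo id george-chou/vi_backbones and a datasets release that still supports script-based builders (recent versions also require trust_remote_code=True for them):

from datasets import load_dataset

# Runs vi_backbones.py to build the dataset, then returns the requested split.
ds = load_dataset("george-chou/vi_backbones",
                  split="IMAGENET1K_V1",
                  trust_remote_code=True)
print(ds[0])  # expected keys: ver, type, input_size, output_size, url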