Dataset: mb23/GraySpectrogram
Languages: English

Commit 5973fe6 · 1 parent: c146a92
mickylan2367 committed: Update README

Files changed (1): README.md (+137, -4)

README.md CHANGED
@@ -6,7 +6,7 @@ tags:
  - music
  - spectrogram
  size_categories:
- - n<1K
+ - 10K<n<100K
  ---
 
  ## Spectrogram data made from Google/MusicCaps
@@ -29,7 +29,7 @@ size_categories:
  </tbody>
  </table>
 
- ### How the data was made
+ ### How this dataset was made
 
  * Code: https://colab.research.google.com/drive/13m792FEoXszj72viZuBtusYRUL1z6Cu2?usp=sharing
  * Reference Kaggle notebook: https://www.kaggle.com/code/osanseviero/musiccaps-explorer
@@ -48,7 +48,140 @@ image = Image.fromarray(np.uint8(D), mode='L') # 'L' is 1-channel grayscale
  image.save('spectrogram_{}.png')
  ```
 
- ### ♪ How to restore the audio
+ ## How to use this
+ * <font color="red">Subsets <b>data 1300-1600</b> and <b>data 3400-3600</b> are not working at the moment, so build <code>subset_name_list</code> with those two removed first.</font>
+ ### Get information about this dataset
+ ```python
+ # Extract the dataset's information using the Hugging Face datasets-server API.
+ import re
+ import requests
+ 
+ API_TOKEN = "<your Hugging Face access token>"
+ headers = {"Authorization": f"Bearer {API_TOKEN}"}
+ API_URL = "https://datasets-server.huggingface.co/info?dataset=mb23%2FGraySpectrogram"
+ 
+ def query():
+     response = requests.get(API_URL, headers=headers)
+     return response.json()
+ 
+ data = query()
+ 
+ # Natural sort key so that "data 200-600" sorts before "data 1000-1300".
+ def natural_keys(text):
+     return [int(part) if part.isdigit() else part for part in re.split(r"(\d+)", text)]
+ 
+ # Make the subset name list.
+ subset_name_list = list()
+ for dic in data["failed"]:
+     subset_name_list.append(dic["config"])
+     # print(dic["config"])
+ subset_name_list = sorted(subset_name_list, key=natural_keys)
+ 
+ # Drop the subsets that are not working.
+ remove_list = [
+     "data 1300-1600",
+     "data 3400-3600"
+ ]
+ for remove_dataset in remove_list:
+     if remove_dataset in subset_name_list:
+         subset_name_list.remove(remove_dataset)
+ 
+ subset_name_list
+ '''
+ Returns the subset name list, for example:
+ ['data 0-200',
+  'data 200-600',
+  'data 600-1000',
+  'data 1000-1300',
+  'data 1600-2000',
+  'data 2000-2200',
+  'data 2200-2400',
+  'data 2400-2600',
+  'data 2600-2800',
+  'data 3000-3200',
+  'data 3200-3400',
+  'data 3600-3800',
+  'data 3800-4000',
+  'data 4000-4200',
+  'data 4200-4400',
+  'data 4400-4600',
+  'data 4600-4800',
+  'data 4800-5000',
+  'data 5000-5200',
+  'data 5200-5520']
+ '''
+ ```
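If you only need the subset (config) names, the `datasets` library can also list them directly. A minimal sketch (not part of the original README; the broken subsets still have to be filtered out by hand):

```python
# Sketch: list this dataset's configs with the datasets library instead of the raw HTTP API.
from datasets import get_dataset_config_names

remove_list = ["data 1300-1600", "data 3400-3600"]  # the broken subsets named above
subset_name_list = [
    name for name in get_dataset_config_names("mb23/GraySpectrogram")
    if name not in remove_list
]
print(subset_name_list)
```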
+ 
+ 
+ ### Load the dataset and turn it into a DataLoader
+ * You can use the code below:
+ * <font color="red">...but (;・∀・) I don't know whether this code works efficiently, because I haven't tried it myself yet.</font>
+ ```python
+ import datasets
+ from datasets import load_dataset, Dataset, DatasetDict
+ from torchvision import transforms
+ from torch.utils.data import DataLoader
+ 
+ BATCH_SIZE = 64      # set your own batch size
+ IMAGE_SIZE = 128     # set your own image size
+ TRAIN_SIZE = None    # the number of training data (not used below)
+ TEST_SIZE = None     # the number of test data (not used below)
+ 
+ def load_datasets():
+     # Define data transforms
+     data_transforms = [
+         transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
+         transforms.ToTensor(),                     # scales data into [0, 1]
+         transforms.Lambda(lambda t: (t * 2) - 1),  # scale between [-1, 1]
+     ]
+     data_transform = transforms.Compose(data_transforms)
+ 
+     # Concatenate every subset into one train split and one test split.
+     # Confirm that subset_name_list no longer contains the "remove_list" subsets from the cell above.
+     data = load_dataset("mb23/GraySpectrogram", subset_name_list[0])
+     for subset in subset_name_list[1:]:
+         print(subset)
+         new_ds = load_dataset("mb23/GraySpectrogram", subset)
+         # take the place of data[split]
+         data["train"] = datasets.concatenate_datasets([data["train"], new_ds["train"]])
+         data["test"] = datasets.concatenate_datasets([data["test"], new_ds["test"]])
+ 
+     # memo:
+     # I don't know a clean way to pull out only the features I need, so this is brute force.
+     # I really wanted to select the columns at load_dataset() time, but that seems impossible.
+     # It might be better to rebuild the repository and push_to_hub() again.
+ 
+     # Keep only the image and caption columns, apply the transform to every image,
+     # and store the result in torch format (to avoid getting plain Python lists back).
+     new_dataset = dict()
+     for split in ["train", "test"]:
+         images = [data_transform(img).numpy() for img in data[split]["image"]]
+         new_dataset[split] = Dataset.from_dict({
+             "image": images,
+             "caption": data[split]["caption"]
+         }).with_format("torch")
+     data = DatasetDict(new_dataset)
+ 
+     train_loader = DataLoader(data["train"], batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
+     test_loader = DataLoader(data["test"], batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
+     return train_loader, test_loader
+ ```
+ * Then try this:
+ ```
+ train_loader, test_loader = load_datasets()
+ ```
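An alternative sketch (not from the README's author and untested on this dataset): `Dataset.with_transform()` applies the torchvision transform lazily when items are accessed, so the images do not all have to be converted up front. It assumes the concatenated `data` DatasetDict (as built inside `load_datasets()` above), `data_transform`, and `BATCH_SIZE` are in scope.

```python
# Sketch: lazy transforms with with_transform(), assuming `data` has "image" and "caption" columns.
from torch.utils.data import DataLoader

def apply_transform(batch):
    # with_transform() hands over a batch as a dict of columns; transform each PIL image.
    batch["image"] = [data_transform(img) for img in batch["image"]]
    return batch

train = data["train"].with_transform(apply_transform)
test = data["test"].with_transform(apply_transform)

train_loader = DataLoader(train, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
test_loader = DataLoader(test, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
```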
+ 
+ ### Recover the music (waveform) from a spectrogram
  ```python
  im = Image.open("<png file>")
  db_ud = np.uint8(np.array(im))
@@ -60,4 +193,4 @@ print(amp.shape)
  
  y_inv = librosa.griffinlim(amp*200)
  display(IPython.display.Audio(y_inv, rate=sr))
- ```
+ ```
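For reference, here is a self-contained round trip (audio → grayscale spectrogram PNG → audio) in the same spirit as the snippets above. This is only a sketch: the dB-to-uint8 mapping below is an illustrative choice, not the exact scaling used to build this dataset (that is in the Colab notebook linked above).

```python
# Round-trip sketch with librosa + PIL. The +80 dB / 255 mapping is illustrative,
# not the scaling used for mb23/GraySpectrogram itself.
import numpy as np
import librosa
from PIL import Image

# forward: audio -> |STFT| in dB -> 8-bit grayscale PNG
y, sr = librosa.load(librosa.example("trumpet"))
S = np.abs(librosa.stft(y))
db = librosa.amplitude_to_db(S, ref=np.max)                 # roughly [-80, 0] dB
img = np.uint8(np.clip(db + 80.0, 0.0, 80.0) / 80.0 * 255)  # map to [0, 255]
Image.fromarray(img, mode='L').save("spectrogram_demo.png")

# inverse: PNG -> dB -> magnitude -> Griffin-Lim phase reconstruction
img2 = np.array(Image.open("spectrogram_demo.png")).astype(np.float32)
db2 = img2 / 255.0 * 80.0 - 80.0
amp = librosa.db_to_amplitude(db2, ref=np.max(S))
y_inv = librosa.griffinlim(amp)
# In a notebook: display(IPython.display.Audio(y_inv, rate=sr))
```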