python_code
stringlengths
0
229k
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os from functools import partial from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper, IterDataPipe from .utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument URL = { "train": "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json", "dev": "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json", } MD5 = { "train": "62108c273c268d70893182d5cf8df740", "dev": "246adae8b7002f8679c027697b0b7cf8", } NUM_LINES = { "train": 130319, "dev": 11873, } DATASET_NAME = "SQuAD2" def _path_fn(root, path): return os.path.join(root, os.path.basename(path)) class _ParseSQuADQAData(IterDataPipe): def __init__(self, source_datapipe) -> None: self.source_datapipe = source_datapipe def __iter__(self): for _, stream in self.source_datapipe: raw_json_data = stream["data"] for layer1 in raw_json_data: for layer2 in layer1["paragraphs"]: for layer3 in layer2["qas"]: _context, _question = layer2["context"], layer3["question"] _answers = [item["text"] for item in layer3["answers"]] _answer_start = [item["answer_start"] for item in layer3["answers"]] if len(_answers) == 0: _answers = [""] _answer_start = [-1] yield (_context, _question, _answers, _answer_start) @_add_docstring_header(num_lines=NUM_LINES) @_create_dataset_directory(dataset_name=DATASET_NAME) @_wrap_split_argument(("train", "dev")) def SQuAD2(root, split): """Demonstrates use case when more complex processing is needed on data-stream Here we process dictionary returned by standard JSON reader and write custom datapipe to orchestrates data samples for Q&A use-case """ url_dp = IterableWrapper([URL[split]]) # cache data on-disk with sanity check cache_dp = url_dp.on_disk_cache( filepath_fn=partial(_path_fn, root), hash_dict={_path_fn(root, URL[split]): MD5[split]}, hash_type="md5", ) cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True) cache_dp = FileOpener(cache_dp, mode="b") # stack custom data pipe on top of JSON reader to orchestrate data samples for Q&A dataset return _ParseSQuADQAData(cache_dp.parse_json_files())
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from torchdata.datapipes.iter import HttpReader from .utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument URL = { "train": "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv", "test": "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv", } MD5 = { "train": "b1a00f826fdfbd249f79597b59e1dc12", "test": "d52ea96a97a2d943681189a97654912d", } NUM_LINES = { "train": 120000, "test": 7600, } DATASET_NAME = "AG_NEWS" def _process_tuple(t): return int(t[0]), " ".join(t[1:]) @_add_docstring_header(num_lines=NUM_LINES, num_classes=4) @_create_dataset_directory(dataset_name=DATASET_NAME) @_wrap_split_argument(("train", "test")) def AG_NEWS(root, split): """Demonstrating streaming use case This might be useful when we do not want to cache or download the data. The limitation is that we do not have any checking mechanism or data sanity check. """ # Stack CSV Parser directly on top of web-stream return HttpReader([URL[split]]).parse_csv().map(_process_tuple)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torchtext import torchtext.functional as F import torchtext.transforms as T from torch.hub import load_state_dict_from_url from torch.optim import AdamW from torchdata.dataloader2 import DataLoader2 from torchtext.datasets import SST2 LEARNING_RATE = 1e-5 PADDING_IDX = 1 BOS_IDX = 0 EOS_IDX = 2 MAX_SEQ_LEN = 256 XLMR_VOCAB_PATH = r"https://download.pytorch.org/models/text/xlmr.vocab.pt" XLMR_SPM_MODEL_PATH = r"https://download.pytorch.org/models/text/xlmr.sentencepiece.bpe.model" DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") text_transform = T.Sequential( T.SentencePieceTokenizer(XLMR_SPM_MODEL_PATH), T.VocabTransform(load_state_dict_from_url(XLMR_VOCAB_PATH)), T.Truncate(MAX_SEQ_LEN - 2), T.AddToken(token=BOS_IDX, begin=True), T.AddToken(token=EOS_IDX, begin=False), ) NUM_EPOCHS = 1 BATCH_SIZE = 8 NUM_CLASSES = 2 INPUT_DIM = 768 def apply_transform(x): return text_transform(x[0]), x[1] def train_step(input: torch.Tensor, target: torch.Tensor) -> None: output = model(input) loss = criteria(output, target) optim.zero_grad() loss.backward() optim.step() def eval_step(input: torch.Tensor, target: torch.Tensor) -> None: output = model(input) loss = criteria(output, target).item() return float(loss), (output.argmax(1) == target).type(torch.float).sum().item() def evaluate() -> None: model.eval() total_loss = 0 correct_predictions = 0 total_predictions = 0 counter = 0 with torch.no_grad(): for batch in eval_dataloader: input = F.to_tensor(batch["token_ids"], padding_value=PADDING_IDX).to(DEVICE) target = torch.tensor(batch["target"]).to(DEVICE) loss, predictions = eval_step(input, target) total_loss += loss correct_predictions += predictions total_predictions += len(target) counter += 1 return total_loss / counter, correct_predictions / total_predictions if __name__ == "__main__": train_datapipe = SST2(split="train") eval_datapipe = SST2(split="dev") train_datapipe = train_datapipe.map(apply_transform) train_datapipe = train_datapipe.batch(BATCH_SIZE) train_datapipe = train_datapipe.rows2columnar(["token_ids", "target"]) train_dataloader = DataLoader2(datapipe=train_datapipe) print("Created train dataloader") eval_datapipe = eval_datapipe.map(apply_transform) eval_datapipe = eval_datapipe.batch(BATCH_SIZE) eval_datapipe = eval_datapipe.rows2columnar(["token_ids", "target"]) eval_dataloader = DataLoader2(datapipe=eval_datapipe) print("Created eval dataloader") classifier_head = torchtext.models.RobertaClassificationHead(num_classes=NUM_CLASSES, input_dim=INPUT_DIM) model = torchtext.models.XLMR_BASE_ENCODER.get_model(head=classifier_head) model.to(DEVICE) optim = AdamW(model.parameters(), lr=LEARNING_RATE) criteria = nn.CrossEntropyLoss() for epoch in range(NUM_EPOCHS): for step, batch in enumerate(train_dataloader): input = F.to_tensor(batch["token_ids"], padding_value=PADDING_IDX).to(DEVICE) target = torch.tensor(batch["target"]).to(DEVICE) train_step(input, target) # stop early for example purpose if step == 10: break loss, accuracy = evaluate() print(f"Epoch: {epoch}, loss: {loss}, accuracy: {accuracy}") print("Finished Training")
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import torch from torchdata.dataloader2 import DataLoader2 from torchdata.datapipes.iter import IterableWrapper class ToyModel(torch.nn.Module): def __init__(self) -> None: """ In the model constructor, we instantiate four parameters and use them as member parameters. """ super().__init__() self.a = torch.nn.Parameter(torch.randn(())) self.b = torch.nn.Parameter(torch.randn(())) self.c = torch.nn.Parameter(torch.randn(())) self.d = torch.nn.Parameter(torch.randn(())) def forward(self, x: torch.Tensor) -> torch.Tensor: """ Simple model forward function """ return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3 if __name__ == "__main__": model = ToyModel() train_features = IterableWrapper([torch.rand(3) for _ in range(20000)]) train_labels = IterableWrapper([torch.rand(3) for _ in range(20000)]) train_data_pipe = train_features.zip(train_labels).shuffle() # DataLoader2 wraps an iterable around the Datapipe to enable easy access to # the features and labels. data_loader = DataLoader2(datapipe=train_data_pipe) # Construct the loss function and the optimizer. criterion = torch.nn.MSELoss(reduction="sum") optimizer = torch.optim.SGD(model.parameters(), lr=1e-6) # Loop over the dataset multiple times. Here we are doing only 3 training # epochs - that is, three passes over the training datapipes. for epoch in range(3): # Set manual seed per epoch to control the randomness for shuffle. torch.manual_seed(epoch) running_loss = 0.0 for step, data in enumerate(data_loader): # Obtain the inputs and labels from data. train_feature, train_label = data # Zero the parameter gradients. optimizer.zero_grad() # Train step: forward + backward + optimize. predicted_outputs = model(train_feature) loss = criterion(predicted_outputs, train_label) loss.backward() optimizer.step() # Calculate the statistics. running_loss += loss.item() # Print the loss every 2000 mini-batches. if step % 2000 == 1999: print("[epoch: %d, %5d] loss: %.3f" % (epoch + 1, step + 1, running_loss / 2000)) running_loss = 0.0 print("Finished Training")
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os import torch import torch.distributed as dist from torch import nn from torchdata.dataloader2 import DataLoader2, DistributedReadingService from torchdata.datapipes.iter import IterableWrapper class ToyModel(nn.Module): def __init__(self) -> None: """ In the model constructor, we instantiate four parameters and use them as member parameters. """ super().__init__() self.a = nn.Parameter(torch.randn(())) self.b = nn.Parameter(torch.randn(())) self.c = nn.Parameter(torch.randn(())) self.d = nn.Parameter(torch.randn(())) def forward(self, x: torch.Tensor) -> torch.Tensor: """ Simple model forward function """ return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3 if __name__ == "__main__": model = ToyModel() os.environ["RANK"] = str(0) os.environ["WORLD_SIZE"] = str(2) os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "0" dist.init_process_group("gloo") # Use a prime number to make sure uneven data sharding and let # DistributedReadingService prevent hanging with the unbalanced data shard data_length = 19997 train_features = IterableWrapper([torch.rand(3) for _ in range(data_length)]) train_labels = IterableWrapper([torch.rand(3) for _ in range(data_length)]) # sharding_filter will automatically shard the data based on the # distributed ranks train_data_pipe = train_features.zip(train_labels).shuffle().sharding_filter() # Torch Distributed is required to use DistributedReadingService reading_service = DistributedReadingService() # Create DataLoader2 with DistributedReadingService data_loader2 = DataLoader2( datapipe=train_data_pipe, reading_service=reading_service, ) criterion = torch.nn.MSELoss(reduction="sum") optimizer = torch.optim.SGD(model.parameters(), lr=1e-6) for epoch in range(5): # Set manual seed per epoch to control the randomness for shuffle. torch.manual_seed(epoch) running_loss = 0.0 for step, data in enumerate(data_loader2): train_feature, train_label = data optimizer.zero_grad() predicted_outputs = model(train_feature) loss = criterion(predicted_outputs, train_label) loss.backward() optimizer.step() running_loss += loss.item() if step % 2000 == 1999: print("[epoch: %d, %5d] loss: %.3f" % (epoch + 1, step + 1, running_loss / 2000)) running_loss = 0.0 print("Finished Training") """ Training Output: [epoch: 1, 2000] loss: 0.860 [epoch: 1, 4000] loss: 0.823 [epoch: 1, 6000] loss: 0.809 [epoch: 1, 8000] loss: 0.778 [epoch: 1, 10000] loss: 0.753 [epoch: 1, 12000] loss: 0.756 [epoch: 1, 14000] loss: 0.730 [epoch: 1, 16000] loss: 0.727 [epoch: 1, 18000] loss: 0.704 [epoch: 1, 20000] loss: 0.703 [epoch: 2, 2000] loss: 0.677 [epoch: 2, 4000] loss: 0.649 [epoch: 2, 6000] loss: 0.648 [epoch: 2, 8000] loss: 0.629 [epoch: 2, 10000] loss: 0.623 [epoch: 2, 12000] loss: 0.593 [epoch: 2, 14000] loss: 0.586 [epoch: 2, 16000] loss: 0.584 [epoch: 2, 18000] loss: 0.571 [epoch: 2, 20000] loss: 0.558 [epoch: 3, 2000] loss: 0.537 [epoch: 3, 4000] loss: 0.540 [epoch: 3, 6000] loss: 0.544 [epoch: 3, 8000] loss: 0.512 [epoch: 3, 10000] loss: 0.496 [epoch: 3, 12000] loss: 0.506 [epoch: 3, 14000] loss: 0.486 [epoch: 3, 16000] loss: 0.489 [epoch: 3, 18000] loss: 0.489 [epoch: 3, 20000] loss: 0.456 [epoch: 4, 2000] loss: 0.474 [epoch: 4, 4000] loss: 0.445 [epoch: 4, 6000] loss: 0.442 [epoch: 4, 8000] loss: 0.440 [epoch: 4, 10000] loss: 0.434 [epoch: 4, 12000] loss: 0.421 [epoch: 4, 14000] loss: 0.415 [epoch: 4, 16000] loss: 0.404 [epoch: 4, 18000] loss: 0.427 [epoch: 4, 20000] loss: 0.410 [epoch: 5, 2000] loss: 0.395 [epoch: 5, 4000] loss: 0.393 [epoch: 5, 6000] loss: 0.389 [epoch: 5, 8000] loss: 0.397 [epoch: 5, 10000] loss: 0.375 [epoch: 5, 12000] loss: 0.375 [epoch: 5, 14000] loss: 0.372 [epoch: 5, 16000] loss: 0.365 [epoch: 5, 18000] loss: 0.371 [epoch: 5, 20000] loss: 0.359 Finished Training """
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import torch from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService from torchdata.datapipes.iter import IterableWrapper class ToyModel(torch.nn.Module): def __init__(self) -> None: """ In the model constructor, we instantiate four parameters and use them as member parameters. """ super().__init__() self.a = torch.nn.Parameter(torch.randn(())) self.b = torch.nn.Parameter(torch.randn(())) self.c = torch.nn.Parameter(torch.randn(())) self.d = torch.nn.Parameter(torch.randn(())) def forward(self, x: torch.Tensor) -> torch.Tensor: """ Simple model forward function """ return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3 if __name__ == "__main__": model = ToyModel() train_features = IterableWrapper([torch.rand(3) for _ in range(20000)]) train_labels = IterableWrapper([torch.rand(3) for _ in range(20000)]) train_data_pipe = train_features.zip(train_labels).shuffle().sharding_filter() # Create DataLoader2 with MultiProcessingReadingService data_loader = DataLoader2( datapipe=train_data_pipe, reading_service=MultiProcessingReadingService(num_workers=2), ) criterion = torch.nn.MSELoss(reduction="sum") optimizer = torch.optim.SGD(model.parameters(), lr=1e-6) for epoch in range(3): # Set manual seed per epoch to control the randomness for shuffle. torch.manual_seed(epoch) running_loss = 0.0 for step, data in enumerate(data_loader): train_feature, train_label = data optimizer.zero_grad() predicted_outputs = model(train_feature) loss = criterion(predicted_outputs, train_label) loss.backward() optimizer.step() running_loss += loss.item() if step % 2000 == 1999: print("[epoch: %d, %5d] loss: %.3f" % (epoch + 1, step + 1, running_loss / 2000)) running_loss = 0.0 print("Finished Training")
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import argparse import hashlib import os import time from functools import partial from typing import Callable import pandas as pd import psutil from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService from torchdata.datapipes.iter import IterableWrapper def map_read(t): """ Read stream and close. Used for tar files. Args: t: (path, data_stream) tuple """ data = t[1].read() t[1].close() return t[0], data def map_calculate_md5(t, n_md5): """ Calculate MD5 hash of data for `n_md5` number of times. Increasing the number of md5 calculation will determine CPU usage (this is an approximate for the complexity of data transforms). Args: t: (path, data) tuple n_md5: number of times to compute hash of the data """ path, data = t long_str = "" for _ in range(n_md5): long_str += str(hashlib.md5(data).hexdigest()) result = hashlib.md5(long_str.encode()).hexdigest() size = len(data) return path, str(result), size def check_and_output_speed(prefix: str, create_dp_fn: Callable, n_prefetch: int, n_md5: int, n_workers: int): """ Benchmark the speed of the prefetching setup and prints the results. Args: prefix: String indicating what is being executed create_dp_fn: function that returns a DataPipe n_prefetch: number of batches to prefetch n_md5: number of times to compute hash of the data """ initial_memory_usage = psutil.virtual_memory().used max_memory_usage = initial_memory_usage dp = create_dp_fn() rs_type = "DataLoader2 w/ tar archives" new_rs = MultiProcessingReadingService( num_workers=n_workers, worker_prefetch_cnt=n_prefetch, main_prefetch_cnt=n_prefetch ) dl: DataLoader2 = DataLoader2(dp, reading_service=new_rs) start = time.time() items_len = 0 # Number of items processed total_size = 0 # Number of bytes processed time_to_first = None for _name, _md5, size in dl: if items_len > 10 and time_to_first is None: time_to_first = time.time() - start total_size += size items_len += 1 if psutil.virtual_memory().used > max_memory_usage: max_memory_usage = psutil.virtual_memory().used total = time.time() - start speed = int(items_len / total) # item per sec function_name = create_dp_fn.__name__ io_speed = int(total_size / total / 1024 / 1024) # size MiBs per sec total_size = int(total_size / 1024 / 1024) # total size in MiBs total = int(total) print( f"{prefix} {function_name} and {rs_type} with n_prefetch {n_prefetch} | " f"n_md5 {n_md5} results are: total time {total} sec, with {items_len} items at {speed} files per/sec. " f"{total_size} MiB with io speed at {io_speed} MiBps" ) change_in_memory_usage = (max_memory_usage - initial_memory_usage) / 1024 / 1024 print(f"initial_memory_usage: {initial_memory_usage / 1024 / 1024:0.1f} MiBs") print(f"change_in_memory_usage: {change_in_memory_usage:0.1f} MiBs\n") return ( function_name, rs_type, n_prefetch, total, items_len, speed, total_size, io_speed, int(change_in_memory_usage), ) def append_result( df, workers, n_tar_files, n_md5, fs, iteration, columns, fn_name, rs_type, prefetch, total, items_len, speed, total_size, io_speed, change_in_memory_usage, ): return pd.concat( [ df, pd.DataFrame( data=[ [ workers, fn_name, rs_type, prefetch, n_md5, total, n_tar_files, items_len, total_size, speed, io_speed, fs, iteration, change_in_memory_usage, ] ], columns=columns, ), ] ) def save_result(df, csv_name: str, directory: str = ""): file_path = os.path.join(directory, f"{csv_name}.csv") df.to_csv(file_path, mode="a") # Append result def main(args): def get_datapipe(path, n_items, n_md5, use_source_prefetch, use_s3=False): if use_s3: dp = IterableWrapper([path] * n_items).shuffle().sharding_filter() dp = dp.open_files_by_fsspec(mode="rb", anon=True) if use_source_prefetch: dp = dp.prefetch(5) dp = dp.load_from_tar(mode="r|") else: tar_files = [f"{path}/images{i}.tar" for i in range(n_items)] dp = IterableWrapper(tar_files).shuffle().sharding_filter().open_files(mode="b") if use_source_prefetch: dp = dp.prefetch(5) dp = dp.load_from_tar(mode="r:") dp = dp.map(map_read) dp = dp.map(partial(map_calculate_md5, n_md5=n_md5)) return dp columns = [ "n_workers", "file_type", "RS Type", "n_prefetch", "n_md5", "total_time", "n_tar_files", "n_items", "total_size (MB)", "speed (file/s)", "io_speed (MB/s)", "fs", "iteration", "change_in_memory_usage", ] df = pd.DataFrame(columns=columns) if args.use_s3: print("Loading data from S3...") fs_str = "s3" path = "s3://torchdatabenchmarkdatasets/images0.tar" dp_fn = partial(get_datapipe, path, args.n_tar_files, args.n_md5, args.use_source_prefetch, args.use_s3) dp_fn.__name__ = "S3_Tar" # type: ignore[attr-defined] else: print("Loading data from disk...") fs_str = "Local" path = "/home/ubuntu/source_data/large_images_tars" dp_fn = partial(get_datapipe, path, args.n_tar_files, args.n_md5, args.use_source_prefetch, args.use_s3) dp_fn.__name__ = "Tar" # type: ignore[attr-defined] # print(f"{path = }") for n_workers in [4, 8, 12]: for i in range(1 + args.n_epochs): # 1 warm-up + n runs params = check_and_output_speed( f"[prefetch is True, {n_workers} workers]", dp_fn, n_prefetch=args.n_prefetch, n_md5=args.n_md5, n_workers=n_workers, ) df = append_result(df, n_workers, args.n_tar_files, args.n_md5, fs_str, i, columns, *params) # Save CSV print(df) save_result(df, csv_name=args.output_file) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--n-epochs", default=3, type=int, help="Number of times to benchmark per setup excluding warm up" ) parser.add_argument("--n-tar-files", default=200, type=int, help="Number of tar files (~100MB each)") parser.add_argument("--n-prefetch", default=20, type=int, help="Number of batches to prefetch") parser.add_argument( "--n-md5", default=22, type=int, help="Number of times to compute MD5 hash per file, " "a proxy for transformation complexity " "(Low ~3ms: 22, Med ~7ms: 54, High ~10ms: 77)", ) parser.add_argument("--output-file", default="benchmark_result", type=str, help="output csv file name") parser.add_argument("--use-s3", default=False, action="store_true", help="Load file from S3 instead of local") parser.add_argument("--use-source-prefetch", default=False, action="store_true", help="Use source prefetch") args = parser.parse_args() main(args) # python ~/data/benchmarks/cloud/aws_s3.py --n-tar-files 500 --n-epoch 1 --n-md5 22 && # python ~/data/benchmarks/cloud/aws_s3.py --n-tar-files 500 --n-epoch 1 --n-md5 22 --use-s3 && # python ~/data/benchmarks/cloud/aws_s3.py --n-tar-files 500 --n-epoch 1 --n-md5 54 && # python ~/data/benchmarks/cloud/aws_s3.py --n-tar-files 500 --n-epoch 1 --n-md5 54 --use-s3 && # python ~/data/benchmarks/cloud/aws_s3.py --n-tar-files 500 --n-epoch 1 --n-md5 77 && # python ~/data/benchmarks/cloud/aws_s3.py --n-tar-files 500 --n-epoch 1 --n-md5 77 --use-s3
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import torch from torchvision.transforms import transforms class ClassificationPresetTrain: def __init__( self, *, crop_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), hflip_prob=0.5, ): trans = [transforms.RandomResizedCrop(crop_size)] if hflip_prob > 0: trans.append(transforms.RandomHorizontalFlip(hflip_prob)) trans.extend( [ transforms.PILToTensor(), transforms.ConvertImageDtype(torch.float), transforms.Normalize(mean=mean, std=std), ] ) self.transforms = transforms.Compose(trans) def __call__(self, img): return self.transforms(img) class ClassificationPresetEval: def __init__( self, *, crop_size, resize_size=256, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), ): self.transforms = transforms.Compose( [ transforms.Resize(resize_size), transforms.CenterCrop(crop_size), transforms.PILToTensor(), transforms.ConvertImageDtype(torch.float), transforms.Normalize(mean=mean, std=std), ] ) def __call__(self, img): return self.transforms(img)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import datetime import errno import os import time from collections import defaultdict, deque import torch import torch.distributed as dist class SmoothedValue: """Track a series of values and provide access to smoothed values over a window or the global series average. """ def __init__(self, window_size=20, fmt=None): if fmt is None: fmt = "{median:.4f} ({global_avg:.4f})" self.deque = deque(maxlen=window_size) self.total = 0.0 self.count = 0 self.fmt = fmt def update(self, value, n=1): self.deque.append(value) self.count += n self.total += value * n def synchronize_between_processes(self): """ Warning: does not synchronize the deque! """ t = reduce_across_processes([self.count, self.total]) t = t.tolist() self.count = int(t[0]) self.total = t[1] @property def median(self): if not self.deque: return 0 d = torch.tensor(list(self.deque)) return d.median().item() @property def avg(self): if not self.deque: return 0 d = torch.tensor(list(self.deque), dtype=torch.float32) return d.mean().item() @property def global_avg(self): try: return self.total / self.count except ZeroDivisionError: return 0 @property def max(self): if not self.deque: return 0 return max(self.deque) @property def value(self): if not self.deque: return 0 return self.deque[-1] def __str__(self): return self.fmt.format( median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value ) class MetricLogger: def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for k, v in kwargs.items(): if isinstance(v, torch.Tensor): v = v.item() assert isinstance(v, (float, int)) self.meters[k].update(v) def __getattr__(self, attr): if attr in self.meters: return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'") def __str__(self): loss_str = [] for name, meter in self.meters.items(): loss_str.append(f"{name}: {str(meter)}") return self.delimiter.join(loss_str) def synchronize_between_processes(self): for meter in self.meters.values(): meter.synchronize_between_processes() def add_meter(self, name, meter): self.meters[name] = meter def log_every(self, iterable, print_freq, header=None): i = 0 if not header: header = "" start_time = time.time() end = time.time() iter_time = SmoothedValue(fmt="{avg:.4f}") data_time = SmoothedValue(fmt="{avg:.4f}") model_time = SmoothedValue(fmt="{avg:.4f}") space_fmt = ":" + str(len(str(len(iterable)))) + "d" if torch.cuda.is_available(): log_msg = self.delimiter.join( [ header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}", "model: {model}", "max mem: {memory:.0f}", ] ) else: log_msg = self.delimiter.join( [header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"] ) MB = 1024.0 * 1024.0 for obj in iterable: dtime = time.time() - end data_time.update(dtime) yield obj ttime = time.time() - end iter_time.update(ttime) model_time.update(ttime - dtime) if i % print_freq == 0: eta_seconds = iter_time.global_avg * (len(iterable) - i) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) if torch.cuda.is_available(): print( log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), model=str(model_time), memory=torch.cuda.max_memory_allocated() / MB, ) ) else: print( log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time) ) ) i += 1 end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print(f"{header} Total time: {total_time_str}") def accuracy(output, target, topk=(1,)): """Computes the accuracy over the k top predictions for the specified values of k""" with torch.inference_mode(): maxk = max(topk) batch_size = target.size(0) if target.ndim == 2: target = target.max(dim=1)[1] _, pred = output.topk(maxk, 1, True, True) pred = pred.t() correct = pred.eq(target[None]) res = [] for k in topk: correct_k = correct[:k].flatten().sum(dtype=torch.float32) res.append(correct_k * (100.0 / batch_size)) return res def mkdir(path): try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST: raise def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop("force", False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True def get_world_size(): if not is_dist_avail_and_initialized(): return 1 return dist.get_world_size() def get_rank(): if not is_dist_avail_and_initialized(): return 0 return dist.get_rank() def is_main_process(): return get_rank() == 0 def save_on_master(*args, **kwargs): if is_main_process(): torch.save(*args, **kwargs) def init_distributed_mode(args): if "RANK" in os.environ and "WORLD_SIZE" in os.environ: args.rank = int(os.environ["RANK"]) args.world_size = int(os.environ["WORLD_SIZE"]) args.gpu = int(os.environ["LOCAL_RANK"]) elif "SLURM_PROCID" in os.environ: args.rank = int(os.environ["SLURM_PROCID"]) args.gpu = args.rank % torch.cuda.device_count() elif hasattr(args, "rank"): pass else: print("Not using distributed mode") args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = "nccl" print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True) torch.distributed.init_process_group( backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank ) torch.distributed.barrier() if args.data_loader.lower() != "ffcv": setup_for_distributed(args.rank == 0) def reduce_across_processes(val): if not is_dist_avail_and_initialized(): # nothing to sync, but we still convert to tensor for consistency with the distributed case. return torch.tensor(val) t = torch.tensor(val, device="cuda") dist.barrier() dist.all_reduce(t) return t
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import datetime import os import time import warnings import helpers import presets import torch import torch.utils.data import torchvision import utils from torch import nn from torchdata.dataloader2 import adapter, DataLoader2, MultiProcessingReadingService def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args): model.train() metric_logger = utils.MetricLogger(delimiter=" ") metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}")) metric_logger.add_meter("img/s", utils.SmoothedValue(window_size=10, fmt="{value}")) header = f"Epoch: [{epoch}]" for i, (image, target) in enumerate(metric_logger.log_every(data_loader, args.print_freq, header)): if args.data_loading_only: continue start_time = time.time() image, target = image.to(device), target.to(device) output = model(image) loss = criterion(output, target) optimizer.zero_grad() loss.backward() optimizer.step() acc1, acc5 = utils.accuracy(output, target, topk=(1, 5)) batch_size = image.shape[0] metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"]) metric_logger.meters["acc1"].update(acc1.item(), n=batch_size) metric_logger.meters["acc5"].update(acc5.item(), n=batch_size) metric_logger.meters["img/s"].update(batch_size / (time.time() - start_time)) def evaluate(model, criterion, data_loader, device, args, print_freq=100, log_suffix=""): model.eval() metric_logger = utils.MetricLogger(delimiter=" ") header = f"Test: {log_suffix}" metric_logger.add_meter("acc1", utils.SmoothedValue()) metric_logger.add_meter("acc5", utils.SmoothedValue()) num_processed_samples = 0 with torch.inference_mode(): for image, target in metric_logger.log_every(data_loader, print_freq, header): if args.data_loading_only: continue image, target = image.to(device), target.to(device) output = model(image) loss = criterion(output, target) acc1, acc5 = utils.accuracy(output, target, topk=(1, 5)) batch_size = image.shape[0] metric_logger.update(loss=loss.item()) metric_logger.meters["acc1"].update(acc1.item(), n=batch_size) metric_logger.meters["acc5"].update(acc5.item(), n=batch_size) num_processed_samples += batch_size # gather the stats from all processes num_processed_samples = utils.reduce_across_processes(num_processed_samples) if ( hasattr(data_loader, "dataset") and hasattr(data_loader.dataset, "__len__") and len(data_loader.dataset) != num_processed_samples and torch.distributed.get_rank() == 0 ): warnings.warn( f"It looks like the dataset has {len(data_loader.dataset)} samples, but {num_processed_samples} " "samples were used for the validation, which might bias the results. " "Try adjusting the batch size and / or the world size. " "Setting the world size to 1 is always a safe bet." ) metric_logger.synchronize_between_processes() print(f"{header} Acc@1 {metric_logger.acc1.global_avg:.3f} Acc@5 {metric_logger.acc5.global_avg:.3f}") return metric_logger.acc1.global_avg def create_data_loaders(args): print(f"file-system = {args.fs}") if args.fs == "fsx": dataset_dir = "/datasets01" elif args.fs == "fsx_isolated": dataset_dir = "/fsx_isolated" elif args.fs == "ontap": dataset_dir = "/datasets01_ontap" elif args.fs == "ontap_isolated": dataset_dir = "/ontap_isolated" else: raise ValueError(f"bad args.fs, got {args.fs}") dataset_dir += "/imagenet_full_size/061417/" train_dir = os.path.join(dataset_dir, "train") val_dir = os.path.join(dataset_dir, "val") val_resize_size, val_crop_size, train_crop_size = args.val_resize_size, args.val_crop_size, args.train_crop_size if args.no_transforms: train_preset = val_preset = helpers.no_transforms else: train_preset = presets.ClassificationPresetTrain(crop_size=train_crop_size) val_preset = presets.ClassificationPresetEval(crop_size=val_crop_size, resize_size=val_resize_size) if args.ds_type == "dp": builder = helpers.make_pre_loaded_dp if args.preload_ds else helpers.make_dp train_dataset = builder(train_dir, transforms=train_preset) val_dataset = builder(val_dir, transforms=val_preset) train_sampler = val_sampler = None train_shuffle = True elif args.ds_type == "iterable": train_dataset = torchvision.datasets.ImageFolder(train_dir, transform=train_preset) train_dataset = helpers.MapStyleToIterable(train_dataset, shuffle=True) val_dataset = torchvision.datasets.ImageFolder(val_dir, transform=val_preset) val_dataset = helpers.MapStyleToIterable(val_dataset, shuffle=False) train_sampler = val_sampler = None train_shuffle = None # but actually True elif args.ds_type == "mapstyle": builder = helpers.PreLoadedMapStyle if args.preload_ds else torchvision.datasets.ImageFolder train_dataset = builder(train_dir, transform=train_preset) val_dataset = builder(val_dir, transform=val_preset) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False) train_shuffle = None # but actually True else: raise ValueError(f"Invalid value for args.ds_type ({args.ds_type})") data_loader_arg = args.data_loader.lower() if data_loader_arg == "v1": train_data_loader = torch.utils.data.DataLoader( train_dataset, batch_size=args.batch_size, shuffle=train_shuffle, sampler=train_sampler, num_workers=args.workers, pin_memory=True, drop_last=True, ) val_data_loader = torch.utils.data.DataLoader( val_dataset, batch_size=args.batch_size, sampler=val_sampler, num_workers=args.workers, pin_memory=True, ) elif data_loader_arg == "v2": if args.ds_type != "dp": raise ValueError("DataLoader2 only works with datapipes.") # Note: we are batching and collating here *after the transforms*, which is consistent with DLV1. # But maybe it would be more efficient to do that before, so that the transforms can work on batches?? train_dataset = train_dataset.batch(args.batch_size, drop_last=True).collate() train_data_loader = DataLoader2( train_dataset, datapipe_adapter_fn=adapter.Shuffle(), reading_service=MultiProcessingReadingService(num_workers=args.workers), ) val_dataset = val_dataset.batch(args.batch_size, drop_last=True).collate() # TODO: Do we need drop_last here? val_data_loader = DataLoader2( val_dataset, reading_service=MultiProcessingReadingService(num_workers=args.workers), ) else: raise ValueError(f"invalid data-loader param. Got {args.data_loader}") return train_data_loader, val_data_loader, train_sampler def main(args): if args.output_dir: utils.mkdir(args.output_dir) utils.init_distributed_mode(args) print("\n".join(f"{k}: {str(v)}" for k, v in sorted(dict(vars(args)).items()))) device = torch.device(args.device) if args.use_deterministic_algorithms: torch.backends.cudnn.benchmark = False torch.use_deterministic_algorithms(True) else: torch.backends.cudnn.benchmark = True train_data_loader, val_data_loader, train_sampler = create_data_loaders(args) num_classes = 1000 # I'm lazy. TODO change this print("Creating model") model = torchvision.models.__dict__[args.model](weights=args.weights, num_classes=num_classes) model.to(device) criterion = nn.CrossEntropyLoss() optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum) lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma) model_without_ddp = model if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) model_without_ddp = model.module if args.test_only: # We disable the cudnn benchmarking because it can noticeably affect the accuracy torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True evaluate(model, criterion, val_data_loader, device=device, args=args) return print("Start training") start_time = time.time() for epoch in range(args.epochs): if args.distributed and train_sampler is not None: train_sampler.set_epoch(epoch) train_one_epoch(model, criterion, optimizer, train_data_loader, device, epoch, args) lr_scheduler.step() evaluate(model, criterion, val_data_loader, device=device, args=args) if args.output_dir: checkpoint = { "model": model_without_ddp.state_dict(), "optimizer": optimizer.state_dict(), "lr_scheduler": lr_scheduler.state_dict(), "epoch": epoch, "args": args, } utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth")) utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth")) if epoch == 0: first_epoch_time = time.time() - start_time total_time = time.time() - start_time print(f"Training time: {datetime.timedelta(seconds=int(total_time))}") print(f"Training time (w/o 1st epoch): {datetime.timedelta(seconds=int(total_time - first_epoch_time))}") def get_args_parser(add_help=True): import argparse parser = argparse.ArgumentParser(description="PyTorch Classification Training", add_help=add_help) parser.add_argument("--fs", default="fsx", type=str) parser.add_argument("--model", default="resnet18", type=str, help="model name") parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)") parser.add_argument( "-b", "--batch-size", default=32, type=int, help="images per gpu, the total batch size is $NGPU x batch_size" ) parser.add_argument("--epochs", default=90, type=int, metavar="N", help="number of total epochs to run") parser.add_argument( "-j", "--workers", default=12, type=int, metavar="N", help="number of data loading workers (default: 16)" ) parser.add_argument("--lr", default=0.1, type=float, help="initial learning rate") parser.add_argument("--lr-step-size", default=30, type=int, help="decrease lr every step-size epochs") parser.add_argument("--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma") parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum") parser.add_argument("--print-freq", default=10, type=int, help="print frequency") parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs") parser.add_argument( "--test-only", dest="test_only", help="Only test the model", action="store_true", ) # distributed training parameters parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes") parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training") parser.add_argument( "--use-deterministic-algorithms", action="store_true", help="Forces the use of deterministic algorithms only." ) parser.add_argument( "--val-resize-size", default=256, type=int, help="the resize size used for validation (default: 256)" ) parser.add_argument( "--val-crop-size", default=224, type=int, help="the central crop size used for validation (default: 224)" ) parser.add_argument( "--train-crop-size", default=224, type=int, help="the random crop size used for training (default: 224)" ) parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load") parser.add_argument( "--ds-type", default="mapstyle", type=str, help="'dp' or 'iterable' or 'mapstyle' (for regular indexable datasets)", ) parser.add_argument( "--preload-ds", action="store_true", help="whether to use a fake dataset where all images are pre-loaded in RAM and already transformed. " "Mostly useful to benchmark how fast a model training would be without data-loading bottlenecks." "Acc results are irrevant because we don't cache the entire dataset, only a very small fraction of it.", ) parser.add_argument( "--data-loading-only", action="store_true", help="When on, we bypass the model's forward and backward passes. So mostly only the dataloading happens", ) parser.add_argument( "--no-transforms", action="store_true", help="Whether to apply transforms to the images. No transforms means we " "load and decode PIL images as usual, but we don't transform them. Instead we discard them " "and the dataset will produce random tensors instead. We " "need to create random tensors because without transforms, the images would still be PIL images " "and they wouldn't be of the required size." "Obviously, Acc resuts will not be relevant.", ) parser.add_argument( "--data-loader", default="V1", type=str, help="'V1' or 'V2'. V2 only works for datapipes", ) return parser if __name__ == "__main__": args = get_args_parser().parse_args() main(args)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import itertools import os import random from functools import partial from pathlib import Path import torch import torch.distributed as dist import torchvision from PIL import Image from torchdata.datapipes.iter import FileLister, IterDataPipe # TODO: maybe infinite buffer can / is already natively supported by torchdata? INFINITE_BUFFER_SIZE = 1_000_000_000 IMAGENET_TRAIN_LEN = 1_281_167 IMAGENET_TEST_LEN = 50_000 class _LenSetter(IterDataPipe): # TODO: Ideally, we woudn't need this extra class def __init__(self, dp, root): self.dp = dp if "train" in str(root): self.size = IMAGENET_TRAIN_LEN elif "val" in str(root): self.size = IMAGENET_TEST_LEN else: raise ValueError("oops?") def __iter__(self): yield from self.dp def __len__(self): # TODO The // world_size part shouldn't be needed. See https://github.com/pytorch/data/issues/533 return self.size // dist.get_world_size() def _decode(path, root, category_to_int): category = Path(path).relative_to(root).parts[0] image = Image.open(path).convert("RGB") label = category_to_int(category) return image, label def _apply_tranforms(img_and_label, transforms): img, label = img_and_label return transforms(img), label def make_dp(root, transforms): root = Path(root).expanduser().resolve() categories = sorted(entry.name for entry in os.scandir(root) if entry.is_dir()) category_to_int = {category: i for (i, category) in enumerate(categories)} dp = FileLister(str(root), recursive=True, masks=["*.JPEG"]) dp = dp.shuffle(buffer_size=INFINITE_BUFFER_SIZE).set_shuffle(False).sharding_filter() dp = dp.map(partial(_decode, root=root, category_to_int=category_to_int)) dp = dp.map(partial(_apply_tranforms, transforms=transforms)) dp = _LenSetter(dp, root=root) return dp class PreLoadedMapStyle: # All the data is pre-loaded and transformed in __init__, so the DataLoader should be crazy fast. # This is just to assess how fast a model could theoretically be trained if there was no data bottleneck at all. def __init__(self, dir, transform, buffer_size=100): dataset = torchvision.datasets.ImageFolder(dir, transform=transform) self.size = len(dataset) self.samples = [dataset[torch.randint(0, len(dataset), size=(1,)).item()] for i in range(buffer_size)] def __len__(self): return self.size def __getitem__(self, idx): return self.samples[idx % len(self.samples)] class _PreLoadedDP(IterDataPipe): # Same as above, but this is a DataPipe def __init__(self, root, transforms, buffer_size=100): dataset = torchvision.datasets.ImageFolder(root, transform=transforms) self.size = len(dataset) self.samples = [dataset[torch.randint(0, len(dataset), size=(1,)).item()] for i in range(buffer_size)] # Note: the rng might be different across DDP workers so they'll all have different samples. # But we don't care about accuracy here so whatever. def __iter__(self): for idx in range(self.size): yield self.samples[idx % len(self.samples)] def make_pre_loaded_dp(root, transforms): dp = _PreLoadedDP(root=root, transforms=transforms) dp = dp.shuffle(buffer_size=INFINITE_BUFFER_SIZE).set_shuffle(False).sharding_filter() dp = _LenSetter(dp, root=root) return dp class MapStyleToIterable(torch.utils.data.IterableDataset): # This converts a MapStyle dataset into an iterable one. # Not sure this kind of Iterable dataset is actually useful to benchmark. It # was necessary when benchmarking async-io stuff, but not anymore. # If anything, it shows how tricky Iterable datasets are to implement. def __init__(self, dataset, shuffle): self.dataset = dataset self.shuffle = shuffle self.size = len(self.dataset) self.seed = 0 # has to be hard-coded for all DDP workers to have the same shuffling def __len__(self): return self.size // dist.get_world_size() def __iter__(self): worker_info = torch.utils.data.get_worker_info() num_dl_workers = worker_info.num_workers dl_worker_id = worker_info.id num_ddp_workers = dist.get_world_size() ddp_worker_id = dist.get_rank() num_total_workers = num_ddp_workers * num_dl_workers current_worker_id = ddp_worker_id + (num_ddp_workers * dl_worker_id) indices = range(self.size) if self.shuffle: rng = random.Random(self.seed) indices = rng.sample(indices, k=self.size) indices = itertools.islice(indices, current_worker_id, None, num_total_workers) samples = (self.dataset[i] for i in indices) yield from samples # TODO: maybe only generate these when --no-transforms is passed? _RANDOM_IMAGE_TENSORS = [torch.randn(3, 224, 224) for _ in range(300)] def no_transforms(_): # see --no-transforms doc return random.choice(_RANDOM_IMAGE_TENSORS)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. # This file is adpated from PyTorch Core # https://github.com/pytorch/pytorch/blob/master/scripts/release_notes/common.py import json import locale import os import re import subprocess from collections import namedtuple import requests topics = [ "bc_breaking", "deprecations", "new_features", "improvements", "bug_fixes", "performance", "docs", "devs", "Untopiced", ] Features = namedtuple( "Features", [ "title", "body", "pr_number", "files_changed", "labels", ], ) def dict_to_features(dct): return Features( title=dct["title"], body=dct["body"], pr_number=dct["pr_number"], files_changed=dct["files_changed"], labels=dct["labels"], ) def features_to_dict(features): return dict(features._asdict()) def run(command): """Returns (return-code, stdout, stderr)""" p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) output, err = p.communicate() rc = p.returncode enc = locale.getpreferredencoding() output = output.decode(enc) err = err.decode(enc) return rc, output.strip(), err.strip() def commit_body(commit_hash): cmd = f"git log -n 1 --pretty=format:%b {commit_hash}" ret, out, err = run(cmd) return out if ret == 0 else None def commit_title(commit_hash): cmd = f"git log -n 1 --pretty=format:%s {commit_hash}" ret, out, err = run(cmd) return out if ret == 0 else None def commit_files_changed(commit_hash): cmd = f"git diff-tree --no-commit-id --name-only -r {commit_hash}" ret, out, err = run(cmd) return out.split("\n") if ret == 0 else None def parse_pr_number(body, commit_hash, title): regex = r"Pull Request resolved: https://github.com/pytorch/data/pull/([0-9]+)" matches = re.findall(regex, body) if len(matches) == 0: if "revert" not in title.lower() and "updating submodules" not in title.lower(): print(f"[{commit_hash}: {title}] Could not parse PR number, ignoring PR") return None if len(matches) > 1: print(f"[{commit_hash}: {title}] Got two PR numbers, using the first one") return matches[0] return matches[0] def get_ghstack_token(): pattern = "github_oauth = (.*)" with open(os.path.expanduser("~/.ghstackrc"), "r+") as f: config = f.read() matches = re.findall(pattern, config) if len(matches) == 0: raise RuntimeError("Can't find a github oauth token") return matches[0] token = get_ghstack_token() headers = {"Authorization": f"token {token}"} def run_query(query): request = requests.post("https://api.github.com/graphql", json={"query": query}, headers=headers) if request.status_code == 200: return request.json() else: raise Exception(f"Query failed to run by returning code of {request.status_code}. {query}") def gh_labels(pr_number): query = f""" {{ repository(owner: "pytorch", name: "data") {{ pullRequest(number: {pr_number}) {{ labels(first: 10) {{ edges {{ node {{ name }} }} }} }} }} }} """ query = run_query(query) edges = query["data"]["repository"]["pullRequest"]["labels"]["edges"] return [edge["node"]["name"] for edge in edges] def get_features(commit_hash, return_dict=False): title, body, files_changed = ( commit_title(commit_hash), commit_body(commit_hash), commit_files_changed(commit_hash), ) pr_number = parse_pr_number(body, commit_hash, title) labels = [] if pr_number is not None: labels = gh_labels(pr_number) result = Features(title, body, pr_number, files_changed, labels) if return_dict: return features_to_dict(result) return result class CommitDataCache: def __init__(self, path="results/data.json"): self.path = path self.data = {} if os.path.exists(path): self.data = self.read_from_disk() def get(self, commit): if commit not in self.data.keys(): # Fetch and cache the data self.data[commit] = get_features(commit) self.write_to_disk() return self.data[commit] def read_from_disk(self): with open(self.path) as f: data = json.load(f) data = {commit: dict_to_features(dct) for commit, dct in data.items()} return data def write_to_disk(self): data = {commit: features._asdict() for commit, features in self.data.items()} with open(self.path, "w") as f: json.dump(data, f)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. # This file is adpated from PyTorch Core # https://github.com/pytorch/pytorch/blob/master/scripts/release_notes/commitlist.py import argparse import csv import os import pprint import re from collections import defaultdict from common import CommitDataCache, get_features, run, topics class Commit: def __init__(self, commit_hash, category, topic, title): self.commit_hash = commit_hash self.category = category self.topic = topic self.title = title def __eq__(self, other): if not isinstance(other, self.__class__): return False return ( self.commit_hash == other.commit_hash and self.category == other.category and self.topic == other.topic and self.title == other.title ) def __repr__(self): return f"Commit({self.commit_hash}, {self.category}, {self.topic}, {self.title})" class CommitList: # NB: Private ctor. Use `from_existing` or `create_new`. def __init__(self, path, commits): self.path = path self.commits = commits @staticmethod def from_existing(path): commits = CommitList.read_from_disk(path) return CommitList(path, commits) @staticmethod def create_new(path, base_version, new_version): if os.path.exists(path): raise ValueError("Attempted to create a new commitlist but one exists already!") commits = CommitList.get_commits_between(base_version, new_version) return CommitList(path, commits) @staticmethod def read_from_disk(path): with open(path) as csvfile: reader = csv.reader(csvfile) rows = list(reader) assert all(len(row) >= 4 for row in rows) return [Commit(*row[:4]) for row in rows] def write_to_disk(self): path = self.path rows = self.commits directory = os.path.dirname(path) os.makedirs(directory, exist_ok=True) with open(path, "w") as csvfile: writer = csv.writer(csvfile) for commit in rows: writer.writerow([commit.commit_hash, commit.category, commit.topic, commit.title]) def keywordInFile(file, keywords): for key in keywords: if key in file: return True return False @staticmethod def categorize(commit_hash, title): features = get_features(commit_hash, return_dict=True) title = features["title"] labels = features["labels"] category = "Uncategorized" topic = "Untopiced" # We ask contributors to label their PR's appropriately # when they're first landed. # Check if the labels are there first. already_categorized = already_topiced = False for label in labels: if label.startswith("release notes: "): category = label.split("release notes: ", 1)[1] already_categorized = True if label.startswith("topic: "): topic = label.split("topic: ", 1)[1] already_topiced = True if already_categorized and already_topiced: return Commit(commit_hash, category, topic, title) if "deprecation" in title.lower(): topic = "deprecations" files_changed = features["files_changed"] for file in files_changed: if CommitList.keywordInFile(file, ["docker/", ".github", "packaging/"]): category = "releng" break if CommitList.keywordInFile( file, [ "torchdata/dataloader2", ], ): category = "dataloader2" break if CommitList.keywordInFile( file, [ "torchdata/datapipes", ], ): category = "datapipe" break return Commit(commit_hash, category, topic, title) @staticmethod def get_commits_between(base_version, new_version): cmd = f"git merge-base {base_version} {new_version}" rc, merge_base, _ = run(cmd) assert rc == 0 # Returns a list of something like # b33e38ec47 Allow a higher-precision step type for Vec256::arange (#34555) cmd = f"git log --reverse --oneline {merge_base}..{new_version}" rc, commits, _ = run(cmd) assert rc == 0 log_lines = commits.split("\n") hashes, titles = zip(*[log_line.split(" ", 1) for log_line in log_lines]) return [CommitList.categorize(commit_hash, title) for commit_hash, title in zip(hashes, titles)] def filter(self, *, category=None, topic=None): commits = self.commits if category is not None: commits = [commit for commit in commits if commit.category == category] if topic is not None: commits = [commit for commit in commits if commit.topic == topic] return commits def update_to(self, new_version): last_hash = self.commits[-1].commit_hash new_commits = CommitList.get_commits_between(last_hash, new_version) self.commits += new_commits def stat(self): counts = defaultdict(lambda: defaultdict(int)) for commit in self.commits: counts[commit.category][commit.topic] += 1 return counts def create_new(path, base_version, new_version): commits = CommitList.create_new(path, base_version, new_version) commits.write_to_disk() def update_existing(path, new_version): commits = CommitList.from_existing(path) commits.update_to(new_version) commits.write_to_disk() def to_markdown(commit_list, category): def cleanup_title(commit): match = re.match(r"(.*) \(#\d+\)", commit.title) if match is None: return commit.title return match.group(1) cdc = CommitDataCache() lines = [f"\n## {category}\n"] for topic in topics: lines.append(f"### {topic}\n") commits = commit_list.filter(category=category, topic=topic) for commit in commits: result = cleanup_title(commit) maybe_pr_number = cdc.get(commit.commit_hash).pr_number if maybe_pr_number is None: result = f"- {result} ({commit.commit_hash})\n" else: result = f"- {result} ([#{maybe_pr_number}](https://github.com/pytorch/data/pull/{maybe_pr_number}))\n" lines.append(result) return lines def get_markdown_header(category): header = f""" # Release Notes worksheet {category} The main goal of this process is to rephrase all the commit messages below to make them clear and easy to read by the end user. You should follow the following instructions to do so: * **Please cleanup, and format commit titles to be readable by the general pytorch user.** [Detailed intructions here](https://fb.quip.com/OCRoAbEvrRD9#HdaACARZZvo) * Please sort commits into the following categories (you should not rename the categories!), I tried to pre-sort these to ease your work, feel free to move commits around if the current categorization is not good. * Please drop any commits that are not user-facing. * If anything is from another domain, leave it in the UNTOPICED section at the end and I'll come and take care of it. The categories below are as follows: * BC breaking: All commits that are BC-breaking. These are the most important commits. If any pre-sorted commit is actually BC-breaking, do move it to this section. Each commit should contain a paragraph explaining the rational behind the change as well as an example for how to update user code (guidelines here: https://quip.com/OCRoAbEvrRD9) * Deprecations: All commits introducing deprecation. Each commit should include a small example explaining what should be done to update user code. * new_features: All commits introducing a new feature (new functions, new submodule, new supported platform etc) * improvements: All commits providing improvements to existing feature should be here (new backend for a function, new argument, better numerical stability) * bug fixes: All commits that fix bugs and behaviors that do not match the documentation * performance: All commits that are added mainly for performance (we separate this from improvements above to make it easier for users to look for it) * documentation: All commits that add/update documentation * Developers: All commits that are not end-user facing but still impact people that compile from source, develop into pytorch, extend pytorch, etc """ return [ header, ] def main(): """ Example Usages Create a new commitlist. Said commitlist contains commits between v1.5.0 and f5bc91f851. python commitlist.py --create_new tags/v1.5.0 f5bc91f851 Update the existing commitlist to commit bfcb687b9c. python commitlist.py --update_to bfcb687b9c """ parser = argparse.ArgumentParser(description="Tool to create a commit list") group = parser.add_mutually_exclusive_group(required=True) group.add_argument("--create_new", nargs=2) group.add_argument("--update_to") group.add_argument("--stat", action="store_true") group.add_argument("--export_markdown", action="store_true") parser.add_argument("--path", default="results/commitlist.csv") args = parser.parse_args() if args.create_new: create_new(args.path, args.create_new[0], args.create_new[1]) return if args.update_to: update_existing(args.path, args.update_to) return if args.stat: commits = CommitList.from_existing(args.path) stats = commits.stat() pprint.pprint(stats) return if args.export_markdown: commits = CommitList.from_existing(args.path) categories = list(commits.stat().keys()) for category in categories: print(f"Exporting {category}...") lines = get_markdown_header(category) lines += to_markdown(commits, category) filename = f"results/export/result_{category}.md" os.makedirs(os.path.dirname(filename), exist_ok=True) with open(filename, "w") as f: f.writelines(lines) return if __name__ == "__main__": main()
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os import sys def collect_init_dps(init_file_location): init_dps = set() with open(init_file_location) as init_file: while (line := init_file.readline()) != "": if line.startswith("__all__ "): while (line := init_file.readline()) != "" and (stripped_line := line.strip()).startswith('"'): init_dps.add(stripped_line.replace(",", "").replace('"', "")) break return init_dps def collect_rst_dps(rst_file_location): rst_dps = set() with open(rst_file_location) as rst_file: while (line := rst_file.readline()) != "": if line.count("class_template.rst") > 0 or line.count("function.rst") > 0: rst_file.readline() while (line := rst_file.readline()) != "" and len(stripped_line := line.strip()) > 1: rst_dps.add(stripped_line) return rst_dps def compare_sets(set_a, set_b, ignore_set=None): res = set_a.difference(set_b) if ignore_set is not None: res.difference_update(ignore_set) return res def main(): datapipes_folder = os.path.join("torchdata", "datapipes") init_file = "__init__.py" docs_source_folder = os.path.join("docs", "source") exit_code = 0 for target, ignore_set in zip(["iter", "map", "utils"], [{"IterDataPipe", "Extractor"}, {"MapDataPipe"}, {}]): init_path = os.path.join(datapipes_folder, target, init_file) rst_path = os.path.join(docs_source_folder, "torchdata.datapipes." + target + ".rst") init_set = collect_init_dps(init_path) rst_set = collect_rst_dps(rst_path) dif_init = compare_sets(init_set, rst_set, ignore_set) dif_rst = compare_sets(rst_set, init_set) for elem in dif_init: print(f"Please add {elem} to {rst_path}") exit_code = 1 for elem in dif_rst: print(f"{elem} is present in {rst_path} but not in {init_path}") exit_code = 1 sys.exit(exit_code) if __name__ == "__main__": main()
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. # Use the same timeout as PyTorch Distributed default_timeout_in_s = 30 * 60 default_dl2_worker_join_timeout_in_s = 20
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from torchdata import _extension # noqa: F401 from . import datapipes janitor = datapipes.utils.janitor try: from .version import __version__ # noqa: F401 except ImportError: pass __all__ = [ "datapipes", "janitor", ] # Please keep this list sorted assert __all__ == sorted(__all__)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import importlib.machinery import os from pathlib import Path _LIB_DIR = Path(__file__).parent def _init_extension(): lib_dir = os.path.dirname(__file__) # TODO(631): If any extension had dependency of shared library, # in order to support load these shred libraries dynamically, # we need to add logic to load dll path on Windows # See: https://github.com/pytorch/pytorch/blob/master/torch/__init__.py#L56-L140 loader_details = (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES) extfinder = importlib.machinery.FileFinder(lib_dir, loader_details) # type: ignore[arg-type] ext_specs = extfinder.find_spec("_torchdata") if ext_specs is None: return from torchdata import _torchdata as _torchdata _init_extension()
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import sys import traceback class KeyErrorMessage(str): r"""str subclass that returns itself in repr""" def __repr__(self): return self class ExceptionWrapper: r""" Wraps an exception with traceback to communicate across threads/processes """ def __init__(self, exc_info=None, where: str = "in background"): if exc_info is None: exc_info = sys.exc_info() self.exc_type = exc_info[0] self.exc_msg = "".join(traceback.format_exception(*exc_info)) self.where = where def reraise(self): r""" Reraises the wrapped exception in the current thread/process """ # Format a message such as: "Caught ValueError in DataLoader worker # process 2. Original Traceback:", followed by the traceback. msg = f"Caught {self.exc_type.__name__} {self.where}.\nOriginal {self.exc_msg}" if self.exc_type == KeyError: # KeyError calls repr() on its argument (usually a dict key). This # makes stack traces unreadable. It will not be changed in Python # (https://bugs.python.org/issue2651), so we work around it. msg = KeyErrorMessage(msg) elif getattr(self.exc_type, "message", None): # Some exceptions have first argument as non-str but explicitly # have message field raise self.exc_type(message=msg) try: exception = self.exc_type(msg) except TypeError: # If the exception takes multiple arguments, don't try to # instantiate since we don't know how to raise RuntimeError(msg) from None raise exception
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import pickle import warnings from typing import Any, Dict, Generic, Iterable, Iterator, Optional, TypeVar, Union from torchdata.dataloader2.adapter import Adapter from torchdata.dataloader2.error import PauseIteration from torchdata.dataloader2.graph._serialization import ( clone, DataPipe, deserialize_datapipe, MapDataPipe, serialize_datapipe, ) from torchdata.dataloader2.random import SeedGenerator from torchdata.dataloader2.random.seed_generator import _UINT64_UPPER_BOUND from torchdata.dataloader2.reading_service import CheckpointableReadingServiceInterface, ReadingServiceInterface T_co = TypeVar("T_co", covariant=True) SERIALIZED_DATAPIPE_KEY_NAME = "serialized_datapipe" READING_SERVICE_STATE_KEY_NAME = "reading_service_state" RANDOMNESS_STATE_KEY_NAME = "randomness_state" class DataLoader2Iterator(Iterator[T_co]): r""" An iterator wrapper returned by ``DataLoader2``'s ``__iter__` method. It delegates method/attribute calls to the DataPipe iterator object. The purpose of this wrapper object is to track the validity of an iterator to enforce the single iterator per ``DataLoader2`` constraint, and to finalize iteration/shutdown when necessary. """ def __init__(self, dataloader: "DataLoader2", iterator_id: int): self.dataloader = dataloader self.iterator_id = iterator_id self.limit_counter: Optional[int] = None self.limit_threshold: Optional[int] = None def __next__(self) -> T_co: if self.iterator_id == self.dataloader.valid_iterator_id: self.dataloader._reset_iter = True try: if self.dataloader._is_paused: raise PauseIteration("DataLoader2 has been paused. `resume` must be called before continuing.") else: next_val = next(self.dataloader._datapipe_iter) # type: ignore[arg-type] if self.limit_threshold is not None: self.limit_counter = self.limit_counter + 1 # type: ignore[operator] return next_val except PauseIteration: # This can be used for raising `StopIteration` without `finalize_iteration` raise StopIteration except StopIteration: if self.dataloader.reading_service is not None: self.dataloader.reading_service.finalize_iteration() raise except Exception: if self.dataloader: self.dataloader.shutdown() raise finally: # Call `pause` if threshold is reached if ( not self.dataloader._is_paused and self.limit_threshold is not None and self.limit_counter >= self.limit_threshold # type: ignore[operator] ): self._pause() else: # `iterator_id` is not valid if self.dataloader.reading_service is not None: self.dataloader.reading_service.finalize_iteration() raise RuntimeError( "This iterator has been invalidated because another iterator has been created " "from the same DataLoader2.\n" "This may be caused multiple references to the same DataLoader2. " "For feedback regarding this single iterator per DataLoader2 constraint, feel free " "to comment on this issue: https://github.com/pytorch/data/issues/45." ) def _pause(self) -> None: r""" Pauses ``DataLoader2`` by halting its threads and ensure that its state remains unchanged, allowing ``DataLoader2`` to safely perform snapshotting and similar operations afterwards. The ``limit_counter`` is also reset to ``0``. """ self.dataloader._pause() self.limit_counter = 0 def resume(self) -> None: r""" Restarts the threads within ``DataLoader2`` and allows it to yield additional batches. """ self.dataloader._resume() def limit(self, num_batches: Optional[int]) -> None: """ Pauses ``DataLoader2`` from yielding additional batches after ``num_batches`` has been yielded. The count begins after this method is invoked (i.e. previously yielded batches do not count towards the threshold). While paused, ``DataLoader2``'s threads are halted and its state remains unchanged, allowing ``DataLoader2`` to safely perform snapshotting and similar operations. After ``DataLoader2`` is paused, ``resume()`` must be called before it can start yielding again. Note: - ``limit_threshold`` persists after ``pause`` and ``resume``. Use ``.limit(None)`` to remove it. - If dispatching process is present, in order to make sure limit is in sync across processes, please place 1-to-N ``DataPipes`` in the dispatching process (before ``sharding_round_robin_dispatch``) Args: num_batches: Number of batches after which the DataLoader2 will pause, use ``None`` to remove the limit """ self.limit_counter = 0 self.limit_threshold = num_batches self.dataloader._limit(num_batches) def __getattr__(self, name): """ To delegate operations to ``dataloader._datapipe_iter``. """ if "dataloader" not in self.__dict__ or self.dataloader._datapipe_iter is None: raise AttributeError return getattr(self.dataloader._datapipe_iter, name) class DataLoader2(Generic[T_co]): r""" ``DataLoader2`` is used to optimize and execute the given ``DataPipe`` graph based on ``ReadingService`` and ``Adapter`` functions, with support for - Dynamic sharding for multiprocess and distributed data loading - Multiple backend ``ReadingServices`` - ``DataPipe`` graph in-place modification like shuffle control, memory pinning, etc. - Snapshot the state of data-preprocessing pipeline (WIP) Args: datapipe (``IterDataPipe`` or ``MapDataPipe``): ``DataPipe`` from which to load the data. A deepcopy of this datapipe will be made during initialization, allowing the input to be re-used in a different ``DataLoader2`` without sharing states. Input ``None`` can only be used if ``load_state_dict`` is called right after the creation of the DataLoader. datapipe_adapter_fn (``Iterable[Adapter]`` or ``Adapter``, optional): ``Adapter`` function(s) that will be applied to the DataPipe (default: ``None``). reading_service (ReadingServiceInterface, optional): defines how ``DataLoader2`` should execute operations over the ``DataPipe``, e.g. multiprocessing/distributed (default: ``None``). A deepcopy of this will be created during initialization, allowing the ReadingService to be re-used in a different ``DataLoader2`` without sharing states. Note: When a ``MapDataPipe`` is passed into ``DataLoader2``, in order to iterate through the data, ``DataLoader2`` will attempt to create an iterator via ``iter(datapipe)``. If the object has a non-zero-indexed indices, this may fail. Consider using ``.shuffle()`` (which converts ``MapDataPipe`` to ``IterDataPipe``) or ``datapipe.to_iter_datapipe(custom_indices)``. """ def __init__( self, datapipe: Optional[DataPipe], datapipe_adapter_fn: Optional[Union[Iterable[Adapter], Adapter]] = None, reading_service: Optional[ReadingServiceInterface] = None, ) -> None: if isinstance(datapipe, MapDataPipe): datapipe = datapipe.to_iter_datapipe() self.datapipe = clone(datapipe) if datapipe is not None else None self._adapted: bool = False self._datapipe_iter: Optional[Iterator[T_co]] = None self._reset_iter: bool = True # Sets to `False` when `__iter__` runs, and `True` when `__next__` is called # TODO(630): Some ReadingServices might want to validate adapters, we can add this feature if datapipe_adapter_fn is None: self.datapipe_adapter_fns = None elif isinstance(datapipe_adapter_fn, Iterable): self.datapipe_adapter_fns = datapipe_adapter_fn else: self.datapipe_adapter_fns = [datapipe_adapter_fn] self.reading_service = clone(reading_service) self.reading_service_state: Optional[bytes] = None # is not `None` when `load_state_dict` is called self._terminated: bool = False self.valid_iterator_id: Optional[int] = None self._is_paused = False if self.datapipe is not None and self.datapipe_adapter_fns is not None: for adapter_fn in self.datapipe_adapter_fns: self.datapipe = adapter_fn(self.datapipe) self._datapipe_before_reading_service_adapt: DataPipe = clone(self.datapipe) self._seed_generator: SeedGenerator = SeedGenerator() self._seed: Optional[int] = None self._reset_seed: bool = True # Seed generator as of beginning of each epoch self._initial_seed_generator: SeedGenerator = clone(self._seed_generator) def __iter__(self) -> DataLoader2Iterator[T_co]: r""" Return a singleton iterator from the ``DataPipe`` graph adapted by ``ReadingService``. ``DataPipe`` will be restored if the serialized state is provided to construct ``DataLoader2``. And, ``initialize_iteration`` and ``finalize_iterator`` will be invoked at the beginning and end of the iteration correspondingly. """ if self.datapipe is None: raise RuntimeError("Please provide datapipe or use load_state_dict to load datapipe from state") if self._terminated: raise RuntimeError("Cannot iterate over the DataLoader as it has already been shut down") if self._reset_iter: if self._seed is not None: if self._reset_seed: self._seed_generator.seed(self._seed) self._reset_seed = False else: self._seed_generator.seed() # Saving initial seed generator state self._initial_seed_generator = clone(self._seed_generator) if not self._adapted and self.reading_service is not None: if self.reading_service_state is None: self.datapipe = self.reading_service.initialize(self.datapipe) else: if not isinstance(self.reading_service, CheckpointableReadingServiceInterface): raise TypeError("Cannot restore from non-checkpointable reading service") self.datapipe = self.reading_service.restore(self.datapipe, self.reading_service_state) self._adapted = True if self.reading_service is not None: iter_reset_fn = self.reading_service.initialize_iteration(self._seed_generator) if iter_reset_fn: self.datapipe = iter_reset_fn(self.datapipe) self._datapipe_iter = iter(self.datapipe) self._reset_iter = False self.valid_iterator_id = 0 if self.valid_iterator_id is None else self.valid_iterator_id + 1 return DataLoader2Iterator(self, self.valid_iterator_id) def seed(self, seed: int) -> None: r""" Set random seed for DataLoader2 to control determinism. Args: seed: Random uint64 seed """ if seed >= _UINT64_UPPER_BOUND: raise ValueError(f"Expected an uint64 seed, but got {seed}.") self._seed = seed self._reset_seed = True def __del__(self) -> None: self.shutdown() def shutdown(self) -> None: r""" Shuts down ``ReadingService`` and clean up iterator. """ try: if not self._terminated: self._terminated = True if self.reading_service is not None: self.reading_service.finalize_iteration() self.reading_service.finalize() if not self._reset_iter: self._reset_iter = True self._datapipe_iter = None # Ignore AttributeError in case any attribute has been removed before `__del__` except AttributeError: pass def __enter__(self) -> "DataLoader2[T_co]": return self def __exit__(self, exc_type, exc_value, traceback) -> None: self.shutdown() def state_dict(self) -> Dict[str, Any]: r""" Return a dictionary to represent the state of data-processing pipeline with keys: - ``serialized_datapipe``:Serialized ``DataPipe`` before ``ReadingService`` adaption. - ``reading_service_state``: The state of ``ReadingService`` and adapted ``DataPipe``. """ reading_service_state = None if self.reading_service is not None and isinstance(self.reading_service, CheckpointableReadingServiceInterface): reading_service_state = self.reading_service.checkpoint() # Serialize datapipe after applying adapters and before reading service adaption serialized_datapipe = serialize_datapipe(self._datapipe_before_reading_service_adapt) serialized_randomness_state = ( self._seed, self._reset_seed, pickle.dumps(self._seed_generator), pickle.dumps(self._initial_seed_generator), ) return { SERIALIZED_DATAPIPE_KEY_NAME: serialized_datapipe, READING_SERVICE_STATE_KEY_NAME: reading_service_state, RANDOMNESS_STATE_KEY_NAME: serialized_randomness_state, } @classmethod def from_state( cls, state: Dict[str, Any], reading_service: CheckpointableReadingServiceInterface, ) -> "DataLoader2[T_co]": """ Create new ``DataLoader2`` with ``DataPipe`` graph and ``ReadingService`` restored from the serialized state. """ serialized_datapipe = state[SERIALIZED_DATAPIPE_KEY_NAME] reading_service_state = state[READING_SERVICE_STATE_KEY_NAME] data_loader: "DataLoader2[T_co]" = DataLoader2( datapipe=deserialize_datapipe(serialized_datapipe), datapipe_adapter_fn=None, reading_service=reading_service, ) data_loader.reading_service_state = reading_service_state # This check is needed for backward compatibility of `state_dict` for users loading from older version if RANDOMNESS_STATE_KEY_NAME in state: randomness_state = state[RANDOMNESS_STATE_KEY_NAME] data_loader._seed, data_loader._reset_seed = randomness_state[0], randomness_state[1] data_loader._seed_generator = pickle.loads(randomness_state[2]) data_loader._initial_seed_generator = pickle.loads(randomness_state[3]) return data_loader def load_state_dict(self, state_dict: Dict[str, Any]) -> None: """ For the existing ``DataLoader2``, load serialized state to restore ``DataPipe`` graph and reset the internal state of ``ReadingService``. """ # edge case checking # iterator has already been created: 1) iterator is just created 2) iterator is created and iter is exhausted if self._datapipe_iter is not None: raise RuntimeError( "DataLoaderV2 iterator has already been created, `load_state_dict()` can’t be called. " "Please create a new dataloader in order to use load state dict." ) serialized_datapipe = state_dict[SERIALIZED_DATAPIPE_KEY_NAME] reading_service_state = state_dict[READING_SERVICE_STATE_KEY_NAME] # deserialize datapipe deserialized_datapipe = deserialize_datapipe(serialized_datapipe) assert deserialized_datapipe is not None # override existing datapipe and reading service state self.datapipe = deserialized_datapipe self.reading_service_state = reading_service_state # This check is needed for backward compatibility of `state_dict` for users loading from older version if RANDOMNESS_STATE_KEY_NAME in state_dict: randomness_state = state_dict[RANDOMNESS_STATE_KEY_NAME] self._seed, self._reset_seed = randomness_state[0], randomness_state[1] self._seed_generator = pickle.loads(randomness_state[2]) self._initial_seed_generator = pickle.loads(randomness_state[3]) # re-initialize datapipe_adapter_fn and _datapipe_before_reading_service_adapt if self.datapipe_adapter_fns is not None: for adapter_fn in self.datapipe_adapter_fns: self.datapipe = adapter_fn(self.datapipe) self._datapipe_before_reading_service_adapt = clone(self.datapipe) def _restore_checkpoint_beginning_of_epoch(self) -> None: r""" At the beginning of each iteration (epoch), the initial state of randomness is automatically saved. That state is also saved as part of ``state_dict``. This method restores the current DataLoader2 RNG state to that initial state. The common use case is to invoke this method after ``DataLoader2``'s state is restored (through ``.from_state(...)`` or ``load_state_dict(...)``) in order to resume from the beginning of the last-ran epoch. """ self._seed_generator = self._initial_seed_generator def _pause(self) -> None: if hasattr(self.reading_service, "_pause"): self._is_paused = True pause_fn = self.reading_service._pause() if pause_fn is not None: self.datapipe = pause_fn(self.datapipe) else: warnings.warn("ReadingService doesn't support `pause`.") def _resume(self) -> None: if hasattr(self.reading_service, "_resume"): if not self._is_paused: warnings.warn("Resume is called when `DataLoader2` is not paused. No operation is performed.") else: resume_fn = self.reading_service._resume() if resume_fn is not None: self.datapipe = resume_fn(self.datapipe) self._is_paused = False else: warnings.warn("ReadingService doesn't support `resume`.") def _limit(self, num_batches: Optional[int]) -> None: if hasattr(self.reading_service, "_limit"): limit_fn = self.reading_service._limit(num_batches) if limit_fn is not None: self.datapipe = limit_fn(self.datapipe, num_batches) else: warnings.warn("ReadingService doesn't support `limit`.")
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. class PauseIteration(StopIteration): pass
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from abc import abstractmethod import torch from torchdata.dataloader2.graph import DataPipe, traverse_dps from torchdata.datapipes.iter.util.cacheholder import _WaitPendingCacheItemIterDataPipe __all__ = [ "Adapter", "CacheTimeout", "Shuffle", ] assert __all__ == sorted(__all__) class Adapter: r""" Adapter Base Class that follows python Callable protocol. """ @abstractmethod def __call__(self, datapipe: DataPipe) -> DataPipe: r""" Callable function that either runs in-place modification of the ``DataPipe`` graph, or returns a new ``DataPipe`` graph. Args: datapipe: ``DataPipe`` that needs to be adapted. Returns: Adapted ``DataPipe`` or new ``DataPipe``. """ pass class Shuffle(Adapter): r""" Shuffle DataPipes adapter allows control over all existing Shuffler (``shuffle``) DataPipes in the graph. Args: enable: Optional boolean argument to enable/disable shuffling in the ``DataPipe`` graph. True by default. - True: Enables all previously disabled ``ShufflerDataPipes``. If none exists, it will add a new ``shuffle`` at the end of the graph. - False: Disables all ``ShufflerDataPipes`` in the graph. - None: No-op. Introduced for backward compatibility. Example: .. testsetup:: from torchdata.datapipes.iter import IterableWrapper from torchdata.dataloader2 import DataLoader2 from torchdata.dataloader2.adapter import Shuffle size = 12 .. testcode:: dp = IterableWrapper(range(size)).shuffle() dl = DataLoader2(dp, [Shuffle(False)]) assert list(range(size)) == list(dl) """ def __init__(self, enable=True): self.enable = enable def __call__(self, datapipe: DataPipe) -> DataPipe: return torch.utils.data.graph_settings.apply_shuffle_settings(datapipe, shuffle=self.enable) class CacheTimeout(Adapter): r""" CacheTimeout DataPipes adapter allows control over timeouts of all existing EndOnDiskCacheHolder (``end_caching``) in the graph. Useful when cached pipeline takes too long to execute (ex. slow file downloading). Args: timeout: int - amount of seconds parallel processes will wait for cached files to appear. Example: .. testsetup:: from torchdata.datapipes.iter import IterableWrapper from torchdata.dataloader2 import DataLoader2 from torchdata.dataloader2.adapter import CacheTimeout size = 12 .. testcode:: dp = IterableWrapper(range(size)).shuffle() dl = DataLoader2(dp, [CacheTimeout(600)]) """ def __init__(self, timeout=None): if timeout is None: raise ValueError("timeout should be integer") self.timeout = timeout def __call__(self, datapipe: DataPipe) -> DataPipe: graph = traverse_dps(datapipe) all_pipes = torch.utils.data.graph_settings.get_all_graph_pipes(graph) cache_locks = {pipe for pipe in all_pipes if isinstance(pipe, _WaitPendingCacheItemIterDataPipe)} for cache_lock in cache_locks: cache_lock.set_timeout(self.timeout) return datapipe
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import abc class ShuffleSpec(abc.ABC): """Defines a shuffle specification."""
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from torchdata.dataloader2.dataloader2 import DataLoader2, DataLoader2Iterator from torchdata.dataloader2.error import PauseIteration from torchdata.dataloader2.reading_service import ( CheckpointableReadingServiceInterface, DistributedReadingService, InProcessReadingService, MultiProcessingReadingService, PrototypeMultiProcessingReadingService, ReadingServiceInterface, SequentialReadingService, ) from torchdata.dataloader2.shuffle_spec import ShuffleSpec __all__ = [ "CheckpointableReadingServiceInterface", "DataLoader2", "DataLoader2Iterator", "DistributedReadingService", "InProcessReadingService", "MultiProcessingReadingService", "PauseIteration", "PrototypeMultiProcessingReadingService", "ReadingServiceInterface", "SequentialReadingService", "ShuffleSpec", ] assert __all__ == sorted(__all__)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from torchdata.dataloader2.graph import DataPipe, DataPipeGraph, traverse_dps from torchdata.datapipes.iter import ShardingFilter, Shuffler def _check_shuffle_before_sharding(datapipe: DataPipe) -> bool: """ This function will check if a ``shuffle`` operation is presented before each ``sharding_filter`` operation for every single path in the ``DataPipe`` graph. """ graph: DataPipeGraph = traverse_dps(datapipe) # type: ignore[arg-type] return _check_shuffler_before_sharding_helper(graph) def _check_shuffler_before_sharding_helper(graph: DataPipeGraph) -> bool: if not graph: return True if len(graph) > 1: for dp, sub_graph in graph.values(): if isinstance(dp, ShardingFilter): if not _has_shuffler(sub_graph): return False else: if not _check_shuffler_before_sharding_helper(sub_graph): return False return True dp, dp_graph = list(graph.values())[0] if isinstance(dp, ShardingFilter): return _has_shuffler(dp_graph) return _check_shuffler_before_sharding_helper(dp_graph) def _has_shuffler(graph: DataPipeGraph) -> bool: if not graph: return False if len(graph) > 1: for dp, sub_graph in graph.values(): if not (isinstance(dp, Shuffler) or _has_shuffler(sub_graph)): return False return True dp, dp_graph = list(graph.values())[0] if isinstance(dp, Shuffler): return True return _has_shuffler(dp_graph)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import multiprocessing as py_mp import pickle import warnings from abc import ABC, abstractmethod from datetime import timedelta from functools import partial from multiprocessing.queues import Queue from typing import Callable, List, Optional, Tuple import torch import torch.distributed as dist import torch.multiprocessing as mp from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES from torchdata._constants import default_dl2_worker_join_timeout_in_s, default_timeout_in_s from torchdata.dataloader2 import communication from torchdata.dataloader2.graph import DataPipe, list_dps, replace_dp, set_graph_random_seed, traverse_dps from torchdata.dataloader2.graph._serialization import attach_wrapper from torchdata.dataloader2.graph.utils import _find_replicable_branches from torchdata.dataloader2.random import dist_share_seed, SeedGenerator from torchdata.dataloader2.utils import process_init_fn, WorkerInfo from torchdata.dataloader2.utils.dispatch import _DummyIterDataPipe, find_lca_round_robin_sharding_dp from torchdata.datapipes.iter import FullSync class ReadingServiceInterface(ABC): r""" Interface for ``ReadingService``. Please extend custom ``ReadingService`` based on this interface class. ReadingService must be picklable prior to ``initialize`` being called. This is because a copy of it will be created by ``DataLoader2`` to avoid the situation where the same ReadingService object is used by multiple ``DataLoader2``, and its internal state will be modifiable by each of them. As a result of this constraint, certain initialization steps may need to take place within the ``initialize`` method rather than ``__init__`` of the ReadingService class. """ @abstractmethod def initialize(self, datapipe: DataPipe) -> DataPipe: r""" ``ReadingService`` takes a ``DataPipe`` graph, adapts it into a new ``DataPipe`` graph based on the custom need. Called once in creating ``DataLoader2`` iterator at first time. Prior to calling this method, the ``ReadingService`` object must be picklable. Args: datapipe: Original ``DataPipe`` graph. Return: An adapted or a new ``DataPipe`` graph. """ pass def finalize(self) -> None: r""" ``ReadingService`` cleans up internal states and fully shuts down the service. Called in ``DataLoader2``'s ``shutdown`` and ``__del__``. """ pass def initialize_iteration( self, seed_generator: SeedGenerator, iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]] = None ) -> Optional[Callable[[DataPipe], DataPipe]]: r""" ``ReadingService`` spins up service for an epoch. Called at the beginning of every time getting ``DataLoader2`` iterator. Args: seed_generator: SeedGenerator object created and managed by DataLoader2. As the single source of randomness, it will govern the determinism for all of random operations with the graph of DataPipes. iter_reset_fn: Optional reset function from the prior ``ReadingServcie`` when ``SequentialReadingService`` chains multiple ``ReadingServices`` Returns: A new ``iter_reset_fn`` to be used by subseqeuent ``ReadingService`` Example: MultiProcessingReadingService starts setting worker seeds per process and prefetching items from the graph. """ pass def finalize_iteration(self) -> None: r""" ``ReadingService`` ends service after an epoch is finished. Called when the iterator of ``DataLoader2`` is depleted. """ pass def __del__(self): # Due to non-deterministic order of destruction, by the time `finalize` is called, # some objects may already be `None`. try: self.finalize() except AttributeError: pass class CheckpointableReadingServiceInterface(ReadingServiceInterface): r""" Extend ``ReadingServiceInterface`` with two additional methods to save/restore the state of the data-processing graph. """ @abstractmethod def checkpoint(self) -> bytes: """ ``ReadingService`` serializes the internal states. Called in ``DataLoader2.state_dict``. """ pass @abstractmethod def restore(self, datapipe: DataPipe, serialized_state: bytes) -> DataPipe: """ ``ReadingService`` adapts ``DataPipe`` graph based on the serialized state. Called once in creating ``DataLoader2`` iterator at first time. Counterpart of ``initialize``, which adapt ``DataPipe`` graph from scratch. Args: datapipe: original ``DataPipe`` graph before adapted by ``ReadingService`` serialized_state: The serialized state of internal state used to restore the state of the adapted ``DataPipe`` graph. Returns: Adapted ``DataPipe`` generated from the serialized state. """ pass def _collate_no_op(batch): return batch[0] class PrototypeMultiProcessingReadingService(ReadingServiceInterface): def __new__(cls, *args, **kwargs): warnings.warn( "`PrototypeMultiProcessingReadingService` is deprecated and will be removed in TorchData 0.8. " "Please use `MultiProcessingReadingService`." ) return MultiProcessingReadingService(*args, **kwargs) class InProcessReadingService(ReadingServiceInterface): r""" Default ReadingService to serve the ``DataPipe` graph in the main process, and apply graph settings like determinism control to the graph. Args: prefetch_cnt: (int, 0 by default): Number of data will be prefetched in the main process. init_fn: (Callable, optional): Custom function to be called when the main process starts to iterate over ``DataPipe`` graph. reset_fn: (Callable, optional): Custom function to be called at the beginning of each epoch with ``DataPipe``, ``WorkerInfo`` and ``SeedGenerator`` as the expected arguments. """ _prefetch_cnt: int _init_fn: Optional[Callable[[DataPipe, WorkerInfo], DataPipe]] _reset_fn: Optional[Callable[[DataPipe, WorkerInfo, SeedGenerator], DataPipe]] _end_datapipe: Optional[DataPipe] def __init__( self, prefetch_cnt: int = 0, init_fn: Optional[Callable[[DataPipe, WorkerInfo], DataPipe]] = None, reset_fn: Optional[Callable[[DataPipe, WorkerInfo, SeedGenerator], DataPipe]] = None, ) -> None: self._prefetch_cnt = prefetch_cnt self._init_fn = init_fn self._reset_fn = reset_fn self._end_datapipe = None def initialize(self, datapipe: DataPipe) -> DataPipe: worker_info = WorkerInfo(1, 0) datapipe = process_init_fn(datapipe, worker_info, self._init_fn) self._end_datapipe = datapipe return datapipe def initialize_iteration( self, seed_generator: SeedGenerator, iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]] = None ) -> Optional[Callable[[DataPipe], DataPipe]]: assert self._end_datapipe is not None # Set random seeds for DataPipe that are in the main process (NOT those in worker processes) # Worker seeds are set in `process_reset_fn` set_graph_random_seed(self._end_datapipe, seed_generator) return None def _pause( self, pause_fn: Optional[Callable[[DataPipe], DataPipe]] = None ) -> Optional[Callable[[DataPipe], DataPipe]]: """ Pauses DataPipes' activities in the main process in order to collect state. """ assert self._end_datapipe is not None dp_list = list_dps(traverse_dps(self._end_datapipe)) for dp in dp_list: if hasattr(dp, "pause") and callable(dp.pause): dp.pause() return None def _resume( self, resume_fn: Optional[Callable[[DataPipe], DataPipe]] = None ) -> Optional[Callable[[DataPipe], DataPipe]]: """ Resumes DataPipes' activities. This is required to be called after `_pause` before the DataLoader can keep yielding elements. """ assert self._end_datapipe is not None dp_list = list_dps(traverse_dps(self._end_datapipe)) # Reversed order for dp in dp_list[::-1]: if hasattr(dp, "resume") and callable(dp.resume): dp.resume() return None def _limit( self, num_batches: Optional[int], limit_fn: Optional[Callable[[DataPipe, Optional[int]], DataPipe]] = None ) -> Optional[Callable[[DataPipe, Optional[int]], DataPipe]]: r""" Apply limit_fn to the DataPipe graph. """ if limit_fn is not None: # TODO: Remove when flexible checkpoint is supported limit_fn(self._end_datapipe, num_batches) # type: ignore[arg-type] return None class MultiProcessingReadingService(ReadingServiceInterface): r""" Spawns multiple worker processes to load data from the ``DataPipe`` graph. If any non-replicable ``DataPipe`` (``sharding_round_robin_dispatch``) is presented in the graph, a separate dispatching process will be created to load data from the lowest common ancestor of all non-replicable ``DataPipes`` and distributes data to each worker process in the round-robin manner Then, the subsequent ``DataPipe`` graph in each worker process will process the data from the dispatching process and eventually return the result to the main process. Args: num_workers (int): How many subprocesses to use for data loading. multiprocessing_context (str, optional): Multiprocessing starting method. If method is None then the default context is returned. Otherwise, method should be 'fork', 'spawn'. worker_prefetch_cnt: (int, 10 by default): Number of data will be prefetched at the end of each worker process. main_prefetch_cnt: (int, 10 by default): Number of data will be prefetched at the end of the whole pipeline in the main process. worker_init_fn: (Callable, optional): Function to be called when each worker process launches with ``DataPipe`` and ``WorkerInfo`` as the expected arguments. worker_reset_fn: (Callable, optional): Function to be called at the beginning of each epoch in each worker process with ``DataPipe``, ``WorkerInfo`` and ``SeedGenerator`` as the expected arguments. """ num_workers: int multiprocessing_context: Optional[str] worker_prefetch_cnt: int main_prefetch_cnt: int worker_init_fn: Optional[Callable[[DataPipe, WorkerInfo], DataPipe]] worker_reset_fn: Optional[Callable[[DataPipe, WorkerInfo, SeedGenerator], DataPipe]] _worker_processes: List[Tuple[py_mp.process.BaseProcess, Queue, Queue]] _dispatch_process: Optional[Tuple[py_mp.process.BaseProcess, List[Queue], List[Queue]]] _worker_datapipes: List[DataPipe] _worker_consumer_datapipe: Optional[DataPipe] _main_prefetch_datapipe: Optional[DataPipe] _end_datapipe: Optional[DataPipe] _mp: bool _finalized: bool = False def __init__( self, num_workers: int = 0, multiprocessing_context: Optional[str] = None, worker_prefetch_cnt: int = 10, main_prefetch_cnt: int = 10, worker_init_fn: Optional[Callable[[DataPipe, WorkerInfo], DataPipe]] = None, worker_reset_fn: Optional[Callable[[DataPipe, WorkerInfo, SeedGenerator], DataPipe]] = None, ) -> None: if num_workers == 0: warnings.warn("Please use `InProcessReadingService` for num_workers=0") self.num_workers = num_workers if multiprocessing_context is not None: _all_start_methods = mp.get_all_start_methods() assert ( multiprocessing_context in _all_start_methods ), f"Please choose one available multiprocessing context from {_all_start_methods}" self.multiprocessing_context = multiprocessing_context self.worker_prefetch_cnt = worker_prefetch_cnt self.main_prefetch_cnt = main_prefetch_cnt self.worker_init_fn = worker_init_fn self.worker_reset_fn = worker_reset_fn self._worker_processes = [] self._dispatch_process = None self._worker_datapipes = [] self._worker_consumer_datapipe = None self._main_prefetch_datapipe = None self._end_datapipe = None self._mp = num_workers > 0 def initialize(self, datapipe: DataPipe) -> DataPipe: r""" ``MultiProcessingReadingService`` finds information about sharding, separates graph by multiple pieces and reconnects it using queues. creates subprocesses. """ if not self._mp: # TODO(616): Warn and recommend usage of InProcessReadingService worker_info = WorkerInfo(1, 0) datapipe = process_init_fn(datapipe, worker_info, self.worker_init_fn) self._end_datapipe = datapipe return datapipe ctx = mp.get_context(self.multiprocessing_context) # Launch dispatching process for the lowest common ancestor of non-replicable DataPipes graph = traverse_dps(datapipe) dispatching_dp = find_lca_round_robin_sharding_dp(graph) # TODO(ejguan): When the last DataPipe is round_robin_sharding, use InPrcoessReadingService if dispatching_dp is not None: dummy_dp = _DummyIterDataPipe() graph = replace_dp(graph, dispatching_dp, dummy_dp) # type: ignore[arg-type] datapipe = list(graph.values())[0][0] # TODO(ejguan): Determine buffer_size at runtime or use unlimited buffer round_robin_dps = dispatching_dp.round_robin_demux(num_instances=self.num_workers) # TODO(ejguan): Benchmark if we need to prefetch in dispatching process worker_info = WorkerInfo(self.num_workers, 0) process, req_queues, res_queues = communication.eventloop.CreateProcessForMultipleDataPipelines( ctx, round_robin_dps, process_name="dispatching process", worker_info=worker_info, custom_reset_fn=self.worker_reset_fn, ) assert len(req_queues) == self.num_workers and len(res_queues) == self.num_workers for req_queue in req_queues: req_queue.cancel_join_thread() for res_queue in res_queues: res_queue.cancel_join_thread() process.daemon = True process.start() self._dispatch_process = (process, req_queues, res_queues) # Find replicable branches for worker processes # The rest of non-replicable DataPipes will remain in the main process replicable_dps = _find_replicable_branches(graph) assert ( len(replicable_dps) == 1 ), "MultiProcessingReadingService only supports single replicable branch currently" replicable_dp = replicable_dps[0] replicable_dp = attach_wrapper(replicable_dp) for worker_id in range(self.num_workers): worker_info = WorkerInfo(self.num_workers, worker_id) # Dispatching process for non-replicable DataPipes exists dispatching_req_queue = None if self._dispatch_process is None else self._dispatch_process[1][worker_id] dispatching_res_queue = None if self._dispatch_process is None else self._dispatch_process[2][worker_id] call_on_process_init = partial( process_init_fn, worker_info=worker_info, custom_init_fn=self.worker_init_fn, worker_prefetch_cnt=self.worker_prefetch_cnt, dispatching_req_queue=dispatching_req_queue, dispatching_res_queue=dispatching_res_queue, ) (process, req_queue, res_queue) = communication.eventloop.CreateProcessForDataPipeline( ctx, replicable_dp, process_name="worker process", worker_info=worker_info, call_on_process_init=call_on_process_init, custom_reset_fn=self.worker_reset_fn, ) req_queue.cancel_join_thread() process.daemon = True process.start() self._worker_processes.append((process, req_queue, res_queue)) # These queues are independent local_datapipe = communication.iter.QueueWrapper( communication.protocol.IterDataPipeQueueProtocolClient(req_queue, res_queue) ) self._worker_datapipes.append(local_datapipe) end_datapipe = communication.iter._IterateQueueDataPipes(self._worker_datapipes) # type: ignore[assignment] self._worker_consumer_datapipe = end_datapipe if self.main_prefetch_cnt > 0: end_datapipe = self._worker_consumer_datapipe.prefetch(self.main_prefetch_cnt) # type: ignore[union-attr] self._main_prefetch_datapipe = end_datapipe # Attach non-replicable DataPipes if replicable_dps[0] is not datapipe: graph = replace_dp(graph, replicable_dps[0], end_datapipe) end_datapipe = datapipe # type: ignore[assignment] self._end_datapipe = end_datapipe assert self._end_datapipe is not None return self._end_datapipe # type: ignore[return-value] def initialize_iteration( self, seed_generator: SeedGenerator, iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]] = None ) -> Optional[Callable[[DataPipe], DataPipe]]: assert self._end_datapipe is not None # Set random seeds for DataPipe that are in the main process (NOT those in worker processes) # Worker seeds are set in `process_reset_fn` set_graph_random_seed(self._end_datapipe, seed_generator) if self._mp: if self.main_prefetch_cnt > 0: # Stop prefetching first self._main_prefetch_datapipe.reset() # type: ignore[union-attr] # Send the shared seed to subprocesses assert self._worker_consumer_datapipe is not None self._worker_consumer_datapipe.reset_epoch(seed_generator, iter_reset_fn) # In-process (num_workers == 0) else: # Technically speaking, we should call `_process_reset_fn` to reset global RNGs # for data-related operations. However, it would pollute the state of global RNGs # (random, torch and numpy), if users have already seeded them in the main process # TODO(ejguan): This should be fixed by adding a method to isolate global RNGs pass return None def finalize(self) -> None: r""" ``MultiProcessingReadingService`` invalidate states & properly exits all subprocesses. """ if self._finalized: return self._finalized = True # TODO(618): Check if anyone stuck with messages # Clean up worker processes if self.num_workers > 0: self._worker_consumer_datapipe.request_terminate() # type: ignore[union-attr] for process, req_queue, _ in self._worker_processes: try: process.join(default_dl2_worker_join_timeout_in_s) except TimeoutError: pass req_queue.close() # Clean up dispatching process if self._dispatch_process is not None: try: self._dispatch_process[0].join(default_dl2_worker_join_timeout_in_s) except TimeoutError: pass for req_queue in self._dispatch_process[1]: req_queue.close() self._worker_processes = [] self._dispatch_process = None def _pause( self, pause_fn: Optional[Callable[[DataPipe], DataPipe]] = None ) -> Optional[Callable[[DataPipe], DataPipe]]: r""" Pauses DataPipes' activities such as prefetching within main/worker/dispatching processes, in order to collect state. The provided ``pause_fn`` will be executed in worker/dispatching processes. """ if self.num_workers == 0: raise RuntimeError( "If you would like to use `pause` with `MultiProcessingReadingService`, " "please use more than 0 worker." ) assert self._end_datapipe is not None # Call pause for DataPipes in the main process (e.g. prefetch, fullsync) dp_list = list_dps(traverse_dps(self._end_datapipe)) for dp in dp_list: if hasattr(dp, "pause") and callable(dp.pause): dp.pause() self._worker_consumer_datapipe.request_pause(pause_fn) # type: ignore[union-attr] return None def _resume( self, resume_fn: Optional[Callable[[DataPipe], DataPipe]] = None ) -> Optional[Callable[[DataPipe], DataPipe]]: r""" Resumes DataPipes' activities. This is required to be called after `_pause` before the DataLoader can keep yielding elements. """ if self.num_workers > 0: self._worker_consumer_datapipe.request_resume(resume_fn) # type: ignore[union-attr] else: raise RuntimeError( "If you would like to use `resume` with `MultiProcessingReadingService`, " "please use more than 0 worker." ) assert self._end_datapipe is not None # Call resume for DataPipes in the main process (e.g. prefetch, fullsync) dp_list = list_dps(traverse_dps(self._end_datapipe)) for dp in dp_list[::-1]: if hasattr(dp, "resume") and callable(dp.resume): dp.resume() return None def _limit( self, num_batches: Optional[int], limit_fn: Optional[Callable[[DataPipe, Optional[int]], DataPipe]] = None ) -> Optional[Callable[[DataPipe, Optional[int]], DataPipe]]: r""" Send limit_fn to worker/dispatching process to set the limit number to the specified DataPipes. """ if limit_fn is not None: # Only propogate limit when dispatching process exists num_batches = None if self._dispatch_process is None else num_batches self._worker_consumer_datapipe.request_limit(num_batches, limit_fn) # type: ignore[union-attr] # TODO: Remove when flexible checkpoint is supported limit_fn(self._end_datapipe, num_batches) # type: ignore[arg-type] return None class DistributedReadingService(ReadingServiceInterface): r""" ``DistributedReadingSerivce`` handles distributed sharding on the graph of ``DataPipe`` and guarantee the randomness by sharing the same seed across the distributed processes. Args: timeout: Timeout for operations executed against the process group in seconds. Default value equals 30 minutes. """ def __init__(self, timeout: int = default_timeout_in_s): if not dist.is_available(): raise RuntimeError("Torch Distributed is required to be available") self._world_size: int = 1 self._rank: int = 0 self._datapipe: Optional[DataPipe] = None self._timeout: int = timeout self._pg: Optional[dist.ProcessGroup] = None def initialize(self, datapipe: DataPipe) -> DataPipe: r""" Launches the ``gloo``-backend distributed process group. Carries out distributed sharding on the graph of ``DataPipe`` and returns the graph attached with a ``FullSyncIterDataPipe`` at the end. """ if not (dist.is_available() and dist.is_initialized()): raise RuntimeError("Torch Distributed is required to be initialized") self._world_size = dist.get_world_size() self._rank = dist.get_rank() self._pg = dist.new_group(backend="gloo", timeout=timedelta(seconds=self._timeout)) torch.utils.data.graph_settings.apply_sharding( datapipe, self._world_size, self._rank, SHARDING_PRIORITIES.DISTRIBUTED ) # Only append FullSyncIterDataPipe if it's not presented at the end of the pipeline if not isinstance(datapipe, FullSync): datapipe = datapipe.fullsync(self._timeout) self._datapipe = datapipe return datapipe def initialize_iteration( self, seed_generator: SeedGenerator, iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]] = None ) -> Optional[Callable[[DataPipe], DataPipe]]: r""" Shares the same seed from rank 0 to other ranks across the distributed processes and apply the random seed to the ``DataPipe`` graph. """ assert self._datapipe is not None shared_seed = dist_share_seed(seed_generator.generate_shared_seed(), self._pg) seed_generator.seed(shared_seed) seed_generator = seed_generator.spawn(self._rank, inplace=True) set_graph_random_seed(self._datapipe, seed_generator) return None def finalize(self) -> None: r""" Clean up the distributed process group. """ if self._pg is not None: dist.destroy_process_group(self._pg) self._pg = None class SequentialReadingService(CheckpointableReadingServiceInterface): def __init__(self, *reading_services): self.reading_services = reading_services # Sequential Order def initialize(self, datapipe: DataPipe) -> DataPipe: for rs in self.reading_services: datapipe = rs.initialize(datapipe) return datapipe # Reversed Order def finalize(self) -> None: for rs in reversed(self.reading_services): rs.finalize() # Sequential Order def initialize_iteration( self, seed_generator: SeedGenerator, iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]] = None ) -> Optional[Callable[[DataPipe], DataPipe]]: chained_iter_reset_fn = iter_reset_fn for rs in self.reading_services: chained_iter_reset_fn = rs.initialize_iteration( seed_generator=seed_generator, iter_reset_fn=chained_iter_reset_fn ) return chained_iter_reset_fn # Reversed Order def finalize_iteration(self) -> None: for rs in reversed(self.reading_services): rs.finalize_iteration() # Sequential Order def checkpoint(self) -> bytes: states = [] for rs in self.reading_services: if hasattr(rs, "checkpoint") and callable(rs.checkpoint): states.append(rs.checkpoint()) else: warnings.warn(f"{rs} doesn't support `checkpoint`, skipping...") states.append(b"") return pickle.dumps(states) # Sequential Order, to align with initialize def restore(self, datapipe, serialized_state: bytes) -> DataPipe: states = pickle.loads(serialized_state) assert len(states) == len(self.reading_services) for rs, state in zip(self.reading_services, states): if hasattr(rs, "restore") and callable(rs.restore): datapipe = rs.restore(datapipe, state) else: warnings.warn(f"{rs} doesn't support `restore` from state, initialize from scratch") datapipe = rs.initialize(datapipe) return datapipe def _pause( self, pause_fn: Optional[Callable[[DataPipe], DataPipe]] = None ) -> Optional[Callable[[DataPipe], DataPipe]]: r""" Pause the ``DataPipe`` graph defined in all ``ReadingServices``. For example of ``MultiProcessingReadingService`` would accept a ``pause_fn`` from a prior ``ReadingService`` to execute custom pause logic within worker/dispatching processes. """ for rs in self.reading_services: if hasattr(rs, "_pause"): pause_fn = rs._pause(pause_fn) return pause_fn def _resume( self, resume_fn: Optional[Callable[[DataPipe], DataPipe]] = None ) -> Optional[Callable[[DataPipe], DataPipe]]: r""" Resume the ``DataPipe`` graph defined in all ``ReadingServices``. For example of ``MultiProcessingReadingService`` would accept a ``resume_fn`` from a prior ``ReadingService`` to execute custom resume logic within worker/dispatching processes. """ for rs in self.reading_services: if hasattr(rs, "_resume"): resume_fn = rs._resume(resume_fn) return resume_fn def _limit( self, num_batches: Optional[int], limit_fn: Optional[Callable[[DataPipe, Optional[int]], DataPipe]] = None ) -> Optional[Callable[[DataPipe, Optional[int]], DataPipe]]: r""" Limit the ``DataPipe`` graph defined in all ``ReadingServices``. For example of ``MultiProcessingReadingService`` would accept a ``limit_fn`` from a prior ``ReadingService`` to set limit to ``DataPipes` within worker/dispatching processes. """ for rs in self.reading_services: if hasattr(rs, "_limit"): limit_fn = rs._limit(num_batches, limit_fn) return limit_fn
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import pickle from torch.utils.data.datapipes.datapipe import ( _DataPipeSerializationWrapper, _IterDataPipeSerializationWrapper, _MapDataPipeSerializationWrapper, ) from torchdata.dataloader2.graph import DataPipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.map import MapDataPipe try: import dill # XXX: By default, dill writes the Pickler dispatch table to inject its # own logic there. This globally affects the behavior of the standard library # pickler for any user who transitively depends on this module! # Undo this extension to avoid altering the behavior of the pickler globally. dill.extend(use_dill=False) HAS_DILL = True except ImportError: HAS_DILL = False __all__ = [ "attach_wrapper", "clone", "deserialize_datapipe", "extract_wrapper", "serialize_datapipe", ] def serialize_datapipe(datapipe: DataPipe) -> bytes: datapipe = attach_wrapper(datapipe) try: return pickle.dumps(datapipe) except pickle.PickleError as e: raise NotImplementedError(f"Prototype only support pickle-able datapipes for checkpoint: {e}") def deserialize_datapipe(serialized_state: bytes) -> DataPipe: try: datapipe = pickle.loads(serialized_state) except pickle.PickleError as e: raise NotImplementedError(f"Prototype only support pickle-able datapipes for checkpoint: {e}") return extract_wrapper(datapipe) def attach_wrapper(datapipe: DataPipe) -> DataPipe: r""" Wraps the ``DataPipe`` with the corresponding serialization wrapper. """ wrapped_dp: DataPipe = datapipe if not isinstance(datapipe, _DataPipeSerializationWrapper): if isinstance(datapipe, IterDataPipe): wrapped_dp = _IterDataPipeSerializationWrapper(datapipe) elif isinstance(datapipe, MapDataPipe): wrapped_dp = _MapDataPipeSerializationWrapper(datapipe) return wrapped_dp def extract_wrapper(datapipe: DataPipe) -> DataPipe: r""" Extracts the ``DataPipe`` from the serialization wrapper. """ if isinstance(datapipe, _DataPipeSerializationWrapper): datapipe = datapipe._datapipe return datapipe def clone(obj): r""" Standardized way to copy an object when needed, such as for DataPipe/ReadingService. This uses `pickle` to serialize/deserialize to create the copy. """ use_dill = False try: states = pickle.dumps(obj) except Exception: if HAS_DILL: states = dill.dumps(obj) use_dill = True else: raise if use_dill: return dill.loads(states) else: return pickle.loads(states)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from torch.utils.data.graph import DataPipe, DataPipeGraph, traverse_dps from torchdata.dataloader2.graph.settings import set_datapipes_seed, set_graph_random_seed from torchdata.dataloader2.graph.utils import find_dps, list_dps, remove_dp, replace_dp __all__ = [ "DataPipe", "DataPipeGraph", "find_dps", "list_dps", "remove_dp", "replace_dp", "set_datapipes_seed", "set_graph_random_seed", "traverse_dps", ] assert __all__ == sorted(__all__)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from collections import deque from typing import Deque, Dict, List, Optional, Set, Type, Union from torchdata.dataloader2.graph import DataPipe, DataPipeGraph, traverse_dps from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.map import MapDataPipe def find_dps(graph: DataPipeGraph, dp_type: Type[DataPipe]) -> List[DataPipe]: r""" Given the graph of DataPipe generated by ``traverse_dps`` function, return DataPipe instances with the provided DataPipe type. """ dps: List[DataPipe] = [] cache: Set[int] = set() def helper(g) -> None: # pyre-ignore for dp_id, (dp, src_graph) in g.items(): if dp_id in cache: continue cache.add(dp_id) if type(dp) is dp_type: # Please not use `isinstance`, there is a bug. dps.append(dp) helper(src_graph) helper(graph) return dps def list_dps(graph: DataPipeGraph, exclude_dps: Optional[Union[DataPipe, List[DataPipe]]] = None) -> List[DataPipe]: r""" Given the graph of DataPipe generated by ``traverse_dps`` function, return a list of all DataPipe instances without duplication. If ``exclude_dps`` is provided, the provided ``DataPipes`` and their predecessors will be ignored. Note: - The returned list is in the order of breadth first search of the graph """ dps: List[DataPipe] = [] cache: Set[int] = set() if exclude_dps is not None: if isinstance(exclude_dps, (IterDataPipe, MapDataPipe)): exclude_dps = [ exclude_dps, ] for exclude_dp in exclude_dps: # type: ignore[union-attr] assert isinstance(exclude_dp, (IterDataPipe, MapDataPipe)) # Skip DataPipe that has already been excluded if id(exclude_dp) in cache: continue for dp in list_dps(traverse_dps(exclude_dp)): # type: ignore[arg-type] cache.add(id(dp)) q: Deque = deque() # Initialization for dp_id, (dp, subgraph) in graph.items(): if dp_id not in cache: q.append((dp_id, dp, subgraph)) cache.add(dp_id) while len(q) > 0: dp_id, dp, subgraph = q.popleft() dps.append(dp) for parent_dp_id, (parent_dp, parent_subgraph) in subgraph.items(): if parent_dp_id not in cache: q.append((parent_dp_id, parent_dp, parent_subgraph)) cache.add(parent_dp_id) return dps # Given the DataPipe needs to be replaced and the expected DataPipe, return a new graph def replace_dp(graph: DataPipeGraph, old_datapipe: DataPipe, new_datapipe: DataPipe) -> DataPipeGraph: r""" Given the graph of DataPipe generated by ``traverse_dps`` function and the DataPipe to be replaced and the new DataPipe, return the new graph of DataPipe. """ assert len(graph) == 1 if id(old_datapipe) in graph: graph = traverse_dps(new_datapipe) final_datapipe = list(graph.values())[0][0] for recv_dp, send_graph in graph.values(): _replace_dp(recv_dp, send_graph, old_datapipe, new_datapipe) return traverse_dps(final_datapipe) def remove_dp(graph: DataPipeGraph, datapipe: DataPipe) -> DataPipeGraph: r""" Given the graph of DataPipe generated by ``traverse_dps`` function and the DataPipe to be removed, return the new graph of DataPipe. Note: - This function can not remove DataPipe that takes multiple DataPipes as the input. """ assert len(graph) == 1 dp_graph = traverse_dps(datapipe) dp_id = id(datapipe) if len(dp_graph[dp_id][1]) == 0: raise RuntimeError("Cannot remove the source DataPipe from the graph of DataPipe") if len(dp_graph[dp_id][1]) > 1: raise RuntimeError("Cannot remove the receiving DataPipe having multiple sending DataPipes") if dp_id in graph: graph = graph[dp_id][1] for recv_dp, send_graph in graph.values(): _remove_dp(recv_dp, send_graph, datapipe) # Get the last DataPipe in graph assert len(graph) == 1 datapipe = list(graph.values())[0][0] return traverse_dps(datapipe) def _find_replicable_branches(graph: DataPipeGraph) -> List[DataPipe]: r""" Given the graph of DataPipe generated by ``traverse_dps`` function, return DataPipe instances of which all of prior DataPipes are replicable (``dp.is_replicable() == True``). """ assert len(graph) == 1, "DataPipeGraph should only contain a single output DataPipe" dps: List[DataPipe] = [] dp_ids: Set[int] = set() branch_is_replicable: Dict[int, bool] = {} root_dp_id = list(graph.keys())[0] root_dp, root_graph = graph[root_dp_id] def _is_replicable(root_dp_id, root_dp, root_graph) -> bool: # pyre-ignore if root_dp_id in branch_is_replicable: return branch_is_replicable[root_dp_id] # Temporarily set to True branch_is_replicable[root_dp_id] = True if hasattr(root_dp, "is_replicable") and not root_dp.is_replicable(): branch_is_replicable[root_dp_id] = False for dp_id, (dp, src_graph) in root_graph.items(): if not _is_replicable(dp_id, dp, src_graph): branch_is_replicable[root_dp_id] = False # Do not break to go through all children if not branch_is_replicable[root_dp_id]: # All children should have been added to branch_is_replicable already for dp_id, (dp, _) in root_graph.items(): if dp_id in dp_ids: continue if branch_is_replicable[dp_id]: # Guarantee returning the frontmost replicable DataPipe prior_dps = list_dps(traverse_dps(dp)) if all(id(p_dp) not in dp_ids for p_dp in prior_dps): dps.append(dp) dp_ids.add(dp_id) return branch_is_replicable[root_dp_id] if _is_replicable(root_dp_id, root_dp, root_graph): if root_dp_id not in dp_ids: # Guarantee returning the frontmost replicable DataPipe prior_dps = list_dps(traverse_dps(root_dp)) if all(id(p_dp) not in dp_ids for p_dp in prior_dps): dps.append(root_dp) dp_ids.add(root_dp_id) return dps # For each `recv_dp`, find if the source_datapipe needs to be replaced by the new one. # If found, find where the `old_dp` is located in `recv_dp` and switch it to the `new_dp` def _replace_dp(recv_dp, send_graph: DataPipeGraph, old_dp: DataPipe, new_dp: DataPipe) -> None: old_dp_id = id(old_dp) for send_id in send_graph: if send_id == old_dp_id: _assign_attr(recv_dp, old_dp, new_dp, inner_dp=True) else: send_dp, sub_send_graph = send_graph[send_id] _replace_dp(send_dp, sub_send_graph, old_dp, new_dp) # For each `recv_dp`, find if the source_datapipe needs to be replaced by the new one. # If found, find where the `old_dp` is located in `dp` and switch it to the `new_dp` def _remove_dp(recv_dp, send_graph: DataPipeGraph, datapipe: DataPipe) -> None: dp_id = id(datapipe) for send_dp_id in send_graph: if send_dp_id == dp_id: send_dp, sub_send_graph = send_graph[send_dp_id] # if len(sub_send_graph) == 0: # raise RuntimeError("Cannot remove the source DataPipe from the graph of DataPipe") # if len(sub_send_graph) > 1: # raise RuntimeError("Cannot remove the receiving DataPipe having multiple sending DataPipes") src_dp = list(sub_send_graph.values())[0][0] _assign_attr(recv_dp, send_dp, src_dp, inner_dp=True) else: send_dp, sub_send_graph = send_graph[send_dp_id] _remove_dp(send_dp, sub_send_graph, datapipe) # Recursively re-assign datapipe for the sake of nested data structure # `inner_dp` is used to prevent recursive call if we have already met a `DataPipe` def _assign_attr(obj, old_dp, new_dp, inner_dp: bool = False): if obj is old_dp: return new_dp elif isinstance(obj, (IterDataPipe, MapDataPipe)): # Prevent recursive call for DataPipe if not inner_dp: return None for k in list(obj.__dict__.keys()): new_obj = _assign_attr(obj.__dict__[k], old_dp, new_dp) if new_obj is not None: obj.__dict__[k] = new_obj break return None elif isinstance(obj, dict): for k in list(obj.keys()): new_obj = _assign_attr(obj[k], old_dp, new_dp) if new_obj is not None: obj[k] = new_obj break return None # Tuple is immutable, has to re-create a tuple elif isinstance(obj, tuple): temp_list = [] flag = False for o in obj: new_obj = _assign_attr(o, old_dp, new_dp, inner_dp) if new_obj is not None: flag = True temp_list.append(new_dp) else: temp_list.append(o) if flag: return tuple(temp_list) # Special case else: return None elif isinstance(obj, list): for i in range(len(obj)): new_obj = _assign_attr(obj[i], old_dp, new_dp, inner_dp) if new_obj is not None: obj[i] = new_obj break return None elif isinstance(obj, set): new_obj = None for o in obj: if _assign_attr(o, old_dp, new_dp, inner_dp) is not None: new_obj = new_dp break if new_obj is not None: obj.remove(old_dp) obj.add(new_dp) return None else: return None
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import inspect from typing import List from torchdata.dataloader2.graph.utils import DataPipe, find_dps, list_dps, traverse_dps from torchdata.dataloader2.random import SeedGenerator from torchdata.datapipes.iter import ShardingFilter def _is_random_datapipe(datapipe: DataPipe) -> bool: if hasattr(datapipe, "set_seed") and inspect.ismethod(datapipe.set_seed): return True return False def set_datapipes_seed(datapipes: List[DataPipe], seed_generator: SeedGenerator, distributed_shared: bool) -> None: for dp in datapipes: if _is_random_datapipe(dp): if distributed_shared: dp.set_seed(seed_generator.generate_shared_seed()) else: dp.set_seed(seed_generator.generate_seed()) def set_graph_random_seed(datapipe: DataPipe, seed_generator: SeedGenerator) -> DataPipe: r""" Set seeds to the graph of ``DataPipes`` based on a Seed Generator. All random ``DataPipes`` prior to ``ShardingFilter`` will be set seeds by the same Seed Generator to preserve the same random state across distributed/non-distributed workers. And, the random ``DataPipes`` after ``ShardingFilter`` will be set seeds by the worker-local Seed Generator deterministically created based on ``worker_id``. Args: datapipe: seed_generator: """ graph = traverse_dps(datapipe) sharding_filter_dps = find_dps(graph, ShardingFilter) # Set the same seed before sharding_filter # Using cache to exclude potential duplciate DataPipe cache = set() dps_before_sharding = [] for sf_dp in sharding_filter_dps: dps = list_dps(traverse_dps(sf_dp)) for dp in dps: if id(dp) not in cache: cache.add(id(dp)) dps_before_sharding.append(dp) set_datapipes_seed(dps_before_sharding, seed_generator, distributed_shared=True) # Set different seeds after sharding_filter dps_after_sharding = list_dps(graph, exclude_dps=sharding_filter_dps) set_datapipes_seed(dps_after_sharding, seed_generator, distributed_shared=False) return datapipe
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import random from dataclasses import dataclass from multiprocessing.queues import Queue from typing import Callable, Optional import torch from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES from torchdata.dataloader2 import communication from torchdata.dataloader2.graph import ( DataPipe, find_dps, list_dps, replace_dp, set_datapipes_seed, set_graph_random_seed, traverse_dps, ) from torchdata.dataloader2.random import SeedGenerator from torchdata.dataloader2.utils.dispatch import _DummyIterDataPipe, find_non_dispatching_branches from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.map import MapDataPipe try: import numpy HAS_NUMPY = True except ModuleNotFoundError: HAS_NUMPY = False @dataclass(frozen=True) class WorkerInfo: r""" Message class for keeping track of worker information. Args: num_workers (int): Total number of worker processes worker_id (int): Worker ID for the current worker process """ num_workers: int worker_id: int def process_init_fn( datapipe: DataPipe, worker_info: WorkerInfo, custom_init_fn: Optional[Callable[[DataPipe, WorkerInfo], DataPipe]] = None, worker_prefetch_cnt: int = 0, dispatching_req_queue: Optional[Queue] = None, dispatching_res_queue: Optional[Queue] = None, ) -> DataPipe: r""" Based on the worker information, shard the ``DataPipe`` graph dynamically. """ # Find if there is non-replicable DataPipe graph = traverse_dps(datapipe) non_replicable_dp = find_dps(graph, _DummyIterDataPipe) # type: ignore # There are two cases for DataPipe graph in terms of mp sharding: # 1) All DataPipes are replicable, apply mp sharding to the whole graph if len(non_replicable_dp) == 0: torch.utils.data.graph_settings.apply_sharding( datapipe, worker_info.num_workers, worker_info.worker_id, SHARDING_PRIORITIES.MULTIPROCESSING ) assert dispatching_req_queue is None and dispatching_res_queue is None # 2) There is non-replicable DataPipe. Since we have replaced the lowest common # ancestor by a `_DummyIterDataPipe`, we would only apply mp sharding # to replicable branches that don't have `_DummyIterDataPipe`. else: assert len(non_replicable_dp) == 1 assert not (dispatching_req_queue is None and dispatching_res_queue is None) dispatching_req_queue.cancel_join_thread() # type: ignore[union-attr] non_dispatching_branches = find_non_dispatching_branches(graph) for dp in non_dispatching_branches: torch.utils.data.graph_settings.apply_sharding( dp, worker_info.num_workers, worker_info.worker_id, SHARDING_PRIORITIES.MULTIPROCESSING ) queue_wrapper = communication.iter.QueueWrapper( communication.protocol.IterDataPipeQueueProtocolClient(dispatching_req_queue, dispatching_res_queue) ) dispatch_process_dp = communication.iter._IterateQueueDataPipes([queue_wrapper]) graph = replace_dp(graph, non_replicable_dp[0], dispatch_process_dp) datapipe = list(graph.values())[0][0] if custom_init_fn is not None: datapipe = custom_init_fn(datapipe, worker_info) assert isinstance(datapipe, (IterDataPipe, MapDataPipe)) if worker_prefetch_cnt > 0: datapipe = datapipe.prefetch(worker_prefetch_cnt) return datapipe def _set_global_random_state(seed_generator: SeedGenerator, distributed_shared: bool = False) -> None: py_seed = seed_generator.generate_shared_seed() if distributed_shared else seed_generator.generate_seed() random.seed(py_seed) torch_seed = seed_generator.generate_shared_seed() if distributed_shared else seed_generator.generate_seed() torch.manual_seed(torch_seed) if HAS_NUMPY: # Convert uint64 to uint32 for Numpy np_seed = seed_generator.generate_shared_seed() if distributed_shared else seed_generator.generate_seed() np_seed = np_seed >> 32 numpy.random.seed(np_seed) def process_reset_fn( datapipe: DataPipe, worker_info: WorkerInfo, seed_generator: SeedGenerator, distributed_shared_seed: bool = False, iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]] = None, custom_reset_fn: Optional[Callable[[DataPipe, WorkerInfo, SeedGenerator], DataPipe]] = None, ) -> DataPipe: r""" Based on the distributed shared random seed and worker id, this function is used to reset the random state of the ``DataPipe`` graph and the global random states for ``torch``, ``random`` and ``numpy``. """ # Set global random states _set_global_random_state(seed_generator, distributed_shared=distributed_shared_seed) if distributed_shared_seed: graph = traverse_dps(datapipe) dps = list_dps(graph) set_datapipes_seed(dps, seed_generator=seed_generator, distributed_shared=distributed_shared_seed) else: set_graph_random_seed(datapipe, seed_generator) if iter_reset_fn is not None: datapipe = iter_reset_fn(datapipe) assert isinstance(datapipe, (IterDataPipe, MapDataPipe)) if custom_reset_fn is not None: datapipe = custom_reset_fn(datapipe, worker_info, seed_generator) assert isinstance(datapipe, (IterDataPipe, MapDataPipe)) return datapipe
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. # from multiprocessing.queues import Queue from typing import Dict, List, Optional, Set from torchdata.dataloader2.graph import DataPipe, DataPipeGraph, list_dps, traverse_dps from torchdata.datapipes.iter import IterDataPipe, ShardingRoundRobinDispatcher __all__ = ["_DummyIterDataPipe", "find_lca_round_robin_sharding_dp", "find_non_dispatching_branches"] class _DummyIterDataPipe(IterDataPipe): r""" This DataPipe is a placeholder to be replaced by the ``QueueWrapper`` that connects the worker process for non-replicable DataPipe. """ # TODO: Revert `_DummyIterDataPipe` as the placeholder when `_SerializationWrapper` # can handle mp.Queue. See: https://github.com/pytorch/data/issues/934 # req_queue: Queue # res_queue: Queue def find_lca_round_robin_sharding_dp(graph: DataPipeGraph) -> Optional[DataPipe]: r""" Given the graph of DataPipe generated by ``traverse_dps`` function, return the DataPipe instance that is the lowest common ancestor of all ``sharding_round_robin_dispatch`` DataPipes Note: - If multiple branches share the same source DataPipe and any branch contains a non-replicable DataPipe, the lowest common ancestor of all branches is returned. - If there is any non-replicable DataPipe in a circular-referenced (sub)graph, the whole (sub)graph is treated as non-replicable and the last DataPipe is returned. """ assert len(graph) == 1, "DataPipeGraph should only contain a single output DataPipe" def _is_round_robin_sharding(dp: DataPipe) -> bool: return type(dp) == ShardingRoundRobinDispatcher dps = list_dps(graph) non_replicable_dps: Set[int] = set() for dp in dps: # Skip when it has been visited if id(dp) in non_replicable_dps: continue if _is_round_robin_sharding(dp): parent_dps = list_dps(traverse_dps(dp)) for par_dp in parent_dps: non_replicable_dps.add(id(par_dp)) root_dp_id = list(graph.keys())[0] root_dp, root_graph = graph[root_dp_id] lca_for_subgraph: Dict[int, Optional[DataPipe]] = {} def _get_lca_from_graph(root_dp_id, root_dp, root_graph) -> Optional[DataPipe]: # pyre-ignore if root_dp_id in lca_for_subgraph: return lca_for_subgraph[root_dp_id] if root_dp_id in non_replicable_dps: lca_for_subgraph[root_dp_id] = root_dp return root_dp lca_for_subgraph[root_dp_id] = None non_replicable_parents = [] for dp_id, (dp, src_graph) in root_graph.items(): res = _get_lca_from_graph(dp_id, dp, src_graph) if res is not None: non_replicable_parents.append(res) # `root_dp` becomes the lowest common ancestor of this branch, # if there are more than one unique non-replicable DataPipe prior to it. if len(non_replicable_parents) > 0: # One unique non-replicable DataPipe if len(non_replicable_parents) == 1 or all( dp == non_replicable_parents[0] for dp in non_replicable_parents ): lca_for_subgraph[root_dp_id] = non_replicable_parents[0] # Multiple non-replicable DataPipes else: lca_for_subgraph[root_dp_id] = root_dp return lca_for_subgraph[root_dp_id] return _get_lca_from_graph(root_dp_id, root_dp, root_graph) def find_non_dispatching_branches(graph: DataPipeGraph) -> List[DataPipe]: r""" Given the graph of DataPipe generated by ``traverse_dps`` function, return the DataPipe instances that don't have ``_DummyIterDataPipe`` (dipatching process) in the prior graph. """ assert len(graph) == 1, "DataPipeGraph should only contain a single output DataPipe" dps: List[DataPipe] = [] non_dispatching_branches: Dict[int, bool] = {} root_dp_id = list(graph.keys())[0] root_dp, root_graph = graph[root_dp_id] def _is_non_dispatching(root_dp_id, root_dp, root_graph) -> bool: # pyre-ignore if root_dp_id in non_dispatching_branches: return non_dispatching_branches[root_dp_id] if type(root_dp) == _DummyIterDataPipe: non_dispatching_branches[root_dp_id] = False return False non_dispatching_branches[root_dp_id] = True for dp_id, (dp, src_graph) in root_graph.items(): if not _is_non_dispatching(dp_id, dp, src_graph): non_dispatching_branches[root_dp_id] = False # Do not break to go through all children if not non_dispatching_branches[root_dp_id]: # All children should have been added to non_dispatching_branches already for dp_id, (dp, _) in root_graph.items(): if non_dispatching_branches[dp_id]: dps.append(dp) return non_dispatching_branches[root_dp_id] if _is_non_dispatching(root_dp_id, root_dp, root_graph): dps.append(root_dp) return dps
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from torchdata.dataloader2.utils.worker import process_init_fn, process_reset_fn, WorkerInfo __all__ = [ "WorkerInfo", "process_init_fn", "process_reset_fn", ] assert __all__ == sorted(__all__)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import threading import time class LocalQueue: ops = 0 stored = 0 uid = 0 empty = 0 def __init__(self, name="unnamed"): self.items = [] self.name = name self.uid = LocalQueue.uid LocalQueue.uid += 1 def put(self, item, block=True): LocalQueue.ops += 1 LocalQueue.stored += 1 self.items.append(item) def get(self, block=True, timeout=0): # TODO(622): Add support of block and timeout arguments LocalQueue.ops += 1 if not len(self.items): LocalQueue.empty += 1 raise Exception("LocalQueue is empty") LocalQueue.stored -= 1 return self.items.pop() class ThreadingQueue: def __init__(self, name="unnamed"): self.lock = threading.Lock() self.items = [] self.name = name def put(self, item, block=True): with self.lock: self.items.append(item) def get(self, block=True, timeout=0): # TODO(623): Add support of block and timeout arguments while True: with self.lock: if len(self.items) > 0: return self.items.pop() if not block: raise Exception("Not available") # TODO(624): Figure out what to do if nothing in the queue time.sleep(0.000001)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import time import types import warnings from collections import deque from itertools import cycle from typing import Callable, Deque, List, Optional from torch.utils.data import IterDataPipe from torchdata._utils import ExceptionWrapper from torchdata.dataloader2 import communication from torchdata.dataloader2.graph import DataPipe, find_dps, list_dps, traverse_dps from torchdata.dataloader2.random import SeedGenerator from torchdata.dataloader2.utils import process_reset_fn DEFAULT_NON_BLOCKING_SLEEP = 0.001 __all__ = [ "DataPipeBehindQueues", "EnsureNonBlockingDataPipe", "InvalidStateResetRequired", "NonBlocking", "NotAvailable", "QueueWrapper", "default_not_available_hook", ] def default_not_available_hook(): time.sleep(DEFAULT_NON_BLOCKING_SLEEP) class NotAvailable(Exception): pass class InvalidStateResetRequired(Exception): """ Returned by DataPipe when it is expecting to get reset request, for example RouterDataPipe expecting all workers to request reset. """ pass class TerminateRequired(Exception): """ Returned by DataPipe when it is expecting to get terminate request, for example it got terminate request from other source and at the process of stopping. """ pass class NonBlocking(IterDataPipe): not_available_hook = default_not_available_hook def __iter__(self): self.reset_iterator() return self def __next__(self): while True: try: return self.nonblocking_next() except NotAvailable: if NonBlocking.not_available_hook is not None: NonBlocking.not_available_hook() def nonblocking_next(self): raise NotImplementedError("nonblocking_next is not implemented for %s" % self.__class__) def reset_iterator(self): raise NotImplementedError("reset_iterator is not implemented for %s" % self.__class__) @staticmethod def register_not_available_hook(hook_function): NonBlocking.not_available_hook = hook_function def EnsureNonBlockingDataPipe(validated_datapipe): if not isinstance(validated_datapipe, IterDataPipe): raise Exception("Not Iterable DataPipe " + str(validated_datapipe.__class__)) if isinstance(validated_datapipe, NonBlocking): return validated_datapipe if not hasattr(validated_datapipe, "_as_iterator"): validated_datapipe._as_iterator = None # type: ignore[attr-defined] if not hasattr(validated_datapipe, "nonblocking_next"): def nonblocking_next(self): if self._as_iterator is None: self._as_iterator = iter(self) return next(self._as_iterator) validated_datapipe.nonblocking_next = types.MethodType( # type: ignore[attr-defined] nonblocking_next, validated_datapipe ) if not hasattr(validated_datapipe, "reset_iterator"): def reset_iterator(self): self._as_iterator = None validated_datapipe.reset_iterator = types.MethodType( # type: ignore[attr-defined] reset_iterator, validated_datapipe ) return validated_datapipe def _sync_recv(request_counter, msg): if request_counter is not None: request_counter.increment(msg) # Make sure all loops have reached while not request_counter.is_reached(msg): yield True def _sync_resp(request_counter, msg): if request_counter is not None: request_counter.reset(msg) while request_counter.is_reached(msg): yield True def DataPipeBehindQueues( source_datapipe, protocol, process_name, loop_id, worker_info, custom_reset_fn, blocking_request_get=False, request_counter=None, ): """ Indefinitely iterates over ``req_queue`` and passing values from source_datapipe to ``res_queue``. Request Types: `ResetEpoch` - Call the `reset_epoch_fn` on the protocol's DataPipe and reset DataPipe iterator `Terminate` - exits the infinite while loop `GetNext` - returns the value from the DataPipe, and handles exceptions such as `StopIteration` as appropriate `Limit` - Set limit to the DataPipe graph `Pause` - Pause the DataPipe graph `Resume` - Resume the DataPipe graph Args: source_datapipe: DataPipe protocol: ``IterDataPipeQueueProtocolServer`` that contains ``req_queue`` and ``res_queue`` process_name: Process name loop_id: Loop ID worker_info: Worker info include worker id and number of workers custom_reset_fn: function to call after each request is received blocking_request_get: determines if ``protocol.get_new_request`` will block request_counter: Optional counter to synchronize all loops that have received requests for reset/limit/pause/resume within the dispatching process. It would guarantee that all loops starts to reset iterator and get next element at the same time. """ if not isinstance(protocol, communication.protocol.IterDataPipeQueueProtocolServer): raise Exception("Expecting IterDataPipeQueueProtocolServer, got", protocol) source_datapipe = EnsureNonBlockingDataPipe(source_datapipe) forever = True while forever: try: # TODO: Non-blocking call is extremely slow here for python.mp, need to figure out a good workaround request = protocol.get_new_request(block=blocking_request_get) except communication.protocol.EmptyQueue: yield True continue # TODO: Handle Error caused by requests other than GetNext and send it to main process if isinstance(request, communication.messages.ResetEpochRequest): yield from _sync_recv(request_counter, "reset_epoch") distributed_shared_seed = request_counter is not None if request_counter is None or loop_id == 0: seed_generator = request.seed_generator iter_reset_fn = request.iter_reset_fn dispatching_dps = find_dps(traverse_dps(source_datapipe), _IterateQueueDataPipes) for dp in dispatching_dps: dp.reset_epoch(seed_generator, iter_reset_fn) source_datapipe = process_reset_fn( source_datapipe, worker_info, seed_generator, distributed_shared_seed, iter_reset_fn, custom_reset_fn, ) source_datapipe.reset_iterator() yield from _sync_resp(request_counter, "reset_epoch") protocol.response_reset_epoch() yield True # Returns control elif isinstance(request, communication.messages.LimitRequest): yield from _sync_recv(request_counter, "limit") if request_counter is None or loop_id == 0: num_batches = request.num_batches limit_fn = request.limit_fn worker_num_batches = num_batches if request.worker_num_batches is None else request.worker_num_batches # Send limit to the worker/dispatching process dispatching_dps = find_dps(traverse_dps(source_datapipe), _IterateQueueDataPipes) for dp in dispatching_dps: dp.request_limit(num_batches, limit_fn, worker_num_batches) if limit_fn is not None: # Set limit to the DataPipe graph in worker/dispatching process source_datapipe = limit_fn(source_datapipe, worker_num_batches) yield from _sync_resp(request_counter, "limit") protocol.response_limit() yield True # Returns control elif isinstance(request, communication.messages.PauseRequest): yield from _sync_recv(request_counter, "pause") if request_counter is None or loop_id == 0: graph = traverse_dps(source_datapipe) dp_list = list_dps(graph) for dp in dp_list: if hasattr(dp, "pause") and callable(dp.pause): dp.pause() dispatching_dps = find_dps(graph, _IterateQueueDataPipes) for dp in dispatching_dps: dp.request_pause(request.pause_fn) if request.pause_fn is not None: source_datapipe = request.pause_fn(source_datapipe) yield from _sync_resp(request_counter, "pause") protocol.response_pause() yield True # Returns control elif isinstance(request, communication.messages.ResumeRequest): yield from _sync_recv(request_counter, "resume") if request_counter is None or loop_id == 0: if request.resume_fn is not None: source_datapipe = request.resume_fn(source_datapipe) graph = traverse_dps(source_datapipe) # Send resume to the dispatching process dispatching_dps = find_dps(graph, _IterateQueueDataPipes) for dp in dispatching_dps: dp.request_resume(request.resume_fn) for dp in reversed(list_dps(graph)): if hasattr(dp, "resume") and callable(dp.resume): dp.resume() yield from _sync_resp(request_counter, "resume") protocol.response_resume() yield True # Returns control elif isinstance(request, communication.messages.TerminateRequest): forever = False dispatch_dps = find_dps(traverse_dps(source_datapipe), _IterateQueueDataPipes) for dispatch_dp in dispatch_dps: dispatch_dp.request_terminate() protocol.response_terminate() yield True # Returns control elif isinstance(request, communication.messages.GetNextRequest): while forever: if protocol.is_paused(): protocol.response_stop_iteration() warnings.warn( "Cannot `GetNext` after `Pause` has been called. " "`Resume` must be called first before additional elements can be yielded." ) yield True break try: value = source_datapipe.nonblocking_next() except NotAvailable: yield True continue except StopIteration: protocol.response_stop_iteration() yield True break except InvalidStateResetRequired: protocol.response_invalid_state() yield True break except Exception: exc = ExceptionWrapper(where=f"in {process_name} {loop_id}") protocol.response_worker_exception(exc) return protocol.response_next(value) yield True # Returns control break else: raise Exception("Unrecognized type of request received", request) class QueueWrapper(NonBlocking): """ Creates an IterDataPipe which sends requests and reads the response from the DataLoader.Queue. The input is a ProtocolClient that contains request queue and response queue. """ def __init__(self, protocol, response_wait_time=0.00001): if not isinstance(protocol, communication.protocol.IterDataPipeQueueProtocolClient): raise Exception("Got", protocol) self.protocol = protocol self.counter = 0 self._stop_iteration = False self._response_wait_time = response_wait_time def request_reset_epoch(self, seed_generator, iter_reset_fn): self._stop_iteration = False self.counter = 0 self.protocol.request_reset_epoch(seed_generator, iter_reset_fn) def _get_response(self, fn_name) -> None: assert hasattr(self.protocol, fn_name) and callable(getattr(self.protocol, fn_name)) get_response_fn = getattr(self.protocol, fn_name) while True: try: get_response_fn() break except communication.protocol.EmptyQueue: if NonBlocking.not_available_hook is not None: NonBlocking.not_available_hook() def get_reset_epoch_response(self) -> None: self._get_response("get_response_reset_epoch") def request_limit( self, num_batches: Optional[int], limit_fn: Optional[Callable[[DataPipe, Optional[int]], DataPipe]] = None, worker_num_batches: Optional[int] = None, ) -> None: self.protocol.request_limit(num_batches, limit_fn, worker_num_batches) def get_limit_response(self) -> None: self._get_response("get_response_limit") def request_pause(self, pause_fn: Optional[Callable[[DataPipe], DataPipe]] = None) -> None: self.protocol.request_pause(pause_fn) def get_pause_response(self) -> None: self._get_response("get_response_pause") def request_resume(self, resume_fn: Optional[Callable[[DataPipe], DataPipe]] = None) -> None: self.protocol.request_resume(resume_fn) def get_resume_response(self) -> None: self._get_response("get_response_resume") def nonblocking_next(self): if self._stop_iteration: raise Exception("`next` or `nonblocking_next` called after receiving StopIteration") if self.protocol.can_take_request(): self.protocol.request_next() try: response = self.protocol.get_response_next(block=True, timeout=self._response_wait_time) except communication.protocol.EmptyQueue: raise NotAvailable if isinstance(response, communication.messages.StopIterationResponse): self._stop_iteration = True raise StopIteration if isinstance(response, communication.messages.InvalidStateResponse): raise NotAvailable return response.value class _IterateQueueDataPipes(IterDataPipe): r""" Takes in ``QueueWrapper``s and iterates through them in a round-robin manner to get batches one-by-one. Typically, each worker has one ``QueueWrapper``. """ def __init__(self, datapipes): # TODO(VitalyFedyunin): Consider combining _IterateQueueDataPipes and QueueWrapper # into one class, which supports any number of queues. for dp in datapipes: if not isinstance(dp, communication.iter.QueueWrapper): raise Exception("Source datapipes should be an instance of iter.QueueWrapper") self.datapipes = datapipes self._num_processes = len(datapipes) self.res_buffers: List[Deque] = [deque() for _ in range(len(datapipes))] self._terminated: bool = False self._limit: Optional[int] = None self._request_cnt: int = 0 def __iter__(self): disabled_pipe = [False] * len(self.datapipes) cnt_disabled_pipes = 0 total_req_cnt = 0 req_idx_cycle = cycle(range(self._num_processes)) req_idx = next(req_idx_cycle) total_res_cnt = 0 res_idx_cycle = cycle(range(self._num_processes)) res_idx = next(res_idx_cycle) while cnt_disabled_pipes < self._num_processes and not self._terminated: # Send a round of requests until limit is reached (limit is smaller than total pipes) for _ in range(self._num_processes): if not disabled_pipe[req_idx]: self.datapipes[req_idx].protocol.request_next() self._request_cnt += 1 total_req_cnt += 1 req_idx = next(req_idx_cycle) if self._limit is not None and self._request_cnt == self._limit: break # Receive responses from each of the workers with pending requests while total_res_cnt < total_req_cnt and cnt_disabled_pipes < self._num_processes: disabled = disabled_pipe[res_idx] if not disabled: if len(self.res_buffers[res_idx]): response = self.res_buffers[res_idx].popleft() else: while not self._terminated: try: # Using non-blocking next to make sure termination reached response = self.datapipes[res_idx].protocol.get_response_next(block=False) break except communication.protocol.EmptyQueue: time.sleep(DEFAULT_NON_BLOCKING_SLEEP) if isinstance(response, communication.messages.InvalidStateResponse): raise communication.iter.InvalidStateResetRequired if isinstance(response, communication.messages.TerminateResponse): raise communication.iter.TerminateRequired if isinstance(response, communication.messages.WorkerExceptionResponse): response.exc.reraise() if self._terminated: break if isinstance(response, communication.messages.StopIterationResponse): disabled_pipe[res_idx] = True cnt_disabled_pipes += 1 disabled = True req_idx = next(req_idx_cycle) else: # Only request if buffer is empty and has not reached the limit if len(self.res_buffers[res_idx]) == 0 and ( self._limit is None or self._request_cnt < self._limit ): self.datapipes[req_idx].protocol.request_next() req_idx = next(req_idx_cycle) self._request_cnt += 1 total_req_cnt += 1 total_res_cnt += 1 res_idx = next(res_idx_cycle) if not disabled: yield response.value def reset_epoch( self, seed_generator: SeedGenerator, iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]], ): self._request_cnt = 0 for dp in self.datapipes: dp.protocol.discard_existing_request() for worker_id, dp in enumerate(self.datapipes): worker_seed_generator = seed_generator.spawn(worker_id) dp.request_reset_epoch(worker_seed_generator, iter_reset_fn) for dp in self.datapipes: dp.get_reset_epoch_response() def request_pause(self, pause_fn: Optional[Callable[[DataPipe], DataPipe]] = None) -> None: # Store results of pending requests for idx, dp in enumerate(self.datapipes): if dp.protocol.waiting_for_response(): res = dp.protocol.get_response_next(block=True) self.res_buffers[idx].append(res) for dp in self.datapipes: dp.request_pause(pause_fn) for dp in self.datapipes: dp.get_pause_response() def request_resume(self, resume_fn: Optional[Callable[[DataPipe], DataPipe]] = None) -> None: for dp in self.datapipes: dp.request_resume(resume_fn) for dp in self.datapipes: dp.get_resume_response() self._request_cnt = 0 def request_limit( self, num_batches: Optional[int], limit_fn: Optional[Callable[[DataPipe, Optional[int]], DataPipe]] = None, worker_num_batches: Optional[int] = None, ) -> None: self._limit = num_batches if worker_num_batches is None else worker_num_batches avg_num_batches = num_batches if num_batches is None else num_batches // self._num_processes batch_remainder = 0 if num_batches is None else num_batches % self._num_processes for idx, dp in enumerate(self.datapipes): ext_batch = 1 if batch_remainder > idx else 0 wnb = None if avg_num_batches is None or worker_num_batches is not None else avg_num_batches + ext_batch dp.request_limit(num_batches, limit_fn, wnb) for dp in self.datapipes: dp.get_limit_response() def request_terminate(self): self._terminated = True for dp in self.datapipes: dp.protocol.discard_existing_request() for dp in self.datapipes: dp.protocol.request_terminate()
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from queue import Empty as EmptyException from torchdata.dataloader2 import communication class Protocol: __slots__ = ("request_queue", "response_queue") def __init__(self, request_queue, response_queue): self.request_queue = request_queue self.response_queue = response_queue class ProtocolClient(Protocol): """ ProtocolClient takes charge of putting requests into req_queue and returning results from res_queue. """ _req_sent = None def __init__(self, request_queue, response_queue): self.request_queue = request_queue self.response_queue = response_queue self._req_sent = None def can_take_request(self): return self._req_sent is None def waiting_for_response(self): return self._req_sent is not None def request_sent(self, request=True): if not self.can_take_request(): raise Exception("Protocol only supports one request in the Queue") self._req_sent = request def request_served(self, result=None): if not self.waiting_for_response(): raise Exception("Expected no pending requests, but something got served", result) self._req_sent = None def discard_existing_request(self): if self.waiting_for_response(): response = self.response_queue.get(block=True) self.request_served(response) def request_limit(self, num_batches, limit_fn=None, worker_num_batches=None): if not self.can_take_request(): raise Exception("Can not `limit` while we are still waiting response for previous request") request = communication.messages.LimitRequest(num_batches, limit_fn, worker_num_batches) self.request_queue.put(request) self.request_sent(request) def request_pause(self, pause_fn=None): if not self.can_take_request(): raise Exception("Can not `pause` while we are still waiting response for previous request") request = communication.messages.PauseRequest(pause_fn) self.request_queue.put(request) self.request_sent(request) def request_resume(self, resume_fn=None): if not self.can_take_request(): raise Exception("Can not `resume` while we are still waiting response for previous request") request = communication.messages.ResumeRequest(resume_fn) self.request_queue.put(request) self.request_sent(request) def request_terminate(self): r""" Drop the existing request and send TerminateRequest directly """ if not self.can_take_request(): self._req_sent = None request = communication.messages.TerminateRequest() self.request_queue.put(request) self.request_sent(request) class ProtocolServer(Protocol): """ ProtocolServer takes charge of getting requests from req_queue and fetching data from source datapipe. """ # TODO(966): Update the exceptions raised in this class to be more specific _req_received = None _paused = False # When `True`, prevents `GetNext` in `DataPipeBehindQueues`. def __init__(self, request_queue, response_queue): self.request_queue = request_queue self.response_queue = response_queue self._req_received = None self._paused = False def is_paused(self): return self._paused def have_pending_request(self): return self._req_received is not None def get_new_request(self, block=False): if self.have_pending_request(): raise Exception("Trying to get next request, while having one un-served") try: response = self.request_queue.get(block=block) except EmptyException: raise EmptyQueue("queue is empty") self._req_received = response return response # TODO(626): Validate supported requests def response_terminate(self): if not self.have_pending_request(): raise Exception("Attempting to reply with pending request") if not isinstance(self._req_received, communication.messages.TerminateRequest): raise Exception("Replaying with `terminate` status to other type of message") self.response_queue.put(communication.messages.TerminateResponse()) self._req_received = None def response_reset_epoch(self): if not self.have_pending_request(): raise Exception("Attempting to reply with pending request") if not isinstance(self._req_received, communication.messages.ResetEpochRequest): raise Exception("Replaying with `reset_epoch` status to other type of message") self.response_queue.put(communication.messages.ResetEpochResponse()) self._req_received = None def response_limit(self): if not self.have_pending_request(): raise Exception("Attempting to reply with pending request") if not isinstance(self._req_received, communication.messages.LimitRequest): raise Exception("Replaying with `limit` status to other type of message") self.response_queue.put(communication.messages.LimitResponse()) self._req_received = None def response_pause(self): if not self.have_pending_request(): raise Exception("Attempting to reply with pending request") if not isinstance(self._req_received, communication.messages.PauseRequest): raise Exception("Replaying with `pause` status to other type of message") self._paused = True self.response_queue.put(communication.messages.PauseResponse()) self._req_received = None def response_resume(self): if not self.have_pending_request(): raise Exception("Attempting to reply with pending request") if not isinstance(self._req_received, communication.messages.ResumeRequest): raise Exception("Replaying with `resume` status to other type of message") self._paused = False self.response_queue.put(communication.messages.ResumeResponse()) self._req_received = None def response_worker_exception(self, exception): if not self.have_pending_request(): raise Exception("Attempting to reply with pending request") self.response_queue.put(communication.messages.WorkerExceptionResponse(exception)) self._req_received = None class MapDataPipeQueueProtocolServer(ProtocolServer): def response_item(self, key, value): if not self.have_pending_request(): raise Exception("Attempting to reply with pending request") self.response_queue.put(communication.messages.GetItemResponse(key, value)) self._req_received = None def response_len(self, size): if not self.have_pending_request(): raise Exception("Attempting to reply with pending request") self.response_queue.put(communication.messages.LenResponse(size)) self._req_received = None def response_index_out_of_bound(self): if not self.have_pending_request(): raise Exception("Attempting to reply with pending request") self.response_queue.put(communication.messages.StopIterationResponse()) self._req_received = None class MapDataPipeQueueProtocolClient(ProtocolClient): def request_len(self): if not self.can_take_request(): raise Exception("Can not request len while we are still waiting response for previous request") request = communication.messages.LenRequest() self.request_queue.put(request) self.request_sent(request) def request_reset_epoch(self, seed_generator, iter_reset_fn): if not self.can_take_request(): raise Exception("Can not reset while we are still waiting response for previous request") request = communication.messages.ResetEpochRequest(seed_generator, iter_reset_fn) self.request_queue.put(request) self.request_sent(request) def request_item(self, index): if not self.can_take_request(): raise Exception("Can not request item while we are still waiting response for previous request") request = communication.messages.GetItemRequest(index) self.request_queue.put(request) self.request_sent(request) def get_response_len(self, block=False, timeout=None): if not self.waiting_for_response(): raise Exception("Can not expect any response without submitted request") try: response = self.response_queue.get(block=block, timeout=timeout) except TimeoutError: raise EmptyQueue("queue is empty") self.request_served(response) if not isinstance(response, communication.messages.LenResponse): raise Exception("Invalid response received") return response def get_response_item(self, block=False, timeout=None): if not self.waiting_for_response(): raise Exception("Can not expect any response without submitted request") try: response = self.response_queue.get(block=block, timeout=timeout) except TimeoutError: raise EmptyQueue("queue is empty") self.request_served(response) # if not isinstance(response, communication.messages.GetItemResponse): # raise Exception('Invalid response received') return response class EmptyQueue(Exception): pass class IterDataPipeQueueProtocolServer(ProtocolServer): def response_next(self, value): if not self.have_pending_request(): raise Exception("Attempting to reply with pending request") self.response_queue.put(communication.messages.GetNextResponse(value)) self._req_received = None def response_stop_iteration(self): if not self.have_pending_request(): raise Exception("Attempting to reply with pending request") self.response_queue.put(communication.messages.StopIterationResponse()) self._req_received = None def response_invalid_state(self): if not self.have_pending_request(): raise Exception("Attempting to reply with pending request") self.response_queue.put(communication.messages.InvalidStateResponse()) self._req_received = None class IterDataPipeQueueProtocolClient(ProtocolClient): def request_reset_epoch(self, seed_generator, iter_reset_fn): if not self.can_take_request(): raise Exception("Can not reset while we are still waiting response for previous request") request = communication.messages.ResetEpochRequest(seed_generator, iter_reset_fn) self.request_queue.put(request) self.request_sent(request) def request_next(self): if not self.can_take_request(): raise Exception("Can not request next item while we are still waiting response for previous request") request = communication.messages.GetNextRequest() self.request_queue.put(request) self.request_sent(request) def get_response_reset_epoch(self, block=False): try: response = self.response_queue.get(block=block) except EmptyException: raise EmptyQueue("queue is empty") self.request_served(response) if not isinstance(response, communication.messages.ResetEpochResponse): raise Exception("Invalid response received") def get_response_limit(self, block=False): try: response = self.response_queue.get(block=block) except EmptyException: raise EmptyQueue("queue is empty") self.request_served(response) if not isinstance(response, communication.messages.LimitResponse): raise Exception("Invalid response received when expecting `LimitResponse`") def get_response_pause(self, block=False): try: response = self.response_queue.get(block=block) except EmptyException: raise EmptyQueue("queue is empty") self.request_served(response) if not isinstance(response, communication.messages.PauseResponse): raise Exception("Invalid response received when expecting `PauseResponse`") def get_response_resume(self, block=False): try: response = self.response_queue.get(block=block) except EmptyException: raise EmptyQueue("queue is empty") self.request_served(response) if not isinstance(response, communication.messages.ResumeResponse): raise Exception("Invalid response received when expecting `ResumeResponse`") def get_response_next(self, block=False, timeout=None): if not self.waiting_for_response(): raise Exception("Can not expect any response without submitted request") try: response = self.response_queue.get(block=block, timeout=timeout) except EmptyException: raise EmptyQueue("queue is empty") self.request_served(response) # TODO(629): Add possible response types validation here return response
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from . import eventloop, iter, map, messages, protocol, queue
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import time from itertools import zip_longest from typing import Dict, List import torch from torch.utils.data import IterDataPipe, MapDataPipe from torchdata.dataloader2 import communication from torchdata.dataloader2.graph._serialization import extract_wrapper try: import dill # XXX: By default, dill writes the Pickler dispatch table to inject its # own logic there. This globally affects the behavior of the standard library # pickler for any user who transitively depends on this module! # Undo this extension to avoid altering the behavior of the pickler globally. dill.extend(use_dill=False) HAS_DILL = True except ImportError: HAS_DILL = False __all__ = [ "DataPipeToQueuesLoop", "CreateProcessForDataPipeline", "CreateProcessForMultipleDataPipelines", ] class _RequestCounter: r""" _RequestCounter is used to synchronize between eventloops within the dispatching process. It guarantees to only handle the limit/pause/reset_epoch/resume request util all loops have received the same message. """ exp_cnt: int _keys: List[str] = ["limit", "pause", "reset_epoch", "resume"] _cnt: Dict[str, int] _reached: Dict[str, bool] def __init__(self, exp_cnt: int): self.exp_cnt = exp_cnt self._cnt = {k: 0 for k in self._keys} self._reached = {k: False for k in self._keys} def increment(self, key: str) -> None: assert key in self._reached self._cnt[key] += 1 assert self._cnt[key] <= self.exp_cnt if self._cnt[key] == self.exp_cnt: self._reached[key] = True def is_reached(self, key: str) -> bool: assert key in self._reached return self._reached[key] def reset(self, key: str) -> None: assert key in self._reached and self._reached[key] assert self._cnt[key] >= 1 self._cnt[key] -= 1 if self._cnt[key] == 0: self._reached[key] = False def MultipleDataPipesToQueuesLoop( source_datapipes, req_queues, res_queues, process_name, worker_info, call_on_process_init=None, custom_reset_fn=None ): r""" Set the appropriate pipes and protocol server type, and create a loop over multiple datapipes with the protocol server in a non-blocking manner. Args: source_datapipe: DataPipe being iterated in the dispatching process req_queue: Multiprocessing queue providing requests from the worker process res_queue: Multiprocessing queue sending results to the worker process process_name: The name of process (used for logging and exception handling) worker_info: Worker information (worker id and number of workers) call_on_process_init: Not allowed by dispatching process for now. custom_reset_fn: Optional callable function to reset the DataPipe. """ assert call_on_process_init is None, "``MultipleDataPipesToQueuesLoop`` does not support call_on_process_init" num_loops = len(source_datapipes) assert num_loops == len(req_queues) and num_loops == len( res_queues ), "``MultipleDataPipesToQueuesLoop`` requires the same number of datapipes, request queues and response queues" torch.set_num_threads(1) loops = [] request_counter = _RequestCounter(num_loops) loop_id = 0 for source_datapipe, req_queue, res_queue in zip(source_datapipes, req_queues, res_queues): loops.append( _create_datapipe_queue_loop( source_datapipe, req_queue, res_queue, process_name, loop_id, worker_info, custom_reset_fn, blocking_request_get=False, request_counter=request_counter, ) ) # Non-blocking request with reset counters loop_id += 1 # Using `zip_longest` to guarantee the process is terminated only when # all loops have received `TerminateRequest` for _ in zip_longest(*loops): # time.sleep to make Python switch context to get/send message in mp.Queue # TODO(ejguan): Microbenchmarked a synthetic non-replicable case that sleep perform similar to pass. # A more comprehensive benchmarking in real-world scneario is needed. time.sleep(0) def DataPipeToQueuesLoop( source_datapipe, req_queue, res_queue, process_name, worker_info, call_on_process_init=None, custom_reset_fn=None ): r""" Initialize with the given init function, set the appropriate pipe and protocol server type, and create a loop with the protocol server. Args: source_datapipe: DataPipe being iterated in the worker process req_queue: Multiprocessing queue providing requests from the main process res_queue: Multiprocessing queue sending results to the main process process_name: The name of process (used for logging and exception handling) worker_info: Worker information (worker id and number of workers) call_on_process_init: Callable function will be called at the time of worker process initialization. Users can provide it to modify the DataPipe grpah in the worker process. custom_reset_fn: Optional callable function to reset the DataPipe. """ # Extract Serialization Wrapper source_datapipe = extract_wrapper(source_datapipe) if call_on_process_init is not None: source_datapipe = call_on_process_init(source_datapipe) torch.set_num_threads(1) loop = _create_datapipe_queue_loop( source_datapipe, req_queue, res_queue, process_name, worker_info.worker_id, worker_info, custom_reset_fn, blocking_request_get=True, ) for _ in loop: pass def _create_datapipe_queue_loop( source_datapipe, req_queue, res_queue, process_name, loop_id, worker_info, custom_reset_fn=None, blocking_request_get=True, request_counter=None, ): if isinstance(source_datapipe, IterDataPipe): pipe_type = communication.iter protocol_type = communication.protocol.IterDataPipeQueueProtocolServer elif isinstance(source_datapipe, MapDataPipe): pipe_type = communication.map # type: ignore[misc] protocol_type = communication.protocol.MapDataPipeQueueProtocolServer # type: ignore[assignment] else: raise Exception("Only supports IterDataPipe or MapDataPipe, got", source_datapipe) return pipe_type.DataPipeBehindQueues( source_datapipe, protocol_type(req_queue, res_queue), process_name=process_name, loop_id=loop_id, worker_info=worker_info, custom_reset_fn=custom_reset_fn, blocking_request_get=blocking_request_get, request_counter=request_counter, ) def CreateProcessForDataPipeline( multiprocessing_ctx, datapipe, process_name, worker_info, call_on_process_init=None, custom_reset_fn=None ): r""" Given a DataPipe, creates a new process with ``DataPipeToQueuesLoop`` as target, and returns ``(process, req_queue, res_queue)``. """ req_queue = multiprocessing_ctx.Queue() res_queue = multiprocessing_ctx.Queue() process = multiprocessing_ctx.Process( target=DataPipeToQueuesLoop, args=(datapipe, req_queue, res_queue, process_name, worker_info, call_on_process_init, custom_reset_fn), ) return process, req_queue, res_queue def CreateProcessForMultipleDataPipelines( multiprocessing_ctx, datapipes, process_name, worker_info, custom_reset_fn=None ): r""" Given a DataPipe, creates a new process with ``MultipleDataPipesToQueuesLoop`` as target, and returns ``(process, [req_queue_0, ...], [res_queue_0, ...])``. """ req_queues = [] res_queues = [] for _ in datapipes: req_queues.append(multiprocessing_ctx.Queue()) res_queues.append(multiprocessing_ctx.Queue()) process = multiprocessing_ctx.Process( target=MultipleDataPipesToQueuesLoop, args=(datapipes, req_queues, res_queues, process_name, worker_info, custom_reset_fn), ) return process, req_queues, res_queues
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import time import types from torch.utils.data import MapDataPipe from torchdata._utils import ExceptionWrapper from torchdata.dataloader2 import communication from torchdata.dataloader2.utils import process_reset_fn DEFAULT_NON_BLOCKING_SLEEP = 0.001 __all__ = [ "DataPipeBehindQueues", "EnsureNonBlockingMapDataPipe", "NonBlockingMap", "NotAvailable", "QueueWrapperForMap", "default_not_available_hook", ] def default_not_available_hook(): time.sleep(DEFAULT_NON_BLOCKING_SLEEP) class NotAvailable(Exception): pass class NonBlockingMap(MapDataPipe): not_available_hook = default_not_available_hook def __getitem__(self, index): while True: try: return self.nonblocking_getitem(index) except NotAvailable: if NonBlockingMap.not_available_hook is not None: NonBlockingMap.not_available_hook() def __len__(self): try: return self.nonblocking_len() except NotAvailable: if NonBlockingMap.not_available_hook is not None: NonBlockingMap.not_available_hook() def nonblocking_len(self): raise NotImplementedError("nonblocking_len is not implemented for %s" % self.__class__) def nonblocking_getitem(self, index): raise NotImplementedError("nonblocking_getitem is not implemented for %s" % self.__class__) @staticmethod def register_not_available_hook(hook_function): NonBlockingMap.not_available_hook = hook_function def EnsureNonBlockingMapDataPipe(validated_datapipe): if not isinstance(validated_datapipe, MapDataPipe): raise Exception(f"Not Map DataPipe - got {validated_datapipe.__class__}") if isinstance(validated_datapipe, NonBlockingMap): return validated_datapipe if not hasattr(validated_datapipe, "nonblocking_len"): def nonblocking_len(self): return self.__len__() validated_datapipe.nonblocking_len = types.MethodType( # type: ignore[attr-defined] nonblocking_len, validated_datapipe ) if not hasattr(validated_datapipe, "nonblocking_getitem"): def nonblocking_getitem(self, index): return self.__getitem__(index) validated_datapipe.nonblocking_getitem = types.MethodType( # type: ignore[attr-defined] nonblocking_getitem, validated_datapipe ) return validated_datapipe def DataPipeBehindQueues( source_datapipe, protocol, process_name, loop_id, worker_info, custom_reset_fn, blocking_request_get=False, request_counter=None, ): """ Indefinitely iterates over req_queue and passing values from source_datapipe to res_queue. Args: source_datapipe: DataPipe protocol: ``MapDataPipeQueueProtocolServer`` that contains ``req_queue`` and ``res_queue`` process_name: Process name loop_id: Loop ID worker_info: Worker info include worker id and number of workers custom_reset_fn: function to call after each request is received blocking_request_get: determines if ``protocol.get_new_request`` will block """ if not isinstance(protocol, communication.protocol.MapDataPipeQueueProtocolServer): raise Exception("Expecting MapDataPipeQueueProtocolServer, got", protocol) source_datapipe = EnsureNonBlockingMapDataPipe(source_datapipe) forever = True while forever: try: # TODO: non-blocking call is extremely slow here for python.mp, need to figure out a good workaround request = protocol.get_new_request(block=blocking_request_get) except communication.protocol.EmptyQueue: yield True continue if isinstance(request, communication.messages.ResetEpochRequest): distributed_shared_seed = request_counter is not None source_datapipe = process_reset_fn( source_datapipe, worker_info, request.seed_generator, distributed_shared_seed, request.iter_reset_fn, custom_reset_fn, ) protocol.response_reset_epoch() elif isinstance(request, communication.messages.TerminateRequest): forever = False protocol.response_terminate() elif isinstance(request, communication.messages.LenRequest): size = source_datapipe.nonblocking_len() protocol.response_len(size) elif isinstance(request, communication.messages.GetItemRequest): while forever: try: value = source_datapipe.nonblocking_getitem(request.key) except NotAvailable: yield True continue except IndexError: # Alternatively, we can just allow the underlying DataPipe to throw an exception? protocol.response_index_out_of_bound() yield True break except Exception: exc = ExceptionWrapper(where=f"in {process_name} {loop_id}") protocol.response_worker_exception(exc) break protocol.response_item(request.key, value) yield True # Returns control break else: raise Exception("Unrecognized type of request received", request) class QueueWrapperForMap(NonBlockingMap): """ Creates map.DataPipe which reads data from the DataLoader.Queue """ def __init__(self, protocol, response_wait_time=0.00001): if not isinstance(protocol, communication.protocol.MapDataPipeQueueProtocolClient): raise Exception("Got", protocol) self.protocol = protocol self.counter = 0 self._stop_iteration = False self._response_wait_time = response_wait_time def nonblocking_getitem(self, index): if self._stop_iteration: raise Exception("`getitem` or `nonblocking_getitem` called after receiving StopIteration") if self.protocol.can_take_request(): self.protocol.request_item(index) try: response = self.protocol.get_response_item(block=True, timeout=self._response_wait_time) except communication.protocol.EmptyQueue: raise NotAvailable if isinstance(response, communication.messages.StopIterationResponse): self._stop_iteration = True raise IndexError(f"Index {index} is out of bound.") if isinstance(response, communication.messages.WorkerExceptionResponse): self._stop_iteration = True response.exc.reraise() return response.key, response.value def nonblocking_len(self): if self._stop_iteration: raise Exception("`len` or `nonblocking_len` called after receiving StopIteration") if self.protocol.can_take_request(): self.protocol.request_len() try: response = self.protocol.get_response_len(block=True, timeout=self._response_wait_time) except communication.protocol.EmptyQueue: raise NotAvailable return response.len
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from torchdata._utils import ExceptionWrapper class DataLoaderQueueMessage: pass class Request(DataLoaderQueueMessage): pass class Response(DataLoaderQueueMessage): pass class ResetEpochRequest(Request): __slots__ = ("seed_generator", "iter_reset_fn") def __init__(self, seed_generator, iter_reset_fn): self.seed_generator = seed_generator self.iter_reset_fn = iter_reset_fn class ResetEpochResponse(Response): pass class LimitRequest(Request): __slots__ = ("num_batches", "limit_fn", "worker_num_batches") def __init__(self, num_batches, limit_fn, worker_num_batches=None): self.num_batches = num_batches self.limit_fn = limit_fn self.worker_num_batches = worker_num_batches class LimitResponse(Response): pass class PauseRequest(Request): __slots__ = "pause_fn" def __init__(self, pause_fn): self.pause_fn = pause_fn class PauseResponse(Response): pass class ResumeRequest(Request): __slots__ = "resume_fn" def __init__(self, resume_fn): self.resume_fn = resume_fn class ResumeResponse(Response): pass class TerminateRequest(Request): pass class TerminateResponse(Response): pass class LenRequest(Request): pass class LenResponse(Response): __slots__ = "len" def __init__(self, len): self.len = len class GetItemRequest(Request): __slots__ = "key" def __init__(self, key): self.key = key class GetItemResponse(Response): __slots__ = ("key", "value") def __init__(self, key, value): self.key = key self.value = value class GetNextRequest(Request): pass class GetNextResponse(Response): __slots__ = "value" def __init__(self, value): self.value = value class StopIterationResponse(Response): pass class InvalidStateResponse(Response): """ Returned by DataPipe when it is expecting to get reset request, for example RouterDataPipe expecting all workers to request reset' """ pass class WorkerExceptionResponse(Response): __slots__ = "exc" def __init__(self, exc: ExceptionWrapper): self.exc: ExceptionWrapper = exc
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional, Tuple # Note [Philox Engine implementation] # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # Refer to: http://www.thesalmons.org/john/random123/papers/random123sc11.pdf for details regarding the engine. # Using Philox4×32-10 for the sake of performance, randomness and crush-resistance. # The following code could be optimized into C++ bindings # Philox Constants kPhilox10A = 0x9E3779B9 kPhilox10B = 0xBB67AE85 kPhiloxSA = 0xD2511F53 kPhiloxSB = 0xCD9E8D57 MASK_32b = 0xFFFFFFFF MASK_64b = 0xFFFFFFFFFFFFFFFF HALF_UINT64 = 0x8000000000000000 def mulhilo32(a: int, b: int) -> Tuple[int, int]: product = a * b return product & MASK_32b, (product >> 32) & MASK_32b def single_round(key: List[int], ctr: List[int]) -> List[int]: lo0, hi0 = mulhilo32(kPhiloxSA, ctr[0]) lo1, hi1 = mulhilo32(kPhiloxSB, ctr[2]) res = [0] * 4 res[0] = hi1 ^ ctr[1] ^ key[0] res[1] = lo1 res[2] = hi0 ^ ctr[3] ^ key[1] res[3] = lo0 return res def philox_10_round(key: Tuple[int, int], ctr: List[int]) -> List[int]: _key = list(key) _ctr = list(ctr) for _ in range(9): _ctr = single_round(_key, _ctr) _key[0] = (_key[0] + kPhilox10A) & MASK_32b _key[1] = (_key[1] + kPhilox10B) & MASK_32b return single_round(_key, _ctr) class PhiloxEngine: r""" Philox is a counter-based RNG with a certain properties: - High performance - Statistiacl random - Crush-resistance Bijection Generate new seeds or spawn parallel seeds for worker processes. """ def __init__(self, seed: Optional[int] = None) -> None: self._seed: Tuple[int, int] = (-1, -1) self._ctr: List[int] = [0] * 4 self._generated_seeds: Optional[List[int]] = None self._spawn_seed: Tuple[int, int] = (-1, -1) if seed is not None: self.seed(seed) def _incr_ctr(self) -> None: for i in range(3): self._ctr[i] += 1 if self._ctr[i] <= MASK_32b: return self._ctr[i] = 0 self._ctr[3] += 1 # if overflow (2^128) has occurred during addition, back to the initial counter if self._ctr[3] > MASK_32b: self._ctr[3] = 0 self._incr_ctr() def seed(self, seed: int) -> "PhiloxEngine": seed = seed & MASK_64b # Convert seed from int64 to uint64 if seed < 0: seed = seed + HALF_UINT64 lo = seed & MASK_32b hi = (seed >> 32) & MASK_32b self._seed = (lo, hi) # Reset counter and cached seed self._ctr = [0] * 4 self._generated_seeds = None # Generate the spawn seed self._spawn_seed = tuple(philox_10_round(self._seed, self._ctr)[:2]) # type: ignore[assignment] self._incr_ctr() return self def generate(self) -> int: assert self._seed != (-1, -1), "Please provide seed to PhiloxEngine" if self._generated_seeds is None: self._generated_seeds = philox_10_round(self._seed, self._ctr) self._incr_ctr() res = self._generated_seeds[:2] else: res = self._generated_seeds[2:] self._generated_seeds = None return (res[1] << 32) + res[0] def clone(self) -> "PhiloxEngine": new_engine = PhiloxEngine(None) new_engine._seed = self._seed # immutable tuple new_engine._ctr = self._ctr.copy() new_engine._generated_seeds = None if self._generated_seeds is None else self._generated_seeds.copy() new_engine._spawn_seed = self._spawn_seed # immutable tuple return new_engine def spawn(self, index: int) -> "PhiloxEngine": assert index >= 0, f"Expected a non-negative value for spawn, but found {index}" assert self._spawn_seed != (-1, -1), "Please provide seed to PhiloxEngine" offset = index % 2 val = index if offset == 0 else index - 1 ctr = [] for _ in range(4): ctr.append(val & MASK_32b) val = val >> 32 res = philox_10_round(self._spawn_seed, ctr)[offset * 2 : offset * 2 + 2] sub_seed = (res[1] << 32) + res[0] return PhiloxEngine(sub_seed)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from torchdata.dataloader2.random.distributed import dist_share_seed from torchdata.dataloader2.random.seed_generator import SeedGenerator __all__ = ["SeedGenerator", "dist_share_seed"]
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Optional import torch import torch.distributed as dist _HALF_UINT64 = 0x8000000000000000 def dist_share_seed(seed: int, process_group: Optional[dist.ProcessGroup] = None) -> int: # Convert uint64 to int64 to prevent overflow for integer Tensor seed -= _HALF_UINT64 shared_seed = torch.tensor(seed, dtype=torch.int64) dist.broadcast(shared_seed, src=0, group=process_group) # Revert int64 back to uint64 return int(shared_seed.item()) + _HALF_UINT64
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Optional, Tuple import torch from torchdata.dataloader2.random._philox import PhiloxEngine _UINT64_UPPER_BOUND = 2 ** 64 def _get_torch_random_seed(): iinfo = torch.iinfo(torch.int64) seed = torch.randint(iinfo.min, iinfo.max, ()).item() # Convert int64 to uint64 seed += 2 ** 63 return seed class SeedGenerator: r""" ``SeedGenerator`` is used to generate seeds in a deterministic and randomized manner based on a user-provided initial seed. Internally, it utilizes a counter-based PRNG called Philox to generate random seeds. Args: seed: The base seed to generate random seeds """ _shared_rng: PhiloxEngine _worker_rng: PhiloxEngine def __init__(self, seed: Optional[int] = None, _rngs: Optional[Tuple[PhiloxEngine, PhiloxEngine]] = None) -> None: if seed is not None and _rngs is not None: raise ValueError("SeedGenerator doesn't allow both seed and _rng specified at the same time") if _rngs is None: self._shared_rng = PhiloxEngine() self._worker_rng = PhiloxEngine() self.seed(seed) else: assert len(_rngs) == 2 self._shared_rng, self._worker_rng = _rngs def seed(self, seed: Optional[int] = None) -> None: r""" Re-seed the ``SeedGenerator``. When ``None`` is provided, a random seed generated by the default PyTorch RNG. """ if seed is None: seed = _get_torch_random_seed() if seed >= _UINT64_UPPER_BOUND: raise ValueError(f"Expected an uint64 seed, but got {seed}.") self._shared_rng.seed(seed) self._worker_rng.seed(seed) def generate_shared_seed(self) -> int: r""" Generate one uint64 random seed that is supposed to be the same across distributed processes. """ return self._shared_rng.generate() def generate_seed(self) -> int: r""" Generate one unique uint64 random seed based on distributed and multiprocessing information. """ return self._worker_rng.generate() def spawn(self, worker_id: int, inplace: bool = False) -> "SeedGenerator": r""" Spawn a sub-SeedGenerator based on the provided worker_id. If inplace is turn on, the SeedGenerator will evolve itself rather than spawning a new """ if worker_id < 0: raise ValueError(f"Expected `rank` equal or larger than 0, but got {worker_id}.") if inplace: self._worker_rng = self._worker_rng.spawn(worker_id) return self return SeedGenerator(seed=None, _rngs=(self._shared_rng.clone(), self._worker_rng.spawn(worker_id))) def __getstate__(self): state = ( self._shared_rng, self._worker_rng, ) return state def __setstate__(self, state): self._shared_rng, self._worker_rng = state
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from torch.utils.data import DataChunk, functional_datapipe from . import iter, map, utils __all__ = ["DataChunk", "functional_datapipe", "iter", "map", "utils"]
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. ############################################################################### # Reference From PyTorch Core ############################################################################### from torch.utils.data import IterDataPipe from torch.utils.data.datapipes.iter import ( Batcher, Collator, Concater, Demultiplexer, FileLister, FileOpener, Filter, Forker, Grouper, IterableWrapper, Mapper, Multiplexer, RoutedDecoder, Sampler, ShardingFilter, Shuffler, StreamReader, UnBatcher, Zipper, ) from torchdata.datapipes.iter.load.aisio import ( AISFileListerIterDataPipe as AISFileLister, AISFileLoaderIterDataPipe as AISFileLoader, ) ############################################################################### # TorchData ############################################################################### from torchdata.datapipes.iter.load.fsspec import ( FSSpecFileListerIterDataPipe as FSSpecFileLister, FSSpecFileOpenerIterDataPipe as FSSpecFileOpener, FSSpecSaverIterDataPipe as FSSpecSaver, ) from torchdata.datapipes.iter.load.huggingface import HuggingFaceHubReaderIterDataPipe as HuggingFaceHubReader from torchdata.datapipes.iter.load.iopath import ( IoPathFileListerIterDataPipe as IoPathFileLister, IoPathFileOpenerIterDataPipe as IoPathFileOpener, IoPathSaverIterDataPipe as IoPathSaver, ) from torchdata.datapipes.iter.load.online import ( GDriveReaderDataPipe as GDriveReader, HTTPReaderIterDataPipe as HttpReader, OnlineReaderIterDataPipe as OnlineReader, ) from torchdata.datapipes.iter.load.s3io import ( S3FileListerIterDataPipe as S3FileLister, S3FileLoaderIterDataPipe as S3FileLoader, ) from torchdata.datapipes.iter.transform.bucketbatcher import ( BucketBatcherIterDataPipe as BucketBatcher, InBatchShufflerIterDataPipe as InBatchShuffler, MaxTokenBucketizerIterDataPipe as MaxTokenBucketizer, ) from torchdata.datapipes.iter.transform.callable import ( BatchAsyncMapperIterDataPipe as BatchAsyncMapper, BatchMapperIterDataPipe as BatchMapper, DropperIterDataPipe as Dropper, FlatMapperIterDataPipe as FlatMapper, FlattenIterDataPipe as Flattener, ShuffledFlatMapperIterDataPipe as ShuffledFlatMapper, SliceIterDataPipe as Slicer, ThreadPoolMapperIterDataPipe as ThreadPoolMapper, ) from torchdata.datapipes.iter.util.bz2fileloader import Bz2FileLoaderIterDataPipe as Bz2FileLoader from torchdata.datapipes.iter.util.cacheholder import ( EndOnDiskCacheHolderIterDataPipe as EndOnDiskCacheHolder, InMemoryCacheHolderIterDataPipe as InMemoryCacheHolder, OnDiskCacheHolderIterDataPipe as OnDiskCacheHolder, ) from torchdata.datapipes.iter.util.combining import ( IterKeyZipperIterDataPipe as IterKeyZipper, MapKeyZipperIterDataPipe as MapKeyZipper, RoundRobinDemultiplexerIterDataPipe as RoundRobinDemultiplexer, UnZipperIterDataPipe as UnZipper, ) from torchdata.datapipes.iter.util.cycler import CyclerIterDataPipe as Cycler, RepeaterIterDataPipe as Repeater from torchdata.datapipes.iter.util.dataframemaker import ( DataFrameMakerIterDataPipe as DataFrameMaker, ParquetDFLoaderIterDataPipe as ParquetDataFrameLoader, ) from torchdata.datapipes.iter.util.decompressor import ( DecompressorIterDataPipe as Decompressor, ExtractorIterDataPipe as Extractor, ) from torchdata.datapipes.iter.util.distributed import FullSyncIterDataPipe as FullSync from torchdata.datapipes.iter.util.hashchecker import HashCheckerIterDataPipe as HashChecker from torchdata.datapipes.iter.util.header import HeaderIterDataPipe as Header, LengthSetterIterDataPipe as LengthSetter from torchdata.datapipes.iter.util.indexadder import ( EnumeratorIterDataPipe as Enumerator, IndexAdderIterDataPipe as IndexAdder, ) from torchdata.datapipes.iter.util.jsonparser import JsonParserIterDataPipe as JsonParser from torchdata.datapipes.iter.util.mux_longest import MultiplexerLongestIterDataPipe as MultiplexerLongest from torchdata.datapipes.iter.util.paragraphaggregator import ParagraphAggregatorIterDataPipe as ParagraphAggregator from torchdata.datapipes.iter.util.plain_text_reader import ( CSVDictParserIterDataPipe as CSVDictParser, CSVParserIterDataPipe as CSVParser, LineReaderIterDataPipe as LineReader, ) from torchdata.datapipes.iter.util.prefetcher import ( PinMemoryIterDataPipe as PinMemory, PrefetcherIterDataPipe as Prefetcher, ) from torchdata.datapipes.iter.util.randomsplitter import RandomSplitterIterDataPipe as RandomSplitter from torchdata.datapipes.iter.util.rararchiveloader import RarArchiveLoaderIterDataPipe as RarArchiveLoader from torchdata.datapipes.iter.util.rows2columnar import Rows2ColumnarIterDataPipe as Rows2Columnar from torchdata.datapipes.iter.util.samplemultiplexer import SampleMultiplexerDataPipe as SampleMultiplexer from torchdata.datapipes.iter.util.saver import SaverIterDataPipe as Saver from torchdata.datapipes.iter.util.shardexpander import ShardExpanderIterDataPipe as ShardExpander from torchdata.datapipes.iter.util.sharding import ( ShardingRoundRobinDispatcherIterDataPipe as ShardingRoundRobinDispatcher, ) from torchdata.datapipes.iter.util.tararchiveloader import TarArchiveLoaderIterDataPipe as TarArchiveLoader from torchdata.datapipes.iter.util.tfrecordloader import ( TFRecordExample, TFRecordExampleSpec, TFRecordLoaderIterDataPipe as TFRecordLoader, ) from torchdata.datapipes.iter.util.webdataset import WebDatasetIterDataPipe as WebDataset from torchdata.datapipes.iter.util.xzfileloader import XzFileLoaderIterDataPipe as XzFileLoader from torchdata.datapipes.iter.util.zip_longest import ZipperLongestIterDataPipe as ZipperLongest from torchdata.datapipes.iter.util.ziparchiveloader import ZipArchiveLoaderIterDataPipe as ZipArchiveLoader from torchdata.datapipes.map.util.converter import MapToIterConverterIterDataPipe as MapToIterConverter __all__ = [ "AISFileLister", "AISFileLoader", "BatchAsyncMapper", "BatchMapper", "Batcher", "BucketBatcher", "Bz2FileLoader", "CSVDictParser", "CSVParser", "Collator", "Concater", "Cycler", "DataFrameMaker", "Decompressor", "Demultiplexer", "Dropper", "EndOnDiskCacheHolder", "Enumerator", "Extractor", "FSSpecFileLister", "FSSpecFileOpener", "FSSpecSaver", "FileLister", "FileOpener", "Filter", "FlatMapper", "Flattener", "Forker", "FullSync", "GDriveReader", "Grouper", "HashChecker", "Header", "HttpReader", "HuggingFaceHubReader", "InBatchShuffler", "InMemoryCacheHolder", "IndexAdder", "IoPathFileLister", "IoPathFileOpener", "IoPathSaver", "IterDataPipe", "IterKeyZipper", "IterableWrapper", "JsonParser", "LengthSetter", "LineReader", "MapKeyZipper", "MapToIterConverter", "Mapper", "MaxTokenBucketizer", "Multiplexer", "MultiplexerLongest", "OnDiskCacheHolder", "OnlineReader", "ParagraphAggregator", "ParquetDataFrameLoader", "PinMemory", "Prefetcher", "RandomSplitter", "RarArchiveLoader", "Repeater", "RoundRobinDemultiplexer", "RoutedDecoder", "Rows2Columnar", "S3FileLister", "S3FileLoader", "SampleMultiplexer", "Sampler", "Saver", "ShardExpander", "ShardingFilter", "ShardingRoundRobinDispatcher", "ShuffledFlatMapper", "Shuffler", "Slicer", "StreamReader", "TFRecordLoader", "TarArchiveLoader", "ThreadPoolMapper", "UnBatcher", "UnZipper", "WebDataset", "XzFileLoader", "ZipArchiveLoader", "Zipper", "ZipperLongest", ] # Please keep this list sorted assert __all__ == sorted(__all__)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Iterator, Optional, TypeVar from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe T_co = TypeVar("T_co", covariant=True) @functional_datapipe("sharding_round_robin_dispatch") class ShardingRoundRobinDispatcherIterDataPipe(IterDataPipe): r""" Wrapper that indicates the prior section of ``DataPipe`` graph is non-replicable and will be iterated in a separate, single dispatching process to distribute data to worker processes in a round-robin manner when multiprocessing is being used. (functional name: ``sharding_round_robin_dispatch``). Args: source_datapipe: Iterable DataPipe that will be sharded sharding_group_filter: Optional ``SHARDING_PRIORITIES`` value Note: - ``sharding_group_filter`` only accepts ``SHARDING_PRIORITIES.MULTIPROCESSING`` for now - When using distributed training, you can add a ``sharding_filter()`` prior to this DataPipe to distribute samples among worker nodes. Examples: >>> # xdoctest: +SKIP >>> from torchdata.datapipes.iter import IterableWrapper >>> from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES >>> dp = IterableWrapper(range(10)) >>> # `.shuffle()` will be executed in a single dispatching processing, then the samples are distributed >>> # to worker processes >>> dp = dp.shuffle().sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING) >>> # `.map()` will be executed within each worker process >>> dp = dp.map(lambda x: x + 1) >>> # Distributed case: the 10 samples will be distributed among the nodes >>> dp = IterableWrapper(range(10)).sharding_filter() >>> # `.map()` will be executed in a single dispatching processing in each node >>> # You may apply further transformation after within each worker process >>> dp = dp.map(lambda x: x + 1).sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING) """ def __init__(self, source_datapipe: IterDataPipe, sharding_group_filter: Optional[SHARDING_PRIORITIES] = None): self.source_datapipe = source_datapipe if sharding_group_filter != SHARDING_PRIORITIES.MULTIPROCESSING: raise NotImplementedError( "`sharding_round_robin_dispatch` currently only supports `SHARDING_PRIORITIES.MULTIPROCESSING`." "Please open issue on github for your feature request." ) self.sharding_group_filter = sharding_group_filter def __iter__(self) -> Iterator[T_co]: yield from self.source_datapipe def __len__(self) -> int: return len(self.source_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import re from typing import Any, Dict, Iterator, List, Union from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe def pathsplit(p): """Split a path into a WebDataset prefix and suffix. The prefix is used for grouping files into samples, the suffix is used as key in the output dictionary. The suffix consists of all components after the first "." in the filename. In torchdata, the prefix consists of the .tar file path followed by the file name inside the archive. Any backslash in the prefix is replaced by a forward slash to make Windows prefixes consistent with POSIX paths. """ # convert Windows pathnames to UNIX pathnames, otherwise # we get an inconsistent mix of the Windows path to the tar # file followed by the POSIX path inside that tar file p = p.replace("\\", "/") if "." not in p: return p, "" # we need to use a regular expression because os.path is # platform specific, but tar files always contain POSIX paths match = re.search(r"^(.*?)(\.[^/]*)$", p) if not match: return p, "" prefix, suffix = match.groups() return prefix, suffix @functional_datapipe("webdataset") class WebDatasetIterDataPipe(IterDataPipe[Dict]): r""" Iterable DataPipe that accepts stream of (path, data) tuples, usually, representing the pathnames and files of a tar archive (functional name: ``webdataset``). This aggregates consecutive items with the same basename into a single dictionary, using the extensions as keys (WebDataset file convention). Any text after the first "." in the filename is used as a key/extension. File names that do not have an extension are ignored. Args: source_datapipe: a DataPipe yielding a stream of (path, data) pairs Returns: a DataPipe yielding a stream of dictionaries Examples: >>> from torchdata.datapipes.iter import FileLister, FileOpener >>> >>> def decode(item): >>> key, value = item >>> if key.endswith(".txt"): >>> return key, value.read().decode("utf-8") >>> if key.endswith(".bin"): >>> return key, value.read().decode("utf-8") >>> >>> datapipe1 = FileLister("test/_fakedata", "wds*.tar") >>> datapipe2 = FileOpener(datapipe1, mode="b") >>> dataset = datapipe2.load_from_tar().map(decode).webdataset() >>> for obj in dataset: >>> print(obj) """ def __init__(self, source_datapipe: IterDataPipe[List[Union[Dict, List]]]) -> None: self.source_datapipe: IterDataPipe[List[Union[Dict, List]]] = source_datapipe def __iter__(self) -> Iterator[Dict]: sample: Dict[str, Any] = {} current = "" for path, data in self.source_datapipe: assert isinstance(path, str), path prefix, suffix = pathsplit(path) if suffix == "": # files with empty suffixes can be used for metadata # they cannot be used for data since they wouldn't have a key continue if prefix != current: if current != "": yield sample sample = {} current = prefix sample["__key__"] = current sample[suffix] = data if sample != {}: yield sample
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import lzma import warnings from io import BufferedIOBase from typing import Iterable, Iterator, Tuple from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils import StreamWrapper from torchdata.datapipes.utils.common import validate_pathname_binary_tuple @functional_datapipe("load_from_xz") class XzFileLoaderIterDataPipe(IterDataPipe[Tuple[str, BufferedIOBase]]): r""" Decompresses xz (lzma) binary streams from an Iterable DataPipe which contains tuples of path name and xy binary streams, and yields a tuple of path name and extracted binary stream (functional name: ``load_from_xz``). Args: datapipe: Iterable DataPipe that provides tuples of path name and xy binary stream length: Nominal length of the DataPipe Note: The opened file handles will be closed automatically if the default ``DecoderDataPipe`` is attached. Otherwise, user should be responsible to close file handles explicitly or let Python's GC close them periodically. Example: >>> from torchdata.datapipes.iter import FileLister, FileOpener >>> datapipe1 = FileLister(".", "*.xz") >>> datapipe2 = FileOpener(datapipe1, mode="b") >>> xz_loader_dp = datapipe2.load_from_xz() >>> for _, stream in xz_loader_dp: >>> print(stream.read()) b'0123456789abcdef' """ def __init__(self, datapipe: Iterable[Tuple[str, BufferedIOBase]], length: int = -1) -> None: super().__init__() self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe self.length: int = length def __iter__(self) -> Iterator[Tuple[str, BufferedIOBase]]: for data in self.datapipe: validate_pathname_binary_tuple(data) pathname, data_stream = data try: extracted_fobj = lzma.open(data_stream, mode="rb") # type: ignore[call-overload] new_pathname = pathname.rstrip(".xz") yield new_pathname, StreamWrapper(extracted_fobj, data_stream, name=pathname) # type: ignore[misc] except Exception as e: warnings.warn(f"Unable to extract files from corrupted xz/lzma stream {pathname} due to: {e}, abort!") raise e finally: if isinstance(data_stream, StreamWrapper): data_stream.autoclose() def __len__(self) -> int: if self.length == -1: raise TypeError(f"{type(self).__name__} instance doesn't have valid length") return self.length
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import contextlib import csv from typing import IO, Iterator, Tuple, TypeVar, Union from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe D = TypeVar("D") Str_Or_Bytes = Union[str, bytes] class PlainTextReaderHelper: def __init__( self, *, skip_lines: int = 0, strip_newline: bool = True, decode: bool = True, encoding="utf-8", errors: str = "ignore", return_path: bool = False, as_tuple: bool = False, ) -> None: if skip_lines < 0: raise ValueError("'skip_lines' is required to be a positive integer.") self._skip_lines = skip_lines self._strip_newline = strip_newline self._decode = decode self._encoding = encoding self._errors = errors self._return_path = return_path self._as_tuple = as_tuple def skip_lines(self, file: IO) -> Union[Iterator[bytes], Iterator[str]]: with contextlib.suppress(StopIteration): for _ in range(self._skip_lines): next(file) try: yield from file finally: file.close() def strip_newline(self, stream: Union[Iterator[bytes], Iterator[str]]) -> Union[Iterator[bytes], Iterator[str]]: if not self._strip_newline: yield from stream return for line in stream: if isinstance(line, str): yield line.strip("\r\n") else: yield line.strip(b"\r\n") def decode(self, stream: Union[Iterator[bytes], Iterator[str]]) -> Union[Iterator[bytes], Iterator[str]]: if not self._decode: yield from stream else: for line in stream: yield line.decode(self._encoding, self._errors) if isinstance(line, bytes) else line def return_path(self, stream: Iterator[D], *, path: str) -> Iterator[Union[D, Tuple[str, D]]]: if not self._return_path: yield from stream return for data in stream: yield path, data def as_tuple(self, stream: Iterator[D]) -> Iterator[Union[D, Tuple]]: if not self._as_tuple: yield from stream return for data in stream: if isinstance(data, list): yield tuple(data) else: yield data @functional_datapipe("readlines") class LineReaderIterDataPipe(IterDataPipe[Union[Str_Or_Bytes, Tuple[str, Str_Or_Bytes]]]): r""" Accepts a DataPipe consisting of tuples of file name and string data stream, and for each line in the stream, yields a tuple of file name and the line (functional name: ``readlines``). Args: source_datapipe: a DataPipe with tuples of file name and string data stream skip_lines: number of lines to skip at the beginning of each file strip_newline: if ``True``, the new line character will be stripped decode: if ``True``, this will decode the contents of the file based on the specified ``encoding`` encoding: the character encoding of the files (`default='utf-8'`) errors: the error handling scheme used while decoding return_path: if ``True``, each line will return a tuple of path and contents, rather than just the contents Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> import io >>> text1 = "Line1\nLine2" >>> text2 = "Line2,1\r\nLine2,2\r\nLine2,3" >>> source_dp = IterableWrapper([("file1", io.StringIO(text1)), ("file2", io.StringIO(text2))]) >>> line_reader_dp = source_dp.readlines() >>> list(line_reader_dp) [('file1', 'Line1'), ('file1', 'Line2'), ('file2', 'Line2,1'), ('file2', 'Line2,2'), ('file2', 'Line2,3')] """ def __init__( self, source_datapipe: IterDataPipe[Tuple[str, IO]], *, skip_lines: int = 0, strip_newline: bool = True, decode: bool = False, encoding="utf-8", errors: str = "ignore", return_path: bool = True, ) -> None: self.source_datapipe = source_datapipe self._helper = PlainTextReaderHelper( skip_lines=skip_lines, strip_newline=strip_newline, decode=decode, encoding=encoding, errors=errors, return_path=return_path, ) def __iter__(self) -> Iterator[Union[Str_Or_Bytes, Tuple[str, Str_Or_Bytes]]]: for path, file in self.source_datapipe: stream = self._helper.skip_lines(file) stream = self._helper.strip_newline(stream) stream = self._helper.decode(stream) yield from self._helper.return_path(stream, path=path) # type: ignore[misc] class _CSVBaseParserIterDataPipe(IterDataPipe): def __init__( self, source_datapipe, csv_reader, *, skip_lines: int = 0, decode: bool = False, encoding="utf-8", errors: str = "ignore", return_path: bool = True, as_tuple: bool = False, **fmtparams, ) -> None: self.source_datapipe = source_datapipe self._csv_reader = csv_reader self._helper = PlainTextReaderHelper( skip_lines=skip_lines, decode=decode, encoding=encoding, errors=errors, return_path=return_path, as_tuple=as_tuple, ) self.fmtparams = fmtparams def __iter__(self) -> Iterator[Union[D, Tuple[str, D]]]: for path, file in self.source_datapipe: stream = self._helper.skip_lines(file) stream = self._helper.decode(stream) stream = self._csv_reader(stream, **self.fmtparams) stream = self._helper.as_tuple(stream) # type: ignore[assignment] yield from self._helper.return_path(stream, path=path) # type: ignore[misc] @functional_datapipe("parse_csv") class CSVParserIterDataPipe(_CSVBaseParserIterDataPipe): r""" Accepts a DataPipe consists of tuples of file name and CSV data stream, reads and returns the contents within the CSV files one row at a time (functional name: ``parse_csv``). Each output is a `List` by default, but it depends on ``fmtparams``. Args: source_datapipe: source DataPipe with tuples of file name and CSV data stream skip_lines: number of lines to skip at the beginning of each file strip_newline: if ``True``, the new line character will be stripped decode: if ``True``, this will decode the contents of the file based on the specified ``encoding`` encoding: the character encoding of the files (`default='utf-8'`) errors: the error handling scheme used while decoding return_path: if ``True``, each line will return a tuple of path and contents, rather than just the contents as_tuple: if ``True``, each line will return a tuple instead of a list Example: >>> from torchdata.datapipes.iter import IterableWrapper, FileOpener >>> import os >>> def get_name(path_and_stream): >>> return os.path.basename(path_and_stream[0]), path_and_stream[1] >>> datapipe1 = IterableWrapper(["1.csv", "empty.csv", "empty2.csv"]) >>> datapipe2 = FileOpener(datapipe1, mode="b") >>> datapipe3 = datapipe2.map(get_name) >>> csv_parser_dp = datapipe3.parse_csv() >>> list(csv_parser_dp) [['key', 'item'], ['a', '1'], ['b', '2'], []] """ def __init__( self, source_datapipe: IterDataPipe[Tuple[str, IO]], *, skip_lines: int = 0, decode: bool = True, encoding: str = "utf-8", errors: str = "ignore", return_path: bool = False, as_tuple: bool = False, **fmtparams, ) -> None: super().__init__( source_datapipe, csv.reader, skip_lines=skip_lines, decode=decode, encoding=encoding, errors=errors, return_path=return_path, as_tuple=as_tuple, **fmtparams, ) @functional_datapipe("parse_csv_as_dict") class CSVDictParserIterDataPipe(_CSVBaseParserIterDataPipe): r""" Accepts a DataPipe consists of tuples of file name and CSV data stream, reads and returns the contents within the CSV files one row at a time (functional name: ``parse_csv_as_dict``). Each output is a `Dict` by default, but it depends on ``fmtparams``. The first row of each file, unless skipped, will be used as the header; the contents of the header row will be used as keys for the `Dict`\s generated from the remaining rows. Args: source_datapipe: source DataPipe with tuples of file name and CSV data stream skip_lines: number of lines to skip at the beginning of each file strip_newline: if ``True``, the new line character will be stripped decode: if ``True``, this will decode the contents of the file based on the specified ``encoding`` encoding: the character encoding of the files (`default='utf-8'`) errors: the error handling scheme used while decoding return_path: if ``True``, each line will return a tuple of path and contents, rather than just the contents Example: >>> from torchdata.datapipes.iter import FileLister, FileOpener >>> import os >>> def get_name(path_and_stream): >>> return os.path.basename(path_and_stream[0]), path_and_stream[1] >>> datapipe1 = FileLister(".", "*.csv") >>> datapipe2 = FileOpener(datapipe1, mode="b") >>> datapipe3 = datapipe2.map(get_name) >>> csv_dict_parser_dp = datapipe3.parse_csv_as_dict() >>> list(csv_dict_parser_dp) [{'key': 'a', 'item': '1'}, {'key': 'b', 'item': '2'}] """ def __init__( self, source_datapipe: IterDataPipe[Tuple[str, IO]], *, skip_lines: int = 0, decode: bool = True, encoding: str = "utf-8", errors: str = "ignore", return_path: bool = False, **fmtparams, ) -> None: super().__init__( source_datapipe, csv.DictReader, skip_lines=skip_lines, decode=decode, encoding=encoding, errors=errors, return_path=return_path, **fmtparams, )
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Iterator, Optional, TypeVar from warnings import warn from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe T_co = TypeVar("T_co", covariant=True) @functional_datapipe("header") class HeaderIterDataPipe(IterDataPipe[T_co]): r""" Yields elements from the source DataPipe from the start, up to the specfied limit (functional name: ``header``). If you would like to manually set the length of a DataPipe to a certain value; we recommend you to use :class:`.LengthSetter`. Args: source_datapipe: the DataPipe from which elements will be yielded limit: the number of elements to yield before stopping Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper(range(10)) >>> header_dp = dp.header(3) >>> list(header_dp) [0, 1, 2] """ def __init__(self, source_datapipe: IterDataPipe[T_co], limit: Optional[int] = 10) -> None: self.source_datapipe: IterDataPipe[T_co] = source_datapipe self.limit: Optional[int] = limit def __iter__(self) -> Iterator[T_co]: i: int = 0 for value in self.source_datapipe: i += 1 if self.limit is None or i <= self.limit: yield value else: break def __len__(self) -> int: try: source_len = len(self.source_datapipe) return source_len if self.limit is None else min(source_len, self.limit) except TypeError as error: if self.limit is None: raise TypeError("The length of this HeaderIterDataPipe cannot be determined.") from error warn( "The length of this HeaderIterDataPipe is inferred to be equal to its limit." "The actual value may be smaller if the actual length of source_datapipe is smaller than the limit." ) return self.limit @functional_datapipe("set_length") class LengthSetterIterDataPipe(IterDataPipe[T_co]): r""" Set the length attribute of the DataPipe, which is returned by ``__len__`` (functional name: ``set_length``). This can be used after DataPipes whose final length cannot be known in advance (e.g. ``filter``). If you know the final length with certainty, you can manually set it, which can then be used by DataLoader or other DataPipes. Note: This DataPipe differs from :class:`.Header` in that this doesn't restrict the number of elements that can be yielded from the DataPipe; this is strictly used for setting an attribute so that it can be used later. Args: source_datapipe: a DataPipe length: the integer value that will be set as the length Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper(range(10)).filter(lambda x: x < 5).set_length(3) >>> list(dp) # Notice that the number of elements yielded is unchanged [0, 1, 2, 3, 4] >>> len(dp) 3 >>> header_dp = IterableWrapper(range(10)).filter(lambda x: x < 5).header(3) >>> list(header_dp) # Use `.header()` if you want to limit the number of elements yielded [0, 1, 2] >>> len(header_dp) 3 """ def __init__(self, source_datapipe: IterDataPipe[T_co], length: int) -> None: self.source_datapipe: IterDataPipe[T_co] = source_datapipe assert length >= 0 self.length: int = length def __iter__(self) -> Iterator[T_co]: yield from self.source_datapipe def __len__(self) -> int: return self.length
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from collections import defaultdict from typing import Dict, Iterator, List, Union from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe @functional_datapipe("rows2columnar") class Rows2ColumnarIterDataPipe(IterDataPipe[Dict]): r""" Accepts an input DataPipe with batches of data, and processes one batch at a time and yields a Dict for each batch, with ``column_names`` as keys and lists of corresponding values from each row as values (functional name: ``rows2columnar``). Within the input DataPipe, each row within a batch must either be a `Dict` or a `List` Note: If ``column_names`` are not given and each row is a `Dict`, the keys of that Dict will be used as column names. Args: source_datapipe: a DataPipe where each item is a batch. Within each batch, there are rows and each row is a `List` or `Dict` column_names: if each element in a batch contains `Dict`, ``column_names`` act as a filter for matching keys; otherwise, these are used as keys to for the generated `Dict` of each batch Example: >>> # Each element in a batch is a `Dict` >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper([[{'a': 1}, {'b': 2, 'a': 1}], [{'a': 1, 'b': 200}, {'b': 2, 'c': 3, 'a': 100}]]) >>> row2col_dp = dp.rows2columnar() >>> list(row2col_dp) [defaultdict(<class 'list'>, {'a': [1, 1], 'b': [2]}), defaultdict(<class 'list'>, {'a': [1, 100], 'b': [200, 2], 'c': [3]})] >>> row2col_dp = dp.rows2columnar(column_names=['a']) >>> list(row2col_dp) [defaultdict(<class 'list'>, {'a': [1, 1]}), defaultdict(<class 'list'>, {'a': [1, 100]})] >>> # Each element in a batch is a `List` >>> dp = IterableWrapper([[[0, 1, 2, 3], [4, 5, 6, 7]]]) >>> row2col_dp = dp.rows2columnar(column_names=["1st_in_batch", "2nd_in_batch", "3rd_in_batch", "4th_in_batch"]) >>> list(row2col_dp) [defaultdict(<class 'list'>, {'1st_in_batch': [0, 4], '2nd_in_batch': [1, 5], '3rd_in_batch': [2, 6], '4th_in_batch': [3, 7]})] """ column_names: List[str] def __init__(self, source_datapipe: IterDataPipe[List[Union[Dict, List]]], column_names: List[str] = None) -> None: self.source_datapipe: IterDataPipe[List[Union[Dict, List]]] = source_datapipe self.column_names: List[str] = [] if column_names is None else column_names def __iter__(self) -> Iterator[Dict]: for batch in self.source_datapipe: columnar = defaultdict(list) for list_or_dict_row in batch: if isinstance(list_or_dict_row, dict): # if column_names provided, we use it as a filter if len(self.column_names) > 0: for column_name in self.column_names: # this line will raise a KeyError if column_name # is not within list_or_dict_row which is the # expected behavior columnar[column_name].append(list_or_dict_row[column_name]) else: for k, v in list_or_dict_row.items(): columnar[k].append(v) else: for i, v in enumerate(list_or_dict_row): columnar[self.column_names[i]].append(v) yield columnar def __len__(self) -> int: return len(self.source_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os import sys import warnings import zipfile from io import BufferedIOBase from typing import cast, IO, Iterable, Iterator, Tuple from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils import StreamWrapper from torchdata.datapipes.utils.common import validate_pathname_binary_tuple @functional_datapipe("load_from_zip") class ZipArchiveLoaderIterDataPipe(IterDataPipe[Tuple[str, BufferedIOBase]]): r""" Opens/decompresses zip binary streams from an Iterable DataPipe which contains a tuple of path name and zip binary stream, and yields a tuple of path name and extracted binary stream (functional name: ``load_from_zip``). Args: datapipe: Iterable DataPipe that provides tuples of path name and zip binary stream length: Nominal length of the DataPipe Note: The opened file handles will be closed automatically if the default ``DecoderDataPipe`` is attached. Otherwise, user should be responsible to close file handles explicitly or let Python's GC close them periodically. Due to how `zipfiles` implements its ``open()`` method, the data_stream variable below cannot be closed within the scope of this function. Example: >>> from torchdata.datapipes.iter import FileLister, FileOpener >>> datapipe1 = FileLister(".", "*.zip") >>> datapipe2 = FileOpener(datapipe1, mode="b") >>> zip_loader_dp = datapipe2.load_from_zip() >>> for _, stream in zip_loader_dp: >>> print(stream.read()) b'0123456789abcdef' """ def __init__(self, datapipe: Iterable[Tuple[str, BufferedIOBase]], length: int = -1) -> None: super().__init__() self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe self.length: int = length def __iter__(self) -> Iterator[Tuple[str, BufferedIOBase]]: for data in self.datapipe: validate_pathname_binary_tuple(data) pathname, data_stream = data try: # typing.cast is used here to silence mypy's type checker zips = zipfile.ZipFile(cast(IO[bytes], data_stream)) for zipinfo in zips.infolist(): # major version should always be 3 here. if sys.version_info[1] >= 6: if zipinfo.is_dir(): continue elif zipinfo.filename.endswith("/"): continue extracted_fobj = zips.open(zipinfo) inner_pathname = os.path.normpath(os.path.join(pathname, zipinfo.filename)) yield inner_pathname, StreamWrapper(extracted_fobj, data_stream, name=inner_pathname) # type: ignore[misc] except Exception as e: warnings.warn(f"Unable to extract files from corrupted zipfile stream {pathname} due to: {e}, abort!") raise e finally: if isinstance(data_stream, StreamWrapper): data_stream.autoclose() # We are unable to close 'data_stream' here, because it needs to be available to use later def __len__(self) -> int: if self.length == -1: raise TypeError(f"{type(self).__name__} instance doesn't have valid length") return self.length
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, Iterator, Tuple, TypeVar from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe K = TypeVar("K") @functional_datapipe("enumerate") class EnumeratorIterDataPipe(IterDataPipe[Tuple[int, K]]): r""" Adds an index to an existing DataPipe through enumeration, with the index starting from 0 by default (functional name: ``enumerate``). Args: source_datapipe: Iterable DataPipe being indexed starting_index: Index from which enumeration will start Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper(['a', 'b', 'c']) >>> enum_dp = dp.enumerate() >>> list(enum_dp) [(0, 'a'), (1, 'b'), (2, 'c')] """ def __init__(self, source_datapipe: IterDataPipe[K], starting_index: int = 0) -> None: self.source_datapipe: IterDataPipe[K] = source_datapipe self.starting_index = starting_index def __iter__(self): yield from enumerate(self.source_datapipe, self.starting_index) def __len__(self): return len(self.source_datapipe) @functional_datapipe("add_index") class IndexAdderIterDataPipe(IterDataPipe[Dict]): r""" Adds an index to an existing Iterable DataPipe with (functional name: ``add_index``). The row or batch within the DataPipe must have the type `Dict`; otherwise, a `NotImplementedError` will be thrown. The index of the data is set to the provided ``index_name``. Args: source_datapipe: Iterable DataPipe being indexed, its row/batch must be of type `Dict` index_name: Name of the key to store data index Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper([{'a': 1, 'b': 2}, {'c': 3, 'a': 1}]) >>> index_dp = dp.add_index("order") >>> list(index_dp) [{'a': 1, 'b': 2, 'order': 0}, {'c': 3, 'a': 1, 'order': 1}] """ def __init__(self, source_datapipe: IterDataPipe[Dict], index_name: str = "index") -> None: self.source_datapipe = source_datapipe self.index_name = index_name def __iter__(self) -> Iterator[Dict]: for i, row_or_batch in enumerate(self.source_datapipe): if isinstance(row_or_batch, dict): row_or_batch[self.index_name] = i yield row_or_batch else: raise NotImplementedError("We only support adding index to row or batch in dict type") def __len__(self) -> int: return len(self.source_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import random from typing import Dict, Iterator, Optional, Sized, TypeVar from torchdata.datapipes.iter import IterDataPipe T_co = TypeVar("T_co", covariant=True) class SampleMultiplexerDataPipe(IterDataPipe[T_co]): """ Takes a `Dict` of (IterDataPipe, Weight), and yields items by sampling from these DataPipes with respect to their weights. When individual DataPipes are exhausted, continues to sample from the remaining DataPipes according to their relative weights. If you wish to maintain the same ratio of weights indefinitely, you need to ensure that the inputs are never exhausted, by, for instance, applying ``cycle`` to them. Sampling is controlled by the provided random ``seed``. If you don't provide it, the sampling will not be deterministic. Args: pipes_to_weights_dict: a `Dict` of IterDataPipes and Weights. The total weight of unexhausted DataPipes will be normalized to 1 for the purpose of sampling. seed: random seed to initialize the random number generator Example: >>> from torchdata.datapipes.iter import IterableWrapper, SampleMultiplexer >>> source_dp1 = IterableWrapper([0] * 10) >>> source_dp2 = IterableWrapper([1] * 10) >>> d = {source_dp1: 99999999, source_dp2: 0.0000001} >>> sample_mul_dp = SampleMultiplexer(pipes_to_weights_dict=d, seed=0) >>> list(sample_mul_dp) [0, 0, 0, 0, 0, 1, 1, 1, 1, 1] """ def __init__( self, pipes_to_weights_dict: Dict[IterDataPipe[T_co], float], seed: Optional[int] = None, ): if not pipes_to_weights_dict: raise ValueError("Empty dictionary passed to SampleMultiplexerDataPipe") total_weight: float = 0 for v in pipes_to_weights_dict.values(): if v <= 0: raise ValueError(f"Expecting a positive and non-zero weight, got {v}") total_weight += v self.pipes_and_weights = [(k, v / total_weight) for k, v in pipes_to_weights_dict.items()] if seed is None: self.random = random.Random() else: self.random = random.Random(seed) def __iter__(self) -> Iterator[T_co]: pipes_and_weights = [(iter(k), v) for k, v in self.pipes_and_weights] while len(pipes_and_weights) > 1: r = self.random.random() s: float = 0 for it, weight in pipes_and_weights: s += weight if r < s: try: item = next(it) yield item except StopIteration: # remove the current stream new_total = 1 - weight assert new_total > 0 pipes_and_weights = [(k, v / new_total) for k, v in pipes_and_weights if k != it] break # only one stream left for item in pipes_and_weights[0][0]: yield item def __len__(self) -> int: if all(isinstance(dp, Sized) for dp, _ in self.pipes_and_weights): return sum(len(dp) for dp, _ in self.pipes_and_weights) else: raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import hashlib from io import IOBase from typing import Dict, Iterator, Tuple, Union from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils import StreamWrapper D_type = Union[str, bytes, bytearray] U = Union[D_type, StreamWrapper] @functional_datapipe("check_hash") class HashCheckerIterDataPipe(IterDataPipe[Tuple[str, U]]): r""" Computes and checks the hash of each file, from an input DataPipe of tuples of file name and data/stream (functional name: ``check_hash``). If the hashes match the given hash in the dictionary, it yields a tuple of file name and data/stream. Otherwise, it will raise an error. Args: source_datapipe: IterDataPipe with tuples of file name and data/stream hash_dict: Dictionary that maps file names to their corresponding hashes hash_type: The type of hash function to apply rewind: Rewind the stream after using the stream to compute the hash (this does not work with non-seekable stream, e.g. HTTP) Example: >>> from torchdata.datapipes.iter import IterableWrapper, FileOpener >>> expected_MD5_hash = "bb9675028dd39d2dd2bf71002b93e66c" File is from "https://raw.githubusercontent.com/pytorch/data/main/LICENSE" >>> file_dp = FileOpener(IterableWrapper(["LICENSE.txt"]), mode='rb') >>> # An exception is only raised when the hash doesn't match, otherwise (path, stream) is returned >>> check_hash_dp = file_dp.check_hash({"LICENSE.txt": expected_MD5_hash}, "md5", rewind=True) >>> reader_dp = check_hash_dp.readlines() >>> it = iter(reader_dp) >>> path, line = next(it) >>> path LICENSE.txt >>> line b'BSD 3-Clause License' """ def __init__( self, source_datapipe: IterDataPipe[Tuple[str, IOBase]], hash_dict: Dict[str, str], hash_type: str = "sha256", rewind: bool = True, ) -> None: self.source_datapipe: IterDataPipe[Tuple[str, IOBase]] = source_datapipe self.hash_dict: Dict[str, str] = hash_dict self.hash_type: str = hash_type self.rewind: bool = rewind if self.hash_type not in ["sha256", "md5"]: raise ValueError("Invalid hash_type requested, should be one of {}".format(["sha256", "md5"])) def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]: for file_name, data in self.source_datapipe: if self.hash_type == "sha256": hash_func = hashlib.sha256() else: hash_func = hashlib.md5() if isinstance(data, (str, bytes, bytearray)): if isinstance(data, str): data = data.decode() hash_func.update(data) # File Stream else: # Not all streams have `read(bytes)` method. # `__iter__` method is chosen because it is a common interface for IOBase. for d in data: hash_func.update(d) # TODO(133): this will not work (or work crappy for non-seekable steams like http) if self.rewind: data.seek(0) if file_name not in self.hash_dict: raise RuntimeError(f"Unspecified hash for file {file_name}") if hash_func.hexdigest() != self.hash_dict[file_name]: raise RuntimeError( f"The computed hash {hash_func.hexdigest()} of {file_name} does not match the expected" f"hash {self.hash_dict[file_name]}. Delete the file manually and retry." ) if isinstance(data, (str, bytes, bytearray)): yield file_name, data else: yield file_name, StreamWrapper(data) def __len__(self) -> int: return len(self.source_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import warnings from typing import Callable, Dict, Optional from torch.utils.data import IterDataPipe, MapDataPipe from torch.utils.data.datapipes.utils.common import _check_unpickable_fn, DILL_AVAILABLE if DILL_AVAILABLE: import dill dill.extend(use_dill=False) # @functional_datapipe("to_map_datapipe") # This line must be kept for .pyi signature parser class IterToMapConverterMapDataPipe(MapDataPipe): r""" Lazily load data from ``IterDataPipe`` to construct a ``MapDataPipe`` with the key-value pair generated by ``key_value_fn`` (functional name: ``to_map_datapipe``). If ``key_value_fn`` is not given, each data from the source IterDataPipe must itself be an iterable with exactly two objects. The first object of each item becomes a key in the new dictionary, and the second object the corresponding value. For the opposite converter, use :class:`.MapToIterConverter`. Args: datapipe: Source IterDataPipe key_value_fn: Function being applied over each data to generate key-value pair Note: If a key being added is already present, the corresponding value will be replaced by the new value. Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper([(i, i) for i in range(10)]) >>> map_dp = source_dp.to_map_datapipe() >>> list(map_dp) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> source_dp2 = IterableWrapper([('a', 1), ('b', 2), ('c', 1)]) >>> map_dp2 = source_dp2.to_map_datapipe() >>> map_dp2['a'] 1 >>> def row_to_tuple(row): >>> label = row[0] >>> data = row[1:] >>> return label, data >>> source_dp3 = IterableWrapper([('a', 1, 1, 1, 1, 1, 1), ('b', 2, 2, 2, 2, 2, 2), ('c', 3, 3, 3, 3, 3, 3)]) >>> map_dp3 = source_dp3.to_map_datapipe(key_value_fn=row_to_tuple) >>> map_dp3['a'] (1, 1, 1, 1, 1, 1) """ datapipe: IterDataPipe key_value_fn: Optional[Callable] _map: Optional[Dict] _length: int def __init__(self, datapipe: IterDataPipe, key_value_fn: Optional[Callable] = None): if not isinstance(datapipe, IterDataPipe): raise TypeError(f"IterToMapConverter can only apply on IterDataPipe, but found {type(datapipe)}") self.datapipe = datapipe if key_value_fn is not None: _check_unpickable_fn(key_value_fn) self.key_value_fn = key_value_fn # type: ignore[assignment] self._map = None def _load_map(self): self._map = {} for d in self.datapipe: inp = d if self.key_value_fn is None else self.key_value_fn(d) try: length = len(inp) except TypeError: raise TypeError(f"Cannot convert dictionary update element {type(inp)} ({inp}) to a sequence") if length != 2: raise ValueError(f"dictionary update sequence element has length {length}, 2 is required") key, value = inp if key in self._map: warnings.warn(f"Found duplicate key {key}. Please check your `key_value_fn`") self._map[key] = value def __getitem__(self, index): try: if self._map is None: self._load_map() return self._map[index] # type: ignore[index] except KeyError: raise IndexError(f"Index {index} is invalid for IterToMapConverter.") def __len__(self): if self._map is not None: return len(self._map) # type: ignore[arg-type] try: return len(self.datapipe) except (TypeError, NotImplementedError): pass warnings.warn( "Data from prior DataPipe are loaded to get length of" "IterToMapConverter before execution of the pipeline." "Please consider removing len()." ) self._load_map() return len(self._map) # type: ignore[arg-type] def __getstate__(self): if DILL_AVAILABLE: dill_key_value_fn = dill.dumps(self.key_value_fn) else: dill_key_value_fn = self.key_value_fn return ( self.datapipe, dill_key_value_fn, self._map, ) def __setstate__(self, state): (self.datapipe, dill_key_value_fn, self._map) = state if DILL_AVAILABLE: self.key_value_fn = dill.loads(dill_key_value_fn) # type: ignore[assignment] else: self.key_value_fn = dill_key_value_fn # type: ignore[assignment] # Register for functional API # See https://github.com/pytorch/data/issues/200 IterDataPipe.register_datapipe_as_function("to_map_datapipe", IterToMapConverterMapDataPipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import warnings from collections import OrderedDict from typing import Callable, final, Iterator, List, Optional, Sequence, TypeVar from torch.utils.data import functional_datapipe, IterDataPipe, MapDataPipe from torch.utils.data.datapipes.iter.combining import _ChildDataPipe, _DemultiplexerIterDataPipe, _ForkerIterDataPipe from torch.utils.data.datapipes.utils.common import _check_unpickable_fn from torchdata.datapipes.utils.janitor import janitor T_co = TypeVar("T_co", covariant=True) T = TypeVar("T") @functional_datapipe("zip_with_iter") class IterKeyZipperIterDataPipe(IterDataPipe[T_co]): r""" Zips two IterDataPipes together based on the matching key (functional name: ``zip_with_iter``). The keys are computed by ``key_fn`` and ``ref_key_fn`` for the two IterDataPipes, respectively. When there isn't a match between the elements of the two IterDataPipes, the element from ``ref_datapipe`` is stored in a buffer. Then, the next element from ``ref_datapipe`` is tried. After a match is found, the ``merge_fn`` determines how they will be combined and returned (a tuple is generated by default). Args: source_datapipe: IterKeyZipper will yield data based on the order of this IterDataPipe ref_datapipe: Reference IterDataPipe from which IterKeyZipper will find items with matching key for ``source_datapipe`` key_fn: Callable function that will compute keys using elements from ``source_datapipe`` ref_key_fn: Callable function that will compute keys using elements from ``ref_datapipe`` If it's not specified, the ``key_fn`` will also be applied to elements from ``ref_datapipe`` keep_key: Option to yield the matching key along with the items in a tuple, resulting in `(key, merge_fn(item1, item2))`. buffer_size: The size of buffer used to hold key-data pairs from reference DataPipe until a match is found. If it's specified as ``None``, the buffer size is set as infinite. merge_fn: Function that combines the item from ``source_datapipe`` and the item from ``ref_datapipe``, by default a tuple is created Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> from operator import itemgetter >>> def merge_fn(t1, t2): >>> return t1[1] + t2[1] >>> dp1 = IterableWrapper([('a', 100), ('b', 200), ('c', 300)]) >>> dp2 = IterableWrapper([('a', 1), ('b', 2), ('c', 3), ('d', 4)]) >>> res_dp = dp1.zip_with_iter(dp2, key_fn=itemgetter(0), >>> ref_key_fn=itemgetter(0), keep_key=True, merge_fn=merge_fn) >>> list(res_dp) [('a', 101), ('b', 202), ('c', 303)] """ def __init__( self, source_datapipe: IterDataPipe, ref_datapipe: IterDataPipe, key_fn: Callable, ref_key_fn: Optional[Callable] = None, keep_key: bool = False, buffer_size: int = 10000, merge_fn: Optional[Callable] = None, ) -> None: if not isinstance(ref_datapipe, IterDataPipe): raise TypeError(f"ref_datapipe must be a IterDataPipe, but its type is {type(ref_datapipe)} instead.") self.source_datapipe = source_datapipe self.ref_datapipe = ref_datapipe _check_unpickable_fn(key_fn) self.key_fn = key_fn if ref_key_fn is not None: _check_unpickable_fn(ref_key_fn) self.ref_key_fn = key_fn if ref_key_fn is None else ref_key_fn self.keep_key = keep_key if merge_fn is not None: _check_unpickable_fn(merge_fn) self.merge_fn = merge_fn if buffer_size is not None and buffer_size <= 0: raise ValueError("'buffer_size' is required to be either None or a positive integer.") self.buffer_size: int = buffer_size self.buffer: OrderedDict = OrderedDict() def __iter__(self) -> Iterator: ref_it = iter(self.ref_datapipe) warn_once_flag = True try: for data in self.source_datapipe: key = self.key_fn(data) while key not in self.buffer: try: ref_data = next(ref_it) except StopIteration: raise BufferError( f"No matching key can be found from reference DataPipe for the data {data}. " "Please consider increasing the buffer size." ) ref_key = self.ref_key_fn(ref_data) if ref_key in self.buffer: raise ValueError("Duplicate key is found in reference DataPipe") if self.buffer_size is not None and len(self.buffer) > self.buffer_size: if warn_once_flag: warn_once_flag = False warnings.warn( "Buffer reaches the upper limit, so reference key-data pair begins to " "be removed from buffer in FIFO order. Please consider increase buffer size." ) self.buffer.popitem(last=False) self.buffer[ref_key] = ref_data res = self.merge_fn(data, self.buffer.pop(key)) if self.merge_fn else (data, self.buffer.pop(key)) if self.keep_key: yield key, res else: yield res finally: del ref_it # TODO(633): This should be Exception or warn when debug mode is enabled if self.buffer: for _, v in self.buffer.items(): janitor(v) self.buffer.clear() def __len__(self) -> int: return len(self.source_datapipe) @final def reset(self) -> None: self.buffer = OrderedDict() def __getstate__(self): state = ( self.source_datapipe, self.ref_datapipe, self.key_fn, self.ref_key_fn, self.keep_key, self.merge_fn, self.buffer_size, ) if IterDataPipe.getstate_hook is not None: return IterDataPipe.getstate_hook(state) return state def __setstate__(self, state): ( self.source_datapipe, self.ref_datapipe, self.key_fn, self.ref_key_fn, self.keep_key, self.merge_fn, self.buffer_size, ) = state self.buffer = OrderedDict() def __del__(self): if self.buffer: for _, v in self.buffer.items(): janitor(v) self.buffer.clear() @functional_datapipe("zip_with_map") class MapKeyZipperIterDataPipe(IterDataPipe[T_co]): r""" Joins the items from the source IterDataPipe with items from a MapDataPipe (functional name: ``zip_with_map``). The matching is done by the provided ``key_fn``, which maps an item from ``source_iterdatapipe`` to a key that should exist in the ``map_datapipe``. The return value is created by the ``merge_fn``, which returns a tuple of the two items by default. Args: source_iterdatapipe: IterDataPipe from which items are yield and will be combined with an item from ``map_datapipe`` map_datapipe: MapDataPipe that takes a key from ``key_fn``, and returns an item key_fn: Function that maps each item from ``source_iterdatapipe`` to a key that exists in ``map_datapipe`` keep_key: Option to yield the matching key along with the items in a tuple, resulting in ``(key, merge_fn(item1, item2))``. merge_fn: Function that combines the item from ``source_iterdatapipe`` and the matching item from ``map_datapipe``, by default a tuple is created Example: .. testsetup:: from operator import itemgetter .. testcode:: from torchdata.datapipes.iter import IterableWrapper from torchdata.datapipes.map import SequenceWrapper def merge_fn(tuple_from_iter, value_from_map): return tuple_from_iter[0], tuple_from_iter[1] + value_from_map dp1 = IterableWrapper([('a', 1), ('b', 2), ('c', 3)]) mapdp = SequenceWrapper({'a': 100, 'b': 200, 'c': 300, 'd': 400}) res_dp = dp1.zip_with_map(map_datapipe=mapdp, key_fn=itemgetter(0), merge_fn=merge_fn) print(list(res_dp)) Output: .. testoutput:: [('a', 101), ('b', 202), ('c', 303)] """ def __init__( self, source_iterdatapipe: IterDataPipe, map_datapipe: MapDataPipe, key_fn: Callable, merge_fn: Optional[Callable] = None, keep_key: bool = False, ): if not isinstance(map_datapipe, MapDataPipe): raise TypeError(f"map_datapipe must be a MapDataPipe, but its type is {type(map_datapipe)} instead.") self.source_iterdatapipe: IterDataPipe = source_iterdatapipe self.map_datapipe: MapDataPipe = map_datapipe _check_unpickable_fn(key_fn) self.key_fn: Callable = key_fn if merge_fn is not None: _check_unpickable_fn(merge_fn) self.merge_fn: Optional[Callable] = merge_fn self.keep_key = keep_key def __iter__(self) -> Iterator: for item in self.source_iterdatapipe: key = self.key_fn(item) try: map_item = self.map_datapipe[key] except (KeyError, IndexError): raise KeyError(f"key_fn maps {item} to {key}, which is not a valid key in the given MapDataPipe.") res = self.merge_fn(item, map_item) if self.merge_fn else (item, map_item) if self.keep_key: yield key, res else: yield res def __len__(self) -> int: return len(self.source_iterdatapipe) def _drop_index(idx_data): _, data = idx_data return data @functional_datapipe("round_robin_demux") class RoundRobinDemultiplexerIterDataPipe(IterDataPipe): r""" Splits the input DataPipe into multiple child DataPipes in the round-robin order (functional name: ``round_robin_demux``). A list of the child DataPipes is returned from this operation. Args: datapipe: Iterable DataPipe being filtered num_instances: number of instances of the DataPipe to create buffer_size: this defines the maximum number of inputs that the buffer can hold across all child DataPipes while waiting for their values to be yielded. Defaults to ``1000``. Use ``-1`` for the unlimited buffer. Examples: >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper(range(5)) >>> dp1, dp2 = source_dp.round_robin_demux(2) >>> list(dp1) [0, 2, 4] >>> len(dp1) 3 >>> list(dp2) [1, 3] >>> len(dp2) 2 """ def __new__(cls, datapipe: IterDataPipe, num_instances: int, buffer_size: int = 1000): if num_instances < 1: raise ValueError(f"Expected `num_instaces` larger than 0, but {num_instances} is found") if num_instances == 1: warnings.warn( "The operation of `round_robin_demux` with `num_instances=1` is an no-op and returns the provided `datapipe` in a list directly" ) return [datapipe] datapipe = datapipe.enumerate() container = _RoundRobinDemultiplexerIterDataPipe(datapipe, num_instances, buffer_size=buffer_size) return [_ChildDataPipe(container, i).map(_drop_index) for i in range(num_instances)] class _RoundRobinDemultiplexerIterDataPipe(_DemultiplexerIterDataPipe): def __init__(self, datapipe: IterDataPipe[T_co], num_instances: int, buffer_size: int): super().__init__(datapipe, num_instances, self._round_robin_fn, drop_none=False, buffer_size=buffer_size) def _round_robin_fn(self, idx_data) -> int: idx, _ = idx_data return idx % self.num_instances def get_length_by_instance(self, instance_id: int) -> int: n = len(self.main_datapipe) avg_length = n // self.num_instances return avg_length + 1 if n - avg_length * self.num_instances > instance_id else avg_length @functional_datapipe("unzip") class UnZipperIterDataPipe(IterDataPipe[T]): r""" Takes in a DataPipe of Sequences, unpacks each Sequence, and return the elements in separate DataPipes based on their position in the Sequence (functional name: ``unzip``). The number of instances produced equals to the sequence length minus the number of columns to skip. Note: Each sequence within the DataPipe should have the same length, specified by the input argument `sequence_length`. Args: source_datapipe: Iterable DataPipe with sequences of data sequence_length: Length of the sequence within the source_datapipe. All elements should have the same length. buffer_size: this restricts how far ahead the leading child DataPipe can read relative to the slowest child DataPipe. Use -1 for the unlimited buffer. columns_to_skip: optional indices of columns that the DataPipe should skip (each index should be an integer from 0 to sequence_length - 1) Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper([(i, i + 10, i + 20) for i in range(3)]) >>> dp1, dp2, dp3 = source_dp.unzip(sequence_length=3) >>> list(dp1) [0, 1, 2] >>> list(dp2) [10, 11, 12] >>> list(dp3) [20, 21, 22] """ def __new__( cls, source_datapipe: IterDataPipe[Sequence[T]], sequence_length: int, buffer_size: int = 1000, columns_to_skip: Optional[Sequence[int]] = None, ): if columns_to_skip is None: instance_ids = list(range(sequence_length)) else: skips = set(columns_to_skip) instance_ids = [i for i in range(sequence_length) if i not in skips] if len(instance_ids) == 0: raise RuntimeError( "All instances are being filtered out in UnZipperIterDataPipe. Please check" "the input `sequence_length` and `columns_to_skip`." ) # The implementation basically uses Forker but only yields a specific element within the sequence container = _UnZipperIterDataPipe(source_datapipe, instance_ids, buffer_size) # type: ignore[arg-type] return [_ChildDataPipe(container, i) for i in range(len(instance_ids))] class _UnZipperIterDataPipe(_ForkerIterDataPipe): def __init__(self, datapipe: IterDataPipe, instance_ids: List[int], buffer_size: int = 1000): super().__init__(datapipe, len(instance_ids), buffer_size) # type: ignore[arg-type] self.instance_ids = instance_ids def get_next_element_by_instance(self, instance_id: int): r""" Note: Each element returned from the source datapipe is required to be a sequnce that can be subscribed with a column index """ for return_val in super().get_next_element_by_instance(instance_id): yield return_val[self.instance_ids[instance_id]] def __getstate__(self): state = super().__getstate__() return (*state, self.instance_ids) def __setstate__(self, state): super().__setstate__(state[:-1]) self.instance_ids = state[-1]
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import bz2 import gzip import lzma import os import pathlib import tarfile import zipfile from enum import Enum from io import IOBase from typing import Iterator, Optional, Tuple, Union from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils import StreamWrapper class CompressionType(Enum): GZIP = "gzip" LZMA = "lzma" TAR = "tar" ZIP = "zip" BZIP2 = "bz2" @functional_datapipe("decompress") class DecompressorIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]): r""" Takes tuples of path and compressed stream of data, and returns tuples of path and decompressed stream of data (functional name: ``decompress``). The input compression format can be specified or automatically detected based on the files' file extensions. Args: source_datapipe: IterDataPipe containing tuples of path and compressed stream of data file_type: Optional `string` or ``CompressionType`` that represents what compression format of the inputs Example: >>> from torchdata.datapipes.iter import FileLister, FileOpener >>> tar_file_dp = FileLister(self.temp_dir.name, "*.tar") >>> tar_load_dp = FileOpener(tar_file_dp, mode="b") >>> tar_decompress_dp = Decompressor(tar_load_dp, file_type="tar") >>> for _, stream in tar_decompress_dp: >>> print(stream.read()) b'0123456789abcdef' """ types = CompressionType _DECOMPRESSORS = { types.GZIP: lambda file: gzip.GzipFile(fileobj=file), types.LZMA: lambda file: lzma.LZMAFile(file), types.TAR: lambda file: tarfile.open(fileobj=file, mode="r:*"), types.ZIP: lambda file: zipfile.ZipFile(file=file), types.BZIP2: lambda file: bz2.BZ2File(filename=file), } def __init__( self, source_datapipe: IterDataPipe[Tuple[str, IOBase]], file_type: Optional[Union[str, CompressionType]] = None ) -> None: self.source_datapipe: IterDataPipe[Tuple[str, IOBase]] = source_datapipe if isinstance(file_type, str): file_type = self.types(file_type.lower()) self.file_type: Optional[CompressionType] = file_type def _detect_compression_type(self, path: str) -> CompressionType: if self.file_type: return self.file_type ext = "".join(pathlib.Path(path).suffixes) if ext in {".tar.gz", ".tar.xz"}: return self.types.TAR else: ext = os.path.splitext(path)[1] if ext == ".tar": return self.types.TAR elif ext == ".xz": return self.types.LZMA elif ext == ".gz": return self.types.GZIP elif ext == ".zip": return self.types.ZIP elif ext == ".bz2": return self.types.BZIP2 else: raise RuntimeError( f"File at {path} has file extension {ext}, which does not match what are supported by" f"ExtractorIterDataPipe." ) def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]: for path, file in self.source_datapipe: try: file_type = self._detect_compression_type(path) decompressor = self._DECOMPRESSORS[file_type] yield path, StreamWrapper(decompressor(file), file, name=path) finally: if isinstance(file, StreamWrapper): file.autoclose() @functional_datapipe("extract") class ExtractorIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]): r""" Please use ``Decompressor`` or ``.decompress`` instead. """ def __new__( cls, source_datapipe: IterDataPipe[Tuple[str, IOBase]], file_type: Optional[Union[str, CompressionType]] = None ): return DecompressorIterDataPipe(source_datapipe, file_type)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import threading import time from collections import deque from typing import Deque, final, Optional, Sized import torch from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils import pin_memory_fn PRODUCER_SLEEP_INTERVAL = 0.0001 # Interval between buffer fulfillment checks CONSUMER_SLEEP_INTERVAL = 0.0001 # Interval between checking items availability in buffer class _PrefetchData: def __init__(self, source_datapipe, buffer_size: int): self.run_prefetcher: bool = True self.prefetch_buffer: Deque = deque() self.buffer_size: int = buffer_size self.source_datapipe = source_datapipe self.stop_iteration: bool = False self.paused: bool = False @functional_datapipe("prefetch") class PrefetcherIterDataPipe(IterDataPipe): r""" Prefetches elements from the source DataPipe and puts them into a buffer (functional name: ``prefetch``). Prefetching performs the operations (e.g. I/O, computations) of the DataPipes up to this one ahead of time and stores the result in the buffer, ready to be consumed by the subsequent DataPipe. It has no effect aside from getting the sample ready ahead of time. This is used by ``MultiProcessingReadingService`` when the arguments ``worker_prefetch_cnt`` (for prefetching at each worker process) or ``main_prefetch_cnt`` (for prefetching at the main loop) are greater than 0. Beyond the built-in use cases, this can be useful to put after I/O DataPipes that have expensive I/O operations (e.g. takes a long time to request a file from a remote server). Args: source_datapipe: IterDataPipe from which samples are prefetched buffer_size: the size of the buffer which stores the prefetched samples Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper(file_paths).open_files().prefetch(5) """ def __init__(self, source_datapipe, buffer_size: int = 10): self.source_datapipe = source_datapipe if buffer_size <= 0: raise ValueError("'buffer_size' is required to be a positive integer.") self.buffer_size = buffer_size self.thread: Optional[threading.Thread] = None self.prefetch_data: Optional[_PrefetchData] = None @staticmethod def thread_worker(prefetch_data: _PrefetchData): itr = iter(prefetch_data.source_datapipe) while not prefetch_data.stop_iteration: # Run if not paused while prefetch_data.run_prefetcher: if len(prefetch_data.prefetch_buffer) < prefetch_data.buffer_size: try: item = next(itr) prefetch_data.prefetch_buffer.append(item) except Exception as e: prefetch_data.run_prefetcher = False prefetch_data.stop_iteration = True prefetch_data.prefetch_buffer.append(e) else: # Buffer is full, waiting for main thread to consume items # TODO: Calculate sleep interval based on previous consumption speed time.sleep(PRODUCER_SLEEP_INTERVAL) prefetch_data.paused = True # Sleep longer when this prefetcher thread is paused time.sleep(PRODUCER_SLEEP_INTERVAL * 10) def __iter__(self): try: prefetch_data = _PrefetchData(self.source_datapipe, self.buffer_size) self.prefetch_data = prefetch_data thread = threading.Thread(target=PrefetcherIterDataPipe.thread_worker, args=(prefetch_data,), daemon=True) thread.start() self.thread = thread # Lazily import to prevent circular import from torchdata.dataloader2 import communication while not prefetch_data.stop_iteration or len(prefetch_data.prefetch_buffer) > 0: if len(prefetch_data.prefetch_buffer) > 0: data = prefetch_data.prefetch_buffer.popleft() if isinstance(data, Exception): if isinstance(data, (StopIteration, communication.iter.TerminateRequired)): break raise data yield data else: time.sleep(CONSUMER_SLEEP_INTERVAL) finally: if "prefetch_data" in locals(): prefetch_data.run_prefetcher = False prefetch_data.stop_iteration = True prefetch_data.paused = False if "thread" in locals(): thread.join() def __getstate__(self): """ Getting state in threading environment requires next operations: 1) Stopping of the producer thread. 2) Saving buffer. 3) Adding lazy restart of producer thread when __next__ is called again (this will guarantee that you only change state of the source_datapipe after entire state of the graph is saved). """ # TODO: Update __getstate__ and __setstate__ to support snapshotting and restoration return {"source_datapipe": self.source_datapipe, "buffer_size": self.buffer_size} def __setstate__(self, state): self.source_datapipe = state["source_datapipe"] self.buffer_size = state["buffer_size"] self.thread = None @final def reset(self): self.shutdown() def pause(self): if self.thread is not None: assert self.prefetch_data is not None self.prefetch_data.run_prefetcher = False if self.thread.is_alive(): # Blocking until the thread is paused while not self.prefetch_data.paused: time.sleep(PRODUCER_SLEEP_INTERVAL * 10) @final def resume(self): if ( self.thread is not None and self.prefetch_data is not None and (not self.prefetch_data.stop_iteration or len(self.prefetch_data.prefetch_buffer) > 0) ): self.prefetch_data.run_prefetcher = True self.prefetch_data.paused = False @final def shutdown(self): if hasattr(self, "prefetch_data") and self.prefetch_data is not None: self.prefetch_data.run_prefetcher = False self.prefetch_data.stop_iteration = True self.prefetch_data.paused = False self.prefetch_data = None if hasattr(self, "thread") and self.thread is not None: self.thread.join() self.thread = None def __del__(self): self.shutdown() def __len__(self) -> int: if isinstance(self.source_datapipe, Sized): return len(self.source_datapipe) raise TypeError(f"{type(self).__name__} instance doesn't have valid length") @functional_datapipe("pin_memory") class PinMemoryIterDataPipe(PrefetcherIterDataPipe): r""" Prefetches one element from the source DataPipe and moves it to pinned memory (functional name: ``pin_memory``). When used with ``MultiProcessingReadingService``, this DataPipe would be kept in the main process to prevent duplicated CUDA context creation. Args: source_datapipe: IterDataPipe from which samples are moved to pinned memory. device: The device to pin samples. pin_memory_fn: Optional callable function to move data to pinned memory. A ``pin_memory_fn`` to handle general objects is provided by default. Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper(file_paths).open_files().readlines().map(tokenize_fn).pin_memory() """ def __init__(self, source_datapipe, device=None, pin_memory_fn=pin_memory_fn): if not torch.cuda.is_available(): raise RuntimeError("``pin_memory`` can only be used when CUDA is available.") # TODO: Add support for dynamic buffer based on the available size of pinned memory super().__init__(source_datapipe, buffer_size=2) if device is None: device = torch.cuda.current_device() self.device = device self.pin_memory_fn = pin_memory_fn def is_replicable(self) -> bool: return False @staticmethod def thread_worker(prefetch_data: _PrefetchData, pin_memory_fn, device): # type: ignore[override] itr = iter(prefetch_data.source_datapipe) while not prefetch_data.stop_iteration: # Run if not paused while prefetch_data.run_prefetcher: if len(prefetch_data.prefetch_buffer) < prefetch_data.buffer_size: try: item = pin_memory_fn(next(itr), device) prefetch_data.prefetch_buffer.append(item) except Exception as e: prefetch_data.run_prefetcher = False prefetch_data.stop_iteration = True prefetch_data.prefetch_buffer.append(e) else: # Buffer is full, waiting for main thread to consume items # TODO: Calculate sleep interval based on previous consumption speed time.sleep(PRODUCER_SLEEP_INTERVAL) # Sleep longer when this prefetcher thread is paused time.sleep(PRODUCER_SLEEP_INTERVAL * 10) def __iter__(self): try: prefetch_data = _PrefetchData(self.source_datapipe, self.buffer_size) self.prefetch_data = prefetch_data thread = threading.Thread( target=PinMemoryIterDataPipe.thread_worker, args=(prefetch_data, self.pin_memory_fn, self.device), daemon=True, ) thread.start() self.thread = thread # Lazily import to prevent circular import from torchdata.dataloader2 import communication while not prefetch_data.stop_iteration or len(prefetch_data.prefetch_buffer) > 0: if len(prefetch_data.prefetch_buffer) > 0: data = prefetch_data.prefetch_buffer.popleft() if isinstance(data, Exception): if isinstance(data, (StopIteration, communication.iter.TerminateRequired)): break raise data yield data else: time.sleep(CONSUMER_SLEEP_INTERVAL) finally: if "prefetch_data" in locals(): prefetch_data.run_prefetcher = False prefetch_data.stop_iteration = True prefetch_data.paused = False if "thread" in locals(): thread.join() def __getstate__(self): state = super().__getstate__() state["pin_memory_fn"] = self.pin_memory_fn state["device"] = self.device return state def __setstate__(self, state): super().__setstate__(state) self.pin_memory_fn = state["pin_memory_fn"] self.device = state["device"]
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Set, Sized from torch.utils.data.datapipes._decorator import functional_datapipe from torch.utils.data.datapipes.datapipe import IterDataPipe @functional_datapipe("mux_longest") class MultiplexerLongestIterDataPipe(IterDataPipe): r""" Yields one element at a time from each of the input Iterable DataPipes (functional name: ``mux_longest``). As in, one element from the 1st input DataPipe, then one element from the 2nd DataPipe in the next iteration, and so on. It skips over DataPipes that are exhausted, and ends when all input DataPipes are exhausted. Args: datapipes: Iterable DataPipes that will take turn to yield their elements, until they are all exhausted Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) >>> list(dp1.mux_longest(dp2, dp3)) [0, 10, 20, 1, 11, 21, 2, 12, 22, 3, 13, 23, 4, 14, 24] """ def __init__(self, *datapipes): self.datapipes = datapipes def __iter__(self): iterators = [iter(x) for x in self.datapipes] finished: Set[int] = set() while len(finished) < len(iterators): for i in range(len(iterators)): if i not in finished: try: value = next(iterators[i]) yield value except StopIteration: finished.add(i) def __len__(self): if all(isinstance(dp, Sized) for dp in self.datapipes): return sum(len(dp) for dp in self.datapipes) else: raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import bz2 import warnings from io import BufferedIOBase from typing import Iterable, Iterator, Tuple from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils import StreamWrapper from torchdata.datapipes.utils.common import validate_pathname_binary_tuple @functional_datapipe("load_from_bz2") class Bz2FileLoaderIterDataPipe(IterDataPipe[Tuple[str, BufferedIOBase]]): r""" Decompresses bz2 binary streams from an Iterable DataPipe which contains tuples of path name and bz2 binary streams, and yields a tuple of path name and extracted binary stream (functional name: ``load_from_bz2``). Args: datapipe: Iterable DataPipe that provides tuples of path name and bz2 binary stream length: Nominal length of the DataPipe Note: The opened file handles will be closed automatically if the default ``DecoderDataPipe`` is attached. Otherwise, user should be responsible to close file handles explicitly or let Python's GC close them periodically. Example: >>> from torchdata.datapipes.iter import FileLister, FileOpener >>> datapipe1 = FileLister(".", "*.bz2") >>> datapipe2 = FileOpener(datapipe1, mode="b") >>> bz2_loader_dp = datapipe2.load_from_bz2() >>> for _, stream in bz2_loader_dp: >>> print(stream.read()) b'0123456789abcdef' """ def __init__(self, datapipe: Iterable[Tuple[str, BufferedIOBase]], length: int = -1) -> None: super().__init__() self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe self.length: int = length def __iter__(self) -> Iterator[Tuple[str, BufferedIOBase]]: for data in self.datapipe: validate_pathname_binary_tuple(data) pathname, data_stream = data try: extracted_fobj = bz2.open(data_stream, mode="rb") # type: ignore[call-overload] new_pathname = pathname.rstrip(".bz2") yield new_pathname, StreamWrapper(extracted_fobj, data_stream, name=new_pathname) # type: ignore[misc] except Exception as e: warnings.warn(f"Unable to extract files from corrupted bzip2 stream {pathname} due to: {e}, abort!") raise e finally: if isinstance(data_stream, StreamWrapper): data_stream.autoclose() def __len__(self) -> int: if self.length == -1: raise TypeError(f"{type(self).__name__} instance doesn't have valid length") return self.length
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Callable, final, Iterator, List, Tuple, TypeVar from torch.utils.data.datapipes.utils.common import _check_unpickable_fn from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe T_co = TypeVar("T_co", covariant=True) def _default_line_join(lines: List[str]) -> str: return "\n".join(lines) @functional_datapipe("lines_to_paragraphs") class ParagraphAggregatorIterDataPipe(IterDataPipe[Tuple[str, str]]): r""" Aggregates lines of text from the same file into a single paragraph (functional name: ``lines_to_paragraphs``). Specifically, this accepts a DataPipe consisting of tuples of a file name and a line. For each tuple, it checks if the file name matches the file name from the previous tuple. If yes, it joins the current line with existing paragraph. If the file names do not match, the existing paragraph is yielded and a new paragraph starts. Args: source_datapipe: a DataPipe with tuples of a file name and a line joiner: a function that joins a list of lines together Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper( >>> [("file1", "Line1"), ("file1", "Line2"), ("file2", "Line2,1"), ("file2", "Line2,2"), ("file2", "Line2,3")] >>> ) >>> para_agg_dp = source_dp.lines_to_paragraphs(joiner=lambda ls: " ".join(ls)) >>> list(para_agg_dp) [('file1', 'Line1 Line2'), ('file2', 'Line2,1 Line2,2 Line2,3')] """ def __init__(self, source_datapipe: IterDataPipe[Tuple[str, T_co]], joiner: Callable = _default_line_join) -> None: self.source_datapipe: IterDataPipe[Tuple[str, T_co]] = source_datapipe _check_unpickable_fn(joiner) self.joiner: Callable = joiner self.buffer: List = [] def __iter__(self) -> Iterator[Tuple[str, str]]: prev_filename = None for filename, line in self.source_datapipe: if prev_filename is None: prev_filename = filename if line and prev_filename == filename: self.buffer.append(line) else: if self.buffer: yield prev_filename, self.joiner(self.buffer) # type: ignore[misc] if line: self.buffer = [line] else: self.buffer = [] prev_filename = filename if self.buffer: yield prev_filename, self.joiner(self.buffer) # type: ignore[misc] @final def reset(self) -> None: self.buffer = [] def __getstate__(self): state = (self.source_datapipe, self.joiner) if IterDataPipe.getstate_hook is not None: return IterDataPipe.getstate_hook(state) return state def __setstate__(self, state): (self.source_datapipe, self.joiner) = state self.buffer = [] def __del__(self): self.buffer.clear()
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import threading import time from collections import deque from concurrent.futures import Future, ThreadPoolExecutor, TimeoutError from dataclasses import dataclass from functools import partial from typing import Callable, Deque, final, Iterator, Optional, TypeVar import torch import torch.distributed as dist from torchdata._constants import default_timeout_in_s from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.iter.util.prefetcher import PRODUCER_SLEEP_INTERVAL T_co = TypeVar("T_co", covariant=True) __all__ = ["Expected", "FullSyncIterDataPipe", "PrefetchTimeoutError"] class PrefetchTimeoutError(RuntimeError): def __init__(self, timeout: int) -> None: super().__init__(f"Fail to fetch data within {timeout} seconds") self.timeout = timeout class _EndOfPrefetch: ... @dataclass class Expected: r""" Expected data provided to callback function in ``_PrefetchExecutor``. """ index: int error: Optional[BaseException] = None def has_error(self) -> bool: return self.error is not None class _PrefetchExecutor: # TODO: Improvement - merge with the `_PrefetchData` class of prefetcher.py # May not be possible right now due to circular import def __init__( self, datapipe_iterator: Iterator, prefetch_size: int = 1, callback_fn: Optional[Callable[[Expected], None]] = None, timeout: int = default_timeout_in_s, ) -> None: self.datapipe_iterator = datapipe_iterator self.prefetch_size = prefetch_size self.callback_fn = callback_fn self.timeout = timeout # Use max_workers as 1 to guarantee the order of data fetched from iterator self._executor = ThreadPoolExecutor(max_workers=1) self._futures: Deque[Future] = deque() self._lock = threading.RLock() # `_end_flag` indicates the end of epoch or an exception has been raised, # with the exception being handled by `callback_fn` self._end_flag: bool = False self._paused: bool = False self._is_shutdown: bool = False # indicates if `_executor` has been shutdown by `shutdown` method self._idx = 0 for _ in range(prefetch_size): with self._lock: if self._end_flag: break fetch_future: Future = self._executor.submit(self.fetch_next) fetch_future.add_done_callback(partial(self._done_callback_fn, self._idx)) self._futures.append(fetch_future) with self._lock: self._idx += 1 def fetch_next(self): while self._paused: time.sleep(PRODUCER_SLEEP_INTERVAL * 10) return next(self.datapipe_iterator) def _done_callback_fn(self, index: int, f: Future): if f.exception(): with self._lock: self._end_flag = True if self.callback_fn is not None: # Invoke `callback_fn` in order to set `FullSyncDP._done_callback` to `True` self.callback_fn(Expected(index, f.exception())) def return_next(self): if self._futures: fetch_future = self._futures.popleft() try: data = fetch_future.result(timeout=self.timeout) except TimeoutError: raise PrefetchTimeoutError(self.timeout) with self._lock: if not self._end_flag and not self._is_shutdown: next_future = self._executor.submit(self.fetch_next) next_future.add_done_callback(partial(self._done_callback_fn, self._idx)) self._futures.append(next_future) self._idx += 1 else: data = _EndOfPrefetch() return data def shutdown(self): self._paused = False self._is_shutdown = True while self._futures: self._futures.popleft().cancel() self._executor.shutdown(wait=True) def pause(self): self._paused = True def resume(self): self._paused = False @functional_datapipe("fullsync") class FullSyncIterDataPipe(IterDataPipe[T_co]): r""" Synchronizes data across distributed processes to prevent hanging during training, which is caused by uneven sharded data (functional name: ``fullsync``). It stops when the shortest distributed shard is exhausted. It would be appended at the end of the graph of ``DataPipe`` by ``DistributedReadingService`` automatically. Args: datapipe: IterDataPipe that needs to be synchronized timeout: Timeout for prefetching data in seconds. Default value equals to 30 minutes Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> # Distributed training with world size 2 >>> world_size = 2 >>> dp = IterableWrapper(list(range(23))).sharding_filter() >>> torch.utils.data.graph_settings.apply_sharding(dp, world_size, rank) >>> # Rank 0 has 12 elements; Rank 1 has 11 elements >>> for d in dp: ... model(d) # Hanging at the end of epoch due to uneven sharding >>> dp = dp.fullsync() >>> # Both ranks have 11 elements >>> for d in dp: ... model(d) # Not hanging anymore """ def __init__(self, datapipe: IterDataPipe, timeout=default_timeout_in_s): if not dist.is_available(): raise RuntimeError("Torch Distributed is required to be available") self.datapipe = datapipe self.timeout: int = timeout self._process_group: Optional[dist.ProcessGroup] = None self._world_size: int = 1 self._lock = threading.RLock() self._cv = threading.Condition(lock=self._lock) self._executor: Optional[_PrefetchExecutor] = None # Use single values rather than deques for the following variables # because fullsync only prefetches 1 element self._error = None self._sync_counter = torch.tensor([0], dtype=torch.int32) self._done_callback = False def _callback_fn(self, exp: Expected) -> None: with self._cv: if exp.has_error(): if not isinstance(exp.error, StopIteration): self._error = exp.error # type: ignore[assignment] self._sync_counter = torch.tensor([0], dtype=torch.int32) else: self._sync_counter = torch.tensor([1], dtype=torch.int32) dist.all_reduce( tensor=self._sync_counter, op=dist.ReduceOp.SUM, group=self._process_group, ) self._done_callback = True self._cv.notify() def __iter__(self) -> Iterator[T_co]: assert self._executor is None if not (dist.is_available() and dist.is_initialized()): raise RuntimeError("Torch Distributed is required to be initialized to use `FullSync`.") if self._process_group is None: self._process_group = dist.new_group(backend="gloo") self._world_size = dist.get_world_size() if self._world_size == 1: # The below functionalities are not needed if `_world_size == 1` yield from self.datapipe return self._executor = _PrefetchExecutor(iter(self.datapipe), 1, self._callback_fn, self.timeout) while True: with self._cv: is_success = self._cv.wait_for( lambda: self._done_callback is True, self.timeout, ) if not is_success: raise PrefetchTimeoutError(self.timeout) if self._error is not None: raise self._error if bool(self._sync_counter < self._world_size): break self._done_callback = False data = self._executor.return_next() # type: ignore[attr-defined] if isinstance(data, _EndOfPrefetch): break yield data @final def reset(self): if self._executor is not None: self._executor.shutdown() self._executor = None self._world_size = 1 with self._cv: self._error = None self._sync_counter = torch.tensor([0], dtype=torch.int32) self._done_callback = False def is_replicable(self): return False def __getstate__(self): state = ( self.datapipe, self.timeout, ) if IterDataPipe.getstate_hook is not None: return IterDataPipe.getstate_hook(state) return state def __setstate__(self, state): self.datapipe, self.timeout = state self._process_group = None self._world_size = 1 self._lock = threading.RLock() self._cv = threading.Condition(lock=self._lock) self._executor = None self._error = None self._sync_counter = torch.tensor([0], dtype=torch.int32) self._done_callback = False @final def pause(self): if self._executor is not None: self._executor.pause() @final def resume(self): if self._executor is not None: self._executor.resume() @final def shutdown(self): if self._executor is not None: self._executor.shutdown() self._executor = None def __del__(self): self.shutdown()
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os from typing import Any, Callable, Iterator, Optional, Tuple, Union from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe U = Union[bytes, bytearray, str] @functional_datapipe("save_to_disk") class SaverIterDataPipe(IterDataPipe[str]): r""" Takes in a DataPipe of tuples of metadata and data, saves the data to the target path generated by the ``filepath_fn`` and metadata, and yields file path on local file system (functional name: ``save_to_disk``). Args: source_datapipe: Iterable DataPipe with tuples of metadata and data mode: Node in which the file will be opened for write the data (``"w"`` by default) filepath_fn: Function that takes in metadata and returns the target path of the new file Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> import os >>> def filepath_fn(name: str) -> str: >>> return os.path.join(".", os.path.basename(name)) >>> name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"} >>> source_dp = IterableWrapper(sorted(name_to_data.items())) >>> saver_dp = source_dp.save_to_disk(filepath_fn=filepath_fn, mode="wb") >>> res_file_paths = list(saver_dp) >>> res_file_paths ['./1.txt', './2.txt', './3.txt'] """ def __init__( self, source_datapipe: IterDataPipe[Tuple[Any, U]], mode: str = "w", filepath_fn: Optional[Callable] = None, ): self.source_datapipe: IterDataPipe[Tuple[Any, U]] = source_datapipe self.mode: str = mode if "w" in mode else "w" + mode self.fn: Optional[Callable] = filepath_fn def __iter__(self) -> Iterator[str]: for filepath, data in self.source_datapipe: if self.fn is not None: filepath = self.fn(filepath) dirname = os.path.dirname(filepath) if not os.path.exists(dirname): os.makedirs(dirname) # with portalocker.Lock(filepath, self.mode, flags=portalocker.LockFlags.EXCLUSIVE) as f: # TODO(639): Enabling line above will require all read sites to be updated (Win). with open(filepath, self.mode) as f: f.write(data) yield filepath def __len__(self) -> int: return len(self.source_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import io import os.path from typing import Iterator, Tuple from unittest.mock import patch from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils import StreamWrapper from torchdata.datapipes.utils.common import validate_pathname_binary_tuple class RarfilePatcher: def __init__(self): from rarfile import DirectReader unpatched_read = DirectReader._read def patched_read(self, cnt=-1): self._fd.seek(self._inf.header_offset, 0) self._cur = self._parser._parse_header(self._fd) self._cur_avail = self._cur.add_size return unpatched_read(self, cnt) self._patch = patch("rarfile.DirectReader._read", new=patched_read) def start(self): self._patch.start() def stop(self): self._patch.stop() _PATCHED = False @functional_datapipe("load_from_rar") class RarArchiveLoaderIterDataPipe(IterDataPipe[Tuple[str, io.BufferedIOBase]]): r""" Decompresses rar binary streams from input Iterable Datapipes which contains tuples of path name and rar binary stream, and yields a tuple of path name and extracted binary stream (functional name: ``load_from_rar``). Note: The nested RAR archive is not supported by this DataPipe due to the limitation of the archive type. Please extract outer RAR archive before reading the inner archive. Args: datapipe: Iterable DataPipe that provides tuples of path name and rar binary stream length: Nominal length of the DataPipe Example: >>> from torchdata.datapipes.iter import FileLister, FileOpener >>> datapipe1 = FileLister(".", "*.rar") >>> datapipe2 = FileOpener(datapipe1, mode="b") >>> rar_loader_dp = datapipe2.load_from_rar() >>> for _, stream in rar_loader_dp: >>> print(stream.read()) b'0123456789abcdef' """ def __init__(self, datapipe: IterDataPipe[Tuple[str, io.BufferedIOBase]], *, length: int = -1): try: import rarfile except ImportError as error: raise ModuleNotFoundError( "Package `rarfile` is required to be installed to use this datapipe. " "Please use `pip install rarfile` or `conda -c conda-forge install rarfile` to install it." ) from error # check if at least one system library for reading rar archives is available to be used by rarfile rarfile.tool_setup() self.datapipe = datapipe self.length = length def __iter__(self) -> Iterator[Tuple[str, io.BufferedIOBase]]: import rarfile global _PATCHED if not _PATCHED: patcher = RarfilePatcher() patcher.start() _PATCHED = True for data in self.datapipe: try: validate_pathname_binary_tuple(data) path, stream = data if isinstance(stream, rarfile.RarExtFile) or ( isinstance(stream, StreamWrapper) and isinstance(stream.file_obj, rarfile.RarExtFile) ): raise ValueError( f"Nested RAR archive is not supported by {type(self).__name__}. Please extract outer archive first." ) rar = rarfile.RarFile(stream) for info in rar.infolist(): if info.is_dir(): continue inner_path = os.path.join(path, info.filename) file_obj = rar.open(info) yield inner_path, StreamWrapper(file_obj, stream, name=path) # type: ignore[misc] finally: if isinstance(stream, StreamWrapper): stream.autoclose() def __len__(self) -> int: if self.length == -1: raise TypeError(f"{type(self).__name__} instance doesn't have valid length") return self.length
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Iterator, List, Optional, Set, Sized, Tuple from torch.utils.data.datapipes._decorator import functional_datapipe from torch.utils.data.datapipes.datapipe import IterDataPipe @functional_datapipe("zip_longest") class ZipperLongestIterDataPipe(IterDataPipe): r""" Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip_longest``). The output is stopped until all input DataPipes are exhausted. If any input DataPipe is exhausted, missing values are filled-in with `fill_value` (default value is None). Args: *datapipes: Iterable DataPipes being aggregated *fill_value: Value that user input to fill in the missing values from DataPipe. Default value is None. Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp1, dp2, dp3 = IterableWrapper(range(3)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) >>> list(dp1.zip_longest(dp2, dp3)) [(0, 10, 20), (1, 11, 21), (2, 12, 22), (None, 13, 23), (None, 14, 24)] >>> list(dp1.zip_longest(dp2, dp3, -1)) [(0, 10, 20), (1, 11, 21), (2, 12, 22), (-1, 13, 23), (-1, 14, 24)] """ datapipes: Tuple[IterDataPipe] length: Optional[int] fill_value: Any def __init__( self, *datapipes: IterDataPipe, fill_value: Any = None, ): if not all(isinstance(dp, IterDataPipe) for dp in datapipes): raise TypeError("All inputs are required to be `IterDataPipe` " "for `ZipperLongestIterDataPipe`.") super().__init__() self.datapipes = datapipes # type: ignore[assignment] self.fill_value = fill_value def __iter__(self) -> Iterator[Tuple]: iterators = [iter(x) for x in self.datapipes] finished: Set[int] = set() while len(finished) < len(iterators): values: List[Any] = [] for i in range(len(iterators)): value = self.fill_value if i not in finished: try: value = next(iterators[i]) except StopIteration: finished.add(i) if len(finished) == len(iterators): return values.append(value) yield tuple(values) def __len__(self) -> int: if all(isinstance(dp, Sized) for dp in self.datapipes): return max(len(dp) for dp in self.datapipes) else: raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import json from typing import Dict, IO, Iterator, Tuple from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe @functional_datapipe("parse_json_files") class JsonParserIterDataPipe(IterDataPipe[Tuple[str, Dict]]): r""" Reads from JSON data streams and yields a tuple of file name and JSON data (functional name: ``parse_json_files``). Args: source_datapipe: a DataPipe with tuples of file name and JSON data stream kwargs: keyword arguments that will be passed through to ``json.loads`` Example: >>> from torchdata.datapipes.iter import IterableWrapper, FileOpener >>> import os >>> def get_name(path_and_stream): >>> return os.path.basename(path_and_stream[0]), path_and_stream[1] >>> datapipe1 = IterableWrapper(["empty.json", "1.json", "2.json"]) >>> datapipe2 = FileOpener(datapipe1, mode="b") >>> datapipe3 = datapipe2.map(get_name) >>> json_dp = datapipe3.parse_json_files() >>> list(json_dp) [('1.json', ['foo', {'bar': ['baz', None, 1.0, 2]}]), ('2.json', {'__complex__': True, 'real': 1, 'imag': 2})] """ def __init__(self, source_datapipe: IterDataPipe[Tuple[str, IO]], **kwargs) -> None: self.source_datapipe: IterDataPipe[Tuple[str, IO]] = source_datapipe self.kwargs = kwargs def __iter__(self) -> Iterator[Tuple[str, Dict]]: for file_name, stream in self.source_datapipe: data = stream.read() stream.close() yield file_name, json.loads(data, **self.kwargs) def __len__(self) -> int: return len(self.source_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import hashlib import inspect import os.path import sys import time import uuid import warnings from collections import deque from functools import partial from typing import Any, Callable, Deque, Dict, Iterator, List, Optional, Tuple, TypeVar try: import portalocker except ImportError: portalocker = None from torch.utils.data.datapipes.utils.common import _check_unpickable_fn, DILL_AVAILABLE from torch.utils.data.graph import traverse_dps from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterableWrapper, IterDataPipe if DILL_AVAILABLE: import dill dill.extend(use_dill=False) def _assert_portalocker() -> None: try: import portalocker # noqa: F401 except ImportError as e: if os.name == "nt" and str(e).startswith("DLL load failed while importing"): print( "Please take a look at FAQ in https://github.com/pytorch/data#frequently-asked-questions-faq" "for the solution of this Error." ) raise else: raise ModuleNotFoundError( "Package `portalocker` is required to be installed to use this datapipe." "Please use `pip install 'portalocker>=2.0.0'` or" "`conda install -c conda-forge 'portalocker>=2/0.0'`" "to install the package" ) T_co = TypeVar("T_co", covariant=True) PROMISE_FILE_DELETE_TIMEOUT = 30 PROMISE_FILE_DELETE_RETRY_INTERVAL = 0.005 from enum import IntEnum class CacheState(IntEnum): UNCACHED = 0 CACHED_SINGLE_ENTITY = 1 CACHED_MULTIPLE_ENTITIES = 2 @functional_datapipe("in_memory_cache") class InMemoryCacheHolderIterDataPipe(IterDataPipe[T_co]): r""" Stores elements from the source DataPipe in memory, up to a size limit if specified (functional name: ``in_memory_cache``). This cache is FIFO - once the cache is full, further elements will not be added to the cache until the previous ones are yielded and popped off from the cache. Args: source_dp: source DataPipe from which elements are read and stored in memory size: The maximum size (in megabytes) that this DataPipe can hold in memory. This defaults to unlimited. Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper(range(10)) >>> cache_dp = source_dp.in_memory_cache(size=5) >>> list(cache_dp) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ size: Optional[int] = None idx: int def __init__(self, source_dp: IterDataPipe[T_co], size: Optional[int] = None) -> None: self.source_dp: IterDataPipe[T_co] = source_dp # cache size in MB if size is not None: self.size = size * 1024 * 1024 self.cache: Optional[Deque] = None self.idx: int = 0 def __iter__(self) -> Iterator[T_co]: if self.cache: if self.idx > 0: for idx, data in enumerate(self.source_dp): if idx < self.idx: yield data else: break yield from self.cache else: # Local cache cache: Deque = deque() idx = 0 for data in self.source_dp: cache.append(data) # Cache reaches limit if self.size is not None and sys.getsizeof(cache) > self.size: cache.popleft() idx += 1 yield data self.cache = cache self.idx = idx def __len__(self) -> int: try: return len(self.source_dp) except TypeError: if self.cache: return self.idx + len(self.cache) else: raise TypeError(f"{type(self).__name__} instance doesn't have valid length until the cache is loaded.") def _generator_to_list(gen_fn): def list_fn(*args, **kwargs): gen = gen_fn(*args, **kwargs) return list(gen) return list_fn def _hash_check(filepath, hash_dict, hash_type): if filepath not in hash_dict: return False if hash_type == "sha256": hash_func = hashlib.sha256() else: hash_func = hashlib.md5() # with portalocker.Lock(filepath, "rb", flags=portalocker.LockFlags.SHARED) as f: # TODO(634): Line above will require all readers (Win) to obtain proper locks, # I'm putting it on hold as we need to modify PyTorch core codebase heavily. with open(filepath, "rb") as f: chunk = f.read(1024 ** 2) while chunk: hash_func.update(chunk) chunk = f.read(1024 ** 2) return hash_func.hexdigest() == hash_dict[filepath] def _promise_filename(filename, cache_uuid): return filename + ".promise." + str(cache_uuid) @functional_datapipe("on_disk_cache") class OnDiskCacheHolderIterDataPipe(IterDataPipe): """ Caches the outputs of multiple DataPipe operations to local files, which are typically performance bottleneck such download, decompress, and etc (functional name: ``on_disk_cache``). Must use ``.end_caching()`` to stop tracing the sequence of DataPipe operations and save the results to local files. Args: source_datapipe: IterDataPipe filepath_fn: Given data from ``source_datapipe``, returns file path(s) on local file system. Single file path is only allowed as output of the function. If resulted file name is different from the filename generated by the filename function of the end_cache original file name used to store list of yield files (and as cached items availability check) hash_dict: A Dictionary mapping file names to their corresponding hashes. If ``hash_dict`` is specified, the extra hash check will be attached before saving data to local file system. If the data doesn't meet the hash, the pipeline will raise an Error. hash_type: The type of hash function to apply extra_check_fn: Optional function to carry out extra validation on the given file path from ``filepath_fn``. Example: >>> from torchdata.datapipes.iter import IterableWrapper, HttpReader >>> url = IterableWrapper(["https://path/to/filename", ]) >>> def _filepath_fn(url): >>> temp_dir = tempfile.gettempdir() >>> return os.path.join(temp_dir, os.path.basename(url)) >>> hash_dict = {"expected_filepath": expected_MD5_hash} >>> cache_dp = url.on_disk_cache(filepath_fn=_filepath_fn, hash_dict=_hash_dict, hash_type="md5") >>> # You must call ``.end_caching`` at a later point to stop tracing and save the results to local files. >>> cache_dp = HttpReader(cache_dp).end_caching(mode="wb", filepath_fn=_filepath_fn) """ _temp_dict: Dict = {} def __init__( self, source_datapipe: IterDataPipe, filepath_fn: Optional[Callable] = None, hash_dict: Dict[str, str] = None, hash_type: str = "sha256", extra_check_fn: Optional[Callable[[str], bool]] = None, ): _assert_portalocker() self.source_datapipe = source_datapipe if filepath_fn is not None: _check_unpickable_fn(filepath_fn) assert not inspect.isgeneratorfunction(filepath_fn) # BC breaking, now only str is accepted as return if hash_dict is not None and hash_type not in ("sha256", "md5"): raise ValueError("Invalid hash_type requested, should be one of {}".format(("sha256", "md5"))) # TODO(VitalyFedyunin): We need some way to generate pipe uuids which will have similar result for # same graph but different nodes of distributed system self._uuid = uuid.uuid4() OnDiskCacheHolderIterDataPipe._temp_dict[self] = (filepath_fn, hash_dict, hash_type, extra_check_fn, self._uuid) self._end_caching_flag: bool = False self._download_everything = False # This is internal field used for load testing only def __iter__(self): if self._end_caching_flag: yield from self.source_datapipe else: # In case of BC breaking, use RuntimeError for now. Warning is another option raise RuntimeError("Please call `end_caching()` before iteration.") def __add__(self, other_datapipe): raise RuntimeError("`OnDiskCacheHolder` doesn't support add operation") # Since Demux is using this function, we should not attach it to OnDiskCacheHolder instance. # Otherwise, it would cause infinite recursion in graph traversal @staticmethod def _cache_check_fn(data, filepath_fn, hash_dict, hash_type, extra_check_fn, cache_uuid): filepath = data if filepath_fn is None else filepath_fn(data) assert not isinstance(filepath, (list, tuple)) # BC breaking, now only str is accepted as return result = CacheState.CACHED_SINGLE_ENTITY cached_file_exists = True if os.path.exists(_get_list_filename(filepath)): return int(CacheState.CACHED_MULTIPLE_ENTITIES) if not os.path.exists(filepath): cached_file_exists = False elif hash_dict is not None and not _hash_check(filepath, hash_dict, hash_type): # TODO: It is safer to assume that entire cache is compromised and require user to wipe it cached_file_exists = False elif extra_check_fn is not None and not extra_check_fn(filepath): # TODO: It is safer to assume that entire cache is compromised and require user to wipe it cached_file_exists = False if not cached_file_exists: promise_filepath = _promise_filename(filepath, cache_uuid) dirname = os.path.dirname(promise_filepath) if not os.path.exists(dirname): os.makedirs(dirname) with portalocker.Lock(promise_filepath, "a+", flags=portalocker.LockFlags.EXCLUSIVE) as promise_fh: promise_fh.seek(0) data = promise_fh.read() # TODO(VitalyFedyunin): Potentially there is old .promise file from previous failed run, we # need to somehow propagate uniq session id for dataloader, save and compare it here, # raising error file_exists = len(data) > 0 if not file_exists: result = CacheState.UNCACHED promise_fh.seek(0) data = promise_fh.read() # TODO(635): Potentially there is old .promise file from previous failed run, we # need to somehow propagate uniq session id for dataloader, save and compare it here, # raising error file_exists = len(data) > 0 if not file_exists: promise_fh.seek(0) promise_fh.write("[dataloader session uid]") promise_fh.truncate() promise_fh.flush() return int(result) def _end_caching(self): filepath_fn, hash_dict, hash_type, extra_check_fn, cache_uuid = OnDiskCacheHolderIterDataPipe._temp_dict.pop( self ) todo_dp: Any cached_dp: Any one_many_cached_dp: Any if self._download_everything: todo_dp = self.source_datapipe cached_dp = IterableWrapper([]) one_many_cached_dp = IterableWrapper([]) else: todo_dp, cached_dp, one_many_cached_dp = self.source_datapipe.demux( 3, partial( OnDiskCacheHolderIterDataPipe._cache_check_fn, filepath_fn=filepath_fn, hash_dict=hash_dict, hash_type=hash_type, extra_check_fn=extra_check_fn, cache_uuid=cache_uuid, ), ) # Cached: keep filepath(s) cached_dp = cached_dp.map(fn=filepath_fn) one_many_cached_dp = one_many_cached_dp.map(fn=filepath_fn) one_many_cached_dp = _ExtractFilesFromList(one_many_cached_dp) self.source_datapipe = todo_dp.memory_cell() self._end_caching_flag = True return cached_dp, one_many_cached_dp def _read_bytes(fd): return b"".join(fd) def _read_str(fd): return "".join(fd) def _is_promise_pending(promise_filename): return os.path.exists(promise_filename) class _WaitPendingCacheItemIterDataPipe(IterDataPipe): def __init__(self, source_datapipe, timeout=300, input_col=None, cache_uuid=None): self.source_datapipe = source_datapipe self.timeout = timeout self.input_col = input_col self._cache_uuid = cache_uuid def set_timeout(self, timeout): self.timeout = timeout def __iter__(self): for data in self.source_datapipe: if self.input_col is not None: filename = data[self.input_col] else: filename = data promise_filename = _promise_filename(filename, self._cache_uuid) start = time.time() while _is_promise_pending(promise_filename): time.sleep(0.01) if time.time() - start > self.timeout: raise Exception( f"OnDiskCache Exception: {filename} expected to be written by different process, " + f"but file is not ready in {self.timeout} seconds." ) yield data @functional_datapipe("memory_cell") class _MemoryCellIterDataPipe(IterDataPipe): def __init__(self, source_datapipe, remember_elements=1000): self.source_datapipe = source_datapipe self.buffer: List[Optional[Tuple[Any, Any]]] = [None for i in range(remember_elements)] self.remember_elements = remember_elements self.buffer_pos = -1 # TODO(VitalyFedyunin): Make it friendly to save/restore state def __iter__(self): for item in self.source_datapipe: item_id = uuid.uuid4() self.buffer_pos = (self.buffer_pos + 1) % self.remember_elements self.buffer[self.buffer_pos] = (item_id, item) yield item def get_last(self): # Returns tuple of elements, autogenerated id of the last returned row and its value return self.buffer[self.buffer_pos] def get_buffer(self): # Returns last returned id+element and others in the order from latest to oldest. result = [] for i in range(self.remember_elements): idx = (self.buffer_pos - i) % self.remember_elements if self.buffer[idx] is not None: result.append(self.buffer[idx]) return result def _get_list_filename(file_name): return file_name + ".torchdata_list" class _ExtractFilesFromList(IterDataPipe): def __init__(self, source_datapipe): self.source_datapipe = source_datapipe def __iter__(self): for filename in self.source_datapipe: with open(_get_list_filename(filename)) as fh: for line in fh: inner_file_name = line.rstrip() yield filename, inner_file_name class _FulfilledPromisesIterDataPipe(IterDataPipe): def __init__(self, source_datapipe, memory_cell_dp, first_filepath_fn, cache_uuid): self.source_datapipe = source_datapipe self.memory_cell_dp = memory_cell_dp self.first_filepath_fn = first_filepath_fn self._cache_uuid = cache_uuid @staticmethod def _del_promise_file(promise_filename, filename): if os.path.exists(promise_filename): retry = True start = time.time() while retry: retry = False try: os.unlink(promise_filename) except Exception as e: # Workaround about Windows not letting to delete file, while it is open by another process retry = True if time.time() - start > PROMISE_FILE_DELETE_TIMEOUT: raise Exception("Timeout while trying to recover from the ", type(e), e) time.sleep(PROMISE_FILE_DELETE_RETRY_INTERVAL) else: warnings.warn( f"Attempt to mark {promise_filename} promise (base of file {filename}) as fulfilled failed. Potentially missmatching filename functions of on_disk_cache and end_cache." ) def __iter__(self): last_record_uuid = None one_to_many_detected = False one_to_one_detected = False def fulfill_old_promises(buffer, last_record_uuid, first_filepath_fn, cache_uuid): for old_rec_uuid, old_rec in buffer: original_file_name = first_filepath_fn(old_rec) old_promise_filename = _promise_filename(original_file_name, cache_uuid) self._del_promise_file(old_promise_filename, original_file_name) if old_rec_uuid == last_record_uuid: break # TODO(VitalyFedyunin): If no match found, that means we exceeded length of memory_cell # and there is aggressive amount 1-to-zero cases, raise error and explain how to fix try: for filename in self.source_datapipe: rec_uuid, record = self.memory_cell_dp.get_last() original_file_name = self.first_filepath_fn(record) # TODO(VitalyFedyunin): For debug mode we can detect duplicate keys situations here and warn user if original_file_name != filename: # Situations when every archive unpacks to single file only are also considered as 1-M one_to_many_detected = True if one_to_one_detected: raise Exception("Disovered different keys when one-to-one mode previously assumed") # We are dealing with one-to-many situation now with open(_get_list_filename(original_file_name), "a") as fh: fh.write(f"{filename}\n") else: one_to_one_detected = True if one_to_many_detected: # Keys should be always the same (1-1 situation) or always different (1-many) sutuation raise Exception("first key somehow equal to secondary key") if rec_uuid != last_record_uuid: fulfill_old_promises( self.memory_cell_dp.get_buffer()[1:], last_record_uuid, self.first_filepath_fn, self._cache_uuid ) last_record_uuid = rec_uuid yield filename finally: if last_record_uuid is not None: fulfill_old_promises( self.memory_cell_dp.get_buffer(), last_record_uuid, self.first_filepath_fn, self._cache_uuid ) def _leave_second(x): return x[1] @functional_datapipe("end_caching") class EndOnDiskCacheHolderIterDataPipe(IterDataPipe): """ Indicates when the result of prior DataPipe will be saved local files specified by ``filepath_fn`` (functional name: ``end_caching``). Moreover, the result of source DataPipe is required to be a tuple of metadata and data, or a tuple of metadata and file handle. Args: datapipe: IterDataPipe with at least one ``OnDiskCacheHolder`` in the graph. mode: Mode in which the cached files are opened to write the data on disk. This is needed to be aligned with the type of data or file handle from ``datapipe``. ``"wb"`` is used by default. filepath_fn: Optional function to extract filepath from the metadata from ``datapipe``. By default, it would directly use the ?metadata? as file path. same_filepath_fn: Set to ``True`` to use same ``filepath_fn`` from the ``OnDiskCacheHolder``. skip_read: Boolean value to skip reading the file handle from ``datapipe``. By default, reading is enabled and reading function is created based on the ``mode``. timeout: Integer value of seconds to wait for uncached item to be written to disk Example: >>> from torchdata.datapipes.iter import IterableWrapper, HttpReader >>> url = IterableWrapper(["https://path/to/filename", ]) >>> def _filepath_fn(url): >>> temp_dir = tempfile.gettempdir() >>> return os.path.join(temp_dir, os.path.basename(url)) >>> hash_dict = {"expected_filepath": expected_MD5_hash} >>> # You must call ``.on_disk_cache`` at some point before ``.end_caching`` >>> cache_dp = url.on_disk_cache(filepath_fn=_filepath_fn, hash_dict=_hash_dict, hash_type="md5") >>> # You must call ``.end_caching`` at a later point to stop tracing and save the results to local files. >>> cache_dp = HttpReader(cache_dp).end_caching(mode="wb", filepath_fn=_filepath_fn) """ def __new__(cls, datapipe, mode="wb", filepath_fn=None, *, same_filepath_fn=False, skip_read=False, timeout=300): if filepath_fn is not None and same_filepath_fn: raise ValueError("`filepath_fn` is mutually exclusive with `same_filepath_fn`") graph = traverse_dps(datapipe) # Get the last CacheHolder cache_holder = EndOnDiskCacheHolderIterDataPipe._recursive_search(graph) if cache_holder is None: raise RuntimeError("Expected `OnDiskCacheHolder` existing in pipeline when `end_caching` is invoked") if cache_holder._end_caching_flag: raise RuntimeError("`end_caching` can only be invoked once per `OnDiskCacheHolder`") first_filepath_fn, _hash_dict, _hash_type, _, cache_uuid = OnDiskCacheHolderIterDataPipe._temp_dict[ cache_holder ] cached_dp, one_many_cached_dp = cache_holder._end_caching() cached_dp = _WaitPendingCacheItemIterDataPipe(cached_dp, timeout=timeout, cache_uuid=cache_uuid) one_many_cached_dp = _WaitPendingCacheItemIterDataPipe( one_many_cached_dp, timeout=timeout, cache_uuid=cache_uuid, input_col=0 ) one_many_cached_dp = one_many_cached_dp.map(_leave_second) memory_cell_dp = cache_holder.source_datapipe if same_filepath_fn: filepath_fn = first_filepath_fn todo_dp = datapipe if not skip_read: if "t" in mode: todo_dp = todo_dp.map(fn=_read_str, input_col=1) else: todo_dp = todo_dp.map(fn=_read_bytes, input_col=1) if filepath_fn is not None: todo_dp = todo_dp.map(fn=filepath_fn, input_col=0) # Extra hash check here when hash is provided. # And, raise Error if data returned from prior operations doesn't meet hash if _hash_dict is not None: todo_dp = todo_dp.check_hash(_hash_dict, _hash_type) todo_dp = todo_dp.save_to_disk(mode=mode) todo_dp = _FulfilledPromisesIterDataPipe(todo_dp, memory_cell_dp, first_filepath_fn, cache_uuid=cache_uuid) # TODO(VitalyFedyunin): This impacts determinism for partial cache situations return todo_dp.concat(cached_dp).concat(one_many_cached_dp) @staticmethod def _recursive_search(graph): for dp, _ in graph.values(): # Find the closest CacheHolder if isinstance(dp, OnDiskCacheHolderIterDataPipe): return dp for _, sub_graph in graph.values(): res = EndOnDiskCacheHolderIterDataPipe._recursive_search(sub_graph) if res is not None: return res return None
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Iterator, Optional, TypeVar from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe T_co = TypeVar("T_co", covariant=True) @functional_datapipe("cycle") class CyclerIterDataPipe(IterDataPipe[T_co]): """ Cycles the specified input in perpetuity by default, or for the specified number of times (functional name: ``cycle``). If the ordering does not matter (e.g. because you plan to ``shuffle`` later) or if you would like to repeat an element multiple times before moving onto the next element, use :class:`.Repeater`. Args: source_datapipe: source DataPipe that will be cycled through count: the number of times to read through ``source_datapipe` (if ``None``, it will cycle in perpetuity) Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper(range(3)) >>> dp = dp.cycle(2) >>> list(dp) [0, 1, 2, 0, 1, 2] """ def __init__(self, source_datapipe: IterDataPipe[T_co], count: Optional[int] = None) -> None: self.source_datapipe: IterDataPipe[T_co] = source_datapipe self.count: Optional[int] = count if count is not None and count < 0: raise ValueError(f"Expected non-negative count, got {count}") def __iter__(self) -> Iterator[T_co]: i = 0 while self.count is None or i < self.count: yield from self.source_datapipe i += 1 def __len__(self) -> int: if self.count is None: raise TypeError( f"This {type(self).__name__} instance cycles forever, and therefore doesn't have valid length" ) else: return self.count * len(self.source_datapipe) @functional_datapipe("repeat") class RepeaterIterDataPipe(IterDataPipe[T_co]): """ Repeatedly yield each element of source DataPipe for the specified number of times before moving onto the next element (functional name: ``repeat``). Note that no copy is made in this DataPipe, the same element is yielded repeatedly. If you would like to yield the whole DataPipe in order multiple times, use :class:`.Cycler`. Args: source_datapipe: source DataPipe that will be iterated through times: the number of times an element of ``source_datapipe`` will be yielded before moving onto the next element Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper(range(3)) >>> dp = dp.repeat(2) >>> list(dp) [0, 0, 1, 1, 2, 2] """ def __init__(self, source_datapipe: IterDataPipe[T_co], times: int) -> None: self.source_datapipe: IterDataPipe[T_co] = source_datapipe self.times: int = times if times <= 1: raise ValueError(f"The number of repetition must be > 1, got {times}") def __iter__(self) -> Iterator[T_co]: for element in self.source_datapipe: for _ in range(self.times): yield element def __len__(self) -> int: return self.times * len(self.source_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from functools import partial from typing import List, Optional, TypeVar from torch.utils.data.datapipes.utils.common import DILL_AVAILABLE from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe try: # TODO(637): Create dependency on TorchArrow? import pyarrow.parquet as parquet import torcharrow except ImportError: torcharrow = None parquet = None if DILL_AVAILABLE: import dill dill.extend(use_dill=False) T_co = TypeVar("T_co") def _construct_dataframe(data, dtype=None, dtype_generator=None, columns=None, device=None): if dtype is None: dtype = dtype_generator() return torcharrow.dataframe(data, dtype=dtype, columns=columns, device=device) @functional_datapipe("dataframe") class DataFrameMakerIterDataPipe(IterDataPipe): # IterDataPipe[torcharrow.IDataFrame[T_co]] r""" Takes rows of data, batches a number of them together and creates `TorchArrow` DataFrames (functional name: ``dataframe``). Note: There is a trade-off between having a large number of rows within a DataFrame and usage of memory. Please choose a value carefully. Args: source_dp: IterDataPipe containing rows of data dataframe_size: number of rows of data within each DataFrame, page size can be option dtype: specify the `TorchArrow` dtype for the DataFrame, use ``torcharrow.dtypes.DType`` dtype_generator: function with no input argument that generates a torcharrow.dtypes.DType, which overrides dtype if both are given. This is useful for when the desired dtype is not serializable. columns: List of str that specifies the column names of the DataFrame device: specify the device on which the DataFrame will be stored Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> import torcharrow.dtypes as dt >>> source_data = [(i,) for i in range(3)] >>> source_dp = IterableWrapper(source_data) >>> DTYPE = dt.Struct([dt.Field("Values", dt.int32)]) >>> df_dp = source_dp.dataframe(dtype=DTYPE) >>> list(df_dp)[0] index Values ------- -------- 0 0 1 1 2 2 dtype: Struct([Field('Values', int32)]), count: 3, null_count: 0 """ def __new__( cls, source_dp: IterDataPipe[T_co], dataframe_size: int = 1000, dtype=None, dtype_generator=None, columns: Optional[List[str]] = None, device: str = "", ): if torcharrow is None: raise ImportError( "The library 'torcharrow' is necessary for this DataPipe but it is not available." "Please visit https://github.com/facebookresearch/torcharrow/ to install it." ) # In this version, DF tracing is not available, which would allow DataPipe to run DataFrame operations batch_dp = source_dp.batch(dataframe_size) df_dp = batch_dp.map( partial(_construct_dataframe, dtype=dtype, dtype_generator=dtype_generator, columns=columns, device=device) ) return df_dp @functional_datapipe("load_parquet_as_df") class ParquetDFLoaderIterDataPipe(IterDataPipe): # IterDataPipe[torcharrow.IDataFrame[T_co]] r""" Takes in paths to Parquet files and return a `TorchArrow` DataFrame for each row group within a Parquet file (functional name: ``load_parquet_as_df``). Args: source_dp: source DataPipe containing paths to the Parquet files columns: List of `str` that specifies the column names of the DataFrame use_threads: if ``True``, Parquet reader will perform multi-threaded column reads dtype: specify the `TorchArrow` dtype for the DataFrame, use ``torcharrow.dtypes.DType`` device: specify the device on which the DataFrame will be stored Example: >>> from torchdata.datapipes.iter import FileLister >>> import torcharrow.dtypes as dt >>> DTYPE = dt.Struct([dt.Field("Values", dt.int32)]) >>> source_dp = FileLister(".", masks="df*.parquet") >>> parquet_df_dp = source_dp.load_parquet_as_df(dtype=DTYPE) >>> list(parquet_df_dp)[0] index Values ------- -------- 0 0 1 1 2 2 dtype: Struct([Field('Values', int32)]), count: 3, null_count: 0 """ def __init__( self, source_dp: IterDataPipe[str], dtype=None, columns: Optional[List[str]] = None, device: str = "", use_threads: bool = False, ): if torcharrow is None: raise ImportError( "The library 'torcharrow' is necessary for this DataPipe but it is not available." "Please visit https://github.com/facebookresearch/torcharrow/ to install it." ) if parquet is None: raise ImportError("The library 'parquet' is necessary for this DataPipe but it is not available.") self.source_dp = source_dp self.columns = columns self.use_threads = use_threads self.dtype = dtype self.device = device def __iter__(self): for path in self.source_dp: parquet_file = parquet.ParquetFile(path) num_row_groups = parquet_file.num_row_groups for i in range(num_row_groups): # TODO(638): More fine-grain control over the number of rows or row group per DataFrame row_group = parquet_file.read_row_group(i, columns=self.columns, use_threads=self.use_threads) yield torcharrow.from_arrow(row_group, dtype=self.dtype) def __getstate__(self): if DILL_AVAILABLE: dill_dtype = dill.dumps(self.dtype) else: dill_dtype = self.dtype state = (self.source_dp, dill_dtype, self.columns, self.device, self.use_threads) if IterDataPipe.getstate_hook is not None: return IterDataPipe.getstate_hook(state) return state def __setstate__(self, state): (self.source_dp, dill_dtype, self.columns, self.device, self.use_threads) = state if DILL_AVAILABLE: self.dtype = dill.loads(dill_dtype) # type: ignore[assignment] else: self.dtype = dill_dtype # type: ignore[assignment]
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import struct import warnings from functools import partial from io import BufferedIOBase from typing import Any, cast, Dict, Iterable, Iterator, List, NamedTuple, Optional, Tuple, Union import torch from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils.common import validate_pathname_binary_tuple try: from math import prod # type: ignore except ImportError: # Implementation for older Python # NOTE: this is not supported by mypy yet # https://github.com/python/mypy/issues/1393 import operator from functools import reduce def prod(xs): # type: ignore[no-redef] return reduce(operator.mul, xs, 1) try: import google.protobuf as _protobuf del _protobuf HAS_PROTOBUF = True except ImportError: HAS_PROTOBUF = False U = Union[bytes, bytearray, str] TFRecordFeatureSpec = Tuple[Tuple[int, ...], torch.dtype] TFRecordExampleSpec = Dict[str, TFRecordFeatureSpec] # Note, reccursive types not supported by mypy at the moment # TODO(640): uncomment as soon as it becomes supported # https://github.com/python/mypy/issues/731 # BinaryData = Union[str, List['BinaryData']] TFRecordBinaryData = Union[str, List[str], List[List[str]], List[List[List[Any]]]] TFRecordExampleFeature = Union[torch.Tensor, List[torch.Tensor], TFRecordBinaryData] TFRecordExample = Dict[str, TFRecordExampleFeature] class SequenceExampleSpec(NamedTuple): context: TFRecordExampleSpec feature_lists: TFRecordExampleSpec def _assert_protobuf() -> None: if not HAS_PROTOBUF: raise ModuleNotFoundError( "Package `protobuf` is required to be installed to use this datapipe." "Please use `pip install protobuf` or `conda install -c conda-forge protobuf`" "to install the package" ) def iterate_tfrecord_file(data: BufferedIOBase) -> Iterator[memoryview]: length_bytes = bytearray(8) crc_bytes = bytearray(4) data_bytes = bytearray(1024) while True: bytes_read = data.readinto(length_bytes) if bytes_read == 0: break elif bytes_read != 8: raise RuntimeError("Invalid tfrecord file: failed to read the record size.") if data.readinto(crc_bytes) != 4: raise RuntimeError("Invalid tfrecord file: failed to read the start token.") (length,) = struct.unpack("<Q", length_bytes) if length > len(data_bytes): data_bytes = data_bytes.zfill(int(length * 1.5)) data_bytes_view = memoryview(data_bytes)[:length] if data.readinto(data_bytes_view) != length: raise RuntimeError("Invalid tfrecord file: failed to read the record.") if data.readinto(crc_bytes) != 4: raise RuntimeError("Invalid tfrecord file: failed to read the end token.") # TODO(641): check CRC yield data_bytes_view def process_feature(feature) -> torch.Tensor: # NOTE: We assume that each key in the example has only one field # (either "bytes_list", "float_list", or "int64_list")! field = feature.ListFields()[0] inferred_typename, value = field[0].name, field[1].value if inferred_typename == "bytes_list": pass elif inferred_typename == "float_list": value = torch.tensor(value, dtype=torch.float32) elif inferred_typename == "int64_list": value = torch.tensor(value, dtype=torch.int64) return value def _reshape_list(value, shape): # Flatten list flat_list = [] def flatten(value): if isinstance(value, (str, bytes)): flat_list.append(value) else: for x in value: flatten(x) flatten(value) # Compute correct shape common_divisor = prod(x for x in shape if x != -1) if sum(1 for x in shape if x == -1) > 1: raise RuntimeError("Shape can contain at most one dynamic dimension (-1).") if len(flat_list) % max(common_divisor, 1) != 0: raise RuntimeError(f"Cannot reshape {len(flat_list)} values into shape {shape}") shape = [x if x != -1 else (len(flat_list) // common_divisor) for x in shape] # Reshape list into the correct shape def _reshape(value, shape): if len(shape) == 0: assert len(value) == 1 return value[0] elif len(shape) == 1: # To make the reccursion faster assert len(value) == shape[0] return value dim_size = len(value) // shape[0] return [_reshape(value[i * dim_size : (i + 1) * dim_size], shape[1:]) for i in range(dim_size)] return _reshape(flat_list, shape) def _apply_feature_spec(value, feature_spec): if feature_spec is not None: shape, dtype = feature_spec if isinstance(dtype, torch.dtype): if shape is not None: value = value.reshape(shape) value = value.to(dtype) elif shape is not None: # Manual list reshape value = _reshape_list(value, shape) return value def _parse_tfrecord_features(features, spec: Optional[TFRecordExampleSpec]) -> Dict[str, torch.Tensor]: result = dict() features = features.feature for key in features.keys(): if spec is not None and key not in spec: continue feature_spec = None if spec is None else spec[key] feature = features[key] result[key] = _apply_feature_spec(process_feature(feature), feature_spec) return result def parse_tfrecord_sequence_example(example, spec: Optional[TFRecordExampleSpec]) -> TFRecordExample: # Parse context features result = cast(TFRecordExample, _parse_tfrecord_features(example.context, spec)) # Parse feature lists feature_lists_keys = None if spec is None else set(spec.keys()) - set(result.keys()) features = example.feature_lists.feature_list for key in features.keys(): if feature_lists_keys is not None and key not in feature_lists_keys: continue feature_spec = None if spec is None else spec[key] feature = features[key].feature if key in result: raise RuntimeError( "TFRecord example's key {key} is contained in both the context and feature lists. This is not supported." ) value: Union[torch.Tensor, List[Any]] = list(map(partial(process_feature), feature)) # For known torch dtypes, we stack the list features if feature_spec is not None and isinstance(feature_spec[1], torch.dtype): value = torch.stack(cast(List[torch.Tensor], value), 0) value = _apply_feature_spec(value, feature_spec) result[key] = value if spec is not None and len(result.keys()) != len(spec.keys()): raise RuntimeError(f"Example is missing some required keys: {sorted(result.keys())} != {sorted(spec.keys())}") return result @functional_datapipe("load_from_tfrecord") class TFRecordLoaderIterDataPipe(IterDataPipe[TFRecordExample]): r""" Opens/decompresses tfrecord binary streams from an Iterable DataPipe which contains tuples of path name and tfrecord binary stream, and yields the stored records (functional name: ``load_from_tfrecord``). Args: datapipe: Iterable DataPipe that provides tuples of path name and tfrecord binary stream length: a nominal length of the DataPipe Note: The opened file handles will be closed automatically if the default ``DecoderDataPipe`` is attached. Otherwise, user should be responsible to close file handles explicitly or let Python's GC close them periodically. Example: >>> from torchdata.datapipes.iter import FileLister, FileOpener >>> datapipe1 = FileLister(".", "*.tfrecord") >>> datapipe2 = FileOpener(datapipe1, mode="b") >>> tfrecord_loader_dp = datapipe2.load_from_tfrecord() >>> for example in tfrecord_loader_dp: >>> print(example) """ def __init__( self, datapipe: Iterable[Tuple[str, BufferedIOBase]], spec: Optional[TFRecordExampleSpec] = None, length: int = -1, ) -> None: super().__init__() _assert_protobuf() self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe self.length: int = length self.spec = spec def __iter__(self) -> Iterator[TFRecordExample]: # We assume that the "example.proto" and "feature.proto" # stays the same for future TensorFlow versions. # If it changed, newer TensorFlow versions would # not be able to load older tfrecord datasets. from .protobuf_template import _tfrecord_example_pb2 as example_pb2 for data in self.datapipe: validate_pathname_binary_tuple(data) pathname, data_stream = data try: for example_bytes in iterate_tfrecord_file(data_stream): example = example_pb2.SequenceExample() # type: ignore example.ParseFromString(example_bytes) # type: ignore yield parse_tfrecord_sequence_example(example, self.spec) except RuntimeError as e: warnings.warn(f"Unable to read from corrupted tfrecord stream {pathname} due to: {e}, abort!") raise e def __len__(self) -> int: if self.length == -1: raise TypeError(f"{type(self).__name__} instance doesn't have valid length") return self.length
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import re from typing import Iterator, List from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe def _shard_expand(s: str) -> List[str]: expansion = r"[{][0-9]+[.][.][0-9]+[}]" m = re.search(expansion, s) if not m: return [s] prefix = s[: m.start()] rest = _shard_expand(s[m.end() :]) rng = s[m.start() + 1 : m.end() - 1] lohi = rng.split("..") if len(lohi[0]) == len(lohi[1]) and lohi[0].startswith("0"): fmt = "{prefix}{i:0>{l}d}{r}" elif len(lohi[0]) <= len(lohi[1]): if lohi[0].startswith("0") and lohi[0] != "0": raise ValueError("shard_expand: low bound must not start with 0 if low bound is shorter") fmt = "{prefix}{i}{r}" else: raise ValueError("shard_expand: low bound must be shorter than high bound") lo, hi = (int(x) for x in lohi) if lo >= hi: raise ValueError(f"shard_expand: bad range in in shard spec {s}.") result = [] for i in range(lo, hi + 1): for r in rest: expanded: str = fmt.format(prefix=prefix, i=i, r=r, l=len(lohi[1])) result.append(expanded) return result @functional_datapipe("shard_expand") class ShardExpanderIterDataPipe(IterDataPipe[str]): r""" Expands incoming shard strings into shards. Sharded data files are named using shell-like brace notation. For example, an ImageNet dataset sharded into 1200 shards and stored on a web server might be named `imagenet-{000000..001199}.tar`. Note that shard names can be expanded without any server transactions; this makes `shard_expand` reproducible and storage system independent (unlike :class `.FileLister` etc.). Args: source_datapipe: a DataPipe yielding a stream of pairs Returns: a DataPipe yielding a stream of expanded pathnames. Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper(["ds-{00..05}.tar"]) >>> expand_dp = source_dp.shard_expand() >>> list(expand_dp) ['ds-00.tar', 'ds-01.tar', 'ds-02.tar', 'ds-03.tar', 'ds-04.tar', 'ds-05.tar'] >>> source_dp = IterableWrapper(["imgs_{00..05}.tar", "labels_{00..05}.tar"]) >>> expand_dp = source_dp.shard_expand() >>> list(expand_dp) ['imgs_00.tar', 'imgs_01.tar', 'imgs_02.tar', 'labels_00.tar', 'labels_01.tar', 'labels_02.tar'] """ def __init__(self, source_datapipe: IterDataPipe[str]) -> None: super().__init__() self.source_datapipe: IterDataPipe[str] = source_datapipe def __iter__(self) -> Iterator[str]: for path in self.source_datapipe: yield from _shard_expand(path)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os import tarfile import warnings from io import BufferedIOBase from typing import cast, IO, Iterable, Iterator, Optional, Tuple from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils import StreamWrapper from torchdata.datapipes.utils.common import validate_pathname_binary_tuple @functional_datapipe("load_from_tar") class TarArchiveLoaderIterDataPipe(IterDataPipe[Tuple[str, BufferedIOBase]]): r""" Opens/decompresses tar binary streams from an Iterable DataPipe which contains tuples of path name and tar binary stream, and yields a tuple of path name and extracted binary stream (functional name: ``load_from_tar``). Args: datapipe: Iterable DataPipe that provides tuples of path name and tar binary stream mode: File mode used by `tarfile.open` to read file object. Mode has to be a string of the form `'filemode[:compression]'` length: a nominal length of the DataPipe Note: The opened file handles will be closed automatically if the default ``DecoderDataPipe`` is attached. Otherwise, user should be responsible to close file handles explicitly or let Python's GC close them periodically. Example: >>> from torchdata.datapipes.iter import FileLister, FileOpener >>> datapipe1 = FileLister(".", "*.tar") >>> datapipe2 = FileOpener(datapipe1, mode="b") >>> tar_loader_dp = datapipe2.load_from_tar() >>> for _, stream in tar_loader_dp: >>> print(stream.read()) b'0123456789abcdef' """ def __init__(self, datapipe: Iterable[Tuple[str, BufferedIOBase]], mode: str = "r:*", length: int = -1) -> None: super().__init__() self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe self.mode: str = mode self.length: int = length def __iter__(self) -> Iterator[Tuple[str, BufferedIOBase]]: for data in self.datapipe: validate_pathname_binary_tuple(data) pathname, data_stream = data try: if isinstance(data_stream, StreamWrapper) and isinstance(data_stream.file_obj, tarfile.TarFile): tar = data_stream.file_obj else: reading_mode = ( self.mode if hasattr(data_stream, "seekable") and data_stream.seekable() else self.mode.replace(":", "|") ) # typing.cast is used here to silence mypy's type checker tar = tarfile.open(fileobj=cast(Optional[IO[bytes]], data_stream), mode=reading_mode) for tarinfo in tar: if not tarinfo.isfile(): continue extracted_fobj = tar.extractfile(tarinfo) if extracted_fobj is None: warnings.warn(f"failed to extract file {tarinfo.name} from source tarfile {pathname}") raise tarfile.ExtractError inner_pathname = os.path.normpath(os.path.join(pathname, tarinfo.name)) yield inner_pathname, StreamWrapper(extracted_fobj, data_stream, name=inner_pathname) # type: ignore[misc] except Exception as e: warnings.warn(f"Unable to extract files from corrupted tarfile stream {pathname} due to: {e}, abort!") raise e finally: if isinstance(data_stream, StreamWrapper): data_stream.autoclose() def __len__(self) -> int: if self.length == -1: raise TypeError(f"{type(self).__name__} instance doesn't have valid length") return self.length
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import random from typing import Dict, final, List, Optional, TypeVar, Union from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe T = TypeVar("T") @functional_datapipe("random_split") class RandomSplitterIterDataPipe(IterDataPipe): r""" Randomly split samples from a source DataPipe into groups (functional name: ``random_split``). Since there is no buffer, only ONE group of samples (i.e. one child DataPipe) can be iterated through at any time. Attempts to iterate through multiple of them simultaneously will fail. Note that by default, multiple iterations of this DataPipe will yield the same split for consistency across epochs. You can invoke ``override_seed`` on the output(s) to update the seed whenever needed (such as per epoch to get a different split per epoch). Args: source_datapipe: Iterable DataPipe being split weights: Dict of weights; the length of this list determines how many output DataPipes there will be. It is recommended to provide integer weights that sum up to ``total_length``, which allows resulting DataPipes' length values to be known in advance. seed: random _seed used to determine the randomness of the split total_length: Length of the ``source_datapipe``, optional but providing an integer is highly encouraged, because not all ``IterDataPipe`` has ``len``, espeically ones that can be easily known in advance. target: Optional key (that must exist in ``weights``) to indicate the specific group to return. If set to the default ``None``, returns ``List[IterDataPipe]``. If target is specified, returns ``IterDataPipe``. Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper(range(10)) >>> train, valid = dp.random_split(total_length=10, weights={"train": 0.5, "valid": 0.5}, seed=0) >>> list(train) [2, 3, 5, 7, 8] >>> list(valid) [0, 1, 4, 6, 9] >>> # You can also specify a target key if you only need a specific group of samples >>> train = dp.random_split(total_length=10, weights={"train": 0.5, "valid": 0.5}, seed=0, target='train') >>> list(train) [2, 3, 5, 7, 8] >>> # Be careful to use the same seed as before when specifying `target` to get the correct split. >>> valid = dp.random_split(total_length=10, weights={"train": 0.5, "valid": 0.5}, seed=0, target='valid') >>> list(valid) [0, 1, 4, 6, 9] """ def __new__( cls, source_datapipe: IterDataPipe, weights: Dict[T, Union[int, float]], seed, total_length: Optional[int] = None, target: Optional[T] = None, ): if total_length is None: try: # TODO: This is an issue for DataPipes which only have runtime lengths. Revisit to see if this # is problematic. total_length = len(source_datapipe) except TypeError: raise TypeError( "RandomSplitter needs `total_length`, but it is unable to infer it from " f"the `source_datapipe`: {source_datapipe}." ) container = _RandomSplitterIterDataPipe(source_datapipe, total_length, weights, seed) if target is None: return [SplitterIterator(container, k) for k in list(weights.keys())] else: if target in weights.keys(): return SplitterIterator(container, target) else: raise KeyError(f"`target={target}` does not match any key in `weights`.") class _RandomSplitterIterDataPipe(IterDataPipe): def __init__( self, source_datapipe: IterDataPipe, total_length: int, weights: Dict[T, Union[int, float]], seed, ): self.source_datapipe: IterDataPipe = source_datapipe self.total_length: int = total_length self.remaining_length: int = total_length self._seed = seed self.keys: List[T] = list(weights.keys()) self.key_to_index: Dict[T, int] = {k: i for i, k in enumerate(self.keys)} self.norm_weights: List[float] = self.normalize_weights([weights[k] for k in self.keys], total_length) self.weights: List[float] = self.norm_weights.copy() self._rng = random.Random(self._seed) self._lengths: List[int] = [] def draw(self) -> T: selected_key = self._rng.choices(self.keys, self.weights)[0] index = self.key_to_index[selected_key] self.weights[index] -= 1 self.remaining_length -= 1 if self.weights[index] < 0: self.weights[index] = 0 self.weights = self.normalize_weights(self.weights, self.remaining_length) return selected_key @staticmethod def normalize_weights(weights: List[float], total_length: int) -> List[float]: """ Given a ``List`` of weights, normalize them according to ``total_length``. """ total_weight = sum(weights) return [float(w) * total_length / total_weight for w in weights] @final def reset(self) -> None: self._rng = random.Random(self._seed) self.weights = self.norm_weights.copy() self.remaining_length = self.total_length def override_seed(self, seed): """ Update the `seed`. The new `seed` will be used in the next iteration. """ self._seed = seed return self def __getstate__(self): state = ( self.source_datapipe, self.total_length, self._seed, self.norm_weights, self.keys, self.key_to_index, self.weights, self._rng.getstate(), ) if IterDataPipe.getstate_hook is not None: return IterDataPipe.getstate_hook(state) return state def __setstate__(self, state): ( self.source_datapipe, self.total_length, self._seed, self.norm_weights, self.keys, self.key_to_index, self.weights, rng_state, ) = state self._rng = random.Random() self._rng.setstate(rng_state) def get_length(self, target: T) -> int: if not self._lengths: if all(w.is_integer() for w in self.norm_weights) and sum(self.norm_weights) == self.total_length: self._lengths = [int(w) for w in self.norm_weights] else: raise TypeError( "Lengths of the split cannot be known in advance. Please supply " "integer `weights` that sum up to `total_length`.\nAlternatively, " "use `datapipe.set_length(LENGTH)` to manually set the desired length." ) index = self.key_to_index[target] return self._lengths[index] class SplitterIterator(IterDataPipe): def __init__(self, main_datapipe: _RandomSplitterIterDataPipe, target: T): self.main_datapipe = main_datapipe self.target = target def __iter__(self): self.main_datapipe.reset() for sample in self.main_datapipe.source_datapipe: if self.main_datapipe.draw() == self.target: yield sample def override_seed(self, seed): """ Update the `seed`. The new `seed` will be used in the next iteration. For use cases that require a different split for each epoch, you call this method before or after the epoch as necessary. """ self.main_datapipe.override_seed(seed) return self def __len__(self): return self.main_datapipe.get_length(self.target)
# type: ignore # Generated by the protocol buffer compiler. DO NOT EDIT! # source: example.proto import sys _b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import ( descriptor as _descriptor, descriptor_pb2, message as _message, reflection as _reflection, symbol_database as _symbol_database, ) # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name="example.proto", package="tfrecord", syntax="proto3", serialized_pb=_b( '\n\rexample.proto\x12\x08tfrecord"\x1a\n\tBytesList\x12\r\n\x05value\x18\x01 \x03(\x0c"\x1e\n\tFloatList\x12\x11\n\x05value\x18\x01 \x03(\x02\x42\x02\x10\x01"\x1e\n\tInt64List\x12\x11\n\x05value\x18\x01 \x03(\x03\x42\x02\x10\x01"\x92\x01\n\x07\x46\x65\x61ture\x12)\n\nbytes_list\x18\x01 \x01(\x0b\x32\x13.tfrecord.BytesListH\x00\x12)\n\nfloat_list\x18\x02 \x01(\x0b\x32\x13.tfrecord.FloatListH\x00\x12)\n\nint64_list\x18\x03 \x01(\x0b\x32\x13.tfrecord.Int64ListH\x00\x42\x06\n\x04kind"\x7f\n\x08\x46\x65\x61tures\x12\x30\n\x07\x66\x65\x61ture\x18\x01 \x03(\x0b\x32\x1f.tfrecord.Features.FeatureEntry\x1a\x41\n\x0c\x46\x65\x61tureEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.tfrecord.Feature:\x02\x38\x01"1\n\x0b\x46\x65\x61tureList\x12"\n\x07\x66\x65\x61ture\x18\x01 \x03(\x0b\x32\x11.tfrecord.Feature"\x98\x01\n\x0c\x46\x65\x61tureLists\x12=\n\x0c\x66\x65\x61ture_list\x18\x01 \x03(\x0b\x32\'.tfrecord.FeatureLists.FeatureListEntry\x1aI\n\x10\x46\x65\x61tureListEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.tfrecord.FeatureList:\x02\x38\x01"/\n\x07\x45xample\x12$\n\x08\x66\x65\x61tures\x18\x01 \x01(\x0b\x32\x12.tfrecord.Features"e\n\x0fSequenceExample\x12#\n\x07\x63ontext\x18\x01 \x01(\x0b\x32\x12.tfrecord.Features\x12-\n\rfeature_lists\x18\x02 \x01(\x0b\x32\x16.tfrecord.FeatureListsB\x03\xf8\x01\x01\x62\x06proto3' ), ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _BYTESLIST = _descriptor.Descriptor( name="BytesList", full_name="tfrecord.BytesList", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="value", full_name="tfrecord.BytesList.value", index=0, number=1, type=12, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=27, serialized_end=53, ) _FLOATLIST = _descriptor.Descriptor( name="FloatList", full_name="tfrecord.FloatList", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="value", full_name="tfrecord.FloatList.value", index=0, number=1, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b("\020\001")), ), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=55, serialized_end=85, ) _INT64LIST = _descriptor.Descriptor( name="Int64List", full_name="tfrecord.Int64List", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="value", full_name="tfrecord.Int64List.value", index=0, number=1, type=3, cpp_type=2, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b("\020\001")), ), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=87, serialized_end=117, ) _FEATURE = _descriptor.Descriptor( name="Feature", full_name="tfrecord.Feature", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="bytes_list", full_name="tfrecord.Feature.bytes_list", index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), _descriptor.FieldDescriptor( name="float_list", full_name="tfrecord.Feature.float_list", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), _descriptor.FieldDescriptor( name="int64_list", full_name="tfrecord.Feature.int64_list", index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name="kind", full_name="tfrecord.Feature.kind", index=0, containing_type=None, fields=[] ), ], serialized_start=120, serialized_end=266, ) _FEATURES_FEATUREENTRY = _descriptor.Descriptor( name="FeatureEntry", full_name="tfrecord.Features.FeatureEntry", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="key", full_name="tfrecord.Features.FeatureEntry.key", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), _descriptor.FieldDescriptor( name="value", full_name="tfrecord.Features.FeatureEntry.value", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), ], extensions=[], nested_types=[], enum_types=[], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=330, serialized_end=395, ) _FEATURES = _descriptor.Descriptor( name="Features", full_name="tfrecord.Features", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="feature", full_name="tfrecord.Features.feature", index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), ], extensions=[], nested_types=[ _FEATURES_FEATUREENTRY, ], enum_types=[], options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=268, serialized_end=395, ) _FEATURELIST = _descriptor.Descriptor( name="FeatureList", full_name="tfrecord.FeatureList", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="feature", full_name="tfrecord.FeatureList.feature", index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=397, serialized_end=446, ) _FEATURELISTS_FEATURELISTENTRY = _descriptor.Descriptor( name="FeatureListEntry", full_name="tfrecord.FeatureLists.FeatureListEntry", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="key", full_name="tfrecord.FeatureLists.FeatureListEntry.key", index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), _descriptor.FieldDescriptor( name="value", full_name="tfrecord.FeatureLists.FeatureListEntry.value", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), ], extensions=[], nested_types=[], enum_types=[], options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")), is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=528, serialized_end=601, ) _FEATURELISTS = _descriptor.Descriptor( name="FeatureLists", full_name="tfrecord.FeatureLists", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="feature_list", full_name="tfrecord.FeatureLists.feature_list", index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), ], extensions=[], nested_types=[ _FEATURELISTS_FEATURELISTENTRY, ], enum_types=[], options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=449, serialized_end=601, ) _EXAMPLE = _descriptor.Descriptor( name="Example", full_name="tfrecord.Example", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="features", full_name="tfrecord.Example.features", index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=603, serialized_end=650, ) _SEQUENCEEXAMPLE = _descriptor.Descriptor( name="SequenceExample", full_name="tfrecord.SequenceExample", filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name="context", full_name="tfrecord.SequenceExample.context", index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), _descriptor.FieldDescriptor( name="feature_lists", full_name="tfrecord.SequenceExample.feature_lists", index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None, ), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], serialized_start=652, serialized_end=753, ) _FEATURE.fields_by_name["bytes_list"].message_type = _BYTESLIST _FEATURE.fields_by_name["float_list"].message_type = _FLOATLIST _FEATURE.fields_by_name["int64_list"].message_type = _INT64LIST _FEATURE.oneofs_by_name["kind"].fields.append(_FEATURE.fields_by_name["bytes_list"]) _FEATURE.fields_by_name["bytes_list"].containing_oneof = _FEATURE.oneofs_by_name["kind"] _FEATURE.oneofs_by_name["kind"].fields.append(_FEATURE.fields_by_name["float_list"]) _FEATURE.fields_by_name["float_list"].containing_oneof = _FEATURE.oneofs_by_name["kind"] _FEATURE.oneofs_by_name["kind"].fields.append(_FEATURE.fields_by_name["int64_list"]) _FEATURE.fields_by_name["int64_list"].containing_oneof = _FEATURE.oneofs_by_name["kind"] _FEATURES_FEATUREENTRY.fields_by_name["value"].message_type = _FEATURE _FEATURES_FEATUREENTRY.containing_type = _FEATURES _FEATURES.fields_by_name["feature"].message_type = _FEATURES_FEATUREENTRY _FEATURELIST.fields_by_name["feature"].message_type = _FEATURE _FEATURELISTS_FEATURELISTENTRY.fields_by_name["value"].message_type = _FEATURELIST _FEATURELISTS_FEATURELISTENTRY.containing_type = _FEATURELISTS _FEATURELISTS.fields_by_name["feature_list"].message_type = _FEATURELISTS_FEATURELISTENTRY _EXAMPLE.fields_by_name["features"].message_type = _FEATURES _SEQUENCEEXAMPLE.fields_by_name["context"].message_type = _FEATURES _SEQUENCEEXAMPLE.fields_by_name["feature_lists"].message_type = _FEATURELISTS DESCRIPTOR.message_types_by_name["BytesList"] = _BYTESLIST DESCRIPTOR.message_types_by_name["FloatList"] = _FLOATLIST DESCRIPTOR.message_types_by_name["Int64List"] = _INT64LIST DESCRIPTOR.message_types_by_name["Feature"] = _FEATURE DESCRIPTOR.message_types_by_name["Features"] = _FEATURES DESCRIPTOR.message_types_by_name["FeatureList"] = _FEATURELIST DESCRIPTOR.message_types_by_name["FeatureLists"] = _FEATURELISTS DESCRIPTOR.message_types_by_name["Example"] = _EXAMPLE DESCRIPTOR.message_types_by_name["SequenceExample"] = _SEQUENCEEXAMPLE BytesList = _reflection.GeneratedProtocolMessageType( "BytesList", (_message.Message,), dict( DESCRIPTOR=_BYTESLIST, __module__="example_pb2" # @@protoc_insertion_point(class_scope:tfrecord.BytesList) ), ) _sym_db.RegisterMessage(BytesList) FloatList = _reflection.GeneratedProtocolMessageType( "FloatList", (_message.Message,), dict( DESCRIPTOR=_FLOATLIST, __module__="example_pb2" # @@protoc_insertion_point(class_scope:tfrecord.FloatList) ), ) _sym_db.RegisterMessage(FloatList) Int64List = _reflection.GeneratedProtocolMessageType( "Int64List", (_message.Message,), dict( DESCRIPTOR=_INT64LIST, __module__="example_pb2" # @@protoc_insertion_point(class_scope:tfrecord.Int64List) ), ) _sym_db.RegisterMessage(Int64List) Feature = _reflection.GeneratedProtocolMessageType( "Feature", (_message.Message,), dict( DESCRIPTOR=_FEATURE, __module__="example_pb2" # @@protoc_insertion_point(class_scope:tfrecord.Feature) ), ) _sym_db.RegisterMessage(Feature) Features = _reflection.GeneratedProtocolMessageType( "Features", (_message.Message,), dict( FeatureEntry=_reflection.GeneratedProtocolMessageType( "FeatureEntry", (_message.Message,), dict( DESCRIPTOR=_FEATURES_FEATUREENTRY, __module__="example_pb2" # @@protoc_insertion_point(class_scope:tfrecord.Features.FeatureEntry) ), ), DESCRIPTOR=_FEATURES, __module__="example_pb2" # @@protoc_insertion_point(class_scope:tfrecord.Features) ), ) _sym_db.RegisterMessage(Features) _sym_db.RegisterMessage(Features.FeatureEntry) FeatureList = _reflection.GeneratedProtocolMessageType( "FeatureList", (_message.Message,), dict( DESCRIPTOR=_FEATURELIST, __module__="example_pb2" # @@protoc_insertion_point(class_scope:tfrecord.FeatureList) ), ) _sym_db.RegisterMessage(FeatureList) FeatureLists = _reflection.GeneratedProtocolMessageType( "FeatureLists", (_message.Message,), dict( FeatureListEntry=_reflection.GeneratedProtocolMessageType( "FeatureListEntry", (_message.Message,), dict( DESCRIPTOR=_FEATURELISTS_FEATURELISTENTRY, __module__="example_pb2" # @@protoc_insertion_point(class_scope:tfrecord.FeatureLists.FeatureListEntry) ), ), DESCRIPTOR=_FEATURELISTS, __module__="example_pb2" # @@protoc_insertion_point(class_scope:tfrecord.FeatureLists) ), ) _sym_db.RegisterMessage(FeatureLists) _sym_db.RegisterMessage(FeatureLists.FeatureListEntry) Example = _reflection.GeneratedProtocolMessageType( "Example", (_message.Message,), dict( DESCRIPTOR=_EXAMPLE, __module__="example_pb2" # @@protoc_insertion_point(class_scope:tfrecord.Example) ), ) _sym_db.RegisterMessage(Example) SequenceExample = _reflection.GeneratedProtocolMessageType( "SequenceExample", (_message.Message,), dict( DESCRIPTOR=_SEQUENCEEXAMPLE, __module__="example_pb2" # @@protoc_insertion_point(class_scope:tfrecord.SequenceExample) ), ) _sym_db.RegisterMessage(SequenceExample) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b("\370\001\001")) _FLOATLIST.fields_by_name["value"].has_options = True _FLOATLIST.fields_by_name["value"]._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b("\020\001")) _INT64LIST.fields_by_name["value"].has_options = True _INT64LIST.fields_by_name["value"]._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b("\020\001")) _FEATURES_FEATUREENTRY.has_options = True _FEATURES_FEATUREENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")) _FEATURELISTS_FEATURELISTENTRY.has_options = True _FEATURELISTS_FEATURELISTENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")) # @@protoc_insertion_point(module_scope)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from io import BytesIO from typing import Iterator, List, Tuple, Union import torchdata from torch.utils.data.datapipes.utils.common import match_masks from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils import StreamWrapper @functional_datapipe("list_files_by_s3") class S3FileListerIterDataPipe(IterDataPipe[str]): r""" Iterable DataPipe that lists Amazon S3 file URLs with the given prefixes (functional name: ``list_files_by_s3``). Acceptable prefixes include ``s3://bucket-name``, ``s3://bucket-name/``, ``s3://bucket-name/folder``. Note: 1. ``source_datapipe`` **must** contain a list of valid S3 URLs 2. ``length`` is `-1` by default, and any call to ``__len__()`` is invalid, because the length is unknown until all files are iterated. 3. ``request_timeout_ms`` and ``region`` will overwrite settings in the configuration file or environment variables. 4. The lack of AWS proper configuration can lead empty response. For more details related to S3 IO DataPipe setup and AWS config, please see the `README file`_. .. _README file: https://github.com/pytorch/data/tree/main/torchdata/datapipes/iter/load#s3-io-datapipe-documentation Args: source_datapipe: a DataPipe that contains URLs/URL prefixes to s3 files length: Nominal length of the datapipe request_timeout_ms: timeout setting for each reqeust (3,000ms by default) region: region for access files (inferred from credentials by default) Example: .. testsetup:: from unittest import mock from torchdata.datapipes.iter import IterableWrapper, S3FileLister file_lister_patch = mock.patch.object(S3FileLister, "__iter__", return_value=iter([])) file_lister_patch.start() .. testcode:: from torchdata.datapipes.iter import IterableWrapper, S3FileLister s3_prefixes = IterableWrapper(['s3://bucket-name/folder/', ...]) dp_s3_urls = S3FileLister(s3_prefixes) for d in dp_s3_urls: pass # Functional API dp_s3_urls = s3_prefixes.list_files_by_s3(request_timeout_ms=100) for d in dp_s3_urls: pass .. testcleanup:: file_lister_patch.stop() """ def __init__( self, source_datapipe: IterDataPipe[str], length: int = -1, request_timeout_ms=-1, region="", masks: Union[str, List[str]] = "", ) -> None: if not hasattr(torchdata, "_torchdata") or not hasattr(torchdata._torchdata, "S3Handler"): raise ModuleNotFoundError("TorchData must be built with BUILD_S3=1 to use this datapipe.") self.source_datapipe: IterDataPipe[str] = source_datapipe self.length: int = length self.handler = torchdata._torchdata.S3Handler(request_timeout_ms, region) self.masks = masks def __iter__(self) -> Iterator[str]: for prefix in self.source_datapipe: while True: urls = self.handler.list_files(prefix) for url in urls: if match_masks(url, self.masks): yield url if not urls: break self.handler.clear_marker() def __len__(self) -> int: if self.length == -1: raise TypeError(f"{type(self).__name__} instance doesn't have valid length") return self.length @functional_datapipe("load_files_by_s3") class S3FileLoaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]): r""" Iterable DataPipe that loads Amazon S3 files from the given S3 URLs (functional name: ``load_files_by_s3``). ``S3FileLoader`` iterates all given S3 URLs in ``BytesIO`` format with ``(url, BytesIO)`` tuples. Note: 1. ``source_datapipe`` **must** contain a list of valid S3 URLs. 2. ``request_timeout_ms`` and ``region`` will overwrite settings in the configuration file or environment variables. 3. The lack of AWS proper configuration can lead empty response. For more details related to S3 IO DataPipe setup and AWS config, please see the `README file`_. .. _README file: https://github.com/pytorch/data/tree/main/torchdata/datapipes/iter/load#s3-io-datapipe-documentation Args: source_datapipe: a DataPipe that contains URLs to s3 files request_timeout_ms: timeout setting for each reqeust (3,000ms by default) region: region for access files (inferred from credentials by default) buffer_size: buffer size of each chunk to download large files progressively (128Mb by default) multi_part_download: flag to split each chunk into small packets and download those packets in parallel (enabled by default) Example: .. testsetup:: from unittest import mock from torchdata.datapipes.iter import S3FileLister file_lister_patch = mock.patch.object(S3FileLister, "__iter__", return_value=iter([])) file_lister_patch.start() .. testcode:: from torchdata.datapipes.iter import IterableWrapper, S3FileLoader dp_s3_urls = IterableWrapper(['s3://bucket-name/folder/', ...]).list_files_by_s3() # In order to make sure data are shuffled and sharded in the # distributed environment, `shuffle` and `sharding_filter` # are required. For detail, please check our tutorial in: # https://pytorch.org/data/main/tutorial.html#working-with-dataloader sharded_s3_urls = dp_s3_urls.shuffle().sharding_filter() dp_s3_files = S3FileLoader(sharded_s3_urls) for url, fd in dp_s3_files: # Start loading data data = fd.read() # Functional API dp_s3_files = sharded_s3_urls.load_files_by_s3(buffer_size=256) for url, fd in dp_s3_files: data = fd.read() .. testcleanup:: file_lister_patch.stop() """ def __init__( self, source_datapipe: IterDataPipe[str], request_timeout_ms=-1, region="", buffer_size=None, multi_part_download=None, ) -> None: if not hasattr(torchdata, "_torchdata") or not hasattr(torchdata._torchdata, "S3Handler"): raise ModuleNotFoundError("TorchData must be built with BUILD_S3=1 to use this datapipe.") self.source_datapipe: IterDataPipe[str] = source_datapipe self.handler = torchdata._torchdata.S3Handler(request_timeout_ms, region) if buffer_size: self.handler.set_buffer_size(buffer_size) if multi_part_download: self.handler.set_multi_part_download(multi_part_download) def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]: for url in self.source_datapipe: yield url, StreamWrapper(BytesIO(self.handler.s3_read(url))) def __len__(self) -> int: return len(self.source_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import re import urllib import warnings from typing import Any, Dict, Iterator, Optional, Tuple import requests from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils import StreamWrapper # TODO(642): Remove this helper function when https://bugs.python.org/issue42627 is resolved def _get_proxies() -> Optional[Dict[str, str]]: import os if os.name == "nt": proxies = urllib.request.getproxies() address = proxies.get("https") # The default proxy type of Windows is HTTP if address and address.startswith("https"): address = "http" + address[5:] proxies["https"] = address return proxies return None def _get_response_from_http( url: str, *, timeout: Optional[float], **query_params: Optional[Dict[str, Any]] ) -> Tuple[str, StreamWrapper]: with requests.Session() as session: proxies = _get_proxies() r = session.get(url, timeout=timeout, proxies=proxies, stream=True, **query_params) # type: ignore[attr-defined] r.raise_for_status() return url, StreamWrapper(r.raw) @functional_datapipe("read_from_http") class HTTPReaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]): r""" Takes file URLs (HTTP URLs pointing to files), and yields tuples of file URL and IO stream (functional name: ``read_from_http``). Args: source_datapipe: a DataPipe that contains URLs timeout: timeout in seconds for HTTP request skip_on_error: whether to skip over urls causing problems, otherwise an exception is raised **kwargs: a Dictionary to pass optional arguments that requests takes. For the full list check out https://docs.python-requests.org/en/master/api/ Example: .. testcode:: from torchdata.datapipes.iter import IterableWrapper, HttpReader file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE" query_params = {"auth" : ("fake_username", "fake_password"), "allow_redirects" : True} timeout = 120 http_reader_dp = HttpReader(IterableWrapper([file_url]), timeout=timeout, **query_params) reader_dp = http_reader_dp.readlines() it = iter(reader_dp) path, line = next(it) print((path, line)) Output: .. testoutput:: ('https://raw.githubusercontent.com/pytorch/data/main/LICENSE', b'BSD 3-Clause License') """ def __init__( self, source_datapipe: IterDataPipe[str], timeout: Optional[float] = None, skip_on_error: bool = False, **kwargs: Optional[Dict[str, Any]], ) -> None: self.source_datapipe: IterDataPipe[str] = source_datapipe self.timeout = timeout self.skip_on_error = skip_on_error self.query_params = kwargs def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]: for url in self.source_datapipe: try: yield _get_response_from_http(url, timeout=self.timeout, **self.query_params) except Exception as e: if self.skip_on_error: warnings.warn(f"{e}, skipping...") else: raise def __len__(self) -> int: return len(self.source_datapipe) def _extract_gdrive_api_response(content: str) -> Optional[str]: match = re.search("<title>Google Drive - (?P<api_response>.+?)</title>", content) return match["api_response"] if match is not None else None def _get_response_from_google_drive( url: str, *, timeout: Optional[float], **query_params: Optional[Dict[str, Any]] ) -> Tuple[str, StreamWrapper]: confirm_token = None with requests.Session() as session: response = session.get(url, timeout=timeout, stream=True, **query_params) # type: ignore[attr-defined] response.raise_for_status() for k, v in response.cookies.items(): if k.startswith("download_warning"): confirm_token = v break else: api_response = _extract_gdrive_api_response(response.text) if api_response == "Virus scan warning": confirm_token = "t" elif api_response == "Quota exceeded": raise RuntimeError(f"Google drive link {url} is currently unavailable, because the quota was exceeded.") if confirm_token: url = url + "&confirm=" + confirm_token response = session.get(url, timeout=timeout, stream=True, **query_params) # type: ignore[attr-defined] response.raise_for_status() if "content-disposition" not in response.headers: raise RuntimeError( f"Google drive link {url} internal error: " "headers don't contain content-disposition. This is usually caused by " "using a sharing/viewing link instead of a download link. Click 'Download' on the " "Google Drive page, which should redirect you to a download page, and use the link " "of that page." ) filename = re.findall('filename="(.+)"', response.headers["content-disposition"]) if filename is None: raise RuntimeError(f"Google drive link {url}: filename could not be autodetected") return filename[0], StreamWrapper(response.raw) @functional_datapipe("read_from_gdrive") class GDriveReaderDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]): r""" Takes URLs pointing at GDrive files, and yields tuples of file name and IO stream (functional name: ``read_from_gdrive``). Args: source_datapipe: a DataPipe that contains URLs to GDrive files timeout: timeout in seconds for HTTP request skip_on_error: whether to skip over urls causing problems, otherwise an exception is raised **kwargs: a Dictionary to pass optional arguments that requests takes. For the full list check out https://docs.python-requests.org/en/master/api/ Example: .. testsetup:: from torchdata.datapipes.iter import GDriveReader GDriveReader.readlines = lambda self: [ ("https://drive.google.com/uc?export=download&id=SomeIDToAGDriveFile", b"<First line from the GDrive File>") ] .. testcode:: from torchdata.datapipes.iter import IterableWrapper, GDriveReader gdrive_file_url = "https://drive.google.com/uc?export=download&id=SomeIDToAGDriveFile" gdrive_reader_dp = GDriveReader(IterableWrapper([gdrive_file_url])) reader_dp = gdrive_reader_dp.readlines() it = iter(reader_dp) path, line = next(it) print((path, line)) Output: .. testoutput:: ('https://drive.google.com/uc?export=download&id=SomeIDToAGDriveFile', b'<First line from the GDrive File>') """ source_datapipe: IterDataPipe[str] def __init__( self, source_datapipe: IterDataPipe[str], *, timeout: Optional[float] = None, skip_on_error: bool = False, **kwargs: Optional[Dict[str, Any]], ) -> None: self.source_datapipe = source_datapipe self.timeout = timeout self.skip_on_error = skip_on_error self.query_params = kwargs def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]: for url in self.source_datapipe: try: yield _get_response_from_google_drive(url, timeout=self.timeout, **self.query_params) except Exception as e: if self.skip_on_error: warnings.warn(f"{e}, skipping...") else: raise def __len__(self) -> int: return len(self.source_datapipe) @functional_datapipe("read_from_remote") class OnlineReaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]): r""" Takes file URLs (can be HTTP URLs pointing to files or URLs to GDrive files), and yields tuples of file URL and IO stream (functional name: ``read_from_remote``). Args: source_datapipe: a DataPipe that contains URLs timeout: timeout in seconds for HTTP request skip_on_error: whether to skip over urls causing problems, otherwise an exception is raised **kwargs: a Dictionary to pass optional arguments that requests takes. For the full list check out https://docs.python-requests.org/en/master/api/ Example: .. testcode:: from torchdata.datapipes.iter import IterableWrapper, OnlineReader file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE" online_reader_dp = OnlineReader(IterableWrapper([file_url])) reader_dp = online_reader_dp.readlines() it = iter(reader_dp) path, line = next(it) print((path, line)) Output: .. testoutput:: ('https://raw.githubusercontent.com/pytorch/data/main/LICENSE', b'BSD 3-Clause License') """ source_datapipe: IterDataPipe[str] def __init__( self, source_datapipe: IterDataPipe[str], *, timeout: Optional[float] = None, skip_on_error: bool = False, **kwargs: Optional[Dict[str, Any]], ) -> None: self.source_datapipe = source_datapipe self.timeout = timeout self.skip_on_error = skip_on_error self.query_params = kwargs def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]: for url in self.source_datapipe: parts = urllib.parse.urlparse(url) if re.match(r"(drive|docs)[.]google[.]com", parts.netloc): try: yield _get_response_from_google_drive(url, timeout=self.timeout, **self.query_params) except Exception as e: if self.skip_on_error: warnings.warn(f"{e}, skipping...") else: raise else: try: yield _get_response_from_http(url, timeout=self.timeout, **self.query_params) except Exception as e: if self.skip_on_error: warnings.warn(f"{e}, skipping...") else: raise def __len__(self) -> int: return len(self.source_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Iterator, Tuple from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils import StreamWrapper try: from aistore.client import Client from aistore.pytorch.utils import parse_url, unparse_url HAS_AIS = True except ImportError: HAS_AIS = False try: import aistore from packaging.version import parse as parse_version AIS_VERSION_CHECK = parse_version(aistore.__version__) >= parse_version("1.0.2") except (ImportError, AttributeError): AIS_VERSION_CHECK = False def _assert_aistore() -> None: if not HAS_AIS: raise ModuleNotFoundError( "Package `aistore` (>=1.0.2) is required to be installed to use this datapipe." "Please run `pip install --upgrade aistore` or `conda install aistore` to install the package" "For more info visit: https://github.com/NVIDIA/aistore/blob/master/sdk/python/" ) def _assert_aistore_version() -> None: if not AIS_VERSION_CHECK: raise ImportError( "AIStore version >= 1.0.2 required" "Please run `pip install --upgrade aistore` or `conda update aistore` to install the latest version" ) @functional_datapipe("list_files_by_ais") class AISFileListerIterDataPipe(IterDataPipe[str]): """ Iterable Datapipe that lists files from the AIStore backends with the given URL prefixes (functional name: ``list_files_by_ais``). Acceptable prefixes include but not limited to - `ais://bucket-name`, `ais://bucket-name/` Note: - This function also supports files from multiple backends (`aws://..`, `gcp://..`, `azure://..`, etc) - Input must be a list and direct URLs are not supported. - length is -1 by default, all calls to len() are invalid as not all items are iterated at the start. - This internally uses AIStore Python SDK. Args: source_datapipe(IterDataPipe[str]): a DataPipe that contains URLs/URL prefixes to objects on AIS url(str): AIStore endpoint length(int): length of the datapipe Example: >>> from torchdata.datapipes.iter import IterableWrapper, AISFileLister >>> ais_prefixes = IterableWrapper(['gcp://bucket-name/folder/', 'aws:bucket-name/folder/', 'ais://bucket-name/folder/', ...]) >>> dp_ais_urls = AISFileLister(url='localhost:8080', source_datapipe=ais_prefixes) >>> for url in dp_ais_urls: ... pass >>> # Functional API >>> dp_ais_urls = ais_prefixes.list_files_by_ais(url='localhost:8080') >>> for url in dp_ais_urls: ... pass """ def __init__(self, source_datapipe: IterDataPipe[str], url: str, length: int = -1) -> None: _assert_aistore() _assert_aistore_version() self.source_datapipe: IterDataPipe[str] = source_datapipe self.length: int = length self.client = Client(url) def __iter__(self) -> Iterator[str]: for prefix in self.source_datapipe: provider, bck_name, prefix = parse_url(prefix) obj_iter = self.client.bucket(bck_name, provider).list_objects_iter(prefix=prefix) for entry in obj_iter: yield unparse_url(provider=provider, bck_name=bck_name, obj_name=entry.name) def __len__(self) -> int: if self.length == -1: raise TypeError(f"{type(self).__name__} instance doesn't have valid length") return self.length @functional_datapipe("load_files_by_ais") class AISFileLoaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]): """ Iterable DataPipe that loads files from AIStore with the given URLs (functional name: ``load_files_by_ais``). Iterates all files in BytesIO format and returns a tuple (url, BytesIO). Note: - This function also supports files from multiple backends (`aws://..`, `gcp://..`, `azure://..`, etc) - Input must be a list and direct URLs are not supported. - This internally uses AIStore Python SDK. Args: source_datapipe(IterDataPipe[str]): a DataPipe that contains URLs/URL prefixes to objects url(str): AIStore endpoint length(int): length of the datapipe Example: >>> from torchdata.datapipes.iter import IterableWrapper, AISFileLister,AISFileLoader >>> ais_prefixes = IterableWrapper(['gcp://bucket-name/folder/', 'aws:bucket-name/folder/', 'ais://bucket-name/folder/', ...]) >>> dp_ais_urls = AISFileLister(url='localhost:8080', source_datapipe=ais_prefixes) >>> dp_cloud_files = AISFileLoader(url='localhost:8080', source_datapipe=dp_ais_urls) >>> for url, file in dp_cloud_files: ... pass >>> # Functional API >>> dp_cloud_files = dp_ais_urls.load_files_by_ais(url='localhost:8080') >>> for url, file in dp_cloud_files: ... pass """ def __init__(self, source_datapipe: IterDataPipe[str], url: str, length: int = -1) -> None: _assert_aistore() _assert_aistore_version() self.source_datapipe: IterDataPipe[str] = source_datapipe self.length = length self.client = Client(url) def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]: for url in self.source_datapipe: provider, bck_name, obj_name = parse_url(url) yield url, StreamWrapper( self.client.bucket(bck_name=bck_name, provider=provider).object(obj_name=obj_name).get().raw() ) def __len__(self) -> int: return len(self.source_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os import posixpath from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union from torch.utils.data.datapipes.utils.common import match_masks from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterableWrapper, IterDataPipe from torchdata.datapipes.utils import StreamWrapper try: import fsspec except ImportError: fsspec = None U = Union[bytes, bytearray, str] def _assert_fsspec() -> None: if fsspec is None: raise ModuleNotFoundError( "Package `fsspec` is required to be installed to use this datapipe." "Please use `pip install fsspec` or `conda install -c conda-forge fsspec`" "to install the package" ) @functional_datapipe("list_files_by_fsspec") class FSSpecFileListerIterDataPipe(IterDataPipe[str]): r""" Lists the contents of the directory at the provided ``root`` pathname or URL, and yields the full pathname or URL for each file within the directory (functional name: ``list_files_by_fsspec``). Args: root: The root `fsspec` path directory or list of path directories to list files from masks: Unix style filter string or string list for filtering file name(s) kwargs: Extra options that make sense to a particular storage connection, e.g. host, port, username, password, etc. Example: .. testsetup:: dir_path = "path" .. testcode:: from torchdata.datapipes.iter import FSSpecFileLister datapipe = FSSpecFileLister(root=dir_path) """ def __init__( self, root: Union[str, Sequence[str], IterDataPipe], masks: Union[str, List[str]] = "", **kwargs, ) -> None: _assert_fsspec() if isinstance(root, str): root = [ root, ] if not isinstance(root, IterDataPipe): self.datapipe: IterDataPipe = IterableWrapper(root) # type: ignore[assignment] else: self.datapipe = root self.masks = masks self.kwargs_for_connection = kwargs def __iter__(self) -> Iterator[str]: for root in self.datapipe: fs, path = fsspec.core.url_to_fs(root, **self.kwargs_for_connection) if isinstance(fs.protocol, str): protocol_list = [fs.protocol] else: protocol_list = fs.protocol # fspec.core.url_to_fs will return "abfs" for both, "az://" and "abfs://" urls if "abfs" in protocol_list: protocol_list.append("az") is_local = fs.protocol == "file" or not any(root.startswith(protocol) for protocol in protocol_list) if fs.isfile(path): yield root else: for file_name in fs.ls(path, detail=False): # Ensure it returns List[str], not List[Dict] if not match_masks(file_name, self.masks): continue # ensure the file name has the full fsspec protocol path if any(file_name.startswith(protocol) for protocol in protocol_list): yield file_name else: if is_local: abs_path = os.path.join(path, file_name) elif not file_name.startswith(path): abs_path = posixpath.join(path, file_name) else: abs_path = file_name starts_with = False for protocol in protocol_list: if root.startswith(protocol): starts_with = True yield protocol + "://" + abs_path break if not starts_with: yield abs_path @functional_datapipe("open_files_by_fsspec") class FSSpecFileOpenerIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]): r""" Opens files from input datapipe which contains `fsspec` paths and yields a tuple of pathname and opened file stream (functional name: ``open_files_by_fsspec``). Args: source_datapipe: Iterable DataPipe that provides the pathnames or URLs mode: An optional string that specifies the mode in which the file is opened (``"r"`` by default) kwargs_for_open: Optional Dict to specify kwargs for opening files (``fs.open()``) kwargs: Extra options that are used to establish a particular storage connection, e.g. host, port, username, password, etc. Example: .. testsetup:: dir_path = "path" .. testcode:: from torchdata.datapipes.iter import FSSpecFileLister datapipe = FSSpecFileLister(root=dir_path) file_dp = datapipe.open_files_by_fsspec() """ def __init__( self, source_datapipe: IterDataPipe[str], mode: str = "r", *, kwargs_for_open: Optional[Dict] = None, **kwargs ) -> None: _assert_fsspec() self.source_datapipe: IterDataPipe[str] = source_datapipe self.mode: str = mode self.kwargs_for_open = kwargs_for_open if kwargs_for_open is not None else {} self.kwargs_for_connection = kwargs def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]: for file_uri in self.source_datapipe: fs, path = fsspec.core.url_to_fs(file_uri, **self.kwargs_for_connection) file = fs.open(path, self.mode, **self.kwargs_for_open) yield file_uri, StreamWrapper(file) def __len__(self) -> int: return len(self.source_datapipe) @functional_datapipe("save_by_fsspec") class FSSpecSaverIterDataPipe(IterDataPipe[str]): r""" Takes in a DataPipe of tuples of metadata and data, saves the data to the target path (generated by the filepath_fn and metadata), and yields the resulting `fsspec` path (functional name: ``save_by_fsspec``). Args: source_datapipe: Iterable DataPipe with tuples of metadata and data mode: Mode in which the file will be opened for write the data (``"w"`` by default) filepath_fn: Function that takes in metadata and returns the target path of the new file kwargs_for_open: Optional Dict to specify kwargs for opening files (``fs.open()``) kwargs: Extra options that are used to establish a particular storage connection, e.g. host, port, username, password, etc. Example: .. testsetup:: file_prefix = "file" .. testcode:: from torchdata.datapipes.iter import IterableWrapper def filepath_fn(name: str) -> str: return file_prefix + name name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"} source_dp = IterableWrapper(sorted(name_to_data.items())) fsspec_saver_dp = source_dp.save_by_fsspec(filepath_fn=filepath_fn, mode="wb") res_file_paths = list(fsspec_saver_dp) .. testcleanup:: import os for name in name_to_data.keys(): os.remove(file_prefix + name) """ def __init__( self, source_datapipe: IterDataPipe[Tuple[Any, U]], mode: str = "w", filepath_fn: Optional[Callable] = None, *, kwargs_for_open: Optional[Dict] = None, **kwargs, ): _assert_fsspec() self.source_datapipe: IterDataPipe[Tuple[Any, U]] = source_datapipe self.mode: str = mode self.filepath_fn: Optional[Callable] = filepath_fn self.kwargs_for_open = kwargs_for_open if kwargs_for_open is not None else {} self.kwargs_for_connection = kwargs def __iter__(self) -> Iterator[str]: for meta, data in self.source_datapipe: filepath = meta if self.filepath_fn is None else self.filepath_fn(meta) fs, path = fsspec.core.url_to_fs(filepath, **self.kwargs_for_connection) with fs.open(path, self.mode, **self.kwargs_for_open) as f: f.write(data) yield filepath def __len__(self) -> int: return len(self.source_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Iterator, Tuple from torchdata.datapipes.iter import IterDataPipe from torchdata.datapipes.utils import StreamWrapper try: import datasets except ImportError: datasets = None def _get_response_from_huggingface_hub( dataset: str, streaming: bool = True, **config_kwargs, ) -> Iterator[Any]: hf_dataset = datasets.load_dataset(path=dataset, streaming=streaming, **config_kwargs) return iter(hf_dataset) class HuggingFaceHubReaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]): r""" Takes in dataset names and returns an Iterable HuggingFace dataset. Please refer to https://huggingface.co/docs/datasets/loading for the meaning and type of each argument. Contrary to their implementation, default behavior differs in the following: * ``streaming`` is set to ``True`` Args: dataset: path or name of the dataset **config_kwargs: additional arguments for ``datasets.load_dataset()`` Example: .. testsetup:: import datasets from torchdata.datapipes.iter import IterableWrapper, HuggingFaceHubReader from unittest.mock import MagicMock datasets.load_dataset = MagicMock(return_value=datasets.Dataset.from_dict( {"id": ["7bd227d9-afc9-11e6-aba1-c4b301cdf627", "7bd22905-afc9-11e6-a5dc-c4b301cdf627" ], "package_name": ["com.mantz_it.rfanalyzer"] * 2} )) .. testcode:: huggingface_reader_dp = HuggingFaceHubReader("lhoestq/demo1", revision="main") elem = next(iter(huggingface_reader_dp)) assert elem["package_name"] == "com.mantz_it.rfanalyzer" """ def __init__( self, dataset: str, **config_kwargs, ) -> None: if datasets is None: raise ModuleNotFoundError( "Package `datasets` is required to be installed to use this datapipe." "Please use `pip install datasets` or `conda install -c conda-forge datasets`" "to install the package" ) self.dataset = dataset self.config_kwargs = config_kwargs def __iter__(self) -> Iterator[Any]: return _get_response_from_huggingface_hub(dataset=self.dataset, **self.config_kwargs) def __len__(self) -> int: raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os from typing import Any, Callable, Iterator, List, Optional, Sequence, Tuple, Union from torch.utils.data.datapipes.utils.common import match_masks from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterableWrapper, IterDataPipe from torchdata.datapipes.utils import StreamWrapper try: import iopath except ImportError: iopath = None U = Union[bytes, bytearray, str] def _create_default_pathmanager(): from iopath.common.file_io import HTTPURLHandler, OneDrivePathHandler, PathManager pathmgr = PathManager() pathmgr.register_handler(HTTPURLHandler(), allow_override=True) pathmgr.register_handler(OneDrivePathHandler(), allow_override=True) # S3PathHandler is not included in 0.1.8 try: from iopath.common.s3 import S3PathHandler pathmgr.register_handler(S3PathHandler(), allow_override=True) except ImportError: pass return pathmgr @functional_datapipe("list_files_by_iopath") class IoPathFileListerIterDataPipe(IterDataPipe[str]): r""" Lists the contents of the directory at the provided ``root`` pathname or URL, and yields the full pathname or URL for each file within the directory (functional name: ``list_files_by_iopath``). Args: root: The root local filepath or URL directory or list of roots to list files from masks: Unix style filter string or string list for filtering file name(s) pathmgr: Custom ``iopath.PathManager``. If not specified, a default ``PathManager`` is created. Note: Default ``PathManager`` currently supports local file path, normal HTTP URL and OneDrive URL. S3 URL is supported only with ``iopath``>=0.1.9. Example: .. testsetup:: s3_url = "path" .. testcode:: from torchdata.datapipes.iter import IoPathFileLister datapipe = IoPathFileLister(root=s3_url) """ def __init__( self, root: Union[str, Sequence[str], IterDataPipe], masks: Union[str, List[str]] = "", *, pathmgr=None, handler=None, ) -> None: if iopath is None: raise ModuleNotFoundError( "Package `iopath` is required to be installed to use this datapipe." "Please use `pip install iopath` or `conda install -c conda-forge iopath`" "to install the package" ) if isinstance(root, str): root = [ root, ] if not isinstance(root, IterDataPipe): self.datapipe: IterDataPipe = IterableWrapper(root) # type: ignore[assignment] else: self.datapipe = root self.pathmgr = _create_default_pathmanager() if pathmgr is None else pathmgr self.masks = masks if handler is not None: self.register_handler(handler, allow_override=True) def register_handler(self, handler, allow_override=False): self.pathmgr.register_handler(handler, allow_override=allow_override) def __iter__(self) -> Iterator[str]: for path in self.datapipe: if self.pathmgr.isfile(path): yield path else: for file_name in self.pathmgr.ls(path): if match_masks(file_name, self.masks): yield os.path.join(path, file_name) @functional_datapipe("open_files_by_iopath") class IoPathFileOpenerIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]): r""" Opens files from input datapipe which contains pathnames or URLs, and yields a tuple of pathname and opened file stream (functional name: ``open_files_by_iopath``). Args: source_datapipe: Iterable DataPipe that provides the pathnames or URLs mode: An optional string that specifies the mode in which the file is opened (``"r"`` by default) pathmgr: Custom ``iopath.PathManager``. If not specified, a default ``PathManager`` is created. Note: Default ``PathManager`` currently supports local file path, normal HTTP URL and OneDrive URL. S3 URL is supported only with `iopath`>=0.1.9. Example: .. testsetup:: s3_url = "path" .. testcode:: from torchdata.datapipes.iter import IoPathFileLister datapipe = IoPathFileLister(root=s3_url) file_dp = datapipe.open_files_by_iopath() """ def __init__(self, source_datapipe: IterDataPipe[str], mode: str = "r", pathmgr=None, handler=None) -> None: if iopath is None: raise ModuleNotFoundError( "Package `iopath` is required to be installed to use this datapipe." "Please use `pip install iopath` or `conda install -c conda-forge iopath`" "to install the package" ) self.source_datapipe: IterDataPipe[str] = source_datapipe self.pathmgr = _create_default_pathmanager() if pathmgr is None else pathmgr self.mode: str = mode if handler is not None: self.register_handler(handler, allow_override=True) def register_handler(self, handler, allow_override=False): self.pathmgr.register_handler(handler, allow_override=allow_override) def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]: for file_uri in self.source_datapipe: file = self.pathmgr.open(file_uri, self.mode) yield file_uri, StreamWrapper(file) def __len__(self) -> int: return len(self.source_datapipe) @functional_datapipe("save_by_iopath") class IoPathSaverIterDataPipe(IterDataPipe[str]): r""" Takes in a DataPipe of tuples of metadata and data, saves the data to the target path which is generated by the ``filepath_fn`` and metadata, and yields the resulting path in `iopath` format (functional name: ``save_by_iopath``). Args: source_datapipe: Iterable DataPipe with tuples of metadata and data mode: Mode in which the file will be opened for write the data (``"w"`` by default) filepath_fn: Function that takes in metadata and returns the target path of the new file pathmgr: Custom ``iopath.PathManager``. If not specified, a default ``PathManager`` is created. Note: Default ``PathManager`` currently supports local file path, normal HTTP URL and OneDrive URL. S3 URL is supported only with `iopath`>=0.1.9. Example: .. testsetup:: s3_url = "url" .. testcode:: from torchdata.datapipes.iter import IterableWrapper def filepath_fn(name: str) -> str: return s3_url + name name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"} source_dp = IterableWrapper(sorted(name_to_data.items())) iopath_saver_dp = source_dp.save_by_iopath(filepath_fn=filepath_fn, mode="wb") res_file_paths = list(iopath_saver_dp) .. testcleanup:: import os for file in ["1.txt", "1.txt.lock", "2.txt", "2.txt.lock", "3.txt", "3.txt.lock"]: os.remove(s3_url + file) """ def __init__( self, source_datapipe: IterDataPipe[Tuple[Any, U]], mode: str = "w", filepath_fn: Optional[Callable] = None, *, pathmgr=None, handler=None, ): if iopath is None: raise ModuleNotFoundError( "Package `iopath` is required to be installed to use this datapipe." "Please use `pip install iopath` or `conda install -c conda-forge iopath`" "to install the package" ) self.source_datapipe: IterDataPipe[Tuple[Any, U]] = source_datapipe self.mode: str = mode self.filepath_fn: Optional[Callable] = filepath_fn self.pathmgr = _create_default_pathmanager() if pathmgr is None else pathmgr if handler is not None: self.register_handler(handler, allow_override=True) def __iter__(self) -> Iterator[str]: for meta, data in self.source_datapipe: filepath = meta if self.filepath_fn is None else self.filepath_fn(meta) with iopath.file_lock(filepath): if not os.path.exists(filepath): with self.pathmgr.open(filepath, self.mode) as f: f.write(data) yield filepath def register_handler(self, handler, allow_override=False): self.pathmgr.register_handler(handler, allow_override=allow_override) def __len__(self) -> int: return len(self.source_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import asyncio import inspect import random import warnings from collections import deque from concurrent import futures from typing import Callable, Hashable, Iterator, List, Optional, Set, Sized, TypeVar, Union import torch from torch.utils.data.datapipes.utils.common import _check_unpickable_fn, validate_input_col from torchdata.datapipes import functional_datapipe from torchdata.datapipes.iter import IterDataPipe T_co = TypeVar("T_co", covariant=True) def _no_op_fn(*args): """ No-operation function, returns passed arguments. """ if len(args) == 1: return args[0] return args @functional_datapipe("map_batches") class BatchMapperIterDataPipe(IterDataPipe[T_co]): r""" Combines elements from the source DataPipe to batches and applies a function over each batch, then flattens the outputs to a single, unnested IterDataPipe (functional name: ``map_batches``). Args: datapipe: Source IterDataPipe fn: The function to be applied to each batch of data batch_size: The size of batch to be aggregated from ``datapipe`` input_col: Index or indices of data which ``fn`` is applied, such as: - ``None`` as default to apply ``fn`` to the data directly. - Integer(s) is used for list/tuple. - Key(s) is used for dict. Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> def fn(batch): >>> return [d + 1 for d in batch] >>> source_dp = IterableWrapper(list(range(5))) >>> mapped_dp = source_dp.map_batches(fn, batch_size=3) >>> list(mapped_dp) [1, 2, 3, 4, 5] Notes: Compared with ``map``, the reason that ``map_batches`` doesn't take ``output_col`` argument is the size of ``fn`` output is not guaranteed to be the same as input batch. With different size, this operation cannot assign data back to original data structure. And, this operation is introduced based on the use case from `TorchText`. A pybinded C++ vectorized function can be applied for efficiency. """ datapipe: IterDataPipe fn: Callable batch_size: int def __init__( self, datapipe: IterDataPipe, fn: Callable, batch_size: int, input_col=None, ) -> None: self.datapipe = datapipe _check_unpickable_fn(fn) self.fn = fn # type: ignore[assignment] assert batch_size > 0, "Batch size is required to be larger than 0!" self.batch_size = batch_size self.input_col = input_col def _apply_fn(self, batch): if self.input_col is None: return self.fn(batch) if isinstance(self.input_col, (list, tuple)): args = [[data[idx] for idx in self.input_col] for data in batch] else: args = [data[self.input_col] for data in batch] return self.fn(args) def __iter__(self) -> Iterator[T_co]: batch: List = [] for d in self.datapipe: batch.append(d) if len(batch) == self.batch_size: yield from self._apply_fn(batch) batch = [] if batch: yield from self._apply_fn(batch) def __len__(self) -> int: raise TypeError(f"{type(self).__name__}'s length relies on the output of its function.") @functional_datapipe("flatmap") class FlatMapperIterDataPipe(IterDataPipe[T_co]): r""" Applies a function over each item from the source DataPipe, then flattens the outputs to a single, unnested IterDataPipe (functional name: ``flatmap``). Note: The output from ``fn`` must be a Sequence. Otherwise, an error will be raised. If ``fn`` is ``None``, source DataPipe will be just flattened vertically, provided that items can be unpacked. Args: datapipe: Source IterDataPipe fn: the function to be applied to each element in the DataPipe, the output must be a Sequence input_col: Index or indices of data which ``fn`` is applied, such as: - ``None`` as default to apply ``fn`` to the data directly. - Integer(s) is/are used for list/tuple. - Key(s) is/are used for dict. Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> def fn(e): >>> return [e, e * 10] >>> source_dp = IterableWrapper(list(range(5))) >>> flatmapped_dp = source_dp.flatmap(fn) >>> list(flatmapped_dp) [0, 0, 1, 10, 2, 20, 3, 30, 4, 40] >>> >>> source_dp = IterableWrapper([[1, 2, 3], [4, 5, 6]]) >>> flatmapped_dp = source_dp.flatmap() >>> list(flatmapped_dp) [1, 2, 3, 4, 5, 6] """ datapipe: IterDataPipe fn: Optional[Callable] def __init__(self, datapipe: IterDataPipe, fn: Optional[Callable] = None, input_col=None) -> None: self.datapipe = datapipe if fn is None: fn = _no_op_fn _check_unpickable_fn(fn) self.fn = fn # type: ignore[assignment] self.input_col = input_col validate_input_col(fn, input_col) def _apply_fn(self, data): if self.input_col is None: return self.fn(data) # type: ignore[misc] elif isinstance(self.input_col, (list, tuple)): args = tuple(data[col] for col in self.input_col) return self.fn(*args) # type: ignore[misc] else: return self.fn(data[self.input_col]) # type: ignore[misc] def __iter__(self) -> Iterator[T_co]: for d in self.datapipe: yield from self._apply_fn(d) def __len__(self) -> int: raise TypeError(f"{type(self).__name__}'s length relies on the output of its function.") @functional_datapipe("shuffled_flatmap") class ShuffledFlatMapperIterDataPipe(IterDataPipe): r""" Applies a function over each item from the source DataPipe, then collects the iterables returned in a buffer, then, at every iteration, chooses at random one of the iterables in the buffer and yields one item from this iterable (functional name: ``shuffled_flatmap``). When the buffer is full, the DataPipe will begin to yield elements from iterables within the buffer. New iterables will be added to the buffer once the existing ones run out of elements. Note: The output from ``fn`` must be an Iterable. Otherwise, an error will be raised. If ``fn`` is ``None``, source DataPipe will be just flattened vertically, provided that items can be unpacked. Args: datapipe: Source IterDataPipe fn: the function to be applied to each element in the DataPipe, the output must be a Sequence input_col: Index or indices of data which ``fn`` is applied, such as: - ``None`` as default to apply ``fn`` to the data directly. - Integer(s) is/are used for list/tuple. - Key(s) is/are used for dict. buffer_size: the max number of iterables this DataPipe can hold at a time (default to ``100``) Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper([[1, 2, 3, 4], 'abcd', 'ABCD']) >>> shuffled_flatmapped_dp = source_dp.shuffled_flatmap(buffer_size=2) >>> list(shuffled_flatmapped_dp) ['a', 'b', 'c', 1, 'd', 'A', 'B', 'C', 2, 'D', 3, 4] >>> >>> # To shuffle all the elements, you can combine `shuffled_flatmap` with `in_batch_shuffle` like this: >>> fully_shuffled_flatmapped_dp = source_dp.in_batch_shuffle() >>> fully_shuffled_flatmapped_dp = fully_shuffled_flatmapped_dp.shuffled_flatmap() >>> list(fully_shuffled_flatmapped_dp) ['b', 3, 'c', 'd', 'C', 'A', 'a', 2, 'B', 'D', 4, 1] """ datapipe: IterDataPipe fn: Optional[Callable] buffer_size: int _buffer: List[Iterator] _enabled: bool _seed: Optional[int] _rng: random.Random _no_op_fn: bool = False def __init__( self, datapipe: IterDataPipe, fn: Optional[Callable] = None, input_col=None, buffer_size: int = 100 ) -> None: super().__init__() self._buffer = [] self.datapipe = datapipe if fn is None: fn = _no_op_fn self._no_op_fn = True _check_unpickable_fn(fn) self.fn = fn # type: ignore[assignment] self.input_col = input_col validate_input_col(fn, input_col) assert buffer_size > 0, "buffer_size should be larger than 0" self.buffer_size = buffer_size self._enabled = True self._seed = None self._rng = random.Random() def set_shuffle(self, shuffle=True): self._enabled = shuffle return self def set_seed(self, seed: int): self._seed = seed return self def reset(self) -> None: self._buffer = [] if self._enabled: if self._seed is None: self._seed = int(torch.empty((), dtype=torch.int64).random_().item()) self._rng.seed(self._seed) self._seed = None def _apply_fn(self, data): if self.input_col is None: return self.fn(data) # type: ignore[misc] elif isinstance(self.input_col, (list, tuple)): args = tuple(data[col] for col in self.input_col) return self.fn(*args) # type: ignore[misc] else: return self.fn(data[self.input_col]) # type: ignore[misc] def __iter__(self) -> Iterator[T_co]: if not self._enabled: # equivalent to flatmap for x in self.datapipe: yield from self._apply_fn(x) else: idx = self._rng.randint(0, self.buffer_size - 1) for x in self.datapipe: while len(self._buffer) == self.buffer_size: try: yield next(self._buffer[idx]) idx = self._rng.randint(0, self.buffer_size - 1) except StopIteration: self._buffer.pop(idx) self._buffer.append(iter(self._apply_fn(x))) while self._buffer: try: idx = self._rng.randint(0, len(self._buffer) - 1) yield next(self._buffer[idx]) except StopIteration: self._buffer.pop(idx) def __len__(self) -> int: if self._no_op_fn: return sum(map(len, self.datapipe)) raise TypeError(f"{type(self).__name__}'s length relies on the output of its function.") def __getstate__(self): state = ( self.datapipe, self.fn, self.input_col, self.buffer_size, self._buffer, self._enabled, self._seed, self._rng.getstate(), self._valid_iterator_id, self._number_of_samples_yielded, ) if IterDataPipe.getstate_hook is not None: return IterDataPipe.getstate_hook(state) return state def __setstate__(self, state): ( self.datapipe, self.fn, self.input_col, self.buffer_size, self._buffer, self._enabled, self._seed, rng_state, self._valid_iterator_id, self._number_of_samples_yielded, ) = state self._rng = random.Random() self._rng.setstate(rng_state) def __del__(self): self._buffer.clear() @functional_datapipe("drop") class DropperIterDataPipe(IterDataPipe[T_co]): r""" Drop columns/elements in input DataPipe via its indices (functional name: ``drop``). Args: datapipe: IterDataPipe with columns to be dropped indices: a single column index to be dropped or a list of indices - Integer(s) is/are used for list/tuple. - Key(s) is/are used for dict. Example: >>> from torchdata.datapipes.iter import IterableWrapper, ZipperMapDataPipe >>> dp1 = IterableWrapper(range(5)) >>> dp2 = IterableWrapper(range(10, 15)) >>> dp = dp1.zip(dp2) >>> list(dp) [(0, 10), (1, 11), (2, 12), (3, 13), (4, 14)] >>> drop_dp = dp.drop(1) >>> list(drop_dp) [(0), (1), (2), (3), (4)] """ datapipe: IterDataPipe def __init__( self, datapipe: IterDataPipe, indices: Union[Hashable, List[Hashable]], ) -> None: super().__init__() self.datapipe = datapipe if isinstance(indices, list): self.indices = set(indices) else: self.indices = {indices} def __iter__(self) -> Iterator[T_co]: for old_item in self.datapipe: if isinstance(old_item, tuple): new_item = tuple(x for i, x in enumerate(old_item) if i not in self.indices) # type: ignore[assignment] elif isinstance(old_item, list): new_item = [x for i, x in enumerate(old_item) if i not in self.indices] # type: ignore[assignment] elif isinstance(old_item, dict): new_item = {k: v for (k, v) in old_item.items() if k not in self.indices} # type: ignore[assignment] else: new_item = old_item warnings.warn( "The next item was not an iterable and cannot be filtered, " "please be aware that no filter was done or new item created." ) # check to make sure all indices requested were in the item. warn if not try: for i in self.indices: old_item[i] except (IndexError, KeyError): warnings.warn( "At least one index in the filter is not present in the item being returned," " please be aware that expected columns/keys may be missing." ) yield new_item # type: ignore[misc] def __len__(self) -> int: if isinstance(self.datapipe, Sized): return len(self.datapipe) raise TypeError(f"{type(self).__name__} instance doesn't have valid length") @functional_datapipe("slice") class SliceIterDataPipe(IterDataPipe[T_co]): r""" returns a slice of elements in input DataPipe via start/stop/step or indices (functional name: ``slice``). Args: datapipe: IterDataPipe with iterable elements index: a single start index for the slice or a list of indices to be returned instead of a start/stop slice - Integer(s) is/are used for list/tuple. - Key(s) is/are used for dict. stop: the slice stop. ignored if index is a list or if element is a dict step: step to be taken from start to stop. ignored if index is a list or if element is a dict Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper([(0, 10, 100), (1, 11, 111), (2, 12, 122), (3, 13, 133), (4, 14, 144)]) >>> slice_dp = dp.slice(0, 2) >>> list(slice_dp) [(0, 10), (1, 11), (2, 12), (3, 13), (4, 14)] """ datapipe: IterDataPipe def __init__( self, datapipe: IterDataPipe, index: Union[int, List[Hashable]], stop: Optional[int] = None, step: Optional[int] = None, ) -> None: super().__init__() self.datapipe = datapipe self.index = index self.stop = stop self.step = step if isinstance(index, list): if stop or step: warnings.warn( "A list of indices was passed as well as a stop or step for the slice, " "these arguments can't be used together so only the indices list will be used." ) def __iter__(self) -> Iterator[T_co]: for old_item in self.datapipe: if isinstance(old_item, tuple): if isinstance(self.index, list): new_item = tuple(x for i, x in enumerate(old_item) if i in self.index) # type: ignore[assignment] else: new_item = old_item[self.index : self.stop : self.step] # type: ignore[assignment] elif isinstance(old_item, list): if isinstance(self.index, list): new_item = [x for i, x in enumerate(old_item) if i in self.index] # type: ignore[assignment] else: new_item = old_item[self.index : self.stop : self.step] # type: ignore[assignment] elif isinstance(old_item, dict): if isinstance(self.index, list): new_item = {k: v for (k, v) in old_item.items() if k in self.index} # type: ignore[assignment] elif self.index in old_item.keys(): new_item = {self.index: old_item.get(self.index)} # type: ignore[assignment] else: new_item = old_item # type: ignore[assignment] warnings.warn( "Dictionaries are not sliced by steps, only direct index. " "Please be aware that no filter was done or new item created." ) else: new_item = old_item # type: ignore[assignment] warnings.warn( "The next item was not an iterable and cannot be filtered, " "please be aware that no filter was done or new item created." ) if isinstance(self.index, list): # check to make sure all indices requested were in the item. warn if not try: for i in self.index: old_item[i] except (IndexError, KeyError): warnings.warn( "At least one index in the filter is not present in the item being returned," " please be aware that expected columns/keys may be missing." ) yield new_item # type: ignore[misc] def __len__(self) -> int: if isinstance(self.datapipe, Sized): return len(self.datapipe) raise TypeError(f"{type(self).__name__} instance doesn't have valid length") @functional_datapipe("flatten") class FlattenIterDataPipe(IterDataPipe[T_co]): r""" returns a flattened copy of the input DataPipe at the per sample/element level based on provided indices (functional name: ``flatten``). Note: no args will flatten each item in the datapipe 1 level Args: datapipe: IterDataPipe with iterable elements indices: a single index/key for the item to flatten from an iterator item or a list of indices/keys to be flattened - Integer(s) is/are used for list/tuple. - Key(s) is/are used for dict. Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> dp = IterableWrapper([(0, 10, (100, 1000)), (1, 11, (111, 1001)), (2, 12, (122, 1002)), (3, 13, (133, 1003)), (4, 14, (144, 1004))]) >>> flatten_dp = dp.flatten(2) >>> list(flatten_dp) [(0, 10, 100, 1000), (1, 11, 111, 1001), (2, 12, 122, 1002), (3, 13, 133, 1003), (4, 14, 144, 1004)] >>> >>> dp = IterableWrapper([(0, (1, 2)), (3, (4, 5)), (6, (7, 8))]) >>> flatten_dp = dp.flatten() >>> list(flatten_dp) [(0, 1, 2), (3, 4, 5), (6, 7, 8)] """ datapipe: IterDataPipe indices: Set[Hashable] = set() def __init__( self, datapipe: IterDataPipe, indices: Optional[Union[Hashable, List[Hashable]]] = None, ) -> None: super().__init__() self.datapipe = datapipe if indices: if isinstance(indices, list): self.indices = set(indices) else: self.indices = {indices} def __iter__(self) -> Iterator[T_co]: flatten_all = False if not self.indices: flatten_all = True for old_item in self.datapipe: if isinstance(old_item, dict): new_item = {} # type: ignore[assignment] for k, v in old_item.items(): if k in self.indices: pass if (flatten_all or (k in self.indices)) and isinstance(v, dict): for k_sub, v_sub in v.items(): if k_sub not in old_item: new_item[k_sub] = v_sub else: warnings.warn( "Flattener tried to insert the same key twice into the dict item," "the second key,value pair has been dropped." ) else: if k not in new_item: new_item[k] = v else: warnings.warn( "Flattener tried to insert the same key twice into the dict item," "the second key,value pair has been dropped." ) else: is_tuple = False new_item = [] # type: ignore[assignment] if isinstance(old_item, tuple): is_tuple = True old_item = list(old_item) for i, item in enumerate(old_item): if (flatten_all or (i in self.indices)) and isinstance(item, (list, tuple)): new_item.extend(list(item)) # type: ignore[attr-defined] else: new_item.append(item) # type: ignore[attr-defined] if is_tuple: new_item = tuple(new_item) # type: ignore[assignment] # check to make sure all indices requested were in the item. warn if not try: if self.indices: for index in self.indices: old_item[index] except (IndexError, KeyError): warnings.warn( "At least one index in the filter is not present in the item being returned," " please be aware that expected columns/keys may be missing." ) yield new_item # type: ignore[misc] def __len__(self) -> int: if isinstance(self.datapipe, Sized): return len(self.datapipe) raise TypeError(f"{type(self).__name__} instance doesn't have valid length") class _BatchAsyncMapperIterDataPipe(IterDataPipe): datapipe: IterDataPipe async_fn: Callable def __init__( self, source_datapipe: IterDataPipe, async_fn: Callable, input_col=None, output_col=None, max_concurrency: int = 32, ): self.source_datapipe = source_datapipe if not inspect.iscoroutinefunction(async_fn): raise ValueError(f"Expected a corotine function with an async def syntax, but got a {type(async_fn)}") self.async_fn = async_fn # type: ignore[assignment] if input_col is None and output_col is not None: raise ValueError("`output_col` must be None when `input_col` is None.") self.input_col = input_col if isinstance(output_col, (list, tuple)): if len(output_col) > 1: raise ValueError("`output_col` must be a single-element list or tuple") output_col = output_col[0] self.output_col = output_col self.max_concurrency = max_concurrency def __iter__(self): policy = asyncio.get_event_loop_policy() loop = policy.new_event_loop() try: for batch in self.source_datapipe: policy.set_event_loop(loop) new_batch = loop.run_until_complete(self.processbatch(batch)) yield new_batch finally: loop.run_until_complete(loop.shutdown_asyncgens()) loop.close() async def processbatch(self, batch): sem = asyncio.Semaphore(self.max_concurrency) async def controlled_async_fn(async_fn, *data): async with sem: return await async_fn(*data) coroutines = [] if self.input_col is None: for data in batch: coroutines.append(controlled_async_fn(self.async_fn, data)) results = await asyncio.gather(*coroutines) return results for data in batch: if isinstance(self.input_col, (list, tuple)): args = tuple(data[col] for col in self.input_col) coroutines.append(controlled_async_fn(self.async_fn, *args)) else: coroutines.append(controlled_async_fn(self.async_fn, data[self.input_col])) results = await asyncio.gather(*coroutines) new_batch = [] for data, res in zip(batch, results): t_flag = isinstance(data, tuple) if t_flag: data = list(data) if self.output_col is None: if isinstance(self.input_col, (list, tuple)): data[self.input_col[0]] = res for idx in sorted(self.input_col[1:], reverse=True): del data[idx] else: data[self.input_col] = res elif self.output_col == -1: data.append(res) else: data[self.output_col] = res if t_flag: data = tuple(data) new_batch.append(data) return new_batch def __len__(self): return len(self.source_datapipe) @functional_datapipe("async_map_batches") class BatchAsyncMapperIterDataPipe(IterDataPipe): r""" Combines elements from the source DataPipe to batches and applies a coroutine function over each element within the batch concurrently, then flattens the outpus to a single, unnested IterDataPipe (functional name: ``async_map_batches``). Args: source_datapipe: Source IterDataPipe async_fn: The coroutine function to be applied to each batch of data batch_size: The size of batch to be aggregated from ``source_datapipe`` input_col: Index or indices of data which ``fn`` is applied, such as: - ``None`` as default to apply ``fn`` to the data directly. - Integer(s) is used for list/tuple. - Key(s) is used for dict. output_col: Index of data where result of ``fn`` is placed. ``output_col`` can be specified only when ``input_col`` is not ``None`` - ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with multiple indices, the left-most one is used, and other indices will be removed. - Integer is used for list/tuple. ``-1`` represents to append result at the end. - Key is used for dict. New key is acceptable. max_concurrency: Maximum concurrency to call async functions. (Default: ``32``) flatten: Determine if the batches get flatten in the end (Default: ``True``) If ``False``, outputs will be in batches of size ``batch_size`` Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> async def mul_ten(x): ... await asyncio.sleep(1) ... return x * 10 >>> dp = IterableWrapper(range(50)) >>> dp = dp.async_map_batches(mul_ten, 16) >>> list(dp) [0, 10, 20, 30, ...] >>> dp = IterableWrapper([(i, i) for i in range(50)]) >>> dp = dp.async_map_batches(mul_ten, 16, input_col=1) >>> list(dp) [(0, 0), (1, 10), (2, 20), (3, 30), ...] >>> dp = IterableWrapper([(i, i) for i in range(50)]) >>> dp = dp.async_map_batches(mul_ten, 16, input_col=1, output_col=-1) >>> list(dp) [(0, 0, 0), (1, 1, 10), (2, 2, 20), (3, 3, 30), ...] # Async fetching html from remote >>> from aiohttp import ClientSession >>> async def fetch_html(url: str, **kwargs): ... async with ClientSession() as session: ... resp = await session.request(method="GET", url=url, **kwargs) ... resp.raise_for_status() ... html = await resp.text() ... return html >>> dp = IterableWrapper(urls) >>> dp = dp.async_map_batches(fetch_html, 16) """ def __new__( self, source_datapipe, async_fn: Callable, batch_size: int, input_col=None, output_col=None, max_concurrency: int = 32, flatten: bool = True, ): dp = source_datapipe.batch(batch_size) dp = _BatchAsyncMapperIterDataPipe(dp, async_fn, input_col, output_col, max_concurrency) if flatten: dp = dp.flatmap() try: source_length = len(source_datapipe) if isinstance(source_length, int) and source_length >= 0: dp = dp.set_length(source_length) except (TypeError, NotImplementedError): pass return dp @functional_datapipe("threadpool_map") class ThreadPoolMapperIterDataPipe(IterDataPipe[T_co]): r""" Applies a function over each item from the source DataPipe concurrently using ``ThreadPoolExecutor`` (functional name: ``threadpool_map``). The function can be any regular Python function or partial object. Lambda function is not recommended as it is not supported by pickle. Args: source_datapipe: Source IterDataPipe fn: Function being applied over each item input_col: Index or indices of data which ``fn`` is applied, such as: - ``None`` as default to apply ``fn`` to the data directly. - Integer(s) is used for list/tuple. - Key(s) is used for dict. output_col: Index of data where result of ``fn`` is placed. ``output_col`` can be specified only when ``input_col`` is not ``None`` - ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with multiple indices, the left-most one is used, and other indices will be removed. - Integer is used for list/tuple. ``-1`` represents to append result at the end. - Key is used for dict. New key is acceptable. scheduled_tasks: How many tasks will be scheduled at any given time (Default value: 128) max_workers: Maximum number of threads to execute function calls **threadpool_kwargs: additional arguments to be given to the ``ThreadPoolExecutor`` Note: For more information about ``max_workers`` and additional arguments for the ``ThreadPoolExecutor`` please refer to: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor Note: For optimal use of all threads, ``scheduled_tasks`` > ``max_workers`` is strongly recommended. The higher the variance of the time needed to finish execution of the given ``fn`` is, the higher the value of ``scheduled_tasks`` needs to be to avoid threads sitting idle while waiting for the next result (as results are returned in correct order). However, too high value of ``scheduled_tasks`` might lead to long waiting period until the first element is yielded as ``next`` is called ``scheduled_tasks`` many times on ``source_datapipe`` before yielding. We encourage you to try out different values of ``max_workers`` and ``scheduled_tasks`` in search for optimal values for your use-case. Example: .. testsetup:: from torchdata.datapipes.iter import IterableWrapper import requests import time from unittest.mock import MagicMock requests.get = MagicMock() urls = [] .. testcode:: # fetching html from remote def fetch_html(url: str, **kwargs): r = requests.get(url, **kwargs) r.raise_for_status() return r.content dp = IterableWrapper(urls) dp = dp.threadpool_map(fetch_html,max_workers=16) .. testcode:: def mul_ten(x): time.sleep(0.1) return x * 10 dp = IterableWrapper([(i, i) for i in range(50)]) dp = dp.threadpool_map(mul_ten, input_col=1) print(list(dp)) .. testoutput:: [(0, 0), (1, 10), (2, 20), (3, 30), ...] .. testcode:: dp = IterableWrapper([(i, i) for i in range(50)]) dp = dp.threadpool_map(mul_ten, input_col=1, output_col=-1) print(list(dp)) .. testoutput:: [(0, 0, 0), (1, 1, 10), (2, 2, 20), (3, 3, 30), ...] """ datapipe: IterDataPipe fn: Callable def __init__( self, source_datapipe: IterDataPipe, fn: Callable, input_col=None, output_col=None, scheduled_tasks: int = 128, max_workers: Optional[int] = None, **threadpool_kwargs, ) -> None: super().__init__() self.datapipe = source_datapipe _check_unpickable_fn(fn) self.fn = fn # type: ignore[assignment] if scheduled_tasks <= 0: raise ValueError("'scheduled_tasks' is required to be a positive integer.") self.scheduled_tasks = scheduled_tasks if max_workers is not None and max_workers <= 0: raise ValueError("'max_workers' is required to be a positive integer.") self.max_workers = max_workers self.threadpool_kwargs = threadpool_kwargs self.input_col = input_col if input_col is None and output_col is not None: raise ValueError("`output_col` must be None when `input_col` is None.") if isinstance(output_col, (list, tuple)): if len(output_col) > 1: raise ValueError("`output_col` must be a single-element list or tuple") output_col = output_col[0] self.output_col = output_col validate_input_col(fn, input_col) def _apply_fn(self, data): if self.input_col is None and self.output_col is None: return self.fn(data) if self.input_col is None: res = self.fn(data) elif isinstance(self.input_col, (list, tuple)): args = tuple(data[col] for col in self.input_col) res = self.fn(*args) else: res = self.fn(data[self.input_col]) # Copy tuple to list and run in-place modification because tuple is immutable. if isinstance(data, tuple): t_flag = True data = list(data) else: t_flag = False if self.output_col is None: if isinstance(self.input_col, (list, tuple)): data[self.input_col[0]] = res for idx in sorted(self.input_col[1:], reverse=True): del data[idx] else: data[self.input_col] = res else: if self.output_col == -1: data.append(res) else: data[self.output_col] = res # Convert list back to tuple return tuple(data) if t_flag else data def __iter__(self) -> Iterator[T_co]: with futures.ThreadPoolExecutor(max_workers=self.max_workers, **self.threadpool_kwargs) as executor: futures_deque: deque = deque() has_next = True itr = iter(self.datapipe) for _ in range(self.scheduled_tasks): try: futures_deque.append(executor.submit(self._apply_fn, next(itr))) except StopIteration: has_next = False break while len(futures_deque) > 0: if has_next: try: futures_deque.append(executor.submit(self._apply_fn, next(itr))) except StopIteration: has_next = False yield futures_deque.popleft().result() def __len__(self) -> int: if isinstance(self.datapipe, Sized): return len(self.datapipe) raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import heapq import random from dataclasses import dataclass, field from functools import partial from typing import Callable, final, Generic, Iterator, List, Optional, TypeVar import torch from torchdata.datapipes import DataChunk, functional_datapipe from torchdata.datapipes.iter import IterDataPipe T = TypeVar("T") T_co = TypeVar("T_co", covariant=True) @functional_datapipe("in_batch_shuffle") class InBatchShufflerIterDataPipe(IterDataPipe[DataChunk[T_co]]): r""" Shuffles each mini-batch from the prior DataPipe (functional name: ``in_batch_shuffle``). Args: datapipe: Iterable DataPipe with batched data Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper(range(10)) >>> batch_dp = source_dp.batch(batch_size=3, drop_last=True) >>> list(batch_dp) [[0, 1, 2], [3, 4, 5], [6, 7, 8]] >>> in_batch_shuffle_dp = batch_dp.in_batch_shuffle() >>> list(in_batch_shuffle_dp) [[2, 0, 1], [3, 5, 4], [7, 8, 6]] """ def __init__(self, datapipe: IterDataPipe[DataChunk[T_co]]): self.datapipe = datapipe self._enabled = True self._seed: Optional[int] = None self._rng = random.Random() def set_shuffle(self, shuffle=True): self._enabled = shuffle return self def set_seed(self, seed: int): self._seed = seed return self def __iter__(self) -> Iterator[DataChunk[T_co]]: if not self._enabled: for batch in self.datapipe: yield batch else: for batch in self.datapipe: new_batch = self._rng.sample(batch, len(batch)) yield DataChunk(new_batch) @final def reset(self) -> None: if self._enabled: if self._seed is None: self._seed = int(torch.empty((), dtype=torch.int64).random_().item()) self._rng.seed(self._seed) self._seed = None def __len__(self) -> int: return len(self.datapipe) def __getstate__(self): state = ( self.datapipe, self._enabled, self._seed, self._rng.getstate(), self._valid_iterator_id, self._number_of_samples_yielded, ) if IterDataPipe.getstate_hook is not None: return IterDataPipe.getstate_hook(state) return state def __setstate__(self, state): ( self.datapipe, self._enabled, self._seed, rng_state, self._valid_iterator_id, self._number_of_samples_yielded, ) = state self._rng = random.Random() self._rng.setstate(rng_state) @functional_datapipe("bucketbatch") class BucketBatcherIterDataPipe(IterDataPipe[DataChunk[T_co]]): r""" Creates mini-batches of data from sorted bucket (functional name: ``bucketbatch``). An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``, or ``length % batch_size`` for the last batch if ``drop_last`` is set to ``False``. The purpose of this DataPipe is to batch samples with some similarity according to the sorting function being passed. For an example in the text domain, it may be batching examples with similar number of tokens to minimize padding and to increase throughput. Args: datapipe: Iterable DataPipe being batched batch_size: The size of each batch drop_last: Option to drop the last batch if it's not full batch_num: Number of batches within a bucket (i.e. `bucket_size = batch_size * batch_num`) bucket_num: Number of buckets to consist a pool for shuffling (i.e. `pool_size = bucket_size * bucket_num`) sort_key: Callable to sort a bucket (list) use_in_batch_shuffle: if True, do in-batch shuffle; if False, buffer shuffle Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper(range(10)) >>> batch_dp = source_dp.bucketbatch(batch_size=3, drop_last=True) >>> list(batch_dp) [[5, 6, 7], [9, 0, 1], [4, 3, 2]] >>> def sort_bucket(bucket): >>> return sorted(bucket) >>> batch_dp = source_dp.bucketbatch( >>> batch_size=3, drop_last=True, batch_num=100, >>> bucket_num=1, use_in_batch_shuffle=False, sort_key=sort_bucket >>> ) >>> list(batch_dp) [[3, 4, 5], [6, 7, 8], [0, 1, 2]] """ datapipe: IterDataPipe[T_co] batch_size: int drop_last: bool batch_num: int bucket_num: int sort_key: Optional[Callable] use_in_batch_shuffle: bool def __new__( cls, datapipe: IterDataPipe[T_co], batch_size: int, drop_last: bool = False, batch_num: int = 100, bucket_num: int = 1, sort_key: Optional[Callable] = None, use_in_batch_shuffle: bool = True, ): assert batch_size > 0, "Batch size is required to be larger than 0!" assert batch_num > 0, "Number of batches is required to be larger than 0!" assert bucket_num > 0, "Number of buckets is required to be larger than 0!" bucket_size = batch_size * batch_num pool_size = bucket_size * bucket_num # Shuffle by pool_size if bucket_num > 1 or sort_key is None: if use_in_batch_shuffle: datapipe = datapipe.batch(batch_size=pool_size, drop_last=False).in_batch_shuffle().unbatch() else: datapipe = datapipe.shuffle(buffer_size=pool_size) # Sort by bucket_size if sort_key is given if sort_key is not None: datapipe = datapipe.batch(bucket_size).map(fn=sort_key).unbatch() # Batch and drop last (if needed) datapipe = datapipe.batch(batch_size, drop_last=drop_last) # Shuffle the batched data if sort_key is not None: # In-batch shuffle each bucket seems not that useful, it seems misleading since .batch is called prior. if use_in_batch_shuffle: datapipe = datapipe.batch(batch_size=bucket_num, drop_last=False).in_batch_shuffle().unbatch() else: datapipe = datapipe.shuffle(buffer_size=bucket_size) return datapipe def _default_len_fn(token): return len(token) @dataclass(order=True, frozen=True) class PrioritizedItem(Generic[T_co]): length: int data: T_co = field(compare=False) def _token_len_fn(token: T, len_fn: Callable) -> PrioritizedItem[T]: return PrioritizedItem(length=len_fn(token), data=token) def _token_filter_fn(data, *, min_len, max_len): return data.length >= min_len and data.length <= max_len @functional_datapipe("max_token_bucketize") class MaxTokenBucketizerIterDataPipe(IterDataPipe[DataChunk[T_co]]): r""" Creates mini-batches of data from a min-heap with limited size, and the total length of samples returned by ``len_fn`` within each batch will be limited by ``max_token_count`` (functional name: ``max_token_bucketize``). If ``min_len`` or ``max_len`` is set, the samples with length that is out of ``[min_len, max_len]`` will be filtered out. The purpose of this DataPipe is to batch samples with similar length according to ``len_fn``. Min-heap is used here to make sure the samples are sorted incrementally based on the length. And, the total length of samples in each batch is guaranteed to be smaller than ``max_token_count``. For an example in the audio domain, it may be batching samples with similar length. Then, given the ``max_token_count``, each batch may be concatenated to a Tensor with the same size and minimum padding. If ``include_padding`` is set to ``True``, the token count of each batch includes the padding a succeeding DataPipe could add. This guarentees that even after the batch is padded, ``max_token_count`` will not be exceeded. This can prevent out-of-memory issues for data with large variations in length. Note that batches are bucketized starting from the smallest size in a buffer. This can limit the variablity of batches if ``buffer_size`` is large. To increase variablity, apply ``torchdata.datapipes.iter.Shuffler`` before and after this DataPipe, and keep ``buffer_size`` small. Args: datapipe: Iterable DataPipe being batched max_token_count: Maximum length of total length of data in each batch len_fn: Function to be applied to each element to get lengths. ``len(data)`` is used by default. min_len: Optional minimum length to be included into each batch max_len: Optional maximum length to be included into each batch. buffer_size: This restricts how many samples are taken from prior DataPipe to bucketize include_padding: If True, the size of each batch includes the extra padding to the largest length in the batch. Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> source_dp = IterableWrapper(['1', '11', '1', '1111', '111', '1', '11', '11', '111']) >>> # Using default len_fn to sort samples based on length (string length in this case) >>> batch_dp = source_dp.max_token_bucketize(max_token_count=5) >>> list(batch_dp) [['1', '1', '1', '11'], ['11', '11'], ['111'], ['111'], ['1111']] >>> batch_dp = source_dp.max_token_bucketize(max_token_count=4, buffer_size=4) >>> list(batch_dp) [['1', '1', '1'], ['11', '11'], ['11'], ['111'], ['111'], ['1111']] """ datapipe: IterDataPipe[PrioritizedItem[T_co]] max_token_count: int len_fn: Callable min_len: int max_len: Optional[int] buffer_size: int def __init__( self, datapipe: IterDataPipe[T_co], max_token_count: int, len_fn: Callable = _default_len_fn, min_len: int = 0, max_len: Optional[int] = None, buffer_size: int = 1000, include_padding: bool = False, ) -> None: if max_len is None: max_len = max_token_count if min_len < 0 or min_len > max_len: raise ValueError("``min_len`` should be larger than 0 and equal to or smaller than ``max_len``.") if max_len > max_token_count: raise ValueError("``max_token_count`` must be equal to or greater than ``max_len``.") if buffer_size <= 0: raise ValueError("'buffer_size' is required to be a positive integer.") self.datapipe = datapipe.map(partial(_token_len_fn, len_fn=len_fn)) self.datapipe = self.datapipe.filter(partial(_token_filter_fn, min_len=min_len, max_len=max_len)) self.max_token_count = max_token_count self.buffer_size = buffer_size self.include_padding = include_padding def __iter__(self) -> Iterator[DataChunk[T_co]]: buffer: List[PrioritizedItem[T_co]] = [] batch: List[T_co] = [] batch_size: int = 0 max_length: int = 0 for d in self.datapipe: heapq.heappush(buffer, d) if len(buffer) == self.buffer_size: buffer, batch, batch_size, max_length, data_chunk = self._pop_buffer( buffer, batch, batch_size, max_length ) if data_chunk is not None: yield data_chunk while buffer: buffer, batch, batch_size, max_length, data_chunk = self._pop_buffer(buffer, batch, batch_size, max_length) if data_chunk is not None: yield data_chunk if batch: yield DataChunk(batch) def _pop_buffer(self, buffer: List[PrioritizedItem[T_co]], batch: List[T_co], batch_size: int, max_length: int): data_chunk_to_yield = None d: PrioritizedItem[T_co] = heapq.heappop(buffer) length = d.length token = d.data if self.include_padding: max_length = max(length, max_length) new_batch_size = (len(batch) + 1) * max_length else: new_batch_size = batch_size + length if new_batch_size > self.max_token_count: data_chunk_to_yield = DataChunk(batch) batch = [token] batch_size = length max_length = length else: batch.append(token) batch_size = new_batch_size return buffer, batch, batch_size, max_length, data_chunk_to_yield
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import collections def pin_memory_fn(data, device=None): r""" Utility function to move data to pinned memory. If special treatment is needed to move the input data to pinned memory, please attach a ``pin_memory`` method to the expected data class. """ if hasattr(data, "pin_memory"): # Including torch.Tensor return data.pin_memory(device) elif isinstance(data, (str, bytes)): return data elif isinstance(data, collections.abc.Mapping): pinned_data = {k: pin_memory_fn(sample, device) for k, sample in data.items()} try: return type(data)(pinned_data) # type: ignore[call-arg] except TypeError: # The mapping type may not support `__init__(iterable)`. return pinned_data elif isinstance(data, collections.abc.Sequence): pinned_data = [pin_memory_fn(sample, device) for sample in data] # type: ignore[assignment] try: return type(data)(pinned_data) # type: ignore[call-arg] except TypeError: # The sequence type may not support `__init__(iterable)` (e.g., `range`). return pinned_data else: return data
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from torch.utils.data.datapipes.utils.common import StreamWrapper from torchdata.datapipes.utils._visualization import to_graph from torchdata.datapipes.utils.janitor import janitor from torchdata.datapipes.utils.pin_memory import pin_memory_fn __all__ = [ "StreamWrapper", "janitor", "pin_memory_fn", "to_graph", ] # Please keep this list sorted assert __all__ == sorted(__all__)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from io import IOBase from typing import Tuple from torchdata.datapipes.utils import StreamWrapper def validate_pathname_binary_tuple(data: Tuple[str, IOBase]): if not isinstance(data, tuple): raise TypeError(f"pathname binary data should be tuple type, but it is type {type(data)}") if len(data) != 2: raise TypeError(f"pathname binary stream tuple length should be 2, but got {len(data)}") if not isinstance(data[0], str): raise TypeError(f"pathname within the tuple should have string type pathname, but it is type {type(data[0])}") if not isinstance(data[1], IOBase) and not isinstance(data[1], StreamWrapper): raise TypeError( f"binary stream within the tuple should have IOBase or" f"its subclasses as type, but it is type {type(data[1])}" )
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import itertools from collections import defaultdict from typing import Optional, Set, TYPE_CHECKING from torch.utils.data.datapipes.iter.combining import _ChildDataPipe, IterDataPipe from torch.utils.data.graph import traverse_dps if TYPE_CHECKING: import graphviz class Node: def __init__(self, dp, *, name=None): self.dp = dp self.name = name or type(dp).__name__.replace("IterDataPipe", "") self.childs = set() self.parents = set() def add_child(self, child): self.childs.add(child) child.parents.add(self) def remove_child(self, child): self.childs.remove(child) child.parents.remove(self) def add_parent(self, parent): self.parents.add(parent) parent.childs.add(self) def remove_parent(self, parent): self.parents.remove(parent) parent.childs.remove(self) def __eq__(self, other): if not isinstance(other, Node): return NotImplemented return hash(self) == hash(other) def __hash__(self): return hash(self.dp) def __str__(self): return self.name def __repr__(self): return f"{self}-{hash(self)}" def to_nodes(dp, *, debug: bool) -> Set[Node]: def recurse(dp_graph, child=None): for _dp_id, (dp_node, dp_parents) in dp_graph.items(): node = Node(dp_node) if child is not None: node.add_child(child) yield node yield from recurse(dp_parents, child=node) def aggregate(nodes): groups = defaultdict(list) for node in nodes: groups[node].append(node) nodes = set() for node, group in groups.items(): if len(group) == 1: nodes.add(node) continue aggregated_node = Node(node.dp) for duplicate_node in group: for child in duplicate_node.childs.copy(): duplicate_node.remove_child(child) aggregated_node.add_child(child) for parent in duplicate_node.parents.copy(): duplicate_node.remove_parent(parent) aggregated_node.add_parent(parent) nodes.add(aggregated_node) if debug: return nodes child_dp_nodes = set( itertools.chain.from_iterable(node.parents for node in nodes if isinstance(node.dp, _ChildDataPipe)) ) if not child_dp_nodes: return nodes for node in child_dp_nodes: fixed_parent_node = Node( type(str(node).lstrip("_"), (IterDataPipe,), dict(dp=node.dp, childs=node.childs))() ) nodes.remove(node) nodes.add(fixed_parent_node) for parent in node.parents.copy(): node.remove_parent(parent) fixed_parent_node.add_parent(parent) for child in node.childs: nodes.remove(child) for actual_child in child.childs.copy(): actual_child.remove_parent(child) actual_child.add_parent(fixed_parent_node) return nodes return aggregate(recurse(traverse_dps(dp))) def to_graph(dp, *, debug: bool = False) -> "graphviz.Digraph": """Visualizes a DataPipe by returning a :class:`graphviz.Digraph`, which is a graph of the data pipeline. This allows you to visually inspect all the transformation that takes place in your DataPipes. .. note:: The package :mod:`graphviz` is required to use this function. .. note:: The most common interfaces for the returned graph object are: - :meth:`~graphviz.Digraph.render`: Save the graph to a file. - :meth:`~graphviz.Digraph.view`: Open the graph in a viewer. Args: dp: DataPipe that you would like to visualize (generally the last one in a chain of DataPipes). debug (bool): If ``True``, renders internal datapipes that are usually hidden from the user (such as ``ChildDataPipe`` of `demux` and `fork`). Defaults to ``False``. Example: >>> from torchdata.datapipes.iter import IterableWrapper >>> from torchdata.datapipes.utils import to_graph >>> dp = IterableWrapper(range(10)) >>> dp1, dp2 = dp.demux(num_instances=2, classifier_fn=lambda x: x % 2) >>> dp1 = dp1.map(lambda x: x + 1) >>> dp2 = dp2.filter(lambda _: True) >>> dp3 = dp1.zip(dp2).map(lambda t: t[0] + t[1]) >>> g = to_graph(dp3) >>> g.view() # This will open the graph in a viewer """ try: import graphviz except ModuleNotFoundError: raise ModuleNotFoundError( "The package `graphviz` is required to be installed to use this function. " "Please `pip install graphviz` or `conda install -c conda-forge graphviz`." ) from None # The graph style as well as the color scheme below was copied from https://github.com/szagoruyko/pytorchviz/ # https://github.com/szagoruyko/pytorchviz/blob/0adcd83af8aa7ab36d6afd139cabbd9df598edb7/torchviz/dot.py#L78-L85 node_attr = dict( style="filled", shape="box", align="left", fontsize="10", ranksep="0.1", height="0.2", fontname="monospace", ) graph = graphviz.Digraph(node_attr=node_attr, graph_attr=dict(size="12,12")) for node in to_nodes(dp, debug=debug): fillcolor: Optional[str] if not node.parents: fillcolor = "lightblue" elif not node.childs: fillcolor = "darkolivegreen1" else: fillcolor = None graph.node(name=repr(node), label=str(node), fillcolor=fillcolor) for child in node.childs: graph.edge(repr(node), repr(child)) return graph
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from torchdata.datapipes.utils import StreamWrapper def janitor(obj): """ Invokes various `obj` cleanup procedures such as: - Closing streams """ # TODO(632): We can also release caching locks here to allow filtering StreamWrapper.close_streams(obj)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from torch.utils.data import MapDataPipe from torch.utils.data.datapipes.map import Batcher, Concater, Mapper, SequenceWrapper, Shuffler, Zipper from torchdata.datapipes.iter.util.converter import IterToMapConverterMapDataPipe as IterToMapConverter from torchdata.datapipes.map.util.cacheholder import InMemoryCacheHolderMapDataPipe as InMemoryCacheHolder from torchdata.datapipes.map.util.unzipper import UnZipperMapDataPipe as UnZipper __all__ = [ "Batcher", "Concater", "InMemoryCacheHolder", "IterToMapConverter", "MapDataPipe", "Mapper", "SequenceWrapper", "Shuffler", "UnZipper", "Zipper", ] # Please keep this list sorted assert __all__ == sorted(__all__)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import List, Optional from torch.utils.data import IterDataPipe, MapDataPipe # @functional_datapipe("to_iter_datapipe") # This line must be kept for .pyi signature parser class MapToIterConverterIterDataPipe(IterDataPipe): """ Convert a ``MapDataPipe`` to an ``IterDataPipe`` (functional name: ``to_iter_datapipe``). It uses ``indices`` to iterate through the ``MapDataPipe``, defaults to ``range(len(mapdatapipe))`` if not given. For the opposite converter, use :class:`.IterToMapConverter`. Args: datapipe: source MapDataPipe with data indices: optional list of indices that will dictate how the datapipe will be iterated over Example: >>> from torchdata.datapipes.map import SequenceWrapper >>> source_dp = SequenceWrapper(range(10)) >>> iter_dp = source_dp.to_iter_datapipe() >>> list(iter_dp) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] >>> source_dp2 = SequenceWrapper({'a': 1, 'b': 2, 'c': 3}) >>> iter_dp2 = source_dp2.to_iter_datapipe(indices=['a', 'b', 'c']) >>> list(iter_dp2) [1, 2, 3] """ # Note that ``indices`` has ``Optional[List]`` instead of ``Optional[Iterable]`` as type because a generator # can be passed in as an iterable, which will complicate the serialization process as we will have # to materialize ``indices`` and store it. def __init__(self, datapipe: MapDataPipe, indices: Optional[List] = None): if not isinstance(datapipe, MapDataPipe): raise TypeError(f"MapToIterConverter can only apply on MapDataPipe, but found {type(datapipe)}") self.datapipe: MapDataPipe = datapipe self.indices = indices if indices else range(len(datapipe)) def __iter__(self): for idx in self.indices: yield self.datapipe[idx] def __len__(self): return len(self.indices) MapDataPipe.register_datapipe_as_function("to_iter_datapipe", MapToIterConverterIterDataPipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Any, Dict, TypeVar from torchdata.datapipes import functional_datapipe from torchdata.datapipes.map import MapDataPipe T_co = TypeVar("T_co", covariant=True) @functional_datapipe("in_memory_cache") class InMemoryCacheHolderMapDataPipe(MapDataPipe[T_co]): r""" Stores elements from the source DataPipe in memory (functional name: ``in_memory_cache``). Once an item is stored, it will remain unchanged and subsequent retrivals will return the same element. Since items from ``MapDataPipe`` are lazily computed, this can be used to store the results from previous ``MapDataPipe`` and reduce the number of duplicate computations. Note: The default ``cache`` is a ``Dict``. If another data structure is more suitable as cache for your use Args: source_dp: source DataPipe from which elements are read and stored in memory Example: >>> from torchdata.datapipes.map import SequenceWrapper >>> source_dp = SequenceWrapper(range(10)) >>> cache_dp = source_dp.in_memory_cache() >>> list(cache_dp) [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] """ def __init__(self, source_dp: MapDataPipe[T_co]) -> None: self.source_dp: MapDataPipe[T_co] = source_dp self.cache: Dict[Any, T_co] = {} def __getitem__(self, index) -> T_co: if index not in self.cache: self.cache[index] = self.source_dp[index] # type: ignore[index] return self.cache[index] # type: ignore[index] # We can potentially remove `self.source_dp` to save memory once `len(self.cache) == len(self.source_dp)` # Be careful about how that may interact with and graph traversal and other features def __len__(self) -> int: return len(self.source_dp)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. from typing import Optional, Sequence, TypeVar from torchdata.datapipes import functional_datapipe from torchdata.datapipes.map import MapDataPipe T = TypeVar("T") @functional_datapipe("unzip") class UnZipperMapDataPipe(MapDataPipe): """ Takes in a DataPipe of Sequences, unpacks each Sequence, and return the elements in separate DataPipes based on their position in the Sequence (functional name: ``unzip``). The number of instances produced equals to the ``sequence_legnth`` minus the number of columns to skip. Note: Each sequence within the DataPipe should have the same length, specified by the input argument `sequence_length`. Args: source_datapipe: Iterable DataPipe with sequences of data sequence_length: Length of the sequence within the source_datapipe. All elements should have the same length. columns_to_skip: optional indices of columns that the DataPipe should skip (each index should be an integer from 0 to sequence_length - 1) Example: >>> from torchdata.datapipes.map import SequenceWrapper >>> source_dp = SequenceWrapper([(i, i + 10, i + 20) for i in range(3)]) >>> dp1, dp2, dp3 = source_dp.unzip(sequence_length=3) >>> list(dp1) [0, 1, 2] >>> list(dp2) [10, 11, 12] >>> list(dp3) [20, 21, 22] """ def __new__( cls, source_datapipe: MapDataPipe[Sequence[T]], sequence_length: int, columns_to_skip: Optional[Sequence[int]] = None, ): if sequence_length < 1: raise ValueError(f"Expected `sequence_length` larger than 0, but {sequence_length} is found") if columns_to_skip is None: instance_ids = list(range(sequence_length)) else: skips = set(columns_to_skip) instance_ids = [i for i in range(sequence_length) if i not in skips] if len(instance_ids) == 0: raise RuntimeError( f"All instances are being filtered out in {cls.__name__}. Please check" "the input `sequence_length` and `columns_to_skip`." ) return [_UnZipperMapDataPipe(source_datapipe, i) for i in instance_ids] class _UnZipperMapDataPipe(MapDataPipe[T]): def __init__(self, main_datapipe: MapDataPipe[Sequence[T]], instance_id: int): self.main_datapipe = main_datapipe self.instance_id = instance_id def __getitem__(self, index) -> T: return self.main_datapipe[index][self.instance_id] def __len__(self) -> int: return len(self.main_datapipe)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree.