import fire
import torch
from torch.nn import CrossEntropyLoss
from torch.optim import SGD
from torchvision.models import wide_resnet50_2

from utils import get_train_eval_loaders

from ignite.contrib.handlers import ProgressBar
from ignite.engine import convert_tensor, create_supervised_evaluator, Engine, Events
from ignite.handlers import Timer
from ignite.metrics import Accuracy, Loss


def main(dataset_path, batch_size=256, max_epochs=10):
    assert torch.cuda.is_available()
    assert torch.backends.cudnn.enabled, "NVIDIA/Apex:Amp requires cudnn backend to be enabled."
    torch.backends.cudnn.benchmark = True

    device = "cuda"
    train_loader, test_loader, eval_train_loader = get_train_eval_loaders(dataset_path, batch_size=batch_size)

    model = wide_resnet50_2(num_classes=100).to(device)
    optimizer = SGD(model.parameters(), lr=0.01)
    criterion = CrossEntropyLoss().to(device)

    def train_step(engine, batch):
        x = convert_tensor(batch[0], device, non_blocking=True)
        y = convert_tensor(batch[1], device, non_blocking=True)

        optimizer.zero_grad()
        y_pred = model(x)
        loss = criterion(y_pred, y)
        loss.backward()
        optimizer.step()

        return loss.item()

    trainer = Engine(train_step)
    timer = Timer(average=True)
    timer.attach(trainer, step=Events.EPOCH_COMPLETED)
    ProgressBar(persist=True).attach(trainer, output_transform=lambda out: {"batch loss": out})

    metrics = {"Accuracy": Accuracy(), "Loss": Loss(criterion)}

    evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)

    def log_metrics(engine, title):
        for name in metrics:
            print(f"\t{title} {name}: {engine.state.metrics[name]:.2f}")

    @trainer.on(Events.COMPLETED)
    def run_validation(_):
        print(f"- Mean elapsed time for 1 epoch: {timer.value()}")
        print("- Metrics:")
        with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Train"):
            evaluator.run(eval_train_loader)

        with evaluator.add_event_handler(Events.COMPLETED, log_metrics, "Test"):
            evaluator.run(test_loader)

    trainer.run(train_loader, max_epochs=max_epochs)


if __name__ == "__main__":
    fire.Fire(main)
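As a side note, the Timer/handler pattern used above can be exercised in isolation. The following is a minimal, self-contained sketch on a dummy engine with synthetic data; everything in it (names, data) is illustrative and not part of the benchmark script:

import torch
from ignite.engine import Engine, Events
from ignite.handlers import Timer

# Dummy step: no model, just consume the batch and return a number.
def dummy_step(engine, batch):
    return float(batch.sum())

demo_trainer = Engine(dummy_step)
demo_timer = Timer(average=True)
# step= tells the Timer when to record one measurement, here once per epoch.
demo_timer.attach(demo_trainer, step=Events.EPOCH_COMPLETED)

@demo_trainer.on(Events.COMPLETED)
def report(_):
    print(f"Mean elapsed time per epoch: {demo_timer.value():.6f} s")

data = [torch.ones(2) for _ in range(10)]
demo_trainer.run(data, max_epochs=3)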
import os
from pathlib import Path

import brevitas.nn as qnn
import torch
import torch.nn as nn
from pact import PACTReLU
from torchvision import datasets, models
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor

train_transform = Compose(
    [
        Pad(4),
        RandomCrop(32, fill=128),
        RandomHorizontalFlip(),
        ToTensor(),
        Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ]
)

test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])


def get_train_test_datasets(path):
    path = Path(path)
    if not path.exists():
        path.mkdir(parents=True)
        download = True
    else:
        download = True if len(os.listdir(path)) < 1 else False

    train_ds = datasets.CIFAR10(root=path, train=True, download=download, transform=train_transform)
    test_ds = datasets.CIFAR10(root=path, train=False, download=False, transform=test_transform)

    return train_ds, test_ds


def get_model(name):
    __dict__ = globals()
    if name in models.__dict__:
        fn = models.__dict__[name]
    elif name in ["resnet18_QAT_8b", "resnet18_QAT_6b", "resnet18_QAT_5b", "resnet18_QAT_4b"]:
        fn = __dict__[name]
    else:
        raise RuntimeError("Unknown model name {}".format(name))

    return fn(num_classes=10)


# Below code is taken from https://discuss.pytorch.org/t/evaluator-returns-nan/107972/3


def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1, weight_bit_width=8):
    """3x3 convolution with padding"""
    return qnn.QuantConv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
        weight_bit_width=weight_bit_width,
    )


def conv1x1(in_planes, out_planes, stride=1, weight_bit_width=8):
    """1x1 convolution"""
    return qnn.QuantConv2d(
        in_planes, out_planes, kernel_size=1, stride=stride, bias=False, weight_bit_width=weight_bit_width
    )


def make_PACT_relu(bit_width=8):
    relu = qnn.QuantReLU(bit_width=bit_width)
    relu.act_impl = PACTReLU()
    return relu


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None,
        bit_width=8,
    ):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError("BasicBlock only supports groups=1 and base_width=64")
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride, weight_bit_width=bit_width)
        self.bn1 = norm_layer(planes)
        self.relu = make_PACT_relu(bit_width=bit_width)
        self.conv2 = conv3x3(planes, planes, weight_bit_width=bit_width)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution (self.conv2),
    # while the original implementation places the stride at the first 1x1 convolution (self.conv1)
    # according to "Deep residual learning for image recognition" https://arxiv.org/abs/1512.03385.
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.

    expansion = 4

    def __init__(
        self,
        inplanes,
        planes,
        stride=1,
        downsample=None,
        groups=1,
        base_width=64,
        dilation=1,
        norm_layer=None,
        bit_width=8,
    ):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.0)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width, weight_bit_width=bit_width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation, weight_bit_width=bit_width)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion, weight_bit_width=bit_width)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = make_PACT_relu(bit_width=bit_width)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class ResNet_QAT_Xb(nn.Module):
    def __init__(
        self,
        block,
        layers,
        num_classes=1000,
        zero_init_residual=False,
        groups=1,
        width_per_group=64,
        replace_stride_with_dilation=None,
        norm_layer=None,
        bit_width=8,
    ):
        super().__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError(
                "replace_stride_with_dilation should be None "
                "or a 3-element tuple, got {}".format(replace_stride_with_dilation)
            )
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = qnn.QuantConv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = make_PACT_relu()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], bit_width=bit_width)
        self.layer2 = self._make_layer(
            block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0], bit_width=bit_width
        )
        self.layer3 = self._make_layer(
            block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1], bit_width=bit_width
        )
        self.layer4 = self._make_layer(
            block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2], bit_width=bit_width
        )
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):  # qnn.QuantConv2d includes nn.Conv2d inside.
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False, bit_width=8):
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride, weight_bit_width=bit_width),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(
                self.inplanes,
                planes,
                stride,
                downsample,
                self.groups,
                self.base_width,
                previous_dilation,
                norm_layer,
                bit_width=bit_width,
            )
        )
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(
                    self.inplanes,
                    planes,
                    groups=self.groups,
                    base_width=self.base_width,
                    dilation=self.dilation,
                    norm_layer=norm_layer,
                    bit_width=bit_width,
                )
            )

        return nn.Sequential(*layers)

    def _forward_impl(self, x):
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x

    def forward(self, x):
        return self._forward_impl(x)


def _resnet_QAT_Xb(block, layers, **kwargs):
    model = ResNet_QAT_Xb(block, layers, **kwargs)
    return model


def resnet18_QAT_8b(*args, **kwargs):
    return _resnet_QAT_Xb(BasicBlock, [2, 2, 2, 2], **kwargs)


def resnet18_QAT_6b(*args, **kwargs):
    return _resnet_QAT_Xb(BasicBlock, [2, 2, 2, 2], bit_width=6, **kwargs)


def resnet18_QAT_5b(*args, **kwargs):
    return _resnet_QAT_Xb(BasicBlock, [2, 2, 2, 2], bit_width=5, **kwargs)


def resnet18_QAT_4b(*args, **kwargs):
    return _resnet_QAT_Xb(BasicBlock, [2, 2, 2, 2], bit_width=4, **kwargs)
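A quick shape check for the factory functions above — a sketch assuming brevitas is installed; it builds the 4-bit variant with CIFAR10's 10 classes and runs one dummy batch through it:

import torch

model = resnet18_QAT_4b(num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(2, 3, 32, 32))
print(logits.shape)  # expected: torch.Size([2, 10])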
from datetime import datetime
from pathlib import Path

import fire
import torch
import torch.nn as nn
import torch.optim as optim
import utils
from torch.cuda.amp import autocast, GradScaler

import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
from ignite.engine import create_supervised_evaluator, Engine, Events
from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine
from ignite.metrics import Accuracy, Loss
from ignite.utils import manual_seed, setup_logger


def training(local_rank, config):
    rank = idist.get_rank()
    manual_seed(config["seed"] + rank)
    device = idist.device()

    logger = setup_logger(name="CIFAR10-QAT-Training", distributed_rank=local_rank)

    log_basic_info(logger, config)

    output_path = config["output_path"]
    if rank == 0:
        now = datetime.now().strftime("%Y%m%d-%H%M%S")
        folder_name = f"{config['model']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}"
        output_path = Path(output_path) / folder_name
        if not output_path.exists():
            output_path.mkdir(parents=True)
        config["output_path"] = output_path.as_posix()
        logger.info(f"Output path: {config['output_path']}")

        if "cuda" in device.type:
            config["cuda device name"] = torch.cuda.get_device_name(local_rank)

        if config["with_clearml"]:
            from clearml import Task

            task = Task.init("CIFAR10-Training", task_name=output_path.stem)
            task.connect_configuration(config)
            # Log hyper parameters
            hyper_params = [
                "model",
                "batch_size",
                "momentum",
                "weight_decay",
                "num_epochs",
                "learning_rate",
                "num_warmup_epochs",
            ]
            task.connect({k: config[k] for k in hyper_params})

    # Setup dataflow, model, optimizer, criterion
    train_loader, test_loader = get_dataflow(config)

    config["num_iters_per_epoch"] = len(train_loader)
    model, optimizer, criterion, lr_scheduler = initialize(config)

    # Create trainer for current task
    trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)

    # Let's now setup evaluator engine to perform model's validation and compute metrics
    metrics = {
        "Accuracy": Accuracy(),
        "Loss": Loss(criterion),
    }

    # We define two evaluators, as they won't play exactly the same role:
    # - `evaluator` computes metrics on the test set and is used to save the best model
    # - `train_evaluator` computes the same metrics on the training set
    evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
    train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)

    def run_validation(engine):
        epoch = trainer.state.epoch
        state = train_evaluator.run(train_loader)
        log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
        state = evaluator.run(test_loader)
        log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)

    trainer.add_event_handler(
        Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED, run_validation
    )

    if rank == 0:
        # Setup TensorBoard logging on trainer and evaluators. Logged values are:
        # - Training metrics, e.g. running average loss values
        # - Learning rate
        # - Evaluation train/test metrics
        evaluators = {"training": train_evaluator, "test": evaluator}
        tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators)

    # Store 2 best models by validation accuracy starting from num_epochs / 2:
    best_model_handler = Checkpoint(
        {"model": model},
        get_save_handler(config),
        filename_prefix="best",
        n_saved=2,
        global_step_transform=global_step_from_engine(trainer),
        score_name="test_accuracy",
        score_function=Checkpoint.get_default_score_fn("Accuracy"),
    )
    evaluator.add_event_handler(
        Events.COMPLETED(lambda *_: trainer.state.epoch > config["num_epochs"] // 2), best_model_handler
    )

    try:
        trainer.run(train_loader, max_epochs=config["num_epochs"])
    except Exception as e:
        logger.exception("")
        raise e

    if rank == 0:
        tb_logger.close()


def run(
    seed=543,
    data_path="/tmp/cifar10",
    output_path="/tmp/output-cifar10/",
    model="resnet18_QAT_8b",
    batch_size=512,
    momentum=0.9,
    weight_decay=1e-4,
    num_workers=12,
    num_epochs=24,
    learning_rate=0.4,
    num_warmup_epochs=4,
    validate_every=3,
    checkpoint_every=1000,
    backend=None,
    resume_from=None,
    log_every_iters=15,
    nproc_per_node=None,
    with_clearml=False,
    with_amp=False,
    **spawn_kwargs,
):
    """Main entry to train a model on the CIFAR10 dataset.

    Args:
        seed (int): random state seed to set. Default, 543.
        data_path (str): input dataset path. Default, "/tmp/cifar10".
        output_path (str): output path. Default, "/tmp/output-cifar10".
        model (str): model name (from torchvision or the QAT variants defined in utils) to train.
            Default, "resnet18_QAT_8b".
        batch_size (int): total batch size. Default, 512.
        momentum (float): optimizer's momentum. Default, 0.9.
        weight_decay (float): weight decay. Default, 1e-4.
        num_workers (int): number of workers in the data loader. Default, 12.
        num_epochs (int): number of epochs to train the model. Default, 24.
        learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.
        num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.
        validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.
        checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 1000.
        backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl",
            "xla-tpu", "gloo" etc. Default, None.
        nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,
            when main python process is spawning training as child processes.
        resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
        log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations. It can be 0
            to disable it. Default, 15.
        with_clearml (bool): if True, experiment ClearML logger is setup. Default, False.
        with_amp (bool): if True, enables native automatic mixed precision. Default, False.
        **spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
    """
    # num_epochs must be strictly greater than num_warmup_epochs
    if num_warmup_epochs >= num_epochs:
        raise ValueError(
            "num_epochs cannot be less than or equal to num_warmup_epochs, please increase num_epochs or "
            "decrease num_warmup_epochs"
        )

    # catch all local parameters
    config = locals()
    config.update(config["spawn_kwargs"])
    del config["spawn_kwargs"]
    spawn_kwargs["nproc_per_node"] = nproc_per_node

    with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
        parallel.run(training, config)


def get_dataflow(config):
    # - Get train/test datasets
    with idist.one_rank_first(local=True):
        train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])

    # Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
    train_loader = idist.auto_dataloader(
        train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True
    )
    test_loader = idist.auto_dataloader(
        test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False
    )
    return train_loader, test_loader


def initialize(config):
    model = utils.get_model(config["model"])
    # Adapt model for distributed settings if configured
    model = idist.auto_model(model, find_unused_parameters=True)

    optimizer = optim.SGD(
        model.parameters(),
        lr=config["learning_rate"],
        momentum=config["momentum"],
        weight_decay=config["weight_decay"],
        nesterov=True,
    )
    optimizer = idist.auto_optim(optimizer)
    criterion = nn.CrossEntropyLoss().to(idist.device())

    le = config["num_iters_per_epoch"]
    milestones_values = [
        (0, 0.0),
        (le * config["num_warmup_epochs"], config["learning_rate"]),
        (le * config["num_epochs"], 0.0),
    ]
    lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)

    return model, optimizer, criterion, lr_scheduler


def log_metrics(logger, epoch, elapsed, tag, metrics):
    metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()])
    logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {elapsed:.2f} - {tag} metrics:\n {metrics_output}")


def log_basic_info(logger, config):
    logger.info(f"Quantization Aware Training {config['model']} on CIFAR10")
    logger.info(f"- PyTorch version: {torch.__version__}")
    logger.info(f"- Ignite version: {ignite.__version__}")
    if torch.cuda.is_available():
        # explicitly import cudnn as
        # torch.backends.cudnn can not be pickled with hvd spawning procs
        from torch.backends import cudnn

        logger.info(f"- GPU Device: {torch.cuda.get_device_name(idist.get_local_rank())}")
        logger.info(f"- CUDA version: {torch.version.cuda}")
        logger.info(f"- CUDNN version: {cudnn.version()}")

    logger.info("\n")
    logger.info("Configuration:")
    for key, value in config.items():
        logger.info(f"\t{key}: {value}")
    logger.info("\n")

    if idist.get_world_size() > 1:
        logger.info("\nDistributed setting:")
        logger.info(f"\tbackend: {idist.backend()}")
        logger.info(f"\tworld size: {idist.get_world_size()}")
        logger.info("\n")


def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):
    device = idist.device()

    # Setup Ignite trainer:
    # - let's define training step
    # - add other common handlers:
    #    - TerminateOnNan,
    #    - handler to setup learning rate scheduling,
    #    - ModelCheckpoint
    #    - RunningAverage` on `train_step` output
    #    - Two progress bars on epochs and optionally on iterations

    with_amp = config["with_amp"]
    scaler = GradScaler(enabled=with_amp)

    def train_step(engine, batch):
        x, y = batch[0], batch[1]

        if x.device != device:
            x = x.to(device, non_blocking=True)
            y = y.to(device, non_blocking=True)

        model.train()

        with autocast(enabled=with_amp):
            y_pred = model(x)
            loss = criterion(y_pred, y)

        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()

        return {
            "batch loss": loss.item(),
        }

    trainer = Engine(train_step)
    trainer.logger = logger

    to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
    metric_names = [
        "batch loss",
    ]

    common.setup_common_training_handlers(
        trainer=trainer,
        train_sampler=train_sampler,
        to_save=to_save,
        save_every_iters=config["checkpoint_every"],
        save_handler=get_save_handler(config),
        lr_scheduler=lr_scheduler,
        output_names=metric_names if config["log_every_iters"] > 0 else None,
        with_pbars=False,
        clear_cuda_cache=False,
    )

    resume_from = config["resume_from"]
    if resume_from is not None:
        checkpoint_fp = Path(resume_from)
        assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found"
        logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}")
        checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
        Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)

    return trainer


def get_save_handler(config):
    if config["with_clearml"]:
        from ignite.contrib.handlers.clearml_logger import ClearMLSaver

        return ClearMLSaver(dirname=config["output_path"])

    return DiskSaver(config["output_path"], require_empty=False)


if __name__ == "__main__":
    fire.Fire({"run": run})
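For reference, the warm-up schedule built in initialize() can be previewed on a toy optimizer. This sketch uses made-up values (le=10 iterations per epoch, peak lr 0.4 at epoch 4, back to zero at epoch 24) rather than anything computed by the script:

import torch
from ignite.contrib.handlers import PiecewiseLinear
from ignite.engine import Engine, Events

le = 10  # toy iterations per epoch
opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.0)
scheduler = PiecewiseLinear(
    opt, param_name="lr", milestones_values=[(0, 0.0), (le * 4, 0.4), (le * 24, 0.0)]
)

toy = Engine(lambda engine, batch: None)
# The scheduler is a handler: it updates the lr at the start of every iteration.
toy.add_event_handler(Events.ITERATION_STARTED, scheduler)
toy.run(list(range(le)), max_epochs=24)
print(opt.param_groups[0]["lr"])  # ~0.0 after the full decay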
# Implementation taken from https://discuss.pytorch.org/t/evaluator-returns-nan/107972/3
# Ref: https://arxiv.org/abs/1805.06085
import torch
import torch.nn as nn


class PACTClip(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x, alpha):
        ctx.save_for_backward(x, alpha)
        return torch.clamp(x, 0, alpha.data)

    @staticmethod
    def backward(ctx, dy):
        x, alpha = ctx.saved_tensors

        dx = dy.clone()
        dx[x < 0] = 0
        dx[x > alpha] = 0

        dalpha = dy.clone()
        dalpha[x <= alpha] = 0

        return dx, torch.sum(dalpha)


class PACTReLU(nn.Module):
    def __init__(self, alpha=6.0):
        super().__init__()
        self.alpha = nn.Parameter(torch.tensor(alpha))

    def forward(self, x):
        return PACTClip.apply(x, self.alpha)
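A small numerical check of the autograd function above: forward is a clamp to [0, alpha], inputs strictly inside the range pass the gradient through, and alpha collects the gradient from the clipped region.

import torch

relu = PACTReLU(alpha=1.0)
x = torch.tensor([-1.0, 0.5, 2.0], requires_grad=True)
y = relu(x)
print(y)  # tensor([0.0000, 0.5000, 1.0000], ...)
y.sum().backward()
print(x.grad)           # tensor([0., 1., 0.]) -- only 0.5 lies inside (0, alpha)
print(relu.alpha.grad)  # tensor(1.) -- only x=2.0 is clipped at alpha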
import torch.nn as nn
import torch.nn.init as init


class Net(nn.Module):
    def __init__(self, upscale_factor):
        super(Net, self).__init__()

        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
        self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
        self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
        self.conv4 = nn.Conv2d(32, upscale_factor**2, (3, 3), (1, 1), (1, 1))
        self.pixel_shuffle = nn.PixelShuffle(upscale_factor)

        self._initialize_weights()

    def forward(self, x):
        x = self.relu(self.conv1(x))
        x = self.relu(self.conv2(x))
        x = self.relu(self.conv3(x))
        x = self.pixel_shuffle(self.conv4(x))
        return x

    def _initialize_weights(self):
        init.orthogonal_(self.conv1.weight, init.calculate_gain("relu"))
        init.orthogonal_(self.conv2.weight, init.calculate_gain("relu"))
        init.orthogonal_(self.conv3.weight, init.calculate_gain("relu"))
        init.orthogonal_(self.conv4.weight)
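Shape check for the sub-pixel network above: conv4 emits upscale_factor**2 channels, which PixelShuffle rearranges into a single upscaled Y channel.

import torch

net = Net(upscale_factor=3)
with torch.no_grad():
    out = net(torch.randn(1, 1, 32, 32))
print(out.shape)  # torch.Size([1, 1, 96, 96])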
import argparse

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from model import Net
from torch.utils.data import DataLoader
from torchvision.transforms.functional import center_crop, resize, to_tensor

from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import BasicTimeProfiler
from ignite.metrics import PSNR

# Training settings
parser = argparse.ArgumentParser(description="PyTorch Super Res Example")
parser.add_argument("--crop_size", type=int, default=256, help="cropped size of the images for training")
parser.add_argument("--upscale_factor", type=int, required=True, help="super resolution upscale factor")
parser.add_argument("--batch_size", type=int, default=64, help="training batch size")
parser.add_argument("--test_batch_size", type=int, default=10, help="testing batch size")
parser.add_argument("--n_epochs", type=int, default=2, help="number of epochs to train for")
parser.add_argument("--lr", type=float, default=0.01, help="Learning Rate. Default=0.01")
parser.add_argument("--cuda", action="store_true", help="use cuda?")
parser.add_argument("--mps", action="store_true", default=False, help="enables macOS GPU training")
parser.add_argument("--threads", type=int, default=4, help="number of threads for data loader to use")
parser.add_argument("--seed", type=int, default=123, help="random seed to use. Default=123")
parser.add_argument("--debug", action="store_true", help="use debug")

opt = parser.parse_args()

print(opt)

if opt.cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")
if not opt.mps and torch.backends.mps.is_available():
    raise Exception("Found mps device, please run with --mps to enable macOS GPU")

torch.manual_seed(opt.seed)
use_mps = opt.mps and torch.backends.mps.is_available()

if opt.cuda:
    device = torch.device("cuda")
elif use_mps:
    device = torch.device("mps")
else:
    device = torch.device("cpu")

print("===> Loading datasets")


class SRDataset(torch.utils.data.Dataset):
    def __init__(self, dataset, scale_factor, crop_size=256):
        self.dataset = dataset
        self.scale_factor = scale_factor
        self.crop_size = crop_size

    def __getitem__(self, index):
        image, _ = self.dataset[index]
        img = image.convert("YCbCr")
        hr_image, _, _ = img.split()
        hr_image = center_crop(hr_image, self.crop_size)
        lr_image = hr_image.copy()
        if self.scale_factor != 1:
            size = self.crop_size // self.scale_factor
            lr_image = resize(lr_image, [size, size])
        hr_image = to_tensor(hr_image)
        lr_image = to_tensor(lr_image)
        return lr_image, hr_image

    def __len__(self):
        return len(self.dataset)


trainset = torchvision.datasets.Caltech101(root="./data", download=True)
testset = torchvision.datasets.Caltech101(root="./data", download=False)

trainset_sr = SRDataset(trainset, scale_factor=opt.upscale_factor, crop_size=opt.crop_size)
testset_sr = SRDataset(testset, scale_factor=opt.upscale_factor, crop_size=opt.crop_size)

training_data_loader = DataLoader(dataset=trainset_sr, num_workers=opt.threads, batch_size=opt.batch_size, shuffle=True)
testing_data_loader = DataLoader(dataset=testset_sr, num_workers=opt.threads, batch_size=opt.test_batch_size)

print("===> Building model")
model = Net(upscale_factor=opt.upscale_factor).to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=opt.lr)


def train_step(engine, batch):
    model.train()
    input, target = batch[0].to(device), batch[1].to(device)

    optimizer.zero_grad()
    loss = criterion(model(input), target)
    loss.backward()
    optimizer.step()

    return loss.item()


def validation_step(engine, batch):
    model.eval()
    with torch.no_grad():
        x, y = batch[0].to(device), batch[1].to(device)
        y_pred = model(x)

    return y_pred, y


trainer = Engine(train_step)
evaluator = Engine(validation_step)
psnr = PSNR(data_range=1)
psnr.attach(evaluator, "psnr")
validate_every = 1

if opt.debug:
    epoch_length = 10
    validate_epoch_length = 1
else:
    epoch_length = len(training_data_loader)
    validate_epoch_length = len(testing_data_loader)


@trainer.on(Events.EPOCH_COMPLETED(every=validate_every))
def log_validation():
    evaluator.run(testing_data_loader, epoch_length=validate_epoch_length)
    metrics = evaluator.state.metrics
    print(f"Epoch: {trainer.state.epoch}, Avg. PSNR: {metrics['psnr']} dB")


@trainer.on(Events.EPOCH_COMPLETED)
def checkpoint():
    model_out_path = "model_epoch_{}.pth".format(trainer.state.epoch)
    torch.save(model, model_out_path)
    print("Checkpoint saved to {}".format(model_out_path))


# Attach basic profiler
basic_profiler = BasicTimeProfiler()
basic_profiler.attach(trainer)

ProgressBar().attach(trainer, output_transform=lambda x: {"loss": x})

trainer.run(training_data_loader, opt.n_epochs, epoch_length=epoch_length)

results = basic_profiler.get_results()
basic_profiler.print_results(results)
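The PSNR metric used above can also be exercised standalone — a self-contained sketch with a pass-through evaluation step and synthetic (y_pred, y) pairs:

import torch
from ignite.engine import Engine
from ignite.metrics import PSNR

def pass_through(engine, batch):
    return batch  # each batch is already a (y_pred, y) pair

demo_evaluator = Engine(pass_through)
PSNR(data_range=1).attach(demo_evaluator, "psnr")

y = torch.rand(4, 1, 16, 16)
state = demo_evaluator.run([(y * 0.9, y)])
print(state.metrics["psnr"])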
import argparse

import numpy as np
import torch
from PIL import Image
from torchvision.transforms.functional import to_tensor

# Training settings
parser = argparse.ArgumentParser(description="PyTorch Super Res Example")
parser.add_argument("--input_image", type=str, required=True, help="input image to use")
parser.add_argument("--model", type=str, required=True, help="model file to use")
parser.add_argument("--output_filename", type=str, help="where to save the output image")
parser.add_argument("--cuda", action="store_true", help="use cuda")
opt = parser.parse_args()

print(opt)
img = Image.open(opt.input_image).convert("YCbCr")
y, cb, cr = img.split()

model = torch.load(opt.model)
input = to_tensor(y).view(1, -1, y.size[1], y.size[0])

if opt.cuda:
    model = model.cuda()
    input = input.cuda()

model.eval()
with torch.no_grad():
    out = model(input)
out = out.cpu()
out_img_y = out[0].detach().numpy()
out_img_y *= 255.0
out_img_y = out_img_y.clip(0, 255)
out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode="L")

out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
out_img = Image.merge("YCbCr", [out_img_y, out_img_cb, out_img_cr]).convert("RGB")

out_img.save(opt.output_filename)
print("output image saved to ", opt.output_filename)
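The chroma handling in this script (bicubic-upscale Cb/Cr to match the super-resolved Y channel) can be sketched without a trained model — here a plain resize stands in for the network:

from PIL import Image

img = Image.new("YCbCr", (32, 32))
y, cb, cr = img.split()
up_y = y.resize((96, 96), Image.BICUBIC)  # stand-in for the model output
up_cb = cb.resize(up_y.size, Image.BICUBIC)
up_cr = cr.resize(up_y.size, Image.BICUBIC)
rgb = Image.merge("YCbCr", [up_y, up_cb, up_cr]).convert("RGB")
print(rgb.size)  # (96, 96)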
from typing import Callable, Optional

import numpy as np
import torch

try:
    from image_dataset_viz import render_datapoint
except ImportError:
    raise ModuleNotFoundError(
        "Please install image-dataset-viz via pip install --upgrade git+https://github.com/vfdev-5/ImageDatasetViz.git"
    )


def tensor_to_numpy(t: torch.Tensor) -> np.ndarray:
    img = t.cpu().numpy().transpose((1, 2, 0))
    return img.astype(np.uint8)


def make_grid(
    batch_img: torch.Tensor,
    batch_preds: torch.Tensor,
    img_denormalize_fn: Callable,
    batch_gt: Optional[torch.Tensor] = None,
):
    """Create a grid from batch image and mask as

        i+l1+gt1 | i+l2+gt2 | i+l3+gt3 | i+l4+gt4 | ...

    where i+l+gt = image + predicted label + ground truth

    Args:
        batch_img (torch.Tensor): batch of images of any type
        batch_preds (torch.Tensor): batch of masks
        img_denormalize_fn (Callable): function to denormalize batch of images
        batch_gt (torch.Tensor, optional): batch of ground truth masks.
    """
    assert isinstance(batch_img, torch.Tensor) and isinstance(batch_preds, torch.Tensor)
    assert len(batch_img) == len(batch_preds), f"{len(batch_img)} vs {len(batch_preds)}"
    assert batch_preds.ndim == 1, f"{batch_preds.ndim}"

    if batch_gt is not None:
        assert isinstance(batch_gt, torch.Tensor)
        assert len(batch_preds) == len(batch_gt)
        assert batch_gt.ndim == 1, f"{batch_gt.ndim}"

    b = batch_img.shape[0]
    h, w = batch_img.shape[2:]

    le = 1
    out_image = np.zeros((h * le, w * b, 3), dtype="uint8")

    for i in range(b):
        img = batch_img[i]
        y_preds = batch_preds[i]
        img = img_denormalize_fn(img)
        img = tensor_to_numpy(img)
        pred_label = y_preds.cpu().item()
        target = f"p={pred_label}"
        if batch_gt is not None:
            gt_label = batch_gt[i]
            gt_label = gt_label.cpu().item()
            target += f" | gt={gt_label}"
        out_image[0:h, i * w : (i + 1) * w, :] = render_datapoint(img, target, text_size=12)

    return out_image


def predictions_gt_images_handler(img_denormalize_fn, n_images=None, another_engine=None, prefix_tag=None):
    def wrapper(engine, logger, event_name):
        batch = engine.state.batch
        output = engine.state.output
        x, y = batch
        y_pred = output[0]

        if y.shape == y_pred.shape and y.ndim == 4:
            # Case of y of shape (B, C, H, W)
            y = torch.argmax(y, dim=1)

        y_pred = torch.argmax(y_pred, dim=1).byte()

        if n_images is not None:
            x = x[:n_images, ...]
            y = y[:n_images, ...]
            y_pred = y_pred[:n_images, ...]

        grid_pred_gt = make_grid(x, y_pred, img_denormalize_fn, batch_gt=y)

        state = engine.state if another_engine is None else another_engine.state
        global_step = state.get_event_attrib_value(event_name)

        tag = "predictions_with_gt"
        if prefix_tag is not None:
            tag = f"{prefix_tag}: {tag}"
        logger.writer.add_image(tag=tag, img_tensor=grid_pred_gt, global_step=global_step, dataformats="HWC")

    return wrapper
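Usage sketch for make_grid (assumes image-dataset-viz is installed, per the import guard above); an identity lambda stands in for the denormalization function:

import torch

batch_img = torch.randint(0, 256, (2, 3, 32, 32)).float()
batch_preds = torch.tensor([1, 7])
batch_gt = torch.tensor([1, 2])
grid = make_grid(batch_img, batch_preds, img_denormalize_fn=lambda t: t, batch_gt=batch_gt)
print(grid.shape)  # (32, 64, 3): one row of two 32x32 tiles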
import torch

import ignite
import ignite.distributed as idist
from ignite.handlers import DiskSaver


def initialize(config):
    device = idist.device()

    model = config.model.to(device)
    optimizer = config.optimizer

    # Adapt model to dist config
    model = idist.auto_model(model)
    optimizer = idist.auto_optim(optimizer)
    criterion = config.criterion.to(device)

    return model, optimizer, criterion


def log_basic_info(logger, config):
    logger.info(f"- PyTorch version: {torch.__version__}")
    logger.info(f"- Ignite version: {ignite.__version__}")
    if torch.cuda.is_available():
        # explicitly import cudnn as
        # torch.backends.cudnn can not be pickled with hvd spawning procs
        from torch.backends import cudnn

        logger.info(f"- GPU Device: {torch.cuda.get_device_name(idist.get_local_rank())}")
        logger.info(f"- CUDA version: {torch.version.cuda}")
        logger.info(f"- CUDNN version: {cudnn.version()}")

    logger.info("\n")
    logger.info("Configuration:")
    for key, value in config.items():
        logger.info(f"\t{key}: {value}")
    logger.info("\n")

    if idist.get_world_size() > 1:
        logger.info("\nDistributed setting:")
        logger.info(f"\tbackend: {idist.backend()}")
        logger.info(f"\tworld size: {idist.get_world_size()}")
        logger.info("\n")


def log_metrics(logger, epoch, elapsed, tag, metrics):
    metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()])
    logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {elapsed:.2f} - {tag} metrics:\n {metrics_output}")


def get_save_handler(output_path, with_clearml):
    if with_clearml:
        from ignite.contrib.handlers.clearml_logger import ClearMLSaver

        return ClearMLSaver(dirname=output_path)

    return DiskSaver(output_path)
from pathlib import Path
from typing import Callable, Optional, Tuple

import cv2
import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset
from torchvision.datasets import ImageFolder

import ignite.distributed as idist
from ignite.utils import convert_tensor


def opencv_loader(path):
    img = cv2.imread(path)
    assert img is not None, f"Image at '{path}' has a problem"
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)


def get_dataloader(dataset, sampler=None, shuffle=False, limit_num_samples=None, **kwargs):
    if limit_num_samples is not None:
        g = torch.Generator().manual_seed(limit_num_samples)
        indices = torch.randperm(len(dataset), generator=g)[:limit_num_samples]
        dataset = Subset(dataset, indices)

    return idist.auto_dataloader(dataset, sampler=sampler, shuffle=(sampler is None) and shuffle, **kwargs)


def get_train_val_loaders(
    root_path: str,
    train_transforms: Callable,
    val_transforms: Callable,
    batch_size: int = 16,
    num_workers: int = 8,
    val_batch_size: Optional[int] = None,
    limit_train_num_samples: Optional[int] = None,
    limit_val_num_samples: Optional[int] = None,
) -> Tuple[DataLoader, DataLoader, DataLoader]:
    train_ds = ImageFolder(
        Path(root_path) / "train",
        transform=lambda sample: train_transforms(image=sample)["image"],
        loader=opencv_loader,
    )
    val_ds = ImageFolder(
        Path(root_path) / "val", transform=lambda sample: val_transforms(image=sample)["image"], loader=opencv_loader
    )

    if len(val_ds) < len(train_ds):
        g = torch.Generator().manual_seed(len(train_ds))
        train_eval_indices = torch.randperm(len(train_ds), generator=g)[: len(val_ds)]
        train_eval_ds = Subset(train_ds, train_eval_indices)
    else:
        train_eval_ds = train_ds

    val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size

    train_loader = get_dataloader(
        train_ds,
        shuffle=True,
        batch_size=batch_size,
        num_workers=num_workers,
        drop_last=True,
        limit_num_samples=limit_train_num_samples,
    )

    val_loader = get_dataloader(
        val_ds,
        shuffle=False,
        batch_size=val_batch_size,
        num_workers=num_workers,
        drop_last=False,
        limit_num_samples=limit_val_num_samples,
    )

    train_eval_loader = get_dataloader(
        train_eval_ds,
        shuffle=False,
        batch_size=val_batch_size,
        num_workers=num_workers,
        drop_last=False,
        limit_num_samples=limit_val_num_samples,
    )

    return train_loader, val_loader, train_eval_loader


def denormalize(t, mean, std, max_pixel_value=255):
    assert isinstance(t, torch.Tensor), f"{type(t)}"
    assert t.ndim == 3
    d = t.device
    mean = torch.tensor(mean, device=d).unsqueeze(-1).unsqueeze(-1)
    std = torch.tensor(std, device=d).unsqueeze(-1).unsqueeze(-1)
    tensor = std * t + mean
    tensor *= max_pixel_value
    return tensor


def prepare_batch(batch, device, non_blocking):
    x, y = batch[0], batch[1]
    x = convert_tensor(x, device, non_blocking=non_blocking)
    y = convert_tensor(y, device, non_blocking=non_blocking)
    return x, y
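Round-trip check for denormalize(): normalizing a random tensor and denormalizing with the same statistics (max_pixel_value=1) recovers the original values.

import torch

mean, std = (0.485, 0.456, 0.406), (0.229, 0.224, 0.225)
t = torch.rand(3, 8, 8)
m = torch.tensor(mean)[:, None, None]
s = torch.tensor(std)[:, None, None]
normed = (t - m) / s
recovered = denormalize(normed, mean, std, max_pixel_value=1)
print(torch.allclose(recovered, t, atol=1e-6))  # True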
import os
from functools import partial
from pathlib import Path

import fire
import torch

try:
    from torch.cuda.amp import autocast, GradScaler
except ImportError:
    raise RuntimeError("Please, use recent PyTorch version, e.g. >=1.6.0")

import dataflow as data
import utils
import vis
from py_config_runner import ConfigObject, get_params, InferenceConfigSchema, TrainvalConfigSchema

import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.engine import Engine, Events
from ignite.handlers import Checkpoint, Timer
from ignite.metrics import Accuracy, Frequency, TopKCategoricalAccuracy
from ignite.utils import manual_seed, setup_logger


def training(local_rank, config, logger, with_clearml):
    rank = idist.get_rank()
    manual_seed(config.seed + local_rank)

    train_loader = config.train_loader
    val_loader = config.val_loader
    train_eval_loader = config.train_eval_loader

    model, optimizer, criterion = utils.initialize(config)

    # Setup trainer for this specific task
    trainer = create_trainer(model, optimizer, criterion, train_loader.sampler, config, logger, with_clearml)

    # Setup evaluators
    accuracy = Accuracy()
    val_metrics = {
        "Accuracy": accuracy,
        "Top-5 Accuracy": TopKCategoricalAccuracy(k=5),
        "Error": (1.0 - accuracy) * 100,
    }

    if ("val_metrics" in config) and isinstance(config.val_metrics, dict):
        val_metrics.update(config.val_metrics)

    evaluator = create_evaluator(model, val_metrics, config, with_clearml, tag="val")
    train_evaluator = create_evaluator(model, val_metrics, config, with_clearml, tag="train")

    val_interval = config.get("val_interval", 1)

    # Run validation on every val_interval epoch, at the end of the training
    # and at the beginning if config.start_by_validation is True
    event = Events.EPOCH_COMPLETED(every=val_interval)
    if config.num_epochs % val_interval != 0:
        event |= Events.COMPLETED
    if config.get("start_by_validation", False):
        event |= Events.STARTED

    @trainer.on(event)
    def run_validation():
        epoch = trainer.state.epoch
        state = train_evaluator.run(train_eval_loader)
        utils.log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
        state = evaluator.run(val_loader)
        utils.log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)

    score_metric_name = "Accuracy"
    if "es_patience" in config:
        common.add_early_stopping_by_val_score(config.es_patience, evaluator, trainer, metric_name=score_metric_name)

    # Store 2 best models by validation accuracy:
    common.gen_save_best_models_by_val_score(
        save_handler=utils.get_save_handler(config.output_path.as_posix(), with_clearml),
        evaluator=evaluator,
        models=model,
        metric_name=score_metric_name,
        n_saved=2,
        trainer=trainer,
        tag="val",
    )

    # Setup Tensorboard logger
    if rank == 0:
        tb_logger = common.setup_tb_logging(
            config.output_path.as_posix(),
            trainer,
            optimizer,
            evaluators={"training": train_evaluator, "validation": evaluator},
        )

        # Log validation predictions as images
        # We define a custom event filter to log images less frequently (to reduce storage size):
        # - we plot images with masks of the middle validation batch
        # - once every 3 validations and
        # - at the end of the training
        def custom_event_filter(_, val_iteration):
            c1 = val_iteration == 1
            c2 = trainer.state.epoch % (config.get("val_interval", 1) * 3) == 0
            c2 |= trainer.state.epoch == config.num_epochs
            return c1 and c2

        # Image denormalization function to plot predictions with images
        mean = config.get("mean", (0.485, 0.456, 0.406))
        std = config.get("std", (0.229, 0.224, 0.225))
        img_denormalize = partial(data.denormalize, mean=mean, std=std)

        tb_logger.attach(
            evaluator,
            log_handler=vis.predictions_gt_images_handler(
                img_denormalize_fn=img_denormalize, n_images=12, another_engine=trainer, prefix_tag="validation"
            ),
            event_name=Events.ITERATION_COMPLETED(event_filter=custom_event_filter),
        )

        tb_logger.attach(
            train_evaluator,
            log_handler=vis.predictions_gt_images_handler(
                img_denormalize_fn=img_denormalize, n_images=12, another_engine=trainer, prefix_tag="training"
            ),
            event_name=Events.ITERATION_COMPLETED(event_filter=custom_event_filter),
        )

    trainer.run(train_loader, max_epochs=config.num_epochs)

    if idist.get_rank() == 0:
        tb_logger.close()


def create_trainer(model, optimizer, criterion, train_sampler, config, logger, with_clearml):
    device = config.device
    prepare_batch = data.prepare_batch

    # Setup trainer
    accumulation_steps = config.get("accumulation_steps", 1)
    model_output_transform = config.get("model_output_transform", lambda x: x)

    with_amp = config.get("with_amp", True)
    scaler = GradScaler(enabled=with_amp)

    def training_step(engine, batch):
        model.train()
        x, y = prepare_batch(batch, device=device, non_blocking=True)

        with autocast(enabled=with_amp):
            y_pred = model(x)
            y_pred = model_output_transform(y_pred)
            loss = criterion(y_pred, y) / accumulation_steps

        output = {"supervised batch loss": loss.item(), "num_samples": len(x)}

        scaler.scale(loss).backward()
        if engine.state.iteration % accumulation_steps == 0:
            scaler.step(optimizer)
            scaler.update()
            optimizer.zero_grad()

        return output

    trainer = Engine(training_step)
    trainer.logger = logger

    throughput_metric = Frequency(output_transform=lambda x: x["num_samples"])
    throughput_metric.attach(trainer, name="Throughput")

    timer = Timer(average=True)
    timer.attach(
        trainer,
        resume=Events.ITERATION_STARTED,
        pause=Events.ITERATION_COMPLETED,
        step=Events.ITERATION_COMPLETED,
    )

    @trainer.on(Events.ITERATION_COMPLETED(every=20))
    def log_progress():
        metrics = dict(trainer.state.metrics)
        epoch_length = trainer.state.epoch_length
        metrics["ETA (seconds)"] = int((epoch_length - (trainer.state.iteration % epoch_length)) * timer.value())
        metrics_str = ", ".join([f"{k}: {v}" for k, v in metrics.items()])
        metrics_format = (
            f"[{trainer.state.epoch}/{trainer.state.max_epochs}] "
            + f"Iter={trainer.state.iteration % epoch_length}/{epoch_length}: "
            + f"{metrics_str}"
        )
        trainer.logger.info(metrics_format)

    output_names = [
        "supervised batch loss",
    ]
    lr_scheduler = config.lr_scheduler

    to_save = {
        "model": model,
        "optimizer": optimizer,
        "lr_scheduler": lr_scheduler,
        "trainer": trainer,
        "amp": scaler,
    }

    save_every_iters = config.get("save_every_iters", 1000)

    common.setup_common_training_handlers(
        trainer,
        train_sampler,
        to_save=to_save,
        save_every_iters=save_every_iters,
        save_handler=utils.get_save_handler(config.output_path.as_posix(), with_clearml),
        lr_scheduler=lr_scheduler,
        output_names=output_names,
        # with_pbars=not with_clearml,
        with_pbars=False,
        log_every_iters=1,
    )

    resume_from = config.get("resume_from", None)
    if resume_from is not None:
        checkpoint_fp = Path(resume_from)
        assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found"
        logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}")
        checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
        Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)

    return trainer


def create_evaluator(model, metrics, config, with_clearml, tag="val"):
    model_output_transform = config.get("model_output_transform", lambda x: x)
    with_amp = config.get("with_amp", True)
    prepare_batch = data.prepare_batch

    @torch.no_grad()
    def evaluate_step(engine, batch):
        model.eval()
        with autocast(enabled=with_amp):
            x, y = prepare_batch(batch, device=config.device, non_blocking=True)
            y_pred = model(x)
            y_pred = model_output_transform(y_pred)
        return y_pred, y

    evaluator = Engine(evaluate_step)

    for name, metric in metrics.items():
        metric.attach(evaluator, name)

    if idist.get_rank() == 0 and (not with_clearml):
        common.ProgressBar(desc=f"Evaluation ({tag})", persist=False).attach(evaluator)

    return evaluator


def setup_experiment_tracking(config, with_clearml, task_type="training"):
    from datetime import datetime

    assert task_type in ("training", "testing"), task_type

    output_path = ""
    if idist.get_rank() == 0:
        if with_clearml:
            from clearml import Task

            schema = TrainvalConfigSchema if task_type == "training" else InferenceConfigSchema

            task = Task.init("ImageNet Training", config.config_filepath.stem, task_type=task_type)
            task.connect_configuration(config.config_filepath.as_posix())

            task.upload_artifact(config.script_filepath.name, config.script_filepath.as_posix())
            task.upload_artifact(config.config_filepath.name, config.config_filepath.as_posix())
            task.connect(get_params(config, schema))

            output_path = Path(os.environ.get("CLEARML_OUTPUT_PATH", "/tmp"))
            output_path = output_path / "clearml" / datetime.now().strftime("%Y%m%d-%H%M%S")
        else:
            import shutil

            output_path = Path(os.environ.get("OUTPUT_PATH", "/tmp/output-imagenet"))
            output_path = output_path / task_type / config.config_filepath.stem
            output_path = output_path / datetime.now().strftime("%Y%m%d-%H%M%S")
            output_path.mkdir(parents=True, exist_ok=True)

            shutil.copyfile(config.script_filepath.as_posix(), output_path / config.script_filepath.name)
            shutil.copyfile(config.config_filepath.as_posix(), output_path / config.config_filepath.name)

        output_path = output_path.as_posix()

    return Path(idist.broadcast(output_path, src=0))


def run_training(config_filepath, backend="nccl", with_clearml=True):
    """Main entry to run training experiment

    Args:
        config_filepath (str): training configuration .py file
        backend (str): distributed backend: nccl, gloo or None to run without distributed config
        with_clearml (bool): if True, uses ClearML as experiment tracking system
    """
    assert torch.cuda.is_available(), torch.cuda.is_available()
    assert torch.backends.cudnn.enabled
    torch.backends.cudnn.benchmark = True

    config_filepath = Path(config_filepath)
    assert config_filepath.exists(), f"File '{config_filepath.as_posix()}' is not found"

    with idist.Parallel(backend=backend) as parallel:
        logger = setup_logger(name="ImageNet Training", distributed_rank=idist.get_rank())

        config = ConfigObject(config_filepath)
        TrainvalConfigSchema.validate(config)
        config.script_filepath = Path(__file__)

        output_path = setup_experiment_tracking(config, with_clearml=with_clearml)
        config.output_path = output_path

        utils.log_basic_info(logger, get_params(config, TrainvalConfigSchema))

        try:
            parallel.run(training, config, logger=logger, with_clearml=with_clearml)
        except KeyboardInterrupt:
            logger.info("Caught KeyboardInterrupt -> exit")
        except Exception as e:  # noqa
            logger.exception("")
            raise e


def get_model_weights(config, logger, with_clearml):
    path = ""
    if with_clearml:
        from clearml import Model

        if idist.get_rank() > 0:
            idist.barrier()
        else:
            model_id = config.weights_path

            logger.info(f"Loading trained model: {model_id}")
            model = Model(model_id)
            assert model is not None, f"{model_id}"
            path = model.get_local_copy()
            idist.barrier()

        path = idist.broadcast(path, src=0)
    else:
        path = config.weights_path
        logger.info(f"Loading {path}")

    assert Path(path).exists(), f"{path} is not found"
    return torch.load(path)


def evaluation(local_rank, config, logger, with_clearml):
    rank = idist.get_rank()
    device = idist.device()
    manual_seed(config.seed + local_rank)

    data_loader = config.data_loader
    model = config.model.to(device)

    # Load weights:
    state_dict = get_model_weights(config, logger, with_clearml)
    model.load_state_dict(state_dict)

    # Adapt model to dist config
    model = idist.auto_model(model)

    # Setup evaluators
    val_metrics = {
        "Accuracy": Accuracy(),
        "Top-5 Accuracy": TopKCategoricalAccuracy(k=5),
    }

    if ("val_metrics" in config) and isinstance(config.val_metrics, dict):
        val_metrics.update(config.val_metrics)

    evaluator = create_evaluator(model, val_metrics, config, with_clearml, tag="val")

    # Setup Tensorboard logger
    if rank == 0:
        tb_logger = common.TensorboardLogger(log_dir=config.output_path.as_posix())
        tb_logger.attach_output_handler(evaluator, event_name=Events.COMPLETED, tag="validation", metric_names="all")

    state = evaluator.run(data_loader)
    utils.log_metrics(logger, 0, state.times["COMPLETED"], "Validation", state.metrics)

    if idist.get_rank() == 0:
        tb_logger.close()


def run_evaluation(config_filepath, backend="nccl", with_clearml=True):
    """Main entry to run model's evaluation:
        - compute validation metrics

    Args:
        config_filepath (str): evaluation configuration .py file
        backend (str): distributed backend: nccl, gloo, horovod or None to run without distributed config
        with_clearml (bool): if True, uses ClearML as experiment tracking system
    """
    assert torch.cuda.is_available(), torch.cuda.is_available()
    assert torch.backends.cudnn.enabled
    torch.backends.cudnn.benchmark = True

    config_filepath = Path(config_filepath)
    assert config_filepath.exists(), f"File '{config_filepath.as_posix()}' is not found"

    with idist.Parallel(backend=backend) as parallel:
        logger = setup_logger(name="ImageNet Evaluation", distributed_rank=idist.get_rank())

        config = ConfigObject(config_filepath)
        InferenceConfigSchema.validate(config)
        config.script_filepath = Path(__file__)

        output_path = setup_experiment_tracking(config, with_clearml=with_clearml, task_type="testing")
        config.output_path = output_path

        utils.log_basic_info(logger, get_params(config, InferenceConfigSchema))

        try:
            parallel.run(evaluation, config, logger=logger, with_clearml=with_clearml)
        except KeyboardInterrupt:
            logger.info("Caught KeyboardInterrupt -> exit")
        except Exception as e:  # noqa
            logger.exception("")
            raise e


if __name__ == "__main__":
    fire.Fire({"training": run_training, "eval": run_evaluation})
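The composed validation event built in training() (interval epochs, plus COMPLETED when num_epochs is not a multiple of the interval) can be seen in isolation on a dummy engine:

from ignite.engine import Engine, Events

toy = Engine(lambda engine, batch: None)
event = Events.EPOCH_COMPLETED(every=3) | Events.COMPLETED

@toy.on(event)
def on_validation():
    print(f"validation fires at epoch {toy.state.epoch}")

toy.run(list(range(2)), max_epochs=7)  # fires at epochs 3, 6 and once more at completion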
# Basic training configuration
import os
from functools import partial

import albumentations as A
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow import denormalize, get_train_val_loaders
from torchvision.models.resnet import resnet50

import ignite.distributed as idist

# ##############################
# Global configs
# ##############################

seed = 19
device = "cuda"
debug = True

# config to measure time passed to prepare batches and report measured time before the training
benchmark_dataflow = True
benchmark_dataflow_num_iters = 100

train_crop_size = 224
val_crop_size = 320

batch_size = 64 * idist.get_world_size()  # total batch size
num_workers = 8

val_interval = 2
start_by_validation = True

# ##############################
# Setup Dataflow
# ##############################

assert "DATASET_PATH" in os.environ
data_path = os.environ["DATASET_PATH"]

mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

train_transforms = A.Compose(
    [
        A.RandomResizedCrop(train_crop_size, train_crop_size, scale=(0.08, 1.0)),
        A.HorizontalFlip(),
        A.CoarseDropout(max_height=32, max_width=32),
        A.HueSaturationValue(),
        A.Normalize(mean=mean, std=std),
        ToTensor(),
    ]
)

val_transforms = A.Compose(
    [
        # https://github.com/facebookresearch/FixRes/blob/b27575208a7c48a3a6e0fa9efb57baa4021d1305/imnet_resnet50_scratch/transforms.py#L76
        A.Resize(int((256 / 224) * val_crop_size), int((256 / 224) * val_crop_size)),
        A.CenterCrop(val_crop_size, val_crop_size),
        A.Normalize(mean=mean, std=std),
        ToTensor(),
    ]
)

train_loader, val_loader, train_eval_loader = get_train_val_loaders(
    data_path,
    train_transforms=train_transforms,
    val_transforms=val_transforms,
    batch_size=batch_size,
    num_workers=num_workers,
    val_batch_size=batch_size,
    limit_train_num_samples=batch_size * 6 if debug else None,
    limit_val_num_samples=batch_size * 6 if debug else None,
)

# Image denormalization function to plot predictions with images
img_denormalize = partial(denormalize, mean=mean, std=std)

# ##############################
# Setup Model
# ##############################

model = resnet50(weights=None)

# ##############################
# Setup Solver
# ##############################

num_epochs = 2

criterion = nn.CrossEntropyLoss()

le = len(train_loader)
base_lr = 0.1 * (batch_size / 256.0)
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=1e-4)

lr_scheduler = lrs.MultiStepLR(optimizer, milestones=[30 * le, 60 * le, 90 * le, 100 * le], gamma=0.1)
# Basic training configuration
import os
from functools import partial

import albumentations as A
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
from albumentations.pytorch import ToTensorV2 as ToTensor
from dataflow import denormalize, get_train_val_loaders
from torchvision.models.resnet import resnet50

import ignite.distributed as idist

# ##############################
# Global configs
# ##############################

seed = 19
device = "cuda"
debug = False

# config to measure time passed to prepare batches and report measured time before the training
benchmark_dataflow = True
benchmark_dataflow_num_iters = 100

train_crop_size = 224
val_crop_size = 320

batch_size = 64 * idist.get_world_size()  # total batch size
num_workers = 8

val_interval = 2

# ##############################
# Setup Dataflow
# ##############################

assert "DATASET_PATH" in os.environ
data_path = os.environ["DATASET_PATH"]

mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

train_transforms = A.Compose(
    [
        A.RandomResizedCrop(train_crop_size, train_crop_size, scale=(0.08, 1.0)),
        A.HorizontalFlip(),
        A.CoarseDropout(max_height=32, max_width=32),
        A.HueSaturationValue(),
        A.Normalize(mean=mean, std=std),
        ToTensor(),
    ]
)

val_transforms = A.Compose(
    [
        # https://github.com/facebookresearch/FixRes/blob/b27575208a7c48a3a6e0fa9efb57baa4021d1305/imnet_resnet50_scratch/transforms.py#L76
        A.Resize(int((256 / 224) * val_crop_size), int((256 / 224) * val_crop_size)),
        A.CenterCrop(val_crop_size, val_crop_size),
        A.Normalize(mean=mean, std=std),
        ToTensor(),
    ]
)

train_loader, val_loader, train_eval_loader = get_train_val_loaders(
    data_path,
    train_transforms=train_transforms,
    val_transforms=val_transforms,
    batch_size=batch_size,
    num_workers=num_workers,
    val_batch_size=batch_size,
    limit_train_num_samples=batch_size * 6 if debug else None,
    limit_val_num_samples=batch_size * 6 if debug else None,
)

# Image denormalization function to plot predictions with images
img_denormalize = partial(denormalize, mean=mean, std=std)

# ##############################
# Setup Model
# ##############################

model = resnet50(weights=None)

# ##############################
# Setup Solver
# ##############################

num_epochs = 105

criterion = nn.CrossEntropyLoss()

le = len(train_loader)
base_lr = 0.1 * (batch_size / 256.0)
optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=1e-4)

lr_scheduler = lrs.MultiStepLR(optimizer, milestones=[30 * le, 60 * le, 90 * le, 100 * le], gamma=0.1)
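Note that the milestones above are expressed in iterations (le = batches per epoch), so the decay lands at epochs 30/60/90/100. A toy check with a made-up le of 2 and the first two milestones:

import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs

le_toy = 2
p = torch.nn.Parameter(torch.zeros(1))
opt_toy = optim.SGD([p], lr=0.1)
sched = lrs.MultiStepLR(opt_toy, milestones=[30 * le_toy, 60 * le_toy], gamma=0.1)
for _ in range(70 * le_toy):
    opt_toy.step()
    sched.step()  # stepped once per iteration, matching the milestone units
print(opt_toy.param_groups[0]["lr"])  # ~0.001 after both milestones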
import numpy as np
import torch
from PIL import Image

try:
    from image_dataset_viz import render_datapoint
except ImportError:
    raise ModuleNotFoundError(
        "Please install image-dataset-viz via pip install --upgrade git+https://github.com/vfdev-5/ImageDatasetViz.git"
    )


def _getvocpallete(num_cls):
    n = num_cls
    pallete = [0] * (n * 3)
    for j in range(0, n):
        lab = j
        pallete[j * 3 + 0] = 0
        pallete[j * 3 + 1] = 0
        pallete[j * 3 + 2] = 0
        i = 0
        while lab > 0:
            pallete[j * 3 + 0] |= ((lab >> 0) & 1) << (7 - i)
            pallete[j * 3 + 1] |= ((lab >> 1) & 1) << (7 - i)
            pallete[j * 3 + 2] |= ((lab >> 2) & 1) << (7 - i)
            i = i + 1
            lab >>= 3
    return pallete


vocpallete = _getvocpallete(256)


def render_mask(mask):
    if isinstance(mask, np.ndarray):
        mask = Image.fromarray(mask)
    mask.putpalette(vocpallete)
    mask = mask.convert(mode="RGB")
    return mask


def tensor_to_rgb(t):
    img = t.cpu().numpy().transpose((1, 2, 0))
    return img.astype(np.uint8)


def make_grid(batch_img, batch_mask, img_denormalize_fn, batch_gt_mask=None):
    """Create a grid from batch image and mask as

        img1  | img2  | img3  | img4  | ...
        i+m1  | i+m2  | i+m3  | i+m4  | ...
        mask1 | mask2 | mask3 | mask4 | ...
        i+M1  | i+M2  | i+M3  | i+M4  | ...
        Mask1 | Mask2 | Mask3 | Mask4 | ...

    where i+m = image + mask blended with alpha=0.4,
    - maskN is predicted mask
    - MaskN is ground-truth mask if given

    Args:
        batch_img (torch.Tensor): batch of images of any type
        batch_mask (torch.Tensor): batch of masks
        img_denormalize_fn (Callable): function to denormalize batch of images
        batch_gt_mask (torch.Tensor, optional): batch of ground truth masks.
    """
    assert isinstance(batch_img, torch.Tensor) and isinstance(batch_mask, torch.Tensor)
    assert len(batch_img) == len(batch_mask)

    if batch_gt_mask is not None:
        assert isinstance(batch_gt_mask, torch.Tensor)
        assert len(batch_mask) == len(batch_gt_mask)

    b = batch_img.shape[0]
    h, w = batch_img.shape[2:]

    le = 3 if batch_gt_mask is None else 3 + 2
    out_image = np.zeros((h * le, w * b, 3), dtype="uint8")

    for i in range(b):
        img = batch_img[i]
        mask = batch_mask[i]

        img = img_denormalize_fn(img)
        img = tensor_to_rgb(img)
        mask = mask.cpu().numpy()
        mask = render_mask(mask)

        out_image[0:h, i * w : (i + 1) * w, :] = img
        out_image[1 * h : 2 * h, i * w : (i + 1) * w, :] = render_datapoint(img, mask, blend_alpha=0.4)
        out_image[2 * h : 3 * h, i * w : (i + 1) * w, :] = mask

        if batch_gt_mask is not None:
            gt_mask = batch_gt_mask[i]
            gt_mask = gt_mask.cpu().numpy()
            gt_mask = render_mask(gt_mask)
            out_image[3 * h : 4 * h, i * w : (i + 1) * w, :] = render_datapoint(img, gt_mask, blend_alpha=0.4)
            out_image[4 * h : 5 * h, i * w : (i + 1) * w, :] = gt_mask

    return out_image


def predictions_gt_images_handler(img_denormalize_fn, n_images=None, another_engine=None, prefix_tag=None):
    def wrapper(engine, logger, event_name):
        batch = engine.state.batch
        output = engine.state.output
        x = batch["image"]
        y = batch["mask"]
        y_pred = output[0]

        if y.shape == y_pred.shape and y.ndim == 4:
            # Case of y of shape (B, C, H, W)
            y = torch.argmax(y, dim=1)

        y_pred = torch.argmax(y_pred, dim=1).byte()

        if n_images is not None:
            x = x[:n_images, ...]
            y = y[:n_images, ...]
            y_pred = y_pred[:n_images, ...]

        grid_pred_gt = make_grid(x, y_pred, img_denormalize_fn, batch_gt_mask=y)

        state = engine.state if another_engine is None else another_engine.state
        global_step = state.epoch

        tag = "predictions_with_gt"
        if prefix_tag is not None:
            tag = f"{prefix_tag}: {tag} - epoch={global_step}"
        logger.writer.add_image(tag=tag, img_tensor=grid_pred_gt, global_step=global_step, dataformats="HWC")

    return wrapper
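Quick sketch of render_mask(): class ids in a uint8 mask map through the generated palette to distinct RGB colors.

import numpy as np

demo_mask = np.zeros((4, 4), dtype=np.uint8)
demo_mask[:2, :2] = 15  # class 15 is "person" in the VOC list above
rgb = render_mask(demo_mask)
print(rgb.mode, rgb.size)  # RGB (4, 4)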
import torch import ignite import ignite.distributed as idist from ignite.handlers import DiskSaver def initialize(config): device = idist.device() model = config.model.to(device) optimizer = config.optimizer # Adapt model to dist config model = idist.auto_model(model) optimizer = idist.auto_optim(optimizer) criterion = config.criterion.to(device) return model, optimizer, criterion def log_basic_info(logger, config): logger.info(f"- PyTorch version: {torch.__version__}") logger.info(f"- Ignite version: {ignite.__version__}") if torch.cuda.is_available(): # explicitly import cudnn as # torch.backends.cudnn can not be pickled with hvd spawning procs from torch.backends import cudnn logger.info(f"- GPU Device: {torch.cuda.get_device_name(idist.get_local_rank())}") logger.info(f"- CUDA version: {torch.version.cuda}") logger.info(f"- CUDNN version: {cudnn.version()}") logger.info("\n") logger.info("Configuration:") for key, value in config.items(): logger.info(f"\t{key}: {value}") logger.info("\n") if idist.get_world_size() > 1: logger.info("\nDistributed setting:") logger.info(f"\tbackend: {idist.backend()}") logger.info(f"\tworld size: {idist.get_world_size()}") logger.info("\n") def log_metrics(logger, epoch, elapsed, tag, metrics): metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()]) logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {elapsed:.2f} - {tag} metrics:\n {metrics_output}") def get_save_handler(output_path, with_clearml): if with_clearml: from ignite.contrib.handlers.clearml_logger import ClearMLSaver return ClearMLSaver(dirname=output_path) return DiskSaver(output_path)
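# --- Editor's smoke-test sketch for `initialize` above ---
# `config` only needs `model`, `optimizer` and `criterion` attributes, so a
# SimpleNamespace is enough for a quick non-distributed check (all names
# below are illustrative, not from the real configs).
from types import SimpleNamespace

import torch.nn as nn
import torch.optim as optim

_net = nn.Linear(10, 2)
_cfg = SimpleNamespace(
    model=_net,
    optimizer=optim.SGD(_net.parameters(), lr=0.1),
    criterion=nn.CrossEntropyLoss(),
)
_model, _optimizer, _criterion = initialize(_cfg)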
import cv2 import numpy as np import torch from PIL import Image from torch.utils.data import Dataset from torch.utils.data.dataset import Subset from torchvision.datasets.sbd import SBDataset from torchvision.datasets.voc import VOCSegmentation import ignite.distributed as idist from ignite.utils import convert_tensor class TransformedDataset(Dataset): def __init__(self, ds, transform_fn): assert isinstance(ds, Dataset) assert callable(transform_fn) self.ds = ds self.transform_fn = transform_fn def __len__(self): return len(self.ds) def __getitem__(self, index): dp = self.ds[index] return self.transform_fn(**dp) class VOCSegmentationOpencv(VOCSegmentation): target_names = [ "background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "plant", "sheep", "sofa", "train", "tv/monitor", ] def __init__(self, *args, return_meta=False, **kwargs): super(VOCSegmentationOpencv, self).__init__(*args, **kwargs) self.return_meta = return_meta def __getitem__(self, index): img = cv2.imread(self.images[index]) assert img is not None, f"Image at '{self.images[index]}' has a problem" img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) mask = np.asarray(Image.open(self.masks[index])) if self.return_meta: return { "image": img, "mask": mask, "meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index]}, } return {"image": img, "mask": mask} class SBDatasetOpencv(SBDataset): def __init__(self, *args, return_meta=False, **kwargs): super(SBDatasetOpencv, self).__init__(*args, **kwargs) assert self.mode == "segmentation", "SBDatasetOpencv should be in segmentation mode only" self.return_meta = return_meta def _get_segmentation_target(self, filepath): mat = self._loadmat(filepath) return mat["GTcls"][0]["Segmentation"][0] def __getitem__(self, index): img = cv2.imread(self.images[index]) assert img is not None, f"Image at '{self.images[index]}' has a problem" img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) mask = self._get_target(self.masks[index]) if self.return_meta: return { "image": img, "mask": mask, "meta": {"index": index, "image_path": self.images[index], "mask_path": self.masks[index]}, } return {"image": img, "mask": mask} def get_train_dataset(root_path, return_meta=False): return VOCSegmentationOpencv( root=root_path, year="2012", image_set="train", download=False, return_meta=return_meta ) def get_val_dataset(root_path, return_meta=False): return VOCSegmentationOpencv(root=root_path, year="2012", image_set="val", download=False, return_meta=return_meta) def get_train_noval_sbdataset(root_path, return_meta=False): return SBDatasetOpencv(root_path, image_set="train_noval", mode="segmentation", return_meta=return_meta) def get_dataloader(dataset, sampler=None, shuffle=False, limit_num_samples=None, **kwargs): if limit_num_samples is not None: g = torch.Generator().manual_seed(limit_num_samples) indices = torch.randperm(len(dataset), generator=g)[:limit_num_samples] dataset = Subset(dataset, indices) return idist.auto_dataloader(dataset, sampler=sampler, shuffle=(sampler is None) and shuffle, **kwargs) def get_train_val_loaders( root_path, train_transforms, val_transforms, batch_size=16, num_workers=8, train_sampler=None, val_batch_size=None, sbd_path=None, limit_train_num_samples=None, limit_val_num_samples=None, ): train_ds = get_train_dataset(root_path) val_ds = get_val_dataset(root_path) if sbd_path is not None: sbd_train_ds = get_train_noval_sbdataset(sbd_path) train_ds = train_ds + 
sbd_train_ds if len(val_ds) < len(train_ds): g = torch.Generator().manual_seed(len(train_ds)) train_eval_indices = torch.randperm(len(train_ds), generator=g)[: len(val_ds)] train_eval_ds = Subset(train_ds, train_eval_indices) else: train_eval_ds = train_ds train_ds = TransformedDataset(train_ds, transform_fn=train_transforms) val_ds = TransformedDataset(val_ds, transform_fn=val_transforms) train_eval_ds = TransformedDataset(train_eval_ds, transform_fn=val_transforms) val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size train_loader = get_dataloader( train_ds, shuffle=True, sampler=train_sampler, batch_size=batch_size, num_workers=num_workers, drop_last=True, limit_num_samples=limit_train_num_samples, ) val_loader = get_dataloader( val_ds, shuffle=False, batch_size=val_batch_size, num_workers=num_workers, drop_last=False, limit_num_samples=limit_val_num_samples, ) train_eval_loader = get_dataloader( train_eval_ds, shuffle=False, batch_size=val_batch_size, num_workers=num_workers, drop_last=False, limit_num_samples=limit_val_num_samples, ) return train_loader, val_loader, train_eval_loader def get_inference_dataloader( root_path, mode, transforms, batch_size=16, num_workers=8, pin_memory=True, limit_num_samples=None ): assert mode in ("train", "test"), "Mode should be 'train' or 'test'" get_dataset_fn = get_train_dataset if mode == "train" else get_val_dataset dataset = get_dataset_fn(root_path, return_meta=True) dataset = TransformedDataset(dataset, transform_fn=transforms) return get_dataloader( dataset, limit_num_samples=limit_num_samples, shuffle=False, batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory, drop_last=False, ) def ignore_mask_boundaries(**kwargs): assert "mask" in kwargs, "Input should contain 'mask'" mask = kwargs["mask"] mask[mask == 255] = 0 kwargs["mask"] = mask return kwargs def denormalize(t, mean, std, max_pixel_value=255): assert isinstance(t, torch.Tensor), f"{type(t)}" assert t.ndim == 3 d = t.device mean = torch.tensor(mean, device=d).unsqueeze(-1).unsqueeze(-1) std = torch.tensor(std, device=d).unsqueeze(-1).unsqueeze(-1) tensor = std * t + mean tensor *= max_pixel_value return tensor def prepare_image_mask(batch, device, non_blocking): x, y = batch["image"], batch["mask"] x = convert_tensor(x, device, non_blocking=non_blocking) y = convert_tensor(y, device, non_blocking=non_blocking).long() return x, y
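# --- Editor's usage sketch for `get_train_val_loaders` above ---
# The paths below are placeholders; the transforms mirror the training
# configs in this repo, which place the plain function
# `ignore_mask_boundaries` directly inside A.Compose (this relies on the
# albumentations version used by these examples accepting raw callables).
import albumentations as A
from albumentations.pytorch import ToTensorV2

_train_tf = A.Compose([A.RandomCrop(480, 480), A.Normalize(), ignore_mask_boundaries, ToTensorV2()])
_val_tf = A.Compose([A.PadIfNeeded(513, 513), A.Normalize(), ignore_mask_boundaries, ToTensorV2()])

# train_loader, val_loader, train_eval_loader = get_train_val_loaders(
#     "/path/to/VOCdevkit_root", _train_tf, _val_tf, batch_size=8, sbd_path="/path/to/SBD"
# )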
import os from functools import partial from pathlib import Path import fire import torch try: from torch.cuda.amp import autocast, GradScaler except ImportError: raise RuntimeError("Please, use recent PyTorch version, e.g. >=1.6.0") import dataflow as data import utils import vis from py_config_runner import ConfigObject, get_params, InferenceConfigSchema, TrainvalConfigSchema import ignite.distributed as idist from ignite.contrib.engines import common from ignite.engine import Engine, Events from ignite.handlers import Checkpoint from ignite.metrics import ConfusionMatrix, IoU, mIoU from ignite.utils import manual_seed, setup_logger def download_datasets(output_path): """Helper tool to download datasets Args: output_path (str): path where to download and unzip the dataset """ from torchvision.datasets.sbd import SBDataset from torchvision.datasets.voc import VOCSegmentation output_path = Path(output_path) output_path.mkdir(parents=True, exist_ok=True) print("Download Pascal VOC 2012 - Training") VOCSegmentation(output_path.as_posix(), image_set="train", download=True) print("Download Pascal VOC 2012 - Validation") VOCSegmentation(output_path.as_posix(), image_set="val", download=True) print("Download SBD - Training without Pascal VOC validation part") sbd_path = output_path / "SBD" sbd_path.mkdir(exist_ok=True) SBDataset(sbd_path.as_posix(), image_set="train_noval", mode="segmentation", download=True) print("Done") print(f"Pascal VOC 2012 is at : {(output_path / 'VOCdevkit').as_posix()}") print(f"SBD is at : {sbd_path.as_posix()}") def training(local_rank, config, logger, with_clearml): rank = idist.get_rank() manual_seed(config.seed + local_rank) train_loader = config.train_loader val_loader = config.val_loader train_eval_loader = config.train_eval_loader model, optimizer, criterion = utils.initialize(config) # Setup trainer for this specific task trainer = create_trainer(model, optimizer, criterion, train_loader.sampler, config, logger, with_clearml) # Setup evaluators num_classes = config.num_classes cm_metric = ConfusionMatrix(num_classes=num_classes) val_metrics = { "IoU": IoU(cm_metric), "mIoU_bg": mIoU(cm_metric), } if ("val_metrics" in config) and isinstance(config.val_metrics, dict): val_metrics.update(config.val_metrics) evaluator = create_evaluator(model, val_metrics, config, with_clearml, tag="val") train_evaluator = create_evaluator(model, val_metrics, config, with_clearml, tag="train") val_interval = config.get("val_interval", 1) # Run validation on every val_interval epoch, in the end of the training # and in the begining if config.start_by_validation is True event = Events.EPOCH_COMPLETED(every=val_interval) if config.num_epochs % val_interval != 0: event |= Events.COMPLETED if config.get("start_by_validation", False): event |= Events.STARTED @trainer.on(event) def run_validation(): epoch = trainer.state.epoch state = train_evaluator.run(train_eval_loader) utils.log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics) state = evaluator.run(val_loader) utils.log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics) score_metric_name = "mIoU_bg" if "es_patience" in config: common.add_early_stopping_by_val_score(config.es_patience, evaluator, trainer, metric_name=score_metric_name) # Store 2 best models by validation accuracy: common.gen_save_best_models_by_val_score( save_handler=utils.get_save_handler(config.output_path.as_posix(), with_clearml), evaluator=evaluator, models=model, metric_name=score_metric_name, n_saved=2, 
trainer=trainer, tag="val", ) # Setup Tensorboard logger if rank == 0: tb_logger = common.setup_tb_logging( config.output_path.as_posix(), trainer, optimizer, evaluators={"training": train_evaluator, "validation": evaluator}, ) # Log validation predictions as images # We define a custom event filter to log less frequently the images (to reduce storage size) # - we plot images with masks of the middle validation batch # - once every 3 validations and # - at the end of the training def custom_event_filter(_, val_iteration): c1 = val_iteration == len(val_loader) // 2 c2 = trainer.state.epoch % (config.get("val_interval", 1) * 3) == 0 c2 |= trainer.state.epoch == config.num_epochs return c1 and c2 # Image denormalization function to plot predictions with images mean = config.get("mean", (0.485, 0.456, 0.406)) std = config.get("std", (0.229, 0.224, 0.225)) img_denormalize = partial(data.denormalize, mean=mean, std=std) tb_logger.attach( evaluator, log_handler=vis.predictions_gt_images_handler( img_denormalize_fn=img_denormalize, n_images=8, another_engine=trainer, prefix_tag="validation" ), event_name=Events.ITERATION_COMPLETED(event_filter=custom_event_filter), ) # Log confusion matrix to ClearML: if with_clearml: trainer.add_event_handler(Events.COMPLETED, compute_and_log_cm, cm_metric, trainer.state.iteration) trainer.run(train_loader, max_epochs=config.num_epochs) if idist.get_rank() == 0: tb_logger.close() def compute_and_log_cm(cm_metric, iteration): cm = cm_metric.compute() # CM: values are normalized such that diagonal values represent class recalls cm = ConfusionMatrix.normalize(cm, "recall").cpu().numpy() if idist.get_rank() == 0: from clearml import Task clearml_logger = Task.current_task().get_logger() try: clearml_logger.report_confusion_matrix( title="Final Confusion Matrix", matrix=cm, iteration=iteration, xlabels=data.VOCSegmentationOpencv.target_names, ylabels=data.VOCSegmentationOpencv.target_names, extra_layout=None, ) except NameError: # Temporary clearml bug work-around: # https://github.com/allegroai/clearml/pull/936 pass def create_trainer(model, optimizer, criterion, train_sampler, config, logger, with_clearml): device = config.device prepare_batch = data.prepare_image_mask # Setup trainer accumulation_steps = config.get("accumulation_steps", 1) model_output_transform = config.get("model_output_transform", lambda x: x) with_amp = config.get("with_amp", True) scaler = GradScaler(enabled=with_amp) def forward_pass(batch): model.train() x, y = prepare_batch(batch, device=device, non_blocking=True) with autocast(enabled=with_amp): y_pred = model(x) y_pred = model_output_transform(y_pred) loss = criterion(y_pred, y) / accumulation_steps return loss def amp_backward_pass(engine, loss): scaler.scale(loss).backward() if engine.state.iteration % accumulation_steps == 0: scaler.step(optimizer) scaler.update() optimizer.zero_grad() def hvd_amp_backward_pass(engine, loss): scaler.scale(loss).backward() optimizer.synchronize() with optimizer.skip_synchronize(): scaler.step(optimizer) scaler.update() optimizer.zero_grad() if idist.backend() == "horovod" and with_amp: backward_pass = hvd_amp_backward_pass else: backward_pass = amp_backward_pass def training_step(engine, batch): loss = forward_pass(batch) output = {"supervised batch loss": loss.item()} backward_pass(engine, loss) return output trainer = Engine(training_step) trainer.logger = logger output_names = [ "supervised batch loss", ] lr_scheduler = config.lr_scheduler to_save = { "model": model, "optimizer": optimizer, 
"lr_scheduler": lr_scheduler, "trainer": trainer, "amp": scaler, } save_every_iters = config.get("save_every_iters", 1000) common.setup_common_training_handlers( trainer, train_sampler, to_save=to_save, save_every_iters=save_every_iters, save_handler=utils.get_save_handler(config.output_path.as_posix(), with_clearml), lr_scheduler=lr_scheduler, output_names=output_names, with_pbars=not with_clearml, log_every_iters=1, ) resume_from = config.get("resume_from", None) if resume_from is not None: checkpoint_fp = Path(resume_from) assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found" logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}") checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu") Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint) return trainer def create_evaluator(model, metrics, config, with_clearml, tag="val"): model_output_transform = config.get("model_output_transform", lambda x: x) with_amp = config.get("with_amp", True) prepare_batch = data.prepare_image_mask @torch.no_grad() def evaluate_step(engine, batch): model.eval() with autocast(enabled=with_amp): x, y = prepare_batch(batch, device=config.device, non_blocking=True) y_pred = model(x) y_pred = model_output_transform(y_pred) return y_pred, y evaluator = Engine(evaluate_step) for name, metric in metrics.items(): metric.attach(evaluator, name) if idist.get_rank() == 0 and (not with_clearml): common.ProgressBar(desc=f"Evaluation ({tag})", persist=False).attach(evaluator) return evaluator def setup_experiment_tracking(config, with_clearml, task_type="training"): from datetime import datetime assert task_type in ("training", "testing"), task_type output_path = "" if idist.get_rank() == 0: if with_clearml: from clearml import Task schema = TrainvalConfigSchema if task_type == "training" else InferenceConfigSchema task = Task.init("Pascal-VOC12 Training", config.config_filepath.stem, task_type=task_type) task.connect_configuration(config.config_filepath.as_posix()) task.upload_artifact(config.script_filepath.name, config.script_filepath.as_posix()) task.upload_artifact(config.config_filepath.name, config.config_filepath.as_posix()) task.connect(get_params(config, schema)) output_path = Path(os.environ.get("CLEARML_OUTPUT_PATH", "/tmp")) output_path = output_path / "clearml" / datetime.now().strftime("%Y%m%d-%H%M%S") else: import shutil output_path = Path(os.environ.get("OUTPUT_PATH", "/tmp/output-pascal-voc12")) output_path = output_path / task_type / config.config_filepath.stem output_path = output_path / datetime.now().strftime("%Y%m%d-%H%M%S") output_path.mkdir(parents=True, exist_ok=True) shutil.copyfile(config.script_filepath.as_posix(), output_path / config.script_filepath.name) shutil.copyfile(config.config_filepath.as_posix(), output_path / config.config_filepath.name) output_path = output_path.as_posix() return Path(idist.broadcast(output_path, src=0)) def run_training(config_filepath, backend="nccl", with_clearml=True): """Main entry to run training experiment Args: config_filepath (str): training configuration .py file backend (str): distributed backend: nccl, gloo, horovod or None to run without distributed config with_clearml (bool): if True, uses ClearML as experiment tracking system """ assert torch.cuda.is_available(), torch.cuda.is_available() assert torch.backends.cudnn.enabled torch.backends.cudnn.benchmark = True config_filepath = Path(config_filepath) assert config_filepath.exists(), f"File '{config_filepath.as_posix()}' is not found" 
with idist.Parallel(backend=backend) as parallel: logger = setup_logger(name="Pascal-VOC12 Training", distributed_rank=idist.get_rank()) config = ConfigObject(config_filepath) TrainvalConfigSchema.validate(config) config.script_filepath = Path(__file__) output_path = setup_experiment_tracking(config, with_clearml=with_clearml) config.output_path = output_path utils.log_basic_info(logger, get_params(config, TrainvalConfigSchema)) try: parallel.run(training, config, logger=logger, with_clearml=with_clearml) except KeyboardInterrupt: logger.info("Catched KeyboardInterrupt -> exit") except Exception as e: # noqa logger.exception("") raise e def get_model_weights(config, logger, with_clearml): path = "" if with_clearml: from clearml import Model if idist.get_rank() > 0: idist.barrier() else: model_id = config.weights_path logger.info(f"Loading trained model: {model_id}") model = Model(model_id) assert model is not None, f"{model_id}" path = model.get_local_copy() idist.barrier() path = idist.broadcast(path, src=0) else: path = config.weights_path logger.info(f"Loading {path}") assert Path(path).exists(), f"{path} is not found" return torch.load(path) def evaluation(local_rank, config, logger, with_clearml): rank = idist.get_rank() device = idist.device() manual_seed(config.seed + local_rank) data_loader = config.data_loader model = config.model.to(device) # Load weights: state_dict = get_model_weights(config, logger, with_clearml) model.load_state_dict(state_dict) # Adapt model to dist config model = idist.auto_model(model) # Setup evaluators num_classes = config.num_classes cm_metric = ConfusionMatrix(num_classes=num_classes) val_metrics = { "IoU": IoU(cm_metric), "mIoU_bg": mIoU(cm_metric), } if ("val_metrics" in config) and isinstance(config.val_metrics, dict): val_metrics.update(config.val_metrics) evaluator = create_evaluator(model, val_metrics, config, with_clearml, tag="val") # Setup Tensorboard logger if rank == 0: tb_logger = common.TensorboardLogger(log_dir=config.output_path.as_posix()) tb_logger.attach_output_handler(evaluator, event_name=Events.COMPLETED, tag="validation", metric_names="all") # Log confusion matrix to ClearML: if with_clearml: evaluator.add_event_handler(Events.COMPLETED, compute_and_log_cm, cm_metric, evaluator.state.iteration) state = evaluator.run(data_loader) utils.log_metrics(logger, 0, state.times["COMPLETED"], "Validation", state.metrics) if idist.get_rank() == 0: tb_logger.close() def run_evaluation(config_filepath, backend="nccl", with_clearml=True): """Main entry to run model's evaluation: - compute validation metrics Args: config_filepath (str): evaluation configuration .py file backend (str): distributed backend: nccl, gloo, horovod or None to run without distributed config with_clearml (bool): if True, uses ClearML as experiment tracking system """ assert torch.cuda.is_available(), torch.cuda.is_available() assert torch.backends.cudnn.enabled torch.backends.cudnn.benchmark = True config_filepath = Path(config_filepath) assert config_filepath.exists(), f"File '{config_filepath.as_posix()}' is not found" with idist.Parallel(backend=backend) as parallel: logger = setup_logger(name="Pascal-VOC12 Evaluation", distributed_rank=idist.get_rank()) config = ConfigObject(config_filepath) InferenceConfigSchema.validate(config) config.script_filepath = Path(__file__) output_path = setup_experiment_tracking(config, with_clearml=with_clearml, task_type="testing") config.output_path = output_path utils.log_basic_info(logger, get_params(config, InferenceConfigSchema)) 
        try:
            parallel.run(evaluation, config, logger=logger, with_clearml=with_clearml)
        except KeyboardInterrupt:
            logger.info("Caught KeyboardInterrupt -> exit")
        except Exception as e:  # noqa
            logger.exception("")
            raise e


if __name__ == "__main__":
    fire.Fire({"download": download_datasets, "training": run_training, "eval": run_evaluation})
# Basic training configuration import os from functools import partial import albumentations as A import cv2 import torch.nn as nn import torch.optim as optim import torch.optim.lr_scheduler as lrs from albumentations.pytorch import ToTensorV2 as ToTensor from dataflow import get_train_val_loaders, ignore_mask_boundaries from torchvision.models.segmentation import deeplabv3_resnet101 # ############################## # Global configs # ############################## seed = 21 device = "cuda" debug = False # Use AMP with torch native with_amp = True num_classes = 21 batch_size = 18 # total batch size val_batch_size = batch_size * 2 num_workers = 12 # total num workers per node val_interval = 3 # grads accumulation: accumulation_steps = 4 val_img_size = 513 train_img_size = 480 # ############################## # Setup Dataflow # ############################## assert "DATASET_PATH" in os.environ data_path = os.environ["DATASET_PATH"] assert "SBD_DATASET_PATH" in os.environ sbd_data_path = os.environ["SBD_DATASET_PATH"] mean = (0.485, 0.456, 0.406) std = (0.229, 0.224, 0.225) train_transforms = A.Compose( [ A.RandomScale(scale_limit=(0.0, 1.5), interpolation=cv2.INTER_LINEAR, p=1.0), A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), A.RandomCrop(train_img_size, train_img_size), A.HorizontalFlip(), A.Blur(blur_limit=3), A.Normalize(mean=mean, std=std), ignore_mask_boundaries, ToTensor(), ] ) val_transforms = A.Compose( [ A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), A.Normalize(mean=mean, std=std), ignore_mask_boundaries, ToTensor(), ] ) train_loader, val_loader, train_eval_loader = get_train_val_loaders( root_path=data_path, train_transforms=train_transforms, val_transforms=val_transforms, batch_size=batch_size, num_workers=num_workers, val_batch_size=val_batch_size, sbd_path=sbd_data_path, limit_train_num_samples=100 if debug else None, limit_val_num_samples=100 if debug else None, ) # ############################## # Setup model # ############################## num_classes = 21 model = deeplabv3_resnet101(num_classes=num_classes) def model_output_transform(output): return output["out"] # ############################## # Setup solver # ############################## save_every_iters = len(train_loader) num_epochs = 100 criterion = nn.CrossEntropyLoss() lr = 0.007 weight_decay = 5e-4 momentum = 0.9 nesterov = False optimizer = optim.SGD( [{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()}], lr=1.0, momentum=momentum, weight_decay=weight_decay, nesterov=nesterov, ) le = len(train_loader) def lambda_lr_scheduler(iteration, lr0, n, a): return lr0 * pow((1.0 - 1.0 * iteration / n), a) lr_scheduler = lrs.LambdaLR( optimizer, lr_lambda=[ partial(lambda_lr_scheduler, lr0=lr, n=num_epochs * le, a=0.9), partial(lambda_lr_scheduler, lr0=lr * 10.0, n=num_epochs * le, a=0.9), ], )
# Basic training configuration import os from functools import partial import albumentations as A import cv2 import torch.nn as nn import torch.optim as optim import torch.optim.lr_scheduler as lrs from albumentations.pytorch import ToTensorV2 as ToTensor from dataflow import get_train_val_loaders, ignore_mask_boundaries from torchvision.models.segmentation import deeplabv3_resnet101 # ############################## # Global configs # ############################## seed = 21 device = "cuda" debug = False # Use AMP with torch native with_amp = True num_classes = 21 batch_size = 18 # total batch size val_batch_size = batch_size * 2 num_workers = 12 # total num workers per node val_interval = 3 # grads accumulation: accumulation_steps = 4 val_img_size = 513 train_img_size = 480 # ############################## # Setup Dataflow # ############################## assert "DATASET_PATH" in os.environ data_path = os.environ["DATASET_PATH"] mean = (0.485, 0.456, 0.406) std = (0.229, 0.224, 0.225) train_transforms = A.Compose( [ A.RandomScale(scale_limit=(0.0, 1.5), interpolation=cv2.INTER_LINEAR, p=1.0), A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), A.RandomCrop(train_img_size, train_img_size), A.HorizontalFlip(), A.Blur(blur_limit=3), A.Normalize(mean=mean, std=std), ignore_mask_boundaries, ToTensor(), ] ) val_transforms = A.Compose( [ A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), A.Normalize(mean=mean, std=std), ignore_mask_boundaries, ToTensor(), ] ) train_loader, val_loader, train_eval_loader = get_train_val_loaders( root_path=data_path, train_transforms=train_transforms, val_transforms=val_transforms, batch_size=batch_size, num_workers=num_workers, val_batch_size=val_batch_size, limit_train_num_samples=100 if debug else None, limit_val_num_samples=100 if debug else None, ) # ############################## # Setup model # ############################## num_classes = 21 model = deeplabv3_resnet101(num_classes=num_classes) def model_output_transform(output): return output["out"] # ############################## # Setup solver # ############################## save_every_iters = len(train_loader) num_epochs = 100 criterion = nn.CrossEntropyLoss() lr = 0.007 weight_decay = 5e-4 momentum = 0.9 nesterov = False optimizer = optim.SGD( [{"params": model.backbone.parameters()}, {"params": model.classifier.parameters()}], lr=1.0, momentum=momentum, weight_decay=weight_decay, nesterov=nesterov, ) le = len(train_loader) def lambda_lr_scheduler(iteration, lr0, n, a): return lr0 * pow((1.0 - 1.0 * iteration / n), a) lr_scheduler = lrs.LambdaLR( optimizer, lr_lambda=[ partial(lambda_lr_scheduler, lr0=lr, n=num_epochs * le, a=0.9), partial(lambda_lr_scheduler, lr0=lr * 10.0, n=num_epochs * le, a=0.9), ], )
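# --- Editor's note on the solver above (illustrative, standalone sketch) ---
# `LambdaLR` multiplies the optimizer's *base* lr by the value returned from
# each lambda, which is why the SGD parameter groups are created with lr=1.0
# while the effective rates decay polynomially as lr0 * (1 - it / n) ** 0.9.
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lrs

_m = nn.Linear(2, 2)
_opt = optim.SGD(_m.parameters(), lr=1.0)
_sched = lrs.LambdaLR(_opt, lr_lambda=lambda it: 0.007 * (1.0 - it / 1000) ** 0.9)
for _ in range(3):
    _opt.step()
    _sched.step()
    print(_sched.get_last_lr())  # effective lr decays polynomially from 0.007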
# Basic training configuration import os import albumentations as A import cv2 from albumentations.pytorch import ToTensorV2 as ToTensor from dataflow import get_inference_dataloader, ignore_mask_boundaries from torchvision.models.segmentation import deeplabv3_resnet101 # ############################## # Global configs # ############################## seed = 21 device = "cuda" debug = False # Use AMP with torch native with_amp = True num_classes = 21 batch_size = 9 # total batch size num_workers = 8 # total num workers per node val_img_size = 513 # ############################## # Setup Dataflow # ############################## assert "DATASET_PATH" in os.environ data_path = os.environ["DATASET_PATH"] assert "SBD_DATASET_PATH" in os.environ sbd_data_path = os.environ["SBD_DATASET_PATH"] mean = (0.485, 0.456, 0.406) std = (0.229, 0.224, 0.225) val_transforms = A.Compose( [ A.PadIfNeeded(val_img_size, val_img_size, border_mode=cv2.BORDER_CONSTANT), A.Normalize(mean=mean, std=std), ignore_mask_boundaries, ToTensor(), ] ) data_loader = get_inference_dataloader( root_path=data_path, mode="test", transforms=val_transforms, batch_size=batch_size, num_workers=num_workers, limit_num_samples=batch_size * 5 if debug else None, ) # ############################## # Setup model # ############################## num_classes = 21 model = deeplabv3_resnet101(num_classes=num_classes) def model_output_transform(output): return output["out"] # baseline_dplv3_resnet101_sbd: best_model_78_val_miou_bg=0.6871.pt weights_path = "d8b4687d86cf445a944853fdd6a6b999" # or can specify a path # weights_path = "/path/to/best_model.pt"
import argparse from collections import deque, namedtuple import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.distributions import Categorical from ignite.engine import Engine, Events try: import gymnasium as gym except ImportError: raise ModuleNotFoundError("Please install opengym: pip install gymnasium") SavedAction = namedtuple("SavedAction", ["log_prob", "value"]) eps = np.finfo(np.float32).eps.item() class Policy(nn.Module): """ implements both actor and critic in one model """ def __init__(self): super(Policy, self).__init__() self.affine1 = nn.Linear(4, 128) # actor's layer self.action_head = nn.Linear(128, 2) # critic's layer self.value_head = nn.Linear(128, 1) # action & reward buffer self.saved_actions = [] self.rewards = [] def forward(self, x): """ forward of both actor and critic """ x = F.relu(self.affine1(x)) # actor: choses action to take from state s_t # by returning probability of each action action_prob = F.softmax(self.action_head(x), dim=-1) # critic: evaluates being in the state s_t state_values = self.value_head(x) # return values for both actor and critic as a tuple of 2 values: # 1. a list with the probability of each action over the action space # 2. the value from state s_t return action_prob, state_values def select_action(policy, observation): observation = torch.from_numpy(observation).float() probs, observation_value = policy(observation) # create a categorical distribution over the list of probabilities of actions m = Categorical(probs) # and sample an action using the distribution action = m.sample() # save to action buffer policy.saved_actions.append(SavedAction(m.log_prob(action), observation_value)) # the action to take (left or right) return action.item() def finish_episode(policy, optimizer, gamma): """ Training code. Calculates actor and critic loss and performs backprop. 
""" R = 0 saved_actions = policy.saved_actions policy_losses = [] # list to save actor (policy) loss value_losses = [] # list to save critic (value) loss returns = deque() # list to save the true values # calculate the true value using rewards returned from the environment for r in policy.rewards[::-1]: # calculate the discounted value R = r + gamma * R returns.appendleft(R) returns = torch.tensor(returns) returns = (returns - returns.mean()) / (returns.std() + eps) for (log_prob, value), R in zip(saved_actions, returns): advantage = R - value.item() # calculate actor (policy) loss policy_losses.append(-log_prob * advantage) # calculate critic (value) loss using L1 smooth loss value_losses.append(F.smooth_l1_loss(value, torch.tensor([R]))) # reset gradients optimizer.zero_grad() # sum up all the values of policy_losses and value_losses loss = torch.stack(policy_losses).sum() + torch.stack(value_losses).sum() # perform backprop loss.backward() optimizer.step() # reset rewards and action buffer del policy.rewards[:] del policy.saved_actions[:] EPISODE_STARTED = Events.EPOCH_STARTED EPISODE_COMPLETED = Events.EPOCH_COMPLETED def main(env, args): policy = Policy() optimizer = optim.Adam(policy.parameters(), lr=3e-2) timesteps = range(10000) def run_single_timestep(engine, timestep): observation = engine.state.observation # select action from policy action = select_action(policy, observation) # take the action engine.state.observation, reward, done, _, _ = env.step(action) if args.render: env.render() policy.rewards.append(reward) engine.state.ep_reward += reward if done: engine.terminate_epoch() engine.state.timestep = timestep trainer = Engine(run_single_timestep) trainer.state.running_reward = 10 @trainer.on(EPISODE_STARTED) def reset_environment_state(): # reset environment and episode reward torch.manual_seed(args.seed + trainer.state.epoch) trainer.state.observation, _ = env.reset(seed=args.seed + trainer.state.epoch) trainer.state.ep_reward = 0 @trainer.on(EPISODE_COMPLETED) def update_model(): # update cumulative reward t = trainer.state.timestep trainer.state.running_reward = 0.05 * trainer.state.ep_reward + (1 - 0.05) * trainer.state.running_reward # perform backprop finish_episode(policy, optimizer, args.gamma) @trainer.on(EPISODE_COMPLETED(every=args.log_interval)) def log_episode(): i_episode = trainer.state.epoch print( f"Episode {i_episode}\tLast reward: {trainer.state.ep_reward:.2f}" f"\tAverage reward: {trainer.state.running_reward:.2f}" ) @trainer.on(EPISODE_COMPLETED) def should_finish_training(): # check if we have "solved" the cart pole problem running_reward = trainer.state.running_reward if running_reward > env.spec.reward_threshold: print( f"Solved! Running reward is now {running_reward} and " f"the last episode runs to {trainer.state.timestep} time steps!" 
            )
            trainer.should_terminate = True

    trainer.run(timesteps, max_epochs=args.max_episodes)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Ignite actor-critic example")
    parser.add_argument("--gamma", type=float, default=0.99, metavar="G", help="discount factor (default: 0.99)")
    parser.add_argument("--seed", type=int, default=543, metavar="N", help="random seed (default: 543)")
    parser.add_argument("--render", action="store_true", help="render the environment")
    parser.add_argument(
        "--log-interval", type=int, default=10, metavar="N", help="interval between training status logs (default: 10)"
    )
    parser.add_argument(
        "--max-episodes",
        type=int,
        default=1000000,
        metavar="N",
        help="Number of episodes for the training (default: 1000000)",
    )
    args = parser.parse_args()

    env = gym.make("CartPole-v1")

    main(env, args)
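# --- Editor's worked toy example of the discounted-return computation used
# in `finish_episode` above (standalone; the rewards are made up) ---
from collections import deque

gamma = 0.99
rewards = [1.0, 1.0, 1.0]
R, returns = 0.0, deque()
for r in rewards[::-1]:
    R = r + gamma * R
    returns.appendleft(R)
print(list(returns))  # [2.9701, 1.99, 1.0]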
import argparse from collections import deque import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.distributions import Categorical from ignite.engine import Engine, Events try: import gymnasium as gym except ImportError: raise ModuleNotFoundError("Please install opengym: pip install gymnasium") eps = np.finfo(np.float32).eps.item() class Policy(nn.Module): def __init__(self): super(Policy, self).__init__() self.affine1 = nn.Linear(4, 128) self.dropout = nn.Dropout(p=0.6) self.affine2 = nn.Linear(128, 2) self.saved_log_probs = [] self.rewards = [] def forward(self, x): x = self.affine1(x) x = self.dropout(x) x = F.relu(x) action_scores = self.affine2(x) return F.softmax(action_scores, dim=1) def select_action(policy, observation): state = torch.from_numpy(observation).float().unsqueeze(0) probs = policy(state) m = Categorical(probs) action = m.sample() policy.saved_log_probs.append(m.log_prob(action)) return action.item() def finish_episode(policy, optimizer, gamma): R = 0 policy_loss = [] returns = deque() for r in policy.rewards[::-1]: R = r + gamma * R returns.appendleft(R) returns = torch.tensor(returns) returns = (returns - returns.mean()) / (returns.std() + eps) for log_prob, R in zip(policy.saved_log_probs, returns): policy_loss.append(-log_prob * R) optimizer.zero_grad() policy_loss = torch.cat(policy_loss).sum() policy_loss.backward() optimizer.step() del policy.rewards[:] del policy.saved_log_probs[:] EPISODE_STARTED = Events.EPOCH_STARTED EPISODE_COMPLETED = Events.EPOCH_COMPLETED def main(env, args): policy = Policy() optimizer = optim.Adam(policy.parameters(), lr=1e-2) timesteps = range(10000) def run_single_timestep(engine, timestep): observation = engine.state.observation action = select_action(policy, observation) engine.state.observation, reward, done, _, _ = env.step(action) if args.render: env.render() policy.rewards.append(reward) engine.state.ep_reward += reward if done: engine.terminate_epoch() engine.state.timestep = timestep trainer = Engine(run_single_timestep) trainer.state.running_reward = 10 @trainer.on(EPISODE_STARTED) def reset_environment_state(): torch.manual_seed(args.seed + trainer.state.epoch) trainer.state.observation, _ = env.reset(seed=args.seed + trainer.state.epoch) trainer.state.ep_reward = 0 @trainer.on(EPISODE_COMPLETED) def update_model(): trainer.state.running_reward = 0.05 * trainer.state.ep_reward + (1 - 0.05) * trainer.state.running_reward finish_episode(policy, optimizer, args.gamma) @trainer.on(EPISODE_COMPLETED(every=args.log_interval)) def log_episode(): i_episode = trainer.state.epoch print( f"Episode {i_episode}\tLast reward: {trainer.state.ep_reward:.2f}" f"\tAverage length: {trainer.state.running_reward:.2f}" ) @trainer.on(EPISODE_COMPLETED) def should_finish_training(): running_reward = trainer.state.running_reward if running_reward > env.spec.reward_threshold: print( f"Solved! Running reward is now {running_reward} and " f"the last episode runs to {trainer.state.timestep} time steps!" 
) trainer.should_terminate = True trainer.run(timesteps, max_epochs=args.max_episodes) if __name__ == "__main__": parser = argparse.ArgumentParser(description="PyTorch REINFORCE example") parser.add_argument("--gamma", type=float, default=0.99, metavar="G", help="discount factor (default: 0.99)") parser.add_argument("--seed", type=int, default=543, metavar="N", help="random seed (default: 543)") parser.add_argument("--render", action="store_true", help="render the environment") parser.add_argument( "--log-interval", type=int, default=10, metavar="N", help="interval between training status logs (default: 10)" ) parser.add_argument( "--max-episodes", type=int, default=1000000, metavar="N", help="Number of episodes for the training (default: 1000000)", ) args = parser.parse_args() env = gym.make("CartPole-v1") main(env, args)
import torch class TransformerNet(torch.nn.Module): def __init__(self): super(TransformerNet, self).__init__() # Initial convolution layers self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1) self.in1 = torch.nn.InstanceNorm2d(32, affine=True) self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2) self.in2 = torch.nn.InstanceNorm2d(64, affine=True) self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2) self.in3 = torch.nn.InstanceNorm2d(128, affine=True) # Residual layers self.res1 = ResidualBlock(128) self.res2 = ResidualBlock(128) self.res3 = ResidualBlock(128) self.res4 = ResidualBlock(128) self.res5 = ResidualBlock(128) # Upsampling Layers self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2) self.in4 = torch.nn.InstanceNorm2d(64, affine=True) self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2) self.in5 = torch.nn.InstanceNorm2d(32, affine=True) self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1) # Non-linearities self.relu = torch.nn.ReLU() def forward(self, X): y = self.relu(self.in1(self.conv1(X))) y = self.relu(self.in2(self.conv2(y))) y = self.relu(self.in3(self.conv3(y))) y = self.res1(y) y = self.res2(y) y = self.res3(y) y = self.res4(y) y = self.res5(y) y = self.relu(self.in4(self.deconv1(y))) y = self.relu(self.in5(self.deconv2(y))) y = self.deconv3(y) return y class ConvLayer(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride): super(ConvLayer, self).__init__() reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): out = self.reflection_pad(x) out = self.conv2d(out) return out class ResidualBlock(torch.nn.Module): """ResidualBlock introduced in: https://arxiv.org/abs/1512.03385 recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html """ def __init__(self, channels): super(ResidualBlock, self).__init__() self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in1 = torch.nn.InstanceNorm2d(channels, affine=True) self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1) self.in2 = torch.nn.InstanceNorm2d(channels, affine=True) self.relu = torch.nn.ReLU() def forward(self, x): residual = x out = self.relu(self.in1(self.conv1(x))) out = self.in2(self.conv2(out)) out = out + residual return out class UpsampleConvLayer(torch.nn.Module): """UpsampleConvLayer Upsamples the input and then does a convolution. This method gives better results compared to ConvTranspose2d. ref: http://distill.pub/2016/deconv-checkerboard/ """ def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None): super(UpsampleConvLayer, self).__init__() self.upsample = upsample reflection_padding = kernel_size // 2 self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding) self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride) def forward(self, x): x_in = x if self.upsample: x_in = torch.nn.functional.interpolate(x_in, mode="nearest", scale_factor=self.upsample) out = self.reflection_pad(x_in) out = self.conv2d(out) return out
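# --- Editor's shape sanity check for TransformerNet above (illustrative) ---
# The network is fully convolutional: the two stride-2 downsampling convs are
# undone by the two x2 upsampling layers, so output spatial size equals input.
if __name__ == "__main__":
    net = TransformerNet()
    x = torch.rand(1, 3, 256, 256)
    with torch.no_grad():
        y = net(x)
    print(y.shape)  # torch.Size([1, 3, 256, 256])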
from collections import namedtuple import torch from torchvision import models from torchvision.models.vgg import VGG16_Weights class Vgg16(torch.nn.Module): def __init__(self, requires_grad=False): super(Vgg16, self).__init__() vgg_pretrained_features = models.vgg16(weights=VGG16_Weights.IMAGENET1K_V1).features self.slice1 = torch.nn.Sequential() self.slice2 = torch.nn.Sequential() self.slice3 = torch.nn.Sequential() self.slice4 = torch.nn.Sequential() for x in range(4): self.slice1.add_module(str(x), vgg_pretrained_features[x]) for x in range(4, 9): self.slice2.add_module(str(x), vgg_pretrained_features[x]) for x in range(9, 16): self.slice3.add_module(str(x), vgg_pretrained_features[x]) for x in range(16, 23): self.slice4.add_module(str(x), vgg_pretrained_features[x]) if not requires_grad: for param in self.parameters(): param.requires_grad = False def forward(self, X): h = self.slice1(X) h_relu1_2 = h h = self.slice2(h) h_relu2_2 = h h = self.slice3(h) h_relu3_3 = h h = self.slice4(h) h_relu4_3 = h vgg_outputs = namedtuple("VggOutputs", ["relu1_2", "relu2_2", "relu3_3", "relu4_3"]) out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3) return out
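# --- Editor's usage sketch for Vgg16 above ---
# Extracts the four relu feature maps used for perceptual/style losses
# (downloads the ImageNet weights on first run).
if __name__ == "__main__":
    vgg = Vgg16(requires_grad=False).eval()
    with torch.no_grad():
        feats = vgg(torch.rand(1, 3, 224, 224))
    print([tuple(f.shape) for f in feats])  # relu1_2, relu2_2, relu3_3, relu4_3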
import sys class Progbar(object): def __init__(self, loader, metrics): self.num_iterations = len(loader) self.output_stream = sys.stdout self.metrics = metrics self.alpha = 0.98 def _calc_running_avg(self, engine): for k, v in engine.state.output.items(): old_v = self.metrics.get(k, v) new_v = self.alpha * old_v + (1 - self.alpha) * v self.metrics[k] = new_v def __call__(self, engine): self._calc_running_avg(engine) num_seen = engine.state.iteration - self.num_iterations * (engine.state.epoch - 1) percent_seen = 100 * float(num_seen) / self.num_iterations equal_to = int(percent_seen / 10) done = int(percent_seen) == 100 bar = "[" + "=" * equal_to + ">" * (not done) + " " * (10 - equal_to) + "]" message = f"Epoch {engine.state.epoch} | {percent_seen:.2f}% | {bar}" for key, value in self.metrics.items(): message += f" | {key}: {value:.2e}" message += "\r" self.output_stream.write(message) self.output_stream.flush() if done: self.output_stream.write("\n")
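# --- Editor's minimal runnable sketch for Progbar above ---
# A dummy Engine whose step returns a dict of scalars, mirroring how the
# fast-neural-style training script attaches the progress bar.
if __name__ == "__main__":
    from collections import OrderedDict

    from ignite.engine import Engine, Events

    data = list(range(20))
    trainer = Engine(lambda engine, batch: {"loss": float(batch)})
    trainer.add_event_handler(Events.ITERATION_COMPLETED, Progbar(loader=data, metrics=OrderedDict()))
    trainer.run(data, max_epochs=1)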
# coding: utf-8 import argparse import random from collections import OrderedDict from pathlib import Path import numpy as np import torch import utils from handlers import Progbar from torch.optim import Adam from torch.utils.data import DataLoader from torchvision import datasets, transforms from transformer_net import TransformerNet from vgg import Vgg16 from ignite.engine import Engine, Events from ignite.handlers import ModelCheckpoint def check_paths(args): try: if args.checkpoint_model_dir is not None and not (Path(args.checkpoint_model_dir).exists()): Path(args.checkpoint_model_dir).mkdir(parents=True) except OSError as e: raise OSError(e) def check_manual_seed(args): seed = args.seed or random.randint(1, 10000) random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) def check_dataset(args): transform = transforms.Compose( [ transforms.Resize(args.image_size), transforms.CenterCrop(args.image_size), transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255)), ] ) if args.dataset in {"folder", "mscoco"}: train_dataset = datasets.ImageFolder(args.dataroot, transform) elif args.dataset == "test": train_dataset = datasets.FakeData( size=args.batch_size, image_size=(3, 32, 32), num_classes=1, transform=transform ) else: raise RuntimeError(f"Invalid dataset name: {args.dataset}") train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=0) return train_loader def train(args): device = torch.device("cuda" if args.cuda else "cpu") train_loader = check_dataset(args) transformer = TransformerNet().to(device) optimizer = Adam(transformer.parameters(), args.lr) mse_loss = torch.nn.MSELoss() vgg = Vgg16(requires_grad=False).to(device) style_transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255))]) style = utils.load_image(args.style_image, size=args.style_size) style = style_transform(style) style = style.repeat(args.batch_size, 1, 1, 1).to(device) features_style = vgg(utils.normalize_batch(style)) gram_style = [utils.gram_matrix(y) for y in features_style] running_avgs = OrderedDict() def step(engine, batch): x, _ = batch x = x.to(device) n_batch = len(x) optimizer.zero_grad() y = transformer(x) x = utils.normalize_batch(x) y = utils.normalize_batch(y) features_x = vgg(x) features_y = vgg(y) content_loss = args.content_weight * mse_loss(features_y.relu2_2, features_x.relu2_2) style_loss = 0.0 for ft_y, gm_s in zip(features_y, gram_style): gm_y = utils.gram_matrix(ft_y) style_loss += mse_loss(gm_y, gm_s[:n_batch, :, :]) style_loss *= args.style_weight total_loss = content_loss + style_loss total_loss.backward() optimizer.step() return {"content_loss": content_loss.item(), "style_loss": style_loss.item(), "total_loss": total_loss.item()} trainer = Engine(step) checkpoint_handler = ModelCheckpoint( args.checkpoint_model_dir, "checkpoint", n_saved=10, require_empty=False, create_dir=True ) progress_bar = Progbar(loader=train_loader, metrics=running_avgs) trainer.add_event_handler( event_name=Events.EPOCH_COMPLETED(every=args.checkpoint_interval), handler=checkpoint_handler, to_save={"net": transformer}, ) trainer.add_event_handler(event_name=Events.ITERATION_COMPLETED, handler=progress_bar) trainer.run(train_loader, max_epochs=args.epochs) def stylize(args): device = torch.device("cuda" if args.cuda else "cpu") content_transform = transforms.Compose([transforms.ToTensor(), transforms.Lambda(lambda x: x.mul(255))]) content_image = utils.load_image(args.content_image, scale=args.content_scale) content_image = 
content_transform(content_image) content_image = content_image.unsqueeze(0).to(device) with torch.no_grad(): style_model = torch.load(args.model) style_model.to(device) output = style_model(content_image).cpu() utils.save_image(args.output_image, output[0]) def main(): main_arg_parser = argparse.ArgumentParser(description="parser for fast-neural-style") subparsers = main_arg_parser.add_subparsers(title="subcommands", dest="subcommand") train_arg_parser = subparsers.add_parser("train", help="parser for training arguments") train_arg_parser.add_argument("--epochs", type=int, default=2, help="number of training epochs, default is 2") train_arg_parser.add_argument("--batch_size", type=int, default=8, help="batch size for training, default is 8") train_arg_parser.add_argument( "--dataset", type=str, required=True, choices={"test", "folder", "mscoco"}, help="type of dataset to be used." ) train_arg_parser.add_argument( "--dataroot", type=str, required=True, help="path to training dataset, the path should point to a folder " "containing another folder with all the training images", ) train_arg_parser.add_argument("--style_image", type=str, default="test", help="path to style-image") train_arg_parser.add_argument("--test_image", type=str, default="test", help="path to test-image") train_arg_parser.add_argument( "--checkpoint_model_dir", type=str, default="/tmp/checkpoints", help="path to folder where checkpoints of trained models will be saved", ) train_arg_parser.add_argument( "--checkpoint_interval", type=int, default=1, help="number of batches after which a checkpoint of trained model will be created", ) train_arg_parser.add_argument( "--image_size", type=int, default=256, help="size of training images, default is 256 X 256" ) train_arg_parser.add_argument( "--style_size", type=int, default=None, help="size of style-image, default is the original size of style image" ) train_arg_parser.add_argument("--cuda", type=int, default=1, help="set it to 1 for running on GPU, 0 for CPU") train_arg_parser.add_argument("--seed", type=int, default=42, help="random seed for training") train_arg_parser.add_argument( "--content_weight", type=float, default=1e5, help="weight for content-loss, default is 1e5" ) train_arg_parser.add_argument( "--style_weight", type=float, default=1e10, help="weight for style-loss, default is 1e10" ) train_arg_parser.add_argument("--lr", type=float, default=1e-3, help="learning rate, default is 1e-3") eval_arg_parser = subparsers.add_parser("eval", help="parser for evaluation/stylizing arguments") eval_arg_parser.add_argument( "--content_image", type=str, required=True, help="path to content image you want to stylize" ) eval_arg_parser.add_argument( "--content_scale", type=float, default=None, help="factor for scaling down the content image" ) eval_arg_parser.add_argument("--output_image", type=str, required=True, help="path for saving the output image") eval_arg_parser.add_argument( "--model", type=str, required=True, help="saved model to be used for stylizing the image." ) eval_arg_parser.add_argument("--cuda", type=int, required=True, help="set it to 1 for running on GPU, 0 for CPU") args = main_arg_parser.parse_args() if args.subcommand is None: raise ValueError("ERROR: specify either train or eval") if args.cuda and not torch.cuda.is_available(): raise ValueError("ERROR: cuda is not available, try running on CPU") if args.subcommand == "train": check_manual_seed(args) check_paths(args) train(args) else: stylize(args) if __name__ == "__main__": main()
from PIL import Image def load_image(filename, size=None, scale=None): img = Image.open(filename) if size is not None: img = img.resize((size, size), Image.LANCZOS) elif scale is not None: img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.LANCZOS) return img def save_image(filename, data): img = data.clone().clamp(0, 255).numpy() img = img.transpose(1, 2, 0).astype("uint8") img = Image.fromarray(img) img.save(filename) def gram_matrix(y): (b, ch, h, w) = y.size() features = y.view(b, ch, w * h) features_t = features.transpose(1, 2) gram = features.bmm(features_t) / (ch * h * w) return gram def normalize_batch(batch): # normalize using imagenet mean and std mean = batch.new_tensor([0.485, 0.456, 0.406]).view(-1, 1, 1) std = batch.new_tensor([0.229, 0.224, 0.225]).view(-1, 1, 1) batch = batch.div_(255.0) return (batch - mean) / std
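# --- Editor's quick sketch of `gram_matrix` above ---
# For a (b, ch, h, w) feature map it returns the (b, ch, ch)
# channel-correlation matrix, normalized by ch * h * w.
if __name__ == "__main__":
    import torch

    feats = torch.rand(2, 8, 16, 16)
    g = gram_matrix(feats)
    print(g.shape)  # torch.Size([2, 8, 8])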
import torch.nn as nn
from transformers import AutoConfig, AutoModelForSequenceClassification


class TransformerModel(nn.Module):
    def __init__(self, model_name, model_dir, dropout, n_fc, n_classes):
        super(TransformerModel, self).__init__()

        # Build the HF config for a sequence-classification head:
        # - num_labels drives the size of the classification head
        # - classifier_dropout sets the dropout of that head (model-specific)
        # - output_hidden_states/output_attentions make the model also return
        #   intermediate activations (n_fc is used here as a truthy flag)
        self.config = AutoConfig.from_pretrained(
            model_name,
            num_labels=n_classes,
            output_hidden_states=n_fc,
            classifier_dropout=dropout,
            output_attentions=True,
        )
        self.transformer = AutoModelForSequenceClassification.from_pretrained(
            model_name, cache_dir=model_dir, config=self.config
        )

    def forward(self, inputs):
        # Only the classification logits are used downstream
        output = self.transformer(**inputs)["logits"]
        return output
import torch class TransformerDataset(torch.utils.data.Dataset): def __init__(self, texts, labels, tokenizer, max_length): self.texts = texts self.labels = labels self.tokenizer = tokenizer self.max_length = max_length def __getitem__(self, idx): text = str(self.texts[idx]) text = " ".join(text.split()) inputs = self.tokenizer( text, None, add_special_tokens=True, max_length=self.max_length, truncation=True, padding="max_length", return_tensors="pt", ) inputs = {k: v.type(torch.long).squeeze(0) for k, v in inputs.items()} labels_pt = torch.tensor(self.labels[idx], dtype=torch.float) return inputs, labels_pt def __len__(self): return len(self.labels)
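# --- Editor's usage sketch for TransformerDataset above ---
# Downloads the "bert-base-uncased" tokenizer on first run; texts and labels
# below are illustrative.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("bert-base-uncased")
    ds = TransformerDataset(["a great movie", "a dull movie"], [1, 0], tok, max_length=16)
    inputs, label = ds[0]
    print({k: tuple(v.shape) for k, v in inputs.items()}, label)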
import torch from dataset import TransformerDataset from datasets import load_dataset from model import TransformerModel from transformers import AutoTokenizer from ignite.handlers import DiskSaver def get_tokenizer(tokenizer_name, tokenizer_dir): tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, cache_dir=tokenizer_dir, do_lower_case=True) return tokenizer def get_model(model_name, model_dir, drop_out, n_fc, num_classes): model = TransformerModel(model_name, model_dir, drop_out, n_fc, num_classes) return model def get_dataset(cache_dir, tokenizer_name, tokenizer_dir, max_length): train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"], cache_dir=cache_dir) tokenizer = get_tokenizer(tokenizer_name, tokenizer_dir) train_texts, train_labels = train_dataset["text"], train_dataset["label"] test_texts, test_labels = test_dataset["text"], test_dataset["label"] train_dataset = TransformerDataset(train_texts, train_labels, tokenizer, max_length) test_dataset = TransformerDataset(test_texts, test_labels, tokenizer, max_length) return train_dataset, test_dataset def thresholded_output_transform(output): y_pred, y = output return torch.round(torch.sigmoid(y_pred)), y def get_save_handler(config): if config["with_clearml"]: from ignite.contrib.handlers.clearml_logger import ClearMLSaver return ClearMLSaver(dirname=config["output_dir"]) return DiskSaver(config["output_dir"], require_empty=False)
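# --- Editor's small sketch of `thresholded_output_transform` above ---
# It maps raw logits to hard 0/1 predictions so that Accuracy can consume
# them (values below are made up).
if __name__ == "__main__":
    logits = torch.tensor([-2.0, 0.5, 3.0])
    targets = torch.tensor([0.0, 1.0, 1.0])
    y_pred, y = thresholded_output_transform((logits, targets))
    print(y_pred)  # tensor([0., 1., 1.])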
import os from datetime import datetime from pathlib import Path import fire import torch import torch.nn as nn import torch.optim as optim import utils from torch.cuda.amp import autocast, GradScaler import ignite import ignite.distributed as idist from ignite.contrib.engines import common from ignite.contrib.handlers import PiecewiseLinear from ignite.engine import Engine, Events from ignite.handlers import Checkpoint, global_step_from_engine from ignite.metrics import Accuracy, Loss from ignite.utils import manual_seed, setup_logger os.environ["TOKENIZERS_PARALLELISM"] = "false" # remove tokenizer paralleism warning def training(local_rank, config): rank = idist.get_rank() manual_seed(config["seed"] + rank) device = idist.device() logger = setup_logger(name="IMDB-Training", distributed_rank=local_rank) log_basic_info(logger, config) output_path = config["output_dir"] if rank == 0: now = datetime.now().strftime("%Y%m%d-%H%M%S") folder_name = f"{config['model']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}" output_path = Path(output_path) / folder_name if not output_path.exists(): output_path.mkdir(parents=True) config["output_dir"] = output_path.as_posix() logger.info(f"Output path: {config['output_dir']}") if "cuda" in device.type: config["cuda device name"] = torch.cuda.get_device_name(local_rank) if config["with_clearml"]: from clearml import Task task = Task.init("IMDB-Training", task_name=output_path.stem) task.connect_configuration(config) # Log hyper parameters hyper_params = [ "model", "dropout", "n_fc", "batch_size", "max_length", "weight_decay", "num_epochs", "learning_rate", "num_warmup_epochs", ] task.connect({k: config[k] for k in hyper_params}) # Setup dataflow, model, optimizer, criterion train_loader, test_loader = get_dataflow(config) config["num_iters_per_epoch"] = len(train_loader) model, optimizer, criterion, lr_scheduler = initialize(config) # Create trainer for current task trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger) # Let's now setup evaluator engine to perform model's validation and compute metrics metrics = { "Accuracy": Accuracy(output_transform=utils.thresholded_output_transform), "Loss": Loss(criterion), } # We define two evaluators as they wont have exactly similar roles: # - `evaluator` will save the best model based on validation score evaluator = create_evaluator(model, metrics, config, tag="val") train_evaluator = create_evaluator(model, metrics, config, tag="train") def run_validation(engine): epoch = trainer.state.epoch state = train_evaluator.run(train_loader) log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics) state = evaluator.run(test_loader) log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics) trainer.add_event_handler( Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED | Events.STARTED, run_validation ) if rank == 0: # Setup TensorBoard logging on trainer and evaluators. Logged values are: # - Training metrics, e.g. 
running average loss values # - Learning rate # - Evaluation train/test metrics evaluators = {"training": train_evaluator, "test": evaluator} tb_logger = common.setup_tb_logging( output_path, trainer, optimizer, evaluators=evaluators, log_every_iters=config["log_every_iters"] ) # Store 2 best models by validation accuracy starting from num_epochs / 2: best_model_handler = Checkpoint( {"model": model}, utils.get_save_handler(config), filename_prefix="best", n_saved=2, global_step_transform=global_step_from_engine(trainer), score_name="test_accuracy", score_function=Checkpoint.get_default_score_fn("Accuracy"), ) evaluator.add_event_handler( Events.COMPLETED(lambda *_: trainer.state.epoch > config["num_epochs"] // 2), best_model_handler ) try: trainer.run(train_loader, max_epochs=config["num_epochs"]) except Exception as e: logger.exception("") raise e if rank == 0: tb_logger.close() def run( seed=543, data_dir="/tmp/data", output_dir="/tmp/output-imdb/", model="bert-base-uncased", model_dir="/tmp/model", tokenizer_dir="/tmp/tokenizer", num_classes=1, dropout=0.3, n_fc=768, max_length=256, batch_size=32, weight_decay=0.01, num_workers=4, num_epochs=3, learning_rate=5e-5, num_warmup_epochs=0, validate_every=1, checkpoint_every=1000, backend=None, resume_from=None, log_every_iters=15, nproc_per_node=None, with_clearml=False, with_amp=False, **spawn_kwargs, ): """Main entry to fine-tune a transformer model on the IMDB dataset for sentiment classification. Args: seed (int): random state seed to set. Default, 543. data_dir (str): dataset cache directory. Default, "/tmp/data". output_dir (str): output path. Default, "/tmp/output-imdb/". model (str): model name (from transformers) to set up the model, tokenizer and config to train. Default, "bert-base-uncased". model_dir (str): cache directory to download the pretrained model. Default, "/tmp/model". tokenizer_dir (str) : tokenizer cache directory. Default, "/tmp/tokenizer". num_classes (int) : number of target classes. Default, 1 (binary classification). dropout (float) : dropout probability. Default, 0.3. n_fc (int) : number of neurons in the last fully connected layer. Default, 768. max_length (int) : maximum number of tokens for the inputs to the transformer model. Default, 256. batch_size (int): total batch size. Default, 32. weight_decay (float): weight decay. Default, 0.01. num_workers (int): number of workers in the data loader. Default, 4. num_epochs (int): number of epochs to train the model. Default, 3. learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 5e-5. num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 0. validate_every (int): run model's validation every ``validate_every`` epochs. Default, 1. checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 1000. backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu", "gloo" etc. Default, None. nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful when the main python process is spawning training as child processes. resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None. log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations. It can be 0 to disable it. Default, 15. with_clearml (bool): if True, experiment ClearML logger is setup. Default, False. with_amp (bool): if True, enables native automatic mixed precision.
Default, False. **spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes """ # check to see if the num_epochs is greater than or equal to num_warmup_epochs if num_warmup_epochs >= num_epochs: raise ValueError( "num_epochs cannot be less than or equal to num_warmup_epochs, please increase num_epochs or decrease " "num_warmup_epochs" ) # catch all local parameters config = locals() config.update(config["spawn_kwargs"]) del config["spawn_kwargs"] spawn_kwargs["nproc_per_node"] = nproc_per_node with idist.Parallel(backend=backend, **spawn_kwargs) as parallel: parallel.run(training, config) def get_dataflow(config): # - Get train/test datasets if idist.get_local_rank() > 0: # Ensure that only local rank 0 download the dataset # Thus each node will download a copy of the dataset idist.barrier() train_dataset, test_dataset = utils.get_dataset( config["data_dir"], config["model"], config["tokenizer_dir"], config["max_length"] ) if idist.get_local_rank() == 0: # Ensure that only local rank 0 download the dataset idist.barrier() # Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu train_loader = idist.auto_dataloader( train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True ) test_loader = idist.auto_dataloader( test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False ) return train_loader, test_loader def initialize(config): model = utils.get_model( config["model"], config["model_dir"], config["dropout"], config["n_fc"], config["num_classes"] ) config["learning_rate"] *= idist.get_world_size() # Adapt model for distributed settings if configured model = idist.auto_model(model) optimizer = optim.AdamW(model.parameters(), lr=config["learning_rate"], weight_decay=config["weight_decay"]) optimizer = idist.auto_optim(optimizer) criterion = nn.BCEWithLogitsLoss() le = config["num_iters_per_epoch"] milestones_values = [ (0, 0.0), (le * config["num_warmup_epochs"], config["learning_rate"]), (le * config["num_epochs"], 0.0), ] lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values) return model, optimizer, criterion, lr_scheduler def log_metrics(logger, epoch, elapsed, tag, metrics): metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()]) logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {elapsed:.2f} - {tag} metrics:\n {metrics_output}") def log_basic_info(logger, config): logger.info(f"Train {config['model']} on IMDB") logger.info(f"- PyTorch version: {torch.__version__}") logger.info(f"- Ignite version: {ignite.__version__}") if torch.cuda.is_available(): # explicitly import cudnn as # torch.backends.cudnn can not be pickled with hvd spawning procs from torch.backends import cudnn logger.info(f"- GPU Device: {torch.cuda.get_device_name(idist.get_local_rank())}") logger.info(f"- CUDA version: {torch.version.cuda}") logger.info(f"- CUDNN version: {cudnn.version()}") logger.info("\n") logger.info("Configuration:") for key, value in config.items(): logger.info(f"\t{key}: {value}") logger.info("\n") if idist.get_world_size() > 1: logger.info("\nDistributed setting:") logger.info(f"\tbackend: {idist.backend()}") logger.info(f"\tworld size: {idist.get_world_size()}") logger.info("\n") def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger): device = idist.device() # Setup Ignite trainer: # - let's define training step # - add other common 
handlers: # - TerminateOnNan, # - handler to setup learning rate scheduling, # - ModelCheckpoint # - RunningAverage` on `train_step` output # - Two progress bars on epochs and optionally on iterations with_amp = config["with_amp"] scaler = GradScaler(enabled=with_amp) def train_step(engine, batch): input_batch = batch[0] labels = batch[1].view(-1, 1) if labels.device != device: input_batch = {k: v.to(device, non_blocking=True, dtype=torch.long) for k, v in batch[0].items()} labels = labels.to(device, non_blocking=True, dtype=torch.float) model.train() with autocast(enabled=with_amp): y_pred = model(input_batch) loss = criterion(y_pred, labels) optimizer.zero_grad() scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() return { "batch loss": loss.item(), } trainer = Engine(train_step) trainer.logger = logger to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler} metric_names = [ "batch loss", ] if config["log_every_iters"] == 0: # Disable logging training metrics: metric_names = None config["log_every_iters"] = 15 common.setup_common_training_handlers( trainer=trainer, train_sampler=train_sampler, to_save=to_save, save_every_iters=config["checkpoint_every"], save_handler=utils.get_save_handler(config), lr_scheduler=lr_scheduler, output_names=metric_names, log_every_iters=config["log_every_iters"], with_pbars=not config["with_clearml"], clear_cuda_cache=False, ) resume_from = config["resume_from"] if resume_from is not None: checkpoint_fp = Path(resume_from) assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found" logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}") checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu") Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint) return trainer def create_evaluator(model, metrics, config, tag="val"): with_amp = config["with_amp"] device = idist.device() @torch.no_grad() def evaluate_step(engine, batch): model.eval() input_batch = batch[0] labels = batch[1].view(-1, 1) if labels.device != device: input_batch = {k: v.to(device, non_blocking=True, dtype=torch.long) for k, v in batch[0].items()} labels = labels.to(device, non_blocking=True, dtype=torch.float) with autocast(enabled=with_amp): output = model(input_batch) return output, labels evaluator = Engine(evaluate_step) for name, metric in metrics.items(): metric.attach(evaluator, name) if idist.get_rank() == 0 and (not config["with_clearml"]): common.ProgressBar(desc=f"Evaluation ({tag})", persist=False).attach(evaluator) return evaluator if __name__ == "__main__": fire.Fire({"run": run})
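# --- Added usage note (hedged) -------------------------------------------------
# With `fire.Fire({"run": run})` above, the script is launched as, for example:
#
#   python main.py run --backend="nccl" --nproc_per_node=2 --with_amp=True
#
# Leaving --backend unset runs on a single device; every flag maps onto a
# keyword argument of `run` via python-fire. The file name (main.py) is an
# assumption about how this module is saved.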
import argparse import numpy as np import torch import torch.nn as nn import torch.optim as optim import torchvision from torch.optim.lr_scheduler import StepLR from torch.utils.data import DataLoader, Dataset from torchvision import datasets from ignite.contrib.handlers import ProgressBar from ignite.engine import Engine, Events from ignite.handlers.param_scheduler import LRScheduler from ignite.metrics import Accuracy, RunningAverage from ignite.utils import manual_seed class SiameseNetwork(nn.Module): # update Siamese Network implementation in accordance with the dataset """ Siamese network for image similarity estimation. The network is composed of two identical networks, one for each input. The output of each network is concatenated and passed to a linear layer. The output of the linear layer passed through a sigmoid function. `"FaceNet" <https://arxiv.org/pdf/1503.03832.pdf>`_ is a variant of the Siamese network. This implementation varies from FaceNet as we use the `ResNet-18` model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>` as our feature extractor. In addition we use CIFAR10 dataset along with TripletMarginLoss """ def __init__(self): super(SiameseNetwork, self).__init__() # get resnet model self.resnet = torchvision.models.resnet34(weights=None) fc_in_features = self.resnet.fc.in_features # changing the FC layer of resnet model to a linear layer self.resnet.fc = nn.Identity() # add linear layers to compare between the features of the two images self.fc = nn.Sequential( nn.Linear(fc_in_features, 256), nn.ReLU(inplace=True), nn.Linear(256, 10), nn.ReLU(inplace=True), ) # initialise relu activation self.relu = nn.ReLU() # initialize the weights self.resnet.apply(self.init_weights) self.fc.apply(self.init_weights) def init_weights(self, m): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) m.bias.data.fill_(0.01) def forward_once(self, x): output = self.resnet(x) output = output.view(output.size()[0], -1) return output def forward(self, input1, input2, input3): # pass the input through resnet output1 = self.forward_once(input1) output2 = self.forward_once(input2) output3 = self.forward_once(input3) # pass the output of resnet to sigmoid layer output1 = self.fc(output1) output2 = self.fc(output2) output3 = self.fc(output3) return output1, output2, output3 class MatcherDataset(Dataset): # following class implements data downloading and handles preprocessing def __init__(self, root, train, download=False): super(MatcherDataset, self).__init__() # get CIFAR10 dataset self.dataset = datasets.CIFAR10(root, train=train, download=download) # convert data from numpy array to Tensor self.data = torch.from_numpy(self.dataset.data) # shift the dimensions of dataset to match the initial input layer dimensions self.data = torch.movedim(self.data, (0, 1, 2, 3), (0, 2, 3, 1)) # convert targets list to torch Tensor self.dataset.targets = torch.tensor(self.dataset.targets) self.group_examples() def group_examples(self): """ To ease the accessibility of data based on the class, we will use `group_examples` to group examples based on class. The data classes have already been mapped to numeric values and so are the target outputs for each training input Every key in `grouped_examples` corresponds to a class in CIFAR10 dataset. For every key in `grouped_examples`, every value will conform to all of the indices for the CIFAR10 dataset examples that correspond to that key. 
""" # get the targets from CIFAR10 dataset np_arr = np.array(self.dataset.targets) # group examples based on class self.grouped_examples = {} for i in range(0, 10): self.grouped_examples[i] = np.where((np_arr == i))[0] def __len__(self): return self.data.shape[0] def __getitem__(self, index): """ For every sample in the batch we select 3 images. First one is the anchor image which is the image obtained from the current index. We also obtain the label of anchor image. Now we select two random images, one belonging to the same class as that of the anchor image (named as positive_image) and the other belonging to a different class than that of the anchor image (named as negative_image). We return the anchor image, positive image, negative image and anchor label. """ # obtain the anchor image anchor_image = self.data[index].float() # obtain the class label of the anchor image anchor_label = self.dataset.targets[index] anchor_label = int(anchor_label.item()) # find a label which is different from anchor_label labels = list(range(0, 10)) labels.remove(anchor_label) neg_index = torch.randint(0, 9, (1,)).item() neg_label = labels[neg_index] # get a random index from the range range of indices random_index = torch.randint(0, len(self.grouped_examples[anchor_label]), (1,)).item() # get the index of image in actual data using the anchor label and random index positive_index = self.grouped_examples[anchor_label][random_index] # choosing a random image using positive_index positive_image = self.data[positive_index].float() # get a random index from the range range of indices random_index = torch.randint(0, len(self.grouped_examples[neg_label]), (1,)).item() # get the index of image in actual data using the negative label and random index negative_index = self.grouped_examples[neg_label][random_index] # choosing a random image using negative_index negative_image = self.data[negative_index].float() return anchor_image, positive_image, negative_image, anchor_label def pairwise_distance(input1, input2): dist = input1 - input2 dist = torch.pow(dist, 2) return dist def calculate_loss(input1, input2): output = pairwise_distance(input1, input2) loss = torch.sum(output, 1) loss = torch.sqrt(loss) return loss def run(args, model, device, optimizer, train_loader, test_loader, lr_scheduler): # using Triplet Margin Loss criterion = nn.TripletMarginLoss(p=2, margin=2.8) # define model training step def train_step(engine, batch): model.train() anchor_image, positive_image, negative_image, anchor_label = batch anchor_image = anchor_image.to(device) positive_image, negative_image = positive_image.to(device), negative_image.to(device) anchor_label = anchor_label.to(device) optimizer.zero_grad() anchor_out, positive_out, negative_out = model(anchor_image, positive_image, negative_image) loss = criterion(anchor_out, positive_out, negative_out) loss.backward() optimizer.step() return loss # define model testing step def test_step(engine, batch): model.eval() with torch.no_grad(): anchor_image, _, _, anchor_label = batch anchor_image = anchor_image.to(device) anchor_label = anchor_label.to(device) other_image = [] other_label = [] y_true = [] for i in range(anchor_image.shape[0]): index = torch.randint(0, anchor_image.shape[0], (1,)).item() img = anchor_image[index] label = anchor_label[index] other_image.append(img) other_label.append(label) if anchor_label[i] == other_label[i]: y_true.append(1) else: y_true.append(0) other = torch.stack(other_image) other_label = torch.tensor(other_label) other, other_label = 
other.to(device), other_label.to(device) anchor_out, other_out, _ = model(anchor_image, other, other) test_loss = calculate_loss(anchor_out, other_out) y_pred = torch.where(test_loss < 3, 1, 0) y_true = torch.tensor(y_true) return [y_pred, y_true] # create engines for trainer and evaluator trainer = Engine(train_step) evaluator = Engine(test_step) # attach Running Average Loss metric to trainer and evaluator engines RunningAverage(output_transform=lambda x: x).attach(trainer, "loss") Accuracy(output_transform=lambda x: x).attach(evaluator, "accuracy") # attach progress bar to trainer with loss pbar1 = ProgressBar() pbar1.attach(trainer, metric_names=["loss"]) # attach progress bar to evaluator pbar2 = ProgressBar() pbar2.attach(evaluator) # attach LR Scheduler to trainer engine trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler) # event handler triggers evaluator at end of every epoch @trainer.on(Events.EPOCH_COMPLETED(every=args.log_interval)) def test(engine): state = evaluator.run(test_loader) print(f'Test Accuracy: {state.metrics["accuracy"]}') # run the trainer trainer.run(train_loader, max_epochs=args.epochs) def main(): # adds training defaults and support for terminal arguments parser = argparse.ArgumentParser(description="PyTorch Siamese network Example") parser.add_argument( "--batch-size", type=int, default=256, metavar="N", help="input batch size for training (default: 256)" ) parser.add_argument( "--test-batch-size", type=int, default=256, metavar="N", help="input batch size for testing (default: 256)" ) parser.add_argument("--epochs", type=int, default=10, metavar="N", help="number of epochs to train (default: 10)") parser.add_argument("--lr", type=float, default=1.0, metavar="LR", help="learning rate (default: 1.0)") parser.add_argument( "--gamma", type=float, default=0.95, metavar="M", help="Learning rate step gamma (default: 0.95)" ) parser.add_argument("--no-cuda", action="store_true", default=False, help="disables CUDA training") parser.add_argument("--no-mps", action="store_true", default=False, help="disables macOS GPU training") parser.add_argument("--dry-run", action="store_true", default=False, help="quickly check a single pass") parser.add_argument("--seed", type=int, default=1, metavar="S", help="random seed (default: 1)") parser.add_argument( "--log-interval", type=int, default=1, metavar="N", help="how many batches to wait before logging training status", ) parser.add_argument("--save-model", action="store_true", default=False, help="For Saving the current Model") parser.add_argument("--num-workers", type=int, default=4, help="number of processes generating parallel batches") args = parser.parse_args() # set manual seed manual_seed(args.seed) # set device device = torch.device("cuda") if torch.cuda.is_available() and not args.no_cuda else torch.device("cpu") # data loading train_dataset = MatcherDataset("../data", train=True, download=True) test_dataset = MatcherDataset("../data", train=False) train_loader = DataLoader(train_dataset, shuffle=True, batch_size=args.batch_size, num_workers=args.num_workers) test_loader = DataLoader(test_dataset, batch_size=args.test_batch_size, num_workers=args.num_workers) # set model parameters model = SiameseNetwork().to(device) optimizer = optim.Adadelta(model.parameters(), lr=args.lr) scheduler = StepLR(optimizer, step_size=15, gamma=args.gamma) lr_scheduler = LRScheduler(scheduler) # call run function run(args, model, device, optimizer, train_loader, test_loader, lr_scheduler) if __name__ == "__main__": main()
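# --- Added illustration (not part of the original script; never called) --------
# The TripletMarginLoss used in `run` computes
#     loss = max(d(anchor, positive) - d(anchor, negative) + margin, 0)
# with d the pairwise p-norm distance. A tiny self-contained check of the
# margin semantics:
def _triplet_loss_toy_check():
    criterion = nn.TripletMarginLoss(p=2, margin=2.8)
    anchor = torch.zeros(1, 4)
    positive = torch.zeros(1, 4)          # d(anchor, positive) = 0
    negative = torch.full((1, 4), 10.0)   # d(anchor, negative) = sqrt(4 * 100) = 20
    # 0 - 20 + 2.8 < 0, so the triplet already satisfies the margin: loss is 0.
    return criterion(anchor, positive, negative)  # tensor(0.)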
import os
from pathlib import Path

from torchvision import datasets, models
from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor

train_transform = Compose(
    [
        Pad(4),
        RandomCrop(32, fill=128),
        RandomHorizontalFlip(),
        ToTensor(),
        Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ]
)

test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])


def get_train_test_datasets(path):
    path = Path(path)
    if not path.exists():
        path.mkdir(parents=True)
        download = True
    else:
        download = True if len(os.listdir(path)) < 1 else False
    train_ds = datasets.CIFAR10(root=path, train=True, download=download, transform=train_transform)
    test_ds = datasets.CIFAR10(root=path, train=False, download=False, transform=test_transform)
    return train_ds, test_ds


def get_model(name):
    if name in models.__dict__:
        fn = models.__dict__[name]
    else:
        raise RuntimeError(f"Unknown model name {name}")
    return fn(num_classes=10)
from datetime import datetime from pathlib import Path from typing import Any, Optional import fire import torch import torch.nn as nn import torch.optim as optim import utils from torch.cuda.amp import autocast, GradScaler import ignite import ignite.distributed as idist from ignite.contrib.engines import common from ignite.contrib.handlers import PiecewiseLinear from ignite.engine import Engine, Events from ignite.handlers import Checkpoint, DiskSaver, global_step_from_engine from ignite.metrics import Accuracy, Loss from ignite.utils import manual_seed, setup_logger def training(local_rank, config): rank = idist.get_rank() manual_seed(config["seed"] + rank) device = idist.device() logger = setup_logger(name="CIFAR10-Training") log_basic_info(logger, config) output_path = config["output_path"] if rank == 0: if config["stop_iteration"] is None: now = datetime.now().strftime("%Y%m%d-%H%M%S") else: now = f"stop-on-{config['stop_iteration']}" folder_name = f"{config['model']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}" output_path = Path(output_path) / folder_name if not output_path.exists(): output_path.mkdir(parents=True) config["output_path"] = output_path.as_posix() logger.info(f"Output path: {config['output_path']}") if "cuda" in device.type: config["cuda device name"] = torch.cuda.get_device_name(local_rank) if config["with_clearml"]: from clearml import Task task = Task.init("CIFAR10-Training", task_name=output_path.stem) task.connect_configuration(config) # Log hyper parameters hyper_params = [ "model", "batch_size", "momentum", "weight_decay", "num_epochs", "learning_rate", "num_warmup_epochs", ] task.connect({k: config[k] for k in hyper_params}) # Setup dataflow, model, optimizer, criterion train_loader, test_loader = get_dataflow(config) config["num_iters_per_epoch"] = len(train_loader) model, optimizer, criterion, lr_scheduler = initialize(config) # Create trainer for current task trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger) # Let's now setup evaluator engine to perform model's validation and compute metrics metrics = { "Accuracy": Accuracy(), "Loss": Loss(criterion), } # We define two evaluators as they wont have exactly similar roles: # - `evaluator` will save the best model based on validation score evaluator = create_evaluator(model, metrics=metrics, config=config) train_evaluator = create_evaluator(model, metrics=metrics, config=config) def run_validation(engine): epoch = trainer.state.epoch state = train_evaluator.run(train_loader) log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics) state = evaluator.run(test_loader) log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics) trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED, run_validation) if rank == 0: # Setup TensorBoard logging on trainer and evaluators. Logged values are: # - Training metrics, e.g. 
running average loss values # - Learning rate # - Evaluation train/test metrics evaluators = {"training": train_evaluator, "test": evaluator} tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators) # Store 2 best models by validation accuracy starting from num_epochs / 2: best_model_handler = Checkpoint( {"model": model}, get_save_handler(config), filename_prefix="best", n_saved=2, global_step_transform=global_step_from_engine(trainer), score_name="test_accuracy", score_function=Checkpoint.get_default_score_fn("Accuracy"), ) evaluator.add_event_handler( Events.COMPLETED(lambda *_: trainer.state.epoch > config["num_epochs"] // 2), best_model_handler ) # In order to check training resuming we can stop training on a given iteration if config["stop_iteration"] is not None: @trainer.on(Events.ITERATION_STARTED(once=config["stop_iteration"])) def _(): logger.info(f"Stop training on {trainer.state.iteration} iteration") trainer.terminate() try: trainer.run(train_loader, max_epochs=config["num_epochs"]) except Exception as e: logger.exception("") raise e if rank == 0: tb_logger.close() def run( seed: int = 543, data_path: str = "/tmp/cifar10", output_path: str = "/tmp/output-cifar10/", model: str = "resnet18", batch_size: int = 512, momentum: float = 0.9, weight_decay: float = 1e-4, num_workers: int = 12, num_epochs: int = 24, learning_rate: float = 0.4, num_warmup_epochs: int = 4, validate_every: int = 3, checkpoint_every: int = 1000, backend: Optional[str] = None, resume_from: Optional[str] = None, log_every_iters: int = 15, nproc_per_node: Optional[int] = None, stop_iteration: Optional[int] = None, with_clearml: bool = False, with_amp: bool = False, **spawn_kwargs: Any, ): """Main entry to train an model on CIFAR10 dataset. Args: seed (int): random state seed to set. Default, 543. data_path (str): input dataset path. Default, "/tmp/cifar10". output_path (str): output path. Default, "/tmp/output-cifar10". model (str): model name (from torchvision) to setup model to train. Default, "resnet18". batch_size (int): total batch size. Default, 512. momentum (float): optimizer's momentum. Default, 0.9. weight_decay (float): weight decay. Default, 1e-4. num_workers (int): number of workers in the data loader. Default, 12. num_epochs (int): number of epochs to train the model. Default, 24. learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4. num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4. validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3. checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 1000. backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu", "gloo" etc. Default, None. nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful, when main python process is spawning training as child processes. resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None. log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations. It can be 0 to disable it. Default, 15. stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint. with_clearml (bool): if True, experiment ClearML logger is setup. Default, False. with_amp (bool): if True, enables native automatic mixed precision. Default, False. 
**spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes """ # check to see if the num_epochs is greater than or equal to num_warmup_epochs if num_warmup_epochs >= num_epochs: raise ValueError( "num_epochs cannot be less than or equal to num_warmup_epochs, please increase num_epochs or decrease " "num_warmup_epochs" ) # catch all local parameters config = locals() config.update(config["spawn_kwargs"]) del config["spawn_kwargs"] spawn_kwargs["nproc_per_node"] = nproc_per_node if backend == "xla-tpu" and with_amp: raise RuntimeError("The value of with_amp should be False if backend is xla") with idist.Parallel(backend=backend, **spawn_kwargs) as parallel: parallel.run(training, config) def get_dataflow(config): # - Get train/test datasets with idist.one_rank_first(local=True): train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"]) # Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu train_loader = idist.auto_dataloader( train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True ) test_loader = idist.auto_dataloader( test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False ) return train_loader, test_loader def initialize(config): model = utils.get_model(config["model"]) # Adapt model for distributed settings if configured model = idist.auto_model(model) optimizer = optim.SGD( model.parameters(), lr=config["learning_rate"], momentum=config["momentum"], weight_decay=config["weight_decay"], nesterov=True, ) optimizer = idist.auto_optim(optimizer) criterion = nn.CrossEntropyLoss().to(idist.device()) le = config["num_iters_per_epoch"] milestones_values = [ (0, 0.0), (le * config["num_warmup_epochs"], config["learning_rate"]), (le * config["num_epochs"], 0.0), ] lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values) return model, optimizer, criterion, lr_scheduler def log_metrics(logger, epoch, elapsed, tag, metrics): metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()]) logger.info(f"Epoch[{epoch}] - Evaluation time (seconds): {elapsed:.3f}\n - {tag} metrics:\n {metrics_output}") def log_basic_info(logger, config): logger.info(f"Train {config['model']} on CIFAR10") logger.info(f"- PyTorch version: {torch.__version__}") logger.info(f"- Ignite version: {ignite.__version__}") if torch.cuda.is_available(): # explicitly import cudnn as # torch.backends.cudnn can not be pickled with hvd spawning procs from torch.backends import cudnn logger.info(f"- GPU Device: {torch.cuda.get_device_name(idist.get_local_rank())}") logger.info(f"- CUDA version: {torch.version.cuda}") logger.info(f"- CUDNN version: {cudnn.version()}") logger.info("\n") logger.info("Configuration:") for key, value in config.items(): logger.info(f"\t{key}: {value}") logger.info("\n") if idist.get_world_size() > 1: logger.info("\nDistributed setting:") logger.info(f"\tbackend: {idist.backend()}") logger.info(f"\tworld size: {idist.get_world_size()}") logger.info("\n") def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger): device = idist.device() # Setup Ignite trainer: # - let's define training step # - add other common handlers: # - TerminateOnNan, # - handler to setup learning rate scheduling, # - ModelCheckpoint # - RunningAverage` on `train_step` output # - Two progress bars on epochs and optionally on iterations with_amp = config["with_amp"] scaler 
= GradScaler(enabled=with_amp) def train_step(engine, batch): x, y = batch[0], batch[1] if x.device != device: x = x.to(device, non_blocking=True) y = y.to(device, non_blocking=True) model.train() with autocast(enabled=with_amp): y_pred = model(x) loss = criterion(y_pred, y) optimizer.zero_grad() scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() return { "batch loss": loss.item(), } trainer = Engine(train_step) trainer.logger = logger to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler} metric_names = [ "batch loss", ] common.setup_common_training_handlers( trainer=trainer, train_sampler=train_sampler, to_save=to_save, save_every_iters=config["checkpoint_every"], save_handler=get_save_handler(config), lr_scheduler=lr_scheduler, output_names=metric_names if config["log_every_iters"] > 0 else None, with_pbars=False, clear_cuda_cache=False, ) resume_from = config["resume_from"] if resume_from is not None: checkpoint_fp = Path(resume_from) assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found" logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}") checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu") Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint) return trainer def create_evaluator(model, metrics, config, tag="val"): with_amp = config["with_amp"] device = idist.device() @torch.no_grad() def evaluate_step(engine: Engine, batch): model.eval() x, y = batch[0], batch[1] if x.device != device: x = x.to(device, non_blocking=True) y = y.to(device, non_blocking=True) with autocast(enabled=with_amp): output = model(x) return output, y evaluator = Engine(evaluate_step) for name, metric in metrics.items(): metric.attach(evaluator, name) return evaluator def get_save_handler(config): if config["with_clearml"]: from ignite.contrib.handlers.clearml_logger import ClearMLSaver return ClearMLSaver(dirname=config["output_path"]) return DiskSaver(config["output_path"], require_empty=False) if __name__ == "__main__": fire.Fire({"run": run})
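# --- Added note (hedged) -------------------------------------------------------
# The schedule built in `initialize` interpolates the learning rate linearly
# between (iteration, value) milestones: it ramps from 0.0 up to `learning_rate`
# over the first num_warmup_epochs * num_iters_per_epoch iterations, then decays
# linearly back to 0.0 at num_epochs * num_iters_per_epoch. A standalone
# equivalent (the 97 iterations per epoch is illustrative, not from the source):
#
#   milestones_values = [(0, 0.0), (4 * 97, 0.4), (24 * 97, 0.0)]
#   lr_scheduler = PiecewiseLinear(optimizer, param_name="lr",
#                                  milestones_values=milestones_values)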
import os from pathlib import Path import torch import torch.nn as nn import torch.optim as optim from torch.optim.lr_scheduler import StepLR from torchvision import datasets, models from torchvision.transforms import Compose, Normalize, Pad, RandomCrop, RandomHorizontalFlip, ToTensor import ignite.distributed as idist from ignite.contrib.engines import common from ignite.contrib.handlers import ProgressBar from ignite.engine import Engine, Events, create_supervised_evaluator from ignite.metrics import Accuracy in_colab = "COLAB_TPU_ADDR" in os.environ with_torchrun = "WORLD_SIZE" in os.environ train_transform = Compose( [ Pad(4), RandomCrop(32, fill=128), RandomHorizontalFlip(), ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.23, 0.225)), ] ) test_transform = Compose([ToTensor(), Normalize((0.485, 0.456, 0.406), (0.229, 0.23, 0.225)),]) def get_train_test_datasets(path): # - Get train/test datasets if idist.get_rank() > 0: # Ensure that only rank 0 download the dataset idist.barrier() train_ds = datasets.CIFAR10(root=path, train=True, download=True, transform=train_transform) test_ds = datasets.CIFAR10(root=path, train=False, download=False, transform=test_transform) if idist.get_rank() == 0: # Ensure that only rank 0 download the dataset idist.barrier() return train_ds, test_ds def get_model(name): if name in models.__dict__: fn = models.__dict__[name] else: raise RuntimeError(f"Unknown model name {name}") return fn(num_classes=10) def get_dataflow(config): train_dataset, test_dataset = get_train_test_datasets(config.get("data_path", ".")) # Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu train_loader = idist.auto_dataloader( train_dataset, batch_size=config.get("batch_size", 512), num_workers=config.get("num_workers", 8), shuffle=True, drop_last=True, ) config["num_iters_per_epoch"] = len(train_loader) test_loader = idist.auto_dataloader( test_dataset, batch_size=2 * config.get("batch_size", 512), num_workers=config.get("num_workers", 8), shuffle=False, ) return train_loader, test_loader def initialize(config): model = get_model(config["model"]) # Adapt model for distributed settings if configured model = idist.auto_model(model) optimizer = optim.SGD( model.parameters(), lr=config.get("learning_rate", 0.1), momentum=config.get("momentum", 0.9), weight_decay=config.get("weight_decay", 1e-5), nesterov=True, ) optimizer = idist.auto_optim(optimizer) criterion = nn.CrossEntropyLoss().to(idist.device()) le = config["num_iters_per_epoch"] lr_scheduler = StepLR(optimizer, step_size=le, gamma=0.9) return model, optimizer, criterion, lr_scheduler # slide 1 #################################################################### def create_trainer(model, optimizer, criterion, lr_scheduler, config): # Define any training logic for iteration update def train_step(engine, batch): x, y = batch[0].to(idist.device()), batch[1].to(idist.device()) model.train() y_pred = model(x) loss = criterion(y_pred, y) optimizer.zero_grad() loss.backward() optimizer.step() lr_scheduler.step() return loss.item() # Define trainer engine trainer = Engine(train_step) if idist.get_rank() == 0: # Add any custom handlers @trainer.on(Events.ITERATION_COMPLETED(every=200)) def save_checkpoint(): fp = Path(config.get("output_path", "output")) / "checkpoint.pt" torch.save(model.state_dict(), fp) # Add progress bar showing batch loss value ProgressBar().attach(trainer, output_transform=lambda x: {"batch loss": x}) return trainer # slide 2 
#################################################################### def training(local_rank, config): # Setup dataflow and train_loader, val_loader = get_dataflow(config) model, optimizer, criterion, lr_scheduler = initialize(config) # Setup model trainer and evaluator trainer = create_trainer(model, optimizer, criterion, lr_scheduler, config) evaluator = create_supervised_evaluator(model, metrics={"accuracy": Accuracy()}, device=idist.device()) # Run model evaluation every 3 epochs and show results @trainer.on(Events.EPOCH_COMPLETED(every=3)) def evaluate_model(): state = evaluator.run(val_loader) if idist.get_rank() == 0: print(state.metrics) # Setup tensorboard experiment tracking if idist.get_rank() == 0: tb_logger = common.setup_tb_logging( config.get("output_path", "output"), trainer, optimizer, evaluators={"validation": evaluator}, ) trainer.run(train_loader, max_epochs=config.get("max_epochs", 3)) if idist.get_rank() == 0: tb_logger.close() # slide 3 #################################################################### # Simply run everything on your infrastructure # --- Single computation device --- # $ python main.py # if __name__ == "__main__" and not (in_colab or with_torchrun): backend = None nproc_per_node = None config = { "model": "resnet18", "dataset": "cifar10", } with idist.Parallel(backend=backend, nproc_per_node=nproc_per_node) as parallel: parallel.run(training, config) # --- Multiple GPUs --- # $ torchrun --nproc_per_node=2 main.py # if __name__ == "__main__" and with_torchrun: backend = "nccl" # or "nccl", "gloo", ... nproc_per_node = None config = { "model": "resnet18", "dataset": "cifar10", } with idist.Parallel(backend=backend, nproc_per_node=nproc_per_node) as parallel: parallel.run(training, config) # --- Multiple TPUs --- # In Colab # if in_colab: backend = "xla-tpu" nproc_per_node = 8 config = { "model": "resnet18", "dataset": "cifar10", } with idist.Parallel(backend=backend, nproc_per_node=nproc_per_node) as parallel: parallel.run(training, config) # Full featured CIFAR10 example: # https://github.com/pytorch/ignite/tree/master/examples/cifar10
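# --- Added note (hedged) -------------------------------------------------------
# idist.Parallel can also spawn the worker processes itself, without torchrun,
# when both a backend and nproc_per_node are provided, e.g.:
#
#   with idist.Parallel(backend="nccl", nproc_per_node=2) as parallel:
#       parallel.run(training, config)
#
# This is equivalent in spirit to the torchrun launch above, with process
# creation handled inside the script.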
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile

# Trace MobileNetV2 with an example input, optimize it for mobile, and save it
# in lite-interpreter format for the iOS HelloWorld app.
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
torchscript_model_optimized = optimize_for_mobile(traced_script_module)
torchscript_model_optimized._save_for_lite_interpreter("HelloWorld/HelloWorld/model/model.pt")
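# --- Added sanity check (hedged) -----------------------------------------------
# The saved lite-interpreter file can be loaded back in Python through the
# (private, subject-to-change) mobile loader to confirm the export round-trips:
from torch.jit.mobile import _load_for_lite_interpreter

lite_model = _load_for_lite_interpreter("HelloWorld/HelloWorld/model/model.pt")
print(lite_model(example).shape)  # expected: torch.Size([1, 1000])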
from typing import Dict, List, Optional, Tuple import json import math from fairseq.data import Dictionary import torch import torchaudio from torchaudio.pipelines import EMFORMER_RNNT_BASE_LIBRISPEECH from torchaudio.models import Hypothesis def get_hypo_tokens(hypo: Hypothesis) -> List[int]: return hypo[0] def get_hypo_score(hypo: Hypothesis) -> float: return hypo[3] def to_string(input: List[int], tgt_dict: List[str], bos_idx: int = 0, eos_idx: int = 2, separator: str = "",) -> str: # torchscript dislikes sets extra_symbols_to_ignore: Dict[int, int] = {} extra_symbols_to_ignore[eos_idx] = 1 extra_symbols_to_ignore[bos_idx] = 1 # it also dislikes comprehensions with conditionals filtered_idx: List[int] = [] for idx in input: if idx not in extra_symbols_to_ignore: filtered_idx.append(idx) return separator.join([tgt_dict[idx] for idx in filtered_idx]).replace("\u2581", " ") def post_process_hypos( hypos: List[Hypothesis], tgt_dict: List[str], ) -> List[Tuple[str, List[float], List[int]]]: post_process_remove_list = [ 3, # unk 2, # eos 1, # pad ] hypos_str: List[str] = [] for h in hypos: filtered_tokens: List[int] = [] for token_index in get_hypo_tokens(h)[1:]: if token_index not in post_process_remove_list: filtered_tokens.append(token_index) string = to_string(filtered_tokens, tgt_dict) hypos_str.append(string) hypos_ids = [get_hypo_tokens(h)[1:] for h in hypos] hypos_score = [[math.exp(get_hypo_score(h))] for h in hypos] nbest_batch = list(zip(hypos_str, hypos_score, hypos_ids)) return nbest_batch def _piecewise_linear_log(x): x[x > math.e] = torch.log(x[x > math.e]) x[x <= math.e] = x[x <= math.e] / math.e return x class ModelWrapper(torch.nn.Module): def __init__(self, tgt_dict: List[str]): super().__init__() self.transform = torchaudio.transforms.MelSpectrogram(sample_rate=16000, n_fft=400, n_mels=80, hop_length=160) self.decoder = EMFORMER_RNNT_BASE_LIBRISPEECH.get_decoder() self.tgt_dict = tgt_dict with open("global_stats.json") as f: blob = json.loads(f.read()) self.mean = torch.tensor(blob["mean"]) self.invstddev = torch.tensor(blob["invstddev"]) self.decibel = 2 * 20 * math.log10(32767) self.gain = pow(10, 0.05 * self.decibel) def forward( self, input: torch.Tensor, prev_hypo: Optional[Hypothesis], prev_state: Optional[List[List[torch.Tensor]]] ) -> Tuple[str, Hypothesis, Optional[List[List[torch.Tensor]]]]: spectrogram = self.transform(input).transpose(1, 0) features = _piecewise_linear_log(spectrogram * self.gain).unsqueeze(0)[:, :-1] features = (features - self.mean) * self.invstddev length = torch.tensor([features.shape[1]]) hypotheses, state = self.decoder.infer(features, length, 10, state=prev_state, hypothesis=prev_hypo) transcript = post_process_hypos(hypotheses[:1], self.tgt_dict)[0][0] return transcript, hypotheses[0], state tgt_dict = Dictionary.load("spm_bpe_4096_fairseq.dict") wrapper = ModelWrapper(tgt_dict.symbols) wrapper = torch.jit.script(wrapper) wrapper.save("scripted_wrapper_tuple.pt")
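# --- Added note (hedged) -------------------------------------------------------
# _piecewise_linear_log above applies log(x) for x > e and x / e otherwise; the
# two branches agree at x = e (both give 1.0), so the transform is continuous.
# Note that it mutates its input in place via boolean-mask assignment, which is
# safe here because it is only ever handed the freshly computed spectrogram.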
import torch
# torchaudio is kept from the original script, likely for its side effect of
# registering any torchaudio ops the scripted model may depend on when loaded.
import torchaudio
from torch.utils.mobile_optimizer import optimize_for_mobile


def get_demo_wrapper():
    wrapper = torch.jit.load("scripted_wrapper_tuple.pt")
    return wrapper


wrapper = get_demo_wrapper()
# torch.jit.load already returns a ScriptModule, so torch.jit.script is a no-op here.
scripted_model = torch.jit.script(wrapper)
optimized_model = optimize_for_mobile(scripted_model)
optimized_model._save_for_lite_interpreter("streaming_asrv2.ptl")
print("Done _save_for_lite_interpreter")
import pyaudio import queue import numpy as np import torch import torchaudio def get_demo_wrapper(): wrapper = torch.jit.load("scripted_wrapper_tuple.pt") return wrapper wrapper = get_demo_wrapper() ################################################################ data_queue = queue.Queue() def callback(in_data, frame_count, time_info, status): global data_queue data_queue.put(in_data) return in_data, pyaudio.paContinue state = None hypo = None def transcribe(np_array, should_print=True): global state, hypo tensor = torch.tensor(np_array) transcript, hypo, state = wrapper(tensor, hypo, state) if should_print and transcript: print(transcript, end="", flush=True) previous_right_context = None def process(should_print=True): global previous_right_context if previous_right_context is None: previous_right_context = [ np.frombuffer(data_queue.get(), dtype=np.float32) for _ in range(1) ] # Get 4 segments. segments = [ np.frombuffer(data_queue.get(), dtype=np.float32) for _ in range(4) ] current_input = previous_right_context + segments with torch.no_grad(): transcribe(np.concatenate(current_input), should_print=should_print) # Save right context. previous_right_context = current_input[-1:] # Emformer is configured with input segment size of 4 and right context size of 1. # Pre- time reduction with factor 4, then, we have an input segment size of 16 and # right context size of 4 going into RNN-T. # With a hop length of 160 samples, we then have 16 * 160 = 2560 samples in the input segment # and 4 * 160 = 640 samples in the right context. # Then, since the lowest common factor between 640 and 3600 is 640, we'll # read from the stream in 640-sample increments. p = pyaudio.PyAudio() CHANNELS = 1 RATE = 16000 stream = p.open( format=pyaudio.paFloat32, channels=CHANNELS, rate=RATE, input=True, output=False, frames_per_buffer=640, stream_callback=callback, ) stream.start_stream() # We need to initialize the model by evaluating # a few samples. # If we skip this, evaluation latency will become # prohibitively large. print("Initializing model...") for _ in range(10): process(should_print=False) print("Initialization complete.") data_queue = queue.Queue() previous_right_context = None state = None prev_hypo = None while stream.is_active(): process(should_print=True) stream.stop_stream() stream.close()
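# --- Added note (hedged) -------------------------------------------------------
# Buffer arithmetic for the loop above: the stream delivers float32 chunks of
# 640 samples (frames_per_buffer=640). Each process() call concatenates 1
# right-context chunk with 4 fresh segment chunks, i.e. 5 * 640 = 3200 samples
# = 2560 segment samples + 640 right-context samples, matching the Emformer
# segment and right-context sizes described in the comments above.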
import torch
import torchvision
from torch.backends._coreml.preprocess import (
    CompileSpec,
    TensorSpec,
    CoreMLComputeUnit,
)


def mobilenetv2_spec():
    return {
        "forward": CompileSpec(
            inputs=(
                TensorSpec(
                    shape=[1, 3, 224, 224],
                ),
            ),
            outputs=(
                TensorSpec(
                    shape=[1, 1000],
                ),
            ),
            backend=CoreMLComputeUnit.ALL,
            allow_low_precision=True,
        ),
    }


def main():
    model = torchvision.models.mobilenet_v2(pretrained=True)
    model.eval()
    example = torch.rand(1, 3, 224, 224)
    model = torch.jit.trace(model, example)
    compile_spec = mobilenetv2_spec()
    mlmodel = torch._C._jit_to_backend("coreml", model, compile_spec)
    mlmodel._save_for_lite_interpreter("./mobilenetv2_coreml.ptl")


if __name__ == "__main__":
    main()
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

model = torch.hub.load('pytorch/vision:v0.11.0', 'deeplabv3_resnet50', pretrained=True)
model.eval()

scripted_module = torch.jit.script(model)
# Save both a regular TorchScript model (for the full JIT runtime) and a
# lite-interpreter model (for PyTorch Mobile).
optimized_model = optimize_for_mobile(scripted_module)
optimized_model.save("ImageSegmentation/deeplabv3_scripted.pt")
optimized_model._save_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")
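# --- Added sanity check (hedged) -----------------------------------------------
# DeepLabV3 returns a dict whose "out" entry holds per-pixel class scores; for a
# 1 x 3 x H x W input the output is 1 x 21 x H x W (21 Pascal VOC classes).
example = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    out = scripted_module(example)["out"]
print(out.shape)  # torch.Size([1, 21, 224, 224])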
import torch
from torch import Tensor
from torch.utils.mobile_optimizer import optimize_for_mobile
import torchaudio
from torchaudio.models.wav2vec2.utils.import_huggingface import import_huggingface_model
from transformers import Wav2Vec2ForCTC


# Wav2vec2 model emits sequences of probability (logits) distributions over the characters
# The following class adds steps to decode the transcript (best path)
class SpeechRecognizer(torch.nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.labels = [
            "<s>", "<pad>", "</s>", "<unk>", "|", "E", "T", "A", "O", "N", "I", "H", "S",
            "R", "D", "L", "U", "M", "W", "C", "F", "G", "Y", "P", "B", "V", "K", "'", "X",
            "J", "Q", "Z"]

    def forward(self, waveforms: Tensor) -> str:
        """Given a single channel speech data, return transcription.

        Args:
            waveforms (Tensor): Speech tensor. Shape `[1, num_frames]`.

        Returns:
            str: The resulting transcript
        """
        logits, _ = self.model(waveforms)  # [batch, num_seq, num_label]
        best_path = torch.argmax(logits[0], dim=-1)  # [num_seq,]
        prev = ''
        hypothesis = ''
        for i in best_path:
            char = self.labels[i]
            if char == prev:
                continue
            if char == '<s>':
                prev = ''
                continue
            hypothesis += char
            prev = char
        return hypothesis.replace('|', ' ')


# Load Wav2Vec2 pretrained model from Hugging Face Hub
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
# Convert the model to torchaudio format, which supports TorchScript.
model = import_huggingface_model(model)
# Remove weight normalization which is not supported by quantization.
model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
model = model.eval()
# Attach decoder
model = SpeechRecognizer(model)

# Apply quantization / script / optimize for mobile
quantized_model = torch.quantization.quantize_dynamic(
    model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
scripted_model = torch.jit.script(quantized_model)
optimized_model = optimize_for_mobile(scripted_model)

# Sanity check
waveform, _ = torchaudio.load('scent_of_a_woman_future.wav')
print(waveform.size())
print('Result:', optimized_model(waveform))

optimized_model._save_for_lite_interpreter("SpeechRecognition/wav2vec2.ptl")
import torch
from pytorchvideo.accelerator.deployment.mobile_cpu.utils.model_conversion import (
    convert_to_deployable_form,
)
from pytorchvideo.models.accelerator.mobile_cpu.efficient_x3d import EfficientX3d
from torch.hub import load_state_dict_from_url
from torch.utils.mobile_optimizer import (
    optimize_for_mobile,
)

# Load the EfficientX3d-XS video classification model with pretrained Kinetics weights.
model_efficient_x3d_xs = EfficientX3d(expansion='XS', head_act='identity')
checkpoint_path = 'https://dl.fbaipublicfiles.com/pytorchvideo/model_zoo/kinetics/efficient_x3d_xs_original_form.pyth'
checkpoint = load_state_dict_from_url(checkpoint_path)
model_efficient_x3d_xs.load_state_dict(checkpoint)

# Convert to the deployable mobile-CPU form, trace, optimize, and save for the lite interpreter.
input_blob_size = (1, 3, 4, 160, 160)
input_tensor = torch.randn(input_blob_size)
model_efficient_x3d_xs_deploy = convert_to_deployable_form(model_efficient_x3d_xs, input_tensor)
traced_model = torch.jit.trace(model_efficient_x3d_xs_deploy, input_tensor, strict=False)
optimized_traced_model = optimize_for_mobile(traced_model)
optimized_traced_model._save_for_lite_interpreter("TorchVideo/video_classification.ptl")
import torch from transformers import DistilBertTokenizer, DistilBertForQuestionAnswering from torch.utils.mobile_optimizer import optimize_for_mobile tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased-distilled-squad') model = DistilBertForQuestionAnswering.from_pretrained('distilbert-base-uncased-distilled-squad') model.eval() question, text = "When will support for GPU be available?!", "There is a growing need to execute ML models on edge devices to reduce latency, preserve privacy and enable new interactive use cases. In the past, engineers used to train models separately. They would then go through a multi-step, error prone and often complex process to transform the models for execution on a mobile device. The mobile runtime was often significantly different from the operations available during training leading to inconsistent developer and eventually user experience. PyTorch Mobile removes these friction surfaces by allowing a seamless process to go from training to deployment by staying entirely within the PyTorch ecosystem. It provides an end-to-end workflow that simplifies the research to production environment for mobile devices. In addition, it paves the way for privacy-preserving features via Federated Learning techniques. PyTorch Mobile is in beta stage right now and in wide scale production use. It will soon be available as a stable release once the APIs are locked down. Key features of PyTorch Mobile: Available for iOS, Android and Linux; Provides APIs that cover common preprocessing and integration tasks needed for incorporating ML in mobile applications; Support for tracing and scripting via TorchScript IR; Support for XNNPACK floating point kernel libraries for Arm CPUs; Integration of QNNPACK for 8-bit quantized kernels. Includes support for per-channel quantization, dynamic quantization and more; Build level optimization and selective compilation depending on the operators needed for user applications, i.e., the final binary size of the app is determined by the actual operators the app needs; Support for hardware backends like GPU, DSP, NPU will be available soon." inputs = tokenizer(question, text, return_tensors='pt') # inputs['input_ids'].size() is 360, the maximum size of the input tokens generated from the user question and text # on mobile apps, if the size of the input tokens of the text and question is less than 360, padding will be needed to make the model work correctly. model_dynamic_quantized = torch.quantization.quantize_dynamic(model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8) traced_model = torch.jit.trace(model_dynamic_quantized, inputs['input_ids'], strict=False) optimized_traced_model = optimize_for_mobile(traced_model) optimized_traced_model._save_for_lite_interpreter("QuestionAnswering/qa360_quantized.ptl") # 360 is the length of model input, i.e. the length of the tokenized ids of question+text
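# --- Added sanity check (hedged) -----------------------------------------------
# Decoding the answer span from the eager model before export: argmax over the
# start/end logits selects the most likely answer span in the tokenized input.
with torch.no_grad():
    outputs = model(**inputs)
answer_start = torch.argmax(outputs.start_logits)
answer_end = torch.argmax(outputs.end_logits) + 1
print("Answer:", tokenizer.decode(inputs["input_ids"][0, answer_start:answer_end]))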
# based on https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html from __future__ import unicode_literals, print_function, division from io import open import unicodedata import string import re import random import torch import torch.nn as nn from torch import optim import torch.nn.functional as F device = torch.device("cuda" if torch.cuda.is_available() else "cpu") SOS_token = 0 EOS_token = 1 class Lang: def __init__(self, name): self.name = name self.word2index = {} self.word2count = {} self.index2word = {0: "SOS", 1: "EOS"} self.n_words = 2 # Count SOS and EOS def addSentence(self, sentence): for word in sentence.split(' '): self.addWord(word) def addWord(self, word): if word not in self.word2index: self.word2index[word] = self.n_words self.word2count[word] = 1 self.index2word[self.n_words] = word self.n_words += 1 else: self.word2count[word] += 1 # Turn a Unicode string to plain ASCII, thanks to # https://stackoverflow.com/a/518232/2809427 def unicodeToAscii(s): return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' ) def normalizeString(s): s = unicodeToAscii(s.lower().strip()) s = re.sub(r"([.!?])", r" \1", s) s = re.sub(r"[^a-zA-Z.!?]+", r" ", s) return s def readLangs(lang1, lang2, reverse=False): print("Reading lines...") # Read the file and split into lines lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\ read().strip().split('\n') # Split every line into pairs and normalize pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines] # Reverse pairs, make Lang instances if reverse: pairs = [list(reversed(p)) for p in pairs] input_lang = Lang(lang2) output_lang = Lang(lang1) else: input_lang = Lang(lang1) output_lang = Lang(lang2) return input_lang, output_lang, pairs MAX_LENGTH = 50 def filterPair(p): return len(p[0].split(' ')) < MAX_LENGTH and \ len(p[1].split(' ')) < MAX_LENGTH def filterPairs(pairs): return [pair for pair in pairs if filterPair(pair)] def prepareData(lang1, lang2, reverse=False): input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse) print("Read %s sentence pairs" % len(pairs)) pairs = filterPairs(pairs) print("Trimmed to %s sentence pairs" % len(pairs)) print("Counting words...") for pair in pairs: input_lang.addSentence(pair[0]) output_lang.addSentence(pair[1]) print("Counted words:") print(input_lang.name, input_lang.n_words) print(output_lang.name, output_lang.n_words) return input_lang, output_lang, pairs input_lang, output_lang, pairs = prepareData('eng', 'fra', True) print(random.choice(pairs)) class EncoderRNN(nn.Module): def __init__(self, input_size, hidden_size): super(EncoderRNN, self).__init__() self.hidden_size = hidden_size self.embedding = nn.Embedding(input_size, hidden_size) self.gru = nn.GRU(hidden_size, hidden_size) def forward(self, input, hidden): embedded = self.embedding(input).view(1, 1, -1) output = embedded output, hidden = self.gru(output, hidden) return output, hidden def initHidden(self): return torch.zeros(1, 1, self.hidden_size, device=device) class AttnDecoderRNN(nn.Module): def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH): super(AttnDecoderRNN, self).__init__() self.hidden_size = hidden_size self.output_size = output_size self.dropout_p = dropout_p self.max_length = max_length self.embedding = nn.Embedding(self.output_size, self.hidden_size) self.attn = nn.Linear(self.hidden_size * 2, self.max_length) self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size) self.dropout = 
nn.Dropout(self.dropout_p)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size)
        self.out = nn.Linear(self.hidden_size, self.output_size)

    def forward(self, input, hidden, encoder_outputs):
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights

    def initHidden(self):
        return torch.zeros(1, 1, self.hidden_size, device=device)


def indexesFromSentence(lang, sentence):
    return [lang.word2index[word] for word in sentence.split(' ')]


def tensorFromSentence(lang, sentence):
    indexes = indexesFromSentence(lang, sentence)
    indexes.append(EOS_token)
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)


def tensorsFromPair(pair):
    input_tensor = tensorFromSentence(input_lang, pair[0])
    target_tensor = tensorFromSentence(output_lang, pair[1])
    return (input_tensor, target_tensor)


teacher_forcing_ratio = 0.5


def train(input_tensor, target_tensor, encoder, decoder,
          encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
    encoder_hidden = encoder.initHidden()

    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()

    input_length = input_tensor.size(0)
    target_length = target_tensor.size(0)

    encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

    loss = 0

    for ei in range(input_length):
        encoder_output, encoder_hidden = encoder(input_tensor[ei], encoder_hidden)
        encoder_outputs[ei] = encoder_output[0, 0]

    decoder_input = torch.tensor([[SOS_token]], device=device)
    decoder_hidden = encoder_hidden

    use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False

    if use_teacher_forcing:
        # Teacher forcing: Feed the target as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            loss += criterion(decoder_output, target_tensor[di])
            decoder_input = target_tensor[di]  # Teacher forcing
    else:
        # Without teacher forcing: use its own predictions as the next input
        for di in range(target_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach()  # detach from history as input
            loss += criterion(decoder_output, target_tensor[di])
            if decoder_input.item() == EOS_token:
                break

    loss.backward()

    encoder_optimizer.step()
    decoder_optimizer.step()

    return loss.item() / target_length


import time
import math


def asMinutes(s):
    m = math.floor(s / 60)
    s -= m * 60
    return '%dm %ds' % (m, s)


def timeSince(since, percent):
    now = time.time()
    s = now - since
    es = s / (percent)
    rs = es - s
    return '%s (- %s)' % (asMinutes(s), asMinutes(rs))


def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
    start = time.time()
    plot_losses = []
    print_loss_total = 0  # Reset every print_every
    plot_loss_total = 0  # Reset every plot_every

    encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
    decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    training_pairs = [tensorsFromPair(random.choice(pairs)) for i in range(n_iters)]
    criterion = nn.NLLLoss()

    for iter in range(1, n_iters + 1):
        training_pair = training_pairs[iter - 1]
        input_tensor = training_pair[0]
        target_tensor = training_pair[1]

        loss = train(input_tensor, target_tensor, encoder, decoder,
                     encoder_optimizer, decoder_optimizer, criterion)
        print_loss_total += loss
        plot_loss_total += loss

        if iter % print_every == 0:
            print_loss_avg = print_loss_total / print_every
            print_loss_total = 0
            print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
                                         iter, iter / n_iters * 100, print_loss_avg))

        if iter % 150000 == 0:
            torch.save({
                'encoder_state_dict': encoder.state_dict(),
                'decoder_state_dict': decoder.state_dict(),
                'encoder_optimizer_state_dict': encoder_optimizer.state_dict(),
                'decoder_optimizer_state_dict': decoder_optimizer.state_dict(),
            }, "seq2seq_mt_{}.pt".format(iter))


hidden_size = 256
encoder = EncoderRNN(input_lang.n_words, hidden_size).to(device)
decoder = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1).to(device)

# trainIters(encoder, decoder, 450100, print_every=5000)

encoder = EncoderRNN(input_lang.n_words, hidden_size)
decoder = AttnDecoderRNN(hidden_size, output_lang.n_words)
encoder_optimizer = optim.SGD(encoder.parameters(), lr=0.01)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=0.01)

checkpoint = torch.load("seq2seq_mt_150000.pt", map_location=torch.device('cpu'))
encoder.load_state_dict(checkpoint['encoder_state_dict'])
decoder.load_state_dict(checkpoint['decoder_state_dict'])
encoder_optimizer.load_state_dict(checkpoint['encoder_optimizer_state_dict'])
decoder_optimizer.load_state_dict(checkpoint['decoder_optimizer_state_dict'])

encoder.eval()
decoder.eval()

encoder_input = torch.tensor([429])
encoder_hidden = torch.zeros(1, 1, 256)
decoder_input1 = torch.tensor([[0]])
decoder_input2 = torch.zeros(1, 1, 256)
decoder_input3 = torch.zeros(50, 256)

# dynamic quantization can be applied to the decoder for its nn.Linear parameters
quantized_decoder = torch.quantization.quantize_dynamic(decoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)

traced_encoder = torch.jit.trace(encoder, (encoder_input, encoder_hidden))
traced_decoder = torch.jit.trace(quantized_decoder, (decoder_input1, decoder_input2, decoder_input3))

from torch.utils.mobile_optimizer import optimize_for_mobile
traced_encoder_optimized = optimize_for_mobile(traced_encoder)
traced_encoder_optimized._save_for_lite_interpreter("optimized_encoder_150k.ptl")
traced_decoder_optimized = optimize_for_mobile(traced_decoder)
traced_decoder_optimized._save_for_lite_interpreter("optimized_decoder_150k.ptl")
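# --- Standalone sketch (not part of the original script) ---
# Dynamic quantization as used above targets nn.Linear (and RNN) weights, so the
# size win can be verified on a toy module; the toy model and file names below
# are illustrative assumptions, not artifacts of the seq2seq pipeline.
import os
import torch

toy = torch.nn.Sequential(torch.nn.Linear(256, 256), torch.nn.ReLU(), torch.nn.Linear(256, 64))
quantized_toy = torch.quantization.quantize_dynamic(toy, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)

torch.jit.save(torch.jit.script(toy), "toy_fp32.pt")
torch.jit.save(torch.jit.script(quantized_toy), "toy_int8.pt")
# int8 weights should make the second file roughly 4x smaller
print(os.path.getsize("toy_fp32.pt"), "bytes fp32 vs", os.path.getsize("toy_int8.pt"), "bytes int8")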
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

# Load the pretrained DeiT base model from Torch Hub
model = torch.hub.load('facebookresearch/deit:main', 'deit_base_patch16_224', pretrained=True)
model.eval()  # inference mode before quantizing and exporting

# Dynamically quantize the nn.Linear layers, script, and optimize for mobile
quantized_model = torch.quantization.quantize_dynamic(model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
ts_model = torch.jit.script(quantized_model)
optimized_torchscript_model = optimize_for_mobile(ts_model)
optimized_torchscript_model.save("fbdeit.pt")
optimized_torchscript_model._save_for_lite_interpreter("fbdeit.ptl")
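# --- Standalone sketch (assumption: run after the export above) ---
# Sanity-check that the saved TorchScript artifact still runs on desktop.
import torch

loaded = torch.jit.load("fbdeit.pt")
dummy = torch.rand(1, 3, 224, 224)
with torch.no_grad():
    out = loaded(dummy)
print(out.shape)  # expected torch.Size([1, 1000]) for the ImageNet-pretrained DeiT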
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange


class Residual(nn.Module):
    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(x, **kwargs) + x


class PreNorm(nn.Module):
    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        return self.fn(self.norm(x), **kwargs)


class FeedForward(nn.Module):
    def __init__(self, dim, hidden_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Linear(hidden_dim, dim)
        )

    def forward(self, x):
        return self.net(x)


class Attention(nn.Module):
    def __init__(self, dim, heads=8):
        super().__init__()
        self.heads = heads
        self.scale = dim ** -0.5

        self.to_qkv = nn.Linear(dim, dim * 3, bias=False)
        self.to_out = nn.Linear(dim, dim)

    def forward(self, x, mask=None):
        b, n, _, h = *x.shape, self.heads
        qkv = self.to_qkv(x)
        q, k, v = rearrange(qkv, 'b n (qkv h d) -> qkv b h n d', qkv=3, h=h)

        dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale

        if mask is not None:
            mask = F.pad(mask.flatten(1), (1, 0), value=True)
            assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions'
            mask = mask[:, None, :] * mask[:, :, None]
            dots.masked_fill_(~mask, float('-inf'))
            del mask

        attn = dots.softmax(dim=-1)

        out = torch.einsum('bhij,bhjd->bhid', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        out = self.to_out(out)
        return out


class Transformer(nn.Module):
    def __init__(self, dim, depth, heads, mlp_dim):
        super().__init__()
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                Residual(PreNorm(dim, Attention(dim, heads=heads))),
                Residual(PreNorm(dim, FeedForward(dim, mlp_dim)))
            ]))

    def forward(self, x, mask=None):
        for attn, ff in self.layers:
            x = attn(x, mask=mask)
            x = ff(x)
        return x


class ViT(nn.Module):
    def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels=3):
        super().__init__()
        assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
        num_patches = (image_size // patch_size) ** 2
        patch_dim = channels * patch_size ** 2

        self.patch_size = patch_size

        self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
        self.patch_to_embedding = nn.Linear(patch_dim, dim)
        self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
        self.transformer = Transformer(dim, depth, heads, mlp_dim)

        self.to_cls_token = nn.Identity()

        self.mlp_head = nn.Sequential(
            nn.Linear(dim, mlp_dim),
            nn.GELU(),
            nn.Linear(mlp_dim, num_classes)
        )

    def forward(self, img, mask=None):
        p = self.patch_size

        x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1=p, p2=p)
        x = self.patch_to_embedding(x)

        cls_tokens = self.cls_token.expand(img.shape[0], -1, -1)
        x = torch.cat((cls_tokens, x), dim=1)
        x += self.pos_embedding
        x = self.transformer(x, mask)

        x = self.to_cls_token(x[:, 0])
        return self.mlp_head(x)
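# --- Standalone smoke test (dims below are illustrative, not from the original) ---
# Checks that the ViT above produces one logit row per image.
import torch

vit = ViT(image_size=32, patch_size=8, num_classes=10, dim=64, depth=2, heads=4, mlp_dim=128)
imgs = torch.randn(2, 3, 32, 32)
logits = vit(imgs)
print(logits.shape)  # torch.Size([2, 10])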
import torch
import torchvision
import time
from vit_pytorch import *
from torch.utils.mobile_optimizer import optimize_for_mobile

torch.manual_seed(42)

DOWNLOAD_PATH = 'data/mnist'
BATCH_SIZE_TRAIN = 100
BATCH_SIZE_TEST = 1000

# 0.1307 and 0.3081 are the mean and std computed on the MNIST training set
transform_mnist = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.1307,), (0.3081,)),
])

train_set = torchvision.datasets.MNIST(DOWNLOAD_PATH, train=True, download=True, transform=transform_mnist)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE_TRAIN, shuffle=True)

test_set = torchvision.datasets.MNIST(DOWNLOAD_PATH, train=False, download=True, transform=transform_mnist)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=BATCH_SIZE_TEST, shuffle=True)


def train_epoch(model, optimizer, data_loader, loss_history):
    total_samples = len(data_loader.dataset)
    model.train()

    for i, (data, target) in enumerate(data_loader):
        optimizer.zero_grad()
        output = F.log_softmax(model(data), dim=1)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()

        if i % 100 == 0:
            print('[' + '{:5}'.format(i * len(data)) + '/' + '{:5}'.format(total_samples) +
                  ' (' + '{:3.0f}'.format(100 * i / len(data_loader)) + '%)]  Loss: ' +
                  '{:6.4f}'.format(loss.item()))
            loss_history.append(loss.item())


def evaluate(model, data_loader, loss_history):
    model.eval()

    total_samples = len(data_loader.dataset)
    correct_samples = 0
    total_loss = 0

    with torch.no_grad():
        for data, target in data_loader:
            output = F.log_softmax(model(data), dim=1)
            loss = F.nll_loss(output, target, reduction='sum')
            _, pred = torch.max(output, dim=1)

            total_loss += loss.item()
            correct_samples += pred.eq(target).sum()

    avg_loss = total_loss / total_samples
    loss_history.append(avg_loss)
    print('\nAverage test loss: ' + '{:.4f}'.format(avg_loss) +
          '  Accuracy:' + '{:5}'.format(correct_samples) + '/' +
          '{:5}'.format(total_samples) + ' (' +
          '{:4.2f}'.format(100.0 * correct_samples / total_samples) + '%)\n')


N_EPOCHS = 10

start_time = time.time()

model = ViT(image_size=28, patch_size=7, num_classes=10, channels=1, dim=64, depth=6, heads=8, mlp_dim=128)
optimizer = torch.optim.Adam(model.parameters(), lr=0.003)

train_loss_history, test_loss_history = [], []
for epoch in range(1, N_EPOCHS + 1):
    print('Epoch:', epoch)
    train_epoch(model, optimizer, train_loader, train_loss_history)
    evaluate(model, test_loader, test_loss_history)

print('Execution time:', '{:5.2f}'.format(time.time() - start_time), 'seconds')

with torch.no_grad():
    for data, target in test_loader:
        output = F.log_softmax(model(data), dim=1)
        loss = F.nll_loss(output, target, reduction='sum')
        _, pred = torch.max(output, dim=1)

# the original trained model
torch.save(model, "vit4mnist.pt")
model = torch.load("vit4mnist.pt")

model.eval()

quantized_model = torch.quantization.quantize_dynamic(model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
dummy_input = torch.zeros(1, 1, 28, 28)
ts_model = torch.jit.trace(quantized_model, dummy_input)
optimized_torchscript_model = optimize_for_mobile(ts_model)
# the quantized, scripted, and optimized model
optimized_torchscript_model._save_for_lite_interpreter("ViT4MNIST/vit4mnist.ptl")
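# --- Illustrative parity check (not part of the original script) ---
# Dynamic quantization should perturb logits only slightly; most argmax
# predictions of `model` and `quantized_model` defined above should agree.
x = torch.randn(8, 1, 28, 28)
with torch.no_grad():
    fp32_pred = model(x).argmax(dim=1)
    int8_pred = quantized_model(x).argmax(dim=1)
print((fp32_pred == int8_pred).float().mean().item())  # fraction of matching predictions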
#!/usr/bin/env python3

import contextlib
import copy
import os
import unittest
from PIL import Image

import torch
from torch.utils.mobile_optimizer import optimize_for_mobile

from d2go.export.api import convert_and_export_predictor
from d2go.export.d2_meta_arch import patch_d2_meta_arch
from d2go.runner import create_runner, GeneralizedRCNNRunner
from d2go.model_zoo import model_zoo

from mobile_cv.common.misc.file_utils import make_temp_directory

patch_d2_meta_arch()


def test_export_torchvision_format():
    cfg_name = 'faster_rcnn_fbnetv3a_dsmask_C4.yaml'
    pytorch_model = model_zoo.get(cfg_name, trained=True)

    from typing import List, Dict

    class Wrapper(torch.nn.Module):
        def __init__(self, model):
            super().__init__()
            self.model = model
            coco_idx_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21,
                             22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
                             43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
                             62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84,
                             85, 86, 87, 88, 89, 90, 91]
            self.coco_idx = torch.tensor(coco_idx_list)

        def forward(self, inputs: List[torch.Tensor]):
            x = inputs[0].unsqueeze(0) * 255
            scale = 320.0 / min(x.shape[-2], x.shape[-1])
            x = torch.nn.functional.interpolate(x, scale_factor=scale, mode="bilinear",
                                                align_corners=True, recompute_scale_factor=True)
            out = self.model(x[0])
            res: Dict[str, torch.Tensor] = {}
            res["boxes"] = out[0] / scale
            res["labels"] = torch.index_select(self.coco_idx, 0, out[1])
            res["scores"] = out[2]
            return inputs, [res]

    size_divisibility = max(pytorch_model.backbone.size_divisibility, 10)
    h, w = size_divisibility, size_divisibility * 2
    runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
    cfg = model_zoo.get_config(cfg_name)
    datasets = list(cfg.DATASETS.TRAIN)

    data_loader = runner.build_detection_test_loader(cfg, datasets)

    predictor_path = convert_and_export_predictor(
        cfg,
        copy.deepcopy(pytorch_model),
        "torchscript_int8@tracing",
        './',
        data_loader,
    )

    orig_model = torch.jit.load(os.path.join(predictor_path, "model.jit"))
    wrapped_model = Wrapper(orig_model)
    # optionally do a forward
    wrapped_model([torch.rand(3, 600, 600)])
    scripted_model = torch.jit.script(wrapped_model)
    optimized_model = optimize_for_mobile(scripted_model)
    optimized_model.save("D2Go/d2go_optimized.pt")
    optimized_model._save_for_lite_interpreter("D2Go/d2go_optimized.ptl")


if __name__ == '__main__':
    test_export_torchvision_format()
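# --- Illustrative post-processing sketch (helper name and 0.5 threshold are
# assumptions, not part of the exported model) ---
# Wrapper.forward above returns a torchvision-style dict with "boxes" (N x 4),
# "labels" (N,) and "scores" (N,); boolean indexing filters all three together.
import torch

def keep_confident(res, threshold=0.5):
    mask = res["scores"] > threshold
    return {key: value[mask] for key, value in res.items()}

# e.g. _, [res] = wrapped_model([torch.rand(3, 600, 600)]); res = keep_confident(res)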
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile

model = torchvision.models.quantization.mobilenet_v2(pretrained=True, quantize=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
torchscript_model_optimized = optimize_for_mobile(traced_script_module)
torchscript_model_optimized.save("mobilenet_quantized.pt")
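# --- Standalone inference sketch (image path is a placeholder assumption) ---
# Loads the artifact saved above and runs it with standard ImageNet preprocessing.
import torch
import torchvision.transforms as T
from PIL import Image

preprocess = T.Compose([
    T.Resize(256), T.CenterCrop(224), T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
loaded = torch.jit.load("mobilenet_quantized.pt")
img = preprocess(Image.open("example.jpg").convert("RGB")).unsqueeze(0)
with torch.no_grad():
    print(loaded(img).argmax(1).item())  # predicted ImageNet class index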
#!/usr/bin/env python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import distutils.command.clean
import os
import shutil
import subprocess
import sys
from pathlib import Path

from setuptools import find_packages, setup

from tools.setup_helpers.extension import CMakeBuild, get_ext_modules

ROOT_DIR = Path(__file__).parent.resolve()

################################################################################
# Parameters parsed from environment
################################################################################
RUN_BUILD_DEP = True
for _, arg in enumerate(sys.argv):
    if arg in ["clean", "egg_info", "sdist"]:
        RUN_BUILD_DEP = False


def _get_submodule_folders():
    git_modules_path = ROOT_DIR / ".gitmodules"
    if not os.path.exists(git_modules_path):
        return []
    with open(git_modules_path) as f:
        return [
            os.path.join(ROOT_DIR, line.split("=", 1)[1].strip())
            for line in f.readlines()
            if line.strip().startswith("path")
        ]


def _check_submodules():
    def check_for_files(folder, files):
        if not any(os.path.exists(os.path.join(folder, f)) for f in files):
            print("Could not find any of {} in {}".format(", ".join(files), folder))
            print("Did you run 'git submodule update --init --recursive --jobs 0'?")
            sys.exit(1)

    def not_exists_or_empty(folder):
        return not os.path.exists(folder) or (os.path.isdir(folder) and len(os.listdir(folder)) == 0)

    if bool(os.getenv("USE_SYSTEM_LIBS", False)):
        return

    folders = _get_submodule_folders()
    # If none of the submodule folders exists, try to initialize them
    if all(not_exists_or_empty(folder) for folder in folders):
        try:
            import time

            print(" --- Trying to initialize submodules")
            start = time.time()
            subprocess.check_call(["git", "submodule", "update", "--init", "--recursive"], cwd=ROOT_DIR)
            end = time.time()
            print(f" --- Submodule initialization took {end - start:.2f} sec")
        except Exception:
            print(" --- Submodule initialization failed")
            print("Please run:\n\tgit submodule update --init --recursive --jobs 0")
            sys.exit(1)
    for folder in folders:
        check_for_files(folder, ["CMakeLists.txt", "Makefile", "setup.py", "LICENSE", "LICENSE.md", "LICENSE.txt"])


def _get_version():
    with open(os.path.join(ROOT_DIR, "version.txt")) as f:
        version = f.readline().strip()
    sha = "Unknown"
    try:
        sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=str(ROOT_DIR)).decode("ascii").strip()
    except Exception:
        pass

    os_build_version = os.getenv("BUILD_VERSION")
    if os_build_version:
        version = os_build_version
    elif sha != "Unknown":
        version += "+" + sha[:7]

    return version, sha


def _export_version(version, sha):
    version_path = ROOT_DIR / "torchdata" / "version.py"
    with open(version_path, "w") as f:
        f.write(f"__version__ = '{version}'\n")
        f.write(f"git_version = {repr(sha)}\n")


def _get_requirements():
    req_list = []
    with Path("requirements.txt").open("r") as f:
        for line in f:
            req = line.strip()
            if len(req) == 0 or req.startswith("#"):
                continue
            req_list.append(req)
    return req_list


# Use new version of torch on main branch
pytorch_package_dep = "torch>2.0"
if os.getenv("PYTORCH_VERSION"):
    pytorch_package_dep = pytorch_package_dep.split(">")[0]
    pytorch_package_dep += "==" + os.getenv("PYTORCH_VERSION")

requirements = _get_requirements()
requirements.append(pytorch_package_dep)


class clean(distutils.command.clean.clean):
    def run(self):
        # Run default behavior first
        distutils.command.clean.clean.run(self)

        # Remove torchdata extension
        def remove_extension(pattern):
            for path in (ROOT_DIR / "torchdata").glob(pattern):
                print(f"removing extension '{path}'")
                path.unlink()

        for ext in ["so", "dylib", "pyd"]:
            remove_extension("**/*." + ext)

        # Remove build directory
        build_dirs = [
            ROOT_DIR / "build",
        ]
        for path in build_dirs:
            if path.exists():
                print(f"removing '{path}' (and everything under it)")
                shutil.rmtree(str(path), ignore_errors=True)


if __name__ == "__main__":
    VERSION, SHA = _get_version()
    _export_version(VERSION, SHA)

    print("-- Building version " + VERSION)

    if RUN_BUILD_DEP:
        from tools.gen_pyi import gen_pyi

        _check_submodules()
        gen_pyi()

    setup(
        # Metadata
        name="torchdata",
        version=VERSION,
        description="Composable data loading modules for PyTorch",
        long_description=Path("README.md").read_text(encoding="utf-8"),
        long_description_content_type="text/markdown",
        url="https://github.com/pytorch/data",
        author="PyTorch Team",
        author_email="[email protected]",
        license="BSD",
        install_requires=requirements,
        python_requires=">=3.8",
        classifiers=[
            "Intended Audience :: Developers",
            "Intended Audience :: Science/Research",
            "License :: OSI Approved :: BSD License",
            "Operating System :: MacOS :: MacOS X",
            "Operating System :: Microsoft :: Windows",
            "Programming Language :: Python :: 3.8",
            "Programming Language :: Python :: 3.9",
            "Programming Language :: Python :: 3.10",
            "Programming Language :: Python :: 3.11",
            "Programming Language :: Python :: Implementation :: CPython",
            "Topic :: Scientific/Engineering :: Artificial Intelligence",
        ],
        package_data={
            "torchdata": [
                "datapipes/iter/*.pyi",
                "datapipes/map/*.pyi",
            ],
        },
        # Package Info
        packages=find_packages(exclude=["test*", "examples*", "tools*", "torchdata.csrc*", "build*"]),
        zip_safe=False,
        # C++ Extension Modules
        ext_modules=get_ext_modules(),
        cmdclass={
            "build_ext": CMakeBuild,
            "clean": clean,
        },
    )
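# --- Standalone sketch of the version logic in _get_version above
# (the version string and sha below are made-up illustrations) ---
import os

version, sha = "0.7.0", "abc1234def5678"
if os.getenv("BUILD_VERSION"):
    version = os.environ["BUILD_VERSION"]  # release builds pin an exact version
elif sha != "Unknown":
    version += "+" + sha[:7]               # dev builds append the short commit hash
print(version)  # e.g. "0.7.0+abc1234"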
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import os
from pathlib import Path
from typing import Dict, List, Optional, Set

import torch.utils.data.datapipes.gen_pyi as core_gen_pyi
from torch.utils.data.datapipes.gen_pyi import gen_from_template, get_method_definitions


def get_lines_base_file(base_file_path: str, to_skip: Optional[Set[str]] = None):
    with open(base_file_path) as f:
        lines = f.readlines()
        res = []
        if to_skip is None:
            return lines
        for line in lines:
            skip_flag = False
            for skip_line in to_skip:
                if skip_line in line:
                    skip_flag = True
            if not skip_flag:
                line = line.replace("\n", "")
                res.append(line)
        return res


def gen_pyi() -> None:
    DATAPIPE_DIR = Path(__file__).parent.parent.resolve() / "torchdata" / "datapipes"
    print(f"Generating DataPipe Python interface file in {DATAPIPE_DIR}")

    # Base __init__ file
    iter_init_base = get_lines_base_file(
        os.path.join(DATAPIPE_DIR, "iter/__init__.py"),
        {"from torch.utils.data import IterDataPipe", "# Copyright (c) Facebook, Inc. and its affiliates."},
    )

    map_init_base = get_lines_base_file(
        os.path.join(DATAPIPE_DIR, "map/__init__.py"),
        {"from torch.utils.data import MapDataPipe", "# Copyright (c) Facebook, Inc. and its affiliates."},
    )

    # Core Definitions
    core_iter_method_definitions = get_method_definitions(
        core_gen_pyi.iterDP_file_path,
        core_gen_pyi.iterDP_files_to_exclude,
        core_gen_pyi.iterDP_deprecated_files,
        "IterDataPipe",
        core_gen_pyi.iterDP_method_to_special_output_type,
    )

    core_map_method_definitions = get_method_definitions(
        core_gen_pyi.mapDP_file_path,
        core_gen_pyi.mapDP_files_to_exclude,
        core_gen_pyi.mapDP_deprecated_files,
        "MapDataPipe",
        core_gen_pyi.mapDP_method_to_special_output_type,
    )

    # TorchData Definitions
    # IterDataPipes
    iterDP_file_paths: List[str] = ["iter/load", "iter/transform", "iter/util"]
    iterDP_files_to_exclude: Set[str] = {"__init__.py"}
    iterDP_deprecated_files: Set[str] = set()
    iterDP_method_to_special_output_type: Dict[str, str] = {
        "async_map_batches": "IterDataPipe",
        "bucketbatch": "IterDataPipe",
        "dataframe": "torcharrow.DataFrame",
        "end_caching": "IterDataPipe",
        "extract": "IterDataPipe",
        "random_split": "Union[IterDataPipe, List[IterDataPipe]]",
        "read_from_tar": "IterDataPipe",
        "read_from_xz": "IterDataPipe",
        "read_from_zip": "IterDataPipe",
        "round_robin_demux": "List[IterDataPipe]",
        "to_map_datapipe": "MapDataPipe",
        "unzip": "List[IterDataPipe]",
    }
    iter_method_name_exclusion: Set[str] = {"def extract", "read_from_tar", "read_from_xz", "read_from_zip"}

    td_iter_method_definitions = get_method_definitions(
        iterDP_file_paths,
        iterDP_files_to_exclude,
        iterDP_deprecated_files,
        "IterDataPipe",
        iterDP_method_to_special_output_type,
        root=str(DATAPIPE_DIR),
    )

    td_iter_method_definitions = [
        s for s in td_iter_method_definitions if all(ex not in s for ex in iter_method_name_exclusion)
    ]

    iter_method_definitions = core_iter_method_definitions + td_iter_method_definitions

    iter_replacements = [("${init_base}", iter_init_base, 0), ("${IterDataPipeMethods}", iter_method_definitions, 4)]

    gen_from_template(
        dir=str(DATAPIPE_DIR),
        template_name="iter/__init__.pyi.in",
        output_name="iter/__init__.pyi",
        replacements=iter_replacements,
    )

    # MapDataPipes
    mapDP_file_paths: List[str] = ["map/load", "map/transform", "map/util"]
    mapDP_files_to_exclude: Set[str] = {"__init__.py"}
    mapDP_deprecated_files: Set[str] = set()
    mapDP_method_to_special_output_type: Dict[str, str] = {
        "unzip": "List[MapDataPipe]",
        "to_iter_datapipe": "IterDataPipe",
    }
    map_method_name_exclusion: Set[str] = set()

    td_map_method_definitions = get_method_definitions(
        mapDP_file_paths,
        mapDP_files_to_exclude,
        mapDP_deprecated_files,
        "MapDataPipe",
        mapDP_method_to_special_output_type,
        root=str(DATAPIPE_DIR),
    )

    td_map_method_definitions = [
        s for s in td_map_method_definitions if all(ex not in s for ex in map_method_name_exclusion)
    ]

    map_method_definitions = core_map_method_definitions + td_map_method_definitions

    map_replacements = [("${init_base}", map_init_base, 0), ("${MapDataPipeMethods}", map_method_definitions, 4)]

    gen_from_template(
        dir=str(DATAPIPE_DIR),
        template_name="map/__init__.pyi.in",
        output_name="map/__init__.pyi",
        replacements=map_replacements,
    )


if __name__ == "__main__":
    gen_pyi()
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# Script can be used with
# find -name '*.py' | grep -v third_party | perl -ne'print "python tools/todo.py $_"' | head -n 5 | bash

import configparser
import os
import re
import shutil
import sys
import tempfile

from github import Github  # pip install PyGithub

file_name = sys.argv[1]

config = configparser.ConfigParser(allow_no_value=True)
with open(os.path.join(os.path.expanduser("~"), ".ghstackrc")) as stream:
    config.read_string(stream.read())

GITHUB_KEY = config["ghstack"]["github_oauth"]


def get_git_branch_hash():
    stream = os.popen("git rev-parse origin/main")
    return stream.read().rstrip()


def generate_issue_id(id_or_name, title, file_name, line_number):
    git_branch_hash = get_git_branch_hash()
    # print(file_name, line_number, title, id_or_name)
    match = re.match(r"\((\d+)\)", id_or_name)
    if match:
        return int(match.group(1))
    match = re.match(r"\((.*)\)", id_or_name)
    name = None
    if match:
        name = match.group(1)
    if name is not None:
        owner = f"cc @{name}"
    else:
        owner = ""
    g = Github(GITHUB_KEY)
    repo = g.get_repo("pytorch/data")
    # label_be = repo.get_label("better-engineering")
    # labels = [label_be]
    line_reference = f"https://github.com/pytorch/data/blob/{git_branch_hash}/{file_name}#L{line_number}"
    line_reference = line_reference.replace("/./", "/")
    body = """
This issue is generated from the TODO line

{line_reference}

{owner}
""".format(
        owner=owner,
        line_reference=line_reference,
    )
    title = f"[TODO] {title}"
    issue = repo.create_issue(title=title, body=body, labels=[])
    print(f"Created issue https://github.com/pytorch/data/issues/{issue.number}")
    return issue.number


def update_file(file_name):
    try:
        f = tempfile.NamedTemporaryFile(delete=False)
        shutil.copyfile(file_name, f.name)
        with open(f.name) as f_inp:
            with open(file_name, "w") as f_out:
                for line_number, line in enumerate(f_inp.readlines()):
                    if not re.search(r"ignore-todo", line, re.IGNORECASE):
                        match = re.search(r"(.*?)#\s*todo\s*(\([^)]+\)){0,1}:{0,1}(.*)", line, re.IGNORECASE)
                        if match:
                            # print(line)
                            prefix = match.group(1)
                            text = match.group(3)
                            issue_id = generate_issue_id(str(match.group(2)), text, file_name, line_number + 1)
                            line = f"{prefix}# TODO({issue_id}):{text}\n"  # ignore-todo
                    f_out.write(line)
    except Exception as e:
        shutil.copyfile(f.name, file_name)
        raise e
    finally:
        os.unlink(f.name)


update_file(file_name)
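# --- Standalone sketch of the TODO-matching regex used above
# (the sample source lines are made up for illustration) ---
import re

pattern = r"(.*?)#\s*todo\s*(\([^)]+\)){0,1}:{0,1}(.*)"
for line in ["x = 1  # TODO: fix edge case", "y = 2  # TODO(123): done later", "z = 3  # todo(alice) review"]:
    m = re.search(pattern, line, re.IGNORECASE)
    print(m.group(2), "->", m.group(3).strip())
# None -> fix edge case   (no id: a fresh issue gets created)
# (123) -> done later     (numeric id: reused as the issue number)
# (alice) -> review       (name: becomes a "cc @alice" mention)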
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import distutils.sysconfig
import os
import platform
import subprocess
import sys
from pathlib import Path

from setuptools.command.build_ext import build_ext

__all__ = [
    "get_ext_modules",
    "CMakeBuild",
]

_THIS_DIR = Path(__file__).parent.resolve()
_ROOT_DIR = _THIS_DIR.parent.parent.resolve()


def _get_build(var, default=False):
    if var not in os.environ:
        return default

    val = os.environ.get(var, "0")
    trues = ["1", "true", "TRUE", "on", "ON", "yes", "YES"]
    falses = ["0", "false", "FALSE", "off", "OFF", "no", "NO"]
    if val in trues:
        return True
    if val not in falses:
        print(f"WARNING: Unexpected environment variable value `{var}={val}`. "
              f"Expected one of {trues + falses}")
    return False


_BUILD_S3 = _get_build("BUILD_S3", False)
_USE_SYSTEM_AWS_SDK_CPP = _get_build("USE_SYSTEM_AWS_SDK_CPP", False)
_USE_SYSTEM_PYBIND11 = _get_build("USE_SYSTEM_PYBIND11", False)
_USE_SYSTEM_LIBS = _get_build("USE_SYSTEM_LIBS", False)

try:
    # Use the pybind11 from third_party
    if not (_USE_SYSTEM_PYBIND11 or _USE_SYSTEM_LIBS):
        sys.path.insert(0, str(_ROOT_DIR / "third_party/pybind11/"))
    from pybind11.setup_helpers import Pybind11Extension
except ImportError:
    from setuptools import Extension as Pybind11Extension


def get_ext_modules():
    if _BUILD_S3:
        return [Pybind11Extension(name="torchdata._torchdata", sources=[])]
    else:
        return []


class CMakeBuild(build_ext):
    def run(self):
        try:
            subprocess.check_output(["cmake", "--version"])
        except OSError:
            raise RuntimeError("CMake is not available.") from None
        super().run()

    def build_extension(self, ext):
        # Because the following `cmake` command will build all of `ext_modules` at the same time,
        # we would like to prevent multiple calls to `cmake`.
        # Therefore, we call `cmake` only for `torchdata._torchdata`,
        # in case `ext_modules` contains more than one module.
        if ext.name != "torchdata._torchdata":
            return

        extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))

        # required for auto-detection of auxiliary "native" libs
        if not extdir.endswith(os.path.sep):
            extdir += os.path.sep

        debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
        cfg = "Debug" if debug else "Release"

        cmake_args = [
            f"-DCMAKE_BUILD_TYPE={cfg}",
            f"-DCMAKE_INSTALL_PREFIX={extdir}",
            f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}",
            f"-DCMAKE_RUNTIME_OUTPUT_DIRECTORY={extdir}",  # For Windows
            f"-DPython_INCLUDE_DIR={distutils.sysconfig.get_python_inc()}",
            f"-DBUILD_S3:BOOL={'ON' if _BUILD_S3 else 'OFF'}",
            f"-DUSE_SYSTEM_AWS_SDK_CPP:BOOL={'ON' if _USE_SYSTEM_AWS_SDK_CPP else 'OFF'}",
            f"-DUSE_SYSTEM_PYBIND11:BOOL={'ON' if _USE_SYSTEM_PYBIND11 else 'OFF'}",
            f"-DUSE_SYSTEM_LIBS:BOOL={'ON' if _USE_SYSTEM_LIBS else 'OFF'}",
        ]

        build_args = ["--config", cfg]

        # Default to Ninja
        if "CMAKE_GENERATOR" not in os.environ or platform.system() == "Windows":
            cmake_args += ["-GNinja"]
        if platform.system() == "Windows":
            python_version = sys.version_info
            cmake_args += [
                "-DCMAKE_C_COMPILER=cl",
                "-DCMAKE_CXX_COMPILER=cl",
                f"-DPYTHON_VERSION={python_version.major}.{python_version.minor}",
            ]

        # Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
        # across all generators.
        if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
            # self.parallel is a Python 3 only way to set parallel jobs by hand
            # using -j in the build_ext call, not supported by pip or PyPA-build.
            if hasattr(self, "parallel") and self.parallel:
                # CMake 3.12+ only.
                build_args += [f"-j{self.parallel}"]

        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)

        subprocess.check_call(["cmake", str(_ROOT_DIR)] + cmake_args, cwd=self.build_temp)
        subprocess.check_call(["cmake", "--build", "."] + build_args, cwd=self.build_temp)

    def get_ext_filename(self, fullname):
        ext_filename = super().get_ext_filename(fullname)
        ext_filename_parts = ext_filename.split(".")
        without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:]
        ext_filename = ".".join(without_abi)
        return ext_filename
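# --- Standalone sketch of _get_build's env parsing (env values below are made up,
# and setting them inline is for illustration only) ---
import os

os.environ["BUILD_S3"] = "yes"
print(_get_build("BUILD_S3"))          # True  - "yes" is in the accepted truthy list
os.environ["BUILD_S3"] = "maybe"
print(_get_build("BUILD_S3"))          # False - unexpected value warns and falls back
print(_get_build("UNSET_FLAG", True))  # True  - unset variables return the default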
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import io
import os
import unittest

import expecttest

from torchdata.datapipes.iter import GDriveReader, IterableWrapper, OnlineReader


# This TestCase is created due to the limited quota to access google drive
class TestDataPipePeriod(expecttest.TestCase):
    def test_gdrive_iterdatapipe(self):
        amazon_review_url = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM"
        expected_file_name = "amazon_review_polarity_csv.tar.gz"
        expected_MD5_hash = "fe39f8b653cada45afd5792e0f0e8f9b"
        query_params = {"auth": ("fake_username", "fake_password"), "allow_redirects": True}
        timeout = 120
        gdrive_reader_dp = GDriveReader(IterableWrapper([amazon_review_url]), timeout=timeout, **query_params)

        # Functional Test: test if the GDrive Reader can download and read properly
        reader_dp = gdrive_reader_dp.readlines()
        it = iter(reader_dp)
        path, line = next(it)
        self.assertEqual(expected_file_name, os.path.basename(path))
        self.assertTrue(line != b"")

        # Reset Test: gdrive_reader_dp has been read, but we reset when calling check_hash()
        check_cache_dp = gdrive_reader_dp.check_hash({expected_file_name: expected_MD5_hash}, "md5", rewind=False)
        it = iter(check_cache_dp)
        path, stream = next(it)
        self.assertEqual(expected_file_name, os.path.basename(path))
        self.assertTrue(io.BufferedReader, type(stream))

        # __len__ Test: returns the length of source DataPipe
        source_dp = IterableWrapper([amazon_review_url])
        gdrive_dp = GDriveReader(source_dp)
        self.assertEqual(1, len(gdrive_dp))

        # Error Test: test if the GDrive Reader raises an error when the url is invalid
        error_url = "https://drive.google.com/uc?export=download&id=filedoesnotexist"
        http_error_dp = GDriveReader(IterableWrapper([error_url]), timeout=timeout)
        with self.assertRaisesRegex(
            Exception, r"404.+https://drive.google.com/uc\?export=download&id=filedoesnotexist"
        ):
            next(iter(http_error_dp.readlines()))

        # Feature skip-error Test: test if the GDrive Reader skips urls causing problems
        gdrive_skip_error_dp = GDriveReader(
            IterableWrapper([error_url, amazon_review_url]), timeout=timeout, skip_on_error=True
        )
        reader_dp = gdrive_skip_error_dp.readlines()
        with self.assertWarnsRegex(
            Warning, r"404.+https://drive.google.com/uc\?export=download&id=filedoesnotexist.+skipping"
        ):
            it = iter(reader_dp)
            path, line = next(it)
            self.assertEqual(expected_file_name, os.path.basename(path))
            self.assertTrue(line != b"")

    def test_online_iterdatapipe(self):
        license_file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE"
        amazon_review_url = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM"
        expected_license_file_name = "LICENSE"
        expected_amazon_file_name = "amazon_review_polarity_csv.tar.gz"
        expected_license_MD5_hash = "bb9675028dd39d2dd2bf71002b93e66c"
        expected_amazon_MD5_hash = "fe39f8b653cada45afd5792e0f0e8f9b"
        query_params = {"auth": ("fake_username", "fake_password"), "allow_redirects": True}
        timeout = 120

        file_hash_dict = {
            license_file_url: expected_license_MD5_hash,
            expected_amazon_file_name: expected_amazon_MD5_hash,
        }

        # Functional Test: can read from GDrive links
        online_reader_dp = OnlineReader(IterableWrapper([amazon_review_url]), timeout=timeout, **query_params)
        reader_dp = online_reader_dp.readlines()
        it = iter(reader_dp)
        path, line = next(it)
        self.assertEqual(expected_amazon_file_name, os.path.basename(path))
        self.assertTrue(line != b"")

        # Functional Test: can read from other links
        online_reader_dp = OnlineReader(IterableWrapper([license_file_url]))
        reader_dp = online_reader_dp.readlines()
        it = iter(reader_dp)
        path, line = next(it)
        self.assertEqual(expected_license_file_name, os.path.basename(path))
        self.assertTrue(line != b"")

        # Reset Test: reset online_reader_dp by calling check_hash
        check_cache_dp = online_reader_dp.check_hash(file_hash_dict, "md5", rewind=False)
        it = iter(check_cache_dp)
        path, stream = next(it)
        self.assertEqual(expected_license_file_name, os.path.basename(path))
        self.assertTrue(io.BufferedReader, type(stream))

        # Functional Test: works with multiple URLs of different sources
        online_reader_dp = OnlineReader(IterableWrapper([license_file_url, amazon_review_url]))
        check_cache_dp = online_reader_dp.check_hash(file_hash_dict, "md5", rewind=False)
        it = iter(check_cache_dp)
        for expected_file_name, (path, stream) in zip([expected_license_file_name, expected_amazon_file_name], it):
            self.assertEqual(expected_file_name, os.path.basename(path))
            self.assertTrue(io.BufferedReader, type(stream))

        # __len__ Test: returns the length of source DataPipe
        self.assertEqual(2, len(online_reader_dp))

        # Error Test: test if the Online Reader raises an error when the url is invalid
        error_url_http = "https://github.com/pytorch/data/this/url/dont/exist"
        online_error_dp = OnlineReader(IterableWrapper([error_url_http]), timeout=timeout)
        with self.assertRaisesRegex(Exception, f"404.+{error_url_http}"):
            next(iter(online_error_dp.readlines()))

        error_url_gdrive = "https://drive.google.com/uc?export=download&id=filedoesnotexist"
        online_error_dp = OnlineReader(IterableWrapper([error_url_gdrive]), timeout=timeout)
        with self.assertRaisesRegex(
            Exception, r"404.+https://drive.google.com/uc\?export=download&id=filedoesnotexist"
        ):
            next(iter(online_error_dp.readlines()))

        # Feature skip-error Test: test if the Online Reader skips urls causing problems
        online_skip_error_dp = OnlineReader(
            IterableWrapper([error_url_http, error_url_gdrive, license_file_url]), timeout=timeout, skip_on_error=True
        )
        reader_dp = online_skip_error_dp.readlines()
        with self.assertWarnsRegex(Warning, f"404.+{error_url_http}.+skipping"):
            it = iter(reader_dp)
            path, line = next(it)
            self.assertEqual(expected_license_file_name, os.path.basename(path))
            self.assertTrue(b"BSD" in line)


if __name__ == "__main__":
    unittest.main()
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from unittest.mock import MagicMock, patch

import expecttest
from torch.testing._internal.common_utils import IS_SANDCASTLE

from torchdata.datapipes.iter import IterableWrapper, S3FileLister

skipIfSandcastle = unittest.skipIf(IS_SANDCASTLE, "Skip for internal testing")


@skipIfSandcastle
@patch("torchdata._torchdata")
class TestS3FileListerIterDataPipe(expecttest.TestCase):
    def test_list_files(self, mock_torchdata):
        s3handler_mock = MagicMock()
        mock_torchdata.S3Handler.return_value = s3handler_mock
        s3handler_mock.list_files = MagicMock(
            side_effect=[["s3://bucket-name/folder/a.txt", "s3://bucket-name/folder/b.csv"], []]
        )
        s3_prefixes = IterableWrapper(["s3://bucket-name/folder/"])
        dp_s3_urls = S3FileLister(s3_prefixes)
        assert list(dp_s3_urls) == ["s3://bucket-name/folder/a.txt", "s3://bucket-name/folder/b.csv"]

    def test_list_files_with_filter_mask(self, mock_torchdata):
        s3handler_mock = MagicMock()
        mock_torchdata.S3Handler.return_value = s3handler_mock
        s3handler_mock.list_files = MagicMock(
            side_effect=[["s3://bucket-name/folder/a.txt", "s3://bucket-name/folder/b.csv"], []]
        )
        s3_prefixes = IterableWrapper(["s3://bucket-name/folder/"])
        dp_s3_urls = S3FileLister(s3_prefixes, masks="*.csv")
        assert list(dp_s3_urls) == ["s3://bucket-name/folder/b.csv"]
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import os
import unittest
import warnings

import expecttest

from _utils._common_utils_for_test import create_temp_dir, create_temp_files, reset_after_n_next_calls

from torchdata.datapipes.iter import (
    FileLister,
    FSSpecFileLister,
    FSSpecFileOpener,
    FSSpecSaver,
    IterableWrapper,
    IterDataPipe,
)

try:
    import fsspec

    HAS_FSSPEC = True
except ImportError:
    HAS_FSSPEC = False
skipIfNoFSSpec = unittest.skipIf(not HAS_FSSPEC, "no fsspec")


class TestDataPipeFSSpec(expecttest.TestCase):
    def setUp(self):
        self.temp_dir = create_temp_dir()
        self.temp_files = create_temp_files(self.temp_dir)
        self.temp_sub_dir = create_temp_dir(self.temp_dir.name)
        self.temp_sub_files = create_temp_files(self.temp_sub_dir, 4, False)

        self.temp_dir_2 = create_temp_dir()
        self.temp_files_2 = create_temp_files(self.temp_dir_2)
        self.temp_sub_dir_2 = create_temp_dir(self.temp_dir_2.name)
        self.temp_sub_files_2 = create_temp_files(self.temp_sub_dir_2, 4, False)

    def tearDown(self):
        try:
            self.temp_sub_dir.cleanup()
            self.temp_dir.cleanup()
            self.temp_sub_dir_2.cleanup()
            self.temp_dir_2.cleanup()
        except Exception as e:
            warnings.warn(f"TestDataPipeFSSpec was not able to cleanup temp dir due to {e}")

    def _write_text_files(self):
        def filepath_fn(name: str) -> str:
            return os.path.join(self.temp_dir.name, os.path.basename(name))

        name_to_data = {"1.text": b"DATA", "2.text": b"DATA", "3.text": b"DATA"}
        source_dp = IterableWrapper(sorted(name_to_data.items()))
        saver_dp = source_dp.save_to_disk(filepath_fn=filepath_fn, mode="wb")
        list(saver_dp)

    @skipIfNoFSSpec
    def test_fsspec_file_lister_iterdatapipe(self):
        datapipe: IterDataPipe = FSSpecFileLister(root="file://" + self.temp_sub_dir.name)

        # check all file paths within sub_folder are listed
        for path in datapipe:
            self.assertIn(
                path.split("://")[1],
                {fsspec.implementations.local.make_path_posix(file) for file in self.temp_sub_files},
            )

        # checks for functional API
        datapipe = IterableWrapper(["file://" + self.temp_sub_dir.name])
        datapipe = datapipe.list_files_by_fsspec()
        for path in datapipe:
            self.assertIn(
                path.split("://")[1],
                {fsspec.implementations.local.make_path_posix(file) for file in self.temp_sub_files},
            )

    @skipIfNoFSSpec
    def test_fsspec_file_lister_iterdatapipe_with_list(self):
        datapipe: IterDataPipe = FSSpecFileLister(
            root=["file://" + self.temp_sub_dir.name, "file://" + self.temp_sub_dir_2.name]
        )

        # check all file paths within sub_folder are listed
        file_lister = list(map(lambda path: path.split("://")[1], datapipe))
        file_lister.sort()
        temp_files = list(
            map(
                lambda file: fsspec.implementations.local.make_path_posix(file),
                self.temp_sub_files + self.temp_sub_files_2,
            )
        )
        temp_files.sort()
        self.assertEqual(file_lister, temp_files)

        # checks for functional API
        datapipe = IterableWrapper(["file://" + self.temp_sub_dir.name, "file://" + self.temp_sub_dir_2.name])
        datapipe = datapipe.list_files_by_fsspec()
        res = list(map(lambda path: path.split("://")[1], datapipe))
        res.sort()
        temp_files = list(
            map(
                lambda file: fsspec.implementations.local.make_path_posix(file),
                self.temp_sub_files + self.temp_sub_files_2,
            )
        )
        temp_files.sort()
        self.assertEqual(res, temp_files)

    @skipIfNoFSSpec
    def test_fsspec_file_loader_iterdatapipe(self):
        datapipe1 = FSSpecFileLister(root="file://" + self.temp_sub_dir.name)
        datapipe2 = FSSpecFileOpener(datapipe1)
        datapipe3 = FSSpecFileOpener(datapipe1, kwargs_for_open={"encoding": "cp037"})

        # check contents of file match
        for _, f in datapipe2:
            self.assertEqual(f.read(), "0123456789abcdef")

        # Opened with a different encoding, hence NotEqual
        for _, f in datapipe3:
            self.assertNotEqual(f.read(), "0123456789abcdef")

        # Reset Test: Ensure the resulting streams are still readable after the DataPipe is reset/exhausted
        self._write_text_files()
        lister_dp = FileLister(self.temp_dir.name, "*.text")
        fsspec_file_opener_dp = lister_dp.open_files_by_fsspec(mode="rb")

        n_elements_before_reset = 2
        res_before_reset, res_after_reset = reset_after_n_next_calls(fsspec_file_opener_dp, n_elements_before_reset)
        self.assertEqual(2, len(res_before_reset))
        self.assertEqual(3, len(res_after_reset))
        for _name, stream in res_before_reset:
            self.assertEqual(b"DATA", stream.read())
        for _name, stream in res_after_reset:
            self.assertEqual(b"DATA", stream.read())

    @skipIfNoFSSpec
    def test_fsspec_saver_iterdatapipe(self):
        def filepath_fn(name: str) -> str:
            return "file://" + os.path.join(self.temp_dir.name, os.path.basename(name))

        # Functional Test: Saving some data
        name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"}
        source_dp = IterableWrapper(sorted(name_to_data.items()))
        saver_dp = source_dp.save_by_fsspec(filepath_fn=filepath_fn, mode="wb")
        res_file_paths = list(saver_dp)
        expected_paths = [filepath_fn(name) for name in name_to_data.keys()]
        self.assertEqual(expected_paths, res_file_paths)
        for name in name_to_data.keys():
            p = filepath_fn(name).split("://")[1]
            with open(p) as f:
                self.assertEqual(name_to_data[name], f.read().encode())

        # Reset Test:
        saver_dp = FSSpecSaver(source_dp, filepath_fn=filepath_fn, mode="wb")
        n_elements_before_reset = 2
        res_before_reset, res_after_reset = reset_after_n_next_calls(saver_dp, n_elements_before_reset)
        self.assertEqual([filepath_fn("1.txt"), filepath_fn("2.txt")], res_before_reset)
        self.assertEqual(expected_paths, res_after_reset)
        for name in name_to_data.keys():
            p = filepath_fn(name).split("://")[1]
            with open(p) as f:
                self.assertEqual(name_to_data[name], f.read().encode())

        # __len__ Test: returns the length of source DataPipe
        self.assertEqual(3, len(saver_dp))

    @skipIfNoFSSpec
    def test_fsspec_memory_list(self):
        fs = fsspec.filesystem("memory")
        fs.mkdir("foo")
        fs.touch("foo/bar1")
        fs.touch("foo/bar2")

        datapipe = FSSpecFileLister(root="memory://foo")
        self.assertEqual(set(datapipe), {"memory:///foo/bar1", "memory:///foo/bar2"})

        datapipe = FSSpecFileLister(root="memory://foo/bar1")
        self.assertEqual(set(datapipe), {"memory://foo/bar1"})

    @skipIfNoFSSpec
    def test_fsspec_memory_load(self):
        fs = fsspec.filesystem("memory")
        with fs.open("file", "w") as f:
            f.write("hello")
        with fs.open("file2", "w") as f:
            f.write("hello2")

        files = ["memory://file", "memory://file2"]
        datapipe = FSSpecFileOpener(files)
        self.assertEqual([f.read() for _, f in datapipe], ["hello", "hello2"])

    @skipIfNoFSSpec
    def test_fsspec_memory_save(self):
        def filepath_fn(name: str) -> str:
            return "memory://" + name

        name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2"}
        source_dp = IterableWrapper(sorted(name_to_data.items()))
        saver_dp = FSSpecSaver(source_dp, filepath_fn=filepath_fn, mode="wb")

        self.assertEqual(set(saver_dp), {"memory://1.txt", "memory://2.txt"})


if __name__ == "__main__":
    unittest.main()
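# --- Standalone sketch of the fsspec in-memory filesystem the tests above rely on
# (file name is illustrative) ---
import fsspec

fs = fsspec.filesystem("memory")
with fs.open("demo.txt", "w") as f:
    f.write("hello")
with fs.open("demo.txt", "r") as f:
    print(f.read())  # "hello" -- data lives in process memory, no disk I/O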
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import unittest
from unittest.mock import patch

import expecttest

from torchdata.datapipes.iter import HuggingFaceHubReader

try:
    import datasets

    HAS_DATASETS = True
except ImportError:
    HAS_DATASETS = False
skipIfNoDatasets = unittest.skipIf(not HAS_DATASETS, "no datasets")


class TestHuggingFaceHubReader(expecttest.TestCase):
    @skipIfNoDatasets
    @patch("datasets.load_dataset")
    def test_huggingface_hubreader(self, mock_load_dataset):
        mock_load_dataset.return_value = datasets.Dataset.from_dict(
            {
                "id": ["7bd227d9-afc9-11e6-aba1-c4b301cdf627", "7bd22905-afc9-11e6-a5dc-c4b301cdf627"],
                "package_name": ["com.mantz_it.rfanalyzer"] * 2,
            }
        )

        datapipe = HuggingFaceHubReader("lhoestq/demo1", revision="branch", streaming=False, use_auth_token=True)

        iterator = iter(datapipe)
        elem = next(iterator)
        assert type(elem) is dict
        assert elem["id"] == "7bd227d9-afc9-11e6-aba1-c4b301cdf627"
        assert elem["package_name"] == "com.mantz_it.rfanalyzer"
        mock_load_dataset.assert_called_with(
            path="lhoestq/demo1", streaming=False, revision="branch", use_auth_token=True
        )

        with self.assertRaises(StopIteration):
            next(iterator)
            next(iterator)

        with self.assertRaises(TypeError):
            len(datapipe)


if __name__ == "__main__":
    unittest.main()
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import random
import string
import tempfile
import unittest

from torchdata.datapipes.iter import AISFileLister, AISFileLoader

try:
    from aistore.client.api import Client
    from aistore.client.errors import AISError, ErrBckNotFound

    AIS_CLUSTER_ENDPT = "http://localhost:8080"

    HAS_AIS = Client(AIS_CLUSTER_ENDPT).cluster().is_aistore_running()
except (ImportError, ConnectionError):
    HAS_AIS = False
skipIfNoAIS = unittest.skipIf(not HAS_AIS, "AIS not running or library not installed")


@skipIfNoAIS
class TestAIStoreIODataPipe(unittest.TestCase):
    def setUp(self):
        # initialize client and create new bucket
        self.client = Client(AIS_CLUSTER_ENDPT)
        letters = string.ascii_lowercase
        self.bck_name = "".join(random.choice(letters) for _ in range(10))
        self.client.bucket(self.bck_name).create()
        # create temp files
        num_objs = 10

        # create 10 objects in the `/temp` dir
        for i in range(num_objs):
            object_body = "test string" * random.randrange(1, 10)
            content = object_body.encode("utf-8")
            obj_name = f"temp/obj{ i }"
            with tempfile.NamedTemporaryFile() as file:
                file.write(content)
                file.flush()
                self.client.bucket(self.bck_name).object(obj_name).put(file.name)

        # create 10 objects in the `/` dir
        for i in range(num_objs):
            object_body = "test string" * random.randrange(1, 10)
            content = object_body.encode("utf-8")
            obj_name = f"obj{ i }"
            with tempfile.NamedTemporaryFile() as file:
                file.write(content)
                file.flush()
                self.client.bucket(self.bck_name).object(obj_name).put(file.name)

    def tearDown(self):
        # Try to destroy bucket and its items
        try:
            self.client.bucket(self.bck_name).delete()
        except ErrBckNotFound:
            pass

    def test_ais_io_iterdatapipe(self):
        prefixes = [
            ["ais://" + self.bck_name],
            ["ais://" + self.bck_name + "/"],
            ["ais://" + self.bck_name + "/temp/", "ais://" + self.bck_name + "/obj"],
        ]

        # check if the created files exist
        for prefix in prefixes:
            urls = AISFileLister(url=AIS_CLUSTER_ENDPT, source_datapipe=prefix)
            ais_loader = AISFileLoader(url=AIS_CLUSTER_ENDPT, source_datapipe=urls)
            with self.assertRaises(TypeError):
                len(urls)
            self.assertEqual(len(list(urls)), 20)
            self.assertEqual(sum(1 for _ in ais_loader), 20)

        # check for incorrect prefixes
        prefixes = ["ais://asdasd"]

        # AISFileLister: Bucket not found
        try:
            list(AISFileLister(url=AIS_CLUSTER_ENDPT, source_datapipe=prefixes))
        except ErrBckNotFound as err:
            self.assertEqual(err.status_code, 404)

        # AISFileLoader: incorrect inputs
        url_list = [[""], ["ais:"], ["ais://"], ["s3:///unkown-bucket"]]

        for url in url_list:
            with self.assertRaises(AISError):
                file_loader = AISFileLoader(url=AIS_CLUSTER_ENDPT, source_datapipe=url)
                for _ in file_loader:
                    pass


if __name__ == "__main__":
    unittest.main()
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import unittest

from torchdata.dataloader2.linter import _check_shuffle_before_sharding

from torchdata.datapipes.iter import IterableWrapper, ShardingFilter, Shuffler


def dummy_fn(x):
    return x


class LinterTest(unittest.TestCase):
    def test_sharding_shuffle(self):
        source_dp = IterableWrapper(list(range(20)))

        # Single path
        dp = source_dp.map(dummy_fn).shuffle()
        self.assertTrue(_check_shuffle_before_sharding(dp))
        dp = source_dp.map(dummy_fn)
        self.assertTrue(_check_shuffle_before_sharding(dp))
        dp = source_dp.map(dummy_fn).shuffle().sharding_filter()
        self.assertTrue(_check_shuffle_before_sharding(dp))
        dp = source_dp.map(dummy_fn).sharding_filter()
        self.assertFalse(_check_shuffle_before_sharding(dp))
        dp = source_dp.map(dummy_fn).sharding_filter().shuffle()
        self.assertFalse(_check_shuffle_before_sharding(dp))

        # Multiple paths
        def _multi_path_dp_1(shuffle):
            s_dp = source_dp.shuffle() if shuffle else source_dp
            dp1, dp2 = s_dp.unzip(2)
            dp1 = dp1.sharding_filter()
            dp2 = dp2.map(dummy_fn).sharding_filter()
            dp = dp1.zip(dp2)
            return dp

        self.assertTrue(_check_shuffle_before_sharding(_multi_path_dp_1(True)))
        self.assertFalse(_check_shuffle_before_sharding(_multi_path_dp_1(False)))

        def _multi_path_dp_2(shuffle):
            s_dp = source_dp.shuffle() if shuffle else source_dp
            dp1, dp2 = s_dp.unzip(2)
            dp1 = dp1.map(dummy_fn)
            dp = dp1.zip(dp2).sharding_filter()
            return dp

        self.assertTrue(_check_shuffle_before_sharding(_multi_path_dp_2(True)))
        self.assertFalse(_check_shuffle_before_sharding(_multi_path_dp_2(False)))

        def _multi_path_dp_3(shuffle):
            dp1, dp2 = source_dp.unzip(2)
            dp1 = dp1.shuffle() if shuffle else dp1
            dp1 = dp1.map(dummy_fn).sharding_filter()
            dp2 = dp2.shuffle() if shuffle else dp2
            dp2 = dp2.sharding_filter()
            dp = dp1.zip(dp2).map(dummy_fn)
            return dp

        self.assertTrue(_check_shuffle_before_sharding(_multi_path_dp_3(True)))
        self.assertFalse(_check_shuffle_before_sharding(_multi_path_dp_3(False)))

        # Partial paths
        dp1, dp2 = source_dp.unzip(2)
        dp1 = dp1.shuffle().map(dummy_fn)
        dp = dp1.zip(dp2).sharding_filter()

        self.assertFalse(_check_shuffle_before_sharding(dp))


if __name__ == "__main__":
    unittest.main()
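# --- Standalone sketch of why the linter above cares about operator order ---
# sharding_filter() keeps every n-th element per worker, so shuffling must come
# first or each worker sees a fixed, biased slice of the data.
from torchdata.datapipes.iter import IterableWrapper

good = IterableWrapper(range(20)).shuffle().sharding_filter()  # passes the check
bad = IterableWrapper(range(20)).sharding_filter().shuffle()   # flagged by the linter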
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import unittest

from torchdata.dataloader2.random import SeedGenerator
from torchdata.dataloader2.random._philox import PhiloxEngine


class TestPhilox(unittest.TestCase):
    def test_philox_engine_generate(self):
        prng = PhiloxEngine()
        with self.assertRaisesRegex(AssertionError, "Please provide seed"):
            prng.generate()

        prng.seed(123)
        s0 = [prng.generate() for _ in range(10)]

        # Same seed
        prng = PhiloxEngine(seed=123)
        s1 = [prng.generate() for _ in range(10)]
        self.assertEqual(s0, s1)

        # Reset
        prng.seed(123)
        s2 = [prng.generate() for _ in range(10)]
        self.assertEqual(s1, s2)

        # Different seeds
        prng = PhiloxEngine(seed=321)
        s3 = [prng.generate() for _ in range(10)]
        self.assertNotEqual(s0, s3)

    def test_philox_engine_spawn(self):
        prng = PhiloxEngine()
        with self.assertRaisesRegex(AssertionError, "Expected a non-negative value"):
            prng.spawn(-1)
        with self.assertRaisesRegex(AssertionError, "Please provide seed"):
            prng.spawn(0)

        prng.seed(123)
        s0 = [prng.spawn(i)._seed for i in range(10)]

        # Same seed
        prng = PhiloxEngine(seed=123)
        s1 = [prng.spawn(i)._seed for i in range(10)]
        self.assertEqual(s0, s1)

        # Generate after spawn
        sprng1 = prng.spawn(1)
        sprng2 = prng.spawn(1)
        ss1 = [sprng1.generate() for _ in range(10)]
        ss2 = [sprng2.generate() for _ in range(10)]
        self.assertEqual(ss1, ss2)

        sprng3 = prng.spawn(2)
        ss3 = [sprng3.generate() for _ in range(10)]
        self.assertNotEqual(ss1, ss3)

        # Reset
        prng.seed(123)
        s2 = [prng.spawn(i)._seed for i in range(10)]
        self.assertEqual(s1, s2)

        # Different seeds
        prng = PhiloxEngine(seed=321)
        s3 = [prng.spawn(i)._seed for i in range(10)]
        self.assertNotEqual(s0, s3)


class TestSeedGenerator(unittest.TestCase):
    def test_seed_generator_generate(self):
        # Generate seeds
        sg = SeedGenerator(123)
        s0 = [sg.generate_seed() for _ in range(10)]

        # Reset
        sg.seed(123)
        s1 = [sg.generate_seed() for _ in range(10)]
        self.assertEqual(s0, s1)

        # Different Seeds
        sg.seed(321)
        s2 = [sg.generate_seed() for _ in range(10)]
        self.assertNotEqual(s0, s2)

    def test_seed_generator_spawn(self):
        sg = SeedGenerator(123)

        # Spawn new Seed Generators
        sg1 = sg.spawn(1)
        sg2 = sg.spawn(2)

        for _ in range(10):
            self.assertNotEqual(sg1.generate_seed(), sg2.generate_seed())
            # Generate shared seeds
            self.assertEqual(sg1.generate_shared_seed(), sg2.generate_shared_seed())

        sg1_1 = sg.spawn(1)
        sg1_2 = sg.spawn(1)
        for _ in range(10):
            self.assertEqual(sg1_1.generate_seed(), sg1_2.generate_seed())


if __name__ == "__main__":
    unittest.main()
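# --- Toy illustration (explicitly NOT the Philox algorithm) of the
# spawn/generate contract the tests above verify: the same (seed, index)
# always yields the same child seed, and siblings get distinct seeds ---
import hashlib

def child_seed(seed: int, index: int) -> int:
    digest = hashlib.sha256(f"{seed}:{index}".encode()).digest()
    return int.from_bytes(digest[:8], "little")

print(child_seed(123, 1) == child_seed(123, 1))  # True: spawning is deterministic
print(child_seed(123, 1) != child_seed(123, 2))  # True: siblings differ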
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import asyncio import io import itertools import pickle import unittest import warnings from collections import defaultdict from functools import partial from typing import Dict, NamedTuple import expecttest import torch import torchdata from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls from torch.testing._internal.common_utils import suppress_warnings from torch.utils.data.datapipes.utils.snapshot import _simple_graph_snapshot_restoration from torchdata.datapipes.iter import ( BucketBatcher, Cycler, Header, IndexAdder, InMemoryCacheHolder, IterableWrapper, IterDataPipe, IterKeyZipper, LineReader, MapKeyZipper, MaxTokenBucketizer, ParagraphAggregator, Repeater, Rows2Columnar, SampleMultiplexer, ShardExpander, UnZipper, ) from torchdata.datapipes.map import MapDataPipe, SequenceWrapper skipIfNoCUDA = unittest.skipIf(not torch.cuda.is_available(), "CUDA is not available") def test_torchdata_pytorch_consistency() -> None: def extract_datapipe_names(module): return { name for name, dp_type in module.__dict__.items() if not name.startswith("_") and isinstance(dp_type, type) and issubclass(dp_type, IterDataPipe) } pytorch_datapipes = extract_datapipe_names(torch.utils.data.datapipes.iter) torchdata_datapipes = extract_datapipe_names(torchdata.datapipes.iter) missing_datapipes = pytorch_datapipes - torchdata_datapipes deprecated_datapipes = {"FileLoader"} for dp in deprecated_datapipes: if dp in missing_datapipes: missing_datapipes.remove("FileLoader") if any(missing_datapipes): msg = ( "The following datapipes are exposed under `torch.utils.data.datapipes.iter`, " "but not under `torchdata.datapipes.iter`:\n" ) raise AssertionError(msg + "\n".join(sorted(missing_datapipes))) def _convert_to_tensor(data): if isinstance(data, dict): return {k: _convert_to_tensor(v) for k, v in data.items()} elif isinstance(data, list): return [_convert_to_tensor(v) for v in data] return torch.tensor(data) async def _async_mul_ten(x): await asyncio.sleep(0.1) return x * 10 async def _async_x_mul_y(x, y): await asyncio.sleep(0.1) return x * y class NamedTensors(NamedTuple): x: torch.Tensor y: torch.Tensor class TestIterDataPipe(expecttest.TestCase): def test_in_memory_cache_holder_iterdatapipe(self) -> None: source_dp = IterableWrapper(range(10)) cache_dp = source_dp.in_memory_cache(size=5) # Functional Test: Cache DP should just return the data without changing the values res1 = list(cache_dp) self.assertEqual(list(range(10)), res1) # Functional Test: Ensure the objects are the same ones from source DataPipe res1 = list(cache_dp) res2 = list(cache_dp) self.assertTrue(id(source) == id(cache) for source, cache in zip(source_dp, res1)) self.assertTrue(id(source) == id(cache) for source, cache in zip(source_dp, res2)) # TODO(122): Figure out a way to consistently test caching when size is in megabytes # Reset Test: reset the DataPipe after reading part of it cache_dp = InMemoryCacheHolder(source_dp, size=5) n_elements_before_reset = 5 res_before_reset, res_after_reset = reset_after_n_next_calls(cache_dp, n_elements_before_reset) self.assertEqual(list(range(5)), res_before_reset) self.assertEqual(list(range(10)), res_after_reset) # __len__ Test: inherits length from source_dp self.assertEqual(10, len(cache_dp)) # __len__ Test: source_dp has no len and cache is not yet loaded 
source_dp_no_len = IDP_NoLen(range(10)) cache_dp = InMemoryCacheHolder(source_dp_no_len, size=5) with self.assertRaisesRegex(TypeError, "doesn't have valid length until the cache is loaded"): len(cache_dp) # __len__ Test: source_dp has no len but we still can calculate after cache is loaded list(cache_dp) self.assertEqual(10, len(cache_dp)) def test_iter_key_zipper_iterdatapipe(self) -> None: source_dp = IterableWrapper(range(10)) ref_dp = IterableWrapper(range(20)) ref_dp2 = IterableWrapper(range(20)) # Functional Test: Output should be a zip list of tuple zip_dp = source_dp.zip_with_iter( ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=100 ) self.assertEqual([(i, i) for i in range(10)], list(zip_dp)) # Functional Test: keep_key=True, and key should show up as the first element zip_dp_w_key = source_dp.zip_with_iter( ref_datapipe=ref_dp2, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=True, buffer_size=10 ) self.assertEqual([(i, (i, i)) for i in range(10)], list(zip_dp_w_key)) # Functional Test: using a different merge function def merge_to_string(item1, item2): return f"{item1},{item2}" zip_dp_w_str_merge = source_dp.zip_with_iter( ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, buffer_size=10, merge_fn=merge_to_string ) self.assertEqual([f"{i},{i}" for i in range(10)], list(zip_dp_w_str_merge)) # Functional Test: using a different merge function and keep_key=True zip_dp_w_key_str_merge = source_dp.zip_with_iter( ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=True, buffer_size=10, merge_fn=merge_to_string, ) self.assertEqual([(i, f"{i},{i}") for i in range(10)], list(zip_dp_w_key_str_merge)) # Functional Test: testing nested zipping zip_dp = source_dp.zip_with_iter( ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=100 ) # Without a custom merge function, there will be nested tuples zip_dp2 = zip_dp.zip_with_iter( ref_datapipe=ref_dp2, key_fn=lambda x: x[0], ref_key_fn=lambda x: x, keep_key=False, buffer_size=100 ) self.assertEqual([((i, i), i) for i in range(10)], list(zip_dp2)) # With a custom merge function, nesting can be prevented zip_dp2_w_merge = zip_dp.zip_with_iter( ref_datapipe=ref_dp2, key_fn=lambda x: x[0], ref_key_fn=lambda x: x, keep_key=False, buffer_size=100, merge_fn=lambda x, y: list(x) + [y], ) self.assertEqual([[i, i, i] for i in range(10)], list(zip_dp2_w_merge)) # Functional Test: element is in source but missing in reference ref_dp_missing = IterableWrapper(range(1, 10)) zip_dp = source_dp.zip_with_iter( ref_datapipe=ref_dp_missing, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=100 ) with self.assertRaisesRegex(BufferError, r"No matching key can be found"): list(zip_dp) # Functional Test: Buffer is not large enough, hence, element can't be found and raises error ref_dp_end = IterableWrapper(list(range(1, 10)) + [0]) zip_dp = source_dp.zip_with_iter( ref_datapipe=ref_dp_end, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=5 ) it = iter(zip_dp) with warnings.catch_warnings(record=True) as wa: # In order to find '0' at the end, the buffer is filled, hence the warning # and ref_dp is fully traversed self.assertEqual( ( 0, 0, ), next(it), ) self.assertEqual(len(wa), 1) self.assertRegex(str(wa[0].message), r"Buffer reaches the upper limit") with self.assertRaisesRegex(BufferError, r"No matching key can be found"): # '1' cannot be find because the value was thrown out when buffer was 
filled next(it) # Functional Test: Buffer is just big enough zip_dp = source_dp.zip_with_iter( ref_datapipe=ref_dp_end, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=10 ) self.assertEqual([(i, i) for i in range(10)], list(zip_dp)) # Reset Test: reset the DataPipe after reading part of it zip_dp = IterKeyZipper( source_datapipe=source_dp, ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=10, ) n_elements_before_reset = 5 res_before_reset, res_after_reset = reset_after_n_next_calls(zip_dp, n_elements_before_reset) self.assertEqual([(i, i) for i in range(5)], res_before_reset) self.assertEqual([(i, i) for i in range(10)], res_after_reset) # __len__ Test: inherits length from source_dp self.assertEqual(10, len(zip_dp)) def test_map_key_zipper_datapipe(self) -> None: source_dp = IterableWrapper(range(10)) map_dp = SequenceWrapper(["even", "odd"]) # Functional Test: ensure the hash join is working and return tuple by default def odd_even(i: int) -> int: return i % 2 result_dp = source_dp.zip_with_map(map_dp, odd_even) def odd_even_string(i: int) -> str: return "odd" if i % 2 else "even" expected_res = [(i, odd_even_string(i)) for i in range(10)] self.assertEqual(expected_res, list(result_dp)) # Functional Test: ensure that a custom merge function works def custom_merge(a, b): return f"{a} is a {b} number." result_dp = source_dp.zip_with_map(map_dp, odd_even, custom_merge) expected_res2 = [f"{i} is a {odd_even_string(i)} number." for i in range(10)] self.assertEqual(expected_res2, list(result_dp)) # Functional Test: raises error when key is invalid def odd_even_bug(i: int) -> int: return 2 if i == 0 else i % 2 result_dp = MapKeyZipper(source_dp, map_dp, odd_even_bug) it = iter(result_dp) with self.assertRaisesRegex(KeyError, "is not a valid key in the given MapDataPipe"): next(it) # Functional test: ensure that keep_key option works result_dp = source_dp.zip_with_map(map_dp, odd_even, keep_key=True) expected_res_keep_key = [(key, (i, odd_even_string(i))) for i, key in zip(range(10), [0, 1] * 5)] self.assertEqual(expected_res_keep_key, list(result_dp)) # Reset Test: n_elements_before_reset = 4 result_dp = source_dp.zip_with_map(map_dp, odd_even) res_before_reset, res_after_reset = reset_after_n_next_calls(result_dp, n_elements_before_reset) self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset) self.assertEqual(expected_res, res_after_reset) # __len__ Test: returns the length of source DataPipe result_dp = source_dp.zip_with_map(map_dp, odd_even) self.assertEqual(len(source_dp), len(result_dp)) def test_prefetcher_iterdatapipe(self) -> None: source_dp = IterableWrapper(range(5000)) prefetched_dp = source_dp.prefetch(10) # check if early termination resets child thread properly for _, _ in zip(range(100), prefetched_dp): pass expected = list(source_dp) actual = list(prefetched_dp) self.assertEqual(expected, actual) # __len__ Test: returns the same length as source self.assertEqual(len(source_dp), len(prefetched_dp)) def test_repeater_iterdatapipe(self) -> None: import itertools source_dp = IterableWrapper(range(5)) # Functional Test: repeat for correct number of times repeater_dp = source_dp.repeat(3) self.assertEqual( list(itertools.chain.from_iterable(itertools.repeat(x, 3) for x in range(5))), list(repeater_dp) ) # Functional Test: `times` must be > 1 with self.assertRaisesRegex(ValueError, "The number of repetition must be > 1"): source_dp.repeat(1) # Reset Test: repeater_dp = Repeater(source_dp, times=2) 
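        # With times=2 the repeated stream is 0, 0, 1, 1, 2, 2, ..., so the four
        # next() calls before the reset yield [0, 0, 1, 1]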
n_elements_before_reset = 4 res_before_reset, res_after_reset = reset_after_n_next_calls(repeater_dp, n_elements_before_reset) self.assertEqual([0, 0, 1, 1], res_before_reset) self.assertEqual(list(itertools.chain.from_iterable(itertools.repeat(x, 2) for x in range(5))), res_after_reset) # __len__ Test: returns correct length self.assertEqual(10, len(repeater_dp)) def test_cycler_iterdatapipe(self) -> None: source_dp = IterableWrapper(range(5)) # Functional Test: cycle for finite number of times and ends cycler_dp = source_dp.cycle(3) self.assertEqual(list(range(5)) * 3, list(cycler_dp)) # Functional Test: cycle for indefinitely cycler_dp = source_dp.cycle() it = iter(cycler_dp) for expected_val in list(range(5)) * 10: self.assertEqual(expected_val, next(it)) # Functional Test: zero is allowed but immediately triggers StopIteration cycler_dp = source_dp.cycle(0) self.assertEqual([], list(cycler_dp)) # Functional Test: negative value is not allowed with self.assertRaisesRegex(ValueError, "Expected non-negative count"): source_dp.cycle(-1) # Reset Test: cycler_dp = Cycler(source_dp, count=2) n_elements_before_reset = 4 res_before_reset, res_after_reset = reset_after_n_next_calls(cycler_dp, n_elements_before_reset) self.assertEqual(list(range(4)), res_before_reset) self.assertEqual(list(range(5)) * 2, res_after_reset) # __len__ Test: returns length when count is not None self.assertEqual(10, len(cycler_dp)) # __len__ Test: inherits length from source_dp cycler_dp = Cycler(source_dp) with self.assertRaisesRegex(TypeError, "instance cycles forever, and therefore doesn't have valid length"): len(cycler_dp) def test_header_iterdatapipe(self) -> None: # Functional Test: ensure the limit is enforced source_dp = IterableWrapper(range(20)) header_dp = source_dp.header(5) self.assertEqual(list(range(5)), list(header_dp)) # Functional Test: ensure it works when the source has less elements than the limit source_dp = IterableWrapper(range(5)) header_dp = source_dp.header(100) self.assertEqual(list(range(5)), list(header_dp)) # Functional Test: ensure the source is not modified if limit is set to None source_dp = IterableWrapper(range(5)) header_dp = source_dp.header(None) self.assertEqual(list(range(5)), list(header_dp)) # Reset Test: source_dp = IterableWrapper(range(20)) header_dp = Header(source_dp, 5) n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(header_dp, n_elements_before_reset) self.assertEqual(list(range(2)), res_before_reset) self.assertEqual(list(range(5)), res_after_reset) self.assertEqual(list(range(5)), list(header_dp)) # __len__ Test: returns the limit when it is less than the length of source self.assertEqual(5, len(header_dp)) # __len__ Test: returns the length of source when it is less than the limit header_dp = source_dp.header(30) self.assertEqual(20, len(header_dp)) # __len__ Test: returns the length of source when limit is set to None header_dp = source_dp.header(None) self.assertEqual(20, len(header_dp)) # __len__ Test: returns limit if source doesn't have length source_dp_NoLen = IDP_NoLen(list(range(20))) header_dp = source_dp_NoLen.header(30) with warnings.catch_warnings(record=True) as wa: self.assertEqual(30, len(header_dp)) self.assertEqual(len(wa), 1) self.assertRegex( str(wa[0].message), r"length of this HeaderIterDataPipe is inferred to be equal to its limit" ) # __len__ Test: raises TypeError if source doesn't have length and limit is set to None header_dp = source_dp_NoLen.header(None) with self.assertRaisesRegex(TypeError, 
"The length of this HeaderIterDataPipe cannot be determined."): len(header_dp) # __len__ Test: returns limit if source doesn't have length, even when it has been iterated through once header_dp = source_dp_NoLen.header(30) for _ in header_dp: pass self.assertEqual(30, len(header_dp)) def test_enumerator_iterdatapipe(self) -> None: letters = "abcde" source_dp = IterableWrapper(letters) enum_dp = source_dp.enumerate() # Functional Test: ensure that the correct index value is added to each element (tuple) self.assertEqual([(0, "a"), (1, "b"), (2, "c"), (3, "d"), (4, "e")], list(enum_dp)) # Functional Test: start index from non-zero enum_dp = source_dp.enumerate(starting_index=10) self.assertEqual([(10, "a"), (11, "b"), (12, "c"), (13, "d"), (14, "e")], list(enum_dp)) # Reset Test: n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(enum_dp, n_elements_before_reset) self.assertEqual([(10, "a"), (11, "b")], res_before_reset) self.assertEqual([(10, "a"), (11, "b"), (12, "c"), (13, "d"), (14, "e")], res_after_reset) # __len__ Test: returns length of source DataPipe self.assertEqual(5, len(enum_dp)) def test_index_adder_iterdatapipe(self) -> None: letters = "abcdefg" source_dp = IterableWrapper([{i: i} for i in letters]) index_adder_dp = source_dp.add_index() it = iter(index_adder_dp) def dict_content_test_helper(iterator): for i, curr_dict in enumerate(iterator): self.assertEqual(i, curr_dict["index"]) self.assertTrue(letters[i] in curr_dict) # Functional Test: ensure that the correct index value is added to each element (dict) dict_content_test_helper(it) # Functional Test: raises error when the elements of source_dp is not of type Dict source_dp = IterableWrapper(range(10)) index_adder_dp = source_dp.add_index() it = iter(index_adder_dp) with self.assertRaisesRegex(NotImplementedError, "We only support adding index to row or batch in dict type"): next(it) # Reset Test source_dp = IterableWrapper([{i: i} for i in "abcdefg"]) index_adder_dp = IndexAdder(source_dp) n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(index_adder_dp, n_elements_before_reset) dict_content_test_helper(iter(res_before_reset)) dict_content_test_helper(iter(res_after_reset)) # __len__ Test: returns length of source DataPipe self.assertEqual(7, len(index_adder_dp)) def test_line_reader_iterdatapipe(self) -> None: text1 = "Line1\nLine2" text2 = "Line2,1\r\nLine2,2\r\nLine2,3" # Functional Test: read lines correctly source_dp = IterableWrapper([("file1", io.StringIO(text1)), ("file2", io.StringIO(text2))]) line_reader_dp = source_dp.readlines() expected_result = [("file1", line) for line in text1.splitlines()] + [ ("file2", line) for line in text2.splitlines() ] self.assertEqual(expected_result, list(line_reader_dp)) # Functional Test: strip new lines for bytes source_dp = IterableWrapper( [("file1", io.BytesIO(text1.encode("utf-8"))), ("file2", io.BytesIO(text2.encode("utf-8")))] ) line_reader_dp = source_dp.readlines() expected_result_bytes = [("file1", line.encode("utf-8")) for line in text1.splitlines()] + [ ("file2", line.encode("utf-8")) for line in text2.splitlines() ] self.assertEqual(expected_result_bytes, list(line_reader_dp)) # Functional Test: do not strip new lines source_dp = IterableWrapper([("file1", io.StringIO(text1)), ("file2", io.StringIO(text2))]) line_reader_dp = source_dp.readlines(strip_newline=False) expected_result = [ ("file1", "Line1\n"), ("file1", "Line2"), ("file2", "Line2,1\r\n"), ("file2", "Line2,2\r\n"), ("file2", 
"Line2,3"), ] self.assertEqual(expected_result, list(line_reader_dp)) # Reset Test: source_dp = IterableWrapper([("file1", io.StringIO(text1)), ("file2", io.StringIO(text2))]) line_reader_dp = LineReader(source_dp, strip_newline=False) n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(line_reader_dp, n_elements_before_reset) self.assertEqual(expected_result[:n_elements_before_reset], res_before_reset) self.assertEqual(expected_result, res_after_reset) # __len__ Test: length isn't implemented since it cannot be known ahead of time with self.assertRaisesRegex(TypeError, "has no len"): len(line_reader_dp) def test_paragraph_aggregator_iterdatapipe(self) -> None: # Functional Test: aggregate lines correctly source_dp = IterableWrapper( [("file1", "Line1"), ("file1", "Line2"), ("file2", "Line2,1"), ("file2", "Line2,2"), ("file2", "Line2,3")] ) para_agg_dp = source_dp.lines_to_paragraphs() self.assertEqual([("file1", "Line1\nLine2"), ("file2", "Line2,1\nLine2,2\nLine2,3")], list(para_agg_dp)) # Functional Test: aggregate lines correctly with different joiner para_agg_dp = source_dp.lines_to_paragraphs(joiner=lambda ls: " ".join(ls)) self.assertEqual([("file1", "Line1 Line2"), ("file2", "Line2,1 Line2,2 Line2,3")], list(para_agg_dp)) # Reset Test: each yield is for a single file para_agg_dp = ParagraphAggregator(source_dp) n_elements_before_reset = 1 res_before_reset, res_after_reset = reset_after_n_next_calls(para_agg_dp, n_elements_before_reset) self.assertEqual([("file1", "Line1\nLine2")], res_before_reset) self.assertEqual([("file1", "Line1\nLine2"), ("file2", "Line2,1\nLine2,2\nLine2,3")], res_after_reset) # __len__ Test: length isn't implemented since it cannot be known ahead of time with self.assertRaisesRegex(TypeError, "has no len"): len(para_agg_dp) def test_rows_to_columnar_iterdatapipe(self) -> None: # Functional Test: working with DataPipe with dict column_names_dict = {"a", "b", "c"} source_dp = IterableWrapper( [ [{l: i for i, l in enumerate("abc")}, {l: i * 10 for i, l in enumerate("abc")}], [{l: i + 100 for i, l in enumerate("abc")}, {l: (i + 100) * 10 for i, l in enumerate("abc")}], ] ) result_dp = source_dp.rows2columnar(column_names_dict) batch1 = defaultdict(list, {"a": [0, 0], "b": [1, 10], "c": [2, 20]}) batch2 = defaultdict(list, {"a": [100, 1000], "b": [101, 1010], "c": [102, 1020]}) expected_output = [batch1, batch2] self.assertEqual(expected_output, list(result_dp)) # Functional Test: working with DataPipe with list column_names_list = ["a", "b", "c"] source_dp = IterableWrapper( [ [[i for i, _ in enumerate("abc")], [i * 10 for i, _ in enumerate("abc")]], [[i + 100 for i, _ in enumerate("abc")], [(i + 100) * 10 for i, _ in enumerate("abc")]], ] ) result_dp = source_dp.rows2columnar(column_names_list) self.assertEqual(expected_output, list(result_dp)) # Reset Test: result_dp = Rows2Columnar(source_dp, column_names_list) n_elements_before_reset = 1 res_before_reset, res_after_reset = reset_after_n_next_calls(result_dp, n_elements_before_reset) self.assertEqual([expected_output[0]], res_before_reset) self.assertEqual(expected_output, res_after_reset) # __len__ Test: returns length of source DataPipe self.assertEqual(2, len(result_dp)) def test_sample_multiplexer_iterdatapipe(self) -> None: # Functional Test: yields all values from the sources source_dp1 = IterableWrapper([0] * 10) source_dp2 = IterableWrapper([1] * 10) d: Dict[IterDataPipe, float] = {source_dp1: 99999999, source_dp2: 0.0000001} sample_mul_dp = 
SampleMultiplexer(pipes_to_weights_dict=d, seed=0) result = list(sample_mul_dp) self.assertEqual([0] * 10 + [1] * 10, result) # Functional Test: raises error for empty dict with self.assertRaisesRegex(ValueError, "Empty dictionary"): SampleMultiplexer(pipes_to_weights_dict={}, seed=0) # type: ignore[arg-type] # Functional Test: raises error for negative or zero weight d = {source_dp1: 99999999, source_dp2: 0} with self.assertRaisesRegex(ValueError, "Expecting a positive and non-zero weight"): SampleMultiplexer(pipes_to_weights_dict=d, seed=0) # Reset Test d = {source_dp1: 99999999, source_dp2: 0.0000001} sample_mul_dp = SampleMultiplexer(pipes_to_weights_dict=d, seed=0) n_elements_before_reset = 5 res_before_reset, res_after_reset = reset_after_n_next_calls(sample_mul_dp, n_elements_before_reset) self.assertEqual([0] * n_elements_before_reset, res_before_reset) self.assertEqual([0] * 10 + [1] * 10, res_after_reset) # __len__ Test: returns the sum of the lengths of the sources self.assertEqual(20, len(sample_mul_dp)) def test_in_batch_shuffler_iterdatapipe(self): input_dp = IterableWrapper(list(range(23))).batch(3) expected = list(input_dp) # Functional Test: No seed shuffler_dp = input_dp.in_batch_shuffle() for exp, res in zip(expected, shuffler_dp): self.assertEqual(sorted(res), exp) # Functional Test: With global seed torch.manual_seed(123) res = list(shuffler_dp) torch.manual_seed(123) self.assertEqual(list(shuffler_dp), res) # Functional Test: Set seed shuffler_dp = input_dp.in_batch_shuffle().set_seed(123) res = list(shuffler_dp) shuffler_dp.set_seed(123) self.assertEqual(list(shuffler_dp), res) # Functional Test: deactivate shuffling via set_shuffle unshuffled_dp = shuffler_dp.set_shuffle(False) self.assertEqual(list(unshuffled_dp), expected) # Reset Test: shuffler_dp = input_dp.in_batch_shuffle() n_elements_before_reset = 5 res_before_reset, res_after_reset = reset_after_n_next_calls(shuffler_dp, n_elements_before_reset) self.assertEqual(5, len(res_before_reset)) for exp, res in zip(expected, res_before_reset): self.assertEqual(sorted(res), exp) for exp, res in zip(expected, res_after_reset): self.assertEqual(sorted(res), exp) # __len__ Test: returns the length of the input DataPipe shuffler_dp = input_dp.in_batch_shuffle() self.assertEqual(8, len(shuffler_dp)) # Serialization Test from torch.utils.data.datapipes._hook_iterator import _SnapshotState shuffler_dp = input_dp.in_batch_shuffle() it = iter(shuffler_dp) for _ in range(2): next(it) shuffler_dp_copy = pickle.loads(pickle.dumps(shuffler_dp)) _simple_graph_snapshot_restoration(shuffler_dp_copy.datapipe, shuffler_dp.datapipe._number_of_samples_yielded) exp = list(it) shuffler_dp_copy._snapshot_state = _SnapshotState.Restored self.assertEqual(exp, list(shuffler_dp_copy)) def test_bucket_batcher_iterdatapipe(self) -> None: source_dp = IterableWrapper(range(10)) # Functional Test: drop last reduces length batch_dp = source_dp.bucketbatch( batch_size=3, drop_last=True, batch_num=100, bucket_num=1, use_in_batch_shuffle=True ) self.assertEqual(9, len(list(batch_dp.unbatch()))) # Functional Test: drop last is False preserves length batch_dp = source_dp.bucketbatch( batch_size=3, drop_last=False, batch_num=100, bucket_num=1, use_in_batch_shuffle=False ) self.assertEqual(10, len(list(batch_dp.unbatch()))) def _return_self(x): return x # Functional Test: using sort_key, with in_batch_shuffle batch_dp = source_dp.bucketbatch( batch_size=3, drop_last=True, batch_num=100, bucket_num=1, use_in_batch_shuffle=True, sort_key=_return_self ) # 
bucket_num = 1 means there will be no shuffling if a sort key is given self.assertEqual([[0, 1, 2], [3, 4, 5], [6, 7, 8]], list(batch_dp)) self.assertEqual(9, len(list(batch_dp.unbatch()))) # Functional Test: using sort_key, without use_in_batch_shuffle batch_dp = source_dp.bucketbatch( batch_size=3, drop_last=True, batch_num=100, bucket_num=2, use_in_batch_shuffle=False, sort_key=_return_self ) self.assertEqual(9, len(list(batch_dp.unbatch()))) # Reset Test: batch_dp = BucketBatcher( source_dp, batch_size=3, drop_last=True, batch_num=100, bucket_num=2, use_in_batch_shuffle=False, sort_key=_return_self, ) n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(batch_dp, n_elements_before_reset) self.assertEqual(n_elements_before_reset, len(res_before_reset)) self.assertEqual(6, len([item for batch in res_before_reset for item in batch])) self.assertEqual(3, len(res_after_reset)) self.assertEqual(9, len([item for batch in res_after_reset for item in batch])) # __len__ Test: returns the number of batches with self.assertRaises(TypeError): len(batch_dp) def test_max_token_bucketizer_iterdatapipe(self) -> None: source_data = ["1" * d for d in range(1, 6)] + ["2" * d for d in range(1, 6)] source_dp = IterableWrapper(source_data) # Functional Test: Invalid arguments with self.assertRaisesRegex(ValueError, "``min_len`` should be larger than 0"): source_dp.max_token_bucketize(max_token_count=2, min_len=-1) with self.assertRaisesRegex(ValueError, "``min_len`` should be larger than 0"): source_dp.max_token_bucketize(max_token_count=2, min_len=3, max_len=2) with self.assertRaises(ValueError, msg="``max_token_count`` must be equal to or greater than ``max_len``."): source_dp.max_token_bucketize(max_token_count=2, max_len=3) def _validate_batch_size(res, exp_batch_len, len_fn=lambda d: len(d)): self.assertEqual(len(res), len(exp_batch_len)) for batch, exp_token_lens in zip(res, exp_batch_len): self.assertEqual(len(batch), len(exp_token_lens)) for token, exp_token_len in zip(batch, exp_token_lens): self.assertEqual(len_fn(token), exp_token_len) # Functional Test: Filter out min_len batch_dp = source_dp.max_token_bucketize(max_token_count=5, min_len=2, buffer_size=10) exp_batch_len = [(2, 2), (3,), (3,), (4,), (4,), (5,), (5,)] _validate_batch_size(list(batch_dp), exp_batch_len) # Functional Test: Filter out max_len batch_dp = source_dp.max_token_bucketize(max_token_count=5, max_len=4, buffer_size=10) exp_batch_len = [(1, 1, 2), (2, 3), (3,), (4,), (4,)] _validate_batch_size(list(batch_dp), exp_batch_len) def _custom_len_fn(token): return len(token) + 1 # Functional Test: Custom length function batch_dp = source_dp.max_token_bucketize(max_token_count=7, len_fn=_custom_len_fn, buffer_size=10) exp_batch_len = [(1, 1, 2), (2, 3), (3,), (4,), (4,), (5,), (5,)] _validate_batch_size(list(batch_dp), exp_batch_len) # Functional Test: Small buffer batch_dp = source_dp.max_token_bucketize(max_token_count=10, buffer_size=4) exp_batch_len = [(1, 2, 1, 2, 3), (3, 4), (4, 5), (5,)] _validate_batch_size(list(batch_dp), exp_batch_len) # Reset Test: batch_dp = MaxTokenBucketizer(source_dp, max_token_count=5, buffer_size=10) n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(batch_dp, n_elements_before_reset) exp_batch_len_before_reset = [(1, 1, 2), (2, 3)] exp_batch_len_after_reset = [(1, 1, 2), (2, 3), (3,), (4,), (4,), (5,), (5,)] _validate_batch_size(res_before_reset, exp_batch_len_before_reset) _validate_batch_size(res_after_reset, 
exp_batch_len_after_reset) # Functional test: Padded tokens exceeding max_token_count source_data = ["111", "1111", "11111"] # 3, 4, 5 source_dp = IterableWrapper(source_data) batch_dp = source_dp.max_token_bucketize(max_token_count=7) exp_batch_len = [(3, 4), (5,)] _validate_batch_size(list(batch_dp), exp_batch_len) # Functional test: Padded tokens not exceeding max_token_count source_data = ["111", "111", "111", "1111"] # 3, 3, 3, 4 source_dp = IterableWrapper(source_data) batch_dp = source_dp.max_token_bucketize(max_token_count=7, include_padding=True) exp_batch_len = [(3, 3), (3,), (4,)] _validate_batch_size(list(batch_dp), exp_batch_len) # Functional test: sample length exceeding max_token_count source_data = ["111"] source_dp = IterableWrapper(source_data) batch_dp = source_dp.max_token_bucketize(max_token_count=2) exp_batch = [] self.assertEqual(list(batch_dp), exp_batch) # Functional test: incomparable data for heapq def _custom_len_fn(data): return data["len"] source_data = [{"len": 1}, {"len": 2}, {"len": 1}, {"len": 3}, {"len": 1}] source_dp = IterableWrapper(source_data) batch_dp = source_dp.max_token_bucketize(max_token_count=3, len_fn=_custom_len_fn) exp_batch_len = [(1, 1, 1), (2,), (3,)] _validate_batch_size(list(batch_dp), exp_batch_len, len_fn=_custom_len_fn) # __len__ Test: returns the number of batches with self.assertRaises(TypeError): len(batch_dp) def test_map_batches_iterdatapipe(self): source_dp = IterableWrapper(list(range(20))) def fn(batch): return [d + 1 for d in batch] batch_mapped_dp = source_dp.map_batches(fn, batch_size=9) expected_list = list(range(1, 21)) self.assertEqual(expected_list, list(batch_mapped_dp)) # Reset Test: reset the DataPipe after reading part of it n_elements_before_reset = 5 res_before_reset, res_after_reset = reset_after_n_next_calls(batch_mapped_dp, n_elements_before_reset) self.assertEqual(expected_list[:n_elements_before_reset], res_before_reset) self.assertEqual(expected_list, res_after_reset) # Functional Test: Different sizes between input and output def fn_less(batch): return [batch[idx] // 2 for idx in range(0, len(batch), 2)] less_batch_mapped_dp = source_dp.map_batches(fn_less, batch_size=8) self.assertEqual(list(range(10)), list(less_batch_mapped_dp)) # Functional Test: Specify input_col source_dp = IterableWrapper([(d - 1, d, d + 1) for d in range(20)]) batch_mapped_input_1_dp = source_dp.map_batches(fn, batch_size=9, input_col=0) self.assertEqual(list(range(20)), list(batch_mapped_input_1_dp)) def fn_2_cols(batch): return [(d1, d2 - 1) for d1, d2 in batch] batch_mapped_input_2_dp = source_dp.map_batches(fn_2_cols, batch_size=9, input_col=[1, 2]) self.assertEqual([(d, d) for d in range(20)], list(batch_mapped_input_2_dp)) # __len__ Test: length should be determined by ``fn`` which we can't know with self.assertRaisesRegex(TypeError, "length relies on the output of its function."): len(batch_mapped_dp) def test_flatmap_iterdatapipe(self): source_dp = IterableWrapper(list(range(20))) def fn(e): return [e, e * 10] flatmapped_dp = source_dp.flatmap(fn) expected_list = list(itertools.chain(*[(e, e * 10) for e in source_dp])) self.assertEqual(expected_list, list(flatmapped_dp)) # Funtional Test: Specify input_col tuple_source_dp = IterableWrapper([(d - 1, d, d + 1) for d in range(20)]) # Single input_col input_col_1_dp = tuple_source_dp.flatmap(fn, input_col=1) self.assertEqual(expected_list, list(input_col_1_dp)) # Multiple input_col def mul_fn(a, b): return [a - b, b - a] input_col_2_dp = tuple_source_dp.flatmap(mul_fn, 
input_col=(0, 2)) self.assertEqual(list(itertools.chain(*[(-2, 2) for _ in range(20)])), list(input_col_2_dp)) # flatmap with no fn specified default_dp = tuple_source_dp.flatmap() self.assertEqual(list(itertools.chain(*[(n - 1, n, n + 1) for n in range(20)])), list(default_dp)) # flatmap with no fn specified, multiple input_col default_dp = tuple_source_dp.flatmap(input_col=(0, 2)) self.assertEqual(list(itertools.chain(*[(n - 1, n + 1) for n in range(20)])), list(default_dp)) # flatmap with no fn specified, some special input tuple_source_dp = IterableWrapper([[1, 2, [3, 4]], [5, 6, [7, 8]]]) default_dp = tuple_source_dp.flatmap(input_col=(0, 2)) self.assertEqual([1, [3, 4], 5, [7, 8]], list(default_dp)) # Reset Test: reset the DataPipe after reading part of it n_elements_before_reset = 5 res_before_reset, res_after_reset = reset_after_n_next_calls(flatmapped_dp, n_elements_before_reset) self.assertEqual(expected_list[:n_elements_before_reset], res_before_reset) self.assertEqual(expected_list, res_after_reset) # __len__ Test: length should be len(source_dp)*len(fn->out_shape) which we can't know with self.assertRaisesRegex(TypeError, "length relies on the output of its function."): len(flatmapped_dp) def test_shuffled_flatmap_iterdatapipe(self): source_dp = IterableWrapper(list(range(20))) def fn(e): return [e, e * 10] # Tests with buffer_size=1 # In this case, the expected behavior is similar to flatmap shuffled_flatmapped_dp = source_dp.shuffled_flatmap(fn, buffer_size=1) expected_list = list(itertools.chain(*[(e, e * 10) for e in source_dp])) self.assertEqual(expected_list, list(shuffled_flatmapped_dp)) # Funtional Test: Specify input_col tuple_source_dp = IterableWrapper([(d - 1, d, d + 1) for d in range(20)]) # Single input_col input_col_1_dp = tuple_source_dp.shuffled_flatmap(fn, input_col=1, buffer_size=1) self.assertEqual(expected_list, list(input_col_1_dp)) # With generator as fn def gen_fn(e): yield e yield e * 10 shuffled_flatmapped_dp = source_dp.shuffled_flatmap(gen_fn, buffer_size=1) expected_list = list(itertools.chain(*[(e, e * 10) for e in source_dp])) self.assertEqual(expected_list, list(shuffled_flatmapped_dp)) # Multiple input_col def mul_fn(a, b): return [a - b, b - a] input_col_2_dp = tuple_source_dp.shuffled_flatmap(mul_fn, input_col=(0, 2), buffer_size=1) self.assertEqual(list(itertools.chain(*[(-2, 2) for _ in range(20)])), list(input_col_2_dp)) # shuffled_flatmap with no fn specified default_dp = tuple_source_dp.shuffled_flatmap(buffer_size=1) self.assertEqual(list(itertools.chain(*[(n - 1, n, n + 1) for n in range(20)])), list(default_dp)) # shuffled_flatmap with no fn specified, multiple input_col default_dp = tuple_source_dp.shuffled_flatmap(input_col=(0, 2), buffer_size=1) self.assertEqual(list(itertools.chain(*[(n - 1, n + 1) for n in range(20)])), list(default_dp)) # shuffled_flatmap with no fn specified, some special input tuple_source_dp = IterableWrapper([[1, 2, [3, 4]], [5, 6, [7, 8]]]) default_dp = tuple_source_dp.shuffled_flatmap(input_col=(0, 2), buffer_size=1) self.assertEqual([1, [3, 4], 5, [7, 8]], list(default_dp)) # Reset Test: reset the DataPipe after reading part of it n_elements_before_reset = 5 res_before_reset, res_after_reset = reset_after_n_next_calls(shuffled_flatmapped_dp, n_elements_before_reset) self.assertEqual(expected_list[:n_elements_before_reset], res_before_reset) self.assertEqual(expected_list, res_after_reset) # __len__ Test: length should be len(source_dp)*len(fn->out_shape) which we can't know with 
self.assertRaisesRegex(TypeError, "length relies on the output of its function."): len(shuffled_flatmapped_dp) # __len__ when no fn specified: dp = IterableWrapper([[1, 2], [], [3], [4, 5, 6, [7, 8]]]) dp = dp.shuffled_flatmap() self.assertEqual(len(dp), 7) # Tests with .set_shuffle(False) # In this case, the expected behavior is similar to flatmap shuffled_flatmapped_dp = source_dp.shuffled_flatmap(fn).set_shuffle(False) expected_list = list(itertools.chain(*[(e, e * 10) for e in source_dp])) self.assertEqual(expected_list, list(shuffled_flatmapped_dp)) # Funtional Test: Specify input_col tuple_source_dp = IterableWrapper([(d - 1, d, d + 1) for d in range(20)]) # Single input_col input_col_1_dp = tuple_source_dp.shuffled_flatmap(fn, input_col=1, buffer_size=1) self.assertEqual(expected_list, list(input_col_1_dp)) # Multiple input_col input_col_2_dp = tuple_source_dp.shuffled_flatmap(mul_fn, input_col=(0, 2)).set_shuffle(False) self.assertEqual(list(itertools.chain(*[(-2, 2) for _ in range(20)])), list(input_col_2_dp)) # shuffled_flatmap with no fn specified default_dp = tuple_source_dp.shuffled_flatmap().set_shuffle(False) self.assertEqual(list(itertools.chain(*[(n - 1, n, n + 1) for n in range(20)])), list(default_dp)) # shuffled_flatmap with no fn specified, multiple input_col default_dp = tuple_source_dp.shuffled_flatmap(input_col=(0, 2)).set_shuffle(False) self.assertEqual(list(itertools.chain(*[(n - 1, n + 1) for n in range(20)])), list(default_dp)) # shuffled_flatmap with no fn specified, some special input tuple_source_dp = IterableWrapper([[1, 2, [3, 4]], [5, 6, [7, 8]]]) default_dp = tuple_source_dp.shuffled_flatmap(input_col=(0, 2)).set_shuffle(False) self.assertEqual([1, [3, 4], 5, [7, 8]], list(default_dp)) # Reset Test: reset the DataPipe after reading part of it n_elements_before_reset = 5 res_before_reset, res_after_reset = reset_after_n_next_calls(shuffled_flatmapped_dp, n_elements_before_reset) self.assertEqual(expected_list[:n_elements_before_reset], res_before_reset) self.assertEqual(expected_list, res_after_reset) # Other tests # Test no empty buffers: with self.assertRaises(AssertionError): _ = source_dp.shuffled_flatmap(buffer_size=0) # Functional Test: No seed consecutive_tuple_source_dp = IterableWrapper([(d, d + 1, d + 2) for d in range(0, 21, 3)]) shuffled_flatmapped_dp = consecutive_tuple_source_dp.shuffled_flatmap() self.assertEqual(set(range(21)), set(shuffled_flatmapped_dp)) # Functional Test: With global seed torch.manual_seed(123) shuffled_flatmapped_dp = tuple_source_dp.shuffled_flatmap() res = list(shuffled_flatmapped_dp) torch.manual_seed(123) self.assertEqual(list(shuffled_flatmapped_dp), res) # Functional Test: Set seed shuffled_flatmapped_dp = tuple_source_dp.shuffled_flatmap().set_seed(123) res = list(shuffled_flatmapped_dp) shuffled_flatmapped_dp.set_seed(123) self.assertEqual(list(shuffled_flatmapped_dp), res) # Reset Test: shuffled_flatmapped_dp = tuple_source_dp.shuffled_flatmap() n_elements_before_reset = 5 res_before_reset, res_after_reset = reset_after_n_next_calls(shuffled_flatmapped_dp, n_elements_before_reset) self.assertEqual(5, len(res_before_reset)) def test_round_robin_demux_iterdatapipe(self): source_dp = IterableWrapper(list(range(23))) with self.assertRaisesRegex(ValueError, "Expected `num_instaces`"): _ = source_dp.round_robin_demux(0) # Funtional Test dp1, dp2, dp3 = source_dp.round_robin_demux(3) self.assertEqual(list(range(0, 23, 3)), list(dp1)) self.assertEqual(list(range(1, 23, 3)), list(dp2)) self.assertEqual(list(range(2, 23, 
3)), list(dp3)) # __len__ Test self.assertEqual(len(dp1), 8) self.assertEqual(len(dp2), 8) self.assertEqual(len(dp3), 7) def test_unzipper_iterdatapipe(self): source_dp = IterableWrapper([(i, i + 10, i + 20) for i in range(10)]) # Functional Test: unzips each sequence, no `sequence_length` specified dp1, dp2, dp3 = UnZipper(source_dp, sequence_length=3) self.assertEqual(list(range(10)), list(dp1)) self.assertEqual(list(range(10, 20)), list(dp2)) self.assertEqual(list(range(20, 30)), list(dp3)) # Functional Test: unzips each sequence, with `sequence_length` specified dp1, dp2, dp3 = source_dp.unzip(sequence_length=3) self.assertEqual(list(range(10)), list(dp1)) self.assertEqual(list(range(10, 20)), list(dp2)) self.assertEqual(list(range(20, 30)), list(dp3)) # Functional Test: skipping over specified values dp2, dp3 = source_dp.unzip(sequence_length=3, columns_to_skip=[0]) self.assertEqual(list(range(10, 20)), list(dp2)) self.assertEqual(list(range(20, 30)), list(dp3)) (dp2,) = source_dp.unzip(sequence_length=3, columns_to_skip=[0, 2], buffer_size=0) self.assertEqual(list(range(10, 20)), list(dp2)) source_dp = IterableWrapper([(i, i + 10, i + 20, i + 30) for i in range(10)]) dp2, dp3 = source_dp.unzip(sequence_length=4, columns_to_skip=[0, 3]) self.assertEqual(list(range(10, 20)), list(dp2)) self.assertEqual(list(range(20, 30)), list(dp3)) # Functional Test: one child DataPipe yields all value first, but buffer_size = 5 being too small, raises error source_dp = IterableWrapper([(i, i + 10) for i in range(10)]) dp1, dp2 = source_dp.unzip(sequence_length=2, buffer_size=4) it1 = iter(dp1) for _ in range(4): next(it1) with self.assertRaises(BufferError): next(it1) with self.assertRaises(BufferError): list(dp2) dp1, dp2 = source_dp.unzip(sequence_length=2, buffer_size=4) with self.assertRaises(BufferError): list(dp2) # Reset Test: DataPipe resets when a new iterator is created, even if this datapipe hasn't been read dp1, dp2 = source_dp.unzip(sequence_length=2) _ = iter(dp1) output2 = [] with self.assertRaisesRegex(RuntimeError, r"iterator has been invalidated"): for i, n2 in enumerate(dp2): output2.append(n2) if i == 4: _ = iter(dp1) # This will reset all child DataPipes self.assertEqual(list(range(10, 15)), output2) # Reset Test: DataPipe reset when some of it have been read dp1, dp2 = source_dp.unzip(sequence_length=2) output1, output2 = [], [] for i, (n1, n2) in enumerate(zip(dp1, dp2)): output1.append(n1) output2.append(n2) if i == 4: with warnings.catch_warnings(record=True) as wa: _ = iter(dp1) # Reset both all child DataPipe self.assertEqual(len(wa), 1) self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted") break for n1, n2 in zip(dp1, dp2): output1.append(n1) output2.append(n2) self.assertEqual(list(range(5)) + list(range(10)), output1) self.assertEqual(list(range(10, 15)) + list(range(10, 20)), output2) # Reset Test: DataPipe reset, even when some other child DataPipes are not read source_dp = IterableWrapper([(i, i + 10, i + 20) for i in range(10)]) dp1, dp2, dp3 = source_dp.unzip(sequence_length=3) output1, output2 = list(dp1), list(dp2) self.assertEqual(list(range(10)), output1) self.assertEqual(list(range(10, 20)), output2) with warnings.catch_warnings(record=True) as wa: self.assertEqual(list(range(10)), list(dp1)) # Resets even though dp3 has not been read self.assertEqual(len(wa), 1) self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted") output3 = [] for i, n3 in enumerate(dp3): output3.append(n3) if i == 4: with 
warnings.catch_warnings(record=True) as wa: output1 = list(dp1) # Resets even though dp3 is only partially read self.assertEqual(len(wa), 1) self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted") self.assertEqual(list(range(20, 25)), output3) self.assertEqual(list(range(10)), output1) break self.assertEqual(list(range(20, 30)), list(dp3)) # dp3 has to read from the start again # __len__ Test: Each DataPipe inherits the source datapipe's length dp1, dp2, dp3 = source_dp.unzip(sequence_length=3) self.assertEqual(len(source_dp), len(dp1)) self.assertEqual(len(source_dp), len(dp2)) self.assertEqual(len(source_dp), len(dp3)) def test_itertomap_mapdatapipe(self): # Functional Test with None key_value_fn values = list(range(10)) keys = ["k" + str(i) for i in range(10)] source_dp = IterableWrapper(list(zip(keys, values))) map_dp = source_dp.to_map_datapipe() self.assertTrue(isinstance(map_dp, MapDataPipe)) # Lazy loading self.assertTrue(map_dp._map is None) # __len__ Test: Each DataPipe inherits the source datapipe's length self.assertEqual(len(map_dp), 10) # Functional Test self.assertEqual(list(range(10)), [map_dp["k" + str(idx)] for idx in range(10)]) self.assertFalse(map_dp._map is None) source_dp = IterableWrapper(range(10)) # TypeError test for invalid data type map_dp = source_dp.to_map_datapipe() with self.assertRaisesRegex(TypeError, "Cannot convert dictionary update element"): _ = list(map_dp) # ValueError test for wrong length map_dp = source_dp.to_map_datapipe(lambda d: (d,)) with self.assertRaisesRegex(ValueError, "dictionary update sequence element has length"): _ = list(map_dp) # Functional Test with key_value_fn map_dp = source_dp.to_map_datapipe(lambda d: ("k" + str(d), d + 1)) self.assertEqual(list(range(1, 11)), [map_dp["k" + str(idx)] for idx in range(10)]) self.assertFalse(map_dp._map is None) # No __len__ from prior DataPipe no_len_dp = source_dp.filter(lambda x: x % 2 == 0) map_dp = no_len_dp.to_map_datapipe(lambda x: (x, x + 2)) with warnings.catch_warnings(record=True) as wa: length = len(map_dp) self.assertEqual(length, 5) self.assertEqual(len(wa), 1) self.assertRegex(str(wa[0].message), r"Data from prior DataPipe") # Duplicate Key Test dup_map_dp = source_dp.to_map_datapipe(lambda x: (x % 1, x)) with warnings.catch_warnings(record=True) as wa: dup_map_dp._load_map() self.assertEqual(len(wa), 1) self.assertRegex(str(wa[0].message), r"Found duplicate key") def test_mux_longest_iterdatapipe(self): # Functional Test: Elements are yielded one at a time from each DataPipe, until they are all exhausted input_dp1 = IterableWrapper(range(4)) input_dp2 = IterableWrapper(range(4, 8)) input_dp3 = IterableWrapper(range(8, 12)) output_dp = input_dp1.mux_longest(input_dp2, input_dp3) expected_output = [0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11] self.assertEqual(len(expected_output), len(output_dp)) self.assertEqual(expected_output, list(output_dp)) # Functional Test: Uneven input Data Pipes input_dp1 = IterableWrapper([1, 2, 3, 4]) input_dp2 = IterableWrapper([10]) input_dp3 = IterableWrapper([100, 200, 300]) output_dp = input_dp1.mux_longest(input_dp2, input_dp3) expected_output = [1, 10, 100, 2, 200, 3, 300, 4] self.assertEqual(len(expected_output), len(output_dp)) self.assertEqual(expected_output, list(output_dp)) # Functional Test: Empty Data Pipe input_dp1 = IterableWrapper([0, 1, 2, 3]) input_dp2 = IterableWrapper([]) output_dp = input_dp1.mux_longest(input_dp2) self.assertEqual(len(input_dp1), len(output_dp)) self.assertEqual(list(input_dp1), list(output_dp)) 
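        # A minimal added sketch (not in the original suite) of the semantics checked above:
        # `mux_longest` keeps yielding from the remaining inputs once a shorter one is
        # exhausted, whereas `mux` stops at the first exhausted input.
        short_dp = IterableWrapper([0, 1])
        long_dp = IterableWrapper([10, 11, 12, 13])
        self.assertEqual([0, 10, 1, 11, 12, 13], list(short_dp.mux_longest(long_dp)))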
# __len__ Test: raises TypeError when __len__ is called and an input doesn't have __len__ input_dp1 = IterableWrapper(range(10)) input_dp_no_len = IDP_NoLen(range(10)) output_dp = input_dp1.mux_longest(input_dp_no_len) with self.assertRaises(TypeError): len(output_dp) def test_shard_expand(self): # Functional Test: ensure expansion generates the right outputs def testexpand(s): stage1 = IterableWrapper([s]) stage2 = ShardExpander(stage1) return list(iter(stage2)) def myexpand(lo, hi, fmt): return [fmt.format(i) for i in range(lo, hi)] self.assertEqual(testexpand("ds-{000000..000009}.tar"), myexpand(0, 10, "ds-{:06d}.tar")) self.assertEqual(testexpand("{0..9}"), myexpand(0, 10, "{}")) self.assertEqual(testexpand("{0..999}"), myexpand(0, 1000, "{}")) self.assertEqual(testexpand("{123..999}"), myexpand(123, 1000, "{}")) self.assertEqual(testexpand("{000..999}"), myexpand(0, 1000, "{:03d}")) with self.assertRaisesRegex(ValueError, r"must not start with 0"): testexpand("{01..999}") with self.assertRaisesRegex(ValueError, r"must be shorter"): testexpand("{0000..999}") with self.assertRaisesRegex(ValueError, r"bad range"): testexpand("{999..123}") self.assertEqual(testexpand("{0..1}{0..1}"), "00 01 10 11".split()) def test_combining_infinite_iterdatapipe(self): r""" Test combining DataPipe can properly exit at the end of iteration with an infinite DataPipe as the input. """ def _get_dp(length=10): source_dp = IterableWrapper(list(range(length))) inf_dp = IterableWrapper(list(range(length))).cycle() return source_dp, inf_dp # zip noinf_dp, inf_dp = _get_dp(10) dp = inf_dp.zip(noinf_dp) res = list(dp) self.assertEqual(res, [(i, i) for i in range(10)]) # mux noinf_dp, inf_dp = _get_dp(10) dp = inf_dp.mux(noinf_dp) res = list(dp) self.assertEqual(res, [i for i in range(10) for _ in range(2)]) # zip_with_iter noinf_dp, inf_dp = _get_dp(10) dp = noinf_dp.zip_with_iter(inf_dp, key_fn=lambda x: x) res = list(dp) self.assertEqual(res, [(i, i) for i in range(10)]) def test_zip_longest_iterdatapipe(self): # Functional Test: raises TypeError when an input is not of type `IterDataPipe` with self.assertRaises(TypeError): input_dp1 = IterableWrapper(range(10)) input_no_dp = list(range(10)) output_dp = input_dp1.zip_longest(input_no_dp) # type: ignore[arg-type] # Functional Test: raises TypeError when an input does not have valid length input_dp1 = IterableWrapper(range(10)) input_dp_no_len = IDP_NoLen(range(5)) output_dp = input_dp1.zip_longest(input_dp_no_len) with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"): len(output_dp) # Functional Test: zips the results properly even when lengths are different # (zips to the longest, filling missing values with default value None.) 
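        # e.g. zipping range(10) with range(5) pads the last five pairs with None, as checked below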
        input_dp1 = IterableWrapper(range(10))
        input_dp2 = IterableWrapper(range(5))
        output_dp = input_dp1.zip_longest(input_dp2)
        exp = [(i, i) for i in range(5)] + [(i, None) for i in range(5, 10)]
        self.assertEqual(list(output_dp), exp)

        # Functional Test: zips the results properly even when lengths are different
        # (zips to the longest, filling missing values with user input)
        input_dp1 = IterableWrapper(range(10))
        input_dp2 = IterableWrapper(range(5))
        output_dp = input_dp1.zip_longest(input_dp2, fill_value=-1)
        exp = [(i, i) for i in range(5)] + [(i, -1) for i in range(5, 10)]
        self.assertEqual(list(output_dp), exp)

        # __len__ Test: length matches the length of the longest input
        self.assertEqual(len(output_dp), 10)

    def test_drop_iterdatapipe(self):
        # tuple tests
        input_dp = IterableWrapper([(0, 1, 2), (3, 4, 5), (6, 7, 8)])

        # Functional Test: single index drop for tuple elements
        drop_dp = input_dp.drop(1)
        self.assertEqual([(0, 2), (3, 5), (6, 8)], list(drop_dp))

        # Functional Test: multiple indices drop for tuple elements
        drop_dp = input_dp.drop([0, 2])
        self.assertEqual([(1,), (4,), (7,)], list(drop_dp))

        # dict tests
        input_dp = IterableWrapper([{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4, "c": 5}, {"a": 5, "b": 6, "c": 7}])

        # Functional Test: single key drop for dict elements
        drop_dp = input_dp.drop("a")
        self.assertEqual([{"b": 2, "c": 3}, {"b": 4, "c": 5}, {"b": 6, "c": 7}], list(drop_dp))

        # Functional Test: multiple key drop for dict elements
        drop_dp = input_dp.drop(["a", "b"])
        self.assertEqual([{"c": 3}, {"c": 5}, {"c": 7}], list(drop_dp))

        # list tests
        input_dp = IterableWrapper([[0, 1, 2], [3, 4, 5], [6, 7, 8]])

        # Functional Test: single index drop for list elements
        drop_dp = input_dp.drop(2)
        self.assertEqual([[0, 1], [3, 4], [6, 7]], list(drop_dp))

        # Functional Test: multiple indices drop for list elements
        drop_dp = input_dp.drop([0, 1])
        self.assertEqual([[2], [5], [8]], list(drop_dp))

        # Reset Test:
        n_elements_before_reset = 2
        input_dp = IterableWrapper([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
        drop_dp = input_dp.drop([0, 1])
        expected_res = [[2], [5], [8]]
        res_before_reset, res_after_reset = reset_after_n_next_calls(drop_dp, n_elements_before_reset)
        self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
        self.assertEqual(expected_res, res_after_reset)

        # __len__ Test:
        input_dp = IterableWrapper([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
        drop_dp = input_dp.drop([0, 1])
        self.assertEqual(3, len(drop_dp))

    def test_slice_iterdatapipe(self):
        # tuple tests
        input_dp = IterableWrapper([(0, 1, 2), (3, 4, 5), (6, 7, 8)])

        # Functional Test: slice with no stop and no step for tuple
        slice_dp = input_dp.slice(1)
        self.assertEqual([(1, 2), (4, 5), (7, 8)], list(slice_dp))

        # Functional Test: slice with no step for tuple
        slice_dp = input_dp.slice(0, 2)
        self.assertEqual([(0, 1), (3, 4), (6, 7)], list(slice_dp))

        # Functional Test: slice with step for tuple
        slice_dp = input_dp.slice(0, 2, 2)
        self.assertEqual([(0,), (3,), (6,)], list(slice_dp))

        # Functional Test: slice with list of indices for tuple
        slice_dp = input_dp.slice([0, 1])
        self.assertEqual([(0, 1), (3, 4), (6, 7)], list(slice_dp))

        # list tests
        input_dp = IterableWrapper([[0, 1, 2], [3, 4, 5], [6, 7, 8]])

        # Functional Test: slice with no stop and no step for list
        slice_dp = input_dp.slice(1)
        self.assertEqual([[1, 2], [4, 5], [7, 8]], list(slice_dp))

        # Functional Test: slice with no step for list
        slice_dp = input_dp.slice(0, 2)
        self.assertEqual([[0, 1], [3, 4], [6, 7]], list(slice_dp))

        # Functional Test: slice with list of indices for list
        slice_dp = input_dp.slice([0, 1])
        self.assertEqual([[0, 1], [3, 4], [6, 7]], list(slice_dp))

        # dict tests
        input_dp = IterableWrapper([{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4, "c": 5}, {"a": 5, "b": 6, "c": 7}])

        # Functional Test: slice with key for dict
        slice_dp = input_dp.slice("a")
        self.assertEqual([{"a": 1}, {"a": 3}, {"a": 5}], list(slice_dp))

        # Functional Test: slice with list of keys for dict
        slice_dp = input_dp.slice(["a", "b"])
        self.assertEqual([{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}], list(slice_dp))

        # __len__ Test:
        input_dp = IterableWrapper([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
        slice_dp = input_dp.slice(0, 2)
        self.assertEqual(3, len(slice_dp))

        # Reset Test:
        n_elements_before_reset = 2
        input_dp = IterableWrapper([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
        slice_dp = input_dp.slice([2])
        expected_res = [[2], [5], [8]]
        res_before_reset, res_after_reset = reset_after_n_next_calls(slice_dp, n_elements_before_reset)
        self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
        self.assertEqual(expected_res, res_after_reset)

    def test_flatten_iterdatapipe(self):
        # tuple tests

        # Functional Test: flatten for an index
        input_dp = IterableWrapper([(0, 1, (2, 3)), (4, 5, (6, 7)), (8, 9, (10, 11))])
        flatten_dp = input_dp.flatten(2)
        self.assertEqual([(0, 1, 2, 3), (4, 5, 6, 7), (8, 9, 10, 11)], list(flatten_dp))

        # Functional Test: flatten for list of indices
        input_dp = IterableWrapper([((0, 10), 1, (2, 3)), ((4, 14), 5, (6, 7)), ((8, 18), 9, (10, 11))])
        flatten_dp = input_dp.flatten([0, 2])
        self.assertEqual([(0, 10, 1, 2, 3), (4, 14, 5, 6, 7), (8, 18, 9, 10, 11)], list(flatten_dp))

        # Functional Test: flatten all iters in the datapipe one level (no argument)
        input_dp = IterableWrapper([(0, (1, 2)), (3, (4, 5)), (6, (7, 8))])
        flatten_dp = input_dp.flatten()
        self.assertEqual([(0, 1, 2), (3, 4, 5), (6, 7, 8)], list(flatten_dp))

        # list tests

        # Functional Test: flatten for an index
        input_dp = IterableWrapper([[0, 1, [2, 3]], [4, 5, [6, 7]], [8, 9, [10, 11]]])
        flatten_dp = input_dp.flatten(2)
        self.assertEqual([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], list(flatten_dp))

        # Functional Test: flatten for list of indices
        input_dp = IterableWrapper([[[0, 10], 1, [2, 3]], [[4, 14], 5, [6, 7]], [[8, 18], 9, [10, 11]]])
        flatten_dp = input_dp.flatten([0, 2])
        self.assertEqual([[0, 10, 1, 2, 3], [4, 14, 5, 6, 7], [8, 18, 9, 10, 11]], list(flatten_dp))

        # Functional Test: flatten all iters in the datapipe one level (no argument)
        input_dp = IterableWrapper([[0, [1, 2]], [3, [4, 5]], [6, [7, 8]]])
        flatten_dp = input_dp.flatten()
        self.assertEqual([[0, 1, 2], [3, 4, 5], [6, 7, 8]], list(flatten_dp))

        # Functional Test: string test, flatten all iters in the datapipe one level (no argument)
        input_dp = IterableWrapper([["zero", ["one", "2"]], ["3", ["4", "5"]], ["6", ["7", "8"]]])
        flatten_dp = input_dp.flatten()
        self.assertEqual([["zero", "one", "2"], ["3", "4", "5"], ["6", "7", "8"]], list(flatten_dp))

        # dict tests

        # Functional Test: flatten for an index
        input_dp = IterableWrapper([{"a": 1, "b": 2, "c": {"d": 3, "e": 4}}, {"a": 5, "b": 6, "c": {"d": 7, "e": 8}}])
        flatten_dp = input_dp.flatten("c")
        self.assertEqual([{"a": 1, "b": 2, "d": 3, "e": 4}, {"a": 5, "b": 6, "d": 7, "e": 8}], list(flatten_dp))

        # Functional Test: flatten for an index already flat
        input_dp = IterableWrapper([{"a": 1, "b": 2, "c": {"d": 9, "e": 10}}, {"a": 5, "b": 6, "c": {"d": 7, "e": 8}}])
        flatten_dp = input_dp.flatten("a")
        self.assertEqual(
            [{"a": 1, "b": 2, "c": {"d": 9, "e": 10}}, {"a": 5, "b": 6, "c": {"d": 7, "e": 8}}], list(flatten_dp)
        )
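        # A minimal added sketch (not in the original suite): with no argument, `flatten`
        # descends a single level only, so deeper nesting is preserved as-is.
        deep_dp = IterableWrapper([(0, (1, (2, 3)))])
        self.assertEqual([(0, 1, (2, 3))], list(deep_dp.flatten()))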
# Functional Test: flatten for list of indices input_dp = IterableWrapper( [ {"a": {"f": 10, "g": 11}, "b": 2, "c": {"d": 3, "e": 4}}, {"a": {"f": 10, "g": 11}, "b": 6, "c": {"d": 7, "e": 8}}, ] ) flatten_dp = input_dp.flatten(["a", "c"]) self.assertEqual( [{"f": 10, "g": 11, "b": 2, "d": 3, "e": 4}, {"f": 10, "g": 11, "b": 6, "d": 7, "e": 8}], list(flatten_dp) ) # Functional Test: flatten all iters in the datapipe one level (no argument) input_dp = IterableWrapper([{"a": 1, "b": 2, "c": {"d": 3, "e": 4}}, {"a": 5, "b": 6, "c": {"d": 7, "e": 8}}]) flatten_dp = input_dp.flatten() self.assertEqual([{"a": 1, "b": 2, "d": 3, "e": 4}, {"a": 5, "b": 6, "d": 7, "e": 8}], list(flatten_dp)) # Functional Test: flatten all iters one level, multiple iters input_dp = IterableWrapper( [ {"a": {"f": 10, "g": 11}, "b": 2, "c": {"d": 3, "e": 4}}, {"a": {"f": 10, "g": 11}, "b": 6, "c": {"d": 7, "e": 8}}, ] ) flatten_dp = input_dp.flatten() self.assertEqual( [{"f": 10, "g": 11, "b": 2, "d": 3, "e": 4}, {"f": 10, "g": 11, "b": 6, "d": 7, "e": 8}], list(flatten_dp) ) # __len__ Test: input_dp = IterableWrapper([(0, 1, (2, 3)), (4, 5, (6, 7)), (8, 9, (10, 11))]) flatten_dp = input_dp.flatten(2) self.assertEqual(3, len(flatten_dp)) # Reset Test: n_elements_before_reset = 2 input_dp = IterableWrapper([(0, 1, (2, 3)), (4, 5, (6, 7)), (8, 9, (10, 11))]) flatten_dp = input_dp.flatten(2) expected_res = [(0, 1, 2, 3), (4, 5, 6, 7), (8, 9, 10, 11)] res_before_reset, res_after_reset = reset_after_n_next_calls(flatten_dp, n_elements_before_reset) self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset) self.assertEqual(expected_res, res_after_reset) def test_length_setter_iterdatapipe(self): input_dp = IterableWrapper(range(10)) # Functional Test: Setting length doesn't change the content of the DataPipe dp: IterDataPipe = input_dp.set_length(3) self.assertEqual(list(range(10)), list(dp)) with self.assertRaises(AssertionError): input_dp.set_length(-1) # __len__ Test: Length is as specified and propagates through dp = input_dp.set_length(3).map(lambda x: x + 1) self.assertEqual(3, len(dp)) # Reset Test: n_elements_before_reset = 2 dp = input_dp.set_length(3) expected_res = list(range(10)) res_before_reset, res_after_reset = reset_after_n_next_calls(dp, n_elements_before_reset) self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset) self.assertEqual(expected_res, res_after_reset) def test_random_splitter_iterdatapipe(self): n_epoch = 2 # Functional Test: Split results are the same across epochs dp = IterableWrapper(range(10)) train, valid = dp.random_split(total_length=10, weights={"train": 0.5, "valid": 0.5}, seed=0) results = [] for _ in range(n_epoch): res = list(train) self.assertEqual(5, len(res)) results.append(res) self.assertEqual(results[0], results[1]) valid_res = list(valid) self.assertEqual(5, len(valid_res)) self.assertEqual(list(range(10)), sorted(results[0] + valid_res)) # Functional Test: lengths can be known in advance because it splits evenly into integers. 
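        # (total_length=10 with weights 0.5/0.5 splits exactly into integer lengths 5 and 5)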
        self.assertEqual(5, len(train))
        self.assertEqual(5, len(valid))

        # Functional Test: DataPipe can split into 3 DataPipes, and infer `total_length` when not given
        dp = IterableWrapper(range(10))
        train, valid, test = dp.random_split(weights={"train": 0.6, "valid": 0.2, "test": 0.2}, seed=0)
        results = []
        for _ in range(n_epoch):
            res = list(train)
            self.assertEqual(6, len(res))
            results.append(res)
        self.assertEqual(results[0], results[1])
        valid_res = list(valid)
        self.assertEqual(2, len(valid_res))
        test_res = list(test)
        self.assertEqual(2, len(test_res))
        self.assertEqual(list(range(10)), sorted(results[0] + valid_res + test_res))

        # Functional Test: lengths can be known in advance because it splits evenly into integers.
        self.assertEqual(6, len(train))
        self.assertEqual(2, len(valid))
        self.assertEqual(2, len(test))

        # Functional Test: Split can work even when weights do not split evenly into integers.
        dp = IterableWrapper(range(13))
        train, valid, test = dp.random_split(weights={"train": 0.6, "valid": 0.2, "test": 0.2}, seed=0)
        res = list(train) + list(valid) + list(test)
        self.assertEqual(list(range(13)), sorted(res))

        # Functional Test: lengths cannot be known in advance because the weights do not
        # split the total length evenly into integers.
        with self.assertRaisesRegex(TypeError, "Lengths of the split cannot be known in advance"):
            len(train)

        # Functional Test: Error when `total_length` cannot be inferred
        nolen_dp = IDP_NoLen(range(10))
        with self.assertRaisesRegex(TypeError, "needs `total_length`"):
            _, __ = nolen_dp.random_split(weights={"train": 0.5, "valid": 0.5}, seed=0)  # type: ignore[call-arg]

        # Functional Test: `target` must match a key in the `weights` dict
        dp = IterableWrapper(range(10))
        with self.assertRaisesRegex(KeyError, "does not match any key"):
            _ = dp.random_split(
                total_length=10, weights={"train": 0.5, "valid": 0.2, "test": 0.2}, seed=0, target="NOTINDICT"
            )

        # Functional Test: `target` is specified, and matches the results from before
        dp = IterableWrapper(range(10))
        train = dp.random_split(
            total_length=10, weights={"train": 0.6, "valid": 0.2, "test": 0.2}, seed=0, target="train"
        )
        results2 = []
        for _ in range(n_epoch):
            res = list(train)
            self.assertEqual(6, len(res))
            results2.append(res)
        self.assertEqual(results2[0], results2[1])
        self.assertEqual(results, results2)

        # Functional Test: `override_seed` works and changes the split result
        train.override_seed(1)
        seed_1_res = list(train)
        self.assertNotEqual(results2[0], seed_1_res)

        # Functional Test: `override_seed` doesn't impact the current iteration, only the next one
        temp_res = []
        for i, x in enumerate(train):
            temp_res.append(x)
            if i == 3:
                train.override_seed(0)
        self.assertEqual(seed_1_res, temp_res)  # The current iteration should equal the seed 1 result
        self.assertEqual(results2[0], list(train))  # The next iteration should equal the seed 0 result

        # Functional Test: Raise exception if both children are used at the same time
        dp = IterableWrapper(range(10))
        train, valid = dp.random_split(total_length=10, weights={"train": 0.5, "valid": 0.5}, seed=0)
        it_train = iter(train)
        next(it_train)
        it_valid = iter(valid)  # This resets the DataPipe and invalidates the other iterator
        next(it_valid)
        with self.assertRaisesRegex(RuntimeError, "iterator has been invalidated"):
            next(it_train)
        next(it_valid)  # No error, can keep going

    @skipIfNoCUDA
    def test_pin_memory(self):
        # Tensor
        dp = IterableWrapper([(i, i + 1) for i in range(10)]).map(_convert_to_tensor).pin_memory()
        self.assertTrue(all(d.is_pinned() for d in dp))

        # List of Tensors
        dp = IterableWrapper([[(i - 1, i), (i, i + 1)] for i in
range(10)]).map(_convert_to_tensor).pin_memory() self.assertTrue(all(d0.is_pinned() and d1.is_pinned() for d0, d1 in dp)) # Dict of Tensors dp = IterableWrapper([{str(i): (i, i + 1)} for i in range(10)]).map(_convert_to_tensor).pin_memory() self.assertTrue(all(v.is_pinned() for d in dp for v in d.values())) # NamedTuple dp = IterableWrapper([NamedTensors(torch.tensor(i), torch.tensor(i + 1)) for i in range(10)]).pin_memory() self.assertTrue(all(v.is_pinned() for d in dp for v in d)) # Dict of List of Tensors dp = ( IterableWrapper([{str(i): [(i - 1, i), (i, i + 1)]} for i in range(10)]) .map(_convert_to_tensor) .pin_memory() ) self.assertTrue(all(v.is_pinned() for d in dp for batch in d.values() for v in batch)) # List of Dict of Tensors dp = IterableWrapper([{str(i): (i, i + 1)} for i in range(10)]).map(_convert_to_tensor).batch(2).pin_memory() self.assertTrue(all(v.is_pinned() for batch in dp for d in batch for v in d.values())) # List of List of Tensors dp = ( IterableWrapper([[(i - 1, i), (i, i + 1)] for i in range(10)]).map(_convert_to_tensor).batch(2).pin_memory() ) self.assertTrue(all(d0.is_pinned() and d1.is_pinned() for batch in dp for d0, d1 in batch)) # Single str dp = IterableWrapper(["hello", "world"]).batch(1).collate().pin_memory() self.assertEqual(list(dp), [["hello"], ["world"]]) def test_async_map_batches(self): batch_size = 16 def _helper(input_data, exp_res, async_fn, input_col=None, output_col=None, max_concurrency=32, flatten=True): dp = IterableWrapper(input_data) dp = dp.async_map_batches(async_fn, batch_size, input_col, output_col, max_concurrency, flatten) self.assertEqual( exp_res, list(dp), msg=f"Async map test with {async_fn=}, {input_col=}, {output_col=}, {max_concurrency=}", ) if flatten: self.assertEqual(len(input_data), len(dp)) _helper(range(50), [i * 10 for i in range(50)], _async_mul_ten) # Smaller max_concurrency _helper(range(50), [i * 10 for i in range(50)], _async_mul_ten, max_concurrency=6) # Tuple with input_col _helper([(i, i) for i in range(50)], [(i * 10, i) for i in range(50)], _async_mul_ten, input_col=0) _helper([(i, i) for i in range(50)], [(i, i * 10) for i in range(50)], _async_mul_ten, input_col=1) # Tuple with input_col and output_col _helper( [(i, i) for i in range(50)], [(i, i * 10) for i in range(50)], _async_mul_ten, input_col=0, output_col=1 ) _helper( [(i, i) for i in range(50)], [(i, i, i * 10) for i in range(50)], _async_mul_ten, input_col=0, output_col=-1 ) # Dict with input_col _helper( [{"a": i, "b": i} for i in range(50)], [{"a": i, "b": i * 10} for i in range(50)], _async_mul_ten, input_col="b", ) # Dict with input_col and output_col _helper( [{"a": i, "b": i} for i in range(50)], [{"a": i * 10, "b": i} for i in range(50)], _async_mul_ten, input_col="b", output_col="a", ) _helper( [{"a": i, "b": i} for i in range(50)], [{"a": i, "b": i, "c": i * 10} for i in range(50)], _async_mul_ten, input_col="b", output_col="c", ) # Multiple input_col _helper( [(i - 1, i, i + 1) for i in range(50)], [((i - 1) * (i + 1), i) for i in range(50)], _async_x_mul_y, input_col=(0, 2), ) _helper( [(i - 1, i, i + 1) for i in range(50)], [(i, (i - 1) * (i + 1)) for i in range(50)], _async_x_mul_y, input_col=(2, 0), ) # Multiple input_col with output_col _helper( [(i - 1, i, i + 1) for i in range(50)], [(i - 1, (i - 1) * (i + 1), i + 1) for i in range(50)], _async_x_mul_y, input_col=(0, 2), output_col=1, ) # Skip over `flatten` operation _helper( range(32), [[i * 10 for i in range(16)], [i * 10 for i in range(16, 32)]], _async_mul_ten, 
flatten=False, ) # Test multiple asyncio eventloops dp1 = IterableWrapper(range(50)) dp1 = dp1.async_map_batches(_async_mul_ten, 16) dp2 = IterableWrapper(range(50)) dp2 = dp2.async_map_batches(_async_mul_ten, 16) for v1, v2, exp in zip(dp1, dp2, [i * 10 for i in range(50)]): self.assertEqual(v1, exp) self.assertEqual(v2, exp) def test_threadpool_map(self): target_length = 30 input_dp = IterableWrapper(range(target_length)) input_dp_parallel = IterableWrapper(range(target_length)) def fn(item, dtype=torch.float, *, sum=False): data = torch.tensor(item, dtype=dtype) return data if not sum else data.sum() # Functional Test: apply to each element correctly map_dp = input_dp.threadpool_map(fn) self.assertEqual(target_length, len(map_dp)) for x, y in zip(map_dp, range(target_length)): self.assertEqual(x, torch.tensor(y, dtype=torch.float)) # Functional Test: works with partial function map_dp = input_dp.threadpool_map(partial(fn, dtype=torch.int, sum=True)) for x, y in zip(map_dp, range(target_length)): self.assertEqual(x, torch.tensor(y, dtype=torch.int).sum()) # __len__ Test: inherits length from source DataPipe self.assertEqual(target_length, len(map_dp)) input_dp_nl = IDP_NoLen(range(target_length)) map_dp_nl = input_dp_nl.threadpool_map(lambda x: x) for x, y in zip(map_dp_nl, range(target_length)): self.assertEqual(x, torch.tensor(y, dtype=torch.float)) # __len__ Test: inherits length from source DataPipe - raises error when invalid with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"): len(map_dp_nl) # Test: two independent ThreadPoolExecutors running at the same time map_dp_parallel = input_dp_parallel.threadpool_map(fn) for x, y, z in zip(map_dp, map_dp_parallel, range(target_length)): self.assertEqual(x, torch.tensor(z, dtype=torch.float)) self.assertEqual(y, torch.tensor(z, dtype=torch.float)) # Reset Test: DataPipe resets properly n_elements_before_reset = 5 res_before_reset, res_after_reset = reset_after_n_next_calls(map_dp, n_elements_before_reset) self.assertEqual(list(range(n_elements_before_reset)), res_before_reset) self.assertEqual(list(range(target_length)), res_after_reset) @suppress_warnings # Suppress warning for lambda fn def test_threadpool_map_tuple_list_with_col_iterdatapipe(self): def fn_11(d): return -d def fn_1n(d): return -d, d def fn_n1(d0, d1): return d0 + d1 def fn_nn(d0, d1): return -d0, -d1, d0 + d1 def fn_n1_def(d0, d1=1): return d0 + d1 def fn_n1_kwargs(d0, d1, **kwargs): return d0 + d1 def fn_n1_pos(d0, d1, *args): return d0 + d1 def fn_n1_sep_pos(d0, *args, d1): return d0 + d1 def fn_cmplx(d0, d1=1, *args, d2, **kwargs): return d0 + d1 p_fn_n1 = partial(fn_n1, d1=1) p_fn_cmplx = partial(fn_cmplx, d2=2) def _helper(ref_fn, fn, input_col=None, output_col=None, error=None): for constr in (list, tuple): datapipe = IterableWrapper([constr((0, 1, 2)), constr((3, 4, 5)), constr((6, 7, 8))]) if ref_fn is None: with self.assertRaises(error): res_dp = datapipe.threadpool_map(fn, input_col, output_col) list(res_dp) else: res_dp = datapipe.threadpool_map(fn, input_col, output_col) ref_dp = datapipe.map(ref_fn) if constr is list: ref_dp = ref_dp.map(list) self.assertEqual(list(res_dp), list(ref_dp), "First test failed") # Reset self.assertEqual(list(res_dp), list(ref_dp), "Test after reset failed") _helper(lambda data: data, fn_n1_def, 0, 1) _helper(lambda data: (data[0], data[1], data[0] + data[1]), fn_n1_def, [0, 1], 2) _helper(lambda data: data, p_fn_n1, 0, 1) _helper(lambda data: data, p_fn_cmplx, 0, 1) _helper(lambda data: (data[0], 
data[1], data[0] + data[1]), p_fn_cmplx, [0, 1], 2) _helper(lambda data: (data[0] + data[1],), fn_n1_pos, [0, 1, 2]) # Replacing with one input column and default output column _helper(lambda data: (data[0], -data[1], data[2]), fn_11, 1) _helper(lambda data: (data[0], (-data[1], data[1]), data[2]), fn_1n, 1) # The index of input column is out of range _helper(None, fn_1n, 3, error=IndexError) # Unmatched input columns with fn arguments _helper(None, fn_n1, 1, error=ValueError) _helper(None, fn_n1, [0, 1, 2], error=ValueError) _helper(None, lambda d0, d1: d0 + d1, 0, error=ValueError) _helper(None, lambda d0, d1: d0 + d1, [0, 1, 2], error=ValueError) _helper(None, fn_cmplx, 0, 1, ValueError) _helper(None, fn_n1_pos, 1, error=ValueError) _helper(None, fn_n1_def, [0, 1, 2], 1, error=ValueError) _helper(None, p_fn_n1, [0, 1], error=ValueError) _helper(None, fn_1n, [1, 2], error=ValueError) # _helper(None, p_fn_cmplx, [0, 1, 2], error=ValueError) _helper(None, fn_n1_sep_pos, [0, 1, 2], error=ValueError) # Fn has keyword-only arguments _helper(None, fn_n1_kwargs, 1, error=ValueError) _helper(None, fn_cmplx, [0, 1], 2, ValueError) # Replacing with multiple input columns and default output column (the left-most input column) _helper(lambda data: (data[1], data[2] + data[0]), fn_n1, [2, 0]) _helper(lambda data: (data[0], (-data[2], -data[1], data[2] + data[1])), fn_nn, [2, 1]) # output_col can only be specified when input_col is not None _helper(None, fn_n1, None, 1, error=ValueError) # output_col can only be single-element list or tuple _helper(None, fn_n1, None, [0, 1], error=ValueError) # Single-element list as output_col _helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, [0]) # Replacing with one input column and single specified output column _helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, 0) _helper(lambda data: (data[0], data[1], (-data[1], data[1])), fn_1n, 1, 2) # The index of output column is out of range _helper(None, fn_1n, 1, 3, error=IndexError) _helper(lambda data: (data[0], data[0] + data[2], data[2]), fn_n1, [0, 2], 1) _helper(lambda data: ((-data[1], -data[2], data[1] + data[2]), data[1], data[2]), fn_nn, [1, 2], 0) # Appending the output at the end _helper(lambda data: (*data, -data[1]), fn_11, 1, -1) _helper(lambda data: (*data, (-data[1], data[1])), fn_1n, 1, -1) _helper(lambda data: (*data, data[0] + data[2]), fn_n1, [0, 2], -1) _helper(lambda data: (*data, (-data[1], -data[2], data[1] + data[2])), fn_nn, [1, 2], -1) # Handling built-in functions (e.g. 
`dict`, `iter`, `int`, `str`) whose signatures cannot be inspected _helper(lambda data: (str(data[0]), data[1], data[2]), str, 0) _helper(lambda data: (data[0], data[1], int(data[2])), int, 2) @suppress_warnings # Suppress warning for lambda fn def test_threadpool_map_dict_with_col_iterdatapipe(self): def fn_11(d): return -d def fn_1n(d): return -d, d def fn_n1(d0, d1): return d0 + d1 def fn_nn(d0, d1): return -d0, -d1, d0 + d1 def fn_n1_def(d0, d1=1): return d0 + d1 p_fn_n1 = partial(fn_n1, d1=1) def fn_n1_pos(d0, d1, *args): return d0 + d1 def fn_n1_kwargs(d0, d1, **kwargs): return d0 + d1 def fn_kwonly(*, d0, d1): return d0 + d1 def fn_has_nondefault_kwonly(d0, *, d1): return d0 + d1 def fn_cmplx(d0, d1=1, *args, d2, **kwargs): return d0 + d1 p_fn_cmplx = partial(fn_cmplx, d2=2) # Prevent modification in-place to support resetting def _dict_update(data, newdata, remove_idx=None): _data = dict(data) _data.update(newdata) if remove_idx: for idx in remove_idx: del _data[idx] return _data def _helper(ref_fn, fn, input_col=None, output_col=None, error=None): datapipe = IterableWrapper([{"x": 0, "y": 1, "z": 2}, {"x": 3, "y": 4, "z": 5}, {"x": 6, "y": 7, "z": 8}]) if ref_fn is None: with self.assertRaises(error): res_dp = datapipe.threadpool_map(fn, input_col, output_col) list(res_dp) else: res_dp = datapipe.threadpool_map(fn, input_col, output_col) ref_dp = datapipe.map(ref_fn) self.assertEqual(list(res_dp), list(ref_dp), "First test failed") # Reset self.assertEqual(list(res_dp), list(ref_dp), "Test after reset failed") _helper(lambda data: data, fn_n1_def, "x", "y") _helper(lambda data: data, p_fn_n1, "x", "y") _helper(lambda data: data, p_fn_cmplx, "x", "y") _helper(lambda data: _dict_update(data, {"z": data["x"] + data["y"]}), p_fn_cmplx, ["x", "y", "z"], "z") _helper(lambda data: _dict_update(data, {"z": data["x"] + data["y"]}), fn_n1_def, ["x", "y"], "z") _helper(None, fn_n1_pos, "x", error=ValueError) _helper(None, fn_n1_kwargs, "x", error=ValueError) # non-default kw-only args _helper(None, fn_kwonly, ["x", "y"], error=ValueError) _helper(None, fn_has_nondefault_kwonly, ["x", "y"], error=ValueError) _helper(None, fn_cmplx, ["x", "y"], error=ValueError) # Replacing with one input column and default output column _helper(lambda data: _dict_update(data, {"y": -data["y"]}), fn_11, "y") _helper(lambda data: _dict_update(data, {"y": (-data["y"], data["y"])}), fn_1n, "y") # The key of input column is not in dict _helper(None, fn_1n, "a", error=KeyError) # Unmatched input columns with fn arguments _helper(None, fn_n1, "y", error=ValueError) _helper(None, fn_1n, ["x", "y"], error=ValueError) _helper(None, fn_n1_def, ["x", "y", "z"], error=ValueError) _helper(None, p_fn_n1, ["x", "y"], error=ValueError) _helper(None, fn_n1_kwargs, ["x", "y", "z"], error=ValueError) # Replacing with multiple input columns and default output column (the left-most input column) _helper(lambda data: _dict_update(data, {"z": data["x"] + data["z"]}, ["x"]), fn_n1, ["z", "x"]) _helper( lambda data: _dict_update(data, {"z": (-data["z"], -data["y"], data["y"] + data["z"])}, ["y"]), fn_nn, ["z", "y"], ) # output_col can only be specified when input_col is not None _helper(None, fn_n1, None, "x", error=ValueError) # output_col can only be single-element list or tuple _helper(None, fn_n1, None, ["x", "y"], error=ValueError) # Single-element list as output_col _helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", ["x"]) # Replacing with one input column and single specified output column _helper(lambda 
             data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", "x")
        _helper(lambda data: _dict_update(data, {"z": (-data["y"], data["y"])}), fn_1n, "y", "z")
        _helper(lambda data: _dict_update(data, {"y": data["x"] + data["z"]}), fn_n1, ["x", "z"], "y")
        _helper(
            lambda data: _dict_update(data, {"x": (-data["y"], -data["z"], data["y"] + data["z"])}),
            fn_nn,
            ["y", "z"],
            "x",
        )
        # Adding new key to dict for the output
        _helper(lambda data: _dict_update(data, {"a": -data["y"]}), fn_11, "y", "a")
        _helper(lambda data: _dict_update(data, {"a": (-data["y"], data["y"])}), fn_1n, "y", "a")
        _helper(lambda data: _dict_update(data, {"a": data["x"] + data["z"]}), fn_n1, ["x", "z"], "a")
        _helper(
            lambda data: _dict_update(data, {"a": (-data["y"], -data["z"], data["y"] + data["z"])}),
            fn_nn,
            ["y", "z"],
            "a",
        )


if __name__ == "__main__":
    unittest.main()
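# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the test file above): how
# `threadpool_map` applies a function over a selected column of dict samples
# and writes the result into a new column. Only torchdata APIs exercised by
# the tests are used; the sample data and `fetch_len` helper are hypothetical.
# ---------------------------------------------------------------------------
from torchdata.datapipes.iter import IterableWrapper


def fetch_len(text):
    # Stand-in for an I/O-bound call that benefits from a thread pool
    return len(text)


samples = [{"url": "a" * n, "label": n % 2} for n in range(1, 6)]
dp = IterableWrapper(samples)
# Read from the "url" column, write the result into a new "n_chars" column
dp = dp.threadpool_map(fetch_len, input_col="url", output_col="n_chars")
assert [d["n_chars"] for d in dp] == [1, 2, 3, 4, 5]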
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from unittest import TestCase

from torchdata.dataloader2 import DataLoader2
from torchdata.dataloader2.adapter import Shuffle
from torchdata.datapipes.iter import IterableWrapper


class AdapterTest(TestCase):
    def test_shuffle(self) -> None:
        size = 500
        dp = IterableWrapper(range(size))

        dl = DataLoader2(datapipe=dp)
        self.assertEqual(list(range(size)), list(dl))

        with self.assertWarns(Warning, msg="`shuffle=True` was set, but the datapipe does not contain a `Shuffler`."):
            dl = DataLoader2(datapipe=dp, datapipe_adapter_fn=Shuffle(True))
            self.assertNotEqual(list(range(size)), list(dl))

        dp = IterableWrapper(range(size)).shuffle()

        dl = DataLoader2(datapipe=dp)
        self.assertNotEqual(list(range(size)), list(dl))

        dl = DataLoader2(dp, Shuffle(True))
        self.assertNotEqual(list(range(size)), list(dl))

        dl = DataLoader2(dp, [Shuffle(None)])
        self.assertNotEqual(list(range(size)), list(dl))

        dl = DataLoader2(dp, [Shuffle(False)])
        self.assertEqual(list(range(size)), list(dl))
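# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): the adapter pattern that
# `test_shuffle` exercises. The same datapipe graph can have its in-graph
# `Shuffler` toggled at DataLoader2 construction time via the Shuffle adapter,
# without rebuilding the pipeline.
# ---------------------------------------------------------------------------
from torchdata.dataloader2 import DataLoader2
from torchdata.dataloader2.adapter import Shuffle
from torchdata.datapipes.iter import IterableWrapper

pipe = IterableWrapper(range(10)).shuffle()

# Same datapipe, two behaviors: shuffling left on vs. forced off
shuffled = list(DataLoader2(pipe, [Shuffle(True)]))
ordered = list(DataLoader2(pipe, [Shuffle(False)]))
assert sorted(shuffled) == ordered == list(range(10))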
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import torch.multiprocessing as mp
from torch.testing._internal.common_utils import slowTest
from torch.utils.data import DataLoader
from torchtext.datasets import AG_NEWS, AmazonReviewPolarity, IMDB, SQuAD1, SQuAD2, SST2


# TODO(124): Replace the following tests with the corresponding tests in TorchText
class TestTextExamples(unittest.TestCase):
    def _test_helper(self, fn):
        dp = fn()
        for stage_dp in dp:
            _ = list(stage_dp)

    @staticmethod
    def _collate_fn(batch):
        return batch

    def _test_DL_helper(self, fn):
        mp.set_sharing_strategy("file_system")
        dp = fn()
        for stage_dp in dp:
            dl = DataLoader(
                stage_dp,
                batch_size=8,
                num_workers=4,
                collate_fn=TestTextExamples._collate_fn,
                multiprocessing_context="spawn",
            )
            _ = list(dl)

    def test_SST(self) -> None:
        self._test_helper(SST2)
        self._test_DL_helper(SST2)

    def test_AG_NEWS(self) -> None:
        self._test_helper(AG_NEWS)
        self._test_DL_helper(AG_NEWS)

    @slowTest
    def test_AmazonReviewPolarity(self) -> None:
        self._test_helper(AmazonReviewPolarity)
        self._test_DL_helper(AmazonReviewPolarity)

    @slowTest
    def test_IMDB(self) -> None:
        self._test_helper(IMDB)
        self._test_DL_helper(IMDB)

    def test_SQuAD1(self) -> None:
        self._test_helper(SQuAD1)
        self._test_DL_helper(SQuAD1)

    def test_SQuAD2(self) -> None:
        self._test_helper(SQuAD2)
        self._test_DL_helper(SQuAD2)


if __name__ == "__main__":
    unittest.main()
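# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): iterating a torchtext
# datapipe directly, mirroring what `_test_DL_helper` wraps in a DataLoader.
# Downloads the dataset on first use, so this is a sketch rather than a test.
# ---------------------------------------------------------------------------
from itertools import islice

from torchtext.datasets import AG_NEWS

train_dp = AG_NEWS(split="train")
for label, text in islice(train_dp, 3):
    print(label, text[:60])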
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import bz2 import functools import hashlib import io import itertools import lzma import os import subprocess import tarfile import tempfile import time import unittest import warnings import zipfile from functools import partial from json.decoder import JSONDecodeError import expecttest from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls from torch.utils.data import DataLoader from torchdata.dataloader2.adapter import CacheTimeout from torchdata.datapipes.iter import ( Bz2FileLoader, CSVDictParser, CSVParser, Decompressor, FileLister, FileOpener, HashChecker, IoPathFileLister, IoPathFileOpener, IoPathSaver, IterableWrapper, IterDataPipe, JsonParser, RarArchiveLoader, Saver, StreamReader, TarArchiveLoader, WebDataset, XzFileLoader, ZipArchiveLoader, ) try: import iopath import torch HAS_IOPATH = True except ImportError: HAS_IOPATH = False skipIfNoIoPath = unittest.skipIf(not HAS_IOPATH, "no iopath") try: import rarfile HAS_RAR_TOOLS = True try: rarfile.tool_setup() subprocess.run(("rar", "-?"), check=True) except (rarfile.RarCannotExec, subprocess.CalledProcessError): HAS_RAR_TOOLS = False except (ModuleNotFoundError, FileNotFoundError): HAS_RAR_TOOLS = False skipIfNoRarTools = unittest.skipIf(not HAS_RAR_TOOLS, "no rar tools") try: import portalocker HAS_PORTALOCKER = True except ImportError: HAS_PORTALOCKER = False skipIfNoPortalocker = unittest.skipIf(not HAS_PORTALOCKER, "No portalocker installed") def filepath_fn(temp_dir_name, name: str) -> str: return os.path.join(temp_dir_name, os.path.basename(name)) def _unbatch(x): return x[0] def _noop(x): return x class TestDataPipeLocalIO(expecttest.TestCase): def setUp(self): self.temp_dir = create_temp_dir() self.temp_files = create_temp_files(self.temp_dir) self.temp_sub_dir = create_temp_dir(self.temp_dir.name) self.temp_sub_files = create_temp_files(self.temp_sub_dir, 4, False) self.temp_dir_2 = create_temp_dir() self.temp_files_2 = create_temp_files(self.temp_dir_2) self.temp_sub_dir_2 = create_temp_dir(self.temp_dir_2.name) self.temp_sub_files_2 = create_temp_files(self.temp_sub_dir_2, 4, False) def tearDown(self): try: self.temp_sub_dir.cleanup() self.temp_dir.cleanup() self.temp_sub_dir_2.cleanup() self.temp_dir_2.cleanup() except Exception as e: warnings.warn(f"TestDataPipeLocalIO was not able to cleanup temp dir due to {e}") def _custom_files_set_up(self, files): for fname, content in files.items(): temp_file_path = os.path.join(self.temp_dir.name, fname) with open(temp_file_path, "w") as f: f.write(content) def _compressed_files_comparison_helper(self, expected_files, result, check_length: bool = True): if check_length: self.assertEqual(len(expected_files), len(result)) for res, expected_file in itertools.zip_longest(result, expected_files): self.assertTrue(res is not None and expected_file is not None) self.assertEqual(os.path.basename(res[0]), os.path.basename(expected_file)) with open(expected_file, "rb") as f: self.assertEqual(res[1].read(), f.read()) res[1].close() def _unordered_compressed_files_comparison_helper(self, expected_files, result, check_length: bool = True): expected_names_to_files = {os.path.basename(f): f for f in expected_files} if check_length: self.assertEqual(len(expected_files), len(result)) for res in result: fname = os.path.basename(res[0]) 
self.assertTrue(fname is not None) self.assertTrue(fname in expected_names_to_files) with open(expected_names_to_files[fname], "rb") as f: self.assertEqual(res[1].read(), f.read()) res[1].close() def test_csv_parser_iterdatapipe(self): def make_path(fname): return f"{self.temp_dir.name}/{fname}" csv_files = {"1.csv": "key,item\na,1\nb,2", "empty.csv": "", "empty2.csv": "\n"} self._custom_files_set_up(csv_files) datapipe1 = IterableWrapper([make_path(fname) for fname in ["1.csv", "empty.csv", "empty2.csv"]]) datapipe2 = FileOpener(datapipe1, mode="b") datapipe3 = datapipe2.map(get_name) # Functional Test: yield one row at time from each file, skipping over empty content csv_parser_dp = datapipe3.parse_csv() expected_res = [["key", "item"], ["a", "1"], ["b", "2"], []] self.assertEqual(expected_res, list(csv_parser_dp)) # Functional Test: yield one row at time from each file, skipping over empty content and header csv_parser_dp = datapipe3.parse_csv(skip_lines=1) expected_res = [["a", "1"], ["b", "2"]] self.assertEqual(expected_res, list(csv_parser_dp)) # Functional Test: yield one row at time from each file with file name, skipping over empty content csv_parser_dp = datapipe3.parse_csv(return_path=True) expected_res = [("1.csv", ["key", "item"]), ("1.csv", ["a", "1"]), ("1.csv", ["b", "2"]), ("empty2.csv", [])] self.assertEqual(expected_res, list(csv_parser_dp)) # Functional Test: yield one row at time from each file as tuple instead of list, skipping over empty content csv_parser_dp = datapipe3.parse_csv(as_tuple=True) expected_res = [("key", "item"), ("a", "1"), ("b", "2"), ()] self.assertEqual(expected_res, list(csv_parser_dp)) # Reset Test: csv_parser_dp = CSVParser(datapipe3, return_path=True) n_elements_before_reset = 2 expected_res = [("1.csv", ["key", "item"]), ("1.csv", ["a", "1"]), ("1.csv", ["b", "2"]), ("empty2.csv", [])] res_before_reset, res_after_reset = reset_after_n_next_calls(csv_parser_dp, n_elements_before_reset) self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset) self.assertEqual(expected_res, res_after_reset) # __len__ Test: length isn't implemented since it cannot be known ahead of time with self.assertRaisesRegex(TypeError, "has no len"): len(csv_parser_dp) def test_csv_dict_parser_iterdatapipe(self): def get_name(path_and_stream): return os.path.basename(path_and_stream[0]), path_and_stream[1] csv_files = {"1.csv": "key,item\na,1\nb,2", "empty.csv": "", "empty2.csv": "\n"} self._custom_files_set_up(csv_files) datapipe1 = FileLister(self.temp_dir.name, "*.csv") datapipe2 = FileOpener(datapipe1, mode="b") datapipe3 = datapipe2.map(get_name) # Functional Test: yield one row at a time as dict, with the first row being the header (key) csv_dict_parser_dp = datapipe3.parse_csv_as_dict() expected_res1 = [{"key": "a", "item": "1"}, {"key": "b", "item": "2"}] self.assertEqual(expected_res1, list(csv_dict_parser_dp)) # Functional Test: yield one row at a time as dict, skip over first row, with the second row being the header csv_dict_parser_dp = datapipe3.parse_csv_as_dict(skip_lines=1) expected_res2 = [{"a": "b", "1": "2"}] self.assertEqual(expected_res2, list(csv_dict_parser_dp)) # Functional Test: yield one row at a time as dict with file name, and the first row being the header (key) csv_dict_parser_dp = datapipe3.parse_csv_as_dict(return_path=True) expected_res3 = [("1.csv", {"key": "a", "item": "1"}), ("1.csv", {"key": "b", "item": "2"})] self.assertEqual(expected_res3, list(csv_dict_parser_dp)) # Reset Test csv_dict_parser_dp = 
CSVDictParser(datapipe3) expected_res4 = [{"key": "a", "item": "1"}, {"key": "b", "item": "2"}] n_elements_before_reset = 1 res_before_reset, res_after_reset = reset_after_n_next_calls(csv_dict_parser_dp, n_elements_before_reset) self.assertEqual(expected_res4[:n_elements_before_reset], res_before_reset) self.assertEqual(expected_res4, res_after_reset) # __len__ Test: length isn't implemented since it cannot be known ahead of time with self.assertRaisesRegex(TypeError, "has no len"): len(csv_dict_parser_dp) def test_hash_checker_iterdatapipe(self): hash_dict = {} def fill_hash_dict(): for path in self.temp_files: with open(path) as f: hash_func = hashlib.sha256() content = f.read().encode("utf-8") hash_func.update(content) hash_dict[path] = hash_func.hexdigest() fill_hash_dict() datapipe1 = FileLister(self.temp_dir.name, "*") datapipe2 = FileOpener(datapipe1, mode="b") hash_check_dp = HashChecker(datapipe2, hash_dict) expected_res = list(datapipe2) # Functional Test: Ensure the DataPipe values are unchanged if the hashes are the same for (expected_path, expected_stream), (actual_path, actual_stream) in zip(expected_res, hash_check_dp): self.assertEqual(expected_path, actual_path) self.assertEqual(expected_stream.read(), actual_stream.read()) # Functional Test: Ensure the rewind option works, and the stream is empty when there is no rewind hash_check_dp_no_reset = HashChecker(datapipe2, hash_dict, rewind=False) for (expected_path, _), (actual_path, actual_stream) in zip(expected_res, hash_check_dp_no_reset): self.assertEqual(expected_path, actual_path) self.assertEqual(b"", actual_stream.read()) # Functional Test: Error when file/path is not in hash_dict hash_check_dp = HashChecker(datapipe2, {}) it = iter(hash_check_dp) with self.assertRaisesRegex(RuntimeError, "Unspecified hash for file"): next(it) # Functional Test: Error when the hash is different hash_dict[self.temp_files[0]] = "WRONG HASH" hash_check_dp = HashChecker(datapipe2, hash_dict) with self.assertRaisesRegex(RuntimeError, "does not match"): list(hash_check_dp) # Reset Test: fill_hash_dict() # Reset the dict with correct values because we changed it in the last test case hash_check_dp = datapipe2.check_hash(hash_dict) n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(hash_check_dp, n_elements_before_reset) for (expected_path, expected_stream), (actual_path, actual_stream) in zip(datapipe2, res_before_reset): self.assertEqual(expected_path, actual_path) self.assertEqual(expected_stream.read(), actual_stream.read()) for (expected_path, expected_stream), (actual_path, actual_stream) in zip(datapipe2, res_after_reset): self.assertEqual(expected_path, actual_path) self.assertEqual(expected_stream.read(), actual_stream.read()) # __len__ Test: returns the length of source DataPipe with self.assertRaisesRegex(TypeError, "FileOpenerIterDataPipe instance doesn't have valid length"): len(hash_check_dp) def test_json_parser_iterdatapipe(self): def is_empty_json(path_and_stream): return path_and_stream[0] == "empty.json" def is_nonempty_json(path_and_stream): return path_and_stream[0] != "empty.json" json_files = { "1.json": '["foo", {"bar":["baz", null, 1.0, 2]}]', "empty.json": "", "2.json": '{"__complex__": true, "real": 1, "imag": 2}', } self._custom_files_set_up(json_files) datapipe1 = IterableWrapper([f"{self.temp_dir.name}/{fname}" for fname in ["empty.json", "1.json", "2.json"]]) datapipe2 = FileOpener(datapipe1, mode="b") datapipe3 = datapipe2.map(get_name) datapipe_empty = 
datapipe3.filter(is_empty_json) datapipe_nonempty = datapipe3.filter(is_nonempty_json) empty_json_dp = datapipe_empty.parse_json_files() it = iter(empty_json_dp) # Functional Test: dp fails when empty JSON file is given with self.assertRaisesRegex(JSONDecodeError, "Expecting value"): next(it) # Functional Test: dp yields one json file at a time json_dp = datapipe_nonempty.parse_json_files() expected_res = [ ("1.json", ["foo", {"bar": ["baz", None, 1.0, 2]}]), ("2.json", {"__complex__": True, "real": 1, "imag": 2}), ] self.assertEqual(expected_res, list(json_dp)) # Reset Test: json_dp = JsonParser(datapipe_nonempty) n_elements_before_reset = 1 res_before_reset, res_after_reset = reset_after_n_next_calls(json_dp, n_elements_before_reset) self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset) self.assertEqual(expected_res, res_after_reset) # __len__ Test: length isn't implemented since it cannot be known ahead of time with self.assertRaisesRegex(TypeError, "len"): len(json_dp) # kwargs Test: json_dp = JsonParser(datapipe_nonempty, parse_int=str) expected_res = [ ("1.json", ["foo", {"bar": ["baz", None, 1.0, "2"]}]), ("2.json", {"__complex__": True, "real": "1", "imag": "2"}), ] self.assertEqual(expected_res, list(json_dp)) def test_saver_iterdatapipe(self): # Functional Test: Saving some data name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"} source_dp = IterableWrapper(sorted(name_to_data.items())) saver_dp = source_dp.save_to_disk(filepath_fn=partial(filepath_fn, self.temp_dir.name), mode="wb") res_file_paths = list(saver_dp) expected_paths = [filepath_fn(self.temp_dir.name, name) for name in name_to_data.keys()] self.assertEqual(expected_paths, res_file_paths) for name in name_to_data.keys(): p = filepath_fn(self.temp_dir.name, name) with open(p) as f: self.assertEqual(name_to_data[name], f.read().encode()) # Reset Test: saver_dp = Saver(source_dp, filepath_fn=partial(filepath_fn, self.temp_dir.name), mode="wb") n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(saver_dp, n_elements_before_reset) self.assertEqual( [filepath_fn(self.temp_dir.name, "1.txt"), filepath_fn(self.temp_dir.name, "2.txt")], res_before_reset ) self.assertEqual(expected_paths, res_after_reset) for name in name_to_data.keys(): p = filepath_fn(self.temp_dir.name, name) with open(p) as f: self.assertEqual(name_to_data[name], f.read().encode()) # __len__ Test: returns the length of source DataPipe self.assertEqual(3, len(saver_dp)) def _write_test_tar_files(self): path = os.path.join(self.temp_dir.name, "test_tar.tar") with tarfile.open(path, "w:tar") as tar: tar.add(self.temp_files[0]) tar.add(self.temp_files[1]) tar.add(self.temp_files[2]) def _write_test_tar_gz_files(self): path = os.path.join(self.temp_dir.name, "test_gz.tar.gz") with tarfile.open(path, "w:gz") as tar: tar.add(self.temp_files[0]) tar.add(self.temp_files[1]) tar.add(self.temp_files[2]) def test_tar_archive_reader_iterdatapipe(self): self._write_test_tar_files() datapipe1 = FileLister(self.temp_dir.name, "*.tar") datapipe2 = FileOpener(datapipe1, mode="b") tar_loader_dp = TarArchiveLoader(datapipe2) self._write_test_tar_gz_files() datapipe_gz_1 = FileLister(self.temp_dir.name, "*.tar.gz") datapipe_gz_2 = FileOpener(datapipe_gz_1, mode="b") gz_reader_dp = TarArchiveLoader(datapipe_gz_2) # Functional Test: Read extracted files before reaching the end of the tarfile self._compressed_files_comparison_helper(self.temp_files, tar_loader_dp, check_length=False) 
self._compressed_files_comparison_helper(self.temp_files, gz_reader_dp, check_length=False) # Load from decompressed file stream decomp_dp = datapipe_gz_2.decompress() decomp_reader_dp = TarArchiveLoader(decomp_dp) self._compressed_files_comparison_helper(self.temp_files, decomp_reader_dp, check_length=False) # Functional Test: Read extracted files after reaching the end of the tarfile data_refs = list(tar_loader_dp) self._compressed_files_comparison_helper(self.temp_files, data_refs) data_refs_gz = list(gz_reader_dp) self._compressed_files_comparison_helper(self.temp_files, data_refs_gz) # Reset Test: reset the DataPipe after reading part of it tar_loader_dp = datapipe2.load_from_tar() n_elements_before_reset = 1 res_before_reset, res_after_reset = reset_after_n_next_calls(tar_loader_dp, n_elements_before_reset) # Check result accumulated before reset self._compressed_files_comparison_helper(self.temp_files[:n_elements_before_reset], res_before_reset) # Check result accumulated after reset self._compressed_files_comparison_helper(self.temp_files, res_after_reset) # __len__ Test: doesn't have valid length with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"): len(tar_loader_dp) def _write_test_zip_files(self): path = os.path.join(self.temp_dir.name, "test_zip.zip") with zipfile.ZipFile(path, "w") as myzip: myzip.write(self.temp_files[0], arcname=os.path.basename(self.temp_files[0])) myzip.write(self.temp_files[1], arcname=os.path.basename(self.temp_files[1])) myzip.write(self.temp_files[2], arcname=os.path.basename(self.temp_files[2])) def test_zip_archive_reader_iterdatapipe(self): self._write_test_zip_files() datapipe1 = FileLister(self.temp_dir.name, "*.zip") datapipe2 = FileOpener(datapipe1, mode="b") zip_loader_dp = ZipArchiveLoader(datapipe2) # Functional Test: read extracted files before reaching the end of the zipfile self._compressed_files_comparison_helper(self.temp_files, zip_loader_dp, check_length=False) # Functional Test: read extracted files after reaching the end of the zipile data_refs = list(zip_loader_dp) self._compressed_files_comparison_helper(self.temp_files, data_refs) # Reset Test: reset the DataPipe after reading part of it zip_loader_dp = datapipe2.load_from_zip() n_elements_before_reset = 1 res_before_reset, res_after_reset = reset_after_n_next_calls(zip_loader_dp, n_elements_before_reset) # Check the results accumulated before reset self._compressed_files_comparison_helper(self.temp_files[:n_elements_before_reset], res_before_reset) # Check the results accumulated after reset self._compressed_files_comparison_helper(self.temp_files, res_after_reset) # __len__ Test: doesn't have valid length with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"): len(zip_loader_dp) def _write_test_xz_files(self): for path in self.temp_files: fname = os.path.basename(path) temp_xzfile_pathname = os.path.join(self.temp_dir.name, f"{fname}.xz") with open(path) as f: with lzma.open(temp_xzfile_pathname, "w") as xz: xz.write(f.read().encode("utf-8")) def test_xz_archive_reader_iterdatapipe(self): # Worth noting that the .tar and .zip tests write multiple files into the same compressed file # Whereas we create multiple .xz files in the same directories below. 
self._write_test_xz_files() datapipe1 = FileLister(self.temp_dir.name, "*.xz") datapipe2 = FileOpener(datapipe1, mode="b") xz_loader_dp = XzFileLoader(datapipe2) # Functional Test: Read extracted files before reaching the end of the xzfile self._unordered_compressed_files_comparison_helper(self.temp_files, xz_loader_dp, check_length=False) # Functional Test: Read extracted files after reaching the end of the xzfile data_refs = list(xz_loader_dp) self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs) # Reset Test: reset the DataPipe after reading part of it xz_loader_dp = datapipe2.load_from_xz() n_elements_before_reset = 1 res_before_reset, res_after_reset = reset_after_n_next_calls(xz_loader_dp, n_elements_before_reset) # Check result accumulated before reset self.assertEqual(n_elements_before_reset, len(res_before_reset)) self._unordered_compressed_files_comparison_helper(self.temp_files, res_before_reset, check_length=False) # Check result accumulated after reset self._unordered_compressed_files_comparison_helper(self.temp_files, res_after_reset) # Reset Test: Ensure the order is consistent between iterations for r1, r2 in zip(list(xz_loader_dp), list(xz_loader_dp)): self.assertEqual(r1[0], r2[0]) # __len__ Test: doesn't have valid length with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"): len(xz_loader_dp) def _write_test_bz2_files(self): for path in self.temp_files: fname = os.path.basename(path) temp_bz2file_pathname = os.path.join(self.temp_dir.name, f"{fname}.bz2") with open(path) as f: with bz2.open(temp_bz2file_pathname, "w") as f_bz2: f_bz2.write(f.read().encode("utf-8")) def test_bz2_archive_reader_iterdatapipe(self): self._write_test_bz2_files() filelist_dp = FileLister(self.temp_dir.name, "*.bz2") fileopen_dp = FileOpener(filelist_dp, mode="b") bz2_loader_dp = Bz2FileLoader(fileopen_dp) # Functional Test: Read extracted files before reaching the end of the bz2file self._unordered_compressed_files_comparison_helper(self.temp_files, bz2_loader_dp, check_length=False) # Functional Test: Read extracted files after reaching the end of the bz2file data_refs = list(bz2_loader_dp) self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs) # Reset Test: reset the DataPipe after reading part of it bz2_loader_dp = fileopen_dp.load_from_bz2() n_elements_before_reset = 1 res_before_reset, res_after_reset = reset_after_n_next_calls(bz2_loader_dp, n_elements_before_reset) # Check result accumulated before reset self.assertEqual(n_elements_before_reset, len(res_before_reset)) self._unordered_compressed_files_comparison_helper(self.temp_files, res_before_reset, check_length=False) # Check result accumulated after reset self._unordered_compressed_files_comparison_helper(self.temp_files, res_after_reset) # Reset Test: Ensure the order is consistent between iterations for r1, r2 in zip(list(bz2_loader_dp), list(bz2_loader_dp)): self.assertEqual(r1[0], r2[0]) # __len__ Test: doesn't have valid length with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"): len(bz2_loader_dp) def _decompressor_tar_test_helper(self, expected_files, tar_decompress_dp): for _file, child_obj in tar_decompress_dp: for expected_file, tarinfo in zip(expected_files, child_obj): if not tarinfo.isfile(): continue extracted_fobj = child_obj.extractfile(tarinfo) with open(expected_file, "rb") as f: self.assertEqual(f.read(), extracted_fobj.read()) def _decompressor_xz_test_helper(self, xz_decompress_dp): for xz_file_name, xz_stream in 
xz_decompress_dp: expected_file = xz_file_name[:-3] with open(expected_file, "rb") as f: self.assertEqual(f.read(), xz_stream.read()) def _decompressor_bz2_test_helper(self, bz2_decompress_dp): for bz2_file_name, bz2_stream in bz2_decompress_dp: expected_file = bz2_file_name.rsplit(".", 1)[0] with open(expected_file, "rb") as f: self.assertEqual(f.read(), bz2_stream.read()) def _write_single_gz_file(self): import gzip with gzip.open(f"{self.temp_dir.name}/temp.gz", "wb") as k: with open(self.temp_files[0], "rb") as f: k.write(f.read()) def test_decompressor_iterdatapipe(self): self._write_test_tar_files() self._write_test_tar_gz_files() self._write_single_gz_file() self._write_test_zip_files() self._write_test_xz_files() self._write_test_bz2_files() # Functional Test: work with .tar files tar_file_dp = FileLister(self.temp_dir.name, "*.tar") tar_load_dp = FileOpener(tar_file_dp, mode="b") tar_decompress_dp = Decompressor(tar_load_dp, file_type="tar") self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp) # Functional test: work with .tar.gz files tar_gz_file_dp = FileLister(self.temp_dir.name, "*.tar.gz") tar_gz_load_dp = FileOpener(tar_gz_file_dp, mode="b") tar_gz_decompress_dp = Decompressor(tar_gz_load_dp, file_type="tar") self._decompressor_tar_test_helper(self.temp_files, tar_gz_decompress_dp) # Functional Test: work with .gz files gz_file_dp = IterableWrapper([f"{self.temp_dir.name}/temp.gz"]) gz_load_dp = FileOpener(gz_file_dp, mode="b") gz_decompress_dp = Decompressor(gz_load_dp, file_type="gzip") for _, gz_stream in gz_decompress_dp: with open(self.temp_files[0], "rb") as f: self.assertEqual(f.read(), gz_stream.read()) # Functional Test: work with .zip files zip_file_dp = FileLister(self.temp_dir.name, "*.zip") zip_load_dp = FileOpener(zip_file_dp, mode="b") zip_decompress_dp = zip_load_dp.decompress(file_type="zip") for _, zip_stream in zip_decompress_dp: for fname in self.temp_files: with open(fname, "rb") as f: self.assertEqual(f.read(), zip_stream.read(name=os.path.basename(fname))) # Functional Test: work with .xz files xz_file_dp = FileLister(self.temp_dir.name, "*.xz") xz_load_dp = FileOpener(xz_file_dp, mode="b") xz_decompress_dp = Decompressor(xz_load_dp, file_type="lzma") self._decompressor_xz_test_helper(xz_decompress_dp) # Functional Test: work with .bz2 files bz2_file_dp = FileLister(self.temp_dir.name, "*.bz2") bz2_load_dp = FileOpener(bz2_file_dp, mode="b") bz2_decompress_dp = Decompressor(bz2_load_dp, file_type="bz2") self._decompressor_bz2_test_helper(bz2_decompress_dp) # Functional Test: work without file type as input for .tar files tar_decompress_dp = Decompressor(tar_load_dp, file_type=None) self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp) # Functional Test: work without file type as input for .xz files xz_decompress_dp = Decompressor(xz_load_dp) self._decompressor_xz_test_helper(xz_decompress_dp) # Functional Test: work without file type as input for .tar.gz files tar_gz_decompress_dp = Decompressor(tar_gz_load_dp, file_type=None) self._decompressor_tar_test_helper(self.temp_files, tar_gz_decompress_dp) # Functional Test: work without file type as input for .bz2 files bz2_decompress_dp = Decompressor(bz2_load_dp, file_type=None) self._decompressor_bz2_test_helper(bz2_decompress_dp) # Functional Test: Compression Type is works for both upper and lower case strings tar_decompress_dp = Decompressor(tar_load_dp, file_type="TAr") self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp) # Functional Test: 
Compression Type throws error for invalid file type with self.assertRaisesRegex(ValueError, "not a valid CompressionType"): Decompressor(tar_load_dp, file_type="ABC") # Reset Test: Ensure the order is consistent between iterations n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(xz_decompress_dp, n_elements_before_reset) self._decompressor_xz_test_helper(res_before_reset) self._decompressor_xz_test_helper(res_after_reset) # __len__ Test: doesn't have valid length with self.assertRaisesRegex(TypeError, "has no len"): len(tar_decompress_dp) def _write_text_files(self): name_to_data = {"1.text": b"DATA", "2.text": b"DATA", "3.text": b"DATA"} source_dp = IterableWrapper(sorted(name_to_data.items())) saver_dp = source_dp.save_to_disk(filepath_fn=partial(filepath_fn, self.temp_dir.name), mode="wb") list(saver_dp) @staticmethod def _slow_fn(tmpdirname, x): with open(os.path.join(tmpdirname, str(os.getpid())), "w") as pid_fh: pid_fh.write("anything") time.sleep(10) return (x, "str") @skipIfNoPortalocker def test_disk_cache_locks(self): with tempfile.TemporaryDirectory() as tmpdirname: file_name = os.path.join(tmpdirname, "test.bin") dp = IterableWrapper([file_name]) dp = dp.on_disk_cache(filepath_fn=_noop) dp = dp.map(functools.partial(self._slow_fn, tmpdirname)) dp = dp.end_caching(mode="t", filepath_fn=_noop, timeout=120) dp = FileOpener(dp) dp = StreamReader(dp) dl = DataLoader(dp, num_workers=10, multiprocessing_context="spawn", batch_size=1, collate_fn=_unbatch) result = list(dl) all_files = [] for (_, _, filenames) in os.walk(tmpdirname): all_files += filenames # We expect only two files, one with pid and 'downloaded' one self.assertEqual(2, len(all_files)) self.assertEqual("str", result[0][1]) # cleanup cached files for f in os.listdir(tmpdirname): os.remove(os.path.join(tmpdirname, f)) dp = CacheTimeout(2)(dp) # Calling adapter manually to work with classic DataLoader dl = DataLoader(dp, num_workers=10, multiprocessing_context="spawn", batch_size=1, collate_fn=_unbatch) with self.assertRaisesRegex(Exception, "OnDiskCache Exception"): result = list(dl) # TODO(120): this test currently only covers reading from local # filesystem. 
It needs to be modified once test data can be stored on # gdrive/onedrive @skipIfNoIoPath def test_io_path_file_lister_iterdatapipe(self): datapipe = IoPathFileLister(root=self.temp_sub_dir.name) # check all file paths within sub_folder are listed for path in datapipe: self.assertTrue(path in self.temp_sub_files) datapipe = IterableWrapper([self.temp_sub_dir.name]) datapipe = datapipe.list_files_by_iopath() for path in datapipe: self.assertTrue(path in self.temp_sub_files) @skipIfNoIoPath def test_io_path_file_lister_iterdatapipe_with_list(self): datapipe = IoPathFileLister(root=[self.temp_sub_dir.name, self.temp_sub_dir_2.name]) file_lister = list(datapipe) file_lister.sort() all_temp_files = list(self.temp_sub_files + self.temp_sub_files_2) all_temp_files.sort() # check all file paths within sub_folder are listed self.assertEqual(file_lister, all_temp_files) datapipe = IterableWrapper([self.temp_sub_dir.name, self.temp_sub_dir_2.name]) datapipe = datapipe.list_files_by_iopath() results = list(datapipe) results.sort() self.assertEqual(results, all_temp_files) @skipIfNoIoPath def test_io_path_file_loader_iterdatapipe(self): datapipe1 = IoPathFileLister(root=self.temp_sub_dir.name) datapipe2 = IoPathFileOpener(datapipe1) # check contents of file match for _, f in datapipe2: self.assertEqual(f.read(), "0123456789abcdef") # Reset Test: Ensure the resulting streams are still readable after the DataPipe is reset/exhausted self._write_text_files() lister_dp = FileLister(self.temp_dir.name, "*.text") iopath_file_opener_dp = lister_dp.open_files_by_iopath(mode="rb") n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(iopath_file_opener_dp, n_elements_before_reset) self.assertEqual(2, len(res_before_reset)) self.assertEqual(3, len(res_after_reset)) for _name, stream in res_before_reset: self.assertEqual(b"DATA", stream.read()) for _name, stream in res_after_reset: self.assertEqual(b"DATA", stream.read()) @skipIfNoIoPath def test_io_path_saver_iterdatapipe(self): # Functional Test: Saving some data name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"} source_dp = IterableWrapper(sorted(name_to_data.items())) saver_dp = source_dp.save_by_iopath(filepath_fn=partial(filepath_fn, self.temp_dir.name), mode="wb") res_file_paths = list(saver_dp) expected_paths = [filepath_fn(self.temp_dir.name, name) for name in name_to_data.keys()] self.assertEqual(expected_paths, res_file_paths) for name in name_to_data.keys(): p = filepath_fn(self.temp_dir.name, name) with open(p) as f: self.assertEqual(name_to_data[name], f.read().encode()) # Reset Test: saver_dp = IoPathSaver(source_dp, filepath_fn=partial(filepath_fn, self.temp_dir.name), mode="wb") n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(saver_dp, n_elements_before_reset) self.assertEqual( [filepath_fn(self.temp_dir.name, "1.txt"), filepath_fn(self.temp_dir.name, "2.txt")], res_before_reset ) self.assertEqual(expected_paths, res_after_reset) for name in name_to_data.keys(): p = filepath_fn(self.temp_dir.name, name) with open(p) as f: self.assertEqual(name_to_data[name], f.read().encode()) # __len__ Test: returns the length of source DataPipe self.assertEqual(3, len(saver_dp)) @skipIfNoIoPath def test_io_path_saver_file_lock(self): # Same filename with different name name_to_data = {"1.txt": b"DATA1", "1.txt": b"DATA2", "2.txt": b"DATA3", "2.txt": b"DATA4"} # noqa: F601 # Add sharding_filter to shard data into 2 source_dp = 
IterableWrapper(list(name_to_data.items())).sharding_filter() # Use appending as the mode saver_dp = source_dp.save_by_iopath(filepath_fn=partial(filepath_fn, self.temp_dir.name), mode="ab") import torch.utils.data.graph_settings from torch.utils.data import DataLoader num_workers = 2 line_lengths = [] dl = DataLoader(saver_dp, num_workers=num_workers, multiprocessing_context="spawn") for filename in dl: with open(filename[0]) as f: lines = f.readlines() x = len(lines) line_lengths.append(x) self.assertEqual(x, 1) self.assertEqual(num_workers, len(line_lengths)) def _write_test_rar_files(self): # `rarfile` can only read but not write .rar archives so we use to system utilities rar_archive_name = os.path.join(self.temp_dir.name, "test_rar") subprocess.run(("rar", "a", rar_archive_name + ".rar", *self.temp_files), check=True) # Nested RAR subprocess.run(("rar", "a", rar_archive_name + "1.rar", self.temp_files[0]), check=True) subprocess.run(("rar", "a", rar_archive_name + "2.rar", *self.temp_files[1:]), check=True) subprocess.run( ("rar", "a", rar_archive_name + "_nested.rar", rar_archive_name + "1.rar", rar_archive_name + "2.rar"), check=True, ) # Nested RAR in TAR with tarfile.open(rar_archive_name + "_nested.tar", "w:tar") as tar: tar.add(rar_archive_name + "1.rar") tar.add(rar_archive_name + "2.rar") @skipIfNoRarTools def test_rar_archive_loader(self): self._write_test_rar_files() datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar.rar")]) datapipe2 = FileOpener(datapipe1, mode="b") rar_loader_dp = RarArchiveLoader(datapipe2) # Functional Test: read extracted files before reaching the end of the rarfile self._unordered_compressed_files_comparison_helper(self.temp_files, rar_loader_dp, check_length=False) # Functional Test: read extracted files after reaching the end of the rarfile data_refs = list(rar_loader_dp) self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs) # Reset Test: reset the DataPipe after reading part of it rar_loader_dp = datapipe2.load_from_rar() n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(rar_loader_dp, n_elements_before_reset) # Check the results accumulated before reset self._unordered_compressed_files_comparison_helper(self.temp_files[:n_elements_before_reset], res_before_reset) # Check the results accumulated after reset self._unordered_compressed_files_comparison_helper(self.temp_files, res_after_reset) # __len__ Test: doesn't have valid length with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"): len(rar_loader_dp) # Nested RAR datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar_nested.rar")]) datapipe2 = FileOpener(datapipe1, mode="b") rar_loader_dp_1 = RarArchiveLoader(datapipe2) rar_loader_dp_2 = RarArchiveLoader(rar_loader_dp_1) with self.assertRaisesRegex(ValueError, "Nested RAR archive is not supported"): list(rar_loader_dp_2) # Nested RAR in TAR datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar_nested.tar")]) datapipe2 = FileOpener(datapipe1, mode="b") tar_loader_dp = TarArchiveLoader(datapipe2) rar_loader_dp = RarArchiveLoader(tar_loader_dp) # Functional Test: read extracted files before reaching the end of the rarfile self._unordered_compressed_files_comparison_helper(self.temp_files, rar_loader_dp, check_length=False) # Functional Test: read extracted files after reaching the end of the rarfile data_refs = list(rar_loader_dp) self._unordered_compressed_files_comparison_helper(self.temp_files, 
            data_refs)

    def _add_data_to_wds_tar(self, archive, name, value):
        if isinstance(value, str):
            value = value.encode()
        info = tarfile.TarInfo(name)
        info.size = len(value)
        archive.addfile(info, io.BytesIO(value))

    def _create_wds_tar(self, dest, nsamples):
        with tarfile.open(dest, mode="w") as archive:
            for i in range(nsamples):
                self._add_data_to_wds_tar(archive, f"data/{i}.txt", f"text{i}")
                self._add_data_to_wds_tar(archive, f"data/{i}.bin", f"bin{i}")

    def test_webdataset(self) -> None:
        # Functional Test: groups samples correctly
        source_dp = IterableWrapper(
            # simulated tar file content
            [
                ("/path/to/file1.jpg", b"1"),
                ("/path/to/_something_", b"nothing"),
                ("/path/to/file1.cls", b"2"),
                ("/path/to/file2.jpg", b"3"),
                ("/path/to/file2.cls", b"4"),
            ]
        )
        web_dataset = WebDataset(source_dp)
        self.assertEqual(
            # expected grouped output
            [
                {".jpg": b"1", ".cls": b"2", "__key__": "/path/to/file1"},
                {".jpg": b"3", ".cls": b"4", "__key__": "/path/to/file2"},
            ],
            list(web_dataset),
        )

    def test_webdataset2(self) -> None:
        # Setup
        nsamples = 10
        self._create_wds_tar(os.path.join(self.temp_dir.name, "wds.tar"), nsamples)

        def decode(item):
            key, value = item
            if key.endswith(".txt"):
                return key, value.read().decode("utf-8")
            if key.endswith(".bin"):
                return key, value.read().decode("utf-8")
            # Fall through for any other extension so `map` never yields `None`
            return key, value

        datapipe1 = FileLister(self.temp_dir.name, "wds*.tar")
        datapipe2 = FileOpener(datapipe1, mode="b")
        dataset = datapipe2.load_from_tar().map(decode).webdataset()
        items = list(dataset)
        assert len(items) == nsamples
        assert items[0][".txt"] == "text0"
        assert items[9][".bin"] == "bin9"


if __name__ == "__main__":
    unittest.main()
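# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file above): the tar -> webdataset
# pipeline that `test_webdataset2` exercises, applied to a hypothetical
# `train.tar` whose members are named like `sample0.jpg` / `sample0.cls`.
# ---------------------------------------------------------------------------
from torchdata.datapipes.iter import FileLister, FileOpener


def decode(item):
    key, stream = item
    # Decode class labels to int, leave image bytes untouched
    if key.endswith(".cls"):
        return key, int(stream.read())
    return key, stream.read()


archives = FileLister(".", "train.tar")
streams = FileOpener(archives, mode="b")
# `webdataset()` groups tar members that share a basename into one dict sample
samples = streams.load_from_tar().map(decode).webdataset()
for sample in samples:
    image_bytes, label = sample[".jpg"], sample[".cls"]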
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import os
import sys
import tempfile
import unittest

import torch.multiprocessing as mp
from torch.testing._internal.common_utils import slowTest
from torch.utils.data import DataLoader

current = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.dirname(current)
sys.path.insert(0, ROOT)

from examples.audio.librispeech import LibriSpeech


class TestAudioExamples(unittest.TestCase):
    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.temp_dir.cleanup()

    def _test_helper(self, fn, *args, **kwargs):
        dp = fn(*args, **kwargs)
        _ = list(dp)

    @staticmethod
    def _collate_fn(batch):
        return batch

    def _test_DL_helper(self, fn, *args, **kwargs):
        dp = fn(*args, **kwargs)
        mp.set_sharing_strategy("file_system")
        dl = DataLoader(
            dp,
            batch_size=8,
            num_workers=4,
            collate_fn=TestAudioExamples._collate_fn,
            multiprocessing_context="fork",  # Using fork here because `torchaudio.load` doesn't work well with spawn
        )
        for _ in dl:
            pass

    @slowTest
    def test_LibriSpeech_dev(self) -> None:
        root = self.temp_dir.name
        self._test_helper(LibriSpeech, root, "dev-other")
        # With cache and DataLoader
        self._test_DL_helper(LibriSpeech, root, "dev-other")

    @unittest.skipIf(True, "Dataset is too large to run on CI")
    def test_LibriSpeech_train(self) -> None:
        root = self.temp_dir.name
        self._test_helper(LibriSpeech, root, "train-clean-100")
        # With cache and DataLoader
        self._test_DL_helper(LibriSpeech, root, "train-clean-100")


if __name__ == "__main__":
    unittest.main()
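# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): consuming the LibriSpeech
# example datapipe with a DataLoader, as `_test_DL_helper` does. The root
# directory is hypothetical and the dataset downloads on first use.
# ---------------------------------------------------------------------------
from examples.audio.librispeech import LibriSpeech
from torch.utils.data import DataLoader

dp = LibriSpeech("/tmp/librispeech", "dev-other")
dl = DataLoader(dp, batch_size=8, collate_fn=lambda batch: batch)
first_batch = next(iter(dl))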
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import unittest

import expecttest
from torchdata.datapipes.iter import MapToIterConverter
from torchdata.datapipes.map import InMemoryCacheHolder, MapDataPipe, SequenceWrapper, UnZipper


class TestMapDataPipe(expecttest.TestCase):
    def test_unzipper_mapdatapipe(self) -> None:
        source_dp = SequenceWrapper([(i, i + 10, i + 20) for i in range(10)])

        # Functional Test: unzips each sequence, with `sequence_length` specified
        dp1: MapDataPipe
        dp2: MapDataPipe
        dp3: MapDataPipe
        dp1, dp2, dp3 = UnZipper(source_dp, sequence_length=3)  # type: ignore[misc]
        self.assertEqual(list(range(10)), list(dp1))
        self.assertEqual(list(range(10, 20)), list(dp2))
        self.assertEqual(list(range(20, 30)), list(dp3))

        # Functional Test: skipping over specified values
        dp2, dp3 = source_dp.unzip(sequence_length=3, columns_to_skip=[0])
        self.assertEqual(list(range(10, 20)), list(dp2))
        self.assertEqual(list(range(20, 30)), list(dp3))

        (dp2,) = source_dp.unzip(sequence_length=3, columns_to_skip=[0, 2])
        self.assertEqual(list(range(10, 20)), list(dp2))

        source_dp = SequenceWrapper([(i, i + 10, i + 20, i + 30) for i in range(10)])
        dp2, dp3 = source_dp.unzip(sequence_length=4, columns_to_skip=[0, 3])
        self.assertEqual(list(range(10, 20)), list(dp2))
        self.assertEqual(list(range(20, 30)), list(dp3))

        # __len__ Test: the lengths of child DataPipes are correct
        self.assertEqual((10, 10), (len(dp2), len(dp3)))

    def test_map_to_iter_converter_datapipe(self) -> None:
        # Functional Test: ensure the conversion without indices input is correct
        source_dp = SequenceWrapper(range(10))
        iter_dp = source_dp.to_iter_datapipe()
        self.assertEqual(list(range(10)), list(iter_dp))

        # Functional Test: ensure conversion with custom indices is correct
        source_dp2 = SequenceWrapper({"a": 0, "b": 1, "c": 2})
        iter_dp2 = MapToIterConverter(source_dp2, indices=["a", "b", "c"])
        self.assertEqual([0, 1, 2], list(iter_dp2))

        # __len__ Test: the lengths of the outputs are correct
        self.assertEqual(10, len(iter_dp))
        self.assertEqual(3, len(iter_dp2))

    def test_in_memory_cache_holder_mapdatapipe(self) -> None:
        source_dp = SequenceWrapper(range(10))
        cache_dp = source_dp.in_memory_cache()

        # Functional Test: Cache DP should just return the data without changing the values
        self.assertEqual(list(range(10)), list(cache_dp))

        # Functional Test: Ensure the objects are the same ones from source DataPipe
        cache_dp = InMemoryCacheHolder(source_dp)  # type: ignore[arg-type]
        res1 = list(cache_dp)
        res2 = list(cache_dp)
        # Wrap the generators in `all(...)`; a bare generator is always truthy
        self.assertTrue(all(id(source) == id(cache) for source, cache in zip(source_dp, res1)))
        self.assertTrue(all(id(source) == id(cache) for source, cache in zip(source_dp, res2)))

        # __len__ Test: inherits length from source_dp
        self.assertEqual(10, len(cache_dp))


if __name__ == "__main__":
    unittest.main()
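# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test above): combining the MapDataPipe
# APIs tested here — in-memory caching followed by conversion to an
# IterDataPipe over explicit indices. The dict-backed example is hypothetical.
# ---------------------------------------------------------------------------
from torchdata.datapipes.map import SequenceWrapper

map_dp = SequenceWrapper({"a": 0, "b": 1, "c": 2})
cached = map_dp.in_memory_cache()  # values are materialized once per index
iter_dp = cached.to_iter_datapipe(indices=["a", "b", "c"])
assert list(iter_dp) == [0, 1, 2]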
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os import queue import random import socket import sys import unittest from functools import partial from unittest import TestCase import numpy as np import torch import torch.distributed as dist import torch.multiprocessing as mp from torch.testing._internal.common_utils import instantiate_parametrized_tests, parametrize from torch.utils.data import DataLoader from torchdata.dataloader2 import DataLoader2, DistributedReadingService from torchdata.datapipes.iter import IterableWrapper from torchdata.datapipes.iter.util.distributed import PrefetchTimeoutError TEST_MASTER_ADDR = "127.0.0.1" DEFAULT_WORLD_SIZE = 2 if not dist.is_available(): print("Distributed not available, skipping tests", file=sys.stderr) sys.exit(0) _backends = ["gloo"] if dist.is_mpi_available(): _backends.append("mpi") if dist.is_nccl_available() and torch.cuda.device_count() > 0: _backends.append("nccl") world_size_parametrize = parametrize("world_size", [1, DEFAULT_WORLD_SIZE]) backend_parametrize = parametrize("backend", _backends) def abs_path(path): return os.path.join(os.path.dirname(__file__), os.path.normpath(path)) def _get_open_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(("", 0)) port = s.getsockname()[1] s.close() return str(port) class TerminateSignal: pass # TODO(ejguan): Use queue for all distributed tests def launch_distributed_training(backend, world_size, *args, fn): os.environ["MASTER_ADDR"] = TEST_MASTER_ADDR os.environ["MASTER_PORT"] = _get_open_port() ctx = mp.get_context("spawn") q = ctx.Queue() ps = [] for rank in range(world_size): p = ctx.Process( target=fn, args=( rank, world_size, backend, q, *args, ), ) p.start() ps.append(p) res = [] while True: try: d = q.get() if isinstance(d, TerminateSignal): break res.append(d) except queue.Empty: continue for p in ps: p.join() return res def _dist_iterate_one_epoch(dl, seed=None): r""" Iterate a full epoch of DataLoader and set seeds for global RNGs if provided. """ if seed is not None: torch.manual_seed(seed) random.seed(seed) np.random.seed(seed) res = [] for d in dl: res.append(d) # Simulate training synchronization dist.barrier() return res def _finalize_distributed_queue(rank, q): r""" Synchronize all distributed processes to guarantee all data have been put into the Multiprocessing Queue. 
""" pg = dist.new_group(backend="gloo") end_tensor = torch.tensor([rank], dtype=torch.int64) dist.all_reduce(end_tensor, group=pg) if rank == 0: q.put(TerminateSignal()) dist.destroy_process_group(pg) class DistributedTest(TestCase): @staticmethod def _test_fullsync(rank, world_size, backend, q): dist.init_process_group(backend, rank=rank, world_size=world_size) # Use a prime number to make sure uneven data sharding data_length = 23 dp = IterableWrapper(list(range(data_length))).sharding_filter() torch.utils.data.graph_settings.apply_sharding(dp, world_size, rank) dp1 = dp.fullsync() for _ in range(2): res = _dist_iterate_one_epoch(dp1) assert res == list(range(rank, data_length // world_size * world_size, world_size)) # Timeout Test dp2 = dp.fullsync(timeout=0.01) try: for _ in range(2): _ = list(dp2) except Exception as e: assert isinstance(e, PrefetchTimeoutError) # Test that reset/shutdown does not hang while paused dp3 = dp.fullsync() it = iter(dp3) next(it) dp3.pause() it2 = iter(dp3) # Reset next(it2) dp4 = dp.prefetch(2) it = iter(dp4) next(it) dp4.pause() it2 = iter(dp4) # Reset next(it2) _finalize_distributed_queue(rank, q) @world_size_parametrize @backend_parametrize def test_fullsync(self, world_size, backend) -> None: world_size = world_size if backend != "nccl" else torch.cuda.device_count() launch_distributed_training(backend, world_size, fn=DistributedTest._test_fullsync) @staticmethod def _get_dataloader(data_length: int, dl2: bool, shuffle: bool, rs=None): data_source = IterableWrapper(list(range(data_length))) dp = data_source.sharding_filter() if shuffle: dp = dp.shuffle() if dl2: if rs is None: rs = DistributedReadingService() dl = DataLoader2(dp, reading_service=rs) else: dp = dp.fullsync() dl = DataLoader(dp) return dl @staticmethod def _test_distributed_training(dl2, rank, world_size, backend, q): dist.init_process_group(backend, rank=rank, world_size=world_size) # Use a prime number to make sure uneven data sharding data_length = 23 # No shuffle dl = DistributedTest._get_dataloader(data_length, dl2=dl2, shuffle=False) res = _dist_iterate_one_epoch(dl) assert sorted(res) == list(range(rank, data_length // world_size * world_size, world_size)) # Shuffle dl = DistributedTest._get_dataloader(data_length, dl2=dl2, shuffle=True) results = [] for _ in range(2): res = _dist_iterate_one_epoch(dl, seed=123) results.append(res) assert results[0] == results[1] # Different seed res = _dist_iterate_one_epoch(dl, seed=321) results.append(res) assert len(results[0]) == len(results[2]) assert results[0] != results[2] _finalize_distributed_queue(rank, q) if dl2: dl.shutdown() @backend_parametrize def test_distributed_dl2(self, backend) -> None: world_size = DEFAULT_WORLD_SIZE if backend != "nccl" else torch.cuda.device_count() launch_distributed_training(backend, world_size, fn=partial(DistributedTest._test_distributed_training, True)) @backend_parametrize def test_elastic_training_dl2(self, backend) -> None: world_size = DEFAULT_WORLD_SIZE if backend != "nccl" else torch.cuda.device_count() nnodes = 1 from torch.distributed import run run.main( [ "--run_path", f"--nnodes={nnodes}", f"--nproc_per_node={world_size}", abs_path("bin/elastic_training.py"), "--" + backend, "--dl2", ], ) @backend_parametrize def test_distributed_dl1(self, backend) -> None: world_size = DEFAULT_WORLD_SIZE if backend != "nccl" else torch.cuda.device_count() launch_distributed_training(backend, world_size, fn=partial(DistributedTest._test_distributed_training, False)) @unittest.skipIf(sys.version_info < (3, 
8), "Torch Elastic requires Python >= 3.8") @backend_parametrize def test_elastic_training_dl1(self, backend) -> None: world_size = DEFAULT_WORLD_SIZE if backend != "nccl" else torch.cuda.device_count() nnodes = 1 from torch.distributed import run run.main( [ "--run_path", f"--nnodes={nnodes}", f"--nproc_per_node={world_size}", abs_path("bin/elastic_training.py"), "--" + backend, "--dl1", ], ) instantiate_parametrized_tests(DistributedTest) if __name__ == "__main__": unittest.main()
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os import pickle import unittest import warnings from functools import partial from io import StringIO from operator import itemgetter from typing import List import expecttest import torchdata.datapipes.iter as iterdp import torchdata.datapipes.map as mapdp from _utils._common_utils_for_test import create_temp_dir, create_temp_files from torch.utils.data.datapipes.utils.common import DILL_AVAILABLE from torchdata.datapipes.iter import IterableWrapper from torchdata.datapipes.map import SequenceWrapper if DILL_AVAILABLE: import dill dill.extend(use_dill=False) try: import datasets except ImportError: datasets = None try: import fsspec except ImportError: fsspec = None try: import iopath except ImportError: iopath = None try: import subprocess import rarfile try: rarfile.tool_setup() subprocess.run(("rar", "-?"), check=True) except (rarfile.RarCannotExec, subprocess.CalledProcessError): rarfile = None except (ModuleNotFoundError, FileNotFoundError): rarfile = None try: import torcharrow import torcharrow.dtypes as dt DTYPE = dt.Struct([dt.Field("Values", dt.int32)]) except ImportError: torcharrow = None dt = None DTYPE = None def _fake_batch_fn(batch): return [d + 1 for d in batch] def _fake_fn_ls(x): return [x, x] def _filepath_fn(name: str, dir) -> str: return os.path.join(dir, os.path.basename(name)) def _filter_by_module_availability(datapipes): filter_set = set() if datasets is None: filter_set.update([iterdp.HuggingFaceHubReader]) if fsspec is None: filter_set.update([iterdp.FSSpecFileLister, iterdp.FSSpecFileOpener, iterdp.FSSpecSaver]) if iopath is None: filter_set.update([iterdp.IoPathFileLister, iterdp.IoPathFileOpener, iterdp.IoPathSaver]) if rarfile is None: filter_set.update([iterdp.RarArchiveLoader]) if torcharrow is None or not DILL_AVAILABLE: filter_set.update([iterdp.DataFrameMaker, iterdp.ParquetDataFrameLoader]) return [dp for dp in datapipes if dp[0] not in filter_set] def _convert_to_tensor(data): return torch.tensor(data) class TestIterDataPipeSerialization(expecttest.TestCase): def setUp(self): self.temp_dir = create_temp_dir() self.temp_files = create_temp_files(self.temp_dir) self.temp_sub_dir = create_temp_dir(self.temp_dir.name) self.temp_sub_files = create_temp_files(self.temp_sub_dir, 4, False) def tearDown(self): try: self.temp_sub_dir.cleanup() self.temp_dir.cleanup() except Exception as e: warnings.warn(f"TestIterDataPipeSerialization was not able to cleanup temp dir due to {e}") def _serialization_test_helper(self, datapipe, use_dill): if use_dill: serialized_dp = dill.dumps(datapipe) deserialized_dp = dill.loads(serialized_dp) else: serialized_dp = pickle.dumps(datapipe) deserialized_dp = pickle.loads(serialized_dp) try: self.assertEqual(list(datapipe), list(deserialized_dp)) except AssertionError as e: print(f"{datapipe} is failing.") raise e def _serialization_dataframe_test_helper(self, datapipe, use_dill): if use_dill: serialized_dp = dill.dumps(datapipe) deserialized_dp = dill.loads(serialized_dp) else: serialized_dp = pickle.dumps(datapipe) deserialized_dp = pickle.loads(serialized_dp) for df1, df2 in zip(datapipe, deserialized_dp): for exp, act in zip(df1, df2): self.assertEqual(exp, act) def _serialization_test_for_single_dp(self, dp, use_dill, is_dataframe=False): test_helper_fn = self._serialization_dataframe_test_helper if is_dataframe 
else self._serialization_test_helper # 1. Testing for serialization before any iteration starts test_helper_fn(dp, use_dill) # 2. Testing for serialization after DataPipe is partially read it = iter(dp) _ = next(it) test_helper_fn(dp, use_dill) # 3. Testing for serialization after DataPipe is fully read it = iter(dp) _ = list(it) test_helper_fn(dp, use_dill) def _serialization_test_for_dp_with_children(self, dp1, dp2, use_dill): # 1. Testing for serialization before any iteration starts self._serialization_test_helper(dp1, use_dill=use_dill) self._serialization_test_helper(dp2, use_dill=use_dill) # 2. Testing for serialization after DataPipe is partially read it1, it2 = iter(dp1), iter(dp2) _, _ = next(it1), next(it2) self._serialization_test_helper(dp1, use_dill=use_dill) self._serialization_test_helper(dp2, use_dill=use_dill) # 2.5. Testing for serialization after one child DataPipe is fully read # (Only for DataPipes with children DataPipes) it1 = iter(dp1) _ = list(it1) # fully read one child self._serialization_test_helper(dp1, use_dill=use_dill) self._serialization_test_helper(dp2, use_dill=use_dill) # 3. Testing for serialization after DataPipe is fully read it2 = iter(dp2) _ = list(it2) # fully read the other child self._serialization_test_helper(dp1, use_dill=use_dill) self._serialization_test_helper(dp2, use_dill=use_dill) def test_serializable(self): # A tuple of 4 objects # (DataPipeConstructor, custom_input_datapipe=None, dp_args=(), dp_kwargs={}) picklable_datapipes: List = [ (iterdp.BatchMapper, IterableWrapper([(0, 0), (0, 0), (0, 0), (0, 0)]), (_fake_batch_fn, 2, 1), {}), (iterdp.BucketBatcher, IterableWrapper([0, 0, 0, 0, 0, 0, 0]), (5,), {}), (iterdp.Bz2FileLoader, None, (), {}), ( iterdp.CSVDictParser, IterableWrapper( [("f1", StringIO("Label,1,1\nLabel,2,2\nLabel,3,3")), ("f2", StringIO("L,1,1\r\nL,2,2\r\nL,3,3"))] ), (), {}, ), ( iterdp.CSVParser, IterableWrapper( [("f1", StringIO("Label,1,1\nLabel,2,2\nLabel,3,3")), ("f2", StringIO("L,1,1\r\nL,2,2\r\nL,3,3"))] ), (), {}, ), (iterdp.Cycler, None, (2,), {}), (iterdp.DataFrameMaker, IterableWrapper([(i,) for i in range(3)]), (), {"dtype": DTYPE}), (iterdp.Decompressor, None, (), {}), (iterdp.Dropper, IterableWrapper([(0, 0), (0, 0), (0, 0), (0, 0)]), ([1]), {}), (iterdp.Enumerator, None, (2,), {}), (iterdp.FlatMapper, None, (_fake_fn_ls,), {}), (iterdp.ShuffledFlatMapper, None, (_fake_fn_ls,), {"buffer_size": 1}), (iterdp.Flattener, IterableWrapper([(0, (0, 1)), (0, (0, 1)), (0, (0, 1)), (0, (0, 1))]), ([1]), {}), (iterdp.FSSpecFileLister, ".", (), {}), (iterdp.FSSpecFileOpener, None, (), {}), ( iterdp.FSSpecSaver, IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]), (), {"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)}, ), (iterdp.GDriveReader, None, (), {}), (iterdp.HashChecker, None, ({},), {}), (iterdp.Header, None, (3,), {}), (iterdp.HttpReader, None, (), {}), (iterdp.HuggingFaceHubReader, None, (), {}), # TODO(593): (ejguan): Deterministic serialization is required # (iterdp.InBatchShuffler, IterableWrapper(range(10)).batch(3), (), {}), (iterdp.InMemoryCacheHolder, None, (), {}), (iterdp.IndexAdder, IterableWrapper([{"a": 1, "b": 2}, {"c": 3, "a": 1}]), ("label",), {}), (iterdp.IoPathFileLister, ".", (), {}), (iterdp.IoPathFileOpener, None, (), {}), ( iterdp.IoPathSaver, IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]), (), {"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)}, ), ( iterdp.IterKeyZipper,
IterableWrapper([("a", 100), ("b", 200), ("c", 300)]), (IterableWrapper([("a", 1), ("b", 2), ("c", 3)]), itemgetter(0), itemgetter(0)), {}, ), ( iterdp.JsonParser, IterableWrapper( [ ("1.json", StringIO('["fo", {"ba":["baz", null, 1.0, 2]}]')), ("2.json", StringIO('{"__cx__": true, "r": 1, "i": 2}')), ] ), (), {}, ), (iterdp.LengthSetter, None, (3,), {}), ( iterdp.LineReader, IterableWrapper( [("file1", StringIO("Line1\nLine2")), ("file2", StringIO("Line2,1\r\nLine2,2\r\nLine2,3"))] ), (), {}, ), (iterdp.MapToIterConverter, SequenceWrapper(range(10)), (), {}), ( iterdp.MaxTokenBucketizer, IterableWrapper(["1", "22", "1", "4444", "333", "1", "22", "22", "333"]), (4,), {}, ), ( iterdp.MapKeyZipper, IterableWrapper([("a", 1), ("b", 2), ("c", 3)]), (SequenceWrapper({"a": 100, "b": 200, "c": 300}), itemgetter(0)), {}, ), ( iterdp.MultiplexerLongest, IterableWrapper(range(10)), (), {}, ), (iterdp.OnDiskCacheHolder, None, (), {}), (iterdp.OnlineReader, None, (), {}), ( iterdp.ParagraphAggregator, IterableWrapper([("f1", "L1"), ("f1", "L2"), ("f2", "21"), ("f2", "22")]), (), {}, ), (iterdp.Prefetcher, None, (), {}), (iterdp.ParquetDataFrameLoader, None, (), {"dtype": DTYPE}), (iterdp.RarArchiveLoader, None, (), {}), ( iterdp.Rows2Columnar, IterableWrapper([[{"a": 1}, {"b": 2, "a": 1}], [{"a": 1, "b": 200}, {"c": 3}]]), (), {}, ), (iterdp.Repeater, None, (2,), {}), (iterdp.SampleMultiplexer, {IterableWrapper([0] * 10): 0.5, IterableWrapper([1] * 10): 0.5}, (), {}), ( iterdp.Saver, IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]), (), {"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)}, ), (iterdp.Slicer, IterableWrapper([(0, 0), (0, 0), (0, 0), (0, 0)]), ([1]), {}), (iterdp.TarArchiveLoader, None, (), {}), # TODO(594): Add serialization tests for optional DataPipe # (iterdp.TFRecordLoader, None, (), {}), (iterdp.ThreadPoolMapper, None, (_fake_fn_ls,), {}), (iterdp.UnZipper, IterableWrapper([(i, i + 10) for i in range(10)]), (), {"sequence_length": 2}), (iterdp.WebDataset, IterableWrapper([("foo.txt", b"1"), ("bar.txt", b"2")]), (), {}), (iterdp.XzFileLoader, None, (), {}), (iterdp.ZipArchiveLoader, None, (), {}), (iterdp.ZipperLongest, IterableWrapper(range(10)), (), {}), ] picklable_datapipes = _filter_by_module_availability(picklable_datapipes) # Skipping value comparison for these DataPipes # Most of them return streams not comparable by `self.assertEqual` # Others are similar to caching where the outputs depend on other DataPipes dp_skip_comparison = { iterdp.Bz2FileLoader, iterdp.Decompressor, iterdp.FileOpener, iterdp.FSSpecFileOpener, iterdp.GDriveReader, iterdp.IoPathFileOpener, iterdp.HashChecker, iterdp.HttpReader, iterdp.HuggingFaceHubReader, iterdp.OnDiskCacheHolder, iterdp.OnlineReader, iterdp.ParquetDataFrameLoader, iterdp.SampleMultiplexer, iterdp.RarArchiveLoader, iterdp.TarArchiveLoader, iterdp.TFRecordLoader, iterdp.XzFileLoader, iterdp.ZipArchiveLoader, } # These DataPipes produce multiple DataPipes as outputs and those should be compared dp_compare_children = {iterdp.UnZipper} for dpipe, custom_input, dp_args, dp_kwargs in picklable_datapipes: try: # Creating input (usually a DataPipe) for the specific dpipe being tested if custom_input is None: custom_input = IterableWrapper(range(10)) if dpipe in dp_skip_comparison: # Mke sure they are picklable and loadable (no value comparison) datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg] serialized_dp = pickle.dumps(datapipe) _ = 
pickle.loads(serialized_dp) elif dpipe in dp_compare_children: # DataPipes that have children dp1, dp2 = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg] self._serialization_test_for_dp_with_children(dp1, dp2, use_dill=False) else: # Single DataPipe that requires comparison datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg] is_dataframe = issubclass(dpipe, (iterdp.DataFrameMaker, iterdp.ParquetDataFrameLoader)) self._serialization_test_for_single_dp(datapipe, use_dill=False, is_dataframe=is_dataframe) except Exception as e: print(f"{dpipe} is failing.") raise e def test_serializable_with_dill(self): """Only for DataPipes that take in a function as argument""" input_dp = IterableWrapper(range(10)) ref_idp = IterableWrapper(range(10)) ref_mdp = SequenceWrapper(range(10)) unpicklable_datapipes: List = [ (iterdp.BatchMapper, (lambda batch: [d + 1 for d in batch], 2), {}), (iterdp.FlatMapper, (lambda x: [x, x],), {}), (iterdp.ShuffledFlatMapper, (lambda x: [x, x],), {"buffer_size": 1}), (iterdp.IterKeyZipper, (ref_idp, lambda x: x, None, True, 100), {}), (iterdp.MapKeyZipper, (ref_mdp, lambda x: x), {}), (iterdp.OnDiskCacheHolder, (lambda x: x,), {}), (iterdp.ParagraphAggregator, (lambda x: x,), {}), (iterdp.ThreadPoolMapper, (lambda x: x,), {}), ] # Skipping value comparison for these DataPipes dp_skip_comparison = {iterdp.OnDiskCacheHolder, iterdp.ParagraphAggregator} for dpipe, dp_args, dp_kwargs in unpicklable_datapipes: if DILL_AVAILABLE: try: if dpipe in dp_skip_comparison: # Make sure they are picklable/loadable (no value comparison) datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg] serialized_dp = dill.dumps(datapipe) _ = dill.loads(serialized_dp) else: datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg] self._serialization_test_for_single_dp(datapipe, use_dill=True) except Exception as e: print(f"{dpipe} is failing.") raise e else: dp_no_attribute_error = (iterdp.OnDiskCacheHolder,) try: with self.assertWarnsRegex(UserWarning, r"^Local function is not supported by pickle"): datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg] if isinstance(datapipe, dp_no_attribute_error): _ = pickle.dumps(datapipe) else: with self.assertRaises(AttributeError): _ = pickle.dumps(datapipe) except Exception as e: print(f"{dpipe} is failing.") raise e class TestMapDataPipeSerialization(expecttest.TestCase): def _serialization_test_helper(self, datapipe): serialized_dp = pickle.dumps(datapipe) deserialized_dp = pickle.loads(serialized_dp) try: self.assertEqual(list(datapipe), list(deserialized_dp)) except AssertionError as e: print(f"{datapipe} is failing.") raise e def _serialization_test_for_dp_with_children(self, dp1, dp2): self._serialization_test_helper(dp1) self._serialization_test_helper(dp2) def test_serializable(self): picklable_datapipes: List = [ (mapdp.InMemoryCacheHolder, None, (), {}), (mapdp.IterToMapConverter, IterableWrapper([(i, i) for i in range(10)]), (), {}), (mapdp.UnZipper, SequenceWrapper([(i, i + 10) for i in range(10)]), (), {"sequence_length": 2}), ] dp_skip_comparison = set() # These DataPipes produce multiple DataPipes as outputs and those should be compared dp_compare_children = {mapdp.UnZipper} for dpipe, custom_input, dp_args, dp_kwargs in picklable_datapipes: try: # Creating input (usually a DataPipe) for the specific dpipe being tested if custom_input is None: custom_input = SequenceWrapper(range(10)) if dpipe in dp_skip_comparison: # Make sure they are
picklable and loadable (no value comparison) datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg] serialized_dp = pickle.dumps(datapipe) _ = pickle.loads(serialized_dp) elif dpipe in dp_compare_children: # DataPipes that have children dp1, dp2 = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg] self._serialization_test_for_dp_with_children(dp1, dp2) else: # Single DataPipe that requires comparison datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg] self._serialization_test_helper(datapipe) except Exception as e: print(f"{dpipe} is failing.") raise e def test_serializable_with_dill(self): """Only for DataPipes that take in a function as argument""" pass if __name__ == "__main__": unittest.main()
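# A minimal, self-contained version of the round-trip pattern that
# `_serialization_test_helper` above exercises (a sketch, assuming `torchdata`
# is installed): pickle the DataPipe before and during iteration and check that
# the deserialized copy yields the same data.
import pickle

from torchdata.datapipes.iter import IterableWrapper

dp = IterableWrapper(range(10)).batch(2)  # picklable: no lambdas involved

clone = pickle.loads(pickle.dumps(dp))  # before any iteration
assert list(clone) == list(dp)

it = iter(dp)
_ = next(it)  # partially read, as in step 2 of the helper
clone = pickle.loads(pickle.dumps(dp))
assert list(clone) == list(dp)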
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import types import unittest from typing import Dict, Iterator, List, Tuple, TypeVar import expecttest from _utils._common_utils_for_test import IS_WINDOWS from torch.utils.data import IterDataPipe from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES from torchdata.dataloader2 import DataLoader2, ReadingServiceInterface from torchdata.dataloader2.graph import find_dps, list_dps, remove_dp, replace_dp, traverse_dps from torchdata.dataloader2.graph.utils import _find_replicable_branches from torchdata.dataloader2.random import SeedGenerator from torchdata.dataloader2.utils.dispatch import ( _DummyIterDataPipe, find_lca_round_robin_sharding_dp, find_non_dispatching_branches, ) from torchdata.datapipes.iter import IterableWrapper, Mapper, ShardingRoundRobinDispatcher from torchdata.datapipes.utils import to_graph T_co = TypeVar("T_co", covariant=True) try: import graphviz HAS_GRAPHVIZ = True except ImportError: HAS_GRAPHVIZ = False class Adaptor(IterDataPipe[T_co]): def __init__(self, datapipe: IterDataPipe) -> None: self.datapipe = datapipe self.started = False def __iter__(self) -> Iterator[T_co]: yield from self.datapipe class DummyIterDataPipe(IterDataPipe[T_co]): def __iter__(self) -> Iterator[T_co]: yield from range(10) class TempReadingService(ReadingServiceInterface): adaptors: List[IterDataPipe] = [] def initialize(self, datapipe: IterDataPipe) -> IterDataPipe: graph = traverse_dps(datapipe) dps = find_dps(graph, Mapper) for dp in reversed(dps): new_dp = Adaptor(dp) self.adaptors.append(new_dp) graph = replace_dp(graph, dp, new_dp) return list(graph.values())[0][0] def initialize_iteration(self, seed_generator: SeedGenerator) -> None: seed_generator.seed(123) for dp in self.adaptors: dp.started = True def finalize_iteration(self) -> None: for dp in self.adaptors: dp.started = False def _x_and_x_plus_5(x): return [x, x + 5] def _x_mod_2(x): return x % 2 def _x_mult_2(x): return x * 2 class TestGraph(expecttest.TestCase): def _get_datapipes(self) -> Tuple[IterDataPipe, IterDataPipe, IterDataPipe]: src_dp = IterableWrapper(range(20)) m1 = src_dp.map(_x_and_x_plus_5) ub = m1.unbatch() c1, c2 = ub.demux(2, _x_mod_2) dm = c1.main_datapipe m2 = c1.map(_x_mult_2) dp = m2.zip(c2) return traverse_dps(dp), (src_dp, m1, ub, dm, c1, c2, m2, dp) def test_find_dps(self) -> None: graph, (_, m1, *_, m2, _) = self._get_datapipes() # pyre-ignore dps = find_dps(graph, Mapper) expected_dps = {m1, m2} for dp in dps: self.assertTrue(dp in expected_dps) def test_list_dps(self) -> None: def _validate_fn(dps, exp_dps): self.assertEqual(len(dps), len(exp_dps)) # Validate BFS Order for dp, exp_dp in zip(dps, exp_dps): self.assertEqual(dp, exp_dp) graph, ( src_dp, m1, ub, dm, c1, c2, m2, dp, ) = self._get_datapipes() exp_all_dps = [dp, m2, c2, c1, dm, ub, m1, src_dp] # List all DataPipes dps = list_dps(graph) _validate_fn(dps, exp_all_dps) # List all DataPipes excluding a single DataPipe dps = list_dps(graph, exclude_dps=m1) *exp_dps, _, _ = exp_all_dps _validate_fn(dps, exp_dps) # Exclude a DataPipe on one branch dps = list_dps(graph, exclude_dps=m2) exp_dps = [dp, c2] _validate_fn(dps, exp_dps) # List all DataPipes excluding multiple DataPipes dps = list_dps(graph, exclude_dps=[m1, m2]) exp_dps = [dp, c2] _validate_fn(dps, exp_dps) def _validate_graph(self, graph, nested_dp): 
self.assertEqual(len(graph), len(nested_dp)) for dp_id, sub_nested_dp in zip(graph, nested_dp): self.assertEqual(graph[dp_id][0], sub_nested_dp[0]) if len(graph[dp_id][1]) > 0: self._validate_graph(graph[dp_id][1], sub_nested_dp[1]) def test_replace_dps(self) -> None: # pyre-fixme[23]: Unable to unpack 3 values, 2 were expected. graph, ( src_dp, m1, ub, dm, c1, c2, m2, dp, ) = self._get_datapipes() new_dp1 = Adaptor(m1) new_dp2 = Adaptor(m2) new_dp3 = DummyIterDataPipe() graph = replace_dp(graph, m1, new_dp1) exp_g1 = [ [ dp, [ [m2, [[c1, [[dm, [[ub, [[new_dp1, [[m1, [[src_dp, []]]]]]]]]]]]]], [c2, [[dm, [[ub, [[new_dp1, [[m1, [[src_dp, []]]]]]]]]]]], ], ] ] self._validate_graph(traverse_dps(dp), exp_g1) graph = replace_dp(graph, m2, new_dp2) exp_g2 = [ [ dp, [ [new_dp2, [[m2, [[c1, [[dm, [[ub, [[new_dp1, [[m1, [[src_dp, []]]]]]]]]]]]]]]], [c2, [[dm, [[ub, [[new_dp1, [[m1, [[src_dp, []]]]]]]]]]]], ], ] ] self._validate_graph(traverse_dps(dp), exp_g2) graph = replace_dp(graph, m1, new_dp3) exp_g3 = [ [ dp, [ [new_dp2, [[m2, [[c1, [[dm, [[ub, [[new_dp1, [[new_dp3, []]]]]]]]]]]]]], [c2, [[dm, [[ub, [[new_dp1, [[new_dp3, []]]]]]]]]], ], ] ] self._validate_graph(traverse_dps(dp), exp_g3) def test_remove_dps(self) -> None: # pyre-fixme[23]: Unable to unpack 3 values, 2 were expected. graph, ( src_dp, m1, ub, dm, c1, c2, m2, dp, ) = self._get_datapipes() graph = remove_dp(graph, m1) exp_g1 = [[dp, [[m2, [[c1, [[dm, [[ub, [[src_dp, []]]]]]]]]], [c2, [[dm, [[ub, [[src_dp, []]]]]]]]]]] self._validate_graph(traverse_dps(dp), exp_g1) graph = remove_dp(graph, m2) exp_g2 = [[dp, [[c1, [[dm, [[ub, [[src_dp, []]]]]]]], [c2, [[dm, [[ub, [[src_dp, []]]]]]]]]]] self._validate_graph(traverse_dps(dp), exp_g2) with self.assertRaisesRegex(RuntimeError, "Cannot remove the source DataPipe"): remove_dp(graph, src_dp) with self.assertRaisesRegex(RuntimeError, "Cannot remove the receiving DataPipe"): remove_dp(graph, dp) def test_reading_service(self) -> None: _, (*_, dp) = self._get_datapipes() # pyre-ignore rs = TempReadingService() dl = DataLoader2(dp, reading_service=rs) self.assertTrue(len(rs.adaptors) == 0) it = iter(dl) for new_dp in rs.adaptors: self.assertTrue(new_dp.started) res = list(it) self.assertEqual(len(res), 20) for new_dp in rs.adaptors: self.assertFalse(new_dp.started) self.assertEqual(res, list(dl)) def insert_round_robin_sharding(graph, datapipe): dispatch_dp = ShardingRoundRobinDispatcher(datapipe, SHARDING_PRIORITIES.MULTIPROCESSING) return replace_dp(graph, datapipe, dispatch_dp), dispatch_dp def replace_by_dummy(graph, datapipe): return replace_dp(graph, datapipe, _DummyIterDataPipe()) def make_non_replicable_dp(datapipe): datapipe.is_replicable = types.MethodType(lambda self: False, datapipe) return datapipe class TestNonReplicableDataPipe(expecttest.TestCase): def _make_dp(self): r""" Create a DataPipe graph that covers most of the cases, including: - single-branch pipeline - multi-branch pipeline - pipeline that has circular references single_br_dp ------------------------------------- ch1 \ / \ \ multi_br_dp -->forker_dp--> -> fork_zip_dp -> end_dp -> \ / / <------- ch2 / / \ / cir_br_dp -> cir_map_dp -------------------------- """ # Single-branch single_br_dp = IterableWrapper(list(range(10))) # Multi-branch multi_br_dp = IterableWrapper(list(range(10))) ch1, ch2 = multi_br_dp.fork(2) forker_dp = ch1.main_datapipe fork_zip_dp = ch1.zip(ch2) # Circular-branch cir_br_dp = IterableWrapper(list(range(10))) cir_map_dp = cir_br_dp.map(_x_mult_2) # Force a circular reference cir_br_dp.cir_dep =
cir_map_dp end_dp = single_br_dp.zip(fork_zip_dp, cir_map_dp) graph = traverse_dps(end_dp) return single_br_dp, multi_br_dp, forker_dp, ch1, ch2, fork_zip_dp, cir_br_dp, cir_map_dp, end_dp, graph def test_single_round_robin_sharding_dp(self): single_br_dp, *_, graph = self._make_dp() graph, single_br_dp = insert_round_robin_sharding(graph, single_br_dp) self.assertEqual(find_lca_round_robin_sharding_dp(graph), single_br_dp) # The same non-shardable DataPipe on both branches _, multi_br_dp, *_, graph = self._make_dp() graph, multi_br_dp = insert_round_robin_sharding(graph, multi_br_dp) self.assertEqual(find_lca_round_robin_sharding_dp(graph), multi_br_dp) _, _, _, ch1, _, fork_zip_dp, *_, graph = self._make_dp() graph, ch1 = insert_round_robin_sharding(graph, ch1) self.assertEqual(find_lca_round_robin_sharding_dp(graph), fork_zip_dp) # Circular reference *_, cir_br_dp, cir_map_dp, _, graph = self._make_dp() graph, cir_br_dp = insert_round_robin_sharding(graph, cir_br_dp) self.assertEqual(find_lca_round_robin_sharding_dp(graph), cir_map_dp) *_, cir_map_dp, _, graph = self._make_dp() graph, cir_map_dp = insert_round_robin_sharding(graph, cir_map_dp) self.assertEqual(find_lca_round_robin_sharding_dp(graph), cir_map_dp) def test_multi_round_robin_sharding_dps(self): single_br_dp, multi_br_dp, *_, end_dp, graph = self._make_dp() graph, single_br_dp = insert_round_robin_sharding(graph, single_br_dp) graph, multi_br_dp = insert_round_robin_sharding(graph, multi_br_dp) self.assertEqual(find_lca_round_robin_sharding_dp(graph), end_dp) single_br_dp, _, _, ch1, *_, end_dp, graph = self._make_dp() graph, single_br_dp = insert_round_robin_sharding(graph, single_br_dp) graph, ch1 = insert_round_robin_sharding(graph, ch1) self.assertEqual(find_lca_round_robin_sharding_dp(graph), end_dp) _, multi_br_dp, _, ch1, _, fork_zip_dp, *_, graph = self._make_dp() graph, multi_br_dp = insert_round_robin_sharding(graph, multi_br_dp) graph, ch1 = insert_round_robin_sharding(graph, ch1) self.assertEqual(find_lca_round_robin_sharding_dp(graph), fork_zip_dp) single_br_dp, *_, cir_br_dp, _, end_dp, graph = self._make_dp() graph, single_br_dp = insert_round_robin_sharding(graph, single_br_dp) graph, cir_br_dp = insert_round_robin_sharding(graph, cir_br_dp) self.assertEqual(find_lca_round_robin_sharding_dp(graph), end_dp) def test_non_dispatching_branches(self): r""" There should be a single DataPipe as the lowest common ancestor of all non-dispatching DataPipes that is replaced by ``DummyIterDataPipe``. 
""" single_br_dp, *_, fork_zip_dp, _, cir_map_dp, _, graph = self._make_dp() graph = replace_by_dummy(graph, single_br_dp) dps = find_non_dispatching_branches(graph) self.assertEqual(len(dps), 2) self.assertTrue(all(dp in (fork_zip_dp, cir_map_dp) for dp in dps)) single_br_dp, multi_br_dp, *_, cir_map_dp, _, graph = self._make_dp() graph = replace_by_dummy(graph, multi_br_dp) dps = find_non_dispatching_branches(graph) self.assertEqual(len(dps), 2) self.assertTrue(all(dp in (single_br_dp, cir_map_dp) for dp in dps)) # In theory, this case should never happen because LCA (fork_zip_dp) should be # replaced by _DummpyIterDataPipe if any of child is non-replicable single_br_dp, _, _, ch1, ch2, *_, cir_map_dp, _, graph = self._make_dp() graph = replace_by_dummy(graph, ch1) dps = find_non_dispatching_branches(graph) self.assertEqual(len(dps), 3) self.assertTrue(all(dp in (single_br_dp, ch2, cir_map_dp) for dp in dps)) single_br_dp, *_, fork_zip_dp, _, cir_map_dp, _, graph = self._make_dp() graph = replace_by_dummy(graph, cir_map_dp) dps = find_non_dispatching_branches(graph) self.assertTrue(all(dp in (single_br_dp, fork_zip_dp) for dp in dps)) *_, end_dp, graph = self._make_dp() graph = replace_by_dummy(graph, end_dp) dps = find_non_dispatching_branches(graph) self.assertEqual(len(dps), 0) single_br_dp, *_, fork_zip_dp, _, cir_map_dp, _, graph = self._make_dp() graph = replace_by_dummy(graph, fork_zip_dp) dps = find_non_dispatching_branches(graph) self.assertEqual(len(dps), 2) self.assertTrue(all(dp in (single_br_dp, cir_map_dp) for dp in dps)) def test_single_non_replicable_dp(self): # All replicable *_, end_dp, graph = self._make_dp() dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 1) self.assertEqual(dps[0], end_dp) # Test the production use case where the last DataPipe is fullsync *_, end_dp, _ = self._make_dp() dp = end_dp.fullsync() graph = traverse_dps(dp) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 1) self.assertEqual(dps[0], end_dp) single_br_dp, *_, fork_zip_dp, _, cir_map_dp, _, graph = self._make_dp() make_non_replicable_dp(single_br_dp) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 2) self.assertTrue(all(dp in (fork_zip_dp, cir_map_dp) for dp in dps)) single_br_dp, *_, ch1, ch2, fork_zip_dp, _, cir_map_dp, _, graph = self._make_dp() make_non_replicable_dp(fork_zip_dp) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 4) self.assertTrue(all(dp in (single_br_dp, ch1, ch2, cir_map_dp) for dp in dps)) single_br_dp, _, forker_dp, ch1, *_, cir_map_dp, _, graph = self._make_dp() make_non_replicable_dp(ch1) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 3) self.assertTrue(all(dp in (single_br_dp, forker_dp, cir_map_dp) for dp in dps)) single_br_dp, *_, fork_zip_dp, cir_br_dp, cir_map_dp, _, graph = self._make_dp() make_non_replicable_dp(cir_map_dp) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 3) self.assertTrue(all(dp in (single_br_dp, fork_zip_dp, cir_br_dp) for dp in dps)) single_br_dp, *_, fork_zip_dp, _, cir_map_dp, end_dp, graph = self._make_dp() make_non_replicable_dp(end_dp) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 3) self.assertTrue(all(dp in (single_br_dp, fork_zip_dp, cir_map_dp) for dp in dps)) def test_multi_non_replicable_dps(self): single_br_dp, multi_br_dp, *_, cir_map_dp, _, graph = self._make_dp() make_non_replicable_dp(single_br_dp) make_non_replicable_dp(multi_br_dp) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 1) 
self.assertEqual(dps[0], cir_map_dp) single_br_dp, _, forker_dp, ch1, *_, cir_map_dp, _, graph = self._make_dp() make_non_replicable_dp(single_br_dp) make_non_replicable_dp(ch1) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 2) self.assertTrue(all(dp in (forker_dp, cir_map_dp) for dp in dps)) single_br_dp, *_, ch1, ch2, fork_zip_dp, _, cir_map_dp, _, graph = self._make_dp() make_non_replicable_dp(single_br_dp) make_non_replicable_dp(fork_zip_dp) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 3) self.assertTrue(all(dp in (ch1, ch2, cir_map_dp) for dp in dps)) single_br_dp, *_, fork_zip_dp, cir_br_dp, cir_map_dp, _, graph = self._make_dp() make_non_replicable_dp(single_br_dp) make_non_replicable_dp(cir_map_dp) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 2) self.assertTrue(all(dp in (fork_zip_dp, cir_br_dp) for dp in dps)) single_br_dp, multi_br_dp, forker_dp, ch1, *_, cir_map_dp, _, graph = self._make_dp() make_non_replicable_dp(forker_dp) make_non_replicable_dp(ch1) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 3) self.assertTrue(all(dp in (single_br_dp, multi_br_dp, cir_map_dp) for dp in dps)) single_br_dp, multi_br_dp, forker_dp, *_, cir_br_dp, cir_map_dp, _, graph = self._make_dp() make_non_replicable_dp(forker_dp) make_non_replicable_dp(cir_map_dp) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 3) self.assertTrue(all(dp in (single_br_dp, multi_br_dp, cir_br_dp) for dp in dps)) single_br_dp, *_, ch1, ch2, fork_zip_dp, cir_br_dp, cir_map_dp, _, graph = self._make_dp() make_non_replicable_dp(fork_zip_dp) make_non_replicable_dp(cir_map_dp) dps = _find_replicable_branches(graph) self.assertEqual(len(dps), 4) self.assertTrue(all(dp in (single_br_dp, ch1, ch2, cir_br_dp) for dp in dps)) class TestGraphVisualization(expecttest.TestCase): @unittest.skipIf(not HAS_GRAPHVIZ, "Package `graphviz` is required to test graph visualization functionalities.") def test_to_graph(self): dp1 = IterableWrapper(range(10)) dp2 = dp1.map(lambda x: x + 1) dp3 = dp2.filter(lambda x: x > 5) cdp1, cdp2 = dp3.fork(num_instances=2) dp4 = cdp1.zip(cdp2) cdp3, cdp4 = dp4.demux(num_instances=2, classifier_fn=lambda x: x % 2) dp5 = cdp3.concat(cdp4) # Test to ensure that we can create these graphs without runtime errors kwargs_list: List[Dict] = [ {"dp": dp1}, {"dp": dp2}, {"dp": dp3}, {"dp": cdp1, "debug": True}, {"dp": dp4}, {"dp": dp4, "debug": True}, {"dp": cdp3, "debug": True}, {"dp": dp5}, {"dp": dp5, "debug": True}, ] for kwargs in kwargs_list: g = to_graph(**kwargs) self.assertTrue(isinstance(g, graphviz.Digraph)) if __name__ == "__main__": unittest.main()
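# Quick sketch of the graph APIs exercised above, assuming `torchdata` is
# installed: traverse a two-Mapper pipeline, then use `replace_dp` to swap the
# inner Mapper for a fresh one over the same source (mirroring `test_replace_dps`).
from torchdata.dataloader2.graph import find_dps, replace_dp, traverse_dps
from torchdata.datapipes.iter import IterableWrapper, Mapper

def _plus_one(x):
    return x + 1

def _times_ten(x):
    return x * 10

src = IterableWrapper(range(5))
mid = src.map(_plus_one)
pipe = mid.map(_times_ten)  # graph: src -> mid -> pipe

graph = traverse_dps(pipe)
assert len(find_dps(graph, Mapper)) == 2  # both map() nodes are Mappers
graph = replace_dp(graph, mid, src.map(_times_ten))  # rewires pipe's input in place
print(list(pipe))  # [0, 100, 200, 300, 400]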
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os import unittest import warnings from functools import partial import expecttest import torch from _utils._common_utils_for_test import IS_M1, reset_after_n_next_calls from torchdata.datapipes.iter import ( FileLister, FileOpener, FSSpecFileLister, FSSpecFileOpener, FSSpecSaver, IterableWrapper, TFRecordLoader, ) try: import google.protobuf as _protobuf del _protobuf HAS_PROTOBUF = True except ImportError: HAS_PROTOBUF = False skipIfNoPROTOBUF = unittest.skipIf(not HAS_PROTOBUF, "no google protobuf") class TestDataPipeTFRecord(expecttest.TestCase): def setUp(self): self.temp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "_fakedata", "tfrecord") def assertArrayEqual(self, arr1, arr2): if isinstance(arr1, list): arr1 = torch.stack(arr1) if isinstance(arr2, list): arr2 = torch.stack(arr2) torch.testing.assert_close(arr1, arr2, check_dtype=False) def _ground_truth_data(self): for i in range(4): x = torch.arange(i * 10, (i + 1) * 10) yield { "x_float": x, "x_int": (x * 10).long(), "x_byte": [b"test str"], } def _ground_truth_seq_data(self): for i in range(4): x = torch.arange(i * 10, (i + 1) * 10) rep = 2 * i + 3 yield {"x_float": x, "x_int": (x * 10).long(), "x_byte": [b"test str"]}, { "x_float_seq": [x] * rep, "x_int_seq": [(x * 10).long()] * rep, "x_byte_seq": [[b"test str"]] * rep, } @skipIfNoPROTOBUF @unittest.skipIf( IS_M1, "Protobuf 3.19.* is not supported on MacOS M1, but Tensorflow is incompatible with Protobuf 4" ) @torch.no_grad() def test_tfrecord_loader_example_iterdatapipe(self): filename = f"{self.temp_dir}/example.tfrecord" datapipe1 = IterableWrapper([filename]) datapipe2 = FileOpener(datapipe1, mode="b") # Functional Test: test if the returned data is correct tfrecord_parser = datapipe2.load_from_tfrecord() result = list(tfrecord_parser) self.assertEqual(len(result), 4) expected_res = final_expected_res = list(self._ground_truth_data()) for true_data, loaded_data in zip(expected_res, result): self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys())) for key in ["x_float", "x_int"]: self.assertArrayEqual(true_data[key], loaded_data[key]) self.assertEqual(len(loaded_data["x_byte"]), 1) self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0]) # Functional Test: test if the shape of the returned data is correct when using spec tfrecord_parser = datapipe2.load_from_tfrecord( { "x_float": ((5, 2), torch.float64), "x_int": ((5, 2), torch.int32), "x_byte": (tuple(), None), } ) result = list(tfrecord_parser) self.assertEqual(len(result), 4) expected_res = [ { "x_float": x["x_float"].reshape(5, 2), "x_int": x["x_int"].reshape(5, 2), "x_byte": x["x_byte"][0], } for x in self._ground_truth_data() ] for true_data, loaded_data in zip(expected_res, result): self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys())) self.assertArrayEqual(true_data["x_float"], loaded_data["x_float"].float()) self.assertArrayEqual(true_data["x_int"], loaded_data["x_int"].long()) self.assertEqual(loaded_data["x_float"].dtype, torch.float64) self.assertEqual(loaded_data["x_int"].dtype, torch.int32) self.assertEqual(true_data["x_byte"], loaded_data["x_byte"]) # Functional Test: ignore features missing from spec tfrecord_parser = datapipe2.load_from_tfrecord( { "x_float": ((10,), torch.float32), } ) result = list(tfrecord_parser) 
self.assertEqual(len(result), 4) expected_res = [ { "x_float": x["x_float"], } for x in self._ground_truth_data() ] for true_data, loaded_data in zip(expected_res, result): self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys())) self.assertArrayEqual(true_data["x_float"], loaded_data["x_float"].float()) # Functional Test: raises error if missing spec feature with self.assertRaises(RuntimeError): tfrecord_parser = datapipe2.load_from_tfrecord( { "x_float_unknown": ((5, 2), torch.float64), "x_int": ((5, 2), torch.int32), "x_byte": (tuple(), None), } ) result = list(tfrecord_parser) # Reset Test: tfrecord_parser = TFRecordLoader(datapipe2) expected_res = final_expected_res n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(tfrecord_parser, n_elements_before_reset) self.assertEqual(len(expected_res[:n_elements_before_reset]), len(res_before_reset)) for true_data, loaded_data in zip(expected_res[:n_elements_before_reset], res_before_reset): self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys())) for key in ["x_float", "x_int"]: self.assertArrayEqual(true_data[key], loaded_data[key]) self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0]) self.assertEqual(len(expected_res), len(res_after_reset)) for true_data, loaded_data in zip(expected_res, res_after_reset): self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys())) for key in ["x_float", "x_int"]: self.assertArrayEqual(true_data[key], loaded_data[key]) self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0]) # __len__ Test: length isn't implemented since it cannot be known ahead of time with self.assertRaisesRegex(TypeError, "doesn't have valid length"): len(tfrecord_parser) @skipIfNoPROTOBUF @unittest.skipIf( IS_M1, "Protobuf 3.19.* is not supported on MacOS M1, but Tensorflow is incompatible with Protobuf 4" ) @torch.no_grad() def test_tfrecord_loader_sequence_example_iterdatapipe(self): filename = f"{self.temp_dir}/sequence_example.tfrecord" datapipe1 = IterableWrapper([filename]) datapipe2 = FileOpener(datapipe1, mode="b") # Functional Test: test if the returned data is correct tfrecord_parser = datapipe2.load_from_tfrecord() result = list(tfrecord_parser) self.assertEqual(len(result), 4) expected_res = final_expected_res = list(self._ground_truth_seq_data()) for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, result): self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys())) for key in ["x_float", "x_int"]: self.assertArrayEqual(true_data_ctx[key], loaded_data[key]) self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"])) self.assertIsInstance(loaded_data[key + "_seq"], list) for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]): self.assertArrayEqual(a1, a2) self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"]) self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"]) # Functional Test: test if the shape of the returned data is correct when using spec tfrecord_parser = datapipe2.load_from_tfrecord( { "x_float": ((5, 2), torch.float64), "x_int": ((5, 2), torch.int32), "x_byte": (tuple(), None), "x_float_seq": ((-1, 5, 2), torch.float64), "x_int_seq": ((-1, 5, 2), torch.int32), "x_byte_seq": ((-1,), None), } ) result = list(tfrecord_parser) self.assertEqual(len(result), 4) expected_res = [ ( { "x_float": x["x_float"].reshape(5, 2), "x_int": x["x_int"].reshape(5, 2), "x_byte": x["x_byte"][0], }, { "x_float_seq": 
[y.reshape(5, 2) for y in z["x_float_seq"]], "x_int_seq": [y.reshape(5, 2) for y in z["x_int_seq"]], "x_byte_seq": [y[0] for y in z["x_byte_seq"]], }, ) for x, z in self._ground_truth_seq_data() ] for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, result): self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys())) for key in ["x_float", "x_int"]: l_loaded_data = loaded_data[key] if key == "x_float": l_loaded_data = l_loaded_data.float() else: l_loaded_data = l_loaded_data.int() self.assertArrayEqual(true_data_ctx[key], l_loaded_data) self.assertArrayEqual(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]) self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"]) self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"]) # Functional Test: ignore features missing from spec tfrecord_parser = datapipe2.load_from_tfrecord( { "x_float": ((10,), torch.float32), } ) result = list(tfrecord_parser) self.assertEqual(len(result), 4) expected_res = [ { "x_float": x["x_float"], } for x, z in self._ground_truth_seq_data() ] for true_data, loaded_data in zip(expected_res, result): self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys())) self.assertArrayEqual(true_data["x_float"], loaded_data["x_float"].float()) # Functional Test: raises error if missing spec feature with self.assertRaises(RuntimeError): tfrecord_parser = datapipe2.load_from_tfrecord( {"x_float_unknown": ((5, 2), torch.float64), "x_int": ((5, 2), torch.int32), "x_byte": None} ) result = list(tfrecord_parser) # Reset Test: tfrecord_parser = TFRecordLoader(datapipe2) expected_res = final_expected_res n_elements_before_reset = 2 res_before_reset, res_after_reset = reset_after_n_next_calls(tfrecord_parser, n_elements_before_reset) self.assertEqual(len(expected_res[:n_elements_before_reset]), len(res_before_reset)) for (true_data_ctx, true_data_seq), loaded_data in zip( expected_res[:n_elements_before_reset], res_before_reset ): self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys())) for key in ["x_float", "x_int"]: self.assertArrayEqual(true_data_ctx[key], loaded_data[key]) self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"])) self.assertIsInstance(loaded_data[key + "_seq"], list) for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]): self.assertArrayEqual(a1, a2) self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"]) self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"]) self.assertEqual(len(expected_res), len(res_after_reset)) for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, res_after_reset): self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys())) for key in ["x_float", "x_int"]: self.assertArrayEqual(true_data_ctx[key], loaded_data[key]) self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"])) self.assertIsInstance(loaded_data[key + "_seq"], list) for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]): self.assertArrayEqual(a1, a2) self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"]) self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"]) # __len__ Test: length isn't implemented since it cannot be known ahead of time with self.assertRaisesRegex(TypeError, "doesn't have valid length"): len(tfrecord_parser) if __name__ == "__main__": unittest.main()
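# Sketch of the spec format accepted by `load_from_tfrecord` above (the
# "example.tfrecord" path is hypothetical; protobuf and torchdata are assumed
# to be available). Each entry maps a feature name to (shape, dtype); a dtype
# of None keeps raw bytes, and a -1 in the shape leaves that dimension variable,
# as the sequence-feature test above does.
import torch
from torchdata.datapipes.iter import FileOpener, IterableWrapper

spec = {
    "x_float": ((5, 2), torch.float64),
    "x_int": ((5, 2), torch.int32),
    "x_byte": (tuple(), None),
}
datapipe = FileOpener(IterableWrapper(["example.tfrecord"]), mode="b")
tfrecord_dp = datapipe.load_from_tfrecord(spec)
# for sample in tfrecord_dp:
#     print(sample["x_float"].shape)  # torch.Size([5, 2])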
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import io import json import os import subprocess import unittest import warnings from unittest.mock import patch import expecttest from _utils._common_utils_for_test import check_hash_fn, create_temp_dir, IS_M1, IS_WINDOWS from torch.utils.data import DataLoader from torchdata.datapipes.iter import ( FileOpener, FSSpecFileLister, FSSpecFileOpener, HttpReader, IterableWrapper, OnDiskCacheHolder, S3FileLister, S3FileLoader, ) from torchdata.datapipes.iter.load.online import _get_proxies try: import fsspec HAS_FSSPEC = True except ImportError: HAS_FSSPEC = False try: import s3fs HAS_FSSPEC_S3 = True except ImportError: HAS_FSSPEC_S3 = False skipIfNoFSSpecS3 = unittest.skipIf(not (HAS_FSSPEC and HAS_FSSPEC_S3), "no FSSpec with S3fs") try: import adlfs HAS_FSSPEC_AZ = True except ImportError: HAS_FSSPEC_AZ = False skipIfNoFSSpecAZ = unittest.skipIf(not (HAS_FSSPEC and HAS_FSSPEC_AZ), "no FSSpec with adlfs") try: from torchdata._torchdata import S3Handler HAS_AWS = True except ImportError: HAS_AWS = False skipIfAWS = unittest.skipIf(HAS_AWS, "AWSSDK Enabled") skipIfNoAWS = unittest.skipIf(not HAS_AWS, "No AWSSDK Enabled") try: import portalocker HAS_PORTALOCKER = True except ImportError: HAS_PORTALOCKER = False skipIfNoPortalocker = unittest.skipIf(not HAS_PORTALOCKER, "No portalocker installed") class TestDataPipeRemoteIO(expecttest.TestCase): def setUp(self): self.temp_dir = create_temp_dir() def tearDown(self): try: self.temp_dir.cleanup() except Exception as e: warnings.warn(f"TestDataPipeRemoteIO was not able to cleanup temp dir due to {e}") def test_http_reader_iterdatapipe(self): file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE" expected_file_name = "LICENSE" expected_MD5_hash = "bb9675028dd39d2dd2bf71002b93e66c" query_params = {"auth": ("fake_username", "fake_password"), "allow_redirects": True} timeout = 120 http_reader_dp = HttpReader(IterableWrapper([file_url]), timeout=timeout, **query_params) # Functional Test: test if the Http Reader can download and read properly reader_dp = http_reader_dp.readlines() it = iter(reader_dp) path, line = next(it) self.assertEqual(expected_file_name, os.path.basename(path)) self.assertTrue(b"BSD" in line) # Reset Test: http_reader_dp has been read, but we reset when calling check_hash() check_cache_dp = http_reader_dp.check_hash({file_url: expected_MD5_hash}, "md5", rewind=False) it = iter(check_cache_dp) path, stream = next(it) self.assertEqual(expected_file_name, os.path.basename(path)) self.assertTrue(io.BufferedReader, type(stream)) # __len__ Test: returns the length of source DataPipe self.assertEqual(1, len(http_reader_dp)) # Error Test: test if the Http Reader raises an error when the url is invalid error_url = "https://github.com/pytorch/data/this/url/dont/exist" http_error_dp = HttpReader(IterableWrapper([error_url]), timeout=timeout) with self.assertRaisesRegex(Exception, f"404.+{error_url}"): next(iter(http_error_dp.readlines())) # Feature skip-error Test: test if the Http Reader skips urls causing problems http_skip_error_dp = HttpReader(IterableWrapper([error_url, file_url]), timeout=timeout, skip_on_error=True) reader_dp = http_skip_error_dp.readlines() with self.assertWarnsRegex(Warning, f"404.+{error_url}.+skipping"): it = iter(reader_dp) path, line = next(it) self.assertEqual(expected_file_name, 
os.path.basename(path)) self.assertTrue(b"BSD" in line) # test if GET-request is done with correct arguments with patch("requests.Session.get") as mock_get: http_reader_dp = HttpReader(IterableWrapper([file_url]), timeout=timeout, **query_params) _ = next(iter(http_reader_dp)) mock_get.assert_called_with( file_url, timeout=timeout, proxies=_get_proxies(), stream=True, auth=query_params["auth"], allow_redirects=query_params["allow_redirects"], ) @skipIfNoPortalocker def test_on_disk_cache_holder_iterdatapipe(self): tar_file_url = "https://raw.githubusercontent.com/pytorch/data/main/test/_fakedata/csv.tar.gz" expected_file_name = os.path.join(self.temp_dir.name, "csv.tar.gz") expected_MD5_hash = "42cd45e588dbcf64c65751fbf0228af9" tar_hash_dict = {expected_file_name: expected_MD5_hash} tar_file_dp = IterableWrapper([tar_file_url]) with self.assertRaisesRegex(RuntimeError, "Expected `OnDiskCacheHolder` existing"): _ = tar_file_dp.end_caching() def _filepath_fn(url): filename = os.path.basename(url) return os.path.join(self.temp_dir.name, filename) tar_cache_dp = tar_file_dp.on_disk_cache( filepath_fn=_filepath_fn, hash_dict=tar_hash_dict, hash_type="md5", ) # DataPipe Constructor tar_cache_dp = HttpReader(tar_cache_dp) # Start iteration without `end_caching` with self.assertRaisesRegex(RuntimeError, "Please call"): _ = list(tar_cache_dp) # Both filepath_fn and same_filepath_fn are set with self.assertRaisesRegex(ValueError, "`filepath_fn` is mutually"): _ = tar_cache_dp.end_caching(mode="wb", filepath_fn=_filepath_fn, same_filepath_fn=True) tar_cache_dp = tar_cache_dp.end_caching(mode="wb", same_filepath_fn=True) # File doesn't exist on disk self.assertFalse(os.path.exists(expected_file_name)) path = list(tar_cache_dp)[0] # File is cached to disk self.assertTrue(os.path.exists(expected_file_name)) self.assertEqual(expected_file_name, path) self.assertTrue(check_hash_fn(expected_file_name, expected_MD5_hash)) # Modify the downloaded file to trigger downloading again with open(expected_file_name, "w") as f: f.write("0123456789abcdef") self.assertFalse(check_hash_fn(expected_file_name, expected_MD5_hash)) path = list(tar_cache_dp)[0] self.assertTrue(check_hash_fn(expected_file_name, expected_MD5_hash)) # Call `end_caching` again with self.assertRaisesRegex(RuntimeError, "`end_caching` can only be invoked once"): _ = tar_cache_dp.end_caching() # Cache decompressed archive but only check root directory root_dir = "temp" file_cache_dp = OnDiskCacheHolder( tar_cache_dp, filepath_fn=lambda tar_path: os.path.join(os.path.dirname(tar_path), root_dir) ) remember_cache_dp_object = file_cache_dp file_cache_dp = FileOpener(file_cache_dp, mode="rb").load_from_tar() file_cache_dp = file_cache_dp.end_caching( mode="wb", filepath_fn=lambda file_path: os.path.join(self.temp_dir.name, root_dir, os.path.basename(file_path)), ) cached_it = iter(file_cache_dp) for i in range(3): expected_csv_path = os.path.join(self.temp_dir.name, root_dir, f"{i}.csv") # File doesn't exist on disk # Check disabled due to some elements of prefetching inside of on_disk_cache # self.assertFalse(os.path.exists(expected_csv_path)) csv_path = next(cached_it) # File is cached to disk self.assertTrue(os.path.exists(expected_csv_path)) self.assertEqual(expected_csv_path, csv_path) # This is the situation when the previous process had no chance to release the promise file on the file lists; # as we are in the same pid, we need to force iterators to finish by deleting or exhausting them del cached_it if not IS_WINDOWS: dl = DataLoader(file_cache_dp,
num_workers=3, multiprocessing_context="fork", batch_size=1) expected = [[os.path.join(self.temp_dir.name, root_dir, f"{i}.csv")] for i in range(3)] * 3 res = list(dl) self.assertEqual(sorted(expected), sorted(res)) remember_cache_dp_object._download_everything = True workers = 100 dl = DataLoader(file_cache_dp, num_workers=workers, multiprocessing_context="fork", batch_size=1) expected = [[os.path.join(self.temp_dir.name, root_dir, f"{i}.csv")] for i in range(3)] * workers res = list(dl) self.assertEqual(sorted(expected), sorted(res)) def __get_s3_cnt(self, s3_pths: list, recursive=True): """Return the count of the total objects collected from a list of s3 paths""" tot_objs = set() for p in s3_pths: pth_parts = p.split("s3://")[1].split("/", 1) if len(pth_parts) == 1: bkt_name, prefix = pth_parts[0], "" else: bkt_name, prefix = pth_parts aws_cmd = f"aws --output json s3api list-objects --bucket {bkt_name} --no-sign-request" if prefix.strip(): aws_cmd += f" --prefix {prefix}" if not recursive: aws_cmd += " --delimiter /" res = subprocess.run(aws_cmd, shell=True, check=True, capture_output=True) json_res = json.loads(res.stdout) if "Contents" in json_res: objs = [v["Key"] for v in json_res["Contents"]] else: objs = [v["Prefix"] for v in json_res["CommonPrefixes"]] tot_objs |= set(objs) return len(tot_objs) @skipIfNoFSSpecS3 def test_fsspec_io_iterdatapipe(self): input_list = [ ["s3://ai2-public-datasets"], # bucket without '/' ["s3://ai2-public-datasets/charades/"], # bucket with '/' [ "s3://ai2-public-datasets/charades/Charades_v1.zip", "s3://ai2-public-datasets/charades/Charades_v1_flow.tar", "s3://ai2-public-datasets/charades/Charades_v1_rgb.tar", "s3://ai2-public-datasets/charades/Charades_v1_480.zip", ], # multiple files ] for urls in input_list: fsspec_lister_dp = FSSpecFileLister(IterableWrapper(urls), anon=True) self.assertEqual( sum(1 for _ in fsspec_lister_dp), self.__get_s3_cnt(urls, recursive=False), f"{urls} failed" ) url = "s3://ai2-public-datasets/charades/" fsspec_loader_dp = FSSpecFileOpener(FSSpecFileLister(IterableWrapper([url]), anon=True), anon=True) res = list(fsspec_loader_dp) self.assertEqual(len(res), 18, f"{input} failed") @unittest.skipIf(True, "Needs authentication.
See: https://github.com/pytorch/data/issues/904") @skipIfNoFSSpecAZ def test_fsspec_azure_blob(self): url = "public/curated/covid-19/ecdc_cases/latest/ecdc_cases.csv" account_name = "pandemicdatalake" azure_prefixes = ["abfs", "az"] fsspec_loader_dp = {} for prefix in azure_prefixes: fsspec_lister_dp = FSSpecFileLister(f"{prefix}://{url}", account_name=account_name) fsspec_loader_dp[prefix] = FSSpecFileOpener(fsspec_lister_dp, account_name=account_name).parse_csv() res_abfs = list(fsspec_loader_dp["abfs"])[0] res_az = list(fsspec_loader_dp["az"])[0] self.assertEqual(res_abfs, res_az, f"{input} failed") @skipIfAWS def test_disabled_s3_io_iterdatapipe(self): file_urls = ["s3://ai2-public-datasets"] with self.assertRaisesRegex(ModuleNotFoundError, "TorchData must be built with"): _ = S3FileLister(IterableWrapper(file_urls)) with self.assertRaisesRegex(ModuleNotFoundError, "TorchData must be built with"): _ = S3FileLoader(IterableWrapper(file_urls)) @skipIfNoAWS @unittest.skipIf(IS_M1, "PyTorch M1 CI Machine doesn't allow accessing") def test_s3_io_iterdatapipe(self): # S3FileLister: different inputs input_list = [ ["s3://ai2-public-datasets"], # bucket without '/' ["s3://ai2-public-datasets/"], # bucket with '/' ["s3://ai2-public-datasets/charades"], # folder without '/' ["s3://ai2-public-datasets/charades/"], # folder with '/' ["s3://ai2-public-datasets/charad"], # prefix [ "s3://ai2-public-datasets/charades/Charades_v1", "s3://ai2-public-datasets/charades/Charades_vu17", ], # prefixes ["s3://ai2-public-datasets/charades/Charades_v1.zip"], # single file [ "s3://ai2-public-datasets/charades/Charades_v1.zip", "s3://ai2-public-datasets/charades/Charades_v1_flow.tar", "s3://ai2-public-datasets/charades/Charades_v1_rgb.tar", "s3://ai2-public-datasets/charades/Charades_v1_480.zip", ], # multiple files [ "s3://ai2-public-datasets/charades/Charades_v1.zip", "s3://ai2-public-datasets/charades/Charades_v1_flow.tar", "s3://ai2-public-datasets/charades/Charades_v1_rgb.tar", "s3://ai2-public-datasets/charades/Charades_v1_480.zip", "s3://ai2-public-datasets/charades/Charades_vu17", ], # files + prefixes ] for input in input_list: s3_lister_dp = S3FileLister(IterableWrapper(input), region="us-west-2") self.assertEqual(sum(1 for _ in s3_lister_dp), self.__get_s3_cnt(input), f"{input} failed") # S3FileLister: prefixes + different region file_urls = [ "s3://aft-vbi-pds/bin-images/111", "s3://aft-vbi-pds/bin-images/222", ] s3_lister_dp = S3FileLister(IterableWrapper(file_urls), request_timeout_ms=10000, region="us-east-1") self.assertEqual(sum(1 for _ in s3_lister_dp), 2212, f"{input} failed") # S3FileLister: incorrect inputs input_list = [ [""], ["ai2-public-datasets"], ["s3://"], ["s3:///bin-images"], ] for input in input_list: with self.assertRaises(ValueError, msg=f"{input} should raise ValueError."): s3_lister_dp = S3FileLister(IterableWrapper(input), region="us-east-1") for _ in s3_lister_dp: pass input = [["s3://aft-vbi-pds/bin-images/100730.jpg"], 1] s3_loader_dp = S3FileLoader(input[0], region="us-east-1") self.assertEqual(sum(1 for _ in s3_loader_dp), input[1], f"{input[0]} failed") # S3FileLoader: incorrect inputs input_list = [ [""], ["ai2-public-datasets"], ["s3://"], ["s3:///bin-images"], ["s3://ai2-public-datasets/bin-image"], ] for input in input_list: with self.assertRaises(ValueError, msg=f"{input} should raise ValueError."): s3_loader_dp = S3FileLoader(input, region="us-east-1") for _ in s3_loader_dp: pass # integration test input = [["s3://charades-tar-shards/"], 10] s3_lister_dp =
S3FileLister(IterableWrapper(input[0]), region="us-west-2") s3_loader_dp = S3FileLoader(s3_lister_dp, region="us-west-2") self.assertEqual(sum(1 for _ in s3_loader_dp), input[1], f"{input[0]} failed") if __name__ == "__main__": unittest.main()
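# Condensed sketch of the on-disk caching flow tested above, assuming torchdata
# and portalocker are installed; the URL is the same LICENSE file the tests use,
# and the "/tmp" cache directory is a stand-in.
import os

from torchdata.datapipes.iter import HttpReader, IterableWrapper

url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE"
cache_dp = IterableWrapper([url]).on_disk_cache(
    filepath_fn=lambda u: os.path.join("/tmp", os.path.basename(u))
)
cache_dp = HttpReader(cache_dp)  # only hits the network on a cache miss
cache_dp = cache_dp.end_caching(mode="wb", same_filepath_fn=True)
# print(list(cache_dp))  # ['/tmp/LICENSE']; a second run is served from disk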
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import os import unittest import warnings from itertools import chain import expecttest from _utils._common_utils_for_test import create_temp_dir, reset_after_n_next_calls from torchdata.datapipes.iter import DataFrameMaker, FileLister, FileOpener, IterableWrapper, ParquetDataFrameLoader try: import torcharrow import torcharrow.dtypes as dt HAS_TORCHARROW = True except ImportError: HAS_TORCHARROW = False try: import pyarrow import pyarrow.parquet as parquet HAS_PYARROW = True except ImportError: HAS_PYARROW = False skipIfNoPyArrow = unittest.skipIf(not HAS_PYARROW, "no PyArrow.") skipIfNoTorchArrow = unittest.skipIf(not HAS_TORCHARROW, "no TorchArrow.") @skipIfNoTorchArrow class TestDataFrame(expecttest.TestCase): def setUp(self) -> None: self.temp_dir = create_temp_dir() if HAS_PYARROW: self._write_parquet_files() def tearDown(self) -> None: try: self.temp_dir.cleanup() except Exception as e: warnings.warn(f"TestDataFrame was not able to cleanup temp dir due to {e}") def _write_parquet_files(self): # Create TorchArrow DataFrames DTYPE = dt.Struct([dt.Field("Values", dt.int32)]) df1 = torcharrow.dataframe([(i,) for i in range(10)], dtype=DTYPE) df2 = torcharrow.dataframe([(i,) for i in range(100)], dtype=DTYPE) # Write them as parquet files for i, df in enumerate([df1, df2]): fname = f"df{i}.parquet" self._write_df_as_parquet(df, fname) self._write_multiple_dfs_as_parquet([df1, df2], fname="merged.parquet") def _custom_files_set_up(self, files): for fname, content in files.items(): temp_file_path = os.path.join(self.temp_dir.name, fname) with open(temp_file_path, "w") as f: f.write(content) def _compare_dataframes(self, expected_df, actual_df): self.assertEqual(len(expected_df), len(actual_df)) for exp, act in zip(expected_df, actual_df): self.assertEqual(exp, act) def _write_df_as_parquet(self, df, fname: str) -> None: table = df.to_arrow() parquet.write_table(table, os.path.join(self.temp_dir.name, fname)) def _write_multiple_dfs_as_parquet(self, dfs, fname: str) -> None: tables = [df.to_arrow() for df in dfs] merged_table = pyarrow.concat_tables(tables) parquet.write_table(merged_table, os.path.join(self.temp_dir.name, fname)) def test_dataframe_maker_iterdatapipe(self): source_data = [(i,) for i in range(10)] source_dp = IterableWrapper(source_data) DTYPE = dt.Struct([dt.Field("Values", dt.int32)]) # Functional Test: DataPipe correctly converts into a single TorchArrow DataFrame df_dp = source_dp.dataframe(dtype=DTYPE) df = list(df_dp)[0] expected_df = torcharrow.dataframe([(i,) for i in range(10)], dtype=DTYPE) self._compare_dataframes(expected_df, df) # Functional Test: DataPipe correctly converts into multiple TorchArrow DataFrames, based on size argument df_dp = DataFrameMaker(source_dp, dataframe_size=5, dtype=DTYPE) dfs = list(df_dp) expected_dfs = [ torcharrow.dataframe([(i,) for i in range(5)], dtype=DTYPE), torcharrow.dataframe([(i,) for i in range(5, 10)], dtype=DTYPE), ] for exp_df, act_df in zip(expected_dfs, dfs): self._compare_dataframes(exp_df, act_df) # __len__ Test: df_dp = source_dp.dataframe(dtype=DTYPE) self.assertEqual(1, len(df_dp)) self.assertEqual(10, len(list(df_dp)[0])) df_dp = source_dp.dataframe(dataframe_size=5, dtype=DTYPE) self.assertEqual(2, len(df_dp)) self.assertEqual(5, len(list(df_dp)[0])) # Reset Test: n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(df_dp, n_elements_before_reset) for exp_df, act_df in zip(expected_dfs[:1], res_before_reset): self._compare_dataframes(exp_df, act_df) for exp_df, act_df in zip(expected_dfs, res_after_reset): self._compare_dataframes(exp_df, act_df) def test_dataframe_maker_with_csv(self): def get_name(path_and_stream): return os.path.basename(path_and_stream[0]), path_and_stream[1] csv_files = {"1.csv": "key,item\na,1\nb,2"} self._custom_files_set_up(csv_files) datapipe1 = FileLister(self.temp_dir.name, "*.csv") datapipe2 = FileOpener(datapipe1, mode="b") datapipe3 = datapipe2.map(get_name) csv_dict_parser_dp = datapipe3.parse_csv_as_dict() # Functional Test: Correctly generate TorchArrow DataFrame from CSV DTYPE = dt.Struct([dt.Field("key", dt.string), dt.Field("item", dt.string)]) df_dp = csv_dict_parser_dp.dataframe(dtype=DTYPE, columns=["key", "item"]) expected_dfs = [torcharrow.dataframe([{"key": "a", "item": "1"}, {"key": "b", "item": "2"}], dtype=DTYPE)] for exp_df, act_df in zip(expected_dfs, list(df_dp)): self._compare_dataframes(exp_df, act_df) # Functional: making sure DataPipe works even without `columns` input df_dp = csv_dict_parser_dp.dataframe(dtype=DTYPE) for exp_df, act_df in zip(expected_dfs, list(df_dp)): self._compare_dataframes(exp_df, act_df) @skipIfNoPyArrow def test_parquet_dataframe_reader_iterdatapipe(self): DTYPE = dt.Struct([dt.Field("Values", dt.int32)]) # Functional Test: read from Parquet files and output TorchArrow DataFrames source_dp = FileLister(self.temp_dir.name, masks="df*.parquet") parquet_df_dp = ParquetDataFrameLoader(source_dp, dtype=DTYPE) expected_dfs = [ torcharrow.dataframe([(i,) for i in range(10)], dtype=DTYPE), torcharrow.dataframe([(i,) for i in range(100)], dtype=DTYPE), ] for exp_df, act_df in zip(expected_dfs, list(parquet_df_dp)): self._compare_dataframes(exp_df, act_df) # Functional Test: correctly read from a Parquet file that was a merged DataFrame merged_source_dp = FileLister(self.temp_dir.name, masks="merged.parquet") merged_parquet_df_dp = ParquetDataFrameLoader(merged_source_dp, dtype=DTYPE) expected_merged_dfs = [torcharrow.dataframe([(i,) for i in chain(range(10), range(100))], dtype=DTYPE)] for exp_df, act_df in zip(expected_merged_dfs, list(merged_parquet_df_dp)): self._compare_dataframes(exp_df, act_df) # __len__ Test: no valid length because we do not know the number of row groups in advance with self.assertRaisesRegex(TypeError, "has no len"): len(parquet_df_dp) # Reset Test: n_elements_before_reset = 1 res_before_reset, res_after_reset = reset_after_n_next_calls(parquet_df_dp, n_elements_before_reset) for exp_df, act_df in zip(expected_dfs[:1], res_before_reset): self._compare_dataframes(exp_df, act_df) for exp_df, act_df in zip(expected_dfs, res_after_reset): self._compare_dataframes(exp_df, act_df) if __name__ == "__main__": unittest.main()
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import hashlib
import os
import platform
import sys
import tempfile
from typing import List, Tuple, TypeVar

from torchdata.datapipes.iter import IterDataPipe

T_co = TypeVar("T_co", covariant=True)

IS_LINUX = sys.platform == "linux"
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_M1 = IS_MACOS and "arm" in platform.platform()


class IDP_NoLen(IterDataPipe):
    def __init__(self, input_dp) -> None:
        super().__init__()
        self.input_dp = input_dp

    def __iter__(self):
        yield from self.input_dp


def get_name(path_and_stream):
    return os.path.basename(path_and_stream[0]), path_and_stream[1]


# Given a DataPipe and integer n, iterate the DataPipe for n elements and store the elements into a list
# Then, reset the DataPipe and return a tuple of two lists
# 1. A list of elements yielded before the reset
# 2. A list of all elements of the DataPipe after the reset
def reset_after_n_next_calls(datapipe: IterDataPipe[T_co], n: int) -> Tuple[List[T_co], List[T_co]]:
    it = iter(datapipe)
    res_before_reset = []
    for _ in range(n):
        res_before_reset.append(next(it))
    return res_before_reset, list(datapipe)


def create_temp_dir(dir=None):
    # The temp dir and files within it will be released and deleted in tearDown().
    # Adding `noqa: P201` to avoid the linter's warning on not releasing the dir handle within this function.
    temp_dir = tempfile.TemporaryDirectory(dir=dir)  # noqa: P201
    return temp_dir


def create_temp_files(temp_dir, prefix=1, empty=True):
    temp_dir_path = temp_dir.name

    with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, prefix=str(prefix), suffix=".txt") as f:
        temp_file1_name = f.name
    with open(temp_file1_name, "w") as f1:
        f1.write("0123456789abcdef")

    with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, prefix=str(prefix + 1), suffix=".byte") as f:
        temp_file2_name = f.name
    with open(temp_file2_name, "wb") as f2:
        f2.write(b"0123456789abcdef")

    if empty:
        with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, prefix=str(prefix + 2), suffix=".empty") as f:
            temp_file3_name = f.name
        return temp_file1_name, temp_file2_name, temp_file3_name

    return temp_file1_name, temp_file2_name


def check_hash_fn(filepath, expected_hash, hash_type="md5"):
    if hash_type == "sha256":
        hash_fn = hashlib.sha256()
    elif hash_type == "md5":
        hash_fn = hashlib.md5()
    else:
        raise ValueError("Invalid hash_type requested, should be one of {}".format(["sha256", "md5"]))

    with open(filepath, "rb") as f:
        chunk = f.read(1024 ** 2)
        while chunk:
            hash_fn.update(chunk)
            chunk = f.read(1024 ** 2)

    return hash_fn.hexdigest() == expected_hash
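# Hedged usage sketch for the helpers above (names taken from this file): iterate
# a DataPipe partially, reset it, and verify a file hash. The file path and hash
# value below are illustrative placeholders.
from torchdata.datapipes.iter import IterableWrapper

dp = IterableWrapper(range(10))
before, after = reset_after_n_next_calls(dp, 3)
assert before == [0, 1, 2] and after == list(range(10))

# check_hash_fn streams the file in 1 MiB chunks and compares hex digests:
# matches = check_hash_fn("/tmp/example.bin", expected_hash="<md5 hex digest>", hash_type="md5")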
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import os
import tarfile

NUMBER_OF_FILES = 3
FILES = [
    ("bytes", "bt", "{fn}_0123456789abcdef\n", True),
    ("csv", "csv", "key,item\n0,{fn}_0\n1,{fn}_1\n"),
    ("json", "json", '{{"{fn}_0": [{{"{fn}_01": 1}}, {{"{fn}_02": 2}}], "{fn}_1": 1}}\n'),
    ("txt", "txt", "{fn}_0123456789abcdef\n"),
]


def create_files(folder, suffix, data, encoding=False):
    os.makedirs(folder, exist_ok=True)
    for i in range(NUMBER_OF_FILES):
        fn = str(i)
        d = data.format(fn=fn)
        mode = "wb" if encoding else "wt"
        if encoding:
            d = d.encode()
        with open(folder + "/" + fn + "." + suffix, mode) as f:
            f.write(d)

    with tarfile.open(folder + ".tar", mode="w") as archive:
        archive.add(folder)

    with tarfile.open(folder + ".tar.gz", mode="w:gz") as archive:
        archive.add(folder)


def create_tfrecord_files(path: str):
    try:
        import tensorflow as tf
    except ImportError:
        print("TensorFlow not found!")
        print("We will not generate tfrecord files.")
        return

    os.makedirs(path, exist_ok=True)
    with tf.io.TFRecordWriter(os.path.join(path, "example.tfrecord")) as writer:
        for i in range(4):
            x = tf.range(i * 10, (i + 1) * 10)
            record_bytes = tf.train.Example(
                features=tf.train.Features(
                    feature={
                        "x_float": tf.train.Feature(float_list=tf.train.FloatList(value=x)),
                        "x_int": tf.train.Feature(int64_list=tf.train.Int64List(value=tf.cast(x * 10, "int64"))),
                        "x_byte": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"test str"])),
                    }
                )
            ).SerializeToString()
            writer.write(record_bytes)

    with tf.io.TFRecordWriter(os.path.join(path, "sequence_example.tfrecord")) as writer:
        for i in range(4):
            x = tf.range(i * 10, (i + 1) * 10)
            rep = 2 * i + 3
            record_bytes = tf.train.SequenceExample(
                context=tf.train.Features(
                    feature={
                        "x_float": tf.train.Feature(float_list=tf.train.FloatList(value=x)),
                        "x_int": tf.train.Feature(int64_list=tf.train.Int64List(value=tf.cast(x * 10, "int64"))),
                        "x_byte": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"test str"])),
                    }
                ),
                feature_lists=tf.train.FeatureLists(
                    feature_list={
                        "x_float_seq": tf.train.FeatureList(
                            feature=[tf.train.Feature(float_list=tf.train.FloatList(value=x))] * rep
                        ),
                        "x_int_seq": tf.train.FeatureList(
                            feature=[tf.train.Feature(int64_list=tf.train.Int64List(value=tf.cast(x * 10, "int64")))]
                            * rep
                        ),
                        "x_byte_seq": tf.train.FeatureList(
                            feature=[tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"test str"]))] * rep
                        ),
                    }
                ),
            ).SerializeToString()
            writer.write(record_bytes)


if __name__ == "__main__":
    for args in FILES:
        create_files(*args)
    create_tfrecord_files("tfrecord")
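# Hedged sketch: reading back the archives produced by create_files above with
# TorchData's tar loader. `load_from_tar` is the functional form of
# TarArchiveLoader; the "txt.tar" file name assumes the FILES table above was
# run from the current directory.
from torchdata.datapipes.iter import FileLister, FileOpener

tar_dp = FileLister(".", masks="txt.tar")
opened_dp = FileOpener(tar_dp, mode="b")
for path, stream in opened_dp.load_from_tar():
    content = stream.read()  # bytes of each member file inside the archive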
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import argparse

import torchdata
import torchdata.dataloader2
import torchdata.datapipes


def s3_test():
    # Importing the extension module verifies that the S3 bindings were built.
    from torchdata._torchdata import S3Handler


if __name__ == "__main__":
    r"""
    TorchData Smoke Test
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--no-s3", dest="s3", action="store_false")

    options = parser.parse_args()
    if options.s3:
        s3_test()
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import argparse

import torch
import torch.distributed as dist

from torch.distributed.elastic.multiprocessing.errors import record
from torch.utils.data import DataLoader

from torchdata.dataloader2 import DataLoader2, DistributedReadingService
from torchdata.datapipes.iter import IterableWrapper


def _get_dataloader(data_length: int, dl2: bool, shuffle: bool, rs=None):
    data_source = IterableWrapper(list(range(data_length)))

    dp = data_source.sharding_filter()
    if shuffle:
        dp = dp.shuffle()

    if dl2:
        if rs is None:
            rs = DistributedReadingService()
        dl = DataLoader2(dp, reading_service=rs)
    else:
        dp = dp.fullsync()
        dl = DataLoader(dp)

    return dl


@record
def main(backend, dl2):
    dist.init_process_group(backend)
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    # Use a prime number to ensure uneven data sharding
    data_length = 23

    # No Shuffle
    dl = _get_dataloader(data_length, dl2=dl2, shuffle=False)
    res = []
    for d in dl:
        res.append(d)
        # Simulate training synchronization
        dist.barrier()
    assert sorted(res) == list(range(rank, data_length // world_size * world_size, world_size))

    # Shuffle
    dl = _get_dataloader(data_length, dl2=dl2, shuffle=True)
    results = []
    for _ in range(2):
        res = []
        torch.manual_seed(123)
        for d in dl:
            res.append(d)
            # Simulate training synchronization
            dist.barrier()
        results.append(res)
    assert results[0] == results[1]

    # Different seed
    res = []
    torch.manual_seed(321)
    for d in dl:
        res.append(d)
        # Simulate training synchronization
        dist.barrier()
    results.append(res)
    assert len(results[0]) == len(results[2])
    assert results[0] != results[2]

    # Properly shutdown the process group
    if isinstance(dl, DataLoader2):
        dl.shutdown()


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Elastic Training")
    backend_group = parser.add_mutually_exclusive_group(required=True)
    backend_group.add_argument("--gloo", action="store_true", help="GLOO backend")
    backend_group.add_argument("--nccl", action="store_true", help="NCCL backend")
    backend_group.add_argument("--mpi", action="store_true", help="MPI backend")
    dl_group = parser.add_mutually_exclusive_group(required=True)
    dl_group.add_argument("--dl1", action="store_true", help="DataLoader")
    dl_group.add_argument("--dl2", action="store_true", help="DataLoader2")

    args = parser.parse_args()

    backend = "gloo"
    if args.nccl:
        backend = "nccl"
    elif args.mpi:
        backend = "mpi"

    dl2 = True
    if args.dl1:
        dl2 = False

    main(backend, dl2)
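# Hedged usage note: this script targets torch.distributed.elastic (the `@record`
# decorator and env:// rendezvous read by dist.init_process_group), so a typical
# two-process local launch with the gloo backend and the DataLoader2 path would
# look like the command below. The script file name is a placeholder.
#
#   torchrun --nproc_per_node=2 elastic_training.py --gloo --dl2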
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import multiprocessing as mp import os import pickle import queue import random import socket import unittest from unittest import TestCase import numpy as np import torch import torch.distributed as dist from torch.testing._internal.common_utils import instantiate_parametrized_tests, IS_WINDOWS, parametrize from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES from torchdata.dataloader2 import ( DataLoader2, DistributedReadingService, InProcessReadingService, MultiProcessingReadingService, ReadingServiceInterface, SequentialReadingService, ) from torchdata.dataloader2.dataloader2 import READING_SERVICE_STATE_KEY_NAME, SERIALIZED_DATAPIPE_KEY_NAME from torchdata.dataloader2.graph import DataPipe, list_dps, replace_dp, set_datapipes_seed, traverse_dps from torchdata.dataloader2.random import SeedGenerator from torchdata.datapipes.iter import IterableWrapper, IterDataPipe, ShardingRoundRobinDispatcher try: import dill # XXX: By default, dill writes the Pickler dispatch table to inject its # own logic there. This globally affects the behavior of the standard library # pickler for any user who transitively depends on this module! # Undo this extension to avoid altering the behavior of the pickler globally. dill.extend(use_dill=False) HAS_DILL = True except ImportError: HAS_DILL = False skipIfNoDill = unittest.skipIf(not HAS_DILL, "no dill") if dist.is_available(): HAS_DIST = True else: HAS_DIST = False skipIfNoDistributed = unittest.skipIf(not HAS_DIST, "no torch.distributed") TEST_WITH_TSAN = os.getenv("PYTORCH_TEST_WITH_TSAN", "0") == "1" mp_ctx_parametrize = parametrize("ctx", mp.get_all_start_methods()) EXCEPTION_ITERATION_NUM = 7 class _ReadingServiceWrapper: def __init__(self, dp): self.dp = dp def __iter__(self): self.it = iter(self.dp) return self def __next__(self): return next(self.it) @staticmethod def return_one(): return 1 class TestReadingService(ReadingServiceInterface): def initialize(self, dp: DataPipe) -> DataPipe: return _ReadingServiceWrapper(dp) # type: ignore[return-value] class DataLoader2Test(TestCase): def test_dataloader2(self) -> None: test_data_pipe = IterableWrapper(range(3)) data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe) expected_batch = 0 for batch in iter(data_loader): self.assertEqual(batch, expected_batch) expected_batch += 1 def test_dataloader2_shutdown(self) -> None: test_data_pipe = IterableWrapper(range(3)) data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe) data_loader.shutdown() def test_dataloader2_state_dict(self) -> None: test_data_pipe = IterableWrapper(range(3)) data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe) state = data_loader.state_dict() self.assertIsNotNone(state) self.assertIsNotNone(state[SERIALIZED_DATAPIPE_KEY_NAME]) self.assertIsNone(state[READING_SERVICE_STATE_KEY_NAME]) data_loader.shutdown() def test_dataloader2_reading_service(self) -> None: test_data_pipe = IterableWrapper(range(3)) reading_service = TestReadingService() data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe, reading_service=reading_service) expected_batch = 0 for batch in iter(data_loader): self.assertEqual(batch, expected_batch) expected_batch += 1 def test_dataloader2_load_state_dict(self) -> None: test_data_pipe = IterableWrapper(range(3)) reading_service = TestReadingService() 
data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe, reading_service=reading_service) batch = next(iter(data_loader)) self.assertEqual(batch, 0) state = data_loader.state_dict() self.assertIsNotNone(state) self.assertIsNotNone(state[SERIALIZED_DATAPIPE_KEY_NAME]) self.assertIsNone(state[READING_SERVICE_STATE_KEY_NAME]) data_loader.shutdown() restored_data_loader: DataLoader2 = DataLoader2(datapipe=None, reading_service=reading_service) restored_data_loader.load_state_dict(state) restored_data_loader_datapipe = restored_data_loader.datapipe deserialized_datapipe = pickle.loads(state[SERIALIZED_DATAPIPE_KEY_NAME]) for batch_1, batch_2 in zip(restored_data_loader_datapipe, deserialized_datapipe): self.assertEqual(batch_1, batch_2) self.assertEqual( restored_data_loader.reading_service_state, state[READING_SERVICE_STATE_KEY_NAME], ) restored_data_loader.shutdown() def test_dataloader2_iterates_correctly(self) -> None: test_data_pipe = IterableWrapper(range(10)).sharding_filter() reading_services = [ None, TestReadingService(), MultiProcessingReadingService(num_workers=4), MultiProcessingReadingService(num_workers=4, worker_prefetch_cnt=0), ] for reading_service in reading_services: data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe, reading_service=reading_service) self.assertEqual(list(range(10)), list(data_loader)) self.assertEqual(list(range(10)), list(data_loader)) self.assertEqual(list(range(10)), list(data_loader)) actual = [] for i in data_loader: actual.append(i) self.assertEqual(list(range(10)), actual) actual = [] for i in data_loader: actual.append(i) self.assertEqual(list(range(10)), actual) def test_dataloader2_reset(self) -> None: test_data_pipe = IterableWrapper(range(10)) reading_services = [None, TestReadingService(), MultiProcessingReadingService(num_workers=1)] for reading_service in reading_services: data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe, reading_service=reading_service) # Functional Test: Ensure multiple sequential reads of DL2 is possible self.assertEqual(list(range(10)), list(data_loader)) self.assertEqual(list(range(10)), list(data_loader)) self.assertEqual(list(range(10)), list(data_loader)) # Functional Test: Ensure that the creation of a new iterator invalidates the old one it1 = iter(data_loader) self.assertEqual(0, next(it1)) self.assertEqual(1, next(it1)) it2 = iter(data_loader) self.assertEqual(0, next(it2)) self.assertEqual(1, next(it2)) with self.assertRaisesRegex(RuntimeError, "iterator has been invalidated"): next(it1) self.assertEqual(list(range(2, 10)), list(it2)) def test_dataloader2_delegate_attribute(self) -> None: test_data_pipe = IterableWrapper(range(10)) data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe, reading_service=TestReadingService()) # Functional Test: Ensure multiple sequential reads of DL2 is possible self.assertEqual(list(range(10)), list(data_loader)) self.assertEqual(list(range(10)), list(data_loader)) # Functional Test: Ensure that attribute/method of `dataloader._datapipe_iter` can be used it = iter(data_loader) self.assertEqual(1, it.return_one()) # type: ignore[attr-defined] class DataLoader2ConsistencyTest(TestCase): r""" These tests ensure that the behaviors of `DataLoader2` are consistent across `ReadingServices` and potentially with `DataLoaderV1`. 
""" @staticmethod def _get_no_reading_service(): return None @staticmethod def _get_mp_reading_service(): return MultiProcessingReadingService(num_workers=2) @staticmethod def _get_in_process_reading_service(): return InProcessReadingService() def _collect_data(self, datapipe, reading_service_gen): dl: DataLoader2 = DataLoader2(datapipe, reading_service=reading_service_gen()) result = [] # Testing how RS handles partial reading and reiterations for row, _ in zip(dl, range(10)): result.append(row) for row in dl: result.append(row) dl.shutdown() return result @staticmethod def _no_op(x): return x def test_dataloader2_batch_collate(self) -> None: dp: IterDataPipe = IterableWrapper(range(100)).batch(2).sharding_filter().collate(self._no_op) # type: ignore[assignment] expected = self._collect_data(dp, reading_service_gen=self._get_no_reading_service) reading_service_generators = ( self._get_mp_reading_service, self._get_in_process_reading_service, ) for reading_service_gen in reading_service_generators: actual = self._collect_data(dp, reading_service_gen=reading_service_gen) # TODO(588): This comparison only indicates that somethings is broken and not helping with debug self.assertEqual(expected, actual, reading_service_gen) def test_dataloader2_shuffle(self) -> None: # TODO(589): Add shuffle test pass def _x_mult_2(d): return d * 2 class NonReplicableDataPipe(IterDataPipe): def __init__(self, datapipe): self.datapipe = datapipe def __iter__(self): yield from self.datapipe def is_replicable(self): return False class _CustomException(Exception): pass class MakeMistakeDataPipe(IterDataPipe): def __init__(self, source_datapipe, exc_iteration=EXCEPTION_ITERATION_NUM): self.source_datapipe = source_datapipe self.exc_iteration = exc_iteration def __iter__(self): for i, x in enumerate(self.source_datapipe): if i == self.exc_iteration: raise _CustomException("oops") yield x class MultiProcessingReadingServiceTest(TestCase): @staticmethod def _worker_init_fn(datapipe, worker_info): datapipe = datapipe.sharding_filter() torch.utils.data.graph_settings.apply_sharding( datapipe, worker_info.num_workers, worker_info.worker_id, SHARDING_PRIORITIES.MULTIPROCESSING ) return datapipe @staticmethod def _worker_reset_fn(datapipe, worker_info, worker_seed_generator: SeedGenerator): graph = traverse_dps(datapipe) dps = list_dps(graph) worker_seed_generator.seed(123) set_datapipes_seed(dps, seed_generator=worker_seed_generator, distributed_shared=True) return datapipe @mp_ctx_parametrize def test_worker_fns(self, ctx): dp: IterDataPipe = IterableWrapper(range(100)).batch(2).shuffle() rs = MultiProcessingReadingService( num_workers=2, multiprocessing_context=ctx, worker_init_fn=self._worker_init_fn, worker_reset_fn=self._worker_reset_fn, ) dl = DataLoader2(dp, reading_service=rs) res1 = list(dl) res2 = list(dl) # Test worker_init_fn to set sharding def _expand_fn(res): result = [] for batch in res: result.extend(batch) return result exp = list(range(100)) self.assertEqual(sorted(_expand_fn(res1)), exp) self.assertEqual(sorted(_expand_fn(res2)), exp) # Test worker_reset_fn to set the same random seed across epoches self.assertEqual(res1, res2) @mp_ctx_parametrize def test_single_branch_non_replicable(self, ctx): r""" For single branch pipeline with a non-replicable DataPipe, all ``sharding_filters`` in the pipeline become non-replicable. 
""" def _make_dp(): single_br_dp = IterableWrapper(list(range(10))).shuffle() map_dp = single_br_dp.map(_x_mult_2) end_dp = map_dp.map(_x_mult_2).shuffle() return single_br_dp, map_dp, end_dp def _assert_deterministic_dl_res(dl, exp): torch.manual_seed(123) res = list(dl) self.assertEqual(sorted(res), exp) # Second epoch torch.manual_seed(123) self.assertEqual(list(dl), res) # Different seed torch.manual_seed(321) self.assertNotEqual(list(dl), res) # Properly shutdown dl.shutdown() # By-default, all replicable single_br_dp, _, end_dp = _make_dp() graph = traverse_dps(end_dp) sf_dp = single_br_dp.sharding_filter() replace_dp(graph, single_br_dp, sf_dp) dl = DataLoader2( end_dp, reading_service=MultiProcessingReadingService(num_workers=2, multiprocessing_context=ctx) ) # Determinism and dynamic sharding # _assert_deterministic_dl_res(dl, [i * 4 for i in range(10)]) # Non-replicable before sharding_filter # shuffle in dispatch process single_br_dp, map_dp, end_dp = _make_dp() graph = traverse_dps(end_dp) round_robin_dispatcher = ShardingRoundRobinDispatcher(single_br_dp, SHARDING_PRIORITIES.MULTIPROCESSING) replace_dp(graph, single_br_dp, round_robin_dispatcher) sf_dp = map_dp.sharding_filter() replace_dp(graph, map_dp, sf_dp) dl = DataLoader2( end_dp, reading_service=MultiProcessingReadingService(num_workers=2, multiprocessing_context=ctx) ) # Determinism for non-replicable pipeline _assert_deterministic_dl_res(dl, [i * 4 for i in range(10)]) # Non-replicable after sharding_filter # shuffle in dispatch process single_br_dp, map_dp, end_dp = _make_dp() graph = traverse_dps(end_dp) sf_dp = single_br_dp.sharding_filter() replace_dp(graph, single_br_dp, sf_dp) round_robin_dispatcher = ShardingRoundRobinDispatcher(map_dp, SHARDING_PRIORITIES.MULTIPROCESSING) replace_dp(graph, map_dp, round_robin_dispatcher) dl = DataLoader2( end_dp, reading_service=MultiProcessingReadingService(num_workers=2, multiprocessing_context=ctx) ) # Determinism for non-replicable pipeline _assert_deterministic_dl_res(dl, [i * 4 for i in range(10)]) @mp_ctx_parametrize def test_multi_branch_non_replicable(self, ctx) -> None: r""" For multi-branch pipeline with a non-replicable DataPipe on one branch, all ``sharding_filter`` on the other branches should remain replicable. 
""" def _make_dp(): branch1_dp = IterableWrapper(list(range(10))).shuffle() branch2_dp = IterableWrapper(list(range(10))).shuffle() map_dp = branch1_dp.map(_x_mult_2) end_dp = map_dp.zip(branch2_dp) return branch1_dp, map_dp, branch2_dp, end_dp def _assert_deterministic_dl_res(dl, exp1, exp2): torch.manual_seed(123) res = list(dl) res1, res2 = list(zip(*res)) self.assertEqual(sorted(res1), exp1) self.assertEqual(sorted(res2), exp2) # Second epoch torch.manual_seed(123) self.assertEqual(list(dl), res) # Different seed torch.manual_seed(321) self.assertNotEqual(list(dl), res) # Properly shutdown dl.shutdown() # By-default, all replicable branch1_dp, _, branch2_dp, end_dp = _make_dp() graph = traverse_dps(end_dp) sf1_dp = branch1_dp.sharding_filter() sf2_dp = branch2_dp.sharding_filter() replace_dp(graph, branch1_dp, sf1_dp) replace_dp(graph, branch2_dp, sf2_dp) dl = DataLoader2( end_dp, reading_service=MultiProcessingReadingService(num_workers=2, multiprocessing_context=ctx) ) # Determinism and dynamic sharding _assert_deterministic_dl_res(dl, [i * 2 for i in range(10)], list(range(10))) # Non-replicable on one branch # shuffle in dispatch process branch1_dp, _, branch2_dp, end_dp = _make_dp() graph = traverse_dps(end_dp) non_replicable_dp = ShardingRoundRobinDispatcher(branch1_dp, SHARDING_PRIORITIES.MULTIPROCESSING) replace_dp(graph, branch1_dp, non_replicable_dp) # The other branch should has a sharding_filter to make data even sf_dp = branch2_dp.sharding_filter() replace_dp(graph, branch2_dp, sf_dp) dl = DataLoader2( end_dp, reading_service=MultiProcessingReadingService(num_workers=2, multiprocessing_context=ctx) ) # Determinism for non-replicable pipeline _assert_deterministic_dl_res(dl, [i * 2 for i in range(10)], list(range(10))) # Non-replicable on both branches # shuffle in dispatch process branch1_dp, _, branch2_dp, end_dp = _make_dp() graph = traverse_dps(end_dp) non_replicable_dp1 = ShardingRoundRobinDispatcher(branch1_dp, SHARDING_PRIORITIES.MULTIPROCESSING) replace_dp(graph, branch1_dp, non_replicable_dp1) non_replicable_dp2 = ShardingRoundRobinDispatcher(branch2_dp, SHARDING_PRIORITIES.MULTIPROCESSING) replace_dp(graph, branch2_dp, non_replicable_dp2) dl = DataLoader2( end_dp, reading_service=MultiProcessingReadingService(num_workers=2, multiprocessing_context=ctx) ) # Determinism for non-replicable pipeline _assert_deterministic_dl_res(dl, [i * 2 for i in range(10)], list(range(10))) @mp_ctx_parametrize def test_multi_worker_determinism(self, ctx): dp: IterDataPipe = IterableWrapper(range(100)) dp = dp.shuffle().sharding_filter() dp = dp.batch(2) rs = MultiProcessingReadingService( num_workers=2, multiprocessing_context=ctx, ) dl = DataLoader2(dp, reading_service=rs) torch.manual_seed(123) res = list(dl) + list(dl) torch.manual_seed(123) self.assertEqual(res, list(dl) + list(dl)) torch.manual_seed(321) self.assertNotEqual(res, list(dl) + list(dl)) # Using seed API for DataLoader2 dl.seed(123) res = list(dl) + list(dl) dl.seed(123) self.assertEqual(res, list(dl) + list(dl)) dl.seed(321) self.assertNotEqual(res, list(dl) + list(dl)) @mp_ctx_parametrize def test_dispatching_worker_determinism(self, ctx): dp: IterDataPipe = IterableWrapper(range(101)) dp = dp.shuffle().sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING) dp = dp.batch(2) rs = MultiProcessingReadingService( num_workers=2, multiprocessing_context=ctx, ) dl = DataLoader2(dp, reading_service=rs) torch.manual_seed(123) res = list(dl) + list(dl) torch.manual_seed(123) self.assertEqual(res, list(dl) + 
list(dl)) torch.manual_seed(321) self.assertNotEqual(res, list(dl) + list(dl)) # Using seed API for DataLoader2 dl.seed(123) res = list(dl) + list(dl) dl.seed(123) self.assertEqual(res, list(dl) + list(dl)) dl.seed(321) self.assertNotEqual(res, list(dl) + list(dl)) @mp_ctx_parametrize def test_non_replicable_datapipe(self, ctx) -> None: r""" For the pipeline with non-replicable DataPipe, make sure the DataPipe remains in the main process. """ dp: IterDataPipe = IterableWrapper(range(100)) dp = dp.shuffle().sharding_filter() dp = dp.batch(2) non_rep_dp = NonReplicableDataPipe(dp) rs = MultiProcessingReadingService( num_workers=2, multiprocessing_context=ctx, ) dl = DataLoader2(non_rep_dp, reading_service=rs) torch.manual_seed(123) it = iter(dl) # Validate NonReplicableDataPipe still in the main process non_rep_dp = dl.reading_service._end_datapipe self.assertEqual(type(non_rep_dp), NonReplicableDataPipe) res = list(it) + list(dl) torch.manual_seed(123) self.assertEqual(res, list(dl) + list(dl)) torch.manual_seed(321) self.assertNotEqual(res, list(dl) + list(dl)) @parametrize("num_workers", [1, 3]) @parametrize("worker_prefetch_cnt", [0, 5, 10]) def test_worker_exception_raised(self, num_workers, worker_prefetch_cnt): dp = IterableWrapper(range(100)).sharding_filter() dp = MakeMistakeDataPipe(dp) rs = MultiProcessingReadingService(num_workers=num_workers, worker_prefetch_cnt=worker_prefetch_cnt) dl = DataLoader2(dp, reading_service=rs) it = iter(dl) for _ in range(EXCEPTION_ITERATION_NUM * num_workers): next(it) with self.assertRaises(_CustomException) as cm: next(it) exc_msg = str(cm.exception) self.assertTrue("Caught _CustomException in worker process 0" in exc_msg) self.assertTrue("Original Traceback" in exc_msg) self.assertTrue("_CustomException: oops" in exc_msg) @parametrize("num_workers", [1, 3]) @parametrize("worker_prefetch_cnt", [0, 5, 10]) def test_dispatching_exception_raised(self, num_workers, worker_prefetch_cnt): dp = IterableWrapper(range(100)) dp = MakeMistakeDataPipe(dp) dp = dp.sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING) dp = dp.map(_x_mult_2) rs = MultiProcessingReadingService(num_workers=num_workers, worker_prefetch_cnt=worker_prefetch_cnt) dl = DataLoader2(dp, reading_service=rs) it = iter(dl) for _ in range(EXCEPTION_ITERATION_NUM): next(it) with self.assertRaises(_CustomException) as cm: next(it) exc_msg = str(cm.exception) self.assertTrue("Caught _CustomException in dispatching process" in exc_msg) self.assertTrue("Original Traceback" in exc_msg) self.assertTrue("_CustomException: oops" in exc_msg) TEST_MASTER_ADDR = "127.0.0.1" DEFAULT_WORLD_SIZE = 2 def _get_open_port(): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(("", 0)) port = s.getsockname()[1] s.close() return str(port) class TerminateSignal: pass def _launch_distributed_training(world_size, *args, fn): os.environ["MASTER_ADDR"] = TEST_MASTER_ADDR os.environ["MASTER_PORT"] = _get_open_port() ctx = mp.get_context("spawn") q = ctx.Queue() ps = [] for rank in range(world_size): p = ctx.Process( target=fn, args=( rank, world_size, q, *args, ), ) p.start() ps.append(p) res = [] while True: try: d = q.get() if isinstance(d, TerminateSignal): break res.append(d) except queue.Empty: continue for p in ps: p.join() return res def _dist_one_epoch(dl): res = [] for d in dl: res.append(d) # Simulate training synchronization dist.barrier() return res def _finalize_distributed_queue(rank, q): r""" Synchronize all distributed processes to guarantee all data have been put into the 
Multiprocessing Queue. """ pg = dist.new_group(backend="gloo") end_tensor = torch.tensor([rank], dtype=torch.int64) dist.all_reduce(end_tensor, group=pg) if rank == 0: q.put(TerminateSignal()) dist.destroy_process_group(pg) def _random_fn(data): r""" Used to validate the randomness of subprocess-local RNGs are set deterministically. """ py_random_num = random.randint(0, 2 ** 32) np_random_num = np.random.randint(0, 2 ** 32) torch_random_num = torch.randint(0, 2 ** 32, size=[]).item() return (data, py_random_num, np_random_num, torch_random_num) def _dist_training_fn(rank, world_size, q, dp_fn, rs_fn, num_workers, ctx): # Use gloo dist.init_process_group("gloo", rank=rank, world_size=world_size) # Uneven shards data_length = world_size * num_workers * 10 + 1 dp = dp_fn(data_length) rs = rs_fn(num_workers, ctx) dl = DataLoader2(dp, reading_service=rs) # No seed res = _dist_one_epoch(dl) q.put((0, rank, res)) # Shuffle with seed for epoch in range(2): dl.seed(123) res = _dist_one_epoch(dl) q.put((epoch + 1, rank, res)) # Different seed dl.seed(321) res = _dist_one_epoch(dl) q.put((3, rank, res)) _finalize_distributed_queue(rank, q) dl.shutdown() @skipIfNoDistributed @unittest.skipIf(IS_WINDOWS, "Remove when https://github.com/pytorch/data/issues/857 is fixed") class SequentialReadingServiceTest(TestCase): @staticmethod def _make_dp(data_length): data_source = IterableWrapper(list(range(data_length))) dp = data_source.shuffle().sharding_filter().map(_random_fn) return dp @staticmethod def _make_dispatching_dp(data_length): data_source = IterableWrapper(list(range(data_length))) dp = data_source.shuffle().sharding_filter() dp = dp.sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING).map(_random_fn) return dp @staticmethod def _make_rs(num_workers, ctx): mp_rs = MultiProcessingReadingService( num_workers=num_workers, multiprocessing_context=ctx, ) dist_rs = DistributedReadingService() rs = SequentialReadingService(dist_rs, mp_rs) return rs @mp_ctx_parametrize def test_sequential_reading_service_normal_dp(self, ctx): world_size = DEFAULT_WORLD_SIZE num_workers = 2 res = _launch_distributed_training( world_size, SequentialReadingServiceTest._make_dp, SequentialReadingServiceTest._make_rs, num_workers, ctx, fn=_dist_training_fn, ) result = ({}, {}, {}, {}) for epoch, rank, r in res: d, *ran_nums = list(zip(*r)) result[epoch][rank] = (d, ran_nums) # Guarantee the same length per rank for rr in result: exp_len = num_workers * 10 for _, (d, _) in rr.items(): self.assertEqual(len(d), exp_len) # Same seed generate the same order of data and the same random state self.assertEqual(result[1], result[2]) # Different seeds for rank in range(world_size): # Different shuffle order self.assertNotEqual(result[1][rank][0], result[3][rank][0]) # Different subprocess-local random state self.assertNotEqual(result[1][rank][1], result[3][rank][1]) @mp_ctx_parametrize def test_sequential_reading_service_dispatching_dp(self, ctx): world_size = DEFAULT_WORLD_SIZE num_workers = 2 res = _launch_distributed_training( world_size, SequentialReadingServiceTest._make_dispatching_dp, SequentialReadingServiceTest._make_rs, num_workers, ctx, fn=_dist_training_fn, ) result = ({}, {}, {}, {}) for epoch, rank, r in res: d, *ran_nums = list(zip(*r)) result[epoch][rank] = (d, ran_nums) # Guarantee the same length per rank for rr in result: exp_len = num_workers * 10 for _, (d, _) in rr.items(): self.assertEqual(len(d), exp_len) # Same seed generate the same order of data and the same random state 
        self.assertEqual(result[1], result[2])

        # Different seeds
        for rank in range(world_size):
            # Different shuffle order
            self.assertNotEqual(result[1][rank][0], result[3][rank][0])
            # Different subprocess-local random state
            self.assertNotEqual(result[1][rank][1], result[3][rank][1])


instantiate_parametrized_tests(MultiProcessingReadingServiceTest)
instantiate_parametrized_tests(SequentialReadingServiceTest)


if __name__ == "__main__":
    unittest.main()
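# Hedged recap of the composition pattern tested above: SequentialReadingService
# applies the distributed service first (one shard per rank), then the
# multiprocessing service (workers within the rank). This sketch assumes a
# process group is already initialized, as it is in the tests.
from torchdata.dataloader2 import (
    DataLoader2,
    DistributedReadingService,
    MultiProcessingReadingService,
    SequentialReadingService,
)
from torchdata.datapipes.iter import IterableWrapper

dp = IterableWrapper(range(1000)).shuffle().sharding_filter()
rs = SequentialReadingService(DistributedReadingService(), MultiProcessingReadingService(num_workers=2))
dl = DataLoader2(dp, reading_service=rs)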
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import multiprocessing as mp import unittest from unittest import TestCase from torch.testing._internal.common_utils import instantiate_parametrized_tests, parametrize, subtest from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES from torchdata.dataloader2 import ( DataLoader2, DataLoader2Iterator, InProcessReadingService, MultiProcessingReadingService, ) from torchdata.datapipes.iter import IterableWrapper, IterDataPipe def _add_one(x: int) -> int: return x + 1 # Test DataPipes n_elements = 10 dp1 = IterableWrapper(range(n_elements)).shuffle().sharding_filter() double_pause_dp = dp1.prefetch().prefetch() test_dps = [dp1, double_pause_dp] mp_ctx_parametrize = parametrize("ctx", mp.get_all_start_methods()) dp_parametrize = parametrize("dp", test_dps) class TestInProcessReadingService(TestCase): r""" This tests specific functionalities of InProcessReadingService, notably `pause`, `resume`, `snapshot`. """ @dp_parametrize def test_reading_service_pause_resume(self, dp) -> None: # Functional Test: Testing various configuration of DataPipe/ReadingService to ensure the pipeline # properly pauses and resumes rs1 = InProcessReadingService() dl1: DataLoader2 = DataLoader2(dp, reading_service=rs1) res = [] for i, x in enumerate(dl1): res.append(x) if i in {2, n_elements - 2}: dl1._pause() dl1._resume() self.assertEqual(list(range(n_elements)), sorted(res)) dl1.shutdown() rs2 = InProcessReadingService(5) dl2: DataLoader2 = DataLoader2(dp, reading_service=rs2) res = [] for i, x in enumerate(dl2): res.append(x) if i in {2, n_elements - 2}: dl2._pause() dl2._resume() self.assertEqual(list(range(n_elements)), sorted(res)) dl2.shutdown() @dp_parametrize def test_reading_service_pause_stop_yield(self, dp) -> None: # Functional Test: Confirms that `dl` will stop yielding elements after `_pause` is called rs = InProcessReadingService(5) dl: DataLoader2 = DataLoader2(dp, reading_service=rs) res = [] for i, x in enumerate(dl): res.append(x) if i in {2}: dl._pause() self.assertEqual(3, len(res)) dl.shutdown() @dp_parametrize def test_reading_service_limit(self, dp) -> None: rs = InProcessReadingService(5) dl: DataLoader2 = DataLoader2(dp, reading_service=rs) res = [] cumulative_res = [] n_limit = 3 it: DataLoader2Iterator = iter(dl) it.limit(n_limit) for x in it: res.append(x) # Functional Test: Verify that the number of elements yielded equals to the specified limit self.assertEqual(n_limit, len(res)) # 3 cumulative_res.extend(res) # Functional Test: Calling `next` after `limit` will trigger `StopIteration` with self.assertRaises(StopIteration): next(it) # Functional Test: Verify that `limit` persists without the need to set it again it.resume() res = [] for x in it: res.append(x) self.assertEqual(n_limit, len(res)) # 3 cumulative_res.extend(res) # Functional Test: Clear the `limit` and yield the rest of the elements it.limit(None) it.resume() res = [] for x in it: res.append(x) self.assertEqual(n_elements - 2 * n_limit, len(res)) # 4 cumulative_res.extend(res) self.assertEqual(list(range(n_elements)), sorted(cumulative_res)) # Functional Test: Setting `limit` to a different value during after each mini-epoch dl2: DataLoader2 = DataLoader2(double_pause_dp, reading_service=rs) res = [] it2: DataLoader2Iterator = iter(dl2) it2.limit(3) for x in it2: res.append(x) # Limit can be set before 
`resume` it2.limit(4) it2.resume() for x in it2: res.append(x) self.assertEqual(7, len(res)) # Limit can also be set after `resume`, but before the next `for` loop it2.resume() it2.limit(2) for x in it2: res.append(x) self.assertEqual(9, len(res)) def test_initial_epoch_checkpointing(self): dp = IterableWrapper(range(20)).shuffle() rs = InProcessReadingService(5) # Functional Test: Saving state before iterator is created dl: DataLoader2 = DataLoader2(datapipe=dp, reading_service=rs) dl.seed(1) initial_state = dl.state_dict() it1 = iter(dl) restored_dl: DataLoader2 = DataLoader2.from_state(initial_state, rs) # type: ignore[arg-type] restored_dl._restore_checkpoint_beginning_of_epoch() self.assertEqual(list(it1), list(restored_dl)) dl.shutdown() restored_dl.shutdown() # Functional Test: Saving state after iterator is created dl = DataLoader2(datapipe=dp, reading_service=rs) dl.seed(1) it1 = iter(dl) initial_state = dl.state_dict() restored_dl = DataLoader2.from_state(initial_state, rs) # type: ignore[arg-type] restored_dl._restore_checkpoint_beginning_of_epoch() self.assertEqual(list(it1), list(restored_dl)) dl.shutdown() restored_dl.shutdown() # Functional Test: Saving state after iterator is created and began iterating dl = DataLoader2(datapipe=dp, reading_service=rs) dl.seed(1) it1 = iter(dl) temp = next(it1) # Starts iterating initial_state = dl.state_dict() restored_dl = DataLoader2.from_state(initial_state, rs) # type: ignore[arg-type] restored_dl._restore_checkpoint_beginning_of_epoch() self.assertEqual([temp] + list(it1), list(restored_dl)) # Note skipping over 1st element from actual result dl.shutdown() restored_dl.shutdown() def _non_dispatching_dp(n_elements=1000): dp = IterableWrapper(list(range(n_elements))).shuffle() dp = dp.sharding_filter() dp = dp.map(_add_one).batch(8) return dp def _dispatching_dp(n_elements=1000): dp = IterableWrapper(list(range(n_elements))).shuffle() dp = dp.prefetch(20) dp = dp.sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING) dp = dp.map(_add_one).batch(16) return dp class NonShardableDataPipe(IterDataPipe): def __init__(self, dp: IterDataPipe): self.dp = dp def is_replicable(self): return False def __iter__(self): yield from self.dp class TestMultiProcessingReadingService(TestCase): r""" This tests specific functionalities of MultiProcessingReadingService, notably `pause`, `resume`, `snapshot`. 
""" @mp_ctx_parametrize @parametrize("dp_fn", [subtest(_non_dispatching_dp, "non_dispatch"), subtest(_dispatching_dp, "dispatch")]) @parametrize("main_prefetch", [0, 10]) @parametrize("worker_prefetch", [0, 10]) def test_early_exit(self, ctx, dp_fn, main_prefetch, worker_prefetch) -> None: dp = dp_fn(1000) rs = MultiProcessingReadingService( num_workers=2, main_prefetch_cnt=main_prefetch, worker_prefetch_cnt=worker_prefetch, multiprocessing_context=ctx, ) dl: DataLoader2 = DataLoader2(dp, reading_service=rs) it = iter(dl) for _ in range(10): _ = next(it) dl.shutdown() @mp_ctx_parametrize @parametrize("dp_fn", [subtest(_non_dispatching_dp, "non_dispatch"), subtest(_dispatching_dp, "dispatch")]) @parametrize("main_prefetch", [0, 10]) @parametrize("worker_prefetch", [0, 10]) def test_exit(self, ctx, dp_fn, main_prefetch, worker_prefetch) -> None: dp = dp_fn(1000) rs = MultiProcessingReadingService( num_workers=2, main_prefetch_cnt=main_prefetch, worker_prefetch_cnt=worker_prefetch, multiprocessing_context=ctx, ) dl: DataLoader2 = DataLoader2(dp, reading_service=rs) _ = list(dl) dl.shutdown() @mp_ctx_parametrize @dp_parametrize @parametrize( "n_workers,worker_prefetch_cnt,main_prefetch_cnt", [(1, 0, 0), (1, 0, 2), (2, 0, 0), (2, 2, 0), (2, 0, 2), (2, 2, 2)], ) def test_reading_service_pause_resume(self, ctx, dp, n_workers, worker_prefetch_cnt, main_prefetch_cnt) -> None: # Functional Test: Testing various configuration of DataPipe/ReadingService to ensure the pipeline # properly pauses and resumes rs = MultiProcessingReadingService( num_workers=n_workers, worker_prefetch_cnt=worker_prefetch_cnt, main_prefetch_cnt=main_prefetch_cnt, multiprocessing_context=ctx, ) dl: DataLoader2 = DataLoader2(dp, reading_service=rs) res = [] for i, x in enumerate(dl): res.append(x) if i in {2, n_elements - 2}: dl._pause() dl._resume() self.assertEqual( list(range(n_elements)), sorted(res), msg=f"The test is failing with '{ctx}', num_workers = {rs.num_workers}, " f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, " f"main_prefetch_cnt = {rs.main_prefetch_cnt}", ) dl.shutdown() @mp_ctx_parametrize @dp_parametrize @parametrize("n_workers,worker_prefetch_cnt,main_prefetch_cnt", [(2, 0, 1), (2, 1, 0), (2, 0, 0)]) def test_reading_service_pause_stop_yield(self, ctx, dp, n_workers, worker_prefetch_cnt, main_prefetch_cnt) -> None: # Functional Test: Confirms that `dl` will stop yielding elements after `_pause` is called rs = MultiProcessingReadingService( num_workers=n_workers, worker_prefetch_cnt=worker_prefetch_cnt, main_prefetch_cnt=main_prefetch_cnt, multiprocessing_context=ctx, ) dl: DataLoader2 = DataLoader2(dp, reading_service=rs) res = [] for i, x in enumerate(dl): res.append(x) if i in {2}: dl._pause() self.assertEqual( 3, len(res), msg=f"The test is failing with '{ctx}', num_workers = {rs.num_workers}, " f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}", ) dl.shutdown() @dp_parametrize @parametrize("n_workers,worker_prefetch_cnt,main_prefetch_cnt", [(1, 0, 0), (1, 0, 2), (2, 0, 0), (2, 2, 2)]) def test_reading_service_limit(self, dp, n_workers, worker_prefetch_cnt, main_prefetch_cnt) -> None: rs = MultiProcessingReadingService( num_workers=n_workers, worker_prefetch_cnt=worker_prefetch_cnt, main_prefetch_cnt=main_prefetch_cnt ) dl: DataLoader2 = DataLoader2(dp, reading_service=rs) res = [] cumulative_res = [] n_limit = 3 it: DataLoader2Iterator = iter(dl) it.limit(n_limit) for x in it: res.append(x) # Functional Test: Verify that the number of elements yielded equals 
to the specified limit self.assertEqual( n_limit, len(res), # 3 msg=f"The test is failing with default multiprocessing method, " f"num_workers = {rs.num_workers}, " f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}", ) cumulative_res.extend(res) # Functional Test: Calling `next` after `limit` will trigger `StopIteration` with self.assertRaises(StopIteration): next(it) # Functional Test: Verify that `limit` persists without the need to set it again it.resume() res = [] for x in it: res.append(x) self.assertEqual( n_limit, len(res), # 3 msg=f"The test is failing with default multiprocessing method, " f"num_workers = {rs.num_workers}, " f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}", ) cumulative_res.extend(res) # Functional Test: Clear the `limit` and yield the rest of the elements it.limit(None) it.resume() res = [] for x in it: res.append(x) self.assertEqual( n_elements - 2 * n_limit, len(res), # 4 msg=f"The test is failing with default multiprocessing method, " f"num_workers = {rs.num_workers}, " f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}", ) cumulative_res.extend(res) self.assertEqual(list(range(n_elements)), sorted(cumulative_res)) # Functional Test: Setting `limit` to a different value during after each mini-epoch dl2: DataLoader2 = DataLoader2(double_pause_dp, reading_service=rs) res = [] it2: DataLoader2Iterator = iter(dl2) it2.limit(3) for x in it2: res.append(x) # Limit can be set before `resume` it2.limit(4) it2.resume() for x in it2: res.append(x) self.assertEqual(7, len(res)) # Limit can also be set after `resume`, but before the next `for` loop it2.resume() it2.limit(2) for x in it2: res.append(x) self.assertEqual(9, len(res)) def test_initial_epoch_checkpointing(self): dp = IterableWrapper(range(20)).shuffle().sharding_filter() # Note that the second `shuffle` occurs in the main process, which uses a different RNG from # the `shuffle` done in the worker processes dp = NonShardableDataPipe(dp).shuffle() # type: ignore[assignment, arg-type] rs = MultiProcessingReadingService(num_workers=2) # Functional Test: Saving state before iterator is created dl: DataLoader2 = DataLoader2(datapipe=dp, reading_service=rs) dl.seed(1) initial_state = dl.state_dict() it1 = iter(dl) restored_dl: DataLoader2 = DataLoader2.from_state(initial_state, rs) # type: ignore[arg-type] restored_dl._restore_checkpoint_beginning_of_epoch() self.assertEqual(list(it1), list(restored_dl)) dl.shutdown() restored_dl.shutdown() # Functional Test: Saving state after iterator is created dl = DataLoader2(datapipe=dp, reading_service=rs) dl.seed(1) it1 = iter(dl) initial_state = dl.state_dict() restored_dl = DataLoader2.from_state(initial_state, rs) # type: ignore[arg-type] restored_dl._restore_checkpoint_beginning_of_epoch() self.assertEqual(list(it1), list(restored_dl)) dl.shutdown() restored_dl.shutdown() # Functional Test: Saving state after iterator is created and began iterating dl = DataLoader2(datapipe=dp, reading_service=rs) dl.seed(1) it1 = iter(dl) temp = next(it1) # Starts iterating initial_state = dl.state_dict() restored_dl = DataLoader2.from_state(initial_state, rs) # type: ignore[arg-type] restored_dl._restore_checkpoint_beginning_of_epoch() self.assertEqual([temp] + list(it1), list(restored_dl)) # Note skipping over 1st element from actual result dl.shutdown() restored_dl.shutdown() # TODO: Test cases when there is official support of `pause` and `resume` with 
round-robin sharding # Currently, using sharding_round_robin raises a warning # def test_round_robin_dispatching_pause_limit(self): # source_dp = IterableWrapper(range(20)) # dp = source_dp.shuffle().sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING) # dp = dp.map(_add_one) # TODO: This doesn't work with `num_workers > 1` # TODO: Try checking if `dp_list`'s elements are _IterateQueueDP or QueueWrapper, we can safely assume # those DPs belong to a dispatching process and only do pause if worker_id == 0 # There might still be a race condition, need to look into the messages # rs1 = MultiProcessingReadingService(num_workers=2, worker_prefetch_cnt=0, main_prefetch_cnt=0) # rs2 = MultiProcessingReadingService(num_workers=2, worker_prefetch_cnt=0, main_prefetch_cnt=2) # rs3 = MultiProcessingReadingService(num_workers=2, worker_prefetch_cnt=2, main_prefetch_cnt=0) # rs4 = MultiProcessingReadingService(num_workers=2, worker_prefetch_cnt=2, main_prefetch_cnt=2) # rss = [rs1, rs2, rs3, rs4] # for n, rs in enumerate(rss): # dl = DataLoader2(dp, reading_service=rs) # res = [] # # cumulative_res = [] # n_limit = 3 # # it: DataLoader2Iterator = iter(dl) # it.limit(n_limit) # The `pause` call here doesn't stop # for x in it: # res.append(x) # # print() # print(res) # # dl.shutdown() # # Functional Test: Verify that the number of elements yielded equals to the specified limit # # self.assertEqual( # # n_limit, # # len(res), # 3 # # msg=f"The test is failing for rs{n + 1} with default multiprocessing method, " # # f"num_workers = {rs.num_workers}, " # # f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}", # # ) # cumulative_res.extend(res) # # # Functional Test: Calling `next` after `limit` will trigger `StopIteration` # with self.assertRaisesRegex(StopIteration, "pause"): # next(it) # # # Functional Test: Verify that `limit` persists without the need to set it again # it.resume() # res = [] # for x in it: # res.append(x) # # self.assertEqual( # # n_limit, # # len(res), # 3 # # msg=f"The test is failing for rs{n + 1} with default multiprocessing method, " # # f"num_workers = {rs.num_workers}, " # # f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}", # # ) # cumulative_res.extend(res) # # # Functional Test: Clear the `limit` and yield the rest of the elements # it.limit(None) # it.resume() # res = [] # for x in it: # res.append(x) # # self.assertEqual( # # n_elements - 2 * n_limit, # # len(res), # 4 # # msg=f"The test is failing for rs{n + 1} with default multiprocessing method, " # # f"num_workers = {rs.num_workers}, " # # f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}", # # ) # # cumulative_res.extend(res) # self.assertEqual(list(range(n_elements)), sorted(cumulative_res)) # TODO: Implemented in an upcoming PR # def test_reading_service_snapshot(self) -> None: # pass # # def test_dataloader2_snapshot(self) -> None: # pass instantiate_parametrized_tests(TestInProcessReadingService) instantiate_parametrized_tests(TestMultiProcessingReadingService) if __name__ == "__main__": unittest.main()
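# Hedged sketch of the iterator-level `limit`/`resume` protocol exercised above:
# cap a mini-epoch at n elements, then lift the cap and drain the remainder.
# The pipeline is illustrative; counts follow the tests in this file.
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService
from torchdata.datapipes.iter import IterableWrapper

dl = DataLoader2(
    IterableWrapper(range(10)).shuffle().sharding_filter(),
    reading_service=MultiProcessingReadingService(num_workers=2),
)
it = iter(dl)
it.limit(3)
first_three = list(it)  # iteration stops after 3 elements
it.limit(None)
it.resume()
remainder = list(it)  # the remaining 7 elements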
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. import random import unittest from unittest import TestCase import numpy as np import torch from torch.testing._internal.common_utils import instantiate_parametrized_tests, IS_WINDOWS, parametrize from torchdata.dataloader2 import DataLoader2, InProcessReadingService, MultiProcessingReadingService from torchdata.dataloader2.graph.settings import set_graph_random_seed from torchdata.dataloader2.random import SeedGenerator from torchdata.datapipes.iter import IterableWrapper def _random_fn(data): r""" Used to validate the randomness of subprocess-local RNGs are set deterministically. """ py_random_num = random.randint(0, 2 ** 32) np_random_num = np.random.randint(0, 2 ** 32, dtype=np.uint32) torch_random_num = torch.randint(0, 2 ** 32, size=[]).item() return (data, py_random_num, np_random_num, torch_random_num) class DeterminismTest(TestCase): @unittest.skipIf(IS_WINDOWS, "Remove when https://github.com/pytorch/data/issues/857 is fixed") @parametrize("num_workers", [1, 8]) def test_mprs_determinism(self, num_workers): data_length = 64 exp = list(range(data_length)) data_source = IterableWrapper(exp) dp = data_source.shuffle().sharding_filter().map(_random_fn) rs = MultiProcessingReadingService(num_workers=num_workers) dl = DataLoader2(dp, reading_service=rs) # No seed res = [] for d, *_ in dl: res.append(d) self.assertEqual(sorted(res), exp) # Shuffle with seed results = [] for _ in range(2): res = [] ran_res = [] torch.manual_seed(123) random.seed(123) np.random.seed(123) for d, *ran_nums in dl: res.append(d) ran_res.append(ran_nums) self.assertEqual(sorted(res), exp) results.append((res, ran_res)) # Same seed generate the same order of data and the same random state self.assertEqual(results[0], results[1]) # Different seed res = [] ran_res = [] torch.manual_seed(321) random.seed(321) np.random.seed(321) for d, *ran_nums in dl: res.append(d) ran_res.append(ran_nums) self.assertEqual(sorted(res), exp) # Different shuffle order self.assertNotEqual(results[0][0], res) # Different subprocess-local random state self.assertNotEqual(results[0][1], ran_res) def test_graph_random_settings(self): def _get_dp_seeds_after_setting(worker_id, seed=123): data_source = IterableWrapper(list(range(100))) dp0 = data_source.shuffle() dp1, dp2, dp3 = dp0.fork(3) dp1 = dp1.sharding_filter() dp2 = dp2.shuffle() dp3 = dp3.shuffle() dp3_ = dp3.sharding_filter() dp4 = dp1.zip(dp2, dp3_).shuffle() sg = SeedGenerator(seed).spawn(worker_id) set_graph_random_seed(dp4, sg) # same seeds, different seeds return (dp0._seed, dp3._seed), (dp2._seed, dp4._seed) ss_0_123, ds_0_123 = _get_dp_seeds_after_setting(worker_id=0, seed=123) ss_1_123, ds_1_123 = _get_dp_seeds_after_setting(worker_id=1, seed=123) self.assertEqual(ss_0_123, ss_1_123) self.assertNotEqual(ds_0_123, ds_1_123) ss_0_123_, ds_0_123_ = _get_dp_seeds_after_setting(worker_id=0, seed=123) self.assertEqual(ss_0_123, ss_0_123_) self.assertEqual(ds_0_123, ds_0_123_) ss_0_321, ds_0_321 = _get_dp_seeds_after_setting(worker_id=0, seed=321) self.assertNotEqual(ss_0_123, ss_0_321) self.assertNotEqual(ds_0_123, ds_0_321) def test_sprs_determinism(self): data_length = 64 exp = list(range(data_length)) data_source = IterableWrapper(exp) dp = data_source.shuffle().sharding_filter().map(_random_fn) rs = InProcessReadingService() dl = DataLoader2(dp, reading_service=rs) # 
        # No seed
        res = []
        for d, *_ in dl:
            res.append(d)
        self.assertEqual(sorted(res), exp)

        # Shuffle with seed
        results = []
        for _ in range(2):
            res = []
            ran_res = []
            torch.manual_seed(123)
            random.seed(123)
            np.random.seed(123)
            for d, *ran_nums in dl:
                res.append(d)
                ran_res.append(ran_nums)
            self.assertEqual(sorted(res), exp)
            results.append((res, ran_res))

        # Same seed generates the same order of data and the same random state
        self.assertEqual(results[0], results[1])

        # Different seed
        res = []
        ran_res = []
        torch.manual_seed(321)
        random.seed(321)
        np.random.seed(321)
        for d, *ran_nums in dl:
            res.append(d)
            ran_res.append(ran_nums)
        self.assertEqual(sorted(res), exp)
        # Different shuffle order
        self.assertNotEqual(results[0][0], res)
        # Different subprocess-local random state
        self.assertNotEqual(results[0][1], ran_res)


instantiate_parametrized_tests(DeterminismTest)


if __name__ == "__main__":
    unittest.main()
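# Hedged sketch of the seeding contract validated above: seeding DataLoader2
# (or the global RNGs, as these tests do) makes the shuffle order and the
# worker-local random state reproducible across epochs.
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService
from torchdata.datapipes.iter import IterableWrapper

dl = DataLoader2(
    IterableWrapper(range(64)).shuffle().sharding_filter(),
    reading_service=MultiProcessingReadingService(num_workers=2),
)
dl.seed(123)
epoch_a = list(dl)
dl.seed(123)
epoch_b = list(dl)
assert epoch_a == epoch_b  # same seed, same order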
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

import pytorch_sphinx_theme
import torchdata

# sys.path.insert(0, os.path.abspath('.'))
current_dir = os.path.dirname(__file__)
target_dir = os.path.abspath(os.path.join(current_dir, "../.."))
sys.path.insert(0, target_dir)
print(target_dir)

# -- Project information -----------------------------------------------------

project = "TorchData"
copyright = "2021 - Present, Torch Contributors"
author = "Torch Contributors"

# The short X.Y version
version = "main (" + torchdata.__version__ + ")"
# The full version, including alpha/beta/rc tags
release = "main"

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.napoleon",
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.doctest",
    "sphinx.ext.graphviz",
]

# Do not execute standard reST doctest blocks so that documentation can
# be successively migrated to sphinx's doctest directive.
doctest_test_doctest_blocks = ""

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
    "generated/torchdata.datapipes.iter.Extractor.rst",
    "generated/torchdata.datapipes.iter.TarArchiveReader.rst",
    "generated/torchdata.datapipes.iter.XzFileReader.rst",
    "generated/torchdata.datapipes.iter.ZipArchiveReader.rst",
]

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "pytorch_sphinx_theme"
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]

html_theme_options = {
    "collapse_navigation": False,
    "display_version": True,
    "logo_only": True,
    "pytorch_project": "docs",
    "navigation_with_keys": True,
    "analytics_id": "UA-117752657-2",
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

html_css_files = [
    "css/custom.css",
]

# TODO(598): use regex to replace all "T" and "T_co" related signatures
signature_replacements = {
    "torch.utils.data.datapipes.datapipe.IterDataPipe": "IterDataPipe",
    "abc.IterDataPipe": "IterDataPipe",
    "torch.utils.data.datapipes.datapipe.MapDataPipe": "MapDataPipe",
    "abc.MapDataPipe": "MapDataPipe",
    "typing.Type[torch.utils.data.sampler.Sampler]": "torch.utils.data.sampler.Sampler",
    "<class 'torch.utils.data.sampler.SequentialSampler'>": "SequentialSampler",
    "torch.utils.data.datapipes.iter.combining.T_co": "T_co",
    "torch.utils.data.datapipes.iter.combinatorics.T_co": "T_co",
    "torchdata.datapipes.iter.transform.bucketbatcher.T_co": "T_co",
    "torch.utils.data.datapipes.map.grouping.T": "T",
    "torch.utils.data.datapipes.map.combining.T_co": "T_co",
    "torch.utils.data.datapipes.map.combinatorics.T_co": "T_co",
    "torchdata.datapipes.iter.util.cycler.T_co": "T_co",
    "torchdata.datapipes.iter.util.paragraphaggregator.T_co": "T_co",
    "torchdata.datapipes.map.util.cacheholder.T_co": "T_co",
    "Sequence[torchdata.datapipes.map.util.unzipper.T]": "Sequence[T]",
    "torchdata.datapipes.iter.util.samplemultiplexer.T_co": "T_co",
    "torchdata.datapipes.iter.util.indexadder.K": "K",
    "torchdata.datapipes.iter.util.unzipper.T": "T",
    "torch.utils.data.datapipes.iter.grouping.T_co": "T_co",
    "torchdata.datapipes.iter.util.dataframemaker.T_co": "T_co",
    "torchdata.datapipes.iter.util.cacheholder.T_co": "T_co",
    "torchdata.datapipes.iter.util.header.T_co": "T_co",
    "<class 'torch.utils.data.datapipes.datapipe.DataChunk'>": "List",
    "typing.": "",
    "Union[IterDataPipe, MapDataPipe]": "DataPipe",
    "Dict[int, Tuple[DataPipe, DataPipeGraph]": "DataPipeGraph",
}


def process_signature(app, what, name, obj, options, signature, return_annotation):
    """Replacing long type annotations in signature with more succinct ones."""
    if isinstance(signature, str):
        for old, new in signature_replacements.items():
            if old in signature:
                signature = signature.replace(old, new)
        return signature, return_annotation


def setup(app):
    # Overwrite class name to allow aliasing in documentation generation
    import torchdata.datapipes.iter as iter
    import torchdata.datapipes.map as map

    for mod in (iter, map):
        for name, obj in mod.__dict__.items():
            if isinstance(obj, type):
                obj.__name__ = name

    app.connect("autodoc-process-signature", process_signature)


intersphinx_mapping = {
    "graphviz": ("https://graphviz.readthedocs.io/en/stable/", None),
}
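# A minimal, self-contained sketch (not part of the Sphinx build) of the
# replacement logic that `process_signature` above applies; the sample
# signature string here is hypothetical.
_demo_replacements = {
    "torch.utils.data.datapipes.datapipe.IterDataPipe": "IterDataPipe",
    "typing.": "",
}
_demo_signature = "(source_dp: torch.utils.data.datapipes.datapipe.IterDataPipe, fn: typing.Callable)"
for _old, _new in _demo_replacements.items():
    if _old in _demo_signature:
        _demo_signature = _demo_signature.replace(_old, _new)
print(_demo_signature)  # (source_dp: IterDataPipe, fn: Callable)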
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

"""
This file contains the data pipeline to read from a TSV file and output a DataFrame.
"""
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, TypeVar, Union

import numpy as np
import torcharrow as ta
import torcharrow.dtypes as dt
import torcharrow.pytorch as tap
import torcharrow_wrapper  # noqa: F401
from common import (
    CAT_FEATURE_COUNT,
    DEFAULT_CAT_NAMES,
    DEFAULT_COLUMN_NAMES,
    DEFAULT_INT_NAMES,
    INT_FEATURE_COUNT,
    safe_cast,
)
from iopath.common.file_io import PathManagerFactory
from torch.utils.data import get_worker_info
from torch.utils.data.datapipes.dataframe.dataframes import CaptureLikeMock
from torcharrow import functional
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService
from torchdata.datapipes.iter import Batcher, CSVParser, IoPathFileOpener, IterableWrapper, IterDataPipe, Mapper

PATH_MANAGER_KEY = "torchrec"
T = TypeVar("T")

COLUMN_TYPE_CASTERS: List[Callable[[Union[int, str]], Union[int, str]]] = [
    lambda val: safe_cast(val, int, 0),
    *(lambda val: safe_cast(val, int, 0) for _ in range(INT_FEATURE_COUNT)),
    *(lambda val: safe_cast(val, str, "") for _ in range(CAT_FEATURE_COUNT)),
]

DTYPE = dt.Struct(
    [
        dt.Field("labels", dt.int8),
        dt.Field(
            "dense_features",
            dt.Struct([dt.Field(int_name, dt.Int32(nullable=True)) for int_name in DEFAULT_INT_NAMES]),
        ),
        dt.Field(
            "sparse_features",
            dt.Struct([dt.Field(cat_name, dt.Int32(nullable=True)) for cat_name in DEFAULT_CAT_NAMES]),
        ),
    ]
)


def _torcharrow_row_mapper(row: List[str]) -> Tuple[int, Tuple[int, ...], Tuple[int, ...]]:
    label = int(safe_cast(row[0], int, 0))
    dense = tuple(int(safe_cast(row[i], int, 0)) for i in range(1, 1 + INT_FEATURE_COUNT))
    sparse = tuple(
        int(safe_cast(row[i], str, "0") or "0", 16)
        for i in range(1 + INT_FEATURE_COUNT, 1 + INT_FEATURE_COUNT + CAT_FEATURE_COUNT)
    )
    # TorchArrow doesn't support uint32, but we can save memory
    # by not using int64. Numpy will automatically handle sparse values >= 2 ** 31.
    sparse = tuple(np.array(sparse, dtype=np.int32).tolist())
    return label, dense, sparse


def criteo_dataframes_from_tsv(
    paths: Union[str, Iterable[str]],
    *,
    batch_size: int = 128,
) -> IterDataPipe:
    """
    Load Criteo dataset (Kaggle or Terabyte) as TorchArrow DataFrame streams from TSV file(s)

    This implementation is inefficient and is used for prototype and test only.

    Args:
        paths (str or Iterable[str]): local paths to TSV files that constitute
            the Kaggle or Criteo 1TB dataset.
        batch_size (int): number of rows within each DataFrame

    Example:
        >>> datapipe = criteo_dataframes_from_tsv(
        >>>     ["/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv"]
        >>> )
        >>> for df in datapipe:
        >>>     print(df)
    """
    if isinstance(paths, str):
        paths = [paths]

    datapipe = CriteoIterDataPipe(paths, row_mapper=_torcharrow_row_mapper)
    datapipe = Batcher(datapipe, batch_size)
    datapipe = Mapper(datapipe, lambda batch: ta.dataframe(batch, dtype=DTYPE))

    return datapipe.trace_as_dataframe()


def _default_row_mapper(example: List[str]) -> Dict[str, Union[int, str]]:
    column_names = reversed(DEFAULT_COLUMN_NAMES)
    column_type_casters = reversed(COLUMN_TYPE_CASTERS)
    return {next(column_names): next(column_type_casters)(val) for val in reversed(example)}


class CriteoIterDataPipe(IterDataPipe):
    """
    IterDataPipe that can be used to stream either the Criteo 1TB Click Logs Dataset
    (https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) or the
    Kaggle/Criteo Display Advertising Dataset
    (https://www.kaggle.com/c/criteo-display-ad-challenge/) from the source TSV files.

    Args:
        paths (Iterable[str]): local paths to TSV files that constitute the Criteo dataset.
        row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each split TSV line.
        open_kw: options to pass to underlying invocation of
            iopath.common.file_io.PathManager.open.

    Example:
        >>> datapipe = CriteoIterDataPipe(
        >>>     ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
        >>> )
        >>> datapipe = dp.iter.Batcher(datapipe, 100)
        >>> datapipe = dp.iter.Collator(datapipe)
        >>> batch = next(iter(datapipe))
    """

    def __init__(
        self,
        paths: Iterable[str],
        *,
        # pyre-ignore[2]
        row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
    ) -> None:
        self.paths = paths
        self.row_mapper = row_mapper

    # pyre-ignore[3]
    def __iter__(self) -> Iterator[Any]:
        worker_info = get_worker_info()
        paths = self.paths
        if worker_info is not None:
            paths = (path for (idx, path) in enumerate(paths) if idx % worker_info.num_workers == worker_info.id)
        paths = IterableWrapper(paths)
        datapipe = IoPathFileOpener(paths, mode="r", pathmgr=PathManagerFactory().get(PATH_MANAGER_KEY))
        datapipe = CSVParser(datapipe, delimiter="\t")
        if self.row_mapper:
            datapipe = Mapper(datapipe, self.row_mapper)
        yield from datapipe


# Creating DataFrame from TSV File
df = criteo_dataframes_from_tsv("day_11_first_3k_rows_original.tsv")
df = df.shuffle()
df["dense_features"] = df["dense_features"].fill_null(0)
df["sparse_features"] = df["sparse_features"].fill_null(0)

# Remove CaptureLikeMock when torcharrow.functional will accept StreamDataFrame
with CaptureLikeMock("torcharrow.functional.array_constructor"):
    for field in df["sparse_features"].columns:
        df["sparse_features"][field] = functional.array_constructor(df["sparse_features"][field])

df["dense_features"] = (df["dense_features"] + 3).log()
df["labels"] = df["labels"].cast(dt.int32)

df = df.batch(10)

conversion = {
    "dense_features": tap.rec.Dense(),
    "sparse_features": tap.rec.Dense(),  # Sparse not implemented yet in torcharrow
    # Because "labels" is unlisted, it works like "labels": tap.rec.Default(),
}
df = df.collate(conversion=conversion)

reading_service = MultiProcessingReadingService(num_workers=0)
dl = DataLoader2(df, reading_service=reading_service)

print("Iterating DataLoader now")
for item in dl:
    labels, dense_features, sparse_features = item
    print(labels)
    print(dense_features)
    print(sparse_features)
    break
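# A small worked example (hypothetical input) of the base-16 parsing done in
# `_torcharrow_row_mapper` above: Criteo categorical features are hex strings,
# and storing them as int32 makes values >= 2 ** 31 wrap around. The pipeline
# relies on numpy's cast for that; the arithmetic below shows the same
# two's-complement wrap explicitly.
hex_feature = "ffb0a23d"
as_int = int(hex_feature or "0", 16)         # 4289765949, exceeds int32 range
as_int32 = (as_int + 2**31) % 2**32 - 2**31  # -5201347 after wrapping
print(as_int, as_int32)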
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# TODO(597): This file can be moved to the dataframe parent directory once Torcharrow
# is open sourced
from typing import Iterable, List, Optional, Union

import torcharrow as ta
from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper


class TorcharrowWrapper:
    @classmethod
    def create_dataframe(cls, data: Iterable, columns: Optional[List[str]] = None):
        columnar_data = list(zip(*data))

        # set default column names if `columns` arg is not provided
        column_names = columns
        if not columns or len(columns) == 0:
            column_names = [f"col{i}" for i in range(len(columnar_data))]

        return ta.dataframe({column_name: ta.Column(value) for column_name, value in zip(column_names, columnar_data)})

    @classmethod
    def is_dataframe(cls, data: Union[ta.DataFrame, ta.Column]):
        return isinstance(data, ta.DataFrame)

    @classmethod
    def is_column(cls, data: Union[ta.DataFrame, ta.Column]):
        return isinstance(data, ta.Column)

    @classmethod
    def iterate(cls, df):
        yield from df

    @classmethod
    def concat(cls, buffer: List[ta.DataFrame]):
        concat_buffer = []
        for b in buffer:
            concat_buffer += list(b)
        return ta.dataframe(concat_buffer, dtype=buffer[0].dtype)

    @classmethod
    def get_item(cls, df: ta.DataFrame, idx):
        return df[idx : idx + 1]

    @classmethod
    def get_len(cls, df: ta.DataFrame):
        return len(df)

    @classmethod
    def get_columns(cls, df):
        return list(df.columns)


df_wrapper.set_df_wrapper(TorcharrowWrapper)
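# A hedged usage sketch (assumes torcharrow is installed and behaves as the
# wrapper above expects): row tuples are transposed into columns, and missing
# column names default to col0, col1, ...
_rows = [(1, "a"), (2, "b"), (3, "c")]  # hypothetical data
_df = TorcharrowWrapper.create_dataframe(_rows)
print(TorcharrowWrapper.get_columns(_df))  # ['col0', 'col1']
print(TorcharrowWrapper.get_len(_df))      # 3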
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

"""
This file contains the data pipeline to read from a Parquet file and output a DataFrame.
"""
import torcharrow.dtypes as dt
from common import DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES
from torchdata.datapipes.iter import FileLister, ParquetDataFrameLoader

DTYPE = dt.Struct(
    [dt.Field("label", dt.int64)]
    + [dt.Field(int_name, dt.Float64(nullable=True)) for int_name in DEFAULT_INT_NAMES]
    + [dt.Field(cat_name, dt.Float64(nullable=True)) for cat_name in DEFAULT_CAT_NAMES]
)

source_dp = FileLister(".", masks="*.parquet")
parquet_df_dp = ParquetDataFrameLoader(source_dp, dtype=DTYPE)
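# A hedged usage sketch: assuming at least one matching *.parquet file exists
# in the working directory, each element yielded is a TorchArrow DataFrame.
for _df in parquet_df_dp:
    print(_df)
    break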
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from typing import Callable, List, TypeVar

T = TypeVar("T")

# Criteo Data Set Parameters
INT_FEATURE_COUNT = 13
CAT_FEATURE_COUNT = 26

DEFAULT_LABEL_NAME = "label"
DEFAULT_INT_NAMES: List[str] = [f"int_{idx}" for idx in range(INT_FEATURE_COUNT)]
DEFAULT_CAT_NAMES: List[str] = [f"cat_{idx}" for idx in range(CAT_FEATURE_COUNT)]
DEFAULT_COLUMN_NAMES: List[str] = [
    DEFAULT_LABEL_NAME,
    *DEFAULT_INT_NAMES,
    *DEFAULT_CAT_NAMES,
]


def safe_cast(val: T, dest_type: Callable[[T], T], default: T) -> T:
    """
    Helper function to safely cast data with default as fallback.
    """
    try:
        return dest_type(val)
    except ValueError:
        return default


def safe_hex_to_int(num):
    try:
        return int(safe_cast(num, str, "0") or "0", 16)
    except Exception:
        return float("NaN")
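# A few worked examples of the helpers above (inputs are hypothetical Criteo
# field values):
print(safe_cast("42", int, 0))  # 42
print(safe_cast("", int, 0))    # 0 -- ValueError falls back to the default
print(safe_hex_to_int("1f0e"))  # 7950
print(safe_hex_to_int(None))    # nan -- int("None", 16) raises and is caught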
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

"""
This file pre-processes the source file and saves it as a TSV file and a Parquet file.
You do not need to re-run this file if "day_11_first_3k_rows.parquet" and
"day_11_first_3k_rows.tsv" exist locally.
"""
import pandas
import pyarrow
import pyarrow.parquet as parquet
from common import DEFAULT_CAT_NAMES, DEFAULT_COLUMN_NAMES, safe_hex_to_int

# Read TSV File with Pandas
tsv_fname = "day_11_first_3k_rows_original.tsv"
df = pandas.read_csv(tsv_fname, sep="\t")
df.columns = DEFAULT_COLUMN_NAMES

# Convert hex strings to integers
for i, row in df.iterrows():
    for cat_col in DEFAULT_CAT_NAMES:
        df.at[i, cat_col] = safe_hex_to_int(row[cat_col])

# Convert to PyArrow table and write to disk as parquet file
table = pyarrow.Table.from_pandas(df=df)
parquet_fname = "day_11_first_3k_rows.parquet"
parquet.write_table(table, parquet_fname)

# Write to a new .tsv file
df.to_csv("day_11_first_3k_rows.tsv", sep="\t")
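# Design note (a sketch, not a verified drop-in): the row-wise loop above is
# clear but slow on large frames; pandas can apply the same conversion
# column-wise instead:
#
#     for cat_col in DEFAULT_CAT_NAMES:
#         df[cat_col] = df[cat_col].apply(safe_hex_to_int)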
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import http.server
import os
import re
import threading

import torchvision.datasets as datasets
import torchvision.datasets.folder
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import DataLoader
from torchdata.datapipes.iter import FileLister, HttpReader, IterDataPipe

IMAGES_ROOT = os.path.join("fakedata", "imagefolder")

USE_FORK_DATAPIPE = False
NUM_WORKERS = 5
BATCH_SIZE = None

data_transform = transforms.Compose(
    [
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ]
)

# DataPipes implementation of ImageFolder constructs and executes graph of DataPipes (aka DataPipeline)
# FileLister -> ObtainCategories
#                     |
#                     V
# FileLister -> AttributeCategories -> LoadAndDecodeImages (using `map`) -> ApplyTorchVisionTransforms (using `map`)


def get_category_name(path):
    rel_path = os.path.relpath(path, start=IMAGES_ROOT)
    elements = rel_path.split(os.sep)
    return elements[0]


class ObtainCategories(IterDataPipe):
    def __init__(self, source_dp, parse_category_fn=get_category_name) -> None:
        self.source_dp = source_dp
        self.parse_category_fn = parse_category_fn

    def __iter__(self):
        categories = set()
        for path in self.source_dp:
            categories.add(self.parse_category_fn(path))
        cat_to_id = {name: i for i, name in enumerate(sorted(categories))}
        yield cat_to_id


class AttributeCategories(IterDataPipe):
    def __init__(self, listfiles_dp, categories_dp, parse_category_fn=get_category_name) -> None:
        self.listfiles_dp = listfiles_dp
        self.categories_dp = categories_dp
        self.parse_category_fn = parse_category_fn

    def __iter__(self):
        for categories in self.categories_dp:
            cat_to_dp = categories
            for data in self.listfiles_dp:
                if isinstance(data, tuple):
                    category = cat_to_dp[self.parse_category_fn(data[0])]
                    yield data + (category,)
                else:
                    category = cat_to_dp[self.parse_category_fn(data)]
                    yield (data, category)


def MyImageFolder(root=IMAGES_ROOT, transform=None):
    if not USE_FORK_DATAPIPE:
        # Yes, we had to scan files twice. Alternatively it is possible to use
        # `fork` DataPipe, but it will require buffer equal to the size of all
        # full file names
        # TODO(125): Make sure that `fork` complains when buffer becomes
        # too large
        list_files_0 = FileLister(root=IMAGES_ROOT, recursive=True)
        list_files_1 = FileLister(root=IMAGES_ROOT, recursive=True).sharding_filter()
    else:
        list_files_0, list_files_1 = FileLister(root=IMAGES_ROOT, recursive=True).fork(2)
        list_files_1 = list_files_1.sharding_filter()

    categories = ObtainCategories(list_files_0)
    with_categories = AttributeCategories(list_files_1, categories)
    using_default_loader = with_categories.map(lambda x: (torchvision.datasets.folder.default_loader(x[0]), x[1]))
    transformed = using_default_loader.map(lambda x: (transform(x[0]), x[1]))
    return transformed


class ExpandURLPatternDataPipe(IterDataPipe):
    def __init__(self, pattern) -> None:
        result = re.match(r"(.*?)\{(.*?)}(.*)", pattern)
        if result:
            self.prefix = result.group(1)
            self.pattern = result.group(2)
            self.postfix = result.group(3)
            result = re.match(r"(\d+)\.\.(\d+)", self.pattern)
            if result:
                self.start_str = result.group(1)
                self.end_str = result.group(2)
            else:
                raise Exception("Invalid pattern")
        else:
            raise Exception("Invalid pattern")

    def __iter__(self):
        current_int = int(self.start_str)
        end_int = int(self.end_str)
        for i in range(current_int, end_int + 1):
            str_i = str(i)
            while len(str_i) < len(self.start_str):
                str_i = "0" + str_i
            yield self.prefix + str_i + self.postfix


HTTP_PATH_ROOT = "http://localhost:8000/"
HTTP_PATH_CAT = "http://localhost:8000/cat/{1..3}.jpg"
HTTP_PATH_DOG = "http://localhost:8000/dog/{1..3}.jpg"


def get_category_name_url(url):
    rel_path = os.path.relpath(url, start=HTTP_PATH_ROOT)
    elements = rel_path.split(os.sep)
    return elements[0]


def stream_to_pil(stream):
    img = Image.open(stream)
    return img.convert("RGB")


def MyHTTPImageFolder(transform=None):
    # HTTP Protocol doesn't support listing files, so we had to provide it explicitly
    list_files = ExpandURLPatternDataPipe(HTTP_PATH_CAT) + ExpandURLPatternDataPipe(HTTP_PATH_DOG)
    list_files_0, list_files_1 = list_files.fork(2)
    list_files_1 = list_files_1.sharding_filter().shuffle()
    categories = ObtainCategories(list_files_0, parse_category_fn=get_category_name_url)
    loaded_files = HttpReader(list_files_1)
    with_categories = AttributeCategories(loaded_files, categories, parse_category_fn=get_category_name_url)
    pil_images = with_categories.map(lambda x: (x[0], stream_to_pil(x[1]), x[2]))
    transformed = pil_images.map(lambda x: (transform(x[1]), x[2]))
    return transformed


if __name__ == "__main__":
    dataset = datasets.ImageFolder(root=IMAGES_ROOT, transform=data_transform)
    dl = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
    items = list(dl)
    assert len(items) == 6

    dataset = MyImageFolder(root=IMAGES_ROOT, transform=data_transform)
    dl = DataLoader(
        dataset,
        batch_size=BATCH_SIZE,
        shuffle=False,
        num_workers=NUM_WORKERS,
    )
    items = list(dl)
    assert len(items) == 6

    http_handler = http.server.SimpleHTTPRequestHandler
    http_handler.log_message = lambda a, b, c, d, e: None
    httpd = http.server.HTTPServer(("", 8000), http_handler)
    os.chdir(IMAGES_ROOT)
    thread = threading.Thread(target=httpd.serve_forever)
    thread.start()

    dataset = MyHTTPImageFolder(transform=data_transform)
    dl = DataLoader(
        dataset,
        batch_size=BATCH_SIZE,
        shuffle=False,
        num_workers=NUM_WORKERS,
    )

    try:
        items = list(dl)
        assert len(items) == 6
    finally:
        httpd.shutdown()
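# A quick, self-contained check of `ExpandURLPatternDataPipe`'s brace
# expansion (the URL below is hypothetical); zero padding in the start index
# is preserved:
print(list(ExpandURLPatternDataPipe("http://localhost:8000/cat/{01..03}.jpg")))
# ['http://localhost:8000/cat/01.jpg',
#  'http://localhost:8000/cat/02.jpg',
#  'http://localhost:8000/cat/03.jpg']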
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

from io import BytesIO

import requests
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService
from torchdata.datapipes.iter import HuggingFaceHubReader

try:
    import PIL
    from PIL import Image
except ImportError:
    PIL = None
    Image = None


def has_no_watermark(x):
    return x["pwatermark"] is not None and x["pwatermark"] < 0.8


def is_sfw(x):
    return x["punsafe"] is not None and x["punsafe"] < 0.5


def load_image(url):
    try:
        r = requests.get(url, timeout=5)
        return Image.open(BytesIO(r.content))
    except Exception:
        return None


def image_was_loaded(x):
    return x is not None


# For more information about the dataset see: https://laion.ai/blog/laion-5b/
# name of the dataset to be used
NAME = "laion/laion2B-en-joined"


# As the dataset is too large to store locally we use a streaming approach
def laion2b_en(name=NAME):
    dp = HuggingFaceHubReader(name)
    dp = dp.filter(has_no_watermark)
    dp = dp.filter(is_sfw)
    dp = dp.shuffle().sharding_filter()
    dp = dp.slice(index=["TEXT", "URL"])
    dp = dp.map(fn=load_image, input_col="URL", output_col="IMAGE")  # this needs multithreading
    dp = dp.filter(filter_fn=image_was_loaded, input_col="IMAGE")
    dp = dp.drop("URL")
    dp = dp.batch(20)
    return dp


def print_label_and_copyright(label, image):
    try:
        try:
            exif = image.getexif()
            # 0x8298 is the EXIF tag for copyright
            copyright_info = exif.get(0x8298, "no info")
        except Exception:
            copyright_info = "EXIF data is corrupted"
        if copyright_info != "no info" and copyright_info != "EXIF data is corrupted":
            print(f"image {i}: {label=}, {copyright_info=} ")
        else:
            print(f"image {i}: {label=}")
    except PIL.UnidentifiedImageError:
        print(f"image {i}: corrupted")


if __name__ == "__main__":
    i = 0
    dp = laion2b_en()
    rs = MultiProcessingReadingService(num_workers=4)
    dl = DataLoader2(dp, reading_service=rs)
    for batch in dl:
        for entry in batch:
            print_label_and_copyright(entry["TEXT"], entry["IMAGE"])
            i += 1
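# A self-contained sanity check of the metadata filters above on hypothetical
# records: both predicates reject entries whose score is missing.
print(has_no_watermark({"pwatermark": 0.2, "punsafe": 0.1}))    # True
print(is_sfw({"pwatermark": 0.2, "punsafe": 0.1}))              # True
print(has_no_watermark({"pwatermark": None, "punsafe": None}))  # False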
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import os.path
import re

import torch
from torch.utils.data.datapipes.utils.decoder import imagehandler, mathandler
from torchdata.datapipes.iter import (
    FileOpener,
    Filter,
    IterableWrapper,
    IterKeyZipper,
    Mapper,
    RoutedDecoder,
    TarArchiveLoader,
)

# Download size is ~150 MB so fake data is provided
URL = {
    "images": "http://www.vision.caltech.edu/Image_Datasets/Caltech101/101_ObjectCategories.tar.gz",
    "annotations": "http://www.vision.caltech.edu/Image_Datasets/Caltech101/Annotations.tar",
}
# We really shouldn't use MD5 anymore and switch to a more secure hash like SHA256 or
# SHA512
MD5 = {
    "images": "b224c7392d521a49829488ab0f1120d9",
    "annotations": "f83eeb1f24d99cab4eb377263132c91",
}

ROOT = os.path.join("fakedata", "caltech101")

IMAGES_NAME_PATTERN = re.compile(r"image_(?P<id>\d+)[.]jpg")
ANNS_NAME_PATTERN = re.compile(r"annotation_(?P<id>\d+)[.]mat")
ANNS_CLASS_MAP = {
    "Faces_2": "Faces",
    "Faces_3": "Faces_easy",
    "Motorbikes_16": "Motorbikes",
    "Airplanes_Side_2": "airplanes",
}


def is_ann(data):
    path, _ = data
    return bool(ANNS_NAME_PATTERN.match(os.path.basename(path)))


def collate_ann(data):
    path, ann = data

    cls = os.path.split(os.path.dirname(path))[1]
    if cls in ANNS_CLASS_MAP:
        cls = ANNS_CLASS_MAP[cls]

    return path, {"cls": cls, "contour": torch.as_tensor(ann["obj_contour"])}


def is_not_background_image(data):
    path, _ = data
    return os.path.split(os.path.dirname(path))[1] != "BACKGROUND_Google"


def is_not_rogue_image(data) -> bool:
    path, _ = data
    return os.path.basename(path) != "RENAME2"


def extract_file_id(path, *, pattern):
    match = pattern.match(os.path.basename(path))
    return int(match.group("id"))


def images_key_fn(data):
    path, _ = data

    cls = os.path.split(os.path.dirname(path))[1]
    id = extract_file_id(path, pattern=IMAGES_NAME_PATTERN)

    return cls, id


def anns_key_fn(data):
    path, ann = data

    id = extract_file_id(path, pattern=ANNS_NAME_PATTERN)

    return ann["cls"], id


def collate_sample(data):
    (image_path, image), (ann_path, ann) = data
    return dict(ann, image_path=image_path, image=image, ann_path=ann_path)


def Caltech101(root=ROOT):
    anns_dp = IterableWrapper([os.path.join(root, "Annotations.tar")])
    anns_dp = FileOpener(anns_dp, mode="b")
    anns_dp = TarArchiveLoader(anns_dp)
    anns_dp = Filter(anns_dp, is_ann)
    anns_dp = RoutedDecoder(anns_dp, mathandler())
    anns_dp = Mapper(anns_dp, collate_ann)

    images_dp = IterableWrapper([os.path.join(root, "101_ObjectCategories.tar.gz")])
    images_dp = FileOpener(images_dp, mode="b")
    images_dp = TarArchiveLoader(images_dp)
    images_dp = Filter(images_dp, is_not_background_image)
    images_dp = Filter(images_dp, is_not_rogue_image)
    images_dp = RoutedDecoder(images_dp, imagehandler("pil"))

    dp = IterKeyZipper(images_dp, anns_dp, images_key_fn, ref_key_fn=anns_key_fn, buffer_size=None)
    return Mapper(dp, collate_sample)


if __name__ == "__main__":
    for _sample in Caltech101():
        pass
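# A small check (hypothetical file name) of the id extraction used to join
# images with their annotations in `IterKeyZipper` above:
print(extract_file_id("image_0042.jpg", pattern=IMAGES_NAME_PATTERN))  # 42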
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import os.path

from torch.utils.data.datapipes.utils.decoder import imagehandler
from torchdata.datapipes.iter import FileOpener, IterableWrapper, Mapper, RoutedDecoder, TarArchiveLoader

# Download size is ~1.2 GB so fake data is provided
URL = "http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar"
ROOT = os.path.join("datasets", "caltech256")
# We really shouldn't use MD5 anymore and switch to a more secure hash like SHA256 or
# SHA512
MD5 = "67b4f42ca05d46448c6bb8ecd2220f6d"


def collate_sample(data):
    path, image = data
    dir = os.path.split(os.path.dirname(path))[1]
    label_str, cls = dir.split(".")
    return {"path": path, "image": image, "label": int(label_str), "cls": cls}


def Caltech256(root=ROOT):
    dp = IterableWrapper([os.path.join(root, "256_ObjectCategories.tar")])
    dp = FileOpener(dp, mode="b")
    dp = TarArchiveLoader(dp)
    dp = RoutedDecoder(dp, imagehandler("pil"))
    return Mapper(dp, collate_sample)


if __name__ == "__main__":
    for _sample in Caltech256():
        pass
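# The directory layout `collate_sample` assumes is <archive>/<label>.<class>/<file>;
# a quick check with a hypothetical path (the image object is omitted):
print(collate_sample(("256_ObjectCategories/001.ak47/001_0001.jpg", None)))
# {'path': '256_ObjectCategories/001.ak47/001_0001.jpg', 'image': None, 'label': 1, 'cls': 'ak47'}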
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import functools
import os
from pathlib import Path
from typing import Union

import torchaudio
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper

URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriSpeech"
BASE_URL = "http://www.openslr.org/resources/12/"
_CHECKSUMS = {
    "dev-clean.tar.gz": "76f87d090650617fca0cac8f88b9416e0ebf80350acb97b343a85fa903728ab3",
    "dev-other.tar.gz": "12661c48e8c3fe1de2c1caa4c3e135193bfb1811584f11f569dd12645aa84365",
    "test-clean.tar.gz": "39fde525e59672dc6d1551919b1478f724438a95aa55f874b576be21967e6c23",
    "test-other.tar.gz": "d09c181bba5cf717b3dee7d4d592af11a3ee3a09e08ae025c5506f6ebe961c29",
    "train-clean-100.tar.gz": "d4ddd1d5a6ab303066f14971d768ee43278a5f2a0aa43dc716b0e64ecbbbf6e2",
    "train-clean-360.tar.gz": "146a56496217e96c14334a160df97fffedd6e0a04e66b9c5af0d40be3c792ecf",
    "train-other-500.tar.gz": "ddb22f27f96ec163645d53215559df6aa36515f26e01dd70798188350adcb6d2",
}

AUDIO_EXT = ".flac"
TXT_EXT = ".trans.txt"


def decompress_filepath_fn(file_path, root_path):
    file_path = os.path.normpath(file_path)
    if file_path.endswith((AUDIO_EXT, TXT_EXT)):
        return os.path.join(root_path, *file_path.split(os.sep)[-4:])
    else:
        return os.path.join(root_path, os.path.basename(file_path))


def classify_file_fn(filepath):
    if filepath.endswith(AUDIO_EXT):
        return 0
    if filepath.endswith(TXT_EXT):
        return 1
    return None


def text_split_fn(line):
    fileid_text, transcript = line.strip().split(" ", 1)
    return (fileid_text, transcript)


def audio_key_fn(audio_file):
    audio_filename = os.path.splitext(os.path.basename(audio_file))[0]
    return audio_filename


def load_librispeech_item(data):
    audio_file, transcript = data
    audio_filename = os.path.splitext(os.path.basename(audio_file))[0]
    speaker_id, chapter_id, utterance_id = audio_filename.split("-")

    # Load audio
    waveform, sample_rate = torchaudio.load(audio_file)

    return (
        waveform,
        sample_rate,
        transcript,
        int(speaker_id),
        int(chapter_id),
        int(utterance_id),
    )


def LibriSpeech(root: Union[str, Path], url: str = URL, folder_in_archive: str = FOLDER_IN_ARCHIVE):
    if url in [
        "dev-clean",
        "dev-other",
        "test-clean",
        "test-other",
        "train-clean-100",
        "train-clean-360",
        "train-other-500",
    ]:
        url = BASE_URL + url + ".tar.gz"

    # Get string representation of 'root' in case Path object is passed
    root = os.fspath(root)
    checksum_dict = {os.path.join(root, key): value for key, value in _CHECKSUMS.items()}

    url_dp = IterableWrapper([url])
    # Cache tar.gz archive
    cache_compressed_dp = url_dp.on_disk_cache(
        filepath_fn=lambda url: os.path.join(root, os.path.basename(url)),
        hash_dict=checksum_dict,
        hash_type="sha256",
    )
    cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(same_filepath_fn=True)

    # Cache decompressed archive into folder_in_archive
    cache_decompressed_dp = cache_compressed_dp.on_disk_cache(
        filepath_fn=lambda tar_path: os.path.join(root, folder_in_archive, tar_path.split(".")[0])
    )
    cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode="b").load_from_tar()
    cache_decompressed_dp = cache_decompressed_dp.end_caching(
        filepath_fn=functools.partial(decompress_filepath_fn, root_path=os.path.join(root, folder_in_archive)),
    )

    audio_dp, txt_dp = cache_decompressed_dp.demux(2, classify_file_fn, drop_none=True, buffer_size=-1)
    txt_dp = FileOpener(txt_dp, mode="t").readlines(return_path=False).map(text_split_fn)
    transcript_map_dp = txt_dp.to_map_datapipe()
    audio_transcript_dp = audio_dp.zip_with_map(transcript_map_dp, key_fn=audio_key_fn)
    return audio_transcript_dp.map(load_librispeech_item)
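# A hedged usage sketch (the root path is illustrative; the first run needs
# network access to download and cache the chosen split):
if __name__ == "__main__":
    dp = LibriSpeech("./librispeech_root", url="dev-clean")
    waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id = next(iter(dp))
    print(sample_rate, transcript)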
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import os
from functools import partial

from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper, IterDataPipe

from .utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument

URL = {
    "train": "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json",
    "dev": "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json",
}

MD5 = {
    "train": "981b29407e0affa3b1b156f72073b945",
    "dev": "3e85deb501d4e538b6bc56f786231552",
}

NUM_LINES = {
    "train": 87599,
    "dev": 10570,
}

DATASET_NAME = "SQuAD1"


def _path_fn(root, path):
    return os.path.join(root, os.path.basename(path))


class _ParseSQuADQAData(IterDataPipe):
    def __init__(self, source_datapipe) -> None:
        self.source_datapipe = source_datapipe

    def __iter__(self):
        for _, stream in self.source_datapipe:
            raw_json_data = stream["data"]
            for layer1 in raw_json_data:
                for layer2 in layer1["paragraphs"]:
                    for layer3 in layer2["qas"]:
                        _context, _question = layer2["context"], layer3["question"]
                        _answers = [item["text"] for item in layer3["answers"]]
                        _answer_start = [item["answer_start"] for item in layer3["answers"]]
                        if len(_answers) == 0:
                            _answers = [""]
                            _answer_start = [-1]
                        yield (_context, _question, _answers, _answer_start)


@_add_docstring_header(num_lines=NUM_LINES)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev"))
def SQuAD1(root, split):
    """Demonstrates a use case where more complex processing is needed on the data stream.

    Here we process the dictionary returned by the standard JSON reader and write a
    custom datapipe to orchestrate data samples for the Q&A use case.
    """
    url_dp = IterableWrapper([URL[split]])
    # cache data on-disk with sanity check
    cache_dp = url_dp.on_disk_cache(
        filepath_fn=partial(_path_fn, root),
        hash_dict={_path_fn(root, URL[split]): MD5[split]},
        hash_type="md5",
    )
    cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
    cache_dp = FileOpener(cache_dp, mode="b")
    # stack custom data pipe on top of JSON reader to orchestrate data samples for Q&A dataset
    return _ParseSQuADQAData(cache_dp.parse_json_files())
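# A hedged usage sketch; the first call downloads the split's JSON file into
# the dataset directory before parsing:
if __name__ == "__main__":
    context, question, answers, answer_starts = next(iter(SQuAD1(split="dev")))
    print(question, answers[0])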
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import os
from functools import partial
from pathlib import Path

from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper

from .utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument

URL = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"

MD5 = "7c2ac02c03563afcf9b574c7e56c153a"

NUM_LINES = {
    "train": 25000,
    "test": 25000,
}

_PATH = "aclImdb_v1.tar.gz"

DATASET_NAME = "IMDB"


def _path_fn(root, path):
    return os.path.join(root, os.path.basename(path))


def _filter_fn(split, t):
    return Path(t[0]).parts[-3] == split and Path(t[0]).parts[-2] in ["pos", "neg"]


def _file_to_sample(t):
    return Path(t[0]).parts[-2], t[1].read().decode("utf-8")


@_add_docstring_header(num_lines=NUM_LINES, num_classes=2)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def IMDB(root, split):
    """Demonstrates a complex use case where each sample is stored in a separate file
    and compressed into a tar archive.

    Here we show some fancy filtering and mapping operations: filtering is needed to
    know which files belong to the train/test split and carry the neg/pos label, and
    mapping is needed to yield proper data samples by extracting the label from the
    file name and reading the data from the file.
    """
    url_dp = IterableWrapper([URL])
    # cache data on-disk
    cache_dp = url_dp.on_disk_cache(
        filepath_fn=partial(_path_fn, root),
        hash_dict={_path_fn(root, URL): MD5},
        hash_type="md5",
    )
    cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
    cache_dp = FileOpener(cache_dp, mode="b")

    # stack TAR extractor on top of load files data pipe
    extracted_files = cache_dp.load_from_tar()

    # filter the files as applicable to create dataset for given split (train or test)
    filter_files = extracted_files.filter(partial(_filter_fn, split))

    # map the file to yield proper data samples
    return filter_files.map(_file_to_sample)
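# A hedged usage sketch; each yielded sample is a ("pos"/"neg", review text) pair:
if __name__ == "__main__":
    label, text = next(iter(IMDB(split="train")))
    print(label, text[:80])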
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# The following utility functions are copied from torchtext
# https://github.com/pytorch/text/blob/main/torchtext/data/datasets_utils.py
import functools
import inspect
import os


def _check_default_set(split, target_select, dataset_name):
    # Check whether given object split is either a tuple of strings or string
    # and represents a valid selection of options given by the tuple of strings
    # target_select.
    if isinstance(split, str):
        split = (split,)
    if isinstance(target_select, str):
        target_select = (target_select,)
    if not isinstance(split, tuple):
        raise ValueError("Internal error: Expected split to be of type tuple.")
    if not set(split).issubset(set(target_select)):
        raise TypeError(
            "Given selection {} of splits is not supported for dataset {}. Please choose from {}.".format(
                split, dataset_name, target_select
            )
        )
    return split


def _wrap_datasets(datasets, split):
    # Wrap return value for _setup_datasets functions to support singular values instead
    # of tuples when split is a string.
    if isinstance(split, str):
        if len(datasets) != 1:
            raise ValueError("Internal error: Expected number of datasets is not 1.")
        return datasets[0]
    return datasets


def _dataset_docstring_header(fn, num_lines=None, num_classes=None):
    """
    Returns docstring for a dataset based on function arguments.

    Assumes function signature of form (root='.data', split=<some tuple of strings>, **kwargs)
    """
    argspec = inspect.getfullargspec(fn)
    if not (argspec.args[0] == "root" and argspec.args[1] == "split"):
        raise ValueError(f"Internal Error: Given function {fn} did not adhere to standard signature.")
    default_split = argspec.defaults[1]

    if not (isinstance(default_split, tuple) or isinstance(default_split, str)):
        raise ValueError(f"default_split type expected to be of string or tuple but got {type(default_split)}")

    header_s = fn.__name__ + " dataset\n"

    if isinstance(default_split, tuple):
        header_s += "\nSeparately returns the {} split".format("/".join(default_split))

    if isinstance(default_split, str):
        header_s += f"\nOnly returns the {default_split} split"

    if num_lines is not None:
        header_s += "\n\nNumber of lines per split:"
        for k, v in num_lines.items():
            header_s += f"\n    {k}: {v}\n"

    if num_classes is not None:
        header_s += "\n\nNumber of classes"
        header_s += f"\n    {num_classes}\n"

    args_s = "\nArgs:"
    args_s += "\n    root: Directory where the datasets are saved."
    args_s += "\n        Default: .data"

    if isinstance(default_split, tuple):
        args_s += "\n    split: split or splits to be returned. Can be a string or tuple of strings."
        args_s += "\n        Default: {}".format(str(default_split))

    if isinstance(default_split, str):
        args_s += f"\n    split: Only {default_split} is available."
        args_s += f"\n        Default: {default_split}"

    return "\n".join([header_s, args_s]) + "\n"


def _add_docstring_header(docstring=None, num_lines=None, num_classes=None):
    def docstring_decorator(fn):
        old_doc = fn.__doc__
        fn.__doc__ = _dataset_docstring_header(fn, num_lines, num_classes)
        if docstring is not None:
            fn.__doc__ += docstring
        if old_doc is not None:
            fn.__doc__ += old_doc
        return fn

    return docstring_decorator


def _wrap_split_argument_with_fn(fn, splits):
    """
    Wraps given function of specific signature to extend behavior of split
    to support individual strings. The given function is expected to have a split
    kwarg that accepts tuples of strings, e.g. ('train', 'valid'), and the returned
    function will have a split argument that also accepts strings, e.g. 'train',
    which are then turned into single-entry tuples. Furthermore, the return value
    of the wrapped function is unpacked if split is only a single string to enable
    behavior such as

    train = AG_NEWS(split='train')
    train, valid = AG_NEWS(split=('train', 'valid'))
    """
    argspec = inspect.getfullargspec(fn)
    if not (
        argspec.args[0] == "root"
        and argspec.args[1] == "split"
        and argspec.varargs is None
        and argspec.varkw is None
        and len(argspec.kwonlyargs) == 0
        and len(argspec.annotations) == 0
    ):
        raise ValueError(f"Internal Error: Given function {fn} did not adhere to standard signature.")

    @functools.wraps(fn)
    def new_fn(root=os.path.expanduser("~/.torchtext/cache"), split=splits, **kwargs):
        result = []
        for item in _check_default_set(split, splits, fn.__name__):
            result.append(fn(root, item, **kwargs))
        return _wrap_datasets(tuple(result), split)

    new_sig = inspect.signature(new_fn)
    new_sig_params = new_sig.parameters
    new_params = []
    new_params.append(new_sig_params["root"].replace(default=".data"))
    new_params.append(new_sig_params["split"].replace(default=splits))
    new_params += [entry[1] for entry in list(new_sig_params.items())[2:]]
    new_sig = new_sig.replace(parameters=tuple(new_params))
    new_fn.__signature__ = new_sig

    return new_fn


def _wrap_split_argument(splits):
    def new_fn(fn):
        return _wrap_split_argument_with_fn(fn, splits)

    return new_fn


def _create_dataset_directory(dataset_name):
    def decorator(func):
        argspec = inspect.getfullargspec(func)
        if not (
            argspec.args[0] == "root"
            and argspec.args[1] == "split"
            and argspec.varargs is None
            and argspec.varkw is None
            and len(argspec.kwonlyargs) == 0
            and len(argspec.annotations) == 0
        ):
            raise ValueError(f"Internal Error: Given function {func} did not adhere to standard signature.")

        @functools.wraps(func)
        def wrapper(root=os.path.expanduser("~/.torchtext/cache"), *args, **kwargs):
            new_root = os.path.join(root, dataset_name)
            if not os.path.exists(new_root):
                os.makedirs(new_root)
            return func(root=new_root, *args, **kwargs)

        return wrapper

    return decorator
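# A self-contained toy (the dataset function is hypothetical) showing what
# `_wrap_split_argument` buys the caller: both a single string and a tuple of
# strings are accepted, and single-string calls return a bare value rather
# than a one-element tuple.
@_wrap_split_argument(("train", "test"))
def ToyDataset(root, split):
    return f"{split}-data-from-{root}"


train = ToyDataset(split="train")                  # a single value
train, test = ToyDataset(split=("train", "test"))  # a 2-tuple
print(train, test)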
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import os
from functools import partial

from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper

from utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument

# URL to the target file that we will be downloading
URL = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM"

# Expected MD5 Hash of the target file, which will be later used to verify that the file we downloaded is authentic
MD5 = "fe39f8b653cada45afd5792e0f0e8f9b"

NUM_LINES = {
    "train": 3600000,
    "test": 400000,
}

# Path/name where we will be caching the downloaded file
_PATH = "amazon_review_polarity_csv.tar.gz"

# Mapping dataset type (train/test) to the corresponding expected file names.
_EXTRACTED_FILES = {
    "train": os.path.join("amazon_review_polarity_csv", "train.csv"),
    "test": os.path.join("amazon_review_polarity_csv", "test.csv"),
}

DATASET_NAME = "AmazonReviewPolarity"


def _path_fn(root, _=None):
    return os.path.join(root, _PATH)


def _cache_path_fn(root, split, _=None):
    return os.path.join(root, _EXTRACTED_FILES[split])


def _filter_fn(split, fname_and_stream):
    return _EXTRACTED_FILES[split] in fname_and_stream[0]


def _process_tuple(t):
    return int(t[0]), " ".join(t[1:])


@_add_docstring_header(num_lines=NUM_LINES, num_classes=2)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AmazonReviewPolarity(root, split):
    """Demonstrates caching, extraction and sanity-check pipelines."""
    # Wrapping the URL into an IterDataPipe
    url_dp = IterableWrapper([URL])
    # `.on_disk_cache` is the functional form of `OnDiskCacheHolder`, which caches the results from the
    # subsequent DataPipe operations (until `.end_caching`) onto the disk to the path as specified by `filepath_fn`.
    # In addition, since the optional argument `hash_dict` is given, the DataPipe will also check the hashes of
    # the files before saving them. `.on_disk_cache` merely indicates that caching will take place, but the
    # content of the previous DataPipe is unchanged. Therefore, `cache_compressed_dp` still contains URL(s).
    cache_compressed_dp = url_dp.on_disk_cache(
        filepath_fn=partial(_path_fn, root), hash_dict={_path_fn(root): MD5}, hash_type="md5"
    )
    # `GDriveReader` takes in URLs to GDrive files, and yields a tuple of file name and IO stream.
    cache_compressed_dp = GDriveReader(cache_compressed_dp)
    # `.end_caching` saves the previous DataPipe's outputs onto the disk. In this case,
    # the results from GDriveReader (i.e. the downloaded compressed archive) will be saved onto the disk.
    # Upon saving the results, the DataPipe returns the paths to the cached files.
    cache_compressed_dp = cache_compressed_dp.end_caching(mode="wb", same_filepath_fn=True)

    # `.on_disk_cache` is invoked again here and the subsequent DataPipe operations (until `.end_caching`)
    # will be saved onto the disk. At this point, `cache_decompressed_dp` contains paths to the cached files.
    cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=partial(_cache_path_fn, root, split))

    # Opens the cache files using `FileOpener`
    cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode="b")

    # Loads the content of the TAR archive file, yielding a tuple of file names and streams of the content.
    cache_decompressed_dp = cache_decompressed_dp.load_from_tar()

    # Filters for a specific file based on the file name from the previous DataPipe (either "train.csv" or "test.csv").
    cache_decompressed_dp = cache_decompressed_dp.filter(partial(_filter_fn, split))

    # `.end_caching` saves the decompressed file onto disk and yields the path to the file.
    cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)

    # Opens the decompressed file.
    data_dp = FileOpener(cache_decompressed_dp, mode="b")

    # Finally, this parses the content of the decompressed CSV file and returns the result line by line.
    return data_dp.parse_csv().map(_process_tuple)
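# A hedged usage sketch; on first use this downloads the archive from Google
# Drive into the dataset directory, verifies its MD5, and extracts the split:
if __name__ == "__main__":
    label, review = next(iter(AmazonReviewPolarity(split="train")))
    print(label, review[:80])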