import torch
import torchaudio
from torch.utils.mobile_optimizer import optimize_for_mobile
def get_demo_wrapper():
wrapper = torch.jit.load("scripted_wrapper_tuple.pt")
return wrapper
wrapper = get_demo_wrapper()
scripted_model = torch.jit.script(wrapper)
optimized_model = optimize_for_mobile(scripted_model)
optimized_model._save_for_lite_interpreter("streaming_asrv2.ptl")
print("Done _save_for_lite_interpreter")
|
import pyaudio
import queue
import numpy as np
import torch
import torchaudio
def get_demo_wrapper():
wrapper = torch.jit.load("scripted_wrapper_tuple.pt")
return wrapper
wrapper = get_demo_wrapper()
################################################################
data_queue = queue.Queue()
def callback(in_data, frame_count, time_info, status):
global data_queue
data_queue.put(in_data)
return in_data, pyaudio.paContinue
state = None
hypo = None
def transcribe(np_array, should_print=True):
global state, hypo
tensor = torch.tensor(np_array)
transcript, hypo, state = wrapper(tensor, hypo, state)
if should_print and transcript:
print(transcript, end="", flush=True)
previous_right_context = None
def process(should_print=True):
global previous_right_context
if previous_right_context is None:
previous_right_context = [
np.frombuffer(data_queue.get(), dtype=np.float32) for _ in range(1)
]
# Get 4 segments.
segments = [
np.frombuffer(data_queue.get(), dtype=np.float32) for _ in range(4)
]
current_input = previous_right_context + segments
with torch.no_grad():
transcribe(np.concatenate(current_input), should_print=should_print)
# Save right context.
previous_right_context = current_input[-1:]
# Emformer is configured with an input segment size of 4 and a right context
# size of 1 (post time reduction). With a time-reduction factor of 4, the
# RNN-T therefore consumes input segments of 16 frames with a right context
# of 4 frames. At a hop length of 160 samples, that is 16 * 160 = 2560 samples
# per input segment and 4 * 160 = 640 samples of right context.
# Since 640 divides 2560 evenly, we read from the stream in 640-sample
# increments.
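# A quick arithmetic check of the figures above (constants introduced here
# purely for illustration; they are not used by the pipeline):
SEGMENT_FRAMES, RIGHT_CONTEXT_FRAMES, HOP_LENGTH = 16, 4, 160
assert SEGMENT_FRAMES * HOP_LENGTH == 2560       # samples per input segment
assert RIGHT_CONTEXT_FRAMES * HOP_LENGTH == 640  # samples of right context
assert 2560 % 640 == 0  # 640-sample reads tile the segment exactly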
p = pyaudio.PyAudio()
CHANNELS = 1
RATE = 16000
stream = p.open(
format=pyaudio.paFloat32,
channels=CHANNELS,
rate=RATE,
input=True,
output=False,
frames_per_buffer=640,
stream_callback=callback,
)
stream.start_stream()
# We need to initialize the model by evaluating
# a few samples.
# If we skip this, evaluation latency will become
# prohibitively large.
print("Initializing model...")
for _ in range(10):
process(should_print=False)
print("Initialization complete.")
data_queue = queue.Queue()
previous_right_context = None
state = None
hypo = None  # reset the hypothesis used by transcribe()
while stream.is_active():
process(should_print=True)
stream.stop_stream()
stream.close()
|
import torch
import torchvision
from torch.backends._coreml.preprocess import (
CompileSpec,
TensorSpec,
CoreMLComputeUnit,
)
def mobilenetv2_spec():
return {
"forward": CompileSpec(
inputs=(
TensorSpec(
shape=[1, 3, 224, 224],
),
),
outputs=(
TensorSpec(
shape=[1, 1000],
),
),
backend=CoreMLComputeUnit.ALL,
allow_low_precision=True,
),
}
def main():
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
model = torch.jit.trace(model, example)
compile_spec = mobilenetv2_spec()
mlmodel = torch._C._jit_to_backend("coreml", model, compile_spec)
mlmodel._save_for_lite_interpreter("./mobilenetv2_coreml.ptl")
if __name__ == "__main__":
main()
|
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torch.hub.load('pytorch/vision:v0.11.0', 'deeplabv3_resnet50', pretrained=True)
model.eval()
scripted_module = torch.jit.script(model)
optimized_model = optimize_for_mobile(scripted_module)
optimized_model.save("ImageSegmentation/deeplabv3_scripted.pt")
optimized_model._save_for_lite_interpreter("ImageSegmentation/deeplabv3_scripted.ptl")
|
import torch
from torch import Tensor
from torch.utils.mobile_optimizer import optimize_for_mobile
import torchaudio
from torchaudio.models.wav2vec2.utils.import_huggingface import import_huggingface_model
from transformers import Wav2Vec2ForCTC
# The Wav2Vec2 model emits a sequence of logit distributions over characters.
# The following class adds greedy (best-path) decoding to produce the transcript.
class SpeechRecognizer(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
self.labels = [
"<s>", "<pad>", "</s>", "<unk>", "|", "E", "T", "A", "O", "N", "I", "H", "S",
"R", "D", "L", "U", "M", "W", "C", "F", "G", "Y", "P", "B", "V", "K", "'", "X",
"J", "Q", "Z"]
def forward(self, waveforms: Tensor) -> str:
"""Given a single channel speech data, return transcription.
Args:
waveforms (Tensor): Speech tensor. Shape `[1, num_frames]`.
Returns:
str: The resulting transcript
"""
logits, _ = self.model(waveforms) # [batch, num_seq, num_label]
best_path = torch.argmax(logits[0], dim=-1) # [num_seq,]
prev = ''
hypothesis = ''
for i in best_path:
char = self.labels[i]
if char == prev:
continue
if char == '<s>':
prev = ''
continue
hypothesis += char
prev = char
return hypothesis.replace('|', ' ')
# Load Wav2Vec2 pretrained model from Hugging Face Hub
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
# Convert the model to torchaudio format, which supports TorchScript.
model = import_huggingface_model(model)
# Remove weight normalization which is not supported by quantization.
model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
model = model.eval()
# Attach decoder
model = SpeechRecognizer(model)
# Apply quantization, then script and optimize for mobile
quantized_model = torch.quantization.quantize_dynamic(
model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
scripted_model = torch.jit.script(quantized_model)
optimized_model = optimize_for_mobile(scripted_model)
# Sanity check
waveform, _ = torchaudio.load('scent_of_a_woman_future.wav')
print(waveform.size())
print('Result:', optimized_model(waveform))
optimized_model._save_for_lite_interpreter("SpeechRecognition/wav2vec2.ptl")
|
import torch
from pytorchvideo.accelerator.deployment.mobile_cpu.utils.model_conversion import (
convert_to_deployable_form,
)
from pytorchvideo.models.accelerator.mobile_cpu.efficient_x3d import EfficientX3d
from torch.hub import load_state_dict_from_url
from torch.utils.mobile_optimizer import (
optimize_for_mobile,
)
model_efficient_x3d_xs = EfficientX3d(expansion='XS', head_act='identity')
checkpoint_path = 'https://dl.fbaipublicfiles.com/pytorchvideo/model_zoo/kinetics/efficient_x3d_xs_original_form.pyth'
checkpoint = load_state_dict_from_url(checkpoint_path)
model_efficient_x3d_xs.load_state_dict(checkpoint)
input_blob_size = (1, 3, 4, 160, 160)
input_tensor = torch.randn(input_blob_size)
model_efficient_x3d_xs_deploy = convert_to_deployable_form(model_efficient_x3d_xs, input_tensor)
traced_model = torch.jit.trace(model_efficient_x3d_xs_deploy, input_tensor, strict=False)
optimized_traced_model = optimize_for_mobile(traced_model)
optimized_traced_model._save_for_lite_interpreter("TorchVideo/video_classification.ptl")
|
import torch
from transformers import DistilBertTokenizer, DistilBertForQuestionAnswering
from torch.utils.mobile_optimizer import optimize_for_mobile
tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased-distilled-squad')
model = DistilBertForQuestionAnswering.from_pretrained('distilbert-base-uncased-distilled-squad')
model.eval()
question, text = "When will support for GPU be available?!", "There is a growing need to execute ML models on edge devices to reduce latency, preserve privacy and enable new interactive use cases. In the past, engineers used to train models separately. They would then go through a multi-step, error prone and often complex process to transform the models for execution on a mobile device. The mobile runtime was often significantly different from the operations available during training leading to inconsistent developer and eventually user experience. PyTorch Mobile removes these friction surfaces by allowing a seamless process to go from training to deployment by staying entirely within the PyTorch ecosystem. It provides an end-to-end workflow that simplifies the research to production environment for mobile devices. In addition, it paves the way for privacy-preserving features via Federated Learning techniques. PyTorch Mobile is in beta stage right now and in wide scale production use. It will soon be available as a stable release once the APIs are locked down. Key features of PyTorch Mobile: Available for iOS, Android and Linux; Provides APIs that cover common preprocessing and integration tasks needed for incorporating ML in mobile applications; Support for tracing and scripting via TorchScript IR; Support for XNNPACK floating point kernel libraries for Arm CPUs; Integration of QNNPACK for 8-bit quantized kernels. Includes support for per-channel quantization, dynamic quantization and more; Build level optimization and selective compilation depending on the operators needed for user applications, i.e., the final binary size of the app is determined by the actual operators the app needs; Support for hardware backends like GPU, DSP, NPU will be available soon."
inputs = tokenizer(question, text, return_tensors='pt')
# inputs['input_ids'] has shape [1, 360]; 360 is the maximum number of tokens
# generated from the question and text.
# In the mobile apps, if the question and text tokenize to fewer than 360 tokens,
# the input must be padded to 360 for the traced model to work correctly.
model_dynamic_quantized = torch.quantization.quantize_dynamic(model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
traced_model = torch.jit.trace(model_dynamic_quantized, inputs['input_ids'], strict=False)
optimized_traced_model = optimize_for_mobile(traced_model)
optimized_traced_model._save_for_lite_interpreter("QuestionAnswering/qa360_quantized.ptl")
# 360 is the length of model input, i.e. the length of the tokenized ids of question+text
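# A hedged sketch of decoding an answer on the host (assumes the traced model,
# like the eager Hugging Face model, returns a dict with 'start_logits' and
# 'end_logits'):
with torch.no_grad():
    out = traced_model(inputs['input_ids'])
start = torch.argmax(out['start_logits'])
end = torch.argmax(out['end_logits']) + 1
print(tokenizer.decode(inputs['input_ids'][0, start:end]))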
|
# based on https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SOS_token = 0
EOS_token = 1
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
# Turn a Unicode string to plain ASCII, thanks to
# https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
def readLangs(lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
MAX_LENGTH = 50
def filterPair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
def prepareData(lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
input_lang, output_lang, pairs = prepareData('eng', 'fra', True)
print(random.choice(pairs))
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size):
super(EncoderRNN, self).__init__()
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
class AttnDecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH):
super(AttnDecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.output_size = output_size
self.dropout_p = dropout_p
self.max_length = max_length
self.embedding = nn.Embedding(self.output_size, self.hidden_size)
self.attn = nn.Linear(self.hidden_size * 2, self.max_length)
self.attn_combine = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.dropout = nn.Dropout(self.dropout_p)
self.gru = nn.GRU(self.hidden_size, self.hidden_size)
self.out = nn.Linear(self.hidden_size, self.output_size)
def forward(self, input, hidden, encoder_outputs):
embedded = self.embedding(input).view(1, 1, -1)
embedded = self.dropout(embedded)
attn_weights = F.softmax(
self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
attn_applied = torch.bmm(attn_weights.unsqueeze(0),
encoder_outputs.unsqueeze(0))
output = torch.cat((embedded[0], attn_applied[0]), 1)
output = self.attn_combine(output).unsqueeze(0)
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = F.log_softmax(self.out(output[0]), dim=1)
return output, hidden, attn_weights
def initHidden(self):
return torch.zeros(1, 1, self.hidden_size, device=device)
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
def tensorsFromPair(pair):
input_tensor = tensorFromSentence(input_lang, pair[0])
target_tensor = tensorFromSentence(output_lang, pair[1])
return (input_tensor, target_tensor)
teacher_forcing_ratio = 0.5
def train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=MAX_LENGTH):
encoder_hidden = encoder.initHidden()
encoder_optimizer.zero_grad()
decoder_optimizer.zero_grad()
input_length = input_tensor.size(0)
target_length = target_tensor.size(0)
encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
loss = 0
for ei in range(input_length):
encoder_output, encoder_hidden = encoder(
input_tensor[ei], encoder_hidden)
encoder_outputs[ei] = encoder_output[0, 0]
decoder_input = torch.tensor([[SOS_token]], device=device)
decoder_hidden = encoder_hidden
use_teacher_forcing = True if random.random() < teacher_forcing_ratio else False
if use_teacher_forcing:
# Teacher forcing: Feed the target as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
loss += criterion(decoder_output, target_tensor[di])
decoder_input = target_tensor[di] # Teacher forcing
else:
# Without teacher forcing: use its own predictions as the next input
for di in range(target_length):
decoder_output, decoder_hidden, decoder_attention = decoder(
decoder_input, decoder_hidden, encoder_outputs)
topv, topi = decoder_output.topk(1)
decoder_input = topi.squeeze().detach() # detach from history as input
loss += criterion(decoder_output, target_tensor[di])
if decoder_input.item() == EOS_token:
break
loss.backward()
encoder_optimizer.step()
decoder_optimizer.step()
return loss.item() / target_length
import time
import math
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def trainIters(encoder, decoder, n_iters, print_every=1000, plot_every=100, learning_rate=0.01):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
training_pairs = [tensorsFromPair(random.choice(pairs))
for i in range(n_iters)]
criterion = nn.NLLLoss()
for iter in range(1, n_iters + 1):
training_pair = training_pairs[iter - 1]
input_tensor = training_pair[0]
target_tensor = training_pair[1]
loss = train(input_tensor, target_tensor, encoder,
decoder, encoder_optimizer, decoder_optimizer, criterion)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % 150000 == 0:
torch.save({
'encoder_state_dict': encoder.state_dict(),
'decoder_state_dict': decoder.state_dict(),
'encoder_optimizer_state_dict': encoder_optimizer.state_dict(),
'decoder_optimizer_state_dict': decoder_optimizer.state_dict(),
}, "seq2seq_mt_{}.pt".format(iter))
hidden_size = 256
encoder = EncoderRNN(input_lang.n_words, hidden_size).to(device)
decoder = AttnDecoderRNN(hidden_size, output_lang.n_words, dropout_p=0.1).to(device)
#trainIters(encoder, decoder, 450100, print_every=5000)
encoder = EncoderRNN(input_lang.n_words, hidden_size)
decoder = AttnDecoderRNN(hidden_size, output_lang.n_words)
encoder_optimizer = optim.SGD(encoder.parameters(), lr=0.01)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=0.01)
checkpoint = torch.load("seq2seq_mt_150000.pt", map_location=torch.device('cpu'))
encoder.load_state_dict(checkpoint['encoder_state_dict'])
decoder.load_state_dict(checkpoint['decoder_state_dict'])
encoder_optimizer.load_state_dict(checkpoint['encoder_optimizer_state_dict'])
decoder_optimizer.load_state_dict(checkpoint['decoder_optimizer_state_dict'])
encoder.eval()
decoder.eval()
# Example inputs for tracing: a single word index, the encoder hidden state,
# the SOS token, the decoder hidden state, and an encoder-outputs placeholder.
encoder_input = torch.tensor([429])
encoder_hidden = torch.zeros(1, 1, 256)
decoder_input1 = torch.tensor([[0]])
decoder_input2 = torch.zeros(1, 1, 256)
decoder_input3 = torch.zeros(50, 256)
# dynamic quantization can be applied to the decoder for its nn.Linear parameters
quantized_decoder = torch.quantization.quantize_dynamic(decoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
traced_encoder = torch.jit.trace(encoder, (encoder_input, encoder_hidden))
traced_decoder = torch.jit.trace(quantized_decoder, (decoder_input1, decoder_input2, decoder_input3))
from torch.utils.mobile_optimizer import optimize_for_mobile
traced_encoder_optimized = optimize_for_mobile(traced_encoder)
traced_encoder_optimized._save_for_lite_interpreter("optimized_encoder_150k.ptl")
traced_decoder_optimized = optimize_for_mobile(traced_decoder)
traced_decoder_optimized._save_for_lite_interpreter("optimized_decoder_150k.ptl")
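# A minimal greedy-decoding sketch using the traced modules above (a
# hypothetical helper, not part of the original recipe; runs on CPU, assumes
# MAX_LENGTH == 50, hidden size 256, and that every word of the input
# sentence is in input_lang's vocabulary):
def translate(sentence):
    with torch.no_grad():
        indexes = indexesFromSentence(input_lang, normalizeString(sentence))
        indexes.append(EOS_token)
        input_tensor = torch.tensor(indexes, dtype=torch.long).view(-1, 1)
        hidden = torch.zeros(1, 1, 256)
        encoder_outputs = torch.zeros(MAX_LENGTH, 256)
        for ei in range(input_tensor.size(0)):
            output, hidden = traced_encoder(input_tensor[ei], hidden)
            encoder_outputs[ei] = output[0, 0]
        decoder_input = torch.tensor([[SOS_token]])
        words = []
        for _ in range(MAX_LENGTH):
            output, hidden, _ = traced_decoder(decoder_input, hidden, encoder_outputs)
            topi = output.argmax(dim=1)
            if topi.item() == EOS_token:
                break
            words.append(output_lang.index2word[topi.item()])
            decoder_input = topi.unsqueeze(0)
        return ' '.join(words)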
|
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torch.hub.load('facebookresearch/deit:main', 'deit_base_patch16_224', pretrained=True)
quantized_model = torch.quantization.quantize_dynamic(model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
ts_model = torch.jit.script(quantized_model)
optimized_torchscript_model = optimize_for_mobile(ts_model)
optimized_torchscript_model.save("fbdeit.pt")
optimized_torchscript_model._save_for_lite_interpreter("fbdeit.ptl")
|
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.GELU(),
nn.Linear(hidden_dim, dim)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads=8):
super().__init__()
self.heads = heads
self.scale = dim ** -0.5
self.to_qkv = nn.Linear(dim, dim * 3, bias=False)
self.to_out = nn.Linear(dim, dim)
def forward(self, x, mask = None):
b, n, _, h = *x.shape, self.heads
qkv = self.to_qkv(x)
q, k, v = rearrange(qkv, 'b n (qkv h d) -> qkv b h n d', qkv=3, h=h)
dots = torch.einsum('bhid,bhjd->bhij', q, k) * self.scale
if mask is not None:
mask = F.pad(mask.flatten(1), (1, 0), value = True)
assert mask.shape[-1] == dots.shape[-1], 'mask has incorrect dimensions'
mask = mask[:, None, :] * mask[:, :, None]
dots.masked_fill_(~mask, float('-inf'))
del mask
attn = dots.softmax(dim=-1)
out = torch.einsum('bhij,bhjd->bhid', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
return out
class Transformer(nn.Module):
def __init__(self, dim, depth, heads, mlp_dim):
super().__init__()
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Residual(PreNorm(dim, Attention(dim, heads = heads))),
Residual(PreNorm(dim, FeedForward(dim, mlp_dim)))
]))
def forward(self, x, mask=None):
for attn, ff in self.layers:
x = attn(x, mask=mask)
x = ff(x)
return x
class ViT(nn.Module):
def __init__(self, *, image_size, patch_size, num_classes, dim, depth, heads, mlp_dim, channels=3):
super().__init__()
assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size'
num_patches = (image_size // patch_size) ** 2
patch_dim = channels * patch_size ** 2
self.patch_size = patch_size
self.pos_embedding = nn.Parameter(torch.randn(1, num_patches + 1, dim))
self.patch_to_embedding = nn.Linear(patch_dim, dim)
self.cls_token = nn.Parameter(torch.randn(1, 1, dim))
self.transformer = Transformer(dim, depth, heads, mlp_dim)
self.to_cls_token = nn.Identity()
self.mlp_head = nn.Sequential(
nn.Linear(dim, mlp_dim),
nn.GELU(),
nn.Linear(mlp_dim, num_classes)
)
def forward(self, img, mask=None):
p = self.patch_size
x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
x = self.patch_to_embedding(x)
cls_tokens = self.cls_token.expand(img.shape[0], -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
x += self.pos_embedding
x = self.transformer(x, mask)
x = self.to_cls_token(x[:, 0])
return self.mlp_head(x)
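# A minimal smoke test of the ViT above (hyperparameters chosen here purely
# for illustration), guarded so it does not run when this module is imported:
if __name__ == "__main__":
    v = ViT(image_size=28, patch_size=7, num_classes=10, channels=1,
            dim=64, depth=2, heads=4, mlp_dim=128)
    out = v(torch.randn(2, 1, 28, 28))
    assert out.shape == (2, 10)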
|
import torch
import torchvision
import time
from vit_pytorch import *
from torch.utils.mobile_optimizer import optimize_for_mobile
torch.manual_seed(42)
DOWNLOAD_PATH = 'data/mnist'
BATCH_SIZE_TRAIN = 100
BATCH_SIZE_TEST = 1000
# 0.1307 and 0.3081 are the mean and std computed on the MNIST training set
transform_mnist = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize((0.1307,), (0.3081,))])
train_set = torchvision.datasets.MNIST(DOWNLOAD_PATH, train=True, download=True,
transform=transform_mnist)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=BATCH_SIZE_TRAIN, shuffle=True)
test_set = torchvision.datasets.MNIST(DOWNLOAD_PATH, train=False, download=True,
transform=transform_mnist)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=BATCH_SIZE_TEST, shuffle=True)
def train_epoch(model, optimizer, data_loader, loss_history):
total_samples = len(data_loader.dataset)
model.train()
for i, (data, target) in enumerate(data_loader):
optimizer.zero_grad()
output = F.log_softmax(model(data), dim=1)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if i % 100 == 0:
print('[' + '{:5}'.format(i * len(data)) + '/' + '{:5}'.format(total_samples) +
' (' + '{:3.0f}'.format(100 * i / len(data_loader)) + '%)] Loss: ' +
'{:6.4f}'.format(loss.item()))
loss_history.append(loss.item())
def evaluate(model, data_loader, loss_history):
model.eval()
total_samples = len(data_loader.dataset)
correct_samples = 0
total_loss = 0
with torch.no_grad():
for data, target in data_loader:
output = F.log_softmax(model(data), dim=1)
loss = F.nll_loss(output, target, reduction='sum')
_, pred = torch.max(output, dim=1)
total_loss += loss.item()
correct_samples += pred.eq(target).sum()
avg_loss = total_loss / total_samples
loss_history.append(avg_loss)
print('\nAverage test loss: ' + '{:.4f}'.format(avg_loss) +
' Accuracy:' + '{:5}'.format(correct_samples) + '/' +
'{:5}'.format(total_samples) + ' (' +
'{:4.2f}'.format(100.0 * correct_samples / total_samples) + '%)\n')
N_EPOCHS = 10
start_time = time.time()
model = ViT(image_size=28, patch_size=7, num_classes=10, channels=1,
dim=64, depth=6, heads=8, mlp_dim=128)
optimizer = torch.optim.Adam(model.parameters(), lr=0.003)
train_loss_history, test_loss_history = [], []
for epoch in range(1, N_EPOCHS + 1):
print('Epoch:', epoch)
train_epoch(model, optimizer, train_loader, train_loss_history)
evaluate(model, test_loader, test_loss_history)
print('Execution time:', '{:5.2f}'.format(time.time() - start_time), 'seconds')
with torch.no_grad():
for data, target in test_loader:
output = F.log_softmax(model(data), dim=1)
loss = F.nll_loss(output, target, reduction='sum')
_, pred = torch.max(output, dim=1)
# the original trained model
torch.save(model, "vit4mnist.pt")
model = torch.load("vit4mnist.pt")
model.eval()
quantized_model = torch.quantization.quantize_dynamic(model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
dummy_input = torch.zeros(1, 1, 28, 28)
ts_model = torch.jit.trace(quantized_model, dummy_input)
optimized_torchscript_model = optimize_for_mobile(ts_model)
# the quantized, scripted, and optimized model
optimized_torchscript_model._save_for_lite_interpreter("ViT4MNIST/vit4mnist.ptl")
|
#!/usr/bin/env python3
import contextlib
import copy
import os
import unittest
from PIL import Image
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
from d2go.export.api import convert_and_export_predictor
from d2go.export.d2_meta_arch import patch_d2_meta_arch
from d2go.runner import create_runner, GeneralizedRCNNRunner
from d2go.model_zoo import model_zoo
from mobile_cv.common.misc.file_utils import make_temp_directory
patch_d2_meta_arch()
def test_export_torchvision_format():
cfg_name = 'faster_rcnn_fbnetv3a_dsmask_C4.yaml'
pytorch_model = model_zoo.get(cfg_name, trained=True)
from typing import List, Dict
class Wrapper(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
coco_idx_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77,
78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91]
self.coco_idx = torch.tensor(coco_idx_list)
def forward(self, inputs: List[torch.Tensor]):
x = inputs[0].unsqueeze(0) * 255
scale = 320.0 / min(x.shape[-2], x.shape[-1])
x = torch.nn.functional.interpolate(x, scale_factor=scale, mode="bilinear", align_corners=True, recompute_scale_factor=True)
out = self.model(x[0])
res : Dict[str, torch.Tensor] = {}
res["boxes"] = out[0] / scale
res["labels"] = torch.index_select(self.coco_idx, 0, out[1])
res["scores"] = out[2]
return inputs, [res]
size_divisibility = max(pytorch_model.backbone.size_divisibility, 10)
h, w = size_divisibility, size_divisibility * 2
runner = create_runner("d2go.runner.GeneralizedRCNNRunner")
cfg = model_zoo.get_config(cfg_name)
datasets = list(cfg.DATASETS.TRAIN)
data_loader = runner.build_detection_test_loader(cfg, datasets)
predictor_path = convert_and_export_predictor(
cfg,
copy.deepcopy(pytorch_model),
"torchscript_int8@tracing",
'./',
data_loader,
)
orig_model = torch.jit.load(os.path.join(predictor_path, "model.jit"))
wrapped_model = Wrapper(orig_model)
# Optionally run a forward pass to sanity-check the wrapper
wrapped_model([torch.rand(3, 600, 600)])
scripted_model = torch.jit.script(wrapped_model)
optimized_model = optimize_for_mobile(scripted_model)
optimized_model.save("D2Go/d2go_optimized.pt")
optimized_model._save_for_lite_interpreter("D2Go/d2go_optimized.ptl")
if __name__ == '__main__':
test_export_torchvision_format()
|
import torch
import torchvision
from torch.utils.mobile_optimizer import optimize_for_mobile
model = torchvision.models.quantization.mobilenet_v2(pretrained=True, quantize=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
torchscript_model_optimized = optimize_for_mobile(traced_script_module)
torchscript_model_optimized.save("mobilenet_quantized.pt")
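# Optionally, also export for the lite interpreter, mirroring the other
# recipes in this collection:
torchscript_model_optimized._save_for_lite_interpreter("mobilenet_quantized.ptl")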
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from pathlib import Path
from typing import Dict, List, Optional, Set
import torch.utils.data.datapipes.gen_pyi as core_gen_pyi
from torch.utils.data.datapipes.gen_pyi import gen_from_template, get_method_definitions
def get_lines_base_file(base_file_path: str, to_skip: Optional[Set[str]] = None):
with open(base_file_path) as f:
lines = f.readlines()
res = []
if to_skip is None:
return lines
for line in lines:
skip_flag = False
for skip_line in to_skip:
if skip_line in line:
skip_flag = True
if not skip_flag:
line = line.replace("\n", "")
res.append(line)
return res
def gen_pyi() -> None:
DATAPIPE_DIR = Path(__file__).parent.parent.resolve() / "torchdata" / "datapipes"
print(f"Generating DataPipe Python interface file in {DATAPIPE_DIR}")
# Base __init__ file
iter_init_base = get_lines_base_file(
os.path.join(DATAPIPE_DIR, "iter/__init__.py"),
{"from torch.utils.data import IterDataPipe", "# Copyright (c) Facebook, Inc. and its affiliates."},
)
map_init_base = get_lines_base_file(
os.path.join(DATAPIPE_DIR, "map/__init__.py"),
{"from torch.utils.data import MapDataPipe", "# Copyright (c) Facebook, Inc. and its affiliates."},
)
# Core Definitions
core_iter_method_definitions = get_method_definitions(
core_gen_pyi.iterDP_file_path,
core_gen_pyi.iterDP_files_to_exclude,
core_gen_pyi.iterDP_deprecated_files,
"IterDataPipe",
core_gen_pyi.iterDP_method_to_special_output_type,
)
core_map_method_definitions = get_method_definitions(
core_gen_pyi.mapDP_file_path,
core_gen_pyi.mapDP_files_to_exclude,
core_gen_pyi.mapDP_deprecated_files,
"MapDataPipe",
core_gen_pyi.mapDP_method_to_special_output_type,
)
# TorchData Definitions
# IterDataPipes
iterDP_file_paths: List[str] = ["iter/load", "iter/transform", "iter/util"]
iterDP_files_to_exclude: Set[str] = {"__init__.py"}
iterDP_deprecated_files: Set[str] = set()
iterDP_method_to_special_output_type: Dict[str, str] = {
"async_map_batches": "IterDataPipe",
"bucketbatch": "IterDataPipe",
"dataframe": "torcharrow.DataFrame",
"end_caching": "IterDataPipe",
"extract": "IterDataPipe",
"random_split": "Union[IterDataPipe, List[IterDataPipe]]",
"read_from_tar": "IterDataPipe",
"read_from_xz": "IterDataPipe",
"read_from_zip": "IterDataPipe",
"round_robin_demux": "List[IterDataPipe]",
"to_map_datapipe": "MapDataPipe",
"unzip": "List[IterDataPipe]",
}
iter_method_name_exclusion: Set[str] = {"def extract", "read_from_tar", "read_from_xz", "read_from_zip"}
td_iter_method_definitions = get_method_definitions(
iterDP_file_paths,
iterDP_files_to_exclude,
iterDP_deprecated_files,
"IterDataPipe",
iterDP_method_to_special_output_type,
root=str(DATAPIPE_DIR),
)
td_iter_method_definitions = [
s for s in td_iter_method_definitions if all(ex not in s for ex in iter_method_name_exclusion)
]
iter_method_definitions = core_iter_method_definitions + td_iter_method_definitions
iter_replacements = [("${init_base}", iter_init_base, 0), ("${IterDataPipeMethods}", iter_method_definitions, 4)]
gen_from_template(
dir=str(DATAPIPE_DIR),
template_name="iter/__init__.pyi.in",
output_name="iter/__init__.pyi",
replacements=iter_replacements,
)
# MapDataPipes
mapDP_file_paths: List[str] = ["map/load", "map/transform", "map/util"]
mapDP_files_to_exclude: Set[str] = {"__init__.py"}
mapDP_deprecated_files: Set[str] = set()
mapDP_method_to_special_output_type: Dict[str, str] = {
"unzip": "List[MapDataPipe]",
"to_iter_datapipe": "IterDataPipe",
}
map_method_name_exclusion: Set[str] = set()
td_map_method_definitions = get_method_definitions(
mapDP_file_paths,
mapDP_files_to_exclude,
mapDP_deprecated_files,
"MapDataPipe",
mapDP_method_to_special_output_type,
root=str(DATAPIPE_DIR),
)
td_map_method_definitions = [
s for s in td_map_method_definitions if all(ex not in s for ex in map_method_name_exclusion)
]
map_method_definitions = core_map_method_definitions + td_map_method_definitions
map_replacements = [("${init_base}", map_init_base, 0), ("${MapDataPipeMethods}", map_method_definitions, 4)]
gen_from_template(
dir=str(DATAPIPE_DIR),
template_name="map/__init__.pyi.in",
output_name="map/__init__.pyi",
replacements=map_replacements,
)
if __name__ == "__main__":
gen_pyi()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# This script can be used with
# find -name '*.py' | grep -v third_party | perl -ne'print "python tools/todo.py $_"' | head -n 5 | bash
import configparser
import os
import re
import shutil
import sys
import tempfile
from github import Github # pip install PyGithub
file_name = sys.argv[1]
config = configparser.ConfigParser(allow_no_value=True)
with open(os.path.join(os.path.expanduser("~"), ".ghstackrc")) as stream:
config.read_string(stream.read())
GITHUB_KEY = config["ghstack"]["github_oauth"]
def get_git_branch_hash():
stream = os.popen("git rev-parse origin/main")
return stream.read().rstrip()
def generate_issue_id(id_or_name, title, file_name, line_number):
git_branch_hash = get_git_branch_hash()
# print(file_name, line_number, title, id_or_name)
match = re.match(r"\((\d+)\)", id_or_name)
if match:
return int(match.group(1))
match = re.match(r"\((.*)\)", id_or_name)
name = None
if match:
name = match.group(1)
if name is not None:
owner = f"cc @{name}"
else:
owner = ""
g = Github(GITHUB_KEY)
repo = g.get_repo("pytorch/data")
# label_be = repo.get_label("better-engineering" )
# labels = [label_be]
line_reference = f"https://github.com/pytorch/data/blob/{git_branch_hash}/{file_name}#L{line_number}"
line_reference = line_reference.replace("/./", "/")
body = """
This issue is generated from the TODO line
{line_reference}
{owner}
""".format(
owner=owner,
line_reference=line_reference,
)
title = f"[TODO] {title}"
issue = repo.create_issue(title=title, body=body, labels=[])
print(f"Created issue https://github.com/pytorch/data/issues/{issue.number}")
return issue.number
def update_file(file_name):
try:
f = tempfile.NamedTemporaryFile(delete=False)
shutil.copyfile(file_name, f.name)
with open(f.name) as f_inp:
with open(file_name, "w") as f_out:
for line_number, line in enumerate(f_inp.readlines()):
if not re.search(r"ignore-todo", line, re.IGNORECASE):
match = re.search(r"(.*?)#\s*todo\s*(\([^)]+\)){0,1}:{0,1}(.*)", line, re.IGNORECASE)
if match:
# print(line)
prefix = match.group(1)
text = match.group(3)
issue_id = generate_issue_id(str(match.group(2)), text, file_name, line_number + 1)
line = f"{prefix}# TODO({issue_id}):{text}\n" # ignore-todo
f_out.write(line)
except Exception as e:
shutil.copyfile(f.name, file_name)
raise e
finally:
os.unlink(f.name)
update_file(file_name)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import distutils.sysconfig
import os
import platform
import subprocess
import sys
from pathlib import Path
from setuptools.command.build_ext import build_ext
__all__ = [
"get_ext_modules",
"CMakeBuild",
]
_THIS_DIR = Path(__file__).parent.resolve()
_ROOT_DIR = _THIS_DIR.parent.parent.resolve()
def _get_build(var, default=False):
if var not in os.environ:
return default
val = os.environ.get(var, "0")
trues = ["1", "true", "TRUE", "on", "ON", "yes", "YES"]
falses = ["0", "false", "FALSE", "off", "OFF", "no", "NO"]
if val in trues:
return True
if val not in falses:
print(f"WARNING: Unexpected environment variable value `{var}={val}`. " f"Expected one of {trues + falses}")
return False
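# Example (hypothetical invocation): `BUILD_S3=1 pip install .` enables the S3
# extension, and `USE_SYSTEM_LIBS=1` builds against system-provided
# dependencies instead of the bundled third_party copies.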
_BUILD_S3 = _get_build("BUILD_S3", False)
_USE_SYSTEM_AWS_SDK_CPP = _get_build("USE_SYSTEM_AWS_SDK_CPP", False)
_USE_SYSTEM_PYBIND11 = _get_build("USE_SYSTEM_PYBIND11", False)
_USE_SYSTEM_LIBS = _get_build("USE_SYSTEM_LIBS", False)
try:
# Use the pybind11 from third_party
if not (_USE_SYSTEM_PYBIND11 or _USE_SYSTEM_LIBS):
sys.path.insert(0, str(_ROOT_DIR / "third_party/pybind11/"))
from pybind11.setup_helpers import Pybind11Extension
except ImportError:
from setuptools import Extension as Pybind11Extension
def get_ext_modules():
if _BUILD_S3:
return [Pybind11Extension(name="torchdata._torchdata", sources=[])]
else:
return []
class CMakeBuild(build_ext):
def run(self):
try:
subprocess.check_output(["cmake", "--version"])
except OSError:
raise RuntimeError("CMake is not available.") from None
super().run()
def build_extension(self, ext):
# Because the following `cmake` command builds all of `ext_modules` at the same time,
# we would like to prevent multiple calls to `cmake`.
# Therefore, we call `cmake` only for `torchdata._torchdata`,
# in case `ext_modules` contains more than one module.
if ext.name != "torchdata._torchdata":
return
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
debug = int(os.environ.get("DEBUG", 0)) if self.debug is None else self.debug
cfg = "Debug" if debug else "Release"
cmake_args = [
f"-DCMAKE_BUILD_TYPE={cfg}",
f"-DCMAKE_INSTALL_PREFIX={extdir}",
f"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={extdir}",
f"-DCMAKE_RUNTIME_OUTPUT_DIRECTORY={extdir}", # For Windows
f"-DPython_INCLUDE_DIR={distutils.sysconfig.get_python_inc()}",
f"-DBUILD_S3:BOOL={'ON' if _BUILD_S3 else 'OFF'}",
f"-DUSE_SYSTEM_AWS_SDK_CPP:BOOL={'ON' if _USE_SYSTEM_AWS_SDK_CPP else 'OFF'}",
f"-DUSE_SYSTEM_PYBIND11:BOOL={'ON' if _USE_SYSTEM_PYBIND11 else 'OFF'}",
f"-DUSE_SYSTEM_LIBS:BOOL={'ON' if _USE_SYSTEM_LIBS else 'OFF'}",
]
build_args = ["--config", cfg]
# Default to Ninja
if "CMAKE_GENERATOR" not in os.environ or platform.system() == "Windows":
cmake_args += ["-GNinja"]
if platform.system() == "Windows":
python_version = sys.version_info
cmake_args += [
"-DCMAKE_C_COMPILER=cl",
"-DCMAKE_CXX_COMPILER=cl",
f"-DPYTHON_VERSION={python_version.major}.{python_version.minor}",
]
# Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
# across all generators.
if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
# self.parallel is a Python 3 only way to set parallel jobs by hand
# using -j in the build_ext call, not supported by pip or PyPA-build.
if hasattr(self, "parallel") and self.parallel:
# CMake 3.12+ only.
build_args += [f"-j{self.parallel}"]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(["cmake", str(_ROOT_DIR)] + cmake_args, cwd=self.build_temp)
subprocess.check_call(["cmake", "--build", "."] + build_args, cwd=self.build_temp)
def get_ext_filename(self, fullname):
ext_filename = super().get_ext_filename(fullname)
ext_filename_parts = ext_filename.split(".")
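# Drop the Python ABI tag, e.g. (illustrative filename)
# "_torchdata.cpython-310-x86_64-linux-gnu.so" -> "_torchdata.so",
# since the CMake build emits the extension without the ABI suffix.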
without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:]
ext_filename = ".".join(without_abi)
return ext_filename
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import unittest
import expecttest
from torchdata.datapipes.iter import GDriveReader, IterableWrapper, OnlineReader
# This TestCase is created due to the limited quota for accessing Google Drive
class TestDataPipePeriod(expecttest.TestCase):
def test_gdrive_iterdatapipe(self):
amazon_review_url = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM"
expected_file_name = "amazon_review_polarity_csv.tar.gz"
expected_MD5_hash = "fe39f8b653cada45afd5792e0f0e8f9b"
query_params = {"auth": ("fake_username", "fake_password"), "allow_redirects": True}
timeout = 120
gdrive_reader_dp = GDriveReader(IterableWrapper([amazon_review_url]), timeout=timeout, **query_params)
# Functional Test: test if the GDrive Reader can download and read properly
reader_dp = gdrive_reader_dp.readlines()
it = iter(reader_dp)
path, line = next(it)
self.assertEqual(expected_file_name, os.path.basename(path))
self.assertTrue(line != b"")
# Reset Test: gdrive_reader_dp has been read, but we reset when calling check_hash()
check_cache_dp = gdrive_reader_dp.check_hash({expected_file_name: expected_MD5_hash}, "md5", rewind=False)
it = iter(check_cache_dp)
path, stream = next(it)
self.assertEqual(expected_file_name, os.path.basename(path))
self.assertTrue(io.BufferedReader, type(stream))
# __len__ Test: returns the length of source DataPipe
source_dp = IterableWrapper([amazon_review_url])
gdrive_dp = GDriveReader(source_dp)
self.assertEqual(1, len(gdrive_dp))
# Error Test: test if the GDrive Reader raises an error when the url is invalid
error_url = "https://drive.google.com/uc?export=download&id=filedoesnotexist"
http_error_dp = GDriveReader(IterableWrapper([error_url]), timeout=timeout)
with self.assertRaisesRegex(
Exception, r"404.+https://drive.google.com/uc\?export=download&id=filedoesnotexist"
):
next(iter(http_error_dp.readlines()))
# Feature skip-error Test: test if the GDrive Reader skips urls causing problems
gdrive_skip_error_dp = GDriveReader(
IterableWrapper([error_url, amazon_review_url]), timeout=timeout, skip_on_error=True
)
reader_dp = gdrive_skip_error_dp.readlines()
with self.assertWarnsRegex(
Warning, r"404.+https://drive.google.com/uc\?export=download&id=filedoesnotexist.+skipping"
):
it = iter(reader_dp)
path, line = next(it)
self.assertEqual(expected_file_name, os.path.basename(path))
self.assertTrue(line != b"")
def test_online_iterdatapipe(self):
license_file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE"
amazon_review_url = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM"
expected_license_file_name = "LICENSE"
expected_amazon_file_name = "amazon_review_polarity_csv.tar.gz"
expected_license_MD5_hash = "bb9675028dd39d2dd2bf71002b93e66c"
expected_amazon_MD5_hash = "fe39f8b653cada45afd5792e0f0e8f9b"
query_params = {"auth": ("fake_username", "fake_password"), "allow_redirects": True}
timeout = 120
file_hash_dict = {
license_file_url: expected_license_MD5_hash,
expected_amazon_file_name: expected_amazon_MD5_hash,
}
# Functional Test: can read from GDrive links
online_reader_dp = OnlineReader(IterableWrapper([amazon_review_url]), timeout=timeout, **query_params)
reader_dp = online_reader_dp.readlines()
it = iter(reader_dp)
path, line = next(it)
self.assertEqual(expected_amazon_file_name, os.path.basename(path))
self.assertTrue(line != b"")
# Functional Test: can read from other links
online_reader_dp = OnlineReader(IterableWrapper([license_file_url]))
reader_dp = online_reader_dp.readlines()
it = iter(reader_dp)
path, line = next(it)
self.assertEqual(expected_license_file_name, os.path.basename(path))
self.assertTrue(line != b"")
# Reset Test: reset online_reader_dp by calling check_hash
check_cache_dp = online_reader_dp.check_hash(file_hash_dict, "md5", rewind=False)
it = iter(check_cache_dp)
path, stream = next(it)
self.assertEqual(expected_license_file_name, os.path.basename(path))
self.assertTrue(io.BufferedReader, type(stream))
# Functional Test: works with multiple URLs of different sources
online_reader_dp = OnlineReader(IterableWrapper([license_file_url, amazon_review_url]))
check_cache_dp = online_reader_dp.check_hash(file_hash_dict, "md5", rewind=False)
it = iter(check_cache_dp)
for expected_file_name, (path, stream) in zip([expected_license_file_name, expected_amazon_file_name], it):
self.assertEqual(expected_file_name, os.path.basename(path))
self.assertTrue(io.BufferedReader, type(stream))
# __len__ Test: returns the length of source DataPipe
self.assertEqual(2, len(online_reader_dp))
# Error Test: test if the Online Reader raises an error when the url is invalid
error_url_http = "https://github.com/pytorch/data/this/url/dont/exist"
online_error_dp = OnlineReader(IterableWrapper([error_url_http]), timeout=timeout)
with self.assertRaisesRegex(Exception, f"404.+{error_url_http}"):
next(iter(online_error_dp.readlines()))
error_url_gdrive = "https://drive.google.com/uc?export=download&id=filedoesnotexist"
online_error_dp = OnlineReader(IterableWrapper([error_url_gdrive]), timeout=timeout)
with self.assertRaisesRegex(
Exception, r"404.+https://drive.google.com/uc\?export=download&id=filedoesnotexist"
):
next(iter(online_error_dp.readlines()))
# Feature skip-error Test: test if the Online Reader skips urls causing problems
online_skip_error_dp = OnlineReader(
IterableWrapper([error_url_http, error_url_gdrive, license_file_url]), timeout=timeout, skip_on_error=True
)
reader_dp = online_skip_error_dp.readlines()
with self.assertWarnsRegex(Warning, f"404.+{error_url_http}.+skipping"):
it = iter(reader_dp)
path, line = next(it)
self.assertEqual(expected_license_file_name, os.path.basename(path))
self.assertTrue(b"BSD" in line)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock, patch
import expecttest
from torch.testing._internal.common_utils import IS_SANDCASTLE
from torchdata.datapipes.iter import IterableWrapper, S3FileLister
skipIfSandcastle = unittest.skipIf(IS_SANDCASTLE, "Skip for internal testing")
@skipIfSandcastle
@patch("torchdata._torchdata")
class TestS3FileListerIterDataPipe(expecttest.TestCase):
def test_list_files(self, mock_torchdata):
s3handler_mock = MagicMock()
mock_torchdata.S3Handler.return_value = s3handler_mock
s3handler_mock.list_files = MagicMock(
side_effect=[["s3://bucket-name/folder/a.txt", "s3://bucket-name/folder/b.csv"], []]
)
s3_prefixes = IterableWrapper(["s3://bucket-name/folder/"])
dp_s3_urls = S3FileLister(s3_prefixes)
assert list(dp_s3_urls) == ["s3://bucket-name/folder/a.txt", "s3://bucket-name/folder/b.csv"]
def test_list_files_with_filter_mask(self, mock_torchdata):
s3handler_mock = MagicMock()
mock_torchdata.S3Handler.return_value = s3handler_mock
s3handler_mock.list_files = MagicMock(
side_effect=[["s3://bucket-name/folder/a.txt", "s3://bucket-name/folder/b.csv"], []]
)
s3_prefixes = IterableWrapper(["s3://bucket-name/folder/"])
dp_s3_urls = S3FileLister(s3_prefixes, masks="*.csv")
assert list(dp_s3_urls) == ["s3://bucket-name/folder/b.csv"]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import warnings
import expecttest
from _utils._common_utils_for_test import create_temp_dir, create_temp_files, reset_after_n_next_calls
from torchdata.datapipes.iter import (
FileLister,
FSSpecFileLister,
FSSpecFileOpener,
FSSpecSaver,
IterableWrapper,
IterDataPipe,
)
try:
import fsspec
HAS_FSSPEC = True
except ImportError:
HAS_FSSPEC = False
skipIfNoFSSpec = unittest.skipIf(not HAS_FSSPEC, "no fsspec")
class TestDataPipeFSSpec(expecttest.TestCase):
def setUp(self):
self.temp_dir = create_temp_dir()
self.temp_files = create_temp_files(self.temp_dir)
self.temp_sub_dir = create_temp_dir(self.temp_dir.name)
self.temp_sub_files = create_temp_files(self.temp_sub_dir, 4, False)
self.temp_dir_2 = create_temp_dir()
self.temp_files_2 = create_temp_files(self.temp_dir_2)
self.temp_sub_dir_2 = create_temp_dir(self.temp_dir_2.name)
self.temp_sub_files_2 = create_temp_files(self.temp_sub_dir_2, 4, False)
def tearDown(self):
try:
self.temp_sub_dir.cleanup()
self.temp_dir.cleanup()
self.temp_sub_dir_2.cleanup()
self.temp_dir_2.cleanup()
except Exception as e:
warnings.warn(f"TestDataPipeFSSpec was not able to cleanup temp dir due to {e}")
def _write_text_files(self):
def filepath_fn(name: str) -> str:
return os.path.join(self.temp_dir.name, os.path.basename(name))
name_to_data = {"1.text": b"DATA", "2.text": b"DATA", "3.text": b"DATA"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
saver_dp = source_dp.save_to_disk(filepath_fn=filepath_fn, mode="wb")
list(saver_dp)
@skipIfNoFSSpec
def test_fsspec_file_lister_iterdatapipe(self):
datapipe: IterDataPipe = FSSpecFileLister(root="file://" + self.temp_sub_dir.name)
# check all file paths within sub_folder are listed
for path in datapipe:
self.assertIn(
path.split("://")[1],
{fsspec.implementations.local.make_path_posix(file) for file in self.temp_sub_files},
)
# checks for functional API
datapipe = IterableWrapper(["file://" + self.temp_sub_dir.name])
datapipe = datapipe.list_files_by_fsspec()
for path in datapipe:
self.assertIn(
path.split("://")[1],
{fsspec.implementations.local.make_path_posix(file) for file in self.temp_sub_files},
)
@skipIfNoFSSpec
def test_fsspec_file_lister_iterdatapipe_with_list(self):
datapipe: IterDataPipe = FSSpecFileLister(
root=["file://" + self.temp_sub_dir.name, "file://" + self.temp_sub_dir_2.name]
)
# check all file paths within sub_folder are listed
file_lister = list(map(lambda path: path.split("://")[1], datapipe))
file_lister.sort()
temp_files = list(
map(
lambda file: fsspec.implementations.local.make_path_posix(file),
self.temp_sub_files + self.temp_sub_files_2,
)
)
temp_files.sort()
# check all file paths within sub_folder are listed
self.assertEqual(file_lister, temp_files)
# checks for functional API
datapipe = IterableWrapper(["file://" + self.temp_sub_dir.name, "file://" + self.temp_sub_dir_2.name])
datapipe = datapipe.list_files_by_fsspec()
res = list(map(lambda path: path.split("://")[1], datapipe))
res.sort()
temp_files = list(
map(
lambda file: fsspec.implementations.local.make_path_posix(file),
self.temp_sub_files + self.temp_sub_files_2,
)
)
temp_files.sort()
self.assertEqual(res, temp_files)
@skipIfNoFSSpec
def test_fsspec_file_loader_iterdatapipe(self):
datapipe1 = FSSpecFileLister(root="file://" + self.temp_sub_dir.name)
datapipe2 = FSSpecFileOpener(datapipe1)
datapipe3 = FSSpecFileOpener(datapipe1, kwargs_for_open={"encoding": "cp037"})
# check contents of file match
for _, f in datapipe2:
self.assertEqual(f.read(), "0123456789abcdef")
# Opened with a different encoding, hence NotEqual
for _, f in datapipe3:
self.assertNotEqual(f.read(), "0123456789abcdef")
# Reset Test: Ensure the resulting streams are still readable after the DataPipe is reset/exhausted
self._write_text_files()
lister_dp = FileLister(self.temp_dir.name, "*.text")
fsspec_file_opener_dp = lister_dp.open_files_by_fsspec(mode="rb")
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(fsspec_file_opener_dp, n_elements_before_reset)
self.assertEqual(2, len(res_before_reset))
self.assertEqual(3, len(res_after_reset))
for _name, stream in res_before_reset:
self.assertEqual(b"DATA", stream.read())
for _name, stream in res_after_reset:
self.assertEqual(b"DATA", stream.read())
@skipIfNoFSSpec
def test_fsspec_saver_iterdatapipe(self):
def filepath_fn(name: str) -> str:
return "file://" + os.path.join(self.temp_dir.name, os.path.basename(name))
# Functional Test: Saving some data
name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
saver_dp = source_dp.save_by_fsspec(filepath_fn=filepath_fn, mode="wb")
res_file_paths = list(saver_dp)
expected_paths = [filepath_fn(name) for name in name_to_data.keys()]
self.assertEqual(expected_paths, res_file_paths)
for name in name_to_data.keys():
p = filepath_fn(name).split("://")[1]
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# Reset Test:
saver_dp = FSSpecSaver(source_dp, filepath_fn=filepath_fn, mode="wb")
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(saver_dp, n_elements_before_reset)
self.assertEqual([filepath_fn("1.txt"), filepath_fn("2.txt")], res_before_reset)
self.assertEqual(expected_paths, res_after_reset)
for name in name_to_data.keys():
p = filepath_fn(name).split("://")[1]
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# __len__ Test: returns the length of source DataPipe
self.assertEqual(3, len(saver_dp))
@skipIfNoFSSpec
def test_fsspec_memory_list(self):
fs = fsspec.filesystem("memory")
fs.mkdir("foo")
fs.touch("foo/bar1")
fs.touch("foo/bar2")
datapipe = FSSpecFileLister(root="memory://foo")
self.assertEqual(set(datapipe), {"memory:///foo/bar1", "memory:///foo/bar2"})
datapipe = FSSpecFileLister(root="memory://foo/bar1")
self.assertEqual(set(datapipe), {"memory://foo/bar1"})
@skipIfNoFSSpec
def test_fsspec_memory_load(self):
fs = fsspec.filesystem("memory")
with fs.open("file", "w") as f:
f.write("hello")
with fs.open("file2", "w") as f:
f.write("hello2")
files = ["memory://file", "memory://file2"]
datapipe = FSSpecFileOpener(files)
self.assertEqual([f.read() for _, f in datapipe], ["hello", "hello2"])
@skipIfNoFSSpec
def test_fsspec_memory_save(self):
def filepath_fn(name: str) -> str:
return "memory://" + name
name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
saver_dp = FSSpecSaver(source_dp, filepath_fn=filepath_fn, mode="wb")
self.assertEqual(set(saver_dp), {"memory://1.txt", "memory://2.txt"})
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import patch
import expecttest
from torchdata.datapipes.iter import HuggingFaceHubReader
try:
import datasets
HAS_DATASETS = True
except ImportError:
HAS_DATASETS = False
skipIfNoDatasets = unittest.skipIf(not HAS_DATASETS, "no datasets")
class TestHuggingFaceHubReader(expecttest.TestCase):
@skipIfNoDatasets
@patch("datasets.load_dataset")
def test_huggingface_hubreader(self, mock_load_dataset):
mock_load_dataset.return_value = datasets.Dataset.from_dict(
{
"id": ["7bd227d9-afc9-11e6-aba1-c4b301cdf627", "7bd22905-afc9-11e6-a5dc-c4b301cdf627"],
"package_name": ["com.mantz_it.rfanalyzer"] * 2,
}
)
datapipe = HuggingFaceHubReader("lhoestq/demo1", revision="branch", streaming=False, use_auth_token=True)
iterator = iter(datapipe)
elem = next(iterator)
assert type(elem) is dict
assert elem["id"] == "7bd227d9-afc9-11e6-aba1-c4b301cdf627"
assert elem["package_name"] == "com.mantz_it.rfanalyzer"
mock_load_dataset.assert_called_with(
path="lhoestq/demo1", streaming=False, revision="branch", use_auth_token=True
)
with self.assertRaises(StopIteration):
next(iterator)
next(iterator)
with self.assertRaises(TypeError):
len(datapipe)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
import string
import tempfile
import unittest
from torchdata.datapipes.iter import AISFileLister, AISFileLoader
try:
from aistore.client.api import Client
from aistore.client.errors import AISError, ErrBckNotFound
AIS_CLUSTER_ENDPT = "http://localhost:8080"
HAS_AIS = Client(AIS_CLUSTER_ENDPT).cluster().is_aistore_running()
except (ImportError, ConnectionError):
HAS_AIS = False
skipIfNoAIS = unittest.skipIf(not HAS_AIS, "AIS not running or library not installed")
@skipIfNoAIS
class TestAIStoreIODataPipe(unittest.TestCase):
def setUp(self):
# initialize client and create new bucket
self.client = Client(AIS_CLUSTER_ENDPT)
letters = string.ascii_lowercase
self.bck_name = "".join(random.choice(letters) for _ in range(10))
self.client.bucket(self.bck_name).create()
# create temp files
num_objs = 10
# create 10 objects in the `/temp` dir
for i in range(num_objs):
object_body = "test string" * random.randrange(1, 10)
content = object_body.encode("utf-8")
obj_name = f"temp/obj{ i }"
with tempfile.NamedTemporaryFile() as file:
file.write(content)
file.flush()
self.client.bucket(self.bck_name).object(obj_name).put(file.name)
        # create 10 objects in the `/` dir
for i in range(num_objs):
object_body = "test string" * random.randrange(1, 10)
content = object_body.encode("utf-8")
obj_name = f"obj{ i }"
with tempfile.NamedTemporaryFile() as file:
file.write(content)
file.flush()
self.client.bucket(self.bck_name).object(obj_name).put(file.name)
def tearDown(self):
# Try to destroy bucket and its items
try:
self.client.bucket(self.bck_name).delete()
except ErrBckNotFound:
pass
def test_ais_io_iterdatapipe(self):
prefixes = [
["ais://" + self.bck_name],
["ais://" + self.bck_name + "/"],
["ais://" + self.bck_name + "/temp/", "ais://" + self.bck_name + "/obj"],
]
# check if the created files exist
for prefix in prefixes:
urls = AISFileLister(url=AIS_CLUSTER_ENDPT, source_datapipe=prefix)
ais_loader = AISFileLoader(url=AIS_CLUSTER_ENDPT, source_datapipe=urls)
with self.assertRaises(TypeError):
len(urls)
self.assertEqual(len(list(urls)), 20)
self.assertEqual(sum(1 for _ in ais_loader), 20)
# check for incorrect prefixes
prefixes = ["ais://asdasd"]
# AISFileLister: Bucket not found
        with self.assertRaises(ErrBckNotFound) as ctx:
            list(AISFileLister(url=AIS_CLUSTER_ENDPT, source_datapipe=prefixes))
        self.assertEqual(ctx.exception.status_code, 404)
# AISFileLoader: incorrect inputs
url_list = [[""], ["ais:"], ["ais://"], ["s3:///unkown-bucket"]]
for url in url_list:
with self.assertRaises(AISError):
file_loader = AISFileLoader(url=AIS_CLUSTER_ENDPT, source_datapipe=url)
for _ in file_loader:
pass
if __name__ == "__main__":
unittest.main()
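# Illustrative sketch (not part of the test suite): list then load objects from
# an AIStore bucket. Assumes a cluster at AIS_CLUSTER_ENDPT and an existing
# bucket; the name "my-bucket" and the (url, stream) element layout are
# assumptions.
def _demo_ais_list_and_load():
    urls = AISFileLister(url=AIS_CLUSTER_ENDPT, source_datapipe=["ais://my-bucket"])
    loader = AISFileLoader(url=AIS_CLUSTER_ENDPT, source_datapipe=urls)
    for url, stream in loader:
        print(url, len(stream.read()))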
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from torchdata.dataloader2.linter import _check_shuffle_before_sharding
from torchdata.datapipes.iter import IterableWrapper, ShardingFilter, Shuffler
def dummy_fn(x):
return x
class LinterTest(unittest.TestCase):
def test_sharding_shuffle(self):
source_dp = IterableWrapper(list(range(20)))
# Single path
dp = source_dp.map(dummy_fn).shuffle()
self.assertTrue(_check_shuffle_before_sharding(dp))
dp = source_dp.map(dummy_fn)
self.assertTrue(_check_shuffle_before_sharding(dp))
dp = source_dp.map(dummy_fn).shuffle().sharding_filter()
self.assertTrue(_check_shuffle_before_sharding(dp))
dp = source_dp.map(dummy_fn).sharding_filter()
self.assertFalse(_check_shuffle_before_sharding(dp))
dp = source_dp.map(dummy_fn).sharding_filter().shuffle()
self.assertFalse(_check_shuffle_before_sharding(dp))
        # Multiple paths
def _multi_path_dp_1(shuffle):
s_dp = source_dp.shuffle() if shuffle else source_dp
dp1, dp2 = s_dp.unzip(2)
dp1 = dp1.sharding_filter()
dp2 = dp2.map(dummy_fn).sharding_filter()
dp = dp1.zip(dp2)
return dp
self.assertTrue(_check_shuffle_before_sharding(_multi_path_dp_1(True)))
self.assertFalse(_check_shuffle_before_sharding(_multi_path_dp_1(False)))
def _multi_path_dp_2(shuffle):
s_dp = source_dp.shuffle() if shuffle else source_dp
dp1, dp2 = s_dp.unzip(2)
dp1 = dp1.map(dummy_fn)
dp = dp1.zip(dp2).sharding_filter()
return dp
self.assertTrue(_check_shuffle_before_sharding(_multi_path_dp_2(True)))
self.assertFalse(_check_shuffle_before_sharding(_multi_path_dp_2(False)))
def _multi_path_dp_3(shuffle):
dp1, dp2 = source_dp.unzip(2)
dp1 = dp1.shuffle() if shuffle else dp1
dp1 = dp1.map(dummy_fn).sharding_filter()
            dp2 = dp2.shuffle() if shuffle else dp2
dp2 = dp2.sharding_filter()
dp = dp1.zip(dp2).map(dummy_fn)
return dp
self.assertTrue(_check_shuffle_before_sharding(_multi_path_dp_3(True)))
self.assertFalse(_check_shuffle_before_sharding(_multi_path_dp_3(False)))
# Partial paths
dp1, dp2 = source_dp.unzip(2)
dp1 = dp1.shuffle().map(dummy_fn)
dp = dp1.zip(dp2).sharding_filter()
self.assertFalse(_check_shuffle_before_sharding(dp))
if __name__ == "__main__":
unittest.main()
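# Illustrative sketch (not part of the test suite): the pattern the linter
# enforces -- shuffle *before* sharding, so each shard draws from the whole,
# randomized dataset rather than a fixed contiguous slice.
def _demo_shuffle_before_sharding():
    good_dp = IterableWrapper(range(100)).shuffle().sharding_filter()
    bad_dp = IterableWrapper(range(100)).sharding_filter().shuffle()
    assert _check_shuffle_before_sharding(good_dp)
    assert not _check_shuffle_before_sharding(bad_dp)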
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from torchdata.dataloader2.random import SeedGenerator
from torchdata.dataloader2.random._philox import PhiloxEngine
class TestPhilox(unittest.TestCase):
def test_philox_engine_generate(self):
prng = PhiloxEngine()
with self.assertRaisesRegex(AssertionError, "Please provide seed"):
prng.generate()
prng.seed(123)
s0 = [prng.generate() for _ in range(10)]
# Same seed
prng = PhiloxEngine(seed=123)
s1 = [prng.generate() for _ in range(10)]
self.assertEqual(s0, s1)
# Reset
prng.seed(123)
s2 = [prng.generate() for _ in range(10)]
self.assertEqual(s1, s2)
# Different seeds
prng = PhiloxEngine(seed=321)
s3 = [prng.generate() for _ in range(10)]
self.assertNotEqual(s0, s3)
def test_philox_engine_spawn(self):
prng = PhiloxEngine()
with self.assertRaisesRegex(AssertionError, "Expected a non-negative value"):
prng.spawn(-1)
with self.assertRaisesRegex(AssertionError, "Please provide seed"):
prng.spawn(0)
prng.seed(123)
s0 = [prng.spawn(i)._seed for i in range(10)]
# Same seed
prng = PhiloxEngine(seed=123)
s1 = [prng.spawn(i)._seed for i in range(10)]
self.assertEqual(s0, s1)
# Generate after spawn
sprng1 = prng.spawn(1)
sprng2 = prng.spawn(1)
ss1 = [sprng1.generate() for _ in range(10)]
ss2 = [sprng2.generate() for _ in range(10)]
self.assertEqual(ss1, ss2)
sprng3 = prng.spawn(2)
ss3 = [sprng3.generate() for _ in range(10)]
self.assertNotEqual(ss1, ss3)
# Reset
prng.seed(123)
s2 = [prng.spawn(i)._seed for i in range(10)]
self.assertEqual(s1, s2)
# Different seeds
prng = PhiloxEngine(seed=321)
s3 = [prng.spawn(i)._seed for i in range(10)]
self.assertNotEqual(s0, s3)
class TestSeedGenerator(unittest.TestCase):
def test_seed_generator_generate(self):
# Generate seeds
sg = SeedGenerator(123)
s0 = [sg.generate_seed() for _ in range(10)]
# Reset
sg.seed(123)
s1 = [sg.generate_seed() for _ in range(10)]
self.assertEqual(s0, s1)
# Different Seeds
sg.seed(321)
s2 = [sg.generate_seed() for _ in range(10)]
self.assertNotEqual(s0, s2)
def test_seed_generator_spawn(self):
sg = SeedGenerator(123)
# Spawn new Seed Generators
sg1 = sg.spawn(1)
sg2 = sg.spawn(2)
for _ in range(10):
self.assertNotEqual(sg1.generate_seed(), sg2.generate_seed())
# Generate shared seeds
self.assertEqual(sg1.generate_shared_seed(), sg2.generate_shared_seed())
sg1_1 = sg.spawn(1)
sg1_2 = sg.spawn(1)
for _ in range(10):
self.assertEqual(sg1_1.generate_seed(), sg1_2.generate_seed())
if __name__ == "__main__":
unittest.main()
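# Illustrative sketch (not part of the test suite): one seed per worker via
# spawn, reproducible under the same root seed. num_workers is an assumed
# parameter name.
def _demo_seed_generator_per_worker(num_workers=4):
    root = SeedGenerator(123)
    worker_seeds = [root.spawn(rank).generate_seed() for rank in range(num_workers)]
    # Spawning again with the same ranks reproduces the same per-worker seeds.
    assert worker_seeds == [root.spawn(rank).generate_seed() for rank in range(num_workers)]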
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import io
import itertools
import pickle
import unittest
import warnings
from collections import defaultdict
from functools import partial
from typing import Dict, NamedTuple
import expecttest
import torch
import torchdata
from _utils._common_utils_for_test import IDP_NoLen, reset_after_n_next_calls
from torch.testing._internal.common_utils import suppress_warnings
from torch.utils.data.datapipes.utils.snapshot import _simple_graph_snapshot_restoration
from torchdata.datapipes.iter import (
BucketBatcher,
Cycler,
Header,
IndexAdder,
InMemoryCacheHolder,
IterableWrapper,
IterDataPipe,
IterKeyZipper,
LineReader,
MapKeyZipper,
MaxTokenBucketizer,
ParagraphAggregator,
Repeater,
Rows2Columnar,
SampleMultiplexer,
ShardExpander,
UnZipper,
)
from torchdata.datapipes.map import MapDataPipe, SequenceWrapper
skipIfNoCUDA = unittest.skipIf(not torch.cuda.is_available(), "CUDA is not available")
def test_torchdata_pytorch_consistency() -> None:
def extract_datapipe_names(module):
return {
name
for name, dp_type in module.__dict__.items()
if not name.startswith("_") and isinstance(dp_type, type) and issubclass(dp_type, IterDataPipe)
}
pytorch_datapipes = extract_datapipe_names(torch.utils.data.datapipes.iter)
torchdata_datapipes = extract_datapipe_names(torchdata.datapipes.iter)
missing_datapipes = pytorch_datapipes - torchdata_datapipes
deprecated_datapipes = {"FileLoader"}
for dp in deprecated_datapipes:
if dp in missing_datapipes:
            missing_datapipes.remove(dp)
    if missing_datapipes:
msg = (
"The following datapipes are exposed under `torch.utils.data.datapipes.iter`, "
"but not under `torchdata.datapipes.iter`:\n"
)
raise AssertionError(msg + "\n".join(sorted(missing_datapipes)))
def _convert_to_tensor(data):
if isinstance(data, dict):
return {k: _convert_to_tensor(v) for k, v in data.items()}
elif isinstance(data, list):
return [_convert_to_tensor(v) for v in data]
return torch.tensor(data)
async def _async_mul_ten(x):
await asyncio.sleep(0.1)
return x * 10
async def _async_x_mul_y(x, y):
await asyncio.sleep(0.1)
return x * y
class NamedTensors(NamedTuple):
x: torch.Tensor
y: torch.Tensor
class TestIterDataPipe(expecttest.TestCase):
def test_in_memory_cache_holder_iterdatapipe(self) -> None:
source_dp = IterableWrapper(range(10))
cache_dp = source_dp.in_memory_cache(size=5)
# Functional Test: Cache DP should just return the data without changing the values
res1 = list(cache_dp)
self.assertEqual(list(range(10)), res1)
# Functional Test: Ensure the objects are the same ones from source DataPipe
res1 = list(cache_dp)
res2 = list(cache_dp)
        self.assertTrue(all(id(source) == id(cache) for source, cache in zip(source_dp, res1)))
        self.assertTrue(all(id(source) == id(cache) for source, cache in zip(source_dp, res2)))
# TODO(122): Figure out a way to consistently test caching when size is in megabytes
# Reset Test: reset the DataPipe after reading part of it
cache_dp = InMemoryCacheHolder(source_dp, size=5)
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(cache_dp, n_elements_before_reset)
self.assertEqual(list(range(5)), res_before_reset)
self.assertEqual(list(range(10)), res_after_reset)
# __len__ Test: inherits length from source_dp
self.assertEqual(10, len(cache_dp))
# __len__ Test: source_dp has no len and cache is not yet loaded
source_dp_no_len = IDP_NoLen(range(10))
cache_dp = InMemoryCacheHolder(source_dp_no_len, size=5)
with self.assertRaisesRegex(TypeError, "doesn't have valid length until the cache is loaded"):
len(cache_dp)
# __len__ Test: source_dp has no len but we still can calculate after cache is loaded
list(cache_dp)
self.assertEqual(10, len(cache_dp))
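    def _sketch_in_memory_cache_usage(self):
        # Illustrative sketch (not a test): cache an expensive upstream pipe so
        # later epochs replay from memory; per the TODO above, ``size`` is in
        # megabytes. The ``map`` stands in for any costly transform.
        expensive_dp = IterableWrapper(range(10)).map(lambda x: x * x)
        cached_dp = expensive_dp.in_memory_cache(size=5)
        _ = list(cached_dp)  # first pass computes and fills the cache
        _ = list(cached_dp)  # later passes are served from memory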
def test_iter_key_zipper_iterdatapipe(self) -> None:
source_dp = IterableWrapper(range(10))
ref_dp = IterableWrapper(range(20))
ref_dp2 = IterableWrapper(range(20))
# Functional Test: Output should be a zip list of tuple
zip_dp = source_dp.zip_with_iter(
ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=100
)
self.assertEqual([(i, i) for i in range(10)], list(zip_dp))
# Functional Test: keep_key=True, and key should show up as the first element
zip_dp_w_key = source_dp.zip_with_iter(
ref_datapipe=ref_dp2, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=True, buffer_size=10
)
self.assertEqual([(i, (i, i)) for i in range(10)], list(zip_dp_w_key))
# Functional Test: using a different merge function
def merge_to_string(item1, item2):
return f"{item1},{item2}"
zip_dp_w_str_merge = source_dp.zip_with_iter(
ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, buffer_size=10, merge_fn=merge_to_string
)
self.assertEqual([f"{i},{i}" for i in range(10)], list(zip_dp_w_str_merge))
# Functional Test: using a different merge function and keep_key=True
zip_dp_w_key_str_merge = source_dp.zip_with_iter(
ref_datapipe=ref_dp,
key_fn=lambda x: x,
ref_key_fn=lambda x: x,
keep_key=True,
buffer_size=10,
merge_fn=merge_to_string,
)
self.assertEqual([(i, f"{i},{i}") for i in range(10)], list(zip_dp_w_key_str_merge))
# Functional Test: testing nested zipping
zip_dp = source_dp.zip_with_iter(
ref_datapipe=ref_dp, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=100
)
# Without a custom merge function, there will be nested tuples
zip_dp2 = zip_dp.zip_with_iter(
ref_datapipe=ref_dp2, key_fn=lambda x: x[0], ref_key_fn=lambda x: x, keep_key=False, buffer_size=100
)
self.assertEqual([((i, i), i) for i in range(10)], list(zip_dp2))
# With a custom merge function, nesting can be prevented
zip_dp2_w_merge = zip_dp.zip_with_iter(
ref_datapipe=ref_dp2,
key_fn=lambda x: x[0],
ref_key_fn=lambda x: x,
keep_key=False,
buffer_size=100,
merge_fn=lambda x, y: list(x) + [y],
)
self.assertEqual([[i, i, i] for i in range(10)], list(zip_dp2_w_merge))
# Functional Test: element is in source but missing in reference
ref_dp_missing = IterableWrapper(range(1, 10))
zip_dp = source_dp.zip_with_iter(
ref_datapipe=ref_dp_missing, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=100
)
with self.assertRaisesRegex(BufferError, r"No matching key can be found"):
list(zip_dp)
# Functional Test: Buffer is not large enough, hence, element can't be found and raises error
ref_dp_end = IterableWrapper(list(range(1, 10)) + [0])
zip_dp = source_dp.zip_with_iter(
ref_datapipe=ref_dp_end, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=5
)
it = iter(zip_dp)
with warnings.catch_warnings(record=True) as wa:
# In order to find '0' at the end, the buffer is filled, hence the warning
# and ref_dp is fully traversed
self.assertEqual(
(
0,
0,
),
next(it),
)
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Buffer reaches the upper limit")
with self.assertRaisesRegex(BufferError, r"No matching key can be found"):
            # '1' cannot be found because its value was discarded when the buffer filled up
next(it)
# Functional Test: Buffer is just big enough
zip_dp = source_dp.zip_with_iter(
ref_datapipe=ref_dp_end, key_fn=lambda x: x, ref_key_fn=lambda x: x, keep_key=False, buffer_size=10
)
self.assertEqual([(i, i) for i in range(10)], list(zip_dp))
# Reset Test: reset the DataPipe after reading part of it
zip_dp = IterKeyZipper(
source_datapipe=source_dp,
ref_datapipe=ref_dp,
key_fn=lambda x: x,
ref_key_fn=lambda x: x,
keep_key=False,
buffer_size=10,
)
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(zip_dp, n_elements_before_reset)
self.assertEqual([(i, i) for i in range(5)], res_before_reset)
self.assertEqual([(i, i) for i in range(10)], res_after_reset)
# __len__ Test: inherits length from source_dp
self.assertEqual(10, len(zip_dp))
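    def _sketch_iter_key_zipper_usage(self):
        # Illustrative sketch (not a test): join two streams on a shared key,
        # tolerating out-of-order reference elements via the buffer. The
        # ``features``/``labels`` data is hypothetical.
        features = IterableWrapper([(0, "f0"), (1, "f1")])
        labels = IterableWrapper([(1, "l1"), (0, "l0")])
        joined = features.zip_with_iter(
            ref_datapipe=labels, key_fn=lambda x: x[0], ref_key_fn=lambda x: x[0], buffer_size=10
        )
        # yields ((0, "f0"), (0, "l0")) then ((1, "f1"), (1, "l1"))
        _ = list(joined)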
def test_map_key_zipper_datapipe(self) -> None:
source_dp = IterableWrapper(range(10))
map_dp = SequenceWrapper(["even", "odd"])
# Functional Test: ensure the hash join is working and return tuple by default
def odd_even(i: int) -> int:
return i % 2
result_dp = source_dp.zip_with_map(map_dp, odd_even)
def odd_even_string(i: int) -> str:
return "odd" if i % 2 else "even"
expected_res = [(i, odd_even_string(i)) for i in range(10)]
self.assertEqual(expected_res, list(result_dp))
# Functional Test: ensure that a custom merge function works
def custom_merge(a, b):
return f"{a} is a {b} number."
result_dp = source_dp.zip_with_map(map_dp, odd_even, custom_merge)
expected_res2 = [f"{i} is a {odd_even_string(i)} number." for i in range(10)]
self.assertEqual(expected_res2, list(result_dp))
# Functional Test: raises error when key is invalid
def odd_even_bug(i: int) -> int:
return 2 if i == 0 else i % 2
result_dp = MapKeyZipper(source_dp, map_dp, odd_even_bug)
it = iter(result_dp)
with self.assertRaisesRegex(KeyError, "is not a valid key in the given MapDataPipe"):
next(it)
# Functional test: ensure that keep_key option works
result_dp = source_dp.zip_with_map(map_dp, odd_even, keep_key=True)
expected_res_keep_key = [(key, (i, odd_even_string(i))) for i, key in zip(range(10), [0, 1] * 5)]
self.assertEqual(expected_res_keep_key, list(result_dp))
# Reset Test:
n_elements_before_reset = 4
result_dp = source_dp.zip_with_map(map_dp, odd_even)
res_before_reset, res_after_reset = reset_after_n_next_calls(result_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
# __len__ Test: returns the length of source DataPipe
result_dp = source_dp.zip_with_map(map_dp, odd_even)
self.assertEqual(len(source_dp), len(result_dp))
def test_prefetcher_iterdatapipe(self) -> None:
source_dp = IterableWrapper(range(5000))
prefetched_dp = source_dp.prefetch(10)
# check if early termination resets child thread properly
for _, _ in zip(range(100), prefetched_dp):
pass
expected = list(source_dp)
actual = list(prefetched_dp)
self.assertEqual(expected, actual)
# __len__ Test: returns the same length as source
self.assertEqual(len(source_dp), len(prefetched_dp))
def test_repeater_iterdatapipe(self) -> None:
source_dp = IterableWrapper(range(5))
# Functional Test: repeat for correct number of times
repeater_dp = source_dp.repeat(3)
self.assertEqual(
list(itertools.chain.from_iterable(itertools.repeat(x, 3) for x in range(5))), list(repeater_dp)
)
# Functional Test: `times` must be > 1
with self.assertRaisesRegex(ValueError, "The number of repetition must be > 1"):
source_dp.repeat(1)
# Reset Test:
repeater_dp = Repeater(source_dp, times=2)
n_elements_before_reset = 4
res_before_reset, res_after_reset = reset_after_n_next_calls(repeater_dp, n_elements_before_reset)
self.assertEqual([0, 0, 1, 1], res_before_reset)
self.assertEqual(list(itertools.chain.from_iterable(itertools.repeat(x, 2) for x in range(5))), res_after_reset)
# __len__ Test: returns correct length
self.assertEqual(10, len(repeater_dp))
def test_cycler_iterdatapipe(self) -> None:
source_dp = IterableWrapper(range(5))
# Functional Test: cycle for finite number of times and ends
cycler_dp = source_dp.cycle(3)
self.assertEqual(list(range(5)) * 3, list(cycler_dp))
# Functional Test: cycle for indefinitely
cycler_dp = source_dp.cycle()
it = iter(cycler_dp)
for expected_val in list(range(5)) * 10:
self.assertEqual(expected_val, next(it))
# Functional Test: zero is allowed but immediately triggers StopIteration
cycler_dp = source_dp.cycle(0)
self.assertEqual([], list(cycler_dp))
# Functional Test: negative value is not allowed
with self.assertRaisesRegex(ValueError, "Expected non-negative count"):
source_dp.cycle(-1)
# Reset Test:
cycler_dp = Cycler(source_dp, count=2)
n_elements_before_reset = 4
res_before_reset, res_after_reset = reset_after_n_next_calls(cycler_dp, n_elements_before_reset)
self.assertEqual(list(range(4)), res_before_reset)
self.assertEqual(list(range(5)) * 2, res_after_reset)
# __len__ Test: returns length when count is not None
self.assertEqual(10, len(cycler_dp))
# __len__ Test: inherits length from source_dp
cycler_dp = Cycler(source_dp)
with self.assertRaisesRegex(TypeError, "instance cycles forever, and therefore doesn't have valid length"):
len(cycler_dp)
def test_header_iterdatapipe(self) -> None:
# Functional Test: ensure the limit is enforced
source_dp = IterableWrapper(range(20))
header_dp = source_dp.header(5)
self.assertEqual(list(range(5)), list(header_dp))
# Functional Test: ensure it works when the source has less elements than the limit
source_dp = IterableWrapper(range(5))
header_dp = source_dp.header(100)
self.assertEqual(list(range(5)), list(header_dp))
# Functional Test: ensure the source is not modified if limit is set to None
source_dp = IterableWrapper(range(5))
header_dp = source_dp.header(None)
self.assertEqual(list(range(5)), list(header_dp))
# Reset Test:
source_dp = IterableWrapper(range(20))
header_dp = Header(source_dp, 5)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(header_dp, n_elements_before_reset)
self.assertEqual(list(range(2)), res_before_reset)
self.assertEqual(list(range(5)), res_after_reset)
self.assertEqual(list(range(5)), list(header_dp))
# __len__ Test: returns the limit when it is less than the length of source
self.assertEqual(5, len(header_dp))
# __len__ Test: returns the length of source when it is less than the limit
header_dp = source_dp.header(30)
self.assertEqual(20, len(header_dp))
# __len__ Test: returns the length of source when limit is set to None
header_dp = source_dp.header(None)
self.assertEqual(20, len(header_dp))
# __len__ Test: returns limit if source doesn't have length
source_dp_NoLen = IDP_NoLen(list(range(20)))
header_dp = source_dp_NoLen.header(30)
with warnings.catch_warnings(record=True) as wa:
self.assertEqual(30, len(header_dp))
self.assertEqual(len(wa), 1)
self.assertRegex(
str(wa[0].message), r"length of this HeaderIterDataPipe is inferred to be equal to its limit"
)
# __len__ Test: raises TypeError if source doesn't have length and limit is set to None
header_dp = source_dp_NoLen.header(None)
with self.assertRaisesRegex(TypeError, "The length of this HeaderIterDataPipe cannot be determined."):
len(header_dp)
# __len__ Test: returns limit if source doesn't have length, even when it has been iterated through once
header_dp = source_dp_NoLen.header(30)
for _ in header_dp:
pass
self.assertEqual(30, len(header_dp))
def test_enumerator_iterdatapipe(self) -> None:
letters = "abcde"
source_dp = IterableWrapper(letters)
enum_dp = source_dp.enumerate()
# Functional Test: ensure that the correct index value is added to each element (tuple)
self.assertEqual([(0, "a"), (1, "b"), (2, "c"), (3, "d"), (4, "e")], list(enum_dp))
# Functional Test: start index from non-zero
enum_dp = source_dp.enumerate(starting_index=10)
self.assertEqual([(10, "a"), (11, "b"), (12, "c"), (13, "d"), (14, "e")], list(enum_dp))
# Reset Test:
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(enum_dp, n_elements_before_reset)
self.assertEqual([(10, "a"), (11, "b")], res_before_reset)
self.assertEqual([(10, "a"), (11, "b"), (12, "c"), (13, "d"), (14, "e")], res_after_reset)
# __len__ Test: returns length of source DataPipe
self.assertEqual(5, len(enum_dp))
def test_index_adder_iterdatapipe(self) -> None:
letters = "abcdefg"
source_dp = IterableWrapper([{i: i} for i in letters])
index_adder_dp = source_dp.add_index()
it = iter(index_adder_dp)
def dict_content_test_helper(iterator):
for i, curr_dict in enumerate(iterator):
self.assertEqual(i, curr_dict["index"])
self.assertTrue(letters[i] in curr_dict)
# Functional Test: ensure that the correct index value is added to each element (dict)
dict_content_test_helper(it)
        # Functional Test: raises error when the elements of source_dp are not of type Dict
source_dp = IterableWrapper(range(10))
index_adder_dp = source_dp.add_index()
it = iter(index_adder_dp)
with self.assertRaisesRegex(NotImplementedError, "We only support adding index to row or batch in dict type"):
next(it)
# Reset Test
source_dp = IterableWrapper([{i: i} for i in "abcdefg"])
index_adder_dp = IndexAdder(source_dp)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(index_adder_dp, n_elements_before_reset)
dict_content_test_helper(iter(res_before_reset))
dict_content_test_helper(iter(res_after_reset))
# __len__ Test: returns length of source DataPipe
self.assertEqual(7, len(index_adder_dp))
def test_line_reader_iterdatapipe(self) -> None:
text1 = "Line1\nLine2"
text2 = "Line2,1\r\nLine2,2\r\nLine2,3"
# Functional Test: read lines correctly
source_dp = IterableWrapper([("file1", io.StringIO(text1)), ("file2", io.StringIO(text2))])
line_reader_dp = source_dp.readlines()
expected_result = [("file1", line) for line in text1.splitlines()] + [
("file2", line) for line in text2.splitlines()
]
self.assertEqual(expected_result, list(line_reader_dp))
# Functional Test: strip new lines for bytes
source_dp = IterableWrapper(
[("file1", io.BytesIO(text1.encode("utf-8"))), ("file2", io.BytesIO(text2.encode("utf-8")))]
)
line_reader_dp = source_dp.readlines()
expected_result_bytes = [("file1", line.encode("utf-8")) for line in text1.splitlines()] + [
("file2", line.encode("utf-8")) for line in text2.splitlines()
]
self.assertEqual(expected_result_bytes, list(line_reader_dp))
# Functional Test: do not strip new lines
source_dp = IterableWrapper([("file1", io.StringIO(text1)), ("file2", io.StringIO(text2))])
line_reader_dp = source_dp.readlines(strip_newline=False)
expected_result = [
("file1", "Line1\n"),
("file1", "Line2"),
("file2", "Line2,1\r\n"),
("file2", "Line2,2\r\n"),
("file2", "Line2,3"),
]
self.assertEqual(expected_result, list(line_reader_dp))
# Reset Test:
source_dp = IterableWrapper([("file1", io.StringIO(text1)), ("file2", io.StringIO(text2))])
line_reader_dp = LineReader(source_dp, strip_newline=False)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(line_reader_dp, n_elements_before_reset)
self.assertEqual(expected_result[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_result, res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "has no len"):
len(line_reader_dp)
def test_paragraph_aggregator_iterdatapipe(self) -> None:
# Functional Test: aggregate lines correctly
source_dp = IterableWrapper(
[("file1", "Line1"), ("file1", "Line2"), ("file2", "Line2,1"), ("file2", "Line2,2"), ("file2", "Line2,3")]
)
para_agg_dp = source_dp.lines_to_paragraphs()
self.assertEqual([("file1", "Line1\nLine2"), ("file2", "Line2,1\nLine2,2\nLine2,3")], list(para_agg_dp))
# Functional Test: aggregate lines correctly with different joiner
para_agg_dp = source_dp.lines_to_paragraphs(joiner=lambda ls: " ".join(ls))
self.assertEqual([("file1", "Line1 Line2"), ("file2", "Line2,1 Line2,2 Line2,3")], list(para_agg_dp))
# Reset Test: each yield is for a single file
para_agg_dp = ParagraphAggregator(source_dp)
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(para_agg_dp, n_elements_before_reset)
self.assertEqual([("file1", "Line1\nLine2")], res_before_reset)
self.assertEqual([("file1", "Line1\nLine2"), ("file2", "Line2,1\nLine2,2\nLine2,3")], res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "has no len"):
len(para_agg_dp)
def test_rows_to_columnar_iterdatapipe(self) -> None:
# Functional Test: working with DataPipe with dict
column_names_dict = {"a", "b", "c"}
source_dp = IterableWrapper(
[
[{l: i for i, l in enumerate("abc")}, {l: i * 10 for i, l in enumerate("abc")}],
[{l: i + 100 for i, l in enumerate("abc")}, {l: (i + 100) * 10 for i, l in enumerate("abc")}],
]
)
result_dp = source_dp.rows2columnar(column_names_dict)
batch1 = defaultdict(list, {"a": [0, 0], "b": [1, 10], "c": [2, 20]})
batch2 = defaultdict(list, {"a": [100, 1000], "b": [101, 1010], "c": [102, 1020]})
expected_output = [batch1, batch2]
self.assertEqual(expected_output, list(result_dp))
# Functional Test: working with DataPipe with list
column_names_list = ["a", "b", "c"]
source_dp = IterableWrapper(
[
[[i for i, _ in enumerate("abc")], [i * 10 for i, _ in enumerate("abc")]],
[[i + 100 for i, _ in enumerate("abc")], [(i + 100) * 10 for i, _ in enumerate("abc")]],
]
)
result_dp = source_dp.rows2columnar(column_names_list)
self.assertEqual(expected_output, list(result_dp))
# Reset Test:
result_dp = Rows2Columnar(source_dp, column_names_list)
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(result_dp, n_elements_before_reset)
self.assertEqual([expected_output[0]], res_before_reset)
self.assertEqual(expected_output, res_after_reset)
# __len__ Test: returns length of source DataPipe
self.assertEqual(2, len(result_dp))
def test_sample_multiplexer_iterdatapipe(self) -> None:
# Functional Test: yields all values from the sources
source_dp1 = IterableWrapper([0] * 10)
source_dp2 = IterableWrapper([1] * 10)
d: Dict[IterDataPipe, float] = {source_dp1: 99999999, source_dp2: 0.0000001}
sample_mul_dp = SampleMultiplexer(pipes_to_weights_dict=d, seed=0)
result = list(sample_mul_dp)
self.assertEqual([0] * 10 + [1] * 10, result)
# Functional Test: raises error for empty dict
with self.assertRaisesRegex(ValueError, "Empty dictionary"):
SampleMultiplexer(pipes_to_weights_dict={}, seed=0) # type: ignore[arg-type]
# Functional Test: raises error for negative or zero weight
d = {source_dp1: 99999999, source_dp2: 0}
with self.assertRaisesRegex(ValueError, "Expecting a positive and non-zero weight"):
SampleMultiplexer(pipes_to_weights_dict=d, seed=0)
# Reset Test
d = {source_dp1: 99999999, source_dp2: 0.0000001}
sample_mul_dp = SampleMultiplexer(pipes_to_weights_dict=d, seed=0)
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(sample_mul_dp, n_elements_before_reset)
self.assertEqual([0] * n_elements_before_reset, res_before_reset)
self.assertEqual([0] * 10 + [1] * 10, res_after_reset)
# __len__ Test: returns the sum of the lengths of the sources
self.assertEqual(20, len(sample_mul_dp))
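    def _sketch_sample_multiplexer_usage(self):
        # Illustrative sketch (not a test): draw from two sources with a 9:1
        # weighting; weights are relative and need not sum to 1.
        majority = IterableWrapper(["a"] * 90)
        minority = IterableWrapper(["b"] * 10)
        mixed = SampleMultiplexer(pipes_to_weights_dict={majority: 0.9, minority: 0.1}, seed=0)
        _ = list(mixed)  # all 100 elements, interleaved roughly 9:1 until a source is exhausted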
def test_in_batch_shuffler_iterdatapipe(self):
input_dp = IterableWrapper(list(range(23))).batch(3)
expected = list(input_dp)
# Functional Test: No seed
shuffler_dp = input_dp.in_batch_shuffle()
for exp, res in zip(expected, shuffler_dp):
self.assertEqual(sorted(res), exp)
# Functional Test: With global seed
torch.manual_seed(123)
res = list(shuffler_dp)
torch.manual_seed(123)
self.assertEqual(list(shuffler_dp), res)
# Functional Test: Set seed
shuffler_dp = input_dp.in_batch_shuffle().set_seed(123)
res = list(shuffler_dp)
shuffler_dp.set_seed(123)
self.assertEqual(list(shuffler_dp), res)
# Functional Test: deactivate shuffling via set_shuffle
unshuffled_dp = shuffler_dp.set_shuffle(False)
self.assertEqual(list(unshuffled_dp), expected)
# Reset Test:
shuffler_dp = input_dp.in_batch_shuffle()
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(shuffler_dp, n_elements_before_reset)
self.assertEqual(5, len(res_before_reset))
for exp, res in zip(expected, res_before_reset):
self.assertEqual(sorted(res), exp)
for exp, res in zip(expected, res_after_reset):
self.assertEqual(sorted(res), exp)
# __len__ Test: returns the length of the input DataPipe
shuffler_dp = input_dp.in_batch_shuffle()
self.assertEqual(8, len(shuffler_dp))
# Serialization Test
from torch.utils.data.datapipes._hook_iterator import _SnapshotState
shuffler_dp = input_dp.in_batch_shuffle()
it = iter(shuffler_dp)
for _ in range(2):
next(it)
shuffler_dp_copy = pickle.loads(pickle.dumps(shuffler_dp))
_simple_graph_snapshot_restoration(shuffler_dp_copy.datapipe, shuffler_dp.datapipe._number_of_samples_yielded)
exp = list(it)
shuffler_dp_copy._snapshot_state = _SnapshotState.Restored
self.assertEqual(exp, list(shuffler_dp_copy))
def test_bucket_batcher_iterdatapipe(self) -> None:
source_dp = IterableWrapper(range(10))
# Functional Test: drop last reduces length
batch_dp = source_dp.bucketbatch(
batch_size=3, drop_last=True, batch_num=100, bucket_num=1, use_in_batch_shuffle=True
)
self.assertEqual(9, len(list(batch_dp.unbatch())))
# Functional Test: drop last is False preserves length
batch_dp = source_dp.bucketbatch(
batch_size=3, drop_last=False, batch_num=100, bucket_num=1, use_in_batch_shuffle=False
)
self.assertEqual(10, len(list(batch_dp.unbatch())))
def _return_self(x):
return x
# Functional Test: using sort_key, with in_batch_shuffle
batch_dp = source_dp.bucketbatch(
batch_size=3, drop_last=True, batch_num=100, bucket_num=1, use_in_batch_shuffle=True, sort_key=_return_self
)
# bucket_num = 1 means there will be no shuffling if a sort key is given
self.assertEqual([[0, 1, 2], [3, 4, 5], [6, 7, 8]], list(batch_dp))
self.assertEqual(9, len(list(batch_dp.unbatch())))
# Functional Test: using sort_key, without use_in_batch_shuffle
batch_dp = source_dp.bucketbatch(
batch_size=3, drop_last=True, batch_num=100, bucket_num=2, use_in_batch_shuffle=False, sort_key=_return_self
)
self.assertEqual(9, len(list(batch_dp.unbatch())))
# Reset Test:
batch_dp = BucketBatcher(
source_dp,
batch_size=3,
drop_last=True,
batch_num=100,
bucket_num=2,
use_in_batch_shuffle=False,
sort_key=_return_self,
)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(batch_dp, n_elements_before_reset)
self.assertEqual(n_elements_before_reset, len(res_before_reset))
self.assertEqual(6, len([item for batch in res_before_reset for item in batch]))
self.assertEqual(3, len(res_after_reset))
self.assertEqual(9, len([item for batch in res_after_reset for item in batch]))
# __len__ Test: returns the number of batches
with self.assertRaises(TypeError):
len(batch_dp)
def test_max_token_bucketizer_iterdatapipe(self) -> None:
source_data = ["1" * d for d in range(1, 6)] + ["2" * d for d in range(1, 6)]
source_dp = IterableWrapper(source_data)
# Functional Test: Invalid arguments
with self.assertRaisesRegex(ValueError, "``min_len`` should be larger than 0"):
source_dp.max_token_bucketize(max_token_count=2, min_len=-1)
with self.assertRaisesRegex(ValueError, "``min_len`` should be larger than 0"):
source_dp.max_token_bucketize(max_token_count=2, min_len=3, max_len=2)
with self.assertRaises(ValueError, msg="``max_token_count`` must be equal to or greater than ``max_len``."):
source_dp.max_token_bucketize(max_token_count=2, max_len=3)
def _validate_batch_size(res, exp_batch_len, len_fn=lambda d: len(d)):
self.assertEqual(len(res), len(exp_batch_len))
for batch, exp_token_lens in zip(res, exp_batch_len):
self.assertEqual(len(batch), len(exp_token_lens))
for token, exp_token_len in zip(batch, exp_token_lens):
self.assertEqual(len_fn(token), exp_token_len)
# Functional Test: Filter out min_len
batch_dp = source_dp.max_token_bucketize(max_token_count=5, min_len=2, buffer_size=10)
exp_batch_len = [(2, 2), (3,), (3,), (4,), (4,), (5,), (5,)]
_validate_batch_size(list(batch_dp), exp_batch_len)
# Functional Test: Filter out max_len
batch_dp = source_dp.max_token_bucketize(max_token_count=5, max_len=4, buffer_size=10)
exp_batch_len = [(1, 1, 2), (2, 3), (3,), (4,), (4,)]
_validate_batch_size(list(batch_dp), exp_batch_len)
def _custom_len_fn(token):
return len(token) + 1
# Functional Test: Custom length function
batch_dp = source_dp.max_token_bucketize(max_token_count=7, len_fn=_custom_len_fn, buffer_size=10)
exp_batch_len = [(1, 1, 2), (2, 3), (3,), (4,), (4,), (5,), (5,)]
_validate_batch_size(list(batch_dp), exp_batch_len)
# Functional Test: Small buffer
batch_dp = source_dp.max_token_bucketize(max_token_count=10, buffer_size=4)
exp_batch_len = [(1, 2, 1, 2, 3), (3, 4), (4, 5), (5,)]
_validate_batch_size(list(batch_dp), exp_batch_len)
# Reset Test:
batch_dp = MaxTokenBucketizer(source_dp, max_token_count=5, buffer_size=10)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(batch_dp, n_elements_before_reset)
exp_batch_len_before_reset = [(1, 1, 2), (2, 3)]
exp_batch_len_after_reset = [(1, 1, 2), (2, 3), (3,), (4,), (4,), (5,), (5,)]
_validate_batch_size(res_before_reset, exp_batch_len_before_reset)
_validate_batch_size(res_after_reset, exp_batch_len_after_reset)
        # Functional test: padding would push the batch over max_token_count, but padding is not counted by default
source_data = ["111", "1111", "11111"] # 3, 4, 5
source_dp = IterableWrapper(source_data)
batch_dp = source_dp.max_token_bucketize(max_token_count=7)
exp_batch_len = [(3, 4), (5,)]
_validate_batch_size(list(batch_dp), exp_batch_len)
        # Functional test: with include_padding=True, a batch is costed as batch size * longest sample in the batch
source_data = ["111", "111", "111", "1111"] # 3, 3, 3, 4
source_dp = IterableWrapper(source_data)
batch_dp = source_dp.max_token_bucketize(max_token_count=7, include_padding=True)
exp_batch_len = [(3, 3), (3,), (4,)]
_validate_batch_size(list(batch_dp), exp_batch_len)
# Functional test: sample length exceeding max_token_count
source_data = ["111"]
source_dp = IterableWrapper(source_data)
batch_dp = source_dp.max_token_bucketize(max_token_count=2)
exp_batch = []
self.assertEqual(list(batch_dp), exp_batch)
# Functional test: incomparable data for heapq
def _custom_len_fn(data):
return data["len"]
source_data = [{"len": 1}, {"len": 2}, {"len": 1}, {"len": 3}, {"len": 1}]
source_dp = IterableWrapper(source_data)
batch_dp = source_dp.max_token_bucketize(max_token_count=3, len_fn=_custom_len_fn)
exp_batch_len = [(1, 1, 1), (2,), (3,)]
_validate_batch_size(list(batch_dp), exp_batch_len, len_fn=_custom_len_fn)
# __len__ Test: returns the number of batches
with self.assertRaises(TypeError):
len(batch_dp)
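    def _sketch_max_token_bucketizer_usage(self):
        # Illustrative sketch (not a test): pack variable-length samples into
        # batches whose total length stays within ``max_token_count``.
        sentences = IterableWrapper(["a", "bb", "ccc", "dddd"])
        batches = sentences.max_token_bucketize(max_token_count=4)
        # Samples are buffered, sorted by length, and greedily packed,
        # e.g. [["a", "bb"], ["ccc"], ["dddd"]] for lengths 1, 2, 3, 4.
        _ = list(batches)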
def test_map_batches_iterdatapipe(self):
source_dp = IterableWrapper(list(range(20)))
def fn(batch):
return [d + 1 for d in batch]
batch_mapped_dp = source_dp.map_batches(fn, batch_size=9)
expected_list = list(range(1, 21))
self.assertEqual(expected_list, list(batch_mapped_dp))
# Reset Test: reset the DataPipe after reading part of it
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(batch_mapped_dp, n_elements_before_reset)
self.assertEqual(expected_list[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_list, res_after_reset)
# Functional Test: Different sizes between input and output
def fn_less(batch):
return [batch[idx] // 2 for idx in range(0, len(batch), 2)]
less_batch_mapped_dp = source_dp.map_batches(fn_less, batch_size=8)
self.assertEqual(list(range(10)), list(less_batch_mapped_dp))
# Functional Test: Specify input_col
source_dp = IterableWrapper([(d - 1, d, d + 1) for d in range(20)])
batch_mapped_input_1_dp = source_dp.map_batches(fn, batch_size=9, input_col=0)
self.assertEqual(list(range(20)), list(batch_mapped_input_1_dp))
def fn_2_cols(batch):
return [(d1, d2 - 1) for d1, d2 in batch]
batch_mapped_input_2_dp = source_dp.map_batches(fn_2_cols, batch_size=9, input_col=[1, 2])
self.assertEqual([(d, d) for d in range(20)], list(batch_mapped_input_2_dp))
# __len__ Test: length should be determined by ``fn`` which we can't know
with self.assertRaisesRegex(TypeError, "length relies on the output of its function."):
len(batch_mapped_dp)
def test_flatmap_iterdatapipe(self):
source_dp = IterableWrapper(list(range(20)))
def fn(e):
return [e, e * 10]
flatmapped_dp = source_dp.flatmap(fn)
expected_list = list(itertools.chain(*[(e, e * 10) for e in source_dp]))
self.assertEqual(expected_list, list(flatmapped_dp))
        # Functional Test: Specify input_col
tuple_source_dp = IterableWrapper([(d - 1, d, d + 1) for d in range(20)])
# Single input_col
input_col_1_dp = tuple_source_dp.flatmap(fn, input_col=1)
self.assertEqual(expected_list, list(input_col_1_dp))
# Multiple input_col
def mul_fn(a, b):
return [a - b, b - a]
input_col_2_dp = tuple_source_dp.flatmap(mul_fn, input_col=(0, 2))
self.assertEqual(list(itertools.chain(*[(-2, 2) for _ in range(20)])), list(input_col_2_dp))
# flatmap with no fn specified
default_dp = tuple_source_dp.flatmap()
self.assertEqual(list(itertools.chain(*[(n - 1, n, n + 1) for n in range(20)])), list(default_dp))
# flatmap with no fn specified, multiple input_col
default_dp = tuple_source_dp.flatmap(input_col=(0, 2))
self.assertEqual(list(itertools.chain(*[(n - 1, n + 1) for n in range(20)])), list(default_dp))
# flatmap with no fn specified, some special input
tuple_source_dp = IterableWrapper([[1, 2, [3, 4]], [5, 6, [7, 8]]])
default_dp = tuple_source_dp.flatmap(input_col=(0, 2))
self.assertEqual([1, [3, 4], 5, [7, 8]], list(default_dp))
# Reset Test: reset the DataPipe after reading part of it
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(flatmapped_dp, n_elements_before_reset)
self.assertEqual(expected_list[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_list, res_after_reset)
# __len__ Test: length should be len(source_dp)*len(fn->out_shape) which we can't know
with self.assertRaisesRegex(TypeError, "length relies on the output of its function."):
len(flatmapped_dp)
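    def _sketch_flatmap_usage(self):
        # Illustrative sketch (not a test): expand every element into several,
        # e.g. the original value plus a negated copy.
        dp = IterableWrapper([1, 2]).flatmap(lambda x: [x, -x])
        # yields 1, -1, 2, -2
        _ = list(dp)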
def test_shuffled_flatmap_iterdatapipe(self):
source_dp = IterableWrapper(list(range(20)))
def fn(e):
return [e, e * 10]
# Tests with buffer_size=1
# In this case, the expected behavior is similar to flatmap
shuffled_flatmapped_dp = source_dp.shuffled_flatmap(fn, buffer_size=1)
expected_list = list(itertools.chain(*[(e, e * 10) for e in source_dp]))
self.assertEqual(expected_list, list(shuffled_flatmapped_dp))
        # Functional Test: Specify input_col
tuple_source_dp = IterableWrapper([(d - 1, d, d + 1) for d in range(20)])
# Single input_col
input_col_1_dp = tuple_source_dp.shuffled_flatmap(fn, input_col=1, buffer_size=1)
self.assertEqual(expected_list, list(input_col_1_dp))
# With generator as fn
def gen_fn(e):
yield e
yield e * 10
shuffled_flatmapped_dp = source_dp.shuffled_flatmap(gen_fn, buffer_size=1)
expected_list = list(itertools.chain(*[(e, e * 10) for e in source_dp]))
self.assertEqual(expected_list, list(shuffled_flatmapped_dp))
# Multiple input_col
def mul_fn(a, b):
return [a - b, b - a]
input_col_2_dp = tuple_source_dp.shuffled_flatmap(mul_fn, input_col=(0, 2), buffer_size=1)
self.assertEqual(list(itertools.chain(*[(-2, 2) for _ in range(20)])), list(input_col_2_dp))
# shuffled_flatmap with no fn specified
default_dp = tuple_source_dp.shuffled_flatmap(buffer_size=1)
self.assertEqual(list(itertools.chain(*[(n - 1, n, n + 1) for n in range(20)])), list(default_dp))
# shuffled_flatmap with no fn specified, multiple input_col
default_dp = tuple_source_dp.shuffled_flatmap(input_col=(0, 2), buffer_size=1)
self.assertEqual(list(itertools.chain(*[(n - 1, n + 1) for n in range(20)])), list(default_dp))
# shuffled_flatmap with no fn specified, some special input
tuple_source_dp = IterableWrapper([[1, 2, [3, 4]], [5, 6, [7, 8]]])
default_dp = tuple_source_dp.shuffled_flatmap(input_col=(0, 2), buffer_size=1)
self.assertEqual([1, [3, 4], 5, [7, 8]], list(default_dp))
# Reset Test: reset the DataPipe after reading part of it
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(shuffled_flatmapped_dp, n_elements_before_reset)
self.assertEqual(expected_list[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_list, res_after_reset)
# __len__ Test: length should be len(source_dp)*len(fn->out_shape) which we can't know
with self.assertRaisesRegex(TypeError, "length relies on the output of its function."):
len(shuffled_flatmapped_dp)
# __len__ when no fn specified:
dp = IterableWrapper([[1, 2], [], [3], [4, 5, 6, [7, 8]]])
dp = dp.shuffled_flatmap()
self.assertEqual(len(dp), 7)
# Tests with .set_shuffle(False)
# In this case, the expected behavior is similar to flatmap
shuffled_flatmapped_dp = source_dp.shuffled_flatmap(fn).set_shuffle(False)
expected_list = list(itertools.chain(*[(e, e * 10) for e in source_dp]))
self.assertEqual(expected_list, list(shuffled_flatmapped_dp))
        # Functional Test: Specify input_col
tuple_source_dp = IterableWrapper([(d - 1, d, d + 1) for d in range(20)])
# Single input_col
input_col_1_dp = tuple_source_dp.shuffled_flatmap(fn, input_col=1, buffer_size=1)
self.assertEqual(expected_list, list(input_col_1_dp))
# Multiple input_col
input_col_2_dp = tuple_source_dp.shuffled_flatmap(mul_fn, input_col=(0, 2)).set_shuffle(False)
self.assertEqual(list(itertools.chain(*[(-2, 2) for _ in range(20)])), list(input_col_2_dp))
# shuffled_flatmap with no fn specified
default_dp = tuple_source_dp.shuffled_flatmap().set_shuffle(False)
self.assertEqual(list(itertools.chain(*[(n - 1, n, n + 1) for n in range(20)])), list(default_dp))
# shuffled_flatmap with no fn specified, multiple input_col
default_dp = tuple_source_dp.shuffled_flatmap(input_col=(0, 2)).set_shuffle(False)
self.assertEqual(list(itertools.chain(*[(n - 1, n + 1) for n in range(20)])), list(default_dp))
# shuffled_flatmap with no fn specified, some special input
tuple_source_dp = IterableWrapper([[1, 2, [3, 4]], [5, 6, [7, 8]]])
default_dp = tuple_source_dp.shuffled_flatmap(input_col=(0, 2)).set_shuffle(False)
self.assertEqual([1, [3, 4], 5, [7, 8]], list(default_dp))
# Reset Test: reset the DataPipe after reading part of it
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(shuffled_flatmapped_dp, n_elements_before_reset)
self.assertEqual(expected_list[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_list, res_after_reset)
# Other tests
# Test no empty buffers:
with self.assertRaises(AssertionError):
_ = source_dp.shuffled_flatmap(buffer_size=0)
# Functional Test: No seed
consecutive_tuple_source_dp = IterableWrapper([(d, d + 1, d + 2) for d in range(0, 21, 3)])
shuffled_flatmapped_dp = consecutive_tuple_source_dp.shuffled_flatmap()
self.assertEqual(set(range(21)), set(shuffled_flatmapped_dp))
# Functional Test: With global seed
torch.manual_seed(123)
shuffled_flatmapped_dp = tuple_source_dp.shuffled_flatmap()
res = list(shuffled_flatmapped_dp)
torch.manual_seed(123)
self.assertEqual(list(shuffled_flatmapped_dp), res)
# Functional Test: Set seed
shuffled_flatmapped_dp = tuple_source_dp.shuffled_flatmap().set_seed(123)
res = list(shuffled_flatmapped_dp)
shuffled_flatmapped_dp.set_seed(123)
self.assertEqual(list(shuffled_flatmapped_dp), res)
# Reset Test:
shuffled_flatmapped_dp = tuple_source_dp.shuffled_flatmap()
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(shuffled_flatmapped_dp, n_elements_before_reset)
self.assertEqual(5, len(res_before_reset))
def test_round_robin_demux_iterdatapipe(self):
source_dp = IterableWrapper(list(range(23)))
with self.assertRaisesRegex(ValueError, "Expected `num_instaces`"):
_ = source_dp.round_robin_demux(0)
        # Functional Test
dp1, dp2, dp3 = source_dp.round_robin_demux(3)
self.assertEqual(list(range(0, 23, 3)), list(dp1))
self.assertEqual(list(range(1, 23, 3)), list(dp2))
self.assertEqual(list(range(2, 23, 3)), list(dp3))
# __len__ Test
self.assertEqual(len(dp1), 8)
self.assertEqual(len(dp2), 8)
self.assertEqual(len(dp3), 7)
def test_unzipper_iterdatapipe(self):
source_dp = IterableWrapper([(i, i + 10, i + 20) for i in range(10)])
        # Functional Test: unzips each sequence via the UnZipper class constructor
dp1, dp2, dp3 = UnZipper(source_dp, sequence_length=3)
self.assertEqual(list(range(10)), list(dp1))
self.assertEqual(list(range(10, 20)), list(dp2))
self.assertEqual(list(range(20, 30)), list(dp3))
        # Functional Test: unzips each sequence via the functional form
dp1, dp2, dp3 = source_dp.unzip(sequence_length=3)
self.assertEqual(list(range(10)), list(dp1))
self.assertEqual(list(range(10, 20)), list(dp2))
self.assertEqual(list(range(20, 30)), list(dp3))
# Functional Test: skipping over specified values
dp2, dp3 = source_dp.unzip(sequence_length=3, columns_to_skip=[0])
self.assertEqual(list(range(10, 20)), list(dp2))
self.assertEqual(list(range(20, 30)), list(dp3))
(dp2,) = source_dp.unzip(sequence_length=3, columns_to_skip=[0, 2], buffer_size=0)
self.assertEqual(list(range(10, 20)), list(dp2))
source_dp = IterableWrapper([(i, i + 10, i + 20, i + 30) for i in range(10)])
dp2, dp3 = source_dp.unzip(sequence_length=4, columns_to_skip=[0, 3])
self.assertEqual(list(range(10, 20)), list(dp2))
self.assertEqual(list(range(20, 30)), list(dp3))
        # Functional Test: one child DataPipe is read first, but buffer_size = 4 is too small, so an error is raised
source_dp = IterableWrapper([(i, i + 10) for i in range(10)])
dp1, dp2 = source_dp.unzip(sequence_length=2, buffer_size=4)
it1 = iter(dp1)
for _ in range(4):
next(it1)
with self.assertRaises(BufferError):
next(it1)
with self.assertRaises(BufferError):
list(dp2)
dp1, dp2 = source_dp.unzip(sequence_length=2, buffer_size=4)
with self.assertRaises(BufferError):
list(dp2)
# Reset Test: DataPipe resets when a new iterator is created, even if this datapipe hasn't been read
dp1, dp2 = source_dp.unzip(sequence_length=2)
_ = iter(dp1)
output2 = []
with self.assertRaisesRegex(RuntimeError, r"iterator has been invalidated"):
for i, n2 in enumerate(dp2):
output2.append(n2)
if i == 4:
_ = iter(dp1) # This will reset all child DataPipes
self.assertEqual(list(range(10, 15)), output2)
# Reset Test: DataPipe reset when some of it have been read
dp1, dp2 = source_dp.unzip(sequence_length=2)
output1, output2 = [], []
for i, (n1, n2) in enumerate(zip(dp1, dp2)):
output1.append(n1)
output2.append(n2)
if i == 4:
with warnings.catch_warnings(record=True) as wa:
                    _ = iter(dp1)  # Resets all child DataPipes
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
break
for n1, n2 in zip(dp1, dp2):
output1.append(n1)
output2.append(n2)
self.assertEqual(list(range(5)) + list(range(10)), output1)
self.assertEqual(list(range(10, 15)) + list(range(10, 20)), output2)
# Reset Test: DataPipe reset, even when some other child DataPipes are not read
source_dp = IterableWrapper([(i, i + 10, i + 20) for i in range(10)])
dp1, dp2, dp3 = source_dp.unzip(sequence_length=3)
output1, output2 = list(dp1), list(dp2)
self.assertEqual(list(range(10)), output1)
self.assertEqual(list(range(10, 20)), output2)
with warnings.catch_warnings(record=True) as wa:
self.assertEqual(list(range(10)), list(dp1)) # Resets even though dp3 has not been read
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
output3 = []
for i, n3 in enumerate(dp3):
output3.append(n3)
if i == 4:
with warnings.catch_warnings(record=True) as wa:
output1 = list(dp1) # Resets even though dp3 is only partially read
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Some child DataPipes are not exhausted")
self.assertEqual(list(range(20, 25)), output3)
self.assertEqual(list(range(10)), output1)
break
self.assertEqual(list(range(20, 30)), list(dp3)) # dp3 has to read from the start again
# __len__ Test: Each DataPipe inherits the source datapipe's length
dp1, dp2, dp3 = source_dp.unzip(sequence_length=3)
self.assertEqual(len(source_dp), len(dp1))
self.assertEqual(len(source_dp), len(dp2))
self.assertEqual(len(source_dp), len(dp3))
def test_itertomap_mapdatapipe(self):
# Functional Test with None key_value_fn
values = list(range(10))
keys = ["k" + str(i) for i in range(10)]
source_dp = IterableWrapper(list(zip(keys, values)))
map_dp = source_dp.to_map_datapipe()
self.assertTrue(isinstance(map_dp, MapDataPipe))
# Lazy loading
self.assertTrue(map_dp._map is None)
        # __len__ Test: inherits the source DataPipe's length
self.assertEqual(len(map_dp), 10)
# Functional Test
self.assertEqual(list(range(10)), [map_dp["k" + str(idx)] for idx in range(10)])
self.assertFalse(map_dp._map is None)
source_dp = IterableWrapper(range(10))
# TypeError test for invalid data type
map_dp = source_dp.to_map_datapipe()
with self.assertRaisesRegex(TypeError, "Cannot convert dictionary update element"):
_ = list(map_dp)
# ValueError test for wrong length
map_dp = source_dp.to_map_datapipe(lambda d: (d,))
with self.assertRaisesRegex(ValueError, "dictionary update sequence element has length"):
_ = list(map_dp)
# Functional Test with key_value_fn
map_dp = source_dp.to_map_datapipe(lambda d: ("k" + str(d), d + 1))
self.assertEqual(list(range(1, 11)), [map_dp["k" + str(idx)] for idx in range(10)])
self.assertFalse(map_dp._map is None)
# No __len__ from prior DataPipe
no_len_dp = source_dp.filter(lambda x: x % 2 == 0)
map_dp = no_len_dp.to_map_datapipe(lambda x: (x, x + 2))
with warnings.catch_warnings(record=True) as wa:
length = len(map_dp)
self.assertEqual(length, 5)
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Data from prior DataPipe")
# Duplicate Key Test
dup_map_dp = source_dp.to_map_datapipe(lambda x: (x % 1, x))
with warnings.catch_warnings(record=True) as wa:
dup_map_dp._load_map()
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"Found duplicate key")
def test_mux_longest_iterdatapipe(self):
# Functional Test: Elements are yielded one at a time from each DataPipe, until they are all exhausted
input_dp1 = IterableWrapper(range(4))
input_dp2 = IterableWrapper(range(4, 8))
input_dp3 = IterableWrapper(range(8, 12))
output_dp = input_dp1.mux_longest(input_dp2, input_dp3)
expected_output = [0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11]
self.assertEqual(len(expected_output), len(output_dp))
self.assertEqual(expected_output, list(output_dp))
        # Functional Test: Uneven input DataPipes
input_dp1 = IterableWrapper([1, 2, 3, 4])
input_dp2 = IterableWrapper([10])
input_dp3 = IterableWrapper([100, 200, 300])
output_dp = input_dp1.mux_longest(input_dp2, input_dp3)
expected_output = [1, 10, 100, 2, 200, 3, 300, 4]
self.assertEqual(len(expected_output), len(output_dp))
self.assertEqual(expected_output, list(output_dp))
        # Functional Test: Empty DataPipe
input_dp1 = IterableWrapper([0, 1, 2, 3])
input_dp2 = IterableWrapper([])
output_dp = input_dp1.mux_longest(input_dp2)
self.assertEqual(len(input_dp1), len(output_dp))
self.assertEqual(list(input_dp1), list(output_dp))
# __len__ Test: raises TypeError when __len__ is called and an input doesn't have __len__
input_dp1 = IterableWrapper(range(10))
input_dp_no_len = IDP_NoLen(range(10))
output_dp = input_dp1.mux_longest(input_dp_no_len)
with self.assertRaises(TypeError):
len(output_dp)
def test_shard_expand(self):
# Functional Test: ensure expansion generates the right outputs
def testexpand(s):
stage1 = IterableWrapper([s])
stage2 = ShardExpander(stage1)
return list(iter(stage2))
def myexpand(lo, hi, fmt):
return [fmt.format(i) for i in range(lo, hi)]
self.assertEqual(testexpand("ds-{000000..000009}.tar"), myexpand(0, 10, "ds-{:06d}.tar"))
self.assertEqual(testexpand("{0..9}"), myexpand(0, 10, "{}"))
self.assertEqual(testexpand("{0..999}"), myexpand(0, 1000, "{}"))
self.assertEqual(testexpand("{123..999}"), myexpand(123, 1000, "{}"))
self.assertEqual(testexpand("{000..999}"), myexpand(0, 1000, "{:03d}"))
with self.assertRaisesRegex(ValueError, r"must not start with 0"):
testexpand("{01..999}")
with self.assertRaisesRegex(ValueError, r"must be shorter"):
testexpand("{0000..999}")
with self.assertRaisesRegex(ValueError, r"bad range"):
testexpand("{999..123}")
self.assertEqual(testexpand("{0..1}{0..1}"), "00 01 10 11".split())
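    def _sketch_shard_expander_usage(self):
        # Illustrative sketch (not a test): brace ranges expand into concrete,
        # zero-padded shard names, as in WebDataset-style shard lists.
        dp = ShardExpander(IterableWrapper(["train-{000..002}.tar"]))
        # yields "train-000.tar", "train-001.tar", "train-002.tar"
        _ = list(dp)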
def test_combining_infinite_iterdatapipe(self):
r"""
Test combining DataPipe can properly exit at the end of iteration
with an infinite DataPipe as the input.
"""
def _get_dp(length=10):
source_dp = IterableWrapper(list(range(length)))
inf_dp = IterableWrapper(list(range(length))).cycle()
return source_dp, inf_dp
# zip
noinf_dp, inf_dp = _get_dp(10)
dp = inf_dp.zip(noinf_dp)
res = list(dp)
self.assertEqual(res, [(i, i) for i in range(10)])
# mux
noinf_dp, inf_dp = _get_dp(10)
dp = inf_dp.mux(noinf_dp)
res = list(dp)
self.assertEqual(res, [i for i in range(10) for _ in range(2)])
# zip_with_iter
noinf_dp, inf_dp = _get_dp(10)
dp = noinf_dp.zip_with_iter(inf_dp, key_fn=lambda x: x)
res = list(dp)
self.assertEqual(res, [(i, i) for i in range(10)])
def test_zip_longest_iterdatapipe(self):
# Functional Test: raises TypeError when an input is not of type `IterDataPipe`
with self.assertRaises(TypeError):
input_dp1 = IterableWrapper(range(10))
input_no_dp = list(range(10))
output_dp = input_dp1.zip_longest(input_no_dp) # type: ignore[arg-type]
# Functional Test: raises TypeError when an input does not have valid length
input_dp1 = IterableWrapper(range(10))
input_dp_no_len = IDP_NoLen(range(5))
output_dp = input_dp1.zip_longest(input_dp_no_len)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(output_dp)
# Functional Test: zips the results properly even when lengths are different
# (zips to the longest, filling missing values with default value None.)
input_dp1 = IterableWrapper(range(10))
input_dp2 = IterableWrapper(range(5))
output_dp = input_dp1.zip_longest(input_dp2)
exp = [(i, i) for i in range(5)] + [(i, None) for i in range(5, 10)]
self.assertEqual(list(output_dp), exp)
# Functional Test: zips the results properly even when lengths are different
# (zips to the longest, filling missing values with user input)
input_dp1 = IterableWrapper(range(10))
input_dp2 = IterableWrapper(range(5))
output_dp = input_dp1.zip_longest(input_dp2, fill_value=-1)
exp = [(i, i) for i in range(5)] + [(i, -1) for i in range(5, 10)]
self.assertEqual(list(output_dp), exp)
        # __len__ Test: length matches the length of the longest input
self.assertEqual(len(output_dp), 10)
def test_drop_iterdatapipe(self):
# tuple tests
input_dp = IterableWrapper([(0, 1, 2), (3, 4, 5), (6, 7, 8)])
# Functional Test: single index drop for tuple elements
drop_dp = input_dp.drop(1)
self.assertEqual([(0, 2), (3, 5), (6, 8)], list(drop_dp))
# Functional Test: multiple indices drop for tuple elements
drop_dp = input_dp.drop([0, 2])
self.assertEqual([(1,), (4,), (7,)], list(drop_dp))
# dict tests
input_dp = IterableWrapper([{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4, "c": 5}, {"a": 5, "b": 6, "c": 7}])
# Functional Test: single key drop for dict elements
drop_dp = input_dp.drop("a")
self.assertEqual([{"b": 2, "c": 3}, {"b": 4, "c": 5}, {"b": 6, "c": 7}], list(drop_dp))
# Functional Test: multiple key drop for dict elements
drop_dp = input_dp.drop(["a", "b"])
self.assertEqual([{"c": 3}, {"c": 5}, {"c": 7}], list(drop_dp))
# list tests
input_dp = IterableWrapper([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
# Functional Test: single index drop for list elements
drop_dp = input_dp.drop(2)
self.assertEqual([[0, 1], [3, 4], [6, 7]], list(drop_dp))
# Functional Test: multiple indices drop for list elements
drop_dp = input_dp.drop([0, 1])
self.assertEqual([[2], [5], [8]], list(drop_dp))
# Reset Test:
n_elements_before_reset = 2
input_dp = IterableWrapper([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
drop_dp = input_dp.drop([0, 1])
expected_res = [[2], [5], [8]]
res_before_reset, res_after_reset = reset_after_n_next_calls(drop_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
# __len__ Test:
input_dp = IterableWrapper([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
drop_dp = input_dp.drop([0, 1])
self.assertEqual(3, len(drop_dp))
def test_slice_iterdatapipe(self):
# tuple tests
input_dp = IterableWrapper([(0, 1, 2), (3, 4, 5), (6, 7, 8)])
# Functional Test: slice with no stop and no step for tuple
slice_dp = input_dp.slice(1)
self.assertEqual([(1, 2), (4, 5), (7, 8)], list(slice_dp))
# Functional Test: slice with no step for tuple
slice_dp = input_dp.slice(0, 2)
self.assertEqual([(0, 1), (3, 4), (6, 7)], list(slice_dp))
# Functional Test: slice with step for tuple
slice_dp = input_dp.slice(0, 2, 2)
self.assertEqual([(0,), (3,), (6,)], list(slice_dp))
# Functional Test: slice with list of indices for tuple
slice_dp = input_dp.slice([0, 1])
self.assertEqual([(0, 1), (3, 4), (6, 7)], list(slice_dp))
# list tests
input_dp = IterableWrapper([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
# Functional Test: slice with no stop and no step for list
slice_dp = input_dp.slice(1)
self.assertEqual([[1, 2], [4, 5], [7, 8]], list(slice_dp))
# Functional Test: slice with no step for list
slice_dp = input_dp.slice(0, 2)
self.assertEqual([[0, 1], [3, 4], [6, 7]], list(slice_dp))
# Functional Test: slice with list of indices for list
slice_dp = input_dp.slice([0, 1])
self.assertEqual([[0, 1], [3, 4], [6, 7]], list(slice_dp))
# dict tests
input_dp = IterableWrapper([{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4, "c": 5}, {"a": 5, "b": 6, "c": 7}])
# Functional Test: slice with key for dict
slice_dp = input_dp.slice("a")
self.assertEqual([{"a": 1}, {"a": 3}, {"a": 5}], list(slice_dp))
# Functional Test: slice with list of keys for dict
slice_dp = input_dp.slice(["a", "b"])
self.assertEqual([{"a": 1, "b": 2}, {"a": 3, "b": 4}, {"a": 5, "b": 6}], list(slice_dp))
# __len__ Test:
input_dp = IterableWrapper([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
slice_dp = input_dp.slice(0, 2)
self.assertEqual(3, len(slice_dp))
# Reset Test:
n_elements_before_reset = 2
input_dp = IterableWrapper([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
slice_dp = input_dp.slice([2])
expected_res = [[2], [5], [8]]
res_before_reset, res_after_reset = reset_after_n_next_calls(slice_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
def test_flatten_iterdatapipe(self):
# tuple tests
# Functional Test: flatten for an index
input_dp = IterableWrapper([(0, 1, (2, 3)), (4, 5, (6, 7)), (8, 9, (10, 11))])
flatten_dp = input_dp.flatten(2)
self.assertEqual([(0, 1, 2, 3), (4, 5, 6, 7), (8, 9, 10, 11)], list(flatten_dp))
# Functional Test: flatten for list of indices
input_dp = IterableWrapper([((0, 10), 1, (2, 3)), ((4, 14), 5, (6, 7)), ((8, 18), 9, (10, 11))])
flatten_dp = input_dp.flatten([0, 2])
self.assertEqual([(0, 10, 1, 2, 3), (4, 14, 5, 6, 7), (8, 18, 9, 10, 11)], list(flatten_dp))
# Functional Test: flatten all iters in the datapipe one level (no argument)
input_dp = IterableWrapper([(0, (1, 2)), (3, (4, 5)), (6, (7, 8))])
flatten_dp = input_dp.flatten()
self.assertEqual([(0, 1, 2), (3, 4, 5), (6, 7, 8)], list(flatten_dp))
# list tests
# Functional Test: flatten for an index
input_dp = IterableWrapper([[0, 1, [2, 3]], [4, 5, [6, 7]], [8, 9, [10, 11]]])
flatten_dp = input_dp.flatten(2)
self.assertEqual([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], list(flatten_dp))
# Functional Test: flatten for list of indices
input_dp = IterableWrapper([[[0, 10], 1, [2, 3]], [[4, 14], 5, [6, 7]], [[8, 18], 9, [10, 11]]])
flatten_dp = input_dp.flatten([0, 2])
self.assertEqual([[0, 10, 1, 2, 3], [4, 14, 5, 6, 7], [8, 18, 9, 10, 11]], list(flatten_dp))
# Functional Test: flatten all iters in the datapipe one level (no argument)
input_dp = IterableWrapper([[0, [1, 2]], [3, [4, 5]], [6, [7, 8]]])
flatten_dp = input_dp.flatten()
self.assertEqual([[0, 1, 2], [3, 4, 5], [6, 7, 8]], list(flatten_dp))
# Functional Test: string test, flatten all iters in the datapipe one level (no argument)
input_dp = IterableWrapper([["zero", ["one", "2"]], ["3", ["4", "5"]], ["6", ["7", "8"]]])
flatten_dp = input_dp.flatten()
self.assertEqual([["zero", "one", "2"], ["3", "4", "5"], ["6", "7", "8"]], list(flatten_dp))
# dict tests
# Functional Test: flatten for a key
input_dp = IterableWrapper([{"a": 1, "b": 2, "c": {"d": 3, "e": 4}}, {"a": 5, "b": 6, "c": {"d": 7, "e": 8}}])
flatten_dp = input_dp.flatten("c")
self.assertEqual([{"a": 1, "b": 2, "d": 3, "e": 4}, {"a": 5, "b": 6, "d": 7, "e": 8}], list(flatten_dp))
# Functional Test: flatten is a no-op for a key whose value is already flat
input_dp = IterableWrapper([{"a": 1, "b": 2, "c": {"d": 9, "e": 10}}, {"a": 5, "b": 6, "c": {"d": 7, "e": 8}}])
flatten_dp = input_dp.flatten("a")
self.assertEqual(
[{"a": 1, "b": 2, "c": {"d": 9, "e": 10}}, {"a": 5, "b": 6, "c": {"d": 7, "e": 8}}], list(flatten_dp)
)
# Functional Test: flatten for a list of keys
input_dp = IterableWrapper(
[
{"a": {"f": 10, "g": 11}, "b": 2, "c": {"d": 3, "e": 4}},
{"a": {"f": 10, "g": 11}, "b": 6, "c": {"d": 7, "e": 8}},
]
)
flatten_dp = input_dp.flatten(["a", "c"])
self.assertEqual(
[{"f": 10, "g": 11, "b": 2, "d": 3, "e": 4}, {"f": 10, "g": 11, "b": 6, "d": 7, "e": 8}], list(flatten_dp)
)
# Functional Test: flatten all iters in the datapipe one level (no argument)
input_dp = IterableWrapper([{"a": 1, "b": 2, "c": {"d": 3, "e": 4}}, {"a": 5, "b": 6, "c": {"d": 7, "e": 8}}])
flatten_dp = input_dp.flatten()
self.assertEqual([{"a": 1, "b": 2, "d": 3, "e": 4}, {"a": 5, "b": 6, "d": 7, "e": 8}], list(flatten_dp))
# Functional Test: flatten all iters one level, multiple iters
input_dp = IterableWrapper(
[
{"a": {"f": 10, "g": 11}, "b": 2, "c": {"d": 3, "e": 4}},
{"a": {"f": 10, "g": 11}, "b": 6, "c": {"d": 7, "e": 8}},
]
)
flatten_dp = input_dp.flatten()
self.assertEqual(
[{"f": 10, "g": 11, "b": 2, "d": 3, "e": 4}, {"f": 10, "g": 11, "b": 6, "d": 7, "e": 8}], list(flatten_dp)
)
# __len__ Test:
input_dp = IterableWrapper([(0, 1, (2, 3)), (4, 5, (6, 7)), (8, 9, (10, 11))])
flatten_dp = input_dp.flatten(2)
self.assertEqual(3, len(flatten_dp))
# Reset Test:
n_elements_before_reset = 2
input_dp = IterableWrapper([(0, 1, (2, 3)), (4, 5, (6, 7)), (8, 9, (10, 11))])
flatten_dp = input_dp.flatten(2)
expected_res = [(0, 1, 2, 3), (4, 5, 6, 7), (8, 9, 10, 11)]
res_before_reset, res_after_reset = reset_after_n_next_calls(flatten_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
def test_length_setter_iterdatapipe(self):
input_dp = IterableWrapper(range(10))
# Functional Test: Setting length doesn't change the content of the DataPipe
dp: IterDataPipe = input_dp.set_length(3)
self.assertEqual(list(range(10)), list(dp))
with self.assertRaises(AssertionError):
input_dp.set_length(-1)
# __len__ Test: Length is as specified and propagates through
dp = input_dp.set_length(3).map(lambda x: x + 1)
self.assertEqual(3, len(dp))
# Reset Test:
n_elements_before_reset = 2
dp = input_dp.set_length(3)
expected_res = list(range(10))
res_before_reset, res_after_reset = reset_after_n_next_calls(dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
def test_random_splitter_iterdatapipe(self):
n_epoch = 2
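# With a fixed `seed`, `random_split` reproduces the same per-element assignment
# on every epoch, which the loops below verify by comparing two passes.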
# Functional Test: Split results are the same across epochs
dp = IterableWrapper(range(10))
train, valid = dp.random_split(total_length=10, weights={"train": 0.5, "valid": 0.5}, seed=0)
results = []
for _ in range(n_epoch):
res = list(train)
self.assertEqual(5, len(res))
results.append(res)
self.assertEqual(results[0], results[1])
valid_res = list(valid)
self.assertEqual(5, len(valid_res))
self.assertEqual(list(range(10)), sorted(results[0] + valid_res))
# __len__ Test: lengths can be known in advance because the weights split evenly into integers.
self.assertEqual(5, len(train))
self.assertEqual(5, len(valid))
# Functional Test: DataPipe can split into 3 DataPipes, and infer `total_length` when not given
dp = IterableWrapper(range(10))
train, valid, test = dp.random_split(weights={"train": 0.6, "valid": 0.2, "test": 0.2}, seed=0)
results = []
for _ in range(n_epoch):
res = list(train)
self.assertEqual(6, len(res))
results.append(res)
self.assertEqual(results[0], results[1])
valid_res = list(valid)
self.assertEqual(2, len(valid_res))
test_res = list(test)
self.assertEqual(2, len(test_res))
self.assertEqual(list(range(10)), sorted(results[0] + valid_res + test_res))
# __len__ Test: lengths can be known in advance because the weights split evenly into integers.
self.assertEqual(6, len(train))
self.assertEqual(2, len(valid))
self.assertEqual(2, len(test))
# Functional Test: Split can work even when weights do not split evenly into integers.
dp = IterableWrapper(range(13))
train, valid, test = dp.random_split(weights={"train": 0.6, "valid": 0.2, "test": 0.2}, seed=0)
res = list(train) + list(valid) + list(test)
self.assertEqual(list(range(13)), sorted(res))
# __len__ Test: lengths cannot be known in advance because the weights do not split evenly into integers.
with self.assertRaisesRegex(TypeError, "Lengths of the split cannot be known in advance"):
len(train)
# Functional Test: Error when `total_length` cannot be inferred
nolen_dp = IDP_NoLen(range(10))
with self.assertRaisesRegex(TypeError, "needs `total_length`"):
_, __ = nolen_dp.random_split(weights={"train": 0.5, "valid": 0.5}, seed=0) # type: ignore[call-arg]
# Functional Test: `target` must match a key in the `weights` dict
dp = IterableWrapper(range(10))
with self.assertRaisesRegex(KeyError, "does not match any key"):
_ = dp.random_split(
total_length=10, weights={"train": 0.5, "valid": 0.2, "test": 0.2}, seed=0, target="NOTINDICT"
)
# Functional Test: when `target` is specified, the result matches the corresponding split from before
dp = IterableWrapper(range(10))
train = dp.random_split(
total_length=10, weights={"train": 0.6, "valid": 0.2, "test": 0.2}, seed=0, target="train"
)
results2 = []
for _ in range(n_epoch):
res = list(train)
self.assertEqual(6, len(res))
results2.append(res)
self.assertEqual(results2[0], results2[1])
self.assertEqual(results, results2)
# Functional Test: `override_seed` works and changes the split result
train.override_seed(1)
seed_1_res = list(train)
self.assertNotEqual(results2[0], seed_1_res)
# Functional Test: `override_seed` doesn't impact the current iteration, only the next one
temp_res = []
for i, x in enumerate(train):
temp_res.append(x)
if i == 3:
train.override_seed(0)
self.assertEqual(seed_1_res, temp_res) # The current iteration should equal seed 1 result
self.assertEqual(results2[0], list(train)) # The next iteration should equal seed 0 result
# Functional Test: Raise exception if both children are used at the same time
dp = IterableWrapper(range(10))
train, valid = dp.random_split(total_length=10, weights={"train": 0.5, "valid": 0.5}, seed=0)
it_train = iter(train)
next(it_train)
it_valid = iter(valid) # This resets the DataPipe and invalidates the other iterator
next(it_valid)
with self.assertRaisesRegex(RuntimeError, "iterator has been invalidated"):
next(it_train)
next(it_valid) # No error, can keep going
@skipIfNoCUDA
def test_pin_memory(self):
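# `pin_memory` traverses nested containers (tuples, lists, dicts, namedtuples)
# and pins each Tensor into page-locked memory for faster host-to-GPU copies;
# the cases below cover each container shape.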
# Tensor
dp = IterableWrapper([(i, i + 1) for i in range(10)]).map(_convert_to_tensor).pin_memory()
self.assertTrue(all(d.is_pinned() for d in dp))
# List of Tensors
dp = IterableWrapper([[(i - 1, i), (i, i + 1)] for i in range(10)]).map(_convert_to_tensor).pin_memory()
self.assertTrue(all(d0.is_pinned() and d1.is_pinned() for d0, d1 in dp))
# Dict of Tensors
dp = IterableWrapper([{str(i): (i, i + 1)} for i in range(10)]).map(_convert_to_tensor).pin_memory()
self.assertTrue(all(v.is_pinned() for d in dp for v in d.values()))
# NamedTuple
dp = IterableWrapper([NamedTensors(torch.tensor(i), torch.tensor(i + 1)) for i in range(10)]).pin_memory()
self.assertTrue(all(v.is_pinned() for d in dp for v in d))
# Dict of List of Tensors
dp = (
IterableWrapper([{str(i): [(i - 1, i), (i, i + 1)]} for i in range(10)])
.map(_convert_to_tensor)
.pin_memory()
)
self.assertTrue(all(v.is_pinned() for d in dp for batch in d.values() for v in batch))
# List of Dict of Tensors
dp = IterableWrapper([{str(i): (i, i + 1)} for i in range(10)]).map(_convert_to_tensor).batch(2).pin_memory()
self.assertTrue(all(v.is_pinned() for batch in dp for d in batch for v in d.values()))
# List of List of Tensors
dp = (
IterableWrapper([[(i - 1, i), (i, i + 1)] for i in range(10)]).map(_convert_to_tensor).batch(2).pin_memory()
)
self.assertTrue(all(d0.is_pinned() and d1.is_pinned() for batch in dp for d0, d1 in batch))
# Single str
dp = IterableWrapper(["hello", "world"]).batch(1).collate().pin_memory()
self.assertEqual(list(dp), [["hello"], ["world"]])
def test_async_map_batches(self):
batch_size = 16
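# `async_map_batches` gathers `batch_size` elements, awaits the async fn over them
# concurrently (bounded by `max_concurrency`), and flattens the batch back into
# individual elements unless `flatten=False`.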
def _helper(input_data, exp_res, async_fn, input_col=None, output_col=None, max_concurrency=32, flatten=True):
dp = IterableWrapper(input_data)
dp = dp.async_map_batches(async_fn, batch_size, input_col, output_col, max_concurrency, flatten)
self.assertEqual(
exp_res,
list(dp),
msg=f"Async map test with {async_fn=}, {input_col=}, {output_col=}, {max_concurrency=}",
)
if flatten:
self.assertEqual(len(input_data), len(dp))
_helper(range(50), [i * 10 for i in range(50)], _async_mul_ten)
# Smaller max_concurrency
_helper(range(50), [i * 10 for i in range(50)], _async_mul_ten, max_concurrency=6)
# Tuple with input_col
_helper([(i, i) for i in range(50)], [(i * 10, i) for i in range(50)], _async_mul_ten, input_col=0)
_helper([(i, i) for i in range(50)], [(i, i * 10) for i in range(50)], _async_mul_ten, input_col=1)
# Tuple with input_col and output_col
_helper(
[(i, i) for i in range(50)], [(i, i * 10) for i in range(50)], _async_mul_ten, input_col=0, output_col=1
)
_helper(
[(i, i) for i in range(50)], [(i, i, i * 10) for i in range(50)], _async_mul_ten, input_col=0, output_col=-1
)
# Dict with input_col
_helper(
[{"a": i, "b": i} for i in range(50)],
[{"a": i, "b": i * 10} for i in range(50)],
_async_mul_ten,
input_col="b",
)
# Dict with input_col and output_col
_helper(
[{"a": i, "b": i} for i in range(50)],
[{"a": i * 10, "b": i} for i in range(50)],
_async_mul_ten,
input_col="b",
output_col="a",
)
_helper(
[{"a": i, "b": i} for i in range(50)],
[{"a": i, "b": i, "c": i * 10} for i in range(50)],
_async_mul_ten,
input_col="b",
output_col="c",
)
# Multiple input_col
_helper(
[(i - 1, i, i + 1) for i in range(50)],
[((i - 1) * (i + 1), i) for i in range(50)],
_async_x_mul_y,
input_col=(0, 2),
)
_helper(
[(i - 1, i, i + 1) for i in range(50)],
[(i, (i - 1) * (i + 1)) for i in range(50)],
_async_x_mul_y,
input_col=(2, 0),
)
# Multiple input_col with output_col
_helper(
[(i - 1, i, i + 1) for i in range(50)],
[(i - 1, (i - 1) * (i + 1), i + 1) for i in range(50)],
_async_x_mul_y,
input_col=(0, 2),
output_col=1,
)
# Skip over `flatten` operation
_helper(
range(32),
[[i * 10 for i in range(16)], [i * 10 for i in range(16, 32)]],
_async_mul_ten,
flatten=False,
)
# Test that two DataPipes can run with separate asyncio event loops
dp1 = IterableWrapper(range(50))
dp1 = dp1.async_map_batches(_async_mul_ten, 16)
dp2 = IterableWrapper(range(50))
dp2 = dp2.async_map_batches(_async_mul_ten, 16)
for v1, v2, exp in zip(dp1, dp2, [i * 10 for i in range(50)]):
self.assertEqual(v1, exp)
self.assertEqual(v2, exp)
def test_threadpool_map(self):
target_length = 30
input_dp = IterableWrapper(range(target_length))
input_dp_parallel = IterableWrapper(range(target_length))
def fn(item, dtype=torch.float, *, sum=False):
data = torch.tensor(item, dtype=dtype)
return data if not sum else data.sum()
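# `threadpool_map` evaluates `fn` on a ThreadPoolExecutor while keeping output
# order aligned with the input, so results can be compared positionally below.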
# Functional Test: apply to each element correctly
map_dp = input_dp.threadpool_map(fn)
self.assertEqual(target_length, len(map_dp))
for x, y in zip(map_dp, range(target_length)):
self.assertEqual(x, torch.tensor(y, dtype=torch.float))
# Functional Test: works with partial function
map_dp = input_dp.threadpool_map(partial(fn, dtype=torch.int, sum=True))
for x, y in zip(map_dp, range(target_length)):
self.assertEqual(x, torch.tensor(y, dtype=torch.int).sum())
# __len__ Test: inherits length from source DataPipe
self.assertEqual(target_length, len(map_dp))
input_dp_nl = IDP_NoLen(range(target_length))
map_dp_nl = input_dp_nl.threadpool_map(lambda x: x)
for x, y in zip(map_dp_nl, range(target_length)):
self.assertEqual(x, torch.tensor(y, dtype=torch.float))
# __len__ Test: inherits length from source DataPipe - raises error when invalid
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(map_dp_nl)
# Test: two independent ThreadPoolExecutors running at the same time
map_dp_parallel = input_dp_parallel.threadpool_map(fn)
for x, y, z in zip(map_dp, map_dp_parallel, range(target_length)):
self.assertEqual(x, torch.tensor(z, dtype=torch.float))
self.assertEqual(y, torch.tensor(z, dtype=torch.float))
# Reset Test: DataPipe resets properly
n_elements_before_reset = 5
res_before_reset, res_after_reset = reset_after_n_next_calls(map_dp, n_elements_before_reset)
self.assertEqual(list(range(n_elements_before_reset)), res_before_reset)
self.assertEqual(list(range(target_length)), res_after_reset)
@suppress_warnings # Suppress warning for lambda fn
def test_threadpool_map_tuple_list_with_col_iterdatapipe(self):
def fn_11(d):
return -d
def fn_1n(d):
return -d, d
def fn_n1(d0, d1):
return d0 + d1
def fn_nn(d0, d1):
return -d0, -d1, d0 + d1
def fn_n1_def(d0, d1=1):
return d0 + d1
def fn_n1_kwargs(d0, d1, **kwargs):
return d0 + d1
def fn_n1_pos(d0, d1, *args):
return d0 + d1
def fn_n1_sep_pos(d0, *args, d1):
return d0 + d1
def fn_cmplx(d0, d1=1, *args, d2, **kwargs):
return d0 + d1
p_fn_n1 = partial(fn_n1, d1=1)
p_fn_cmplx = partial(fn_cmplx, d2=2)
def _helper(ref_fn, fn, input_col=None, output_col=None, error=None):
for constr in (list, tuple):
datapipe = IterableWrapper([constr((0, 1, 2)), constr((3, 4, 5)), constr((6, 7, 8))])
if ref_fn is None:
with self.assertRaises(error):
res_dp = datapipe.threadpool_map(fn, input_col, output_col)
list(res_dp)
else:
res_dp = datapipe.threadpool_map(fn, input_col, output_col)
ref_dp = datapipe.map(ref_fn)
if constr is list:
ref_dp = ref_dp.map(list)
self.assertEqual(list(res_dp), list(ref_dp), "First test failed")
# Reset
self.assertEqual(list(res_dp), list(ref_dp), "Test after reset failed")
_helper(lambda data: data, fn_n1_def, 0, 1)
_helper(lambda data: (data[0], data[1], data[0] + data[1]), fn_n1_def, [0, 1], 2)
_helper(lambda data: data, p_fn_n1, 0, 1)
_helper(lambda data: data, p_fn_cmplx, 0, 1)
_helper(lambda data: (data[0], data[1], data[0] + data[1]), p_fn_cmplx, [0, 1], 2)
_helper(lambda data: (data[0] + data[1],), fn_n1_pos, [0, 1, 2])
# Replacing with one input column and default output column
_helper(lambda data: (data[0], -data[1], data[2]), fn_11, 1)
_helper(lambda data: (data[0], (-data[1], data[1]), data[2]), fn_1n, 1)
# The index of input column is out of range
_helper(None, fn_1n, 3, error=IndexError)
# Unmatched input columns with fn arguments
_helper(None, fn_n1, 1, error=ValueError)
_helper(None, fn_n1, [0, 1, 2], error=ValueError)
_helper(None, lambda d0, d1: d0 + d1, 0, error=ValueError)
_helper(None, lambda d0, d1: d0 + d1, [0, 1, 2], error=ValueError)
_helper(None, fn_cmplx, 0, 1, ValueError)
_helper(None, fn_n1_pos, 1, error=ValueError)
_helper(None, fn_n1_def, [0, 1, 2], 1, error=ValueError)
_helper(None, p_fn_n1, [0, 1], error=ValueError)
_helper(None, fn_1n, [1, 2], error=ValueError)
# _helper(None, p_fn_cmplx, [0, 1, 2], error=ValueError)
_helper(None, fn_n1_sep_pos, [0, 1, 2], error=ValueError)
# Fn has keyword-only arguments
_helper(None, fn_n1_kwargs, 1, error=ValueError)
_helper(None, fn_cmplx, [0, 1], 2, ValueError)
# Replacing with multiple input columns and default output column (the left-most input column)
_helper(lambda data: (data[1], data[2] + data[0]), fn_n1, [2, 0])
_helper(lambda data: (data[0], (-data[2], -data[1], data[2] + data[1])), fn_nn, [2, 1])
# output_col can only be specified when input_col is not None
_helper(None, fn_n1, None, 1, error=ValueError)
# output_col can only be single-element list or tuple
_helper(None, fn_n1, None, [0, 1], error=ValueError)
# Single-element list as output_col
_helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, [0])
# Replacing with one input column and single specified output column
_helper(lambda data: (-data[1], data[1], data[2]), fn_11, 1, 0)
_helper(lambda data: (data[0], data[1], (-data[1], data[1])), fn_1n, 1, 2)
# The index of output column is out of range
_helper(None, fn_1n, 1, 3, error=IndexError)
_helper(lambda data: (data[0], data[0] + data[2], data[2]), fn_n1, [0, 2], 1)
_helper(lambda data: ((-data[1], -data[2], data[1] + data[2]), data[1], data[2]), fn_nn, [1, 2], 0)
# Appending the output at the end
_helper(lambda data: (*data, -data[1]), fn_11, 1, -1)
_helper(lambda data: (*data, (-data[1], data[1])), fn_1n, 1, -1)
_helper(lambda data: (*data, data[0] + data[2]), fn_n1, [0, 2], -1)
_helper(lambda data: (*data, (-data[1], -data[2], data[1] + data[2])), fn_nn, [1, 2], -1)
# Handling built-in functions (e.g. `dict`, `iter`, `int`, `str`) whose signatures cannot be inspected
_helper(lambda data: (str(data[0]), data[1], data[2]), str, 0)
_helper(lambda data: (data[0], data[1], int(data[2])), int, 2)
@suppress_warnings # Suppress warning for lambda fn
def test_threadpool_map_dict_with_col_iterdatapipe(self):
def fn_11(d):
return -d
def fn_1n(d):
return -d, d
def fn_n1(d0, d1):
return d0 + d1
def fn_nn(d0, d1):
return -d0, -d1, d0 + d1
def fn_n1_def(d0, d1=1):
return d0 + d1
p_fn_n1 = partial(fn_n1, d1=1)
def fn_n1_pos(d0, d1, *args):
return d0 + d1
def fn_n1_kwargs(d0, d1, **kwargs):
return d0 + d1
def fn_kwonly(*, d0, d1):
return d0 + d1
def fn_has_nondefault_kwonly(d0, *, d1):
return d0 + d1
def fn_cmplx(d0, d1=1, *args, d2, **kwargs):
return d0 + d1
p_fn_cmplx = partial(fn_cmplx, d2=2)
# Prevent modification in-place to support resetting
def _dict_update(data, newdata, remove_idx=None):
_data = dict(data)
_data.update(newdata)
if remove_idx:
for idx in remove_idx:
del _data[idx]
return _data
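# For dict elements, `input_col`/`output_col` are keys rather than indices; writing
# to a new `output_col` key adds it to the dict, as the cases below exercise.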
def _helper(ref_fn, fn, input_col=None, output_col=None, error=None):
datapipe = IterableWrapper([{"x": 0, "y": 1, "z": 2}, {"x": 3, "y": 4, "z": 5}, {"x": 6, "y": 7, "z": 8}])
if ref_fn is None:
with self.assertRaises(error):
res_dp = datapipe.threadpool_map(fn, input_col, output_col)
list(res_dp)
else:
res_dp = datapipe.threadpool_map(fn, input_col, output_col)
ref_dp = datapipe.map(ref_fn)
self.assertEqual(list(res_dp), list(ref_dp), "First test failed")
# Reset
self.assertEqual(list(res_dp), list(ref_dp), "Test after reset failed")
_helper(lambda data: data, fn_n1_def, "x", "y")
_helper(lambda data: data, p_fn_n1, "x", "y")
_helper(lambda data: data, p_fn_cmplx, "x", "y")
_helper(lambda data: _dict_update(data, {"z": data["x"] + data["y"]}), p_fn_cmplx, ["x", "y", "z"], "z")
_helper(lambda data: _dict_update(data, {"z": data["x"] + data["y"]}), fn_n1_def, ["x", "y"], "z")
_helper(None, fn_n1_pos, "x", error=ValueError)
_helper(None, fn_n1_kwargs, "x", error=ValueError)
# non-default kw-only args
_helper(None, fn_kwonly, ["x", "y"], error=ValueError)
_helper(None, fn_has_nondefault_kwonly, ["x", "y"], error=ValueError)
_helper(None, fn_cmplx, ["x", "y"], error=ValueError)
# Replacing with one input column and default output column
_helper(lambda data: _dict_update(data, {"y": -data["y"]}), fn_11, "y")
_helper(lambda data: _dict_update(data, {"y": (-data["y"], data["y"])}), fn_1n, "y")
# The key of input column is not in dict
_helper(None, fn_1n, "a", error=KeyError)
# Unmatched input columns with fn arguments
_helper(None, fn_n1, "y", error=ValueError)
_helper(None, fn_1n, ["x", "y"], error=ValueError)
_helper(None, fn_n1_def, ["x", "y", "z"], error=ValueError)
_helper(None, p_fn_n1, ["x", "y"], error=ValueError)
_helper(None, fn_n1_kwargs, ["x", "y", "z"], error=ValueError)
# Replacing with multiple input columns and default output column (the left-most input column)
_helper(lambda data: _dict_update(data, {"z": data["x"] + data["z"]}, ["x"]), fn_n1, ["z", "x"])
_helper(
lambda data: _dict_update(data, {"z": (-data["z"], -data["y"], data["y"] + data["z"])}, ["y"]),
fn_nn,
["z", "y"],
)
# output_col can only be specified when input_col is not None
_helper(None, fn_n1, None, "x", error=ValueError)
# output_col can only be single-element list or tuple
_helper(None, fn_n1, None, ["x", "y"], error=ValueError)
# Single-element list as output_col
_helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", ["x"])
# Replacing with one input column and single specified output column
_helper(lambda data: _dict_update(data, {"x": -data["y"]}), fn_11, "y", "x")
_helper(lambda data: _dict_update(data, {"z": (-data["y"], data["y"])}), fn_1n, "y", "z")
_helper(lambda data: _dict_update(data, {"y": data["x"] + data["z"]}), fn_n1, ["x", "z"], "y")
_helper(
lambda data: _dict_update(data, {"x": (-data["y"], -data["z"], data["y"] + data["z"])}),
fn_nn,
["y", "z"],
"x",
)
# Adding new key to dict for the output
_helper(lambda data: _dict_update(data, {"a": -data["y"]}), fn_11, "y", "a")
_helper(lambda data: _dict_update(data, {"a": (-data["y"], data["y"])}), fn_1n, "y", "a")
_helper(lambda data: _dict_update(data, {"a": data["x"] + data["z"]}), fn_n1, ["x", "z"], "a")
_helper(
lambda data: _dict_update(data, {"a": (-data["y"], -data["z"], data["y"] + data["z"])}),
fn_nn,
["y", "z"],
"a",
)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from unittest import TestCase
from torchdata.dataloader2 import DataLoader2
from torchdata.dataloader2.adapter import Shuffle
from torchdata.datapipes.iter import IterableWrapper
class AdapterTest(TestCase):
def test_shuffle(self) -> None:
size = 500
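# The Shuffle adapter toggles shuffling in the graph: Shuffle(True) inserts a
# Shuffler (with a warning) when none exists, Shuffle(False) disables an existing
# one, and Shuffle(None) keeps the datapipe's own shuffle setting.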
dp = IterableWrapper(range(size))
dl = DataLoader2(datapipe=dp)
self.assertEqual(list(range(size)), list(dl))
with self.assertWarns(Warning, msg="`shuffle=True` was set, but the datapipe does not contain a `Shuffler`."):
dl = DataLoader2(datapipe=dp, datapipe_adapter_fn=Shuffle(True))
self.assertNotEqual(list(range(size)), list(dl))
dp = IterableWrapper(range(size)).shuffle()
dl = DataLoader2(datapipe=dp)
self.assertNotEqual(list(range(size)), list(dl))
dl = DataLoader2(dp, Shuffle(True))
self.assertNotEqual(list(range(size)), list(dl))
dl = DataLoader2(dp, [Shuffle(None)])
self.assertNotEqual(list(range(size)), list(dl))
dl = DataLoader2(dp, [Shuffle(False)])
self.assertEqual(list(range(size)), list(dl))
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch.multiprocessing as mp
from torch.testing._internal.common_utils import slowTest
from torch.utils.data import DataLoader
from torchtext.datasets import AG_NEWS, AmazonReviewPolarity, IMDB, SQuAD1, SQuAD2, SST2
# TODO(124): Replace the following tests with the corresponding tests in TorchText
class TestTextExamples(unittest.TestCase):
def _test_helper(self, fn):
dp = fn()
for stage_dp in dp:
_ = list(stage_dp)
@staticmethod
def _collate_fn(batch):
return batch
def _test_DL_helper(self, fn):
mp.set_sharing_strategy("file_system")
dp = fn()
for stage_dp in dp:
dl = DataLoader(
stage_dp,
batch_size=8,
num_workers=4,
collate_fn=TestTextExamples._collate_fn,
multiprocessing_context="spawn",
)
_ = list(dl)
def test_SST(self) -> None:
self._test_helper(SST2)
self._test_DL_helper(SST2)
def test_AG_NEWS(self) -> None:
self._test_helper(AG_NEWS)
self._test_DL_helper(AG_NEWS)
@slowTest
def test_AmazonReviewPolarity(self) -> None:
self._test_helper(AmazonReviewPolarity)
self._test_DL_helper(AmazonReviewPolarity)
@slowTest
def test_IMDB(self) -> None:
self._test_helper(IMDB)
self._test_DL_helper(IMDB)
def test_SQuAD1(self) -> None:
self._test_helper(SQuAD1)
self._test_DL_helper(SQuAD1)
def test_SQuAD2(self) -> None:
self._test_helper(SQuAD2)
self._test_DL_helper(SQuAD2)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import bz2
import functools
import hashlib
import io
import itertools
import lzma
import os
import subprocess
import tarfile
import tempfile
import time
import unittest
import warnings
import zipfile
from functools import partial
from json.decoder import JSONDecodeError
import expecttest
from _utils._common_utils_for_test import create_temp_dir, create_temp_files, get_name, reset_after_n_next_calls
from torch.utils.data import DataLoader
from torchdata.dataloader2.adapter import CacheTimeout
from torchdata.datapipes.iter import (
Bz2FileLoader,
CSVDictParser,
CSVParser,
Decompressor,
FileLister,
FileOpener,
HashChecker,
IoPathFileLister,
IoPathFileOpener,
IoPathSaver,
IterableWrapper,
IterDataPipe,
JsonParser,
RarArchiveLoader,
Saver,
StreamReader,
TarArchiveLoader,
WebDataset,
XzFileLoader,
ZipArchiveLoader,
)
try:
import iopath
import torch
HAS_IOPATH = True
except ImportError:
HAS_IOPATH = False
skipIfNoIoPath = unittest.skipIf(not HAS_IOPATH, "no iopath")
try:
import rarfile
HAS_RAR_TOOLS = True
try:
rarfile.tool_setup()
subprocess.run(("rar", "-?"), check=True)
except (rarfile.RarCannotExec, subprocess.CalledProcessError):
HAS_RAR_TOOLS = False
except (ModuleNotFoundError, FileNotFoundError):
HAS_RAR_TOOLS = False
skipIfNoRarTools = unittest.skipIf(not HAS_RAR_TOOLS, "no rar tools")
try:
import portalocker
HAS_PORTALOCKER = True
except ImportError:
HAS_PORTALOCKER = False
skipIfNoPortalocker = unittest.skipIf(not HAS_PORTALOCKER, "No portalocker installed")
def filepath_fn(temp_dir_name, name: str) -> str:
return os.path.join(temp_dir_name, os.path.basename(name))
def _unbatch(x):
return x[0]
def _noop(x):
return x
class TestDataPipeLocalIO(expecttest.TestCase):
def setUp(self):
self.temp_dir = create_temp_dir()
self.temp_files = create_temp_files(self.temp_dir)
self.temp_sub_dir = create_temp_dir(self.temp_dir.name)
self.temp_sub_files = create_temp_files(self.temp_sub_dir, 4, False)
self.temp_dir_2 = create_temp_dir()
self.temp_files_2 = create_temp_files(self.temp_dir_2)
self.temp_sub_dir_2 = create_temp_dir(self.temp_dir_2.name)
self.temp_sub_files_2 = create_temp_files(self.temp_sub_dir_2, 4, False)
def tearDown(self):
try:
self.temp_sub_dir.cleanup()
self.temp_dir.cleanup()
self.temp_sub_dir_2.cleanup()
self.temp_dir_2.cleanup()
except Exception as e:
warnings.warn(f"TestDataPipeLocalIO was not able to cleanup temp dir due to {e}")
def _custom_files_set_up(self, files):
for fname, content in files.items():
temp_file_path = os.path.join(self.temp_dir.name, fname)
with open(temp_file_path, "w") as f:
f.write(content)
def _compressed_files_comparison_helper(self, expected_files, result, check_length: bool = True):
if check_length:
self.assertEqual(len(expected_files), len(result))
for res, expected_file in itertools.zip_longest(result, expected_files):
self.assertTrue(res is not None and expected_file is not None)
self.assertEqual(os.path.basename(res[0]), os.path.basename(expected_file))
with open(expected_file, "rb") as f:
self.assertEqual(res[1].read(), f.read())
res[1].close()
def _unordered_compressed_files_comparison_helper(self, expected_files, result, check_length: bool = True):
expected_names_to_files = {os.path.basename(f): f for f in expected_files}
if check_length:
self.assertEqual(len(expected_files), len(result))
for res in result:
fname = os.path.basename(res[0])
self.assertTrue(fname is not None)
self.assertTrue(fname in expected_names_to_files)
with open(expected_names_to_files[fname], "rb") as f:
self.assertEqual(res[1].read(), f.read())
res[1].close()
def test_csv_parser_iterdatapipe(self):
def make_path(fname):
return f"{self.temp_dir.name}/{fname}"
csv_files = {"1.csv": "key,item\na,1\nb,2", "empty.csv": "", "empty2.csv": "\n"}
self._custom_files_set_up(csv_files)
datapipe1 = IterableWrapper([make_path(fname) for fname in ["1.csv", "empty.csv", "empty2.csv"]])
datapipe2 = FileOpener(datapipe1, mode="b")
datapipe3 = datapipe2.map(get_name)
# Functional Test: yield one row at time from each file, skipping over empty content
csv_parser_dp = datapipe3.parse_csv()
expected_res = [["key", "item"], ["a", "1"], ["b", "2"], []]
self.assertEqual(expected_res, list(csv_parser_dp))
# Functional Test: yield one row at time from each file, skipping over empty content and header
csv_parser_dp = datapipe3.parse_csv(skip_lines=1)
expected_res = [["a", "1"], ["b", "2"]]
self.assertEqual(expected_res, list(csv_parser_dp))
# Functional Test: yield one row at time from each file with file name, skipping over empty content
csv_parser_dp = datapipe3.parse_csv(return_path=True)
expected_res = [("1.csv", ["key", "item"]), ("1.csv", ["a", "1"]), ("1.csv", ["b", "2"]), ("empty2.csv", [])]
self.assertEqual(expected_res, list(csv_parser_dp))
# Functional Test: yield one row at time from each file as tuple instead of list, skipping over empty content
csv_parser_dp = datapipe3.parse_csv(as_tuple=True)
expected_res = [("key", "item"), ("a", "1"), ("b", "2"), ()]
self.assertEqual(expected_res, list(csv_parser_dp))
# Reset Test:
csv_parser_dp = CSVParser(datapipe3, return_path=True)
n_elements_before_reset = 2
expected_res = [("1.csv", ["key", "item"]), ("1.csv", ["a", "1"]), ("1.csv", ["b", "2"]), ("empty2.csv", [])]
res_before_reset, res_after_reset = reset_after_n_next_calls(csv_parser_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "has no len"):
len(csv_parser_dp)
def test_csv_dict_parser_iterdatapipe(self):
def get_name(path_and_stream):
return os.path.basename(path_and_stream[0]), path_and_stream[1]
csv_files = {"1.csv": "key,item\na,1\nb,2", "empty.csv": "", "empty2.csv": "\n"}
self._custom_files_set_up(csv_files)
datapipe1 = FileLister(self.temp_dir.name, "*.csv")
datapipe2 = FileOpener(datapipe1, mode="b")
datapipe3 = datapipe2.map(get_name)
# Functional Test: yield one row at a time as dict, with the first row being the header (key)
csv_dict_parser_dp = datapipe3.parse_csv_as_dict()
expected_res1 = [{"key": "a", "item": "1"}, {"key": "b", "item": "2"}]
self.assertEqual(expected_res1, list(csv_dict_parser_dp))
# Functional Test: yield one row at a time as dict, skip over first row, with the second row being the header
csv_dict_parser_dp = datapipe3.parse_csv_as_dict(skip_lines=1)
expected_res2 = [{"a": "b", "1": "2"}]
self.assertEqual(expected_res2, list(csv_dict_parser_dp))
# Functional Test: yield one row at a time as dict with file name, and the first row being the header (key)
csv_dict_parser_dp = datapipe3.parse_csv_as_dict(return_path=True)
expected_res3 = [("1.csv", {"key": "a", "item": "1"}), ("1.csv", {"key": "b", "item": "2"})]
self.assertEqual(expected_res3, list(csv_dict_parser_dp))
# Reset Test
csv_dict_parser_dp = CSVDictParser(datapipe3)
expected_res4 = [{"key": "a", "item": "1"}, {"key": "b", "item": "2"}]
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(csv_dict_parser_dp, n_elements_before_reset)
self.assertEqual(expected_res4[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res4, res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "has no len"):
len(csv_dict_parser_dp)
def test_hash_checker_iterdatapipe(self):
hash_dict = {}
def fill_hash_dict():
for path in self.temp_files:
with open(path) as f:
hash_func = hashlib.sha256()
content = f.read().encode("utf-8")
hash_func.update(content)
hash_dict[path] = hash_func.hexdigest()
fill_hash_dict()
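# `HashChecker` reads each stream to verify its hash; by default it rewinds the
# stream afterwards so downstream consumers can re-read it, while `rewind=False`
# leaves the stream exhausted.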
datapipe1 = FileLister(self.temp_dir.name, "*")
datapipe2 = FileOpener(datapipe1, mode="b")
hash_check_dp = HashChecker(datapipe2, hash_dict)
expected_res = list(datapipe2)
# Functional Test: Ensure the DataPipe values are unchanged if the hashes are the same
for (expected_path, expected_stream), (actual_path, actual_stream) in zip(expected_res, hash_check_dp):
self.assertEqual(expected_path, actual_path)
self.assertEqual(expected_stream.read(), actual_stream.read())
# Functional Test: Ensure the rewind option works, and the stream is empty when there is no rewind
hash_check_dp_no_reset = HashChecker(datapipe2, hash_dict, rewind=False)
for (expected_path, _), (actual_path, actual_stream) in zip(expected_res, hash_check_dp_no_reset):
self.assertEqual(expected_path, actual_path)
self.assertEqual(b"", actual_stream.read())
# Functional Test: Error when file/path is not in hash_dict
hash_check_dp = HashChecker(datapipe2, {})
it = iter(hash_check_dp)
with self.assertRaisesRegex(RuntimeError, "Unspecified hash for file"):
next(it)
# Functional Test: Error when the hash is different
hash_dict[self.temp_files[0]] = "WRONG HASH"
hash_check_dp = HashChecker(datapipe2, hash_dict)
with self.assertRaisesRegex(RuntimeError, "does not match"):
list(hash_check_dp)
# Reset Test:
fill_hash_dict() # Reset the dict with correct values because we changed it in the last test case
hash_check_dp = datapipe2.check_hash(hash_dict)
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(hash_check_dp, n_elements_before_reset)
for (expected_path, expected_stream), (actual_path, actual_stream) in zip(datapipe2, res_before_reset):
self.assertEqual(expected_path, actual_path)
self.assertEqual(expected_stream.read(), actual_stream.read())
for (expected_path, expected_stream), (actual_path, actual_stream) in zip(datapipe2, res_after_reset):
self.assertEqual(expected_path, actual_path)
self.assertEqual(expected_stream.read(), actual_stream.read())
# __len__ Test: raises TypeError since the source FileOpener doesn't have a valid length
with self.assertRaisesRegex(TypeError, "FileOpenerIterDataPipe instance doesn't have valid length"):
len(hash_check_dp)
def test_json_parser_iterdatapipe(self):
def is_empty_json(path_and_stream):
return path_and_stream[0] == "empty.json"
def is_nonempty_json(path_and_stream):
return path_and_stream[0] != "empty.json"
json_files = {
"1.json": '["foo", {"bar":["baz", null, 1.0, 2]}]',
"empty.json": "",
"2.json": '{"__complex__": true, "real": 1, "imag": 2}',
}
self._custom_files_set_up(json_files)
datapipe1 = IterableWrapper([f"{self.temp_dir.name}/{fname}" for fname in ["empty.json", "1.json", "2.json"]])
datapipe2 = FileOpener(datapipe1, mode="b")
datapipe3 = datapipe2.map(get_name)
datapipe_empty = datapipe3.filter(is_empty_json)
datapipe_nonempty = datapipe3.filter(is_nonempty_json)
empty_json_dp = datapipe_empty.parse_json_files()
it = iter(empty_json_dp)
# Functional Test: dp fails when empty JSON file is given
with self.assertRaisesRegex(JSONDecodeError, "Expecting value"):
next(it)
# Functional Test: dp yields one json file at a time
json_dp = datapipe_nonempty.parse_json_files()
expected_res = [
("1.json", ["foo", {"bar": ["baz", None, 1.0, 2]}]),
("2.json", {"__complex__": True, "real": 1, "imag": 2}),
]
self.assertEqual(expected_res, list(json_dp))
# Reset Test:
json_dp = JsonParser(datapipe_nonempty)
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(json_dp, n_elements_before_reset)
self.assertEqual(expected_res[:n_elements_before_reset], res_before_reset)
self.assertEqual(expected_res, res_after_reset)
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "len"):
len(json_dp)
# kwargs Test:
json_dp = JsonParser(datapipe_nonempty, parse_int=str)
expected_res = [
("1.json", ["foo", {"bar": ["baz", None, 1.0, "2"]}]),
("2.json", {"__complex__": True, "real": "1", "imag": "2"}),
]
self.assertEqual(expected_res, list(json_dp))
def test_saver_iterdatapipe(self):
# Functional Test: Saving some data
name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
saver_dp = source_dp.save_to_disk(filepath_fn=partial(filepath_fn, self.temp_dir.name), mode="wb")
res_file_paths = list(saver_dp)
expected_paths = [filepath_fn(self.temp_dir.name, name) for name in name_to_data.keys()]
self.assertEqual(expected_paths, res_file_paths)
for name in name_to_data.keys():
p = filepath_fn(self.temp_dir.name, name)
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# Reset Test:
saver_dp = Saver(source_dp, filepath_fn=partial(filepath_fn, self.temp_dir.name), mode="wb")
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(saver_dp, n_elements_before_reset)
self.assertEqual(
[filepath_fn(self.temp_dir.name, "1.txt"), filepath_fn(self.temp_dir.name, "2.txt")], res_before_reset
)
self.assertEqual(expected_paths, res_after_reset)
for name in name_to_data.keys():
p = filepath_fn(self.temp_dir.name, name)
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# __len__ Test: returns the length of source DataPipe
self.assertEqual(3, len(saver_dp))
def _write_test_tar_files(self):
path = os.path.join(self.temp_dir.name, "test_tar.tar")
with tarfile.open(path, "w:tar") as tar:
tar.add(self.temp_files[0])
tar.add(self.temp_files[1])
tar.add(self.temp_files[2])
def _write_test_tar_gz_files(self):
path = os.path.join(self.temp_dir.name, "test_gz.tar.gz")
with tarfile.open(path, "w:gz") as tar:
tar.add(self.temp_files[0])
tar.add(self.temp_files[1])
tar.add(self.temp_files[2])
def test_tar_archive_reader_iterdatapipe(self):
self._write_test_tar_files()
datapipe1 = FileLister(self.temp_dir.name, "*.tar")
datapipe2 = FileOpener(datapipe1, mode="b")
tar_loader_dp = TarArchiveLoader(datapipe2)
self._write_test_tar_gz_files()
datapipe_gz_1 = FileLister(self.temp_dir.name, "*.tar.gz")
datapipe_gz_2 = FileOpener(datapipe_gz_1, mode="b")
gz_reader_dp = TarArchiveLoader(datapipe_gz_2)
# Functional Test: Read extracted files before reaching the end of the tarfile
self._compressed_files_comparison_helper(self.temp_files, tar_loader_dp, check_length=False)
self._compressed_files_comparison_helper(self.temp_files, gz_reader_dp, check_length=False)
# Load from decompressed file stream
decomp_dp = datapipe_gz_2.decompress()
decomp_reader_dp = TarArchiveLoader(decomp_dp)
self._compressed_files_comparison_helper(self.temp_files, decomp_reader_dp, check_length=False)
# Functional Test: Read extracted files after reaching the end of the tarfile
data_refs = list(tar_loader_dp)
self._compressed_files_comparison_helper(self.temp_files, data_refs)
data_refs_gz = list(gz_reader_dp)
self._compressed_files_comparison_helper(self.temp_files, data_refs_gz)
# Reset Test: reset the DataPipe after reading part of it
tar_loader_dp = datapipe2.load_from_tar()
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(tar_loader_dp, n_elements_before_reset)
# Check result accumulated before reset
self._compressed_files_comparison_helper(self.temp_files[:n_elements_before_reset], res_before_reset)
# Check result accumulated after reset
self._compressed_files_comparison_helper(self.temp_files, res_after_reset)
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(tar_loader_dp)
def _write_test_zip_files(self):
path = os.path.join(self.temp_dir.name, "test_zip.zip")
with zipfile.ZipFile(path, "w") as myzip:
myzip.write(self.temp_files[0], arcname=os.path.basename(self.temp_files[0]))
myzip.write(self.temp_files[1], arcname=os.path.basename(self.temp_files[1]))
myzip.write(self.temp_files[2], arcname=os.path.basename(self.temp_files[2]))
def test_zip_archive_reader_iterdatapipe(self):
self._write_test_zip_files()
datapipe1 = FileLister(self.temp_dir.name, "*.zip")
datapipe2 = FileOpener(datapipe1, mode="b")
zip_loader_dp = ZipArchiveLoader(datapipe2)
# Functional Test: read extracted files before reaching the end of the zipfile
self._compressed_files_comparison_helper(self.temp_files, zip_loader_dp, check_length=False)
# Functional Test: read extracted files after reaching the end of the zipfile
data_refs = list(zip_loader_dp)
self._compressed_files_comparison_helper(self.temp_files, data_refs)
# Reset Test: reset the DataPipe after reading part of it
zip_loader_dp = datapipe2.load_from_zip()
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(zip_loader_dp, n_elements_before_reset)
# Check the results accumulated before reset
self._compressed_files_comparison_helper(self.temp_files[:n_elements_before_reset], res_before_reset)
# Check the results accumulated after reset
self._compressed_files_comparison_helper(self.temp_files, res_after_reset)
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(zip_loader_dp)
def _write_test_xz_files(self):
for path in self.temp_files:
fname = os.path.basename(path)
temp_xzfile_pathname = os.path.join(self.temp_dir.name, f"{fname}.xz")
with open(path) as f:
with lzma.open(temp_xzfile_pathname, "w") as xz:
xz.write(f.read().encode("utf-8"))
def test_xz_archive_reader_iterdatapipe(self):
# Worth noting that the .tar and .zip tests write multiple files into the same compressed archive,
# whereas here we create multiple .xz files in the same directory.
self._write_test_xz_files()
datapipe1 = FileLister(self.temp_dir.name, "*.xz")
datapipe2 = FileOpener(datapipe1, mode="b")
xz_loader_dp = XzFileLoader(datapipe2)
# Functional Test: Read extracted files before reaching the end of the xzfile
self._unordered_compressed_files_comparison_helper(self.temp_files, xz_loader_dp, check_length=False)
# Functional Test: Read extracted files after reaching the end of the xzfile
data_refs = list(xz_loader_dp)
self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs)
# Reset Test: reset the DataPipe after reading part of it
xz_loader_dp = datapipe2.load_from_xz()
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(xz_loader_dp, n_elements_before_reset)
# Check result accumulated before reset
self.assertEqual(n_elements_before_reset, len(res_before_reset))
self._unordered_compressed_files_comparison_helper(self.temp_files, res_before_reset, check_length=False)
# Check result accumulated after reset
self._unordered_compressed_files_comparison_helper(self.temp_files, res_after_reset)
# Reset Test: Ensure the order is consistent between iterations
for r1, r2 in zip(list(xz_loader_dp), list(xz_loader_dp)):
self.assertEqual(r1[0], r2[0])
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(xz_loader_dp)
def _write_test_bz2_files(self):
for path in self.temp_files:
fname = os.path.basename(path)
temp_bz2file_pathname = os.path.join(self.temp_dir.name, f"{fname}.bz2")
with open(path) as f:
with bz2.open(temp_bz2file_pathname, "w") as f_bz2:
f_bz2.write(f.read().encode("utf-8"))
def test_bz2_archive_reader_iterdatapipe(self):
self._write_test_bz2_files()
filelist_dp = FileLister(self.temp_dir.name, "*.bz2")
fileopen_dp = FileOpener(filelist_dp, mode="b")
bz2_loader_dp = Bz2FileLoader(fileopen_dp)
# Functional Test: Read extracted files before reaching the end of the bz2file
self._unordered_compressed_files_comparison_helper(self.temp_files, bz2_loader_dp, check_length=False)
# Functional Test: Read extracted files after reaching the end of the bz2file
data_refs = list(bz2_loader_dp)
self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs)
# Reset Test: reset the DataPipe after reading part of it
bz2_loader_dp = fileopen_dp.load_from_bz2()
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(bz2_loader_dp, n_elements_before_reset)
# Check result accumulated before reset
self.assertEqual(n_elements_before_reset, len(res_before_reset))
self._unordered_compressed_files_comparison_helper(self.temp_files, res_before_reset, check_length=False)
# Check result accumulated after reset
self._unordered_compressed_files_comparison_helper(self.temp_files, res_after_reset)
# Reset Test: Ensure the order is consistent between iterations
for r1, r2 in zip(list(bz2_loader_dp), list(bz2_loader_dp)):
self.assertEqual(r1[0], r2[0])
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(bz2_loader_dp)
def _decompressor_tar_test_helper(self, expected_files, tar_decompress_dp):
for _file, child_obj in tar_decompress_dp:
for expected_file, tarinfo in zip(expected_files, child_obj):
if not tarinfo.isfile():
continue
extracted_fobj = child_obj.extractfile(tarinfo)
with open(expected_file, "rb") as f:
self.assertEqual(f.read(), extracted_fobj.read())
def _decompressor_xz_test_helper(self, xz_decompress_dp):
for xz_file_name, xz_stream in xz_decompress_dp:
expected_file = xz_file_name[:-3]
with open(expected_file, "rb") as f:
self.assertEqual(f.read(), xz_stream.read())
def _decompressor_bz2_test_helper(self, bz2_decompress_dp):
for bz2_file_name, bz2_stream in bz2_decompress_dp:
expected_file = bz2_file_name.rsplit(".", 1)[0]
with open(expected_file, "rb") as f:
self.assertEqual(f.read(), bz2_stream.read())
def _write_single_gz_file(self):
import gzip
with gzip.open(f"{self.temp_dir.name}/temp.gz", "wb") as k:
with open(self.temp_files[0], "rb") as f:
k.write(f.read())
def test_decompressor_iterdatapipe(self):
self._write_test_tar_files()
self._write_test_tar_gz_files()
self._write_single_gz_file()
self._write_test_zip_files()
self._write_test_xz_files()
self._write_test_bz2_files()
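# When `file_type` is None, `Decompressor` infers the compression format from the
# file extension, which the "work without file type" cases below rely on.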
# Functional Test: work with .tar files
tar_file_dp = FileLister(self.temp_dir.name, "*.tar")
tar_load_dp = FileOpener(tar_file_dp, mode="b")
tar_decompress_dp = Decompressor(tar_load_dp, file_type="tar")
self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp)
# Functional test: work with .tar.gz files
tar_gz_file_dp = FileLister(self.temp_dir.name, "*.tar.gz")
tar_gz_load_dp = FileOpener(tar_gz_file_dp, mode="b")
tar_gz_decompress_dp = Decompressor(tar_gz_load_dp, file_type="tar")
self._decompressor_tar_test_helper(self.temp_files, tar_gz_decompress_dp)
# Functional Test: work with .gz files
gz_file_dp = IterableWrapper([f"{self.temp_dir.name}/temp.gz"])
gz_load_dp = FileOpener(gz_file_dp, mode="b")
gz_decompress_dp = Decompressor(gz_load_dp, file_type="gzip")
for _, gz_stream in gz_decompress_dp:
with open(self.temp_files[0], "rb") as f:
self.assertEqual(f.read(), gz_stream.read())
# Functional Test: work with .zip files
zip_file_dp = FileLister(self.temp_dir.name, "*.zip")
zip_load_dp = FileOpener(zip_file_dp, mode="b")
zip_decompress_dp = zip_load_dp.decompress(file_type="zip")
for _, zip_stream in zip_decompress_dp:
for fname in self.temp_files:
with open(fname, "rb") as f:
self.assertEqual(f.read(), zip_stream.read(name=os.path.basename(fname)))
# Functional Test: work with .xz files
xz_file_dp = FileLister(self.temp_dir.name, "*.xz")
xz_load_dp = FileOpener(xz_file_dp, mode="b")
xz_decompress_dp = Decompressor(xz_load_dp, file_type="lzma")
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work with .bz2 files
bz2_file_dp = FileLister(self.temp_dir.name, "*.bz2")
bz2_load_dp = FileOpener(bz2_file_dp, mode="b")
bz2_decompress_dp = Decompressor(bz2_load_dp, file_type="bz2")
self._decompressor_bz2_test_helper(bz2_decompress_dp)
# Functional Test: work without file type as input for .tar files
tar_decompress_dp = Decompressor(tar_load_dp, file_type=None)
self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp)
# Functional Test: work without file type as input for .xz files
xz_decompress_dp = Decompressor(xz_load_dp)
self._decompressor_xz_test_helper(xz_decompress_dp)
# Functional Test: work without file type as input for .tar.gz files
tar_gz_decompress_dp = Decompressor(tar_gz_load_dp, file_type=None)
self._decompressor_tar_test_helper(self.temp_files, tar_gz_decompress_dp)
# Functional Test: work without file type as input for .bz2 files
bz2_decompress_dp = Decompressor(bz2_load_dp, file_type=None)
self._decompressor_bz2_test_helper(bz2_decompress_dp)
# Functional Test: compression type works for both upper and lower case strings
tar_decompress_dp = Decompressor(tar_load_dp, file_type="TAr")
self._decompressor_tar_test_helper(self.temp_files, tar_decompress_dp)
# Functional Test: raises ValueError for an invalid compression type
with self.assertRaisesRegex(ValueError, "not a valid CompressionType"):
Decompressor(tar_load_dp, file_type="ABC")
# Reset Test: reset the DataPipe after reading part of it
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(xz_decompress_dp, n_elements_before_reset)
self._decompressor_xz_test_helper(res_before_reset)
self._decompressor_xz_test_helper(res_after_reset)
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "has no len"):
len(tar_decompress_dp)
def _write_text_files(self):
name_to_data = {"1.text": b"DATA", "2.text": b"DATA", "3.text": b"DATA"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
saver_dp = source_dp.save_to_disk(filepath_fn=partial(filepath_fn, self.temp_dir.name), mode="wb")
list(saver_dp)
@staticmethod
def _slow_fn(tmpdirname, x):
with open(os.path.join(tmpdirname, str(os.getpid())), "w") as pid_fh:
pid_fh.write("anything")
time.sleep(10)
return (x, "str")
@skipIfNoPortalocker
def test_disk_cache_locks(self):
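# `on_disk_cache` uses portalocker to serialize cache fills: only one worker runs
# `_slow_fn` (writing its pid file) while the other workers block on the lock and
# then read the cached result, so exactly two files remain on disk.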
with tempfile.TemporaryDirectory() as tmpdirname:
file_name = os.path.join(tmpdirname, "test.bin")
dp = IterableWrapper([file_name])
dp = dp.on_disk_cache(filepath_fn=_noop)
dp = dp.map(functools.partial(self._slow_fn, tmpdirname))
dp = dp.end_caching(mode="t", filepath_fn=_noop, timeout=120)
dp = FileOpener(dp)
dp = StreamReader(dp)
dl = DataLoader(dp, num_workers=10, multiprocessing_context="spawn", batch_size=1, collate_fn=_unbatch)
result = list(dl)
all_files = []
for (_, _, filenames) in os.walk(tmpdirname):
all_files += filenames
# We expect only two files: one named after the worker pid and the cached 'downloaded' one
self.assertEqual(2, len(all_files))
self.assertEqual("str", result[0][1])
# cleanup cached files
for f in os.listdir(tmpdirname):
os.remove(os.path.join(tmpdirname, f))
dp = CacheTimeout(2)(dp) # Calling adapter manually to work with classic DataLoader
dl = DataLoader(dp, num_workers=10, multiprocessing_context="spawn", batch_size=1, collate_fn=_unbatch)
with self.assertRaisesRegex(Exception, "OnDiskCache Exception"):
result = list(dl)
# TODO(120): this test currently only covers reading from local
# filesystem. It needs to be modified once test data can be stored on
# gdrive/onedrive
@skipIfNoIoPath
def test_io_path_file_lister_iterdatapipe(self):
datapipe = IoPathFileLister(root=self.temp_sub_dir.name)
# check all file paths within sub_folder are listed
for path in datapipe:
self.assertTrue(path in self.temp_sub_files)
datapipe = IterableWrapper([self.temp_sub_dir.name])
datapipe = datapipe.list_files_by_iopath()
for path in datapipe:
self.assertTrue(path in self.temp_sub_files)
@skipIfNoIoPath
def test_io_path_file_lister_iterdatapipe_with_list(self):
datapipe = IoPathFileLister(root=[self.temp_sub_dir.name, self.temp_sub_dir_2.name])
file_lister = list(datapipe)
file_lister.sort()
all_temp_files = list(self.temp_sub_files + self.temp_sub_files_2)
all_temp_files.sort()
# check all file paths within sub_folder are listed
self.assertEqual(file_lister, all_temp_files)
datapipe = IterableWrapper([self.temp_sub_dir.name, self.temp_sub_dir_2.name])
datapipe = datapipe.list_files_by_iopath()
results = list(datapipe)
results.sort()
self.assertEqual(results, all_temp_files)
@skipIfNoIoPath
def test_io_path_file_loader_iterdatapipe(self):
datapipe1 = IoPathFileLister(root=self.temp_sub_dir.name)
datapipe2 = IoPathFileOpener(datapipe1)
# check contents of file match
for _, f in datapipe2:
self.assertEqual(f.read(), "0123456789abcdef")
# Reset Test: Ensure the resulting streams are still readable after the DataPipe is reset/exhausted
self._write_text_files()
lister_dp = FileLister(self.temp_dir.name, "*.text")
iopath_file_opener_dp = lister_dp.open_files_by_iopath(mode="rb")
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(iopath_file_opener_dp, n_elements_before_reset)
self.assertEqual(2, len(res_before_reset))
self.assertEqual(3, len(res_after_reset))
for _name, stream in res_before_reset:
self.assertEqual(b"DATA", stream.read())
for _name, stream in res_after_reset:
self.assertEqual(b"DATA", stream.read())
@skipIfNoIoPath
def test_io_path_saver_iterdatapipe(self):
# Functional Test: Saving some data
name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
saver_dp = source_dp.save_by_iopath(filepath_fn=partial(filepath_fn, self.temp_dir.name), mode="wb")
res_file_paths = list(saver_dp)
expected_paths = [filepath_fn(self.temp_dir.name, name) for name in name_to_data.keys()]
self.assertEqual(expected_paths, res_file_paths)
for name in name_to_data.keys():
p = filepath_fn(self.temp_dir.name, name)
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# Reset Test:
saver_dp = IoPathSaver(source_dp, filepath_fn=partial(filepath_fn, self.temp_dir.name), mode="wb")
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(saver_dp, n_elements_before_reset)
self.assertEqual(
[filepath_fn(self.temp_dir.name, "1.txt"), filepath_fn(self.temp_dir.name, "2.txt")], res_before_reset
)
self.assertEqual(expected_paths, res_after_reset)
for name in name_to_data.keys():
p = filepath_fn(self.temp_dir.name, name)
with open(p) as f:
self.assertEqual(name_to_data[name], f.read().encode())
# __len__ Test: returns the length of source DataPipe
self.assertEqual(3, len(saver_dp))
@skipIfNoIoPath
def test_io_path_saver_file_lock(self):
# Duplicate filenames with different data; only the last value per key survives in the dict
name_to_data = {"1.txt": b"DATA1", "1.txt": b"DATA2", "2.txt": b"DATA3", "2.txt": b"DATA4"} # noqa: F601
# Add sharding_filter to shard the data across 2 workers
source_dp = IterableWrapper(list(name_to_data.items())).sharding_filter()
# Use appending as the mode
saver_dp = source_dp.save_by_iopath(filepath_fn=partial(filepath_fn, self.temp_dir.name), mode="ab")
import torch.utils.data.graph_settings
from torch.utils.data import DataLoader
num_workers = 2
line_lengths = []
dl = DataLoader(saver_dp, num_workers=num_workers, multiprocessing_context="spawn")
for filename in dl:
with open(filename[0]) as f:
lines = f.readlines()
x = len(lines)
line_lengths.append(x)
self.assertEqual(x, 1)
self.assertEqual(num_workers, len(line_lengths))
def _write_test_rar_files(self):
# `rarfile` can only read, not write, .rar archives, so we use system utilities instead
rar_archive_name = os.path.join(self.temp_dir.name, "test_rar")
subprocess.run(("rar", "a", rar_archive_name + ".rar", *self.temp_files), check=True)
# Nested RAR
subprocess.run(("rar", "a", rar_archive_name + "1.rar", self.temp_files[0]), check=True)
subprocess.run(("rar", "a", rar_archive_name + "2.rar", *self.temp_files[1:]), check=True)
subprocess.run(
("rar", "a", rar_archive_name + "_nested.rar", rar_archive_name + "1.rar", rar_archive_name + "2.rar"),
check=True,
)
# Nested RAR in TAR
with tarfile.open(rar_archive_name + "_nested.tar", "w:tar") as tar:
tar.add(rar_archive_name + "1.rar")
tar.add(rar_archive_name + "2.rar")
@skipIfNoRarTools
def test_rar_archive_loader(self):
self._write_test_rar_files()
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar.rar")])
datapipe2 = FileOpener(datapipe1, mode="b")
rar_loader_dp = RarArchiveLoader(datapipe2)
# Functional Test: read extracted files before reaching the end of the rarfile
self._unordered_compressed_files_comparison_helper(self.temp_files, rar_loader_dp, check_length=False)
# Functional Test: read extracted files after reaching the end of the rarfile
data_refs = list(rar_loader_dp)
self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs)
# Reset Test: reset the DataPipe after reading part of it
rar_loader_dp = datapipe2.load_from_rar()
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(rar_loader_dp, n_elements_before_reset)
# Check the results accumulated before reset
self._unordered_compressed_files_comparison_helper(self.temp_files[:n_elements_before_reset], res_before_reset)
# Check the results accumulated after reset
self._unordered_compressed_files_comparison_helper(self.temp_files, res_after_reset)
# __len__ Test: doesn't have valid length
with self.assertRaisesRegex(TypeError, "instance doesn't have valid length"):
len(rar_loader_dp)
# Nested RAR
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar_nested.rar")])
datapipe2 = FileOpener(datapipe1, mode="b")
rar_loader_dp_1 = RarArchiveLoader(datapipe2)
rar_loader_dp_2 = RarArchiveLoader(rar_loader_dp_1)
with self.assertRaisesRegex(ValueError, "Nested RAR archive is not supported"):
list(rar_loader_dp_2)
# Nested RAR in TAR
datapipe1 = IterableWrapper([os.path.join(self.temp_dir.name, "test_rar_nested.tar")])
datapipe2 = FileOpener(datapipe1, mode="b")
tar_loader_dp = TarArchiveLoader(datapipe2)
rar_loader_dp = RarArchiveLoader(tar_loader_dp)
# Functional Test: read extracted files before reaching the end of the rarfile
self._unordered_compressed_files_comparison_helper(self.temp_files, rar_loader_dp, check_length=False)
# Functional Test: read extracted files after reaching the end of the rarfile
data_refs = list(rar_loader_dp)
self._unordered_compressed_files_comparison_helper(self.temp_files, data_refs)
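# Add a single member named `name` with content `value` to an open tar archive.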
def _add_data_to_wds_tar(self, archive, name, value):
if isinstance(value, str):
value = value.encode()
info = tarfile.TarInfo(name)
info.size = len(value)
archive.addfile(info, io.BytesIO(value))
def _create_wds_tar(self, dest, nsamples):
with tarfile.open(dest, mode="w") as archive:
for i in range(nsamples):
self._add_data_to_wds_tar(archive, f"data/{i}.txt", f"text{i}")
self._add_data_to_wds_tar(archive, f"data/{i}.bin", f"bin{i}")
def test_webdataset(self) -> None:
# Functional Test: groups samples correctly
source_dp = IterableWrapper(
# simulated tar file content
[
("/path/to/file1.jpg", b"1"),
("/path/to/_something_", b"nothing"),
("/path/to/file1.cls", b"2"),
("/path/to/file2.jpg", b"3"),
("/path/to/file2.cls", b"4"),
]
)
web_dataset = WebDataset(source_dp)
self.assertEqual(
# expected grouped output
[
{".jpg": b"1", ".cls": b"2", "__key__": "/path/to/file1"},
{".jpg": b"3", ".cls": b"4", "__key__": "/path/to/file2"},
],
list(web_dataset),
)
def test_webdataset2(self) -> None:
# Setup
nsamples = 10
self._create_wds_tar(os.path.join(self.temp_dir.name, "wds.tar"), nsamples)
def decode(item):
key, value = item
if key.endswith(".txt"):
return key, value.read().decode("utf-8")
if key.endswith(".bin"):
return key, value.read().decode("utf-8")
datapipe1 = FileLister(self.temp_dir.name, "wds*.tar")
datapipe2 = FileOpener(datapipe1, mode="b")
dataset = datapipe2.load_from_tar().map(decode).webdataset()
items = list(dataset)
assert len(items) == nsamples
assert items[0][".txt"] == "text0"
assert items[9][".bin"] == "bin9"
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import tempfile
import unittest
import torch.multiprocessing as mp
from torch.testing._internal.common_utils import slowTest
from torch.utils.data import DataLoader
current = os.path.dirname(os.path.abspath(__file__))
ROOT = os.path.dirname(current)
sys.path.insert(0, ROOT)
from examples.audio.librispeech import LibriSpeech
class TestAudioExamples(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def _test_helper(self, fn, *args, **kwargs):
dp = fn(*args, **kwargs)
_ = list(dp)
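# Identity collate: keep each batch as a plain list of samples.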
@staticmethod
def _collate_fn(batch):
return batch
def _test_DL_helper(self, fn, *args, **kwargs):
dp = fn(*args, **kwargs)
mp.set_sharing_strategy("file_system")
dl = DataLoader(
dp,
batch_size=8,
num_workers=4,
collate_fn=TestAudioExamples._collate_fn,
multiprocessing_context="fork", # Using Fork her because `torchaudio.load` doesn't work well with spawn
)
for _ in dl:
pass
@slowTest
def test_LibriSpeech_dev(self) -> None:
root = self.temp_dir.name
self._test_helper(LibriSpeech, root, "dev-other")
# With cache and DataLoader
self._test_DL_helper(LibriSpeech, root, "dev-other")
@unittest.skipIf(True, "Dataset is too large to run on CI")
def test_LibriSpeech_train(self) -> None:
root = self.temp_dir.name
self._test_helper(LibriSpeech, root, "train-clean-100")
# With cache and DataLoader
self._test_DL_helper(LibriSpeech, root, "train-clean-100")
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import expecttest
from torchdata.datapipes.iter import MapToIterConverter
from torchdata.datapipes.map import InMemoryCacheHolder, MapDataPipe, SequenceWrapper, UnZipper
class TestMapDataPipe(expecttest.TestCase):
def test_unzipper_mapdatapipe(self) -> None:
source_dp = SequenceWrapper([(i, i + 10, i + 20) for i in range(10)])
# Functional Test: unzips each sequence, with `sequence_length` specified
dp1: MapDataPipe
dp2: MapDataPipe
dp3: MapDataPipe
dp1, dp2, dp3 = UnZipper(source_dp, sequence_length=3) # type: ignore[misc]
self.assertEqual(list(range(10)), list(dp1))
self.assertEqual(list(range(10, 20)), list(dp2))
self.assertEqual(list(range(20, 30)), list(dp3))
# Functional Test: skipping over specified values
dp2, dp3 = source_dp.unzip(sequence_length=3, columns_to_skip=[0])
self.assertEqual(list(range(10, 20)), list(dp2))
self.assertEqual(list(range(20, 30)), list(dp3))
(dp2,) = source_dp.unzip(sequence_length=3, columns_to_skip=[0, 2])
self.assertEqual(list(range(10, 20)), list(dp2))
source_dp = SequenceWrapper([(i, i + 10, i + 20, i + 30) for i in range(10)])
dp2, dp3 = source_dp.unzip(sequence_length=4, columns_to_skip=[0, 3])
self.assertEqual(list(range(10, 20)), list(dp2))
self.assertEqual(list(range(20, 30)), list(dp3))
# __len__ Test: the lengths of child DataPipes are correct
self.assertEqual((10, 10), (len(dp2), len(dp3)))
def test_map_to_iter_converter_datapipe(self) -> None:
# Functional Test: ensure the conversion without indices input is correct
source_dp = SequenceWrapper(range(10))
iter_dp = source_dp.to_iter_datapipe()
self.assertEqual(list(range(10)), list(iter_dp))
# Functional Test: ensure conversion with custom indices is correct
source_dp2 = SequenceWrapper({"a": 0, "b": 1, "c": 2})
iter_dp2 = MapToIterConverter(source_dp2, indices=["a", "b", "c"])
self.assertEqual([0, 1, 2], list(iter_dp2))
# __len__ Test: the lengths of the outputs are correct
self.assertEqual(10, len(iter_dp))
self.assertEqual(3, len(iter_dp2))
def test_in_memory_cache_holder_mapdatapipe(self) -> None:
source_dp = SequenceWrapper(range(10))
cache_dp = source_dp.in_memory_cache()
# Functional Test: Cache DP should just return the data without changing the values
self.assertEqual(list(range(10)), list(cache_dp))
# Functional Test: Ensure the objects are the same ones from source DataPipe
cache_dp = InMemoryCacheHolder(source_dp) # type: ignore[arg-type]
res1 = list(cache_dp)
res2 = list(cache_dp)
self.assertTrue(all(id(source) == id(cache) for source, cache in zip(source_dp, res1)))
self.assertTrue(all(id(source) == id(cache) for source, cache in zip(source_dp, res2)))
# __len__ Test: inherits length from source_dp
self.assertEqual(10, len(cache_dp))
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import queue
import random
import socket
import sys
import unittest
from functools import partial
from unittest import TestCase
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.testing._internal.common_utils import instantiate_parametrized_tests, parametrize
from torch.utils.data import DataLoader
from torchdata.dataloader2 import DataLoader2, DistributedReadingService
from torchdata.datapipes.iter import IterableWrapper
from torchdata.datapipes.iter.util.distributed import PrefetchTimeoutError
TEST_MASTER_ADDR = "127.0.0.1"
DEFAULT_WORLD_SIZE = 2
if not dist.is_available():
print("Distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
_backends = ["gloo"]
if dist.is_mpi_available():
_backends.append("mpi")
if dist.is_nccl_available() and torch.cuda.device_count() > 0:
_backends.append("nccl")
world_size_parametrize = parametrize("world_size", [1, DEFAULT_WORLD_SIZE])
backend_parametrize = parametrize("backend", _backends)
def abs_path(path):
return os.path.join(os.path.dirname(__file__), os.path.normpath(path))
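# Bind to port 0 so the OS assigns a free ephemeral port; the socket is closed
# and the port number is reused as MASTER_PORT.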
def _get_open_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
s.close()
return str(port)
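# Sentinel placed on the result queue to signal that all ranks have finished.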
class TerminateSignal:
pass
# TODO(ejguan): Use queue for all distributed tests
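# Spawn `world_size` processes running `fn(rank, world_size, backend, q, *args)`
# and drain the shared queue until a TerminateSignal arrives.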
def launch_distributed_training(backend, world_size, *args, fn):
os.environ["MASTER_ADDR"] = TEST_MASTER_ADDR
os.environ["MASTER_PORT"] = _get_open_port()
ctx = mp.get_context("spawn")
q = ctx.Queue()
ps = []
for rank in range(world_size):
p = ctx.Process(
target=fn,
args=(
rank,
world_size,
backend,
q,
*args,
),
)
p.start()
ps.append(p)
res = []
while True:
try:
d = q.get()
if isinstance(d, TerminateSignal):
break
res.append(d)
except queue.Empty:
continue
for p in ps:
p.join()
return res
def _dist_iterate_one_epoch(dl, seed=None):
r"""
Iterate a full epoch of DataLoader and set seeds for global RNGs if provided.
"""
if seed is not None:
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
res = []
for d in dl:
res.append(d)
# Simulate training synchronization
dist.barrier()
return res
def _finalize_distributed_queue(rank, q):
r"""
Synchronize all distributed processes to guarantee all data have been put into
the Multiprocessing Queue.
"""
pg = dist.new_group(backend="gloo")
end_tensor = torch.tensor([rank], dtype=torch.int64)
dist.all_reduce(end_tensor, group=pg)
if rank == 0:
q.put(TerminateSignal())
dist.destroy_process_group(pg)
class DistributedTest(TestCase):
@staticmethod
def _test_fullsync(rank, world_size, backend, q):
dist.init_process_group(backend, rank=rank, world_size=world_size)
# Use a prime number to ensure uneven data sharding across ranks
data_length = 23
dp = IterableWrapper(list(range(data_length))).sharding_filter()
torch.utils.data.graph_settings.apply_sharding(dp, world_size, rank)
dp1 = dp.fullsync()
for _ in range(2):
res = _dist_iterate_one_epoch(dp1)
assert res == list(range(rank, data_length // world_size * world_size, world_size))
# Timeout Test
dp2 = dp.fullsync(timeout=0.01)
try:
for _ in range(2):
_ = list(dp2)
except Exception as e:
assert isinstance(e, PrefetchTimeoutError)
# Test that reset/shutdown does not hang while paused
dp3 = dp.fullsync()
it = iter(dp3)
next(it)
dp3.pause()
it2 = iter(dp3) # Reset
next(it2)
dp4 = dp.prefetch(2)
it = iter(dp4)
next(it)
dp4.pause()
it2 = iter(dp4) # Reset
next(it2)
_finalize_distributed_queue(rank, q)
@world_size_parametrize
@backend_parametrize
def test_fullsync(self, world_size, backend) -> None:
world_size = world_size if backend != "nccl" else torch.cuda.device_count()
launch_distributed_training(backend, world_size, fn=DistributedTest._test_fullsync)
@staticmethod
def _get_dataloader(data_length: int, dl2: bool, shuffle: bool, rs=None):
data_source = IterableWrapper(list(range(data_length)))
dp = data_source.sharding_filter()
if shuffle:
dp = dp.shuffle()
if dl2:
if rs is None:
rs = DistributedReadingService()
dl = DataLoader2(dp, reading_service=rs)
else:
dp = dp.fullsync()
dl = DataLoader(dp)
return dl
@staticmethod
def _test_distributed_training(dl2, rank, world_size, backend, q):
dist.init_process_group(backend, rank=rank, world_size=world_size)
# Use a prime number to ensure uneven data sharding across ranks
data_length = 23
# No shuffle
dl = DistributedTest._get_dataloader(data_length, dl2=dl2, shuffle=False)
res = _dist_iterate_one_epoch(dl)
assert sorted(res) == list(range(rank, data_length // world_size * world_size, world_size))
# Shuffle
dl = DistributedTest._get_dataloader(data_length, dl2=dl2, shuffle=True)
results = []
for _ in range(2):
res = _dist_iterate_one_epoch(dl, seed=123)
results.append(res)
assert results[0] == results[1]
# Different seed
res = _dist_iterate_one_epoch(dl, seed=321)
results.append(res)
assert len(results[0]) == len(results[2])
assert results[0] != results[2]
_finalize_distributed_queue(rank, q)
if dl2:
dl.shutdown()
@backend_parametrize
def test_distributed_dl2(self, backend) -> None:
world_size = DEFAULT_WORLD_SIZE if backend != "nccl" else torch.cuda.device_count()
launch_distributed_training(backend, world_size, fn=partial(DistributedTest._test_distributed_training, True))
@backend_parametrize
def test_elastic_training_dl2(self, backend) -> None:
world_size = DEFAULT_WORLD_SIZE if backend != "nccl" else torch.cuda.device_count()
nnodes = 1
from torch.distributed import run
run.main(
[
"--run_path",
f"--nnodes={nnodes}",
f"--nproc_per_node={world_size}",
abs_path("bin/elastic_training.py"),
"--" + backend,
"--dl2",
],
)
@backend_parametrize
def test_distributed_dl1(self, backend) -> None:
world_size = DEFAULT_WORLD_SIZE if backend != "nccl" else torch.cuda.device_count()
launch_distributed_training(backend, world_size, fn=partial(DistributedTest._test_distributed_training, False))
@unittest.skipIf(sys.version_info < (3, 8), "Torch Elastic requires Python >= 3.8")
@backend_parametrize
def test_elastic_training_dl1(self, backend) -> None:
world_size = DEFAULT_WORLD_SIZE if backend != "nccl" else torch.cuda.device_count()
nnodes = 1
from torch.distributed import run
run.main(
[
"--run_path",
f"--nnodes={nnodes}",
f"--nproc_per_node={world_size}",
abs_path("bin/elastic_training.py"),
"--" + backend,
"--dl1",
],
)
instantiate_parametrized_tests(DistributedTest)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import pickle
import unittest
import warnings
from functools import partial
from io import StringIO
from operator import itemgetter
from typing import List
import expecttest
import torch
import torchdata.datapipes.iter as iterdp
import torchdata.datapipes.map as mapdp
from _utils._common_utils_for_test import create_temp_dir, create_temp_files
from torch.utils.data.datapipes.utils.common import DILL_AVAILABLE
from torchdata.datapipes.iter import IterableWrapper
from torchdata.datapipes.map import SequenceWrapper
if DILL_AVAILABLE:
import dill
dill.extend(use_dill=False)
try:
import datasets
except ImportError:
datasets = None
try:
import fsspec
except ImportError:
fsspec = None
try:
import iopath
except ImportError:
iopath = None
try:
import subprocess
import rarfile
try:
rarfile.tool_setup()
subprocess.run(("rar", "-?"), check=True)
except (rarfile.RarCannotExec, subprocess.CalledProcessError):
rarfile = None
except (ModuleNotFoundError, FileNotFoundError):
rarfile = None
try:
import torcharrow
import torcharrow.dtypes as dt
DTYPE = dt.Struct([dt.Field("Values", dt.int32)])
except ImportError:
torcharrow = None
dt = None
DTYPE = None
def _fake_batch_fn(batch):
return [d + 1 for d in batch]
def _fake_fn_ls(x):
return [x, x]
def _filepath_fn(name: str, dir) -> str:
return os.path.join(dir, os.path.basename(name))
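# Drop DataPipes whose optional third-party dependency is unavailable in this environment.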
def _filter_by_module_availability(datapipes):
filter_set = set()
if datasets is None:
filter_set.update([iterdp.HuggingFaceHubReader])
if fsspec is None:
filter_set.update([iterdp.FSSpecFileLister, iterdp.FSSpecFileOpener, iterdp.FSSpecSaver])
if iopath is None:
filter_set.update([iterdp.IoPathFileLister, iterdp.IoPathFileOpener, iterdp.IoPathSaver])
if rarfile is None:
filter_set.update([iterdp.RarArchiveLoader])
if torcharrow is None or not DILL_AVAILABLE:
filter_set.update([iterdp.DataFrameMaker, iterdp.ParquetDataFrameLoader])
return [dp for dp in datapipes if dp[0] not in filter_set]
def _convert_to_tensor(data):
return torch.tensor(data)
class TestIterDataPipeSerialization(expecttest.TestCase):
def setUp(self):
self.temp_dir = create_temp_dir()
self.temp_files = create_temp_files(self.temp_dir)
self.temp_sub_dir = create_temp_dir(self.temp_dir.name)
self.temp_sub_files = create_temp_files(self.temp_sub_dir, 4, False)
def tearDown(self):
try:
self.temp_sub_dir.cleanup()
self.temp_dir.cleanup()
except Exception as e:
warnings.warn(f"TestIterDataPipeSerialization was not able to cleanup temp dir due to {e}")
def _serialization_test_helper(self, datapipe, use_dill):
if use_dill:
serialized_dp = dill.dumps(datapipe)
deserialized_dp = dill.loads(serialized_dp)
else:
serialized_dp = pickle.dumps(datapipe)
deserialized_dp = pickle.loads(serialized_dp)
try:
self.assertEqual(list(datapipe), list(deserialized_dp))
except AssertionError as e:
print(f"{datapipe} is failing.")
raise e
def _serialization_dataframe_test_helper(self, datapipe, use_dill):
if use_dill:
serialized_dp = dill.dumps(datapipe)
deserialized_dp = dill.loads(serialized_dp)
else:
serialized_dp = pickle.dumps(datapipe)
deserialized_dp = pickle.loads(serialized_dp)
for df1, df2 in zip(datapipe, deserialized_dp):
for exp, act in zip(df1, df2):
self.assertEqual(exp, act)
def _serialization_test_for_single_dp(self, dp, use_dill, is_dataframe=False):
test_helper_fn = self._serialization_dataframe_test_helper if is_dataframe else self._serialization_test_helper
# 1. Testing for serialization before any iteration starts
test_helper_fn(dp, use_dill)
# 2. Testing for serialization after DataPipe is partially read
it = iter(dp)
_ = next(it)
test_helper_fn(dp, use_dill)
# 3. Testing for serialization after DataPipe is fully read
it = iter(dp)
_ = list(it)
test_helper_fn(dp, use_dill)
def _serialization_test_for_dp_with_children(self, dp1, dp2, use_dill):
# 1. Testing for serialization before any iteration starts
self._serialization_test_helper(dp1, use_dill=use_dill)
self._serialization_test_helper(dp2, use_dill=use_dill)
# 2. Testing for serialization after DataPipe is partially read
it1, it2 = iter(dp1), iter(dp2)
_, _ = next(it1), next(it2)
self._serialization_test_helper(dp1, use_dill=use_dill)
self._serialization_test_helper(dp2, use_dill=use_dill)
# 2.5. Testing for serialization after one child DataPipe is fully read
# (Only for DataPipes with children DataPipes)
it1 = iter(dp1)
_ = list(it1) # fully read one child
self._serialization_test_helper(dp1, use_dill=use_dill)
self._serialization_test_helper(dp2, use_dill=use_dill)
# 3. Testing for serialization after DataPipe is fully read
it2 = iter(dp2)
_ = list(it2) # fully read the other child
self._serialization_test_helper(dp1, use_dill=use_dill)
self._serialization_test_helper(dp2, use_dill=use_dill)
def test_serializable(self):
# A tuple of 4 objects
# (DataPipeConstructor, custom_input_datapipe=None, dp_args=(), dp_kwargs={})
picklable_datapipes: List = [
(iterdp.BatchMapper, IterableWrapper([(0, 0), (0, 0), (0, 0), (0, 0)]), (_fake_batch_fn, 2, 1), {}),
(iterdp.BucketBatcher, IterableWrapper([0, 0, 0, 0, 0, 0, 0]), (5,), {}),
(iterdp.Bz2FileLoader, None, (), {}),
(
iterdp.CSVDictParser,
IterableWrapper(
[("f1", StringIO("Label,1,1\nLabel,2,2\nLabel,3,3")), ("f2", StringIO("L,1,1\r\nL,2,2\r\nL,3,3"))]
),
(),
{},
),
(
iterdp.CSVParser,
IterableWrapper(
[("f1", StringIO("Label,1,1\nLabel,2,2\nLabel,3,3")), ("f2", StringIO("L,1,1\r\nL,2,2\r\nL,3,3"))]
),
(),
{},
),
(iterdp.Cycler, None, (2,), {}),
(iterdp.DataFrameMaker, IterableWrapper([(i,) for i in range(3)]), (), {"dtype": DTYPE}),
(iterdp.Decompressor, None, (), {}),
(iterdp.Dropper, IterableWrapper([(0, 0), (0, 0), (0, 0), (0, 0)]), ([1]), {}),
(iterdp.Enumerator, None, (2,), {}),
(iterdp.FlatMapper, None, (_fake_fn_ls,), {}),
(iterdp.ShuffledFlatMapper, None, (_fake_fn_ls,), {"buffer_size": 1}),
(iterdp.Flattener, IterableWrapper([(0, (0, 1)), (0, (0, 1)), (0, (0, 1)), (0, (0, 1))]), ([1]), {}),
(iterdp.FSSpecFileLister, ".", (), {}),
(iterdp.FSSpecFileOpener, None, (), {}),
(
iterdp.FSSpecSaver,
IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]),
(),
{"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)},
),
(iterdp.GDriveReader, None, (), {}),
(iterdp.HashChecker, None, ({},), {}),
(iterdp.Header, None, (3,), {}),
(iterdp.HttpReader, None, (), {}),
(iterdp.HuggingFaceHubReader, None, (), {}),
# TODO(593): (ejguan): Deterministic serialization is required
# (iterdp.InBatchShuffler, IterableWrapper(range(10)).batch(3), (), {}),
(iterdp.InMemoryCacheHolder, None, (), {}),
(iterdp.IndexAdder, IterableWrapper([{"a": 1, "b": 2}, {"c": 3, "a": 1}]), ("label",), {}),
(iterdp.IoPathFileLister, ".", (), {}),
(iterdp.IoPathFileOpener, None, (), {}),
(
iterdp.IoPathSaver,
IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]),
(),
{"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)},
),
(
iterdp.IterKeyZipper,
IterableWrapper([("a", 100), ("b", 200), ("c", 300)]),
(IterableWrapper([("a", 1), ("b", 2), ("c", 3)]), itemgetter(0), itemgetter(0)),
{},
),
(
iterdp.JsonParser,
IterableWrapper(
[
("1.json", StringIO('["fo", {"ba":["baz", null, 1.0, 2]}]')),
("2.json", StringIO('{"__cx__": true, "r": 1, "i": 2}')),
]
),
(),
{},
),
(iterdp.LengthSetter, None, (3,), {}),
(
iterdp.LineReader,
IterableWrapper(
[("file1", StringIO("Line1\nLine2")), ("file2", StringIO("Line2,1\r\nLine2,2\r\nLine2,3"))]
),
(),
{},
),
(iterdp.MapToIterConverter, SequenceWrapper(range(10)), (), {}),
(
iterdp.MaxTokenBucketizer,
IterableWrapper(["1", "22", "1", "4444", "333", "1", "22", "22", "333"]),
(4,),
{},
),
(
iterdp.MapKeyZipper,
IterableWrapper([("a", 1), ("b", 2), ("c", 3)]),
(SequenceWrapper({"a": 100, "b": 200, "c": 300}), itemgetter(0)),
{},
),
(
iterdp.MultiplexerLongest,
IterableWrapper(range(10)),
(),
{},
),
(iterdp.OnDiskCacheHolder, None, (), {}),
(iterdp.OnlineReader, None, (), {}),
(
iterdp.ParagraphAggregator,
IterableWrapper([("f1", "L1"), ("f1", "L2"), ("f2", "21"), ("f2", "22")]),
(),
{},
),
(iterdp.Prefetcher, None, (), {}),
(iterdp.ParquetDataFrameLoader, None, (), {"dtype": DTYPE}),
(iterdp.RarArchiveLoader, None, (), {}),
(
iterdp.Rows2Columnar,
IterableWrapper([[{"a": 1}, {"b": 2, "a": 1}], [{"a": 1, "b": 200}, {"c": 3}]]),
(),
{},
),
(iterdp.Repeater, None, (2,), {}),
(iterdp.SampleMultiplexer, {IterableWrapper([0] * 10): 0.5, IterableWrapper([1] * 10): 0.5}, (), {}),
(
iterdp.Saver,
IterableWrapper([("1.txt", b"DATA1"), ("2.txt", b"DATA2"), ("3.txt", b"DATA3")]),
(),
{"mode": "wb", "filepath_fn": partial(_filepath_fn, dir=self.temp_dir.name)},
),
(iterdp.Slicer, IterableWrapper([(0, 0), (0, 0), (0, 0), (0, 0)]), ([1]), {}),
(iterdp.TarArchiveLoader, None, (), {}),
# TODO(594): Add serialization tests for optional DataPipe
# (iterdp.TFRecordLoader, None, (), {}),
(iterdp.ThreadPoolMapper, None, (_fake_fn_ls,), {}),
(iterdp.UnZipper, IterableWrapper([(i, i + 10) for i in range(10)]), (), {"sequence_length": 2}),
(iterdp.WebDataset, IterableWrapper([("foo.txt", b"1"), ("bar.txt", b"2")]), (), {}),
(iterdp.XzFileLoader, None, (), {}),
(iterdp.ZipArchiveLoader, None, (), {}),
(iterdp.ZipperLongest, IterableWrapper(range(10)), (), {}),
]
picklable_datapipes = _filter_by_module_availability(picklable_datapipes)
# Skipping value comparison for these DataPipes
# Most of them return streams not comparable by `self.assertEqual`
# Others are similar to caching where the outputs depend on other DataPipes
dp_skip_comparison = {
iterdp.Bz2FileLoader,
iterdp.Decompressor,
iterdp.FileOpener,
iterdp.FSSpecFileOpener,
iterdp.GDriveReader,
iterdp.IoPathFileOpener,
iterdp.HashChecker,
iterdp.HttpReader,
iterdp.HuggingFaceHubReader,
iterdp.OnDiskCacheHolder,
iterdp.OnlineReader,
iterdp.ParquetDataFrameLoader,
iterdp.SampleMultiplexer,
iterdp.RarArchiveLoader,
iterdp.TarArchiveLoader,
iterdp.TFRecordLoader,
iterdp.XzFileLoader,
iterdp.ZipArchiveLoader,
}
# These DataPipes produce multiple DataPipes as outputs and those should be compared
dp_compare_children = {iterdp.UnZipper}
for dpipe, custom_input, dp_args, dp_kwargs in picklable_datapipes:
try:
# Creating input (usually a DataPipe) for the specific dpipe being tested
if custom_input is None:
custom_input = IterableWrapper(range(10))
if dpipe in dp_skip_comparison:  # Make sure they are picklable and loadable (no value comparison)
datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
serialized_dp = pickle.dumps(datapipe)
_ = pickle.loads(serialized_dp)
elif dpipe in dp_compare_children: # DataPipes that have children
dp1, dp2 = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self._serialization_test_for_dp_with_children(dp1, dp2, use_dill=False)
else: # Single DataPipe that requires comparison
datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
is_dataframe = issubclass(dpipe, (iterdp.DataFrameMaker, iterdp.ParquetDataFrameLoader))
self._serialization_test_for_single_dp(datapipe, use_dill=False, is_dataframe=is_dataframe)
except Exception as e:
print(f"{dpipe} is failing.")
raise e
def test_serializable_with_dill(self):
"""Only for DataPipes that take in a function as argument"""
input_dp = IterableWrapper(range(10))
ref_idp = IterableWrapper(range(10))
ref_mdp = SequenceWrapper(range(10))
unpicklable_datapipes: List = [
(iterdp.BatchMapper, (lambda batch: [d + 1 for d in batch], 2), {}),
(iterdp.FlatMapper, (lambda x: [x, x],), {}),
(iterdp.ShuffledFlatMapper, (lambda x: [x, x],), {"buffer_size": 1}),
(iterdp.IterKeyZipper, (ref_idp, lambda x: x, None, True, 100), {}),
(iterdp.MapKeyZipper, (ref_mdp, lambda x: x), {}),
(iterdp.OnDiskCacheHolder, (lambda x: x,), {}),
(iterdp.ParagraphAggregator, (lambda x: x,), {}),
(iterdp.ThreadPoolMapper, (lambda x: x,), {}),
]
# Skipping value comparison for these DataPipes
dp_skip_comparison = {iterdp.OnDiskCacheHolder, iterdp.ParagraphAggregator}
for dpipe, dp_args, dp_kwargs in unpicklable_datapipes:
if DILL_AVAILABLE:
try:
if dpipe in dp_skip_comparison: # Make sure they are picklable/loadable (no value comparison)
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
serialized_dp = dill.dumps(datapipe)
_ = dill.loads(serialized_dp)
else:
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self._serialization_test_for_single_dp(datapipe, use_dill=True)
except Exception as e:
print(f"{dpipe} is failing.")
raise e
else:
dp_no_attribute_error = (iterdp.OnDiskCacheHolder,)
try:
with self.assertWarnsRegex(UserWarning, r"^Local function is not supported by pickle"):
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
if isinstance(datapipe, dp_no_attribute_error):
_ = pickle.dumps(datapipe)
else:
with self.assertRaises(AttributeError):
_ = pickle.dumps(datapipe)
except Exception as e:
print(f"{dpipe} is failing.")
raise e
class TestMapDataPipeSerialization(expecttest.TestCase):
def _serialization_test_helper(self, datapipe):
serialized_dp = pickle.dumps(datapipe)
deserialized_dp = pickle.loads(serialized_dp)
try:
self.assertEqual(list(datapipe), list(deserialized_dp))
except AssertionError as e:
print(f"{datapipe} is failing.")
raise e
def _serialization_test_for_dp_with_children(self, dp1, dp2):
self._serialization_test_helper(dp1)
self._serialization_test_helper(dp2)
def test_serializable(self):
picklable_datapipes: List = [
(mapdp.InMemoryCacheHolder, None, (), {}),
(mapdp.IterToMapConverter, IterableWrapper([(i, i) for i in range(10)]), (), {}),
(mapdp.UnZipper, SequenceWrapper([(i, i + 10) for i in range(10)]), (), {"sequence_length": 2}),
]
dp_skip_comparison = set()
# These DataPipes produce multiple DataPipes as outputs and those should be compared
dp_compare_children = {mapdp.UnZipper}
for dpipe, custom_input, dp_args, dp_kwargs in picklable_datapipes:
try:
# Creating input (usually a DataPipe) for the specific dpipe being tested
if custom_input is None:
custom_input = SequenceWrapper(range(10))
if dpipe in dp_skip_comparison:  # Make sure they are picklable and loadable (no value comparison)
datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
serialized_dp = pickle.dumps(datapipe)
_ = pickle.loads(serialized_dp)
elif dpipe in dp_compare_children: # DataPipes that have children
dp1, dp2 = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self._serialization_test_for_dp_with_children(dp1, dp2)
else: # Single DataPipe that requires comparison
datapipe = dpipe(custom_input, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self._serialization_test_helper(datapipe)
except Exception as e:
print(f"{dpipe} is failing.")
raise e
def test_serializable_with_dill(self):
"""Only for DataPipes that take in a function as argument"""
pass
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import types
import unittest
from typing import Dict, Iterator, List, Tuple, TypeVar
import expecttest
from _utils._common_utils_for_test import IS_WINDOWS
from torch.utils.data import IterDataPipe
from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES
from torchdata.dataloader2 import DataLoader2, ReadingServiceInterface
from torchdata.dataloader2.graph import find_dps, list_dps, remove_dp, replace_dp, traverse_dps
from torchdata.dataloader2.graph.utils import _find_replicable_branches
from torchdata.dataloader2.random import SeedGenerator
from torchdata.dataloader2.utils.dispatch import (
_DummyIterDataPipe,
find_lca_round_robin_sharding_dp,
find_non_dispatching_branches,
)
from torchdata.datapipes.iter import IterableWrapper, Mapper, ShardingRoundRobinDispatcher
from torchdata.datapipes.utils import to_graph
T_co = TypeVar("T_co", covariant=True)
try:
import graphviz
HAS_GRAPHVIZ = True
except ImportError:
HAS_GRAPHVIZ = False
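# Pass-through DataPipe that TempReadingService wraps around each Mapper so tests
# can observe when iteration starts and stops.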
class Adaptor(IterDataPipe[T_co]):
def __init__(self, datapipe: IterDataPipe) -> None:
self.datapipe = datapipe
self.started = False
def __iter__(self) -> Iterator[T_co]:
yield from self.datapipe
class DummyIterDataPipe(IterDataPipe[T_co]):
def __iter__(self) -> Iterator[T_co]:
yield from range(10)
class TempReadingService(ReadingServiceInterface):
adaptors: List[IterDataPipe] = []
def initialize(self, datapipe: IterDataPipe) -> IterDataPipe:
graph = traverse_dps(datapipe)
dps = find_dps(graph, Mapper)
for dp in reversed(dps):
new_dp = Adaptor(dp)
self.adaptors.append(new_dp)
graph = replace_dp(graph, dp, new_dp)
return list(graph.values())[0][0]
def initialize_iteration(self, seed_generator: SeedGenerator) -> None:
seed_generator.seed(123)
for dp in self.adaptors:
dp.started = True
def finalize_iteration(self) -> None:
for dp in self.adaptors:
dp.started = False
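# Simple named transforms used to build the test DataPipe graphs below.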
def _x_and_x_plus_5(x):
return [x, x + 5]
def _x_mod_2(x):
return x % 2
def _x_mult_2(x):
return x * 2
class TestGraph(expecttest.TestCase):
def _get_datapipes(self) -> Tuple[IterDataPipe, IterDataPipe, IterDataPipe]:
src_dp = IterableWrapper(range(20))
m1 = src_dp.map(_x_and_x_plus_5)
ub = m1.unbatch()
c1, c2 = ub.demux(2, _x_mod_2)
dm = c1.main_datapipe
m2 = c1.map(_x_mult_2)
dp = m2.zip(c2)
return traverse_dps(dp), (src_dp, m1, ub, dm, c1, c2, m2, dp)
def test_find_dps(self) -> None:
graph, (_, m1, *_, m2, _) = self._get_datapipes() # pyre-ignore
dps = find_dps(graph, Mapper)
expected_dps = {m1, m2}
for dp in dps:
self.assertTrue(dp in expected_dps)
def test_list_dps(self) -> None:
def _validate_fn(dps, exp_dps):
self.assertEqual(len(dps), len(exp_dps))
# Validate BFS Order
for dp, exp_dp in zip(dps, exp_dps):
self.assertEqual(dp, exp_dp)
graph, (
src_dp,
m1,
ub,
dm,
c1,
c2,
m2,
dp,
) = self._get_datapipes()
exp_all_dps = [dp, m2, c2, c1, dm, ub, m1, src_dp]
# List all DataPipes
dps = list_dps(graph)
_validate_fn(dps, exp_all_dps)
# List all DataPipes excluding a single DataPipe
dps = list_dps(graph, exclude_dps=m1)
*exp_dps, _, _ = exp_all_dps
_validate_fn(dps, exp_dps)
# Exclude a DataPipe on one branch
dps = list_dps(graph, exclude_dps=m2)
exp_dps = [dp, c2]
_validate_fn(dps, exp_dps)
# List all DataPipes excluding multiple DataPipes
dps = list_dps(graph, exclude_dps=[m1, m2])
exp_dps = [dp, c2]
_validate_fn(dps, exp_dps)
def _validate_graph(self, graph, nested_dp):
self.assertEqual(len(graph), len(nested_dp))
for dp_id, sub_nested_dp in zip(graph, nested_dp):
self.assertEqual(graph[dp_id][0], sub_nested_dp[0])
if len(graph[dp_id][1]) > 0:
self._validate_graph(graph[dp_id][1], sub_nested_dp[1])
def test_replace_dps(self) -> None:
# pyre-fixme[23]: Unable to unpack 3 values, 2 were expected.
graph, (
src_dp,
m1,
ub,
dm,
c1,
c2,
m2,
dp,
) = self._get_datapipes()
new_dp1 = Adaptor(m1)
new_dp2 = Adaptor(m2)
new_dp3 = DummyIterDataPipe()
graph = replace_dp(graph, m1, new_dp1)
exp_g1 = [
[
dp,
[
[m2, [[c1, [[dm, [[ub, [[new_dp1, [[m1, [[src_dp, []]]]]]]]]]]]]],
[c2, [[dm, [[ub, [[new_dp1, [[m1, [[src_dp, []]]]]]]]]]]],
],
]
]
self._validate_graph(traverse_dps(dp), exp_g1)
graph = replace_dp(graph, m2, new_dp2)
exp_g2 = [
[
dp,
[
[new_dp2, [[m2, [[c1, [[dm, [[ub, [[new_dp1, [[m1, [[src_dp, []]]]]]]]]]]]]]]],
[c2, [[dm, [[ub, [[new_dp1, [[m1, [[src_dp, []]]]]]]]]]]],
],
]
]
self._validate_graph(traverse_dps(dp), exp_g2)
graph = replace_dp(graph, m1, new_dp3)
exp_g3 = [
[
dp,
[
[new_dp2, [[m2, [[c1, [[dm, [[ub, [[new_dp1, [[new_dp3, []]]]]]]]]]]]]],
[c2, [[dm, [[ub, [[new_dp1, [[new_dp3, []]]]]]]]]],
],
]
]
self._validate_graph(traverse_dps(dp), exp_g3)
def test_remove_dps(self) -> None:
# pyre-fixme[23]: Unable to unpack 3 values, 2 were expected.
graph, (
src_dp,
m1,
ub,
dm,
c1,
c2,
m2,
dp,
) = self._get_datapipes()
graph = remove_dp(graph, m1)
exp_g1 = [[dp, [[m2, [[c1, [[dm, [[ub, [[src_dp, []]]]]]]]]], [c2, [[dm, [[ub, [[src_dp, []]]]]]]]]]]
self._validate_graph(traverse_dps(dp), exp_g1)
graph = remove_dp(graph, m2)
exp_g2 = [[dp, [[c1, [[dm, [[ub, [[src_dp, []]]]]]]], [c2, [[dm, [[ub, [[src_dp, []]]]]]]]]]]
self._validate_graph(traverse_dps(dp), exp_g2)
with self.assertRaisesRegex(RuntimeError, "Cannot remove the source DataPipe"):
remove_dp(graph, src_dp)
with self.assertRaisesRegex(RuntimeError, "Cannot remove the receiving DataPipe"):
remove_dp(graph, dp)
def test_reading_service(self) -> None:
_, (*_, dp) = self._get_datapipes() # pyre-ignore
rs = TempReadingService()
dl = DataLoader2(dp, reading_service=rs)
self.assertTrue(len(rs.adaptors) == 0)
it = iter(dl)
for new_dp in rs.adaptors:
self.assertTrue(new_dp.started)
res = list(it)
self.assertEqual(len(res), 20)
for new_dp in rs.adaptors:
self.assertFalse(new_dp.started)
self.assertEqual(res, list(dl))
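# Replace `datapipe` in the graph with a ShardingRoundRobinDispatcher that wraps it;
# returns the updated graph and the new dispatcher.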
def insert_round_robin_sharding(graph, datapipe):
dispatch_dp = ShardingRoundRobinDispatcher(datapipe, SHARDING_PRIORITIES.MULTIPROCESSING)
return replace_dp(graph, datapipe, dispatch_dp), dispatch_dp
def replace_by_dummy(graph, datapipe):
return replace_dp(graph, datapipe, _DummyIterDataPipe())
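# Attach an instance-level `is_replicable` that returns False so the graph utilities
# treat this DataPipe as non-replicable.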
def make_non_replicable_dp(datapipe):
datapipe.is_replicable = types.MethodType(lambda self: False, datapipe)
return datapipe
class TestNonReplicableDataPipe(expecttest.TestCase):
def _make_dp(self):
r"""
Create a DataPipe graph that covers most of the cases, including:
- single-branch pipeline
- multi-branch pipeline
- pipeline that has circular references
single_br_dp -------------------------------------
ch1 \
/ \ \
multi_br_dp -->forker_dp--> -> fork_zip_dp -> end_dp ->
\ / /
<------- ch2 /
/ \ /
cir_br_dp -> cir_map_dp --------------------------
"""
# Single-branch
single_br_dp = IterableWrapper(list(range(10)))
# Multi-branch
multi_br_dp = IterableWrapper(list(range(10)))
ch1, ch2 = multi_br_dp.fork(2)
forker_dp = ch1.main_datapipe
fork_zip_dp = ch1.zip(ch2)
# Circular-branch
cir_br_dp = IterableWrapper(list(range(10)))
cir_map_dp = cir_br_dp.map(_x_mult_2)
# Force to circular reference
cir_br_dp.cir_dep = cir_map_dp
end_dp = single_br_dp.zip(fork_zip_dp, cir_map_dp)
graph = traverse_dps(end_dp)
return single_br_dp, multi_br_dp, forker_dp, ch1, ch2, fork_zip_dp, cir_br_dp, cir_map_dp, end_dp, graph
def test_single_round_robin_sharding_dp(self):
single_br_dp, *_, graph = self._make_dp()
graph, single_br_dp = insert_round_robin_sharding(graph, single_br_dp)
self.assertEqual(find_lca_round_robin_sharding_dp(graph), single_br_dp)
# The same non-shardable DataPipe on both branches
_, multi_br_dp, *_, graph = self._make_dp()
graph, multi_br_dp = insert_round_robin_sharding(graph, multi_br_dp)
self.assertEqual(find_lca_round_robin_sharding_dp(graph), multi_br_dp)
_, _, _, ch1, _, fork_zip_dp, *_, graph = self._make_dp()
graph, ch1 = insert_round_robin_sharding(graph, ch1)
self.assertEqual(find_lca_round_robin_sharding_dp(graph), fork_zip_dp)
# Circular reference
*_, cir_br_dp, cir_map_dp, _, graph = self._make_dp()
graph, cir_br_dp = insert_round_robin_sharding(graph, cir_br_dp)
self.assertEqual(find_lca_round_robin_sharding_dp(graph), cir_map_dp)
*_, cir_map_dp, _, graph = self._make_dp()
graph, cir_map_dp = insert_round_robin_sharding(graph, cir_map_dp)
self.assertEqual(find_lca_round_robin_sharding_dp(graph), cir_map_dp)
def test_multi_round_robin_sharding_dps(self):
single_br_dp, multi_br_dp, *_, end_dp, graph = self._make_dp()
graph, single_br_dp = insert_round_robin_sharding(graph, single_br_dp)
graph, multi_br_dp = insert_round_robin_sharding(graph, multi_br_dp)
self.assertEqual(find_lca_round_robin_sharding_dp(graph), end_dp)
single_br_dp, _, _, ch1, *_, end_dp, graph = self._make_dp()
graph, single_br_dp = insert_round_robin_sharding(graph, single_br_dp)
graph, ch1 = insert_round_robin_sharding(graph, ch1)
self.assertEqual(find_lca_round_robin_sharding_dp(graph), end_dp)
_, multi_br_dp, _, ch1, _, fork_zip_dp, *_, graph = self._make_dp()
graph, multi_br_dp = insert_round_robin_sharding(graph, multi_br_dp)
graph, ch1 = insert_round_robin_sharding(graph, ch1)
self.assertEqual(find_lca_round_robin_sharding_dp(graph), fork_zip_dp)
single_br_dp, *_, cir_br_dp, _, end_dp, graph = self._make_dp()
graph, single_br_dp = insert_round_robin_sharding(graph, single_br_dp)
graph, cir_br_dp = insert_round_robin_sharding(graph, cir_br_dp)
self.assertEqual(find_lca_round_robin_sharding_dp(graph), end_dp)
def test_non_dispatching_branches(self):
r"""
There should be a single DataPipe as the lowest common ancestor of all
non-dispatching DataPipes that is replaced by ``DummyIterDataPipe``.
"""
single_br_dp, *_, fork_zip_dp, _, cir_map_dp, _, graph = self._make_dp()
graph = replace_by_dummy(graph, single_br_dp)
dps = find_non_dispatching_branches(graph)
self.assertEqual(len(dps), 2)
self.assertTrue(all(dp in (fork_zip_dp, cir_map_dp) for dp in dps))
single_br_dp, multi_br_dp, *_, cir_map_dp, _, graph = self._make_dp()
graph = replace_by_dummy(graph, multi_br_dp)
dps = find_non_dispatching_branches(graph)
self.assertEqual(len(dps), 2)
self.assertTrue(all(dp in (single_br_dp, cir_map_dp) for dp in dps))
# In theory, this case should never happen because the LCA (fork_zip_dp) should be
# replaced by _DummyIterDataPipe if any of its children is non-replicable
single_br_dp, _, _, ch1, ch2, *_, cir_map_dp, _, graph = self._make_dp()
graph = replace_by_dummy(graph, ch1)
dps = find_non_dispatching_branches(graph)
self.assertEqual(len(dps), 3)
self.assertTrue(all(dp in (single_br_dp, ch2, cir_map_dp) for dp in dps))
single_br_dp, *_, fork_zip_dp, _, cir_map_dp, _, graph = self._make_dp()
graph = replace_by_dummy(graph, cir_map_dp)
dps = find_non_dispatching_branches(graph)
self.assertTrue(all(dp in (single_br_dp, fork_zip_dp) for dp in dps))
*_, end_dp, graph = self._make_dp()
graph = replace_by_dummy(graph, end_dp)
dps = find_non_dispatching_branches(graph)
self.assertEqual(len(dps), 0)
single_br_dp, *_, fork_zip_dp, _, cir_map_dp, _, graph = self._make_dp()
graph = replace_by_dummy(graph, fork_zip_dp)
dps = find_non_dispatching_branches(graph)
self.assertEqual(len(dps), 2)
self.assertTrue(all(dp in (single_br_dp, cir_map_dp) for dp in dps))
def test_single_non_replicable_dp(self):
# All replicable
*_, end_dp, graph = self._make_dp()
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 1)
self.assertEqual(dps[0], end_dp)
# Test the production use case where the last DataPipe is fullsync
*_, end_dp, _ = self._make_dp()
dp = end_dp.fullsync()
graph = traverse_dps(dp)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 1)
self.assertEqual(dps[0], end_dp)
single_br_dp, *_, fork_zip_dp, _, cir_map_dp, _, graph = self._make_dp()
make_non_replicable_dp(single_br_dp)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 2)
self.assertTrue(all(dp in (fork_zip_dp, cir_map_dp) for dp in dps))
single_br_dp, *_, ch1, ch2, fork_zip_dp, _, cir_map_dp, _, graph = self._make_dp()
make_non_replicable_dp(fork_zip_dp)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 4)
self.assertTrue(all(dp in (single_br_dp, ch1, ch2, cir_map_dp) for dp in dps))
single_br_dp, _, forker_dp, ch1, *_, cir_map_dp, _, graph = self._make_dp()
make_non_replicable_dp(ch1)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 3)
self.assertTrue(all(dp in (single_br_dp, forker_dp, cir_map_dp) for dp in dps))
single_br_dp, *_, fork_zip_dp, cir_br_dp, cir_map_dp, _, graph = self._make_dp()
make_non_replicable_dp(cir_map_dp)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 3)
self.assertTrue(all(dp in (single_br_dp, fork_zip_dp, cir_br_dp) for dp in dps))
single_br_dp, *_, fork_zip_dp, _, cir_map_dp, end_dp, graph = self._make_dp()
make_non_replicable_dp(end_dp)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 3)
self.assertTrue(all(dp in (single_br_dp, fork_zip_dp, cir_map_dp) for dp in dps))
def test_multi_non_replicable_dps(self):
single_br_dp, multi_br_dp, *_, cir_map_dp, _, graph = self._make_dp()
make_non_replicable_dp(single_br_dp)
make_non_replicable_dp(multi_br_dp)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 1)
self.assertEqual(dps[0], cir_map_dp)
single_br_dp, _, forker_dp, ch1, *_, cir_map_dp, _, graph = self._make_dp()
make_non_replicable_dp(single_br_dp)
make_non_replicable_dp(ch1)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 2)
self.assertTrue(all(dp in (forker_dp, cir_map_dp) for dp in dps))
single_br_dp, *_, ch1, ch2, fork_zip_dp, _, cir_map_dp, _, graph = self._make_dp()
make_non_replicable_dp(single_br_dp)
make_non_replicable_dp(fork_zip_dp)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 3)
self.assertTrue(all(dp in (ch1, ch2, cir_map_dp) for dp in dps))
single_br_dp, *_, fork_zip_dp, cir_br_dp, cir_map_dp, _, graph = self._make_dp()
make_non_replicable_dp(single_br_dp)
make_non_replicable_dp(cir_map_dp)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 2)
self.assertTrue(all(dp in (fork_zip_dp, cir_br_dp) for dp in dps))
single_br_dp, multi_br_dp, forker_dp, ch1, *_, cir_map_dp, _, graph = self._make_dp()
make_non_replicable_dp(forker_dp)
make_non_replicable_dp(ch1)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 3)
self.assertTrue(all(dp in (single_br_dp, multi_br_dp, cir_map_dp) for dp in dps))
single_br_dp, multi_br_dp, forker_dp, *_, cir_br_dp, cir_map_dp, _, graph = self._make_dp()
make_non_replicable_dp(forker_dp)
make_non_replicable_dp(cir_map_dp)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 3)
self.assertTrue(all(dp in (single_br_dp, multi_br_dp, cir_br_dp) for dp in dps))
single_br_dp, *_, ch1, ch2, fork_zip_dp, cir_br_dp, cir_map_dp, _, graph = self._make_dp()
make_non_replicable_dp(fork_zip_dp)
make_non_replicable_dp(cir_map_dp)
dps = _find_replicable_branches(graph)
self.assertEqual(len(dps), 4)
self.assertTrue(all(dp in (single_br_dp, ch1, ch2, cir_br_dp) for dp in dps))
class TestGraphVisualization(expecttest.TestCase):
@unittest.skipIf(not HAS_GRAPHVIZ, "Package `graphviz` is required to test graph visualization functionalities.")
def test_to_graph(self):
dp1 = IterableWrapper(range(10))
dp2 = dp1.map(lambda x: x + 1)
dp3 = dp2.filter(lambda x: x > 5)
cdp1, cdp2 = dp3.fork(num_instances=2)
dp4 = cdp1.zip(cdp2)
cdp3, cdp4 = dp4.demux(num_instances=2, classifier_fn=lambda x: x % 2)
dp5 = cdp3.concat(cdp4)
# Test to ensure that we can create these graphs without runtime errors
kwargs_list: List[Dict] = [
{"dp": dp1},
{"dp": dp2},
{"dp": dp3},
{"dp": cdp1, "debug": True},
{"dp": dp4},
{"dp": dp4, "debug": True},
{"dp": cdp3, "debug": True},
{"dp": dp5},
{"dp": dp5, "debug": True},
]
for kwargs in kwargs_list:
g = to_graph(**kwargs)
self.assertTrue(isinstance(g, graphviz.Digraph))
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import warnings
from functools import partial
import expecttest
import torch
from _utils._common_utils_for_test import IS_M1, reset_after_n_next_calls
from torchdata.datapipes.iter import (
FileLister,
FileOpener,
FSSpecFileLister,
FSSpecFileOpener,
FSSpecSaver,
IterableWrapper,
TFRecordLoader,
)
try:
import google.protobuf as _protobuf
del _protobuf
HAS_PROTOBUF = True
except ImportError:
HAS_PROTOBUF = False
skipIfNoPROTOBUF = unittest.skipIf(not HAS_PROTOBUF, "no google protobuf")
class TestDataPipeTFRecord(expecttest.TestCase):
def setUp(self):
self.temp_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "_fakedata", "tfrecord")
def assertArrayEqual(self, arr1, arr2):
if isinstance(arr1, list):
arr1 = torch.stack(arr1)
if isinstance(arr2, list):
arr2 = torch.stack(arr2)
torch.testing.assert_close(arr1, arr2, check_dtype=False)
def _ground_truth_data(self):
for i in range(4):
x = torch.arange(i * 10, (i + 1) * 10)
yield {
"x_float": x,
"x_int": (x * 10).long(),
"x_byte": [b"test str"],
}
def _ground_truth_seq_data(self):
for i in range(4):
x = torch.arange(i * 10, (i + 1) * 10)
rep = 2 * i + 3
yield {"x_float": x, "x_int": (x * 10).long(), "x_byte": [b"test str"]}, {
"x_float_seq": [x] * rep,
"x_int_seq": [(x * 10).long()] * rep,
"x_byte_seq": [[b"test str"]] * rep,
}
@skipIfNoPROTOBUF
@unittest.skipIf(
IS_M1, "Protobuf 3.19.* is not supported on MacOS M1, but Tensorflow is incompatible with Protobuf 4"
)
@torch.no_grad()
def test_tfrecord_loader_example_iterdatapipe(self):
filename = f"{self.temp_dir}/example.tfrecord"
datapipe1 = IterableWrapper([filename])
datapipe2 = FileOpener(datapipe1, mode="b")
# Functional Test: test if the returned data is correct
tfrecord_parser = datapipe2.load_from_tfrecord()
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = final_expected_res = list(self._ground_truth_data())
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data[key], loaded_data[key])
self.assertEqual(len(loaded_data["x_byte"]), 1)
self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
# Functional Test: test if the shape of the returned data is correct when using spec
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float": ((5, 2), torch.float64),
"x_int": ((5, 2), torch.int32),
"x_byte": (tuple(), None),
}
)
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = [
{
"x_float": x["x_float"].reshape(5, 2),
"x_int": x["x_int"].reshape(5, 2),
"x_byte": x["x_byte"][0],
}
for x in self._ground_truth_data()
]
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
self.assertArrayEqual(true_data["x_float"], loaded_data["x_float"].float())
self.assertArrayEqual(true_data["x_int"], loaded_data["x_int"].long())
self.assertEqual(loaded_data["x_float"].dtype, torch.float64)
self.assertEqual(loaded_data["x_int"].dtype, torch.int32)
self.assertEqual(true_data["x_byte"], loaded_data["x_byte"])
# Functional Test: ignore features missing from spec
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float": ((10,), torch.float32),
}
)
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = [
{
"x_float": x["x_float"],
}
for x in self._ground_truth_data()
]
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
self.assertArrayEqual(true_data["x_float"], loaded_data["x_float"].float())
# Functional Test: raises error if missing spec feature
with self.assertRaises(RuntimeError):
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float_unknown": ((5, 2), torch.float64),
"x_int": ((5, 2), torch.int32),
"x_byte": (tuple(), None),
}
)
result = list(tfrecord_parser)
# Reset Test:
tfrecord_parser = TFRecordLoader(datapipe2)
expected_res = final_expected_res
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(tfrecord_parser, n_elements_before_reset)
self.assertEqual(len(expected_res[:n_elements_before_reset]), len(res_before_reset))
for true_data, loaded_data in zip(expected_res[:n_elements_before_reset], res_before_reset):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data[key], loaded_data[key])
self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
self.assertEqual(len(expected_res), len(res_after_reset))
for true_data, loaded_data in zip(expected_res, res_after_reset):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data[key], loaded_data[key])
self.assertEqual(true_data["x_byte"][0], loaded_data["x_byte"][0])
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "doesn't have valid length"):
len(tfrecord_parser)
@skipIfNoPROTOBUF
@unittest.skipIf(
IS_M1, "Protobuf 3.19.* is not supported on MacOS M1, but Tensorflow is incompatible with Protobuf 4"
)
@torch.no_grad()
def test_tfrecord_loader_sequence_example_iterdatapipe(self):
filename = f"{self.temp_dir}/sequence_example.tfrecord"
datapipe1 = IterableWrapper([filename])
datapipe2 = FileOpener(datapipe1, mode="b")
# Functional Test: test if the returned data is correct
tfrecord_parser = datapipe2.load_from_tfrecord()
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = final_expected_res = list(self._ground_truth_seq_data())
for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data_ctx[key], loaded_data[key])
self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"]))
self.assertIsInstance(loaded_data[key + "_seq"], list)
for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]):
self.assertArrayEqual(a1, a2)
self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
# Functional Test: test if the shape of the returned data is correct when using spec
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float": ((5, 2), torch.float64),
"x_int": ((5, 2), torch.int32),
"x_byte": (tuple(), None),
"x_float_seq": ((-1, 5, 2), torch.float64),
"x_int_seq": ((-1, 5, 2), torch.int32),
"x_byte_seq": ((-1,), None),
}
)
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = [
(
{
"x_float": x["x_float"].reshape(5, 2),
"x_int": x["x_int"].reshape(5, 2),
"x_byte": x["x_byte"][0],
},
{
"x_float_seq": [y.reshape(5, 2) for y in z["x_float_seq"]],
"x_int_seq": [y.reshape(5, 2) for y in z["x_int_seq"]],
"x_byte_seq": [y[0] for y in z["x_byte_seq"]],
},
)
for x, z in self._ground_truth_seq_data()
]
for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
l_loaded_data = loaded_data[key]
if key == "x_float":
l_loaded_data = l_loaded_data.float()
else:
l_loaded_data = l_loaded_data.int()
self.assertArrayEqual(true_data_ctx[key], l_loaded_data)
self.assertArrayEqual(true_data_seq[key + "_seq"], loaded_data[key + "_seq"])
self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
# Functional Test: ignore features missing from spec
tfrecord_parser = datapipe2.load_from_tfrecord(
{
"x_float": ((10,), torch.float32),
}
)
result = list(tfrecord_parser)
self.assertEqual(len(result), 4)
expected_res = [
{
"x_float": x["x_float"],
}
for x, z in self._ground_truth_seq_data()
]
for true_data, loaded_data in zip(expected_res, result):
self.assertSetEqual(set(true_data.keys()), set(loaded_data.keys()))
self.assertArrayEqual(true_data["x_float"], loaded_data["x_float"].float())
# Functional Test: raises error if missing spec feature
with self.assertRaises(RuntimeError):
tfrecord_parser = datapipe2.load_from_tfrecord(
{"x_float_unknown": ((5, 2), torch.float64), "x_int": ((5, 2), torch.int32), "x_byte": None}
)
result = list(tfrecord_parser)
# Reset Test:
tfrecord_parser = TFRecordLoader(datapipe2)
expected_res = final_expected_res
n_elements_before_reset = 2
res_before_reset, res_after_reset = reset_after_n_next_calls(tfrecord_parser, n_elements_before_reset)
self.assertEqual(len(expected_res[:n_elements_before_reset]), len(res_before_reset))
for (true_data_ctx, true_data_seq), loaded_data in zip(
expected_res[:n_elements_before_reset], res_before_reset
):
self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data_ctx[key], loaded_data[key])
self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"]))
self.assertIsInstance(loaded_data[key + "_seq"], list)
for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]):
self.assertArrayEqual(a1, a2)
self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
self.assertEqual(len(expected_res), len(res_after_reset))
for (true_data_ctx, true_data_seq), loaded_data in zip(expected_res, res_after_reset):
self.assertSetEqual(set(true_data_ctx.keys()).union(true_data_seq.keys()), set(loaded_data.keys()))
for key in ["x_float", "x_int"]:
self.assertArrayEqual(true_data_ctx[key], loaded_data[key])
self.assertEqual(len(true_data_seq[key + "_seq"]), len(loaded_data[key + "_seq"]))
self.assertIsInstance(loaded_data[key + "_seq"], list)
for a1, a2 in zip(true_data_seq[key + "_seq"], loaded_data[key + "_seq"]):
self.assertArrayEqual(a1, a2)
self.assertEqual(true_data_ctx["x_byte"], loaded_data["x_byte"])
self.assertListEqual(true_data_seq["x_byte_seq"], loaded_data["x_byte_seq"])
# __len__ Test: length isn't implemented since it cannot be known ahead of time
with self.assertRaisesRegex(TypeError, "doesn't have valid length"):
len(tfrecord_parser)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import io
import json
import os
import subprocess
import unittest
import warnings
from unittest.mock import patch
import expecttest
from _utils._common_utils_for_test import check_hash_fn, create_temp_dir, IS_M1, IS_WINDOWS
from torch.utils.data import DataLoader
from torchdata.datapipes.iter import (
FileOpener,
FSSpecFileLister,
FSSpecFileOpener,
HttpReader,
IterableWrapper,
OnDiskCacheHolder,
S3FileLister,
S3FileLoader,
)
from torchdata.datapipes.iter.load.online import _get_proxies
try:
import fsspec
HAS_FSSPEC = True
except ImportError:
HAS_FSSPEC = False
try:
import s3fs
HAS_FSSPEC_S3 = True
except ImportError:
HAS_FSSPEC_S3 = False
skipIfNoFSSpecS3 = unittest.skipIf(not (HAS_FSSPEC and HAS_FSSPEC_S3), "no FSSpec with S3fs")
try:
import adlfs
HAS_FSSPEC_AZ = True
except ImportError:
HAS_FSSPEC_AZ = False
skipIfNoFSSpecAZ = unittest.skipIf(not (HAS_FSSPEC and HAS_FSSPEC_AZ), "no FSSpec with adlfs")
try:
from torchdata._torchdata import S3Handler
HAS_AWS = True
except ImportError:
HAS_AWS = False
skipIfAWS = unittest.skipIf(HAS_AWS, "AWSSDK Enabled")
skipIfNoAWS = unittest.skipIf(not HAS_AWS, "No AWSSDK Enabled")
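# `skipIfAWS` skips when TorchData is built with the AWSSDK; `skipIfNoAWS` skips when it is not.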
try:
import portalocker
HAS_PORTALOCKER = True
except ImportError:
HAS_PORTALOCKER = False
skipIfNoPortalocker = unittest.skipIf(not HAS_PORTALOCKER, "No portalocker installed")
class TestDataPipeRemoteIO(expecttest.TestCase):
def setUp(self):
self.temp_dir = create_temp_dir()
def tearDown(self):
try:
self.temp_dir.cleanup()
except Exception as e:
warnings.warn(f"TestDataPipeRemoteIO was not able to cleanup temp dir due to {e}")
def test_http_reader_iterdatapipe(self):
file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE"
expected_file_name = "LICENSE"
expected_MD5_hash = "bb9675028dd39d2dd2bf71002b93e66c"
query_params = {"auth": ("fake_username", "fake_password"), "allow_redirects": True}
timeout = 120
http_reader_dp = HttpReader(IterableWrapper([file_url]), timeout=timeout, **query_params)
# Functional Test: test if the Http Reader can download and read properly
reader_dp = http_reader_dp.readlines()
it = iter(reader_dp)
path, line = next(it)
self.assertEqual(expected_file_name, os.path.basename(path))
self.assertTrue(b"BSD" in line)
# Reset Test: http_reader_dp has been read, but we reset when calling check_hash()
check_cache_dp = http_reader_dp.check_hash({file_url: expected_MD5_hash}, "md5", rewind=False)
it = iter(check_cache_dp)
path, stream = next(it)
self.assertEqual(expected_file_name, os.path.basename(path))
        self.assertTrue(hasattr(stream, "read"))  # a readable file-like stream should be returned
# __len__ Test: returns the length of source DataPipe
self.assertEqual(1, len(http_reader_dp))
# Error Test: test if the Http Reader raises an error when the url is invalid
error_url = "https://github.com/pytorch/data/this/url/dont/exist"
http_error_dp = HttpReader(IterableWrapper([error_url]), timeout=timeout)
with self.assertRaisesRegex(Exception, f"404.+{error_url}"):
next(iter(http_error_dp.readlines()))
# Feature skip-error Test: test if the Http Reader skips urls causing problems
http_skip_error_dp = HttpReader(IterableWrapper([error_url, file_url]), timeout=timeout, skip_on_error=True)
reader_dp = http_skip_error_dp.readlines()
with self.assertWarnsRegex(Warning, f"404.+{error_url}.+skipping"):
it = iter(reader_dp)
path, line = next(it)
self.assertEqual(expected_file_name, os.path.basename(path))
self.assertTrue(b"BSD" in line)
# test if GET-request is done with correct arguments
with patch("requests.Session.get") as mock_get:
http_reader_dp = HttpReader(IterableWrapper([file_url]), timeout=timeout, **query_params)
_ = next(iter(http_reader_dp))
mock_get.assert_called_with(
file_url,
timeout=timeout,
proxies=_get_proxies(),
stream=True,
auth=query_params["auth"],
allow_redirects=query_params["allow_redirects"],
)
@skipIfNoPortalocker
def test_on_disk_cache_holder_iterdatapipe(self):
tar_file_url = "https://raw.githubusercontent.com/pytorch/data/main/test/_fakedata/csv.tar.gz"
expected_file_name = os.path.join(self.temp_dir.name, "csv.tar.gz")
expected_MD5_hash = "42cd45e588dbcf64c65751fbf0228af9"
tar_hash_dict = {expected_file_name: expected_MD5_hash}
tar_file_dp = IterableWrapper([tar_file_url])
with self.assertRaisesRegex(RuntimeError, "Expected `OnDiskCacheHolder` existing"):
_ = tar_file_dp.end_caching()
def _filepath_fn(url):
filename = os.path.basename(url)
return os.path.join(self.temp_dir.name, filename)
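        # `on_disk_cache` yields only URLs whose cached file is missing or fails the hash
        # check; `HttpReader` downloads those, and `end_caching` persists the streams to
        # disk at the paths produced by `_filepath_fn`.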
tar_cache_dp = tar_file_dp.on_disk_cache(
filepath_fn=_filepath_fn,
hash_dict=tar_hash_dict,
hash_type="md5",
)
# DataPipe Constructor
tar_cache_dp = HttpReader(tar_cache_dp)
# Start iteration without `end_caching`
with self.assertRaisesRegex(RuntimeError, "Please call"):
_ = list(tar_cache_dp)
# Both filepath_fn and same_filepath_fn are set
with self.assertRaisesRegex(ValueError, "`filepath_fn` is mutually"):
_ = tar_cache_dp.end_caching(mode="wb", filepath_fn=_filepath_fn, same_filepath_fn=True)
tar_cache_dp = tar_cache_dp.end_caching(mode="wb", same_filepath_fn=True)
# File doesn't exist on disk
self.assertFalse(os.path.exists(expected_file_name))
path = list(tar_cache_dp)[0]
# File is cached to disk
self.assertTrue(os.path.exists(expected_file_name))
self.assertEqual(expected_file_name, path)
self.assertTrue(check_hash_fn(expected_file_name, expected_MD5_hash))
# Modify the downloaded file to trigger downloading again
with open(expected_file_name, "w") as f:
f.write("0123456789abcdef")
self.assertFalse(check_hash_fn(expected_file_name, expected_MD5_hash))
path = list(tar_cache_dp)[0]
self.assertTrue(check_hash_fn(expected_file_name, expected_MD5_hash))
# Call `end_caching` again
with self.assertRaisesRegex(RuntimeError, "`end_caching` can only be invoked once"):
_ = tar_cache_dp.end_caching()
# Cache decompressed archive but only check root directory
root_dir = "temp"
file_cache_dp = OnDiskCacheHolder(
tar_cache_dp, filepath_fn=lambda tar_path: os.path.join(os.path.dirname(tar_path), root_dir)
)
remember_cache_dp_object = file_cache_dp
file_cache_dp = FileOpener(file_cache_dp, mode="rb").load_from_tar()
file_cache_dp = file_cache_dp.end_caching(
mode="wb",
filepath_fn=lambda file_path: os.path.join(self.temp_dir.name, root_dir, os.path.basename(file_path)),
)
cached_it = iter(file_cache_dp)
for i in range(3):
expected_csv_path = os.path.join(self.temp_dir.name, root_dir, f"{i}.csv")
# File doesn't exist on disk
            # Check disabled due to prefetching inside `on_disk_cache`
# self.assertFalse(os.path.exists(expected_csv_path))
csv_path = next(cached_it)
# File is cached to disk
self.assertTrue(os.path.exists(expected_csv_path))
self.assertEqual(expected_csv_path, csv_path)
        # This handles the case where the previous process had no chance to release the
        # promise file on the file list; since we are in the same pid, we force the
        # iterators to finish by deleting or exhausting them.
del cached_it
if not IS_WINDOWS:
dl = DataLoader(file_cache_dp, num_workers=3, multiprocessing_context="fork", batch_size=1)
expected = [[os.path.join(self.temp_dir.name, root_dir, f"{i}.csv")] for i in range(3)] * 3
res = list(dl)
self.assertEqual(sorted(expected), sorted(res))
remember_cache_dp_object._download_everything = True
workers = 100
dl = DataLoader(file_cache_dp, num_workers=workers, multiprocessing_context="fork", batch_size=1)
expected = [[os.path.join(self.temp_dir.name, root_dir, f"{i}.csv")] for i in range(3)] * workers
res = list(dl)
self.assertEqual(sorted(expected), sorted(res))
def __get_s3_cnt(self, s3_pths: list, recursive=True):
"""Return the count of the total objects collected from a list s3 paths"""
tot_objs = set()
for p in s3_pths:
pth_parts = p.split("s3://")[1].split("/", 1)
if len(pth_parts) == 1:
bkt_name, prefix = pth_parts[0], ""
else:
bkt_name, prefix = pth_parts
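            # Shell out to the AWS CLI (assumed to be on PATH) to list objects without credentials.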
aws_cmd = f"aws --output json s3api list-objects --bucket {bkt_name} --no-sign-request"
if prefix.strip():
aws_cmd += f" --prefix {prefix}"
if not recursive:
aws_cmd += " --delimiter /"
res = subprocess.run(aws_cmd, shell=True, check=True, capture_output=True)
json_res = json.loads(res.stdout)
if "Contents" in json_res:
objs = [v["Key"] for v in json_res["Contents"]]
else:
objs = [v["Prefix"] for v in json_res["CommonPrefixes"]]
tot_objs |= set(objs)
return len(tot_objs)
@skipIfNoFSSpecS3
def test_fsspec_io_iterdatapipe(self):
input_list = [
["s3://ai2-public-datasets"], # bucket without '/'
["s3://ai2-public-datasets/charades/"], # bucket with '/'
[
"s3://ai2-public-datasets/charades/Charades_v1.zip",
"s3://ai2-public-datasets/charades/Charades_v1_flow.tar",
"s3://ai2-public-datasets/charades/Charades_v1_rgb.tar",
"s3://ai2-public-datasets/charades/Charades_v1_480.zip",
], # multiple files
]
for urls in input_list:
fsspec_lister_dp = FSSpecFileLister(IterableWrapper(urls), anon=True)
self.assertEqual(
sum(1 for _ in fsspec_lister_dp), self.__get_s3_cnt(urls, recursive=False), f"{urls} failed"
)
url = "s3://ai2-public-datasets/charades/"
fsspec_loader_dp = FSSpecFileOpener(FSSpecFileLister(IterableWrapper([url]), anon=True), anon=True)
res = list(fsspec_loader_dp)
        self.assertEqual(len(res), 18, f"{url} failed")
    @unittest.skipIf(True, "Needs authentication. See: https://github.com/pytorch/data/issues/904")
@skipIfNoFSSpecAZ
def test_fsspec_azure_blob(self):
url = "public/curated/covid-19/ecdc_cases/latest/ecdc_cases.csv"
account_name = "pandemicdatalake"
azure_prefixes = ["abfs", "az"]
fsspec_loader_dp = {}
for prefix in azure_prefixes:
fsspec_lister_dp = FSSpecFileLister(f"{prefix}://{url}", account_name=account_name)
fsspec_loader_dp[prefix] = FSSpecFileOpener(fsspec_lister_dp, account_name=account_name).parse_csv()
res_abfs = list(fsspec_loader_dp["abfs"])[0]
res_az = list(fsspec_loader_dp["az"])[0]
        self.assertEqual(res_abfs, res_az, f"{url} failed")
@skipIfAWS
def test_disabled_s3_io_iterdatapipe(self):
file_urls = ["s3://ai2-public-datasets"]
with self.assertRaisesRegex(ModuleNotFoundError, "TorchData must be built with"):
_ = S3FileLister(IterableWrapper(file_urls))
with self.assertRaisesRegex(ModuleNotFoundError, "TorchData must be built with"):
_ = S3FileLoader(IterableWrapper(file_urls))
@skipIfNoAWS
    @unittest.skipIf(IS_M1, "PyTorch M1 CI machine doesn't allow accessing S3")
def test_s3_io_iterdatapipe(self):
# S3FileLister: different inputs
input_list = [
["s3://ai2-public-datasets"], # bucket without '/'
["s3://ai2-public-datasets/"], # bucket with '/'
["s3://ai2-public-datasets/charades"], # folder without '/'
["s3://ai2-public-datasets/charades/"], # folder without '/'
["s3://ai2-public-datasets/charad"], # prefix
[
"s3://ai2-public-datasets/charades/Charades_v1",
"s3://ai2-public-datasets/charades/Charades_vu17",
], # prefixes
["s3://ai2-public-datasets/charades/Charades_v1.zip"], # single file
[
"s3://ai2-public-datasets/charades/Charades_v1.zip",
"s3://ai2-public-datasets/charades/Charades_v1_flow.tar",
"s3://ai2-public-datasets/charades/Charades_v1_rgb.tar",
"s3://ai2-public-datasets/charades/Charades_v1_480.zip",
], # multiple files
[
"s3://ai2-public-datasets/charades/Charades_v1.zip",
"s3://ai2-public-datasets/charades/Charades_v1_flow.tar",
"s3://ai2-public-datasets/charades/Charades_v1_rgb.tar",
"s3://ai2-public-datasets/charades/Charades_v1_480.zip",
"s3://ai2-public-datasets/charades/Charades_vu17",
], # files + prefixes
]
for input in input_list:
s3_lister_dp = S3FileLister(IterableWrapper(input), region="us-west-2")
self.assertEqual(sum(1 for _ in s3_lister_dp), self.__get_s3_cnt(input), f"{input} failed")
# S3FileLister: prefixes + different region
file_urls = [
"s3://aft-vbi-pds/bin-images/111",
"s3://aft-vbi-pds/bin-images/222",
]
s3_lister_dp = S3FileLister(IterableWrapper(file_urls), request_timeout_ms=10000, region="us-east-1")
        self.assertEqual(sum(1 for _ in s3_lister_dp), 2212, f"{file_urls} failed")
# S3FileLister: incorrect inputs
input_list = [
[""],
["ai2-public-datasets"],
["s3://"],
["s3:///bin-images"],
]
for input in input_list:
with self.assertRaises(ValueError, msg=f"{input} should raise ValueError."):
s3_lister_dp = S3FileLister(IterableWrapper(input), region="us-east-1")
for _ in s3_lister_dp:
pass
input = [["s3://aft-vbi-pds/bin-images/100730.jpg"], 1]
s3_loader_dp = S3FileLoader(input[0], region="us-east-1")
self.assertEqual(sum(1 for _ in s3_loader_dp), input[1], f"{input[0]} failed")
# S3FileLoader: incorrect inputs
input_list = [
[""],
["ai2-public-datasets"],
["s3://"],
["s3:///bin-images"],
["s3://ai2-public-datasets/bin-image"],
]
for input in input_list:
with self.assertRaises(ValueError, msg=f"{input} should raise ValueError."):
s3_loader_dp = S3FileLoader(input, region="us-east-1")
for _ in s3_loader_dp:
pass
# integration test
input = [["s3://charades-tar-shards/"], 10]
s3_lister_dp = S3FileLister(IterableWrapper(input[0]), region="us-west-2")
s3_loader_dp = S3FileLoader(s3_lister_dp, region="us-west-2")
self.assertEqual(sum(1 for _ in s3_loader_dp), input[1], f"{input[0]} failed")
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import warnings
from itertools import chain
import expecttest
from _utils._common_utils_for_test import create_temp_dir, reset_after_n_next_calls
from torchdata.datapipes.iter import DataFrameMaker, FileLister, FileOpener, IterableWrapper, ParquetDataFrameLoader
try:
import torcharrow
import torcharrow.dtypes as dt
HAS_TORCHARROW = True
except ImportError:
HAS_TORCHARROW = False
try:
import pyarrow
import pyarrow.parquet as parquet
HAS_PYARROW = True
except ImportError:
HAS_PYARROW = False
skipIfNoPyArrow = unittest.skipIf(not HAS_PYARROW, "no PyArrow.")
skipIfNoTorchArrow = unittest.skipIf(not HAS_TORCHARROW, "no TorchArrow.")
@skipIfNoTorchArrow
class TestDataFrame(expecttest.TestCase):
def setUp(self) -> None:
self.temp_dir = create_temp_dir()
if HAS_PYARROW:
self._write_parquet_files()
def tearDown(self) -> None:
try:
self.temp_dir.cleanup()
except Exception as e:
warnings.warn(f"TestDataFrame was not able to cleanup temp dir due to {e}")
def _write_parquet_files(self):
# Create TorchArrow DataFrames
DTYPE = dt.Struct([dt.Field("Values", dt.int32)])
df1 = torcharrow.dataframe([(i,) for i in range(10)], dtype=DTYPE)
df2 = torcharrow.dataframe([(i,) for i in range(100)], dtype=DTYPE)
# Write them as parquet files
for i, df in enumerate([df1, df2]):
fname = f"df{i}.parquet"
self._write_df_as_parquet(df, fname)
        self._write_multiple_dfs_as_parquet([df1, df2], fname="merged.parquet")
def _custom_files_set_up(self, files):
for fname, content in files.items():
temp_file_path = os.path.join(self.temp_dir.name, fname)
with open(temp_file_path, "w") as f:
f.write(content)
def _compare_dataframes(self, expected_df, actual_df):
self.assertEqual(len(expected_df), len(actual_df))
for exp, act in zip(expected_df, actual_df):
self.assertEqual(exp, act)
def _write_df_as_parquet(self, df, fname: str) -> None:
table = df.to_arrow()
parquet.write_table(table, os.path.join(self.temp_dir.name, fname))
    def _write_multiple_dfs_as_parquet(self, dfs, fname: str) -> None:
tables = [df.to_arrow() for df in dfs]
merged_table = pyarrow.concat_tables(tables)
parquet.write_table(merged_table, os.path.join(self.temp_dir.name, fname))
def test_dataframe_maker_iterdatapipe(self):
source_data = [(i,) for i in range(10)]
source_dp = IterableWrapper(source_data)
DTYPE = dt.Struct([dt.Field("Values", dt.int32)])
# Functional Test: DataPipe correctly converts into a single TorchArrow DataFrame
df_dp = source_dp.dataframe(dtype=DTYPE)
df = list(df_dp)[0]
expected_df = torcharrow.dataframe([(i,) for i in range(10)], dtype=DTYPE)
self._compare_dataframes(expected_df, df)
# Functional Test: DataPipe correctly converts into multiple TorchArrow DataFrames, based on size argument
df_dp = DataFrameMaker(source_dp, dataframe_size=5, dtype=DTYPE)
dfs = list(df_dp)
expected_dfs = [
torcharrow.dataframe([(i,) for i in range(5)], dtype=DTYPE),
torcharrow.dataframe([(i,) for i in range(5, 10)], dtype=DTYPE),
]
for exp_df, act_df in zip(expected_dfs, dfs):
self._compare_dataframes(exp_df, act_df)
# __len__ Test:
df_dp = source_dp.dataframe(dtype=DTYPE)
self.assertEqual(1, len(df_dp))
self.assertEqual(10, len(list(df_dp)[0]))
df_dp = source_dp.dataframe(dataframe_size=5, dtype=DTYPE)
self.assertEqual(2, len(df_dp))
self.assertEqual(5, len(list(df_dp)[0]))
# Reset Test:
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(df_dp, n_elements_before_reset)
for exp_df, act_df in zip(expected_dfs[:1], res_before_reset):
self._compare_dataframes(exp_df, act_df)
for exp_df, act_df in zip(expected_dfs, res_after_reset):
self._compare_dataframes(exp_df, act_df)
def test_dataframe_maker_with_csv(self):
def get_name(path_and_stream):
return os.path.basename(path_and_stream[0]), path_and_stream[1]
csv_files = {"1.csv": "key,item\na,1\nb,2"}
self._custom_files_set_up(csv_files)
datapipe1 = FileLister(self.temp_dir.name, "*.csv")
datapipe2 = FileOpener(datapipe1, mode="b")
datapipe3 = datapipe2.map(get_name)
csv_dict_parser_dp = datapipe3.parse_csv_as_dict()
# Functional Test: Correctly generate TorchArrow DataFrame from CSV
DTYPE = dt.Struct([dt.Field("key", dt.string), dt.Field("item", dt.string)])
df_dp = csv_dict_parser_dp.dataframe(dtype=DTYPE, columns=["key", "item"])
expected_dfs = [torcharrow.dataframe([{"key": "a", "item": "1"}, {"key": "b", "item": "2"}], dtype=DTYPE)]
for exp_df, act_df in zip(expected_dfs, list(df_dp)):
self._compare_dataframes(exp_df, act_df)
# Functional: making sure DataPipe works even without `columns` input
df_dp = csv_dict_parser_dp.dataframe(dtype=DTYPE)
for exp_df, act_df in zip(expected_dfs, list(df_dp)):
self._compare_dataframes(exp_df, act_df)
@skipIfNoPyArrow
def test_parquet_dataframe_reader_iterdatapipe(self):
DTYPE = dt.Struct([dt.Field("Values", dt.int32)])
# Functional Test: read from Parquet files and output TorchArrow DataFrames
source_dp = FileLister(self.temp_dir.name, masks="df*.parquet")
parquet_df_dp = ParquetDataFrameLoader(source_dp, dtype=DTYPE)
expected_dfs = [
torcharrow.dataframe([(i,) for i in range(10)], dtype=DTYPE),
torcharrow.dataframe([(i,) for i in range(100)], dtype=DTYPE),
]
for exp_df, act_df in zip(expected_dfs, list(parquet_df_dp)):
self._compare_dataframes(exp_df, act_df)
# Functional Test: correctly read from a Parquet file that was a merged DataFrame
merged_source_dp = FileLister(self.temp_dir.name, masks="merged.parquet")
merged_parquet_df_dp = ParquetDataFrameLoader(merged_source_dp, dtype=DTYPE)
expected_merged_dfs = [torcharrow.dataframe([(i,) for i in chain(range(10), range(100))], dtype=DTYPE)]
for exp_df, act_df in zip(expected_merged_dfs, list(merged_parquet_df_dp)):
self._compare_dataframes(exp_df, act_df)
# __len__ Test: no valid length because we do not know the number of row groups in advance
with self.assertRaisesRegex(TypeError, "has no len"):
len(parquet_df_dp)
# Reset Test:
n_elements_before_reset = 1
res_before_reset, res_after_reset = reset_after_n_next_calls(parquet_df_dp, n_elements_before_reset)
for exp_df, act_df in zip(expected_dfs[:1], res_before_reset):
self._compare_dataframes(exp_df, act_df)
for exp_df, act_df in zip(expected_dfs, res_after_reset):
self._compare_dataframes(exp_df, act_df)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import os
import platform
import sys
import tempfile
from typing import List, Tuple, TypeVar
from torchdata.datapipes.iter import IterDataPipe
T_co = TypeVar("T_co", covariant=True)
IS_LINUX = sys.platform == "linux"
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_M1 = IS_MACOS and "arm" in platform.platform()
class IDP_NoLen(IterDataPipe):
def __init__(self, input_dp) -> None:
super().__init__()
self.input_dp = input_dp
def __iter__(self):
yield from self.input_dp
def get_name(path_and_stream):
return os.path.basename(path_and_stream[0]), path_and_stream[1]
# Given a DataPipe and integer n, iterate the DataPipe for n elements and store the elements into a list
# Then, reset the DataPipe and return a tuple of two lists
# 1. A list of elements yielded before the reset
# 2. A list of all elements of the DataPipe after the reset
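# e.g. reset_after_n_next_calls(IterableWrapper([1, 2, 3]), 2) -> ([1, 2], [1, 2, 3])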
def reset_after_n_next_calls(datapipe: IterDataPipe[T_co], n: int) -> Tuple[List[T_co], List[T_co]]:
it = iter(datapipe)
res_before_reset = []
for _ in range(n):
res_before_reset.append(next(it))
return res_before_reset, list(datapipe)
def create_temp_dir(dir=None):
# The temp dir and files within it will be released and deleted in tearDown().
    # Adding `noqa: P201` to avoid the linter's warning on not releasing the dir handle within this function.
temp_dir = tempfile.TemporaryDirectory(dir=dir) # noqa: P201
return temp_dir
def create_temp_files(temp_dir, prefix=1, empty=True):
temp_dir_path = temp_dir.name
with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, prefix=str(prefix), suffix=".txt") as f:
temp_file1_name = f.name
with open(temp_file1_name, "w") as f1:
f1.write("0123456789abcdef")
with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, prefix=str(prefix + 1), suffix=".byte") as f:
temp_file2_name = f.name
with open(temp_file2_name, "wb") as f2:
f2.write(b"0123456789abcdef")
if empty:
with tempfile.NamedTemporaryFile(dir=temp_dir_path, delete=False, prefix=str(prefix + 2), suffix=".empty") as f:
temp_file3_name = f.name
return temp_file1_name, temp_file2_name, temp_file3_name
return temp_file1_name, temp_file2_name
def check_hash_fn(filepath, expected_hash, hash_type="md5"):
if hash_type == "sha256":
hash_fn = hashlib.sha256()
elif hash_type == "md5":
hash_fn = hashlib.md5()
else:
raise ValueError("Invalid hash_type requested, should be one of {}".format(["sha256", "md5"]))
with open(filepath, "rb") as f:
chunk = f.read(1024 ** 2)
while chunk:
hash_fn.update(chunk)
chunk = f.read(1024 ** 2)
return hash_fn.hexdigest() == expected_hash
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import tarfile
NUMBER_OF_FILES = 3
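# Each FILES entry is (folder name, file extension, content template[, write-as-bytes flag]),
# unpacked as arguments to `create_files` below.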
FILES = [
("bytes", "bt", "{fn}_0123456789abcdef\n", True),
("csv", "csv", "key,item\n0,{fn}_0\n1,{fn}_1\n"),
("json", "json", '{{"{fn}_0": [{{"{fn}_01": 1}}, {{"{fn}_02": 2}}], "{fn}_1": 1}}\n'),
("txt", "txt", "{fn}_0123456789abcdef\n"),
]
def create_files(folder, suffix, data, encoding=False):
os.makedirs(folder, exist_ok=True)
for i in range(NUMBER_OF_FILES):
fn = str(i)
d = data.format(fn=fn)
mode = "wb" if encoding else "wt"
if encoding:
d = d.encode()
with open(folder + "/" + fn + "." + suffix, mode) as f:
f.write(d)
with tarfile.open(folder + ".tar", mode="w") as archive:
archive.add(folder)
with tarfile.open(folder + ".tar.gz", mode="w:gz") as archive:
archive.add(folder)
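# Generate the TFRecord fixtures consumed by the TFRecord loader tests: four records per
# file, each with a length-10 float feature, an int feature, and a single byte feature.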
def create_tfrecord_files(path: str):
try:
import tensorflow as tf
except ImportError:
print("TensorFlow not found!")
print("We will not generate tfrecord files.")
return
os.makedirs(path, exist_ok=True)
with tf.io.TFRecordWriter(os.path.join(path, "example.tfrecord")) as writer:
for i in range(4):
x = tf.range(i * 10, (i + 1) * 10)
record_bytes = tf.train.Example(
features=tf.train.Features(
feature={
"x_float": tf.train.Feature(float_list=tf.train.FloatList(value=x)),
"x_int": tf.train.Feature(int64_list=tf.train.Int64List(value=tf.cast(x * 10, "int64"))),
"x_byte": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"test str"])),
}
)
).SerializeToString()
writer.write(record_bytes)
with tf.io.TFRecordWriter(os.path.join(path, "sequence_example.tfrecord")) as writer:
for i in range(4):
x = tf.range(i * 10, (i + 1) * 10)
rep = 2 * i + 3
record_bytes = tf.train.SequenceExample(
context=tf.train.Features(
feature={
"x_float": tf.train.Feature(float_list=tf.train.FloatList(value=x)),
"x_int": tf.train.Feature(int64_list=tf.train.Int64List(value=tf.cast(x * 10, "int64"))),
"x_byte": tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"test str"])),
}
),
feature_lists=tf.train.FeatureLists(
feature_list={
"x_float_seq": tf.train.FeatureList(
feature=[tf.train.Feature(float_list=tf.train.FloatList(value=x))] * rep
),
"x_int_seq": tf.train.FeatureList(
feature=[tf.train.Feature(int64_list=tf.train.Int64List(value=tf.cast(x * 10, "int64")))]
* rep
),
"x_byte_seq": tf.train.FeatureList(
feature=[tf.train.Feature(bytes_list=tf.train.BytesList(value=[b"test str"]))] * rep
),
}
),
).SerializeToString()
writer.write(record_bytes)
if __name__ == "__main__":
for args in FILES:
create_files(*args)
create_tfrecord_files("tfrecord")
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torchdata
import torchdata.dataloader2
import torchdata.datapipes
def s3_test():
from torchdata._torchdata import S3Handler
if __name__ == "__main__":
r"""
TorchData Smoke Test
"""
parser = argparse.ArgumentParser()
parser.add_argument("--no-s3", dest="s3", action="store_false")
options = parser.parse_args()
if options.s3:
s3_test()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
import torch.distributed as dist
from torch.distributed.elastic.multiprocessing.errors import record
from torch.utils.data import DataLoader
from torchdata.dataloader2 import DataLoader2, DistributedReadingService
from torchdata.datapipes.iter import IterableWrapper
def _get_dataloader(data_length: int, dl2: bool, shuffle: bool, rs=None):
data_source = IterableWrapper(list(range(data_length)))
dp = data_source.sharding_filter()
if shuffle:
dp = dp.shuffle()
if dl2:
if rs is None:
rs = DistributedReadingService()
dl = DataLoader2(dp, reading_service=rs)
else:
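        # `fullsync` keeps distributed ranks in lockstep when shards are uneven,
        # so no rank hangs waiting at a collective barrier.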
dp = dp.fullsync()
dl = DataLoader(dp)
return dl
@record
def main(backend, dl2):
dist.init_process_group(backend)
rank = dist.get_rank()
world_size = dist.get_world_size()
    # Use a prime number to ensure uneven data sharding
data_length = 23
# No Shuffle
dl = _get_dataloader(data_length, dl2=dl2, shuffle=False)
res = []
for d in dl:
res.append(d)
# Simulate training synchronization
dist.barrier()
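    # Without shuffling, rank r yields elements r, r + world_size, ...; the uneven tail
    # beyond the largest multiple of world_size is dropped so all ranks agree on length.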
assert sorted(res) == list(range(rank, data_length // world_size * world_size, world_size))
# Shuffle
dl = _get_dataloader(data_length, dl2=dl2, shuffle=True)
results = []
for _ in range(2):
res = []
torch.manual_seed(123)
for d in dl:
res.append(d)
# Simulate training synchronization
dist.barrier()
results.append(res)
assert results[0] == results[1]
# Different seed
res = []
torch.manual_seed(321)
for d in dl:
res.append(d)
# Simulate training synchronization
dist.barrier()
results.append(res)
assert len(results[0]) == len(results[2])
assert results[0] != results[2]
# Properly shutdown the process group
if isinstance(dl, DataLoader2):
dl.shutdown()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Elastic Training")
backend_group = parser.add_mutually_exclusive_group(required=True)
backend_group.add_argument("--gloo", action="store_true", help="GLOO backend")
backend_group.add_argument("--nccl", action="store_true", help="NCCL backend")
backend_group.add_argument("--mpi", action="store_true", help="MPI backend")
dl_group = parser.add_mutually_exclusive_group(required=True)
dl_group.add_argument("--dl1", action="store_true", help="DataLoader")
dl_group.add_argument("--dl2", action="store_true", help="DataLoader2")
args = parser.parse_args()
backend = "gloo"
if args.nccl:
backend = "nccl"
elif args.mpi:
backend = "mpi"
dl2 = True
if args.dl1:
dl2 = False
main(backend, dl2)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import os
import pickle
import queue
import random
import socket
import unittest
from unittest import TestCase
import numpy as np
import torch
import torch.distributed as dist
from torch.testing._internal.common_utils import instantiate_parametrized_tests, IS_WINDOWS, parametrize
from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES
from torchdata.dataloader2 import (
DataLoader2,
DistributedReadingService,
InProcessReadingService,
MultiProcessingReadingService,
ReadingServiceInterface,
SequentialReadingService,
)
from torchdata.dataloader2.dataloader2 import READING_SERVICE_STATE_KEY_NAME, SERIALIZED_DATAPIPE_KEY_NAME
from torchdata.dataloader2.graph import DataPipe, list_dps, replace_dp, set_datapipes_seed, traverse_dps
from torchdata.dataloader2.random import SeedGenerator
from torchdata.datapipes.iter import IterableWrapper, IterDataPipe, ShardingRoundRobinDispatcher
try:
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
HAS_DILL = True
except ImportError:
HAS_DILL = False
skipIfNoDill = unittest.skipIf(not HAS_DILL, "no dill")
if dist.is_available():
HAS_DIST = True
else:
HAS_DIST = False
skipIfNoDistributed = unittest.skipIf(not HAS_DIST, "no torch.distributed")
TEST_WITH_TSAN = os.getenv("PYTORCH_TEST_WITH_TSAN", "0") == "1"
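# Parametrize tests over every multiprocessing start method available on this platform
# (e.g. fork, spawn, forkserver).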
mp_ctx_parametrize = parametrize("ctx", mp.get_all_start_methods())
EXCEPTION_ITERATION_NUM = 7
class _ReadingServiceWrapper:
def __init__(self, dp):
self.dp = dp
def __iter__(self):
self.it = iter(self.dp)
return self
def __next__(self):
return next(self.it)
@staticmethod
def return_one():
return 1
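# Minimal ReadingService whose wrapper exposes `return_one`, exercised by the
# attribute-delegation test below.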
class TestReadingService(ReadingServiceInterface):
def initialize(self, dp: DataPipe) -> DataPipe:
return _ReadingServiceWrapper(dp) # type: ignore[return-value]
class DataLoader2Test(TestCase):
def test_dataloader2(self) -> None:
test_data_pipe = IterableWrapper(range(3))
data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe)
expected_batch = 0
for batch in iter(data_loader):
self.assertEqual(batch, expected_batch)
expected_batch += 1
def test_dataloader2_shutdown(self) -> None:
test_data_pipe = IterableWrapper(range(3))
data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe)
data_loader.shutdown()
def test_dataloader2_state_dict(self) -> None:
test_data_pipe = IterableWrapper(range(3))
data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe)
state = data_loader.state_dict()
self.assertIsNotNone(state)
self.assertIsNotNone(state[SERIALIZED_DATAPIPE_KEY_NAME])
self.assertIsNone(state[READING_SERVICE_STATE_KEY_NAME])
data_loader.shutdown()
def test_dataloader2_reading_service(self) -> None:
test_data_pipe = IterableWrapper(range(3))
reading_service = TestReadingService()
data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe, reading_service=reading_service)
expected_batch = 0
for batch in iter(data_loader):
self.assertEqual(batch, expected_batch)
expected_batch += 1
def test_dataloader2_load_state_dict(self) -> None:
test_data_pipe = IterableWrapper(range(3))
reading_service = TestReadingService()
data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe, reading_service=reading_service)
batch = next(iter(data_loader))
self.assertEqual(batch, 0)
state = data_loader.state_dict()
self.assertIsNotNone(state)
self.assertIsNotNone(state[SERIALIZED_DATAPIPE_KEY_NAME])
self.assertIsNone(state[READING_SERVICE_STATE_KEY_NAME])
data_loader.shutdown()
restored_data_loader: DataLoader2 = DataLoader2(datapipe=None, reading_service=reading_service)
restored_data_loader.load_state_dict(state)
restored_data_loader_datapipe = restored_data_loader.datapipe
deserialized_datapipe = pickle.loads(state[SERIALIZED_DATAPIPE_KEY_NAME])
for batch_1, batch_2 in zip(restored_data_loader_datapipe, deserialized_datapipe):
self.assertEqual(batch_1, batch_2)
self.assertEqual(
restored_data_loader.reading_service_state,
state[READING_SERVICE_STATE_KEY_NAME],
)
restored_data_loader.shutdown()
def test_dataloader2_iterates_correctly(self) -> None:
test_data_pipe = IterableWrapper(range(10)).sharding_filter()
reading_services = [
None,
TestReadingService(),
MultiProcessingReadingService(num_workers=4),
MultiProcessingReadingService(num_workers=4, worker_prefetch_cnt=0),
]
for reading_service in reading_services:
data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe, reading_service=reading_service)
self.assertEqual(list(range(10)), list(data_loader))
self.assertEqual(list(range(10)), list(data_loader))
self.assertEqual(list(range(10)), list(data_loader))
actual = []
for i in data_loader:
actual.append(i)
self.assertEqual(list(range(10)), actual)
actual = []
for i in data_loader:
actual.append(i)
self.assertEqual(list(range(10)), actual)
def test_dataloader2_reset(self) -> None:
test_data_pipe = IterableWrapper(range(10))
reading_services = [None, TestReadingService(), MultiProcessingReadingService(num_workers=1)]
for reading_service in reading_services:
data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe, reading_service=reading_service)
# Functional Test: Ensure multiple sequential reads of DL2 is possible
self.assertEqual(list(range(10)), list(data_loader))
self.assertEqual(list(range(10)), list(data_loader))
self.assertEqual(list(range(10)), list(data_loader))
# Functional Test: Ensure that the creation of a new iterator invalidates the old one
it1 = iter(data_loader)
self.assertEqual(0, next(it1))
self.assertEqual(1, next(it1))
it2 = iter(data_loader)
self.assertEqual(0, next(it2))
self.assertEqual(1, next(it2))
with self.assertRaisesRegex(RuntimeError, "iterator has been invalidated"):
next(it1)
self.assertEqual(list(range(2, 10)), list(it2))
def test_dataloader2_delegate_attribute(self) -> None:
test_data_pipe = IterableWrapper(range(10))
data_loader: DataLoader2 = DataLoader2(datapipe=test_data_pipe, reading_service=TestReadingService())
# Functional Test: Ensure multiple sequential reads of DL2 is possible
self.assertEqual(list(range(10)), list(data_loader))
self.assertEqual(list(range(10)), list(data_loader))
# Functional Test: Ensure that attribute/method of `dataloader._datapipe_iter` can be used
it = iter(data_loader)
self.assertEqual(1, it.return_one()) # type: ignore[attr-defined]
class DataLoader2ConsistencyTest(TestCase):
r"""
These tests ensure that the behaviors of `DataLoader2` are consistent across `ReadingServices` and potentially
with `DataLoaderV1`.
"""
@staticmethod
def _get_no_reading_service():
return None
@staticmethod
def _get_mp_reading_service():
return MultiProcessingReadingService(num_workers=2)
@staticmethod
def _get_in_process_reading_service():
return InProcessReadingService()
def _collect_data(self, datapipe, reading_service_gen):
dl: DataLoader2 = DataLoader2(datapipe, reading_service=reading_service_gen())
result = []
# Testing how RS handles partial reading and reiterations
for row, _ in zip(dl, range(10)):
result.append(row)
for row in dl:
result.append(row)
dl.shutdown()
return result
@staticmethod
def _no_op(x):
return x
def test_dataloader2_batch_collate(self) -> None:
dp: IterDataPipe = IterableWrapper(range(100)).batch(2).sharding_filter().collate(self._no_op) # type: ignore[assignment]
expected = self._collect_data(dp, reading_service_gen=self._get_no_reading_service)
reading_service_generators = (
self._get_mp_reading_service,
self._get_in_process_reading_service,
)
for reading_service_gen in reading_service_generators:
actual = self._collect_data(dp, reading_service_gen=reading_service_gen)
            # TODO(588): This comparison only indicates that something is broken; it does not help with debugging
self.assertEqual(expected, actual, reading_service_gen)
def test_dataloader2_shuffle(self) -> None:
# TODO(589): Add shuffle test
pass
def _x_mult_2(d):
return d * 2
class NonReplicableDataPipe(IterDataPipe):
def __init__(self, datapipe):
self.datapipe = datapipe
def __iter__(self):
yield from self.datapipe
def is_replicable(self):
return False
class _CustomException(Exception):
pass
class MakeMistakeDataPipe(IterDataPipe):
def __init__(self, source_datapipe, exc_iteration=EXCEPTION_ITERATION_NUM):
self.source_datapipe = source_datapipe
self.exc_iteration = exc_iteration
def __iter__(self):
for i, x in enumerate(self.source_datapipe):
if i == self.exc_iteration:
raise _CustomException("oops")
yield x
class MultiProcessingReadingServiceTest(TestCase):
@staticmethod
def _worker_init_fn(datapipe, worker_info):
datapipe = datapipe.sharding_filter()
torch.utils.data.graph_settings.apply_sharding(
datapipe, worker_info.num_workers, worker_info.worker_id, SHARDING_PRIORITIES.MULTIPROCESSING
)
return datapipe
@staticmethod
def _worker_reset_fn(datapipe, worker_info, worker_seed_generator: SeedGenerator):
graph = traverse_dps(datapipe)
dps = list_dps(graph)
worker_seed_generator.seed(123)
set_datapipes_seed(dps, seed_generator=worker_seed_generator, distributed_shared=True)
return datapipe
@mp_ctx_parametrize
def test_worker_fns(self, ctx):
dp: IterDataPipe = IterableWrapper(range(100)).batch(2).shuffle()
rs = MultiProcessingReadingService(
num_workers=2,
multiprocessing_context=ctx,
worker_init_fn=self._worker_init_fn,
worker_reset_fn=self._worker_reset_fn,
)
dl = DataLoader2(dp, reading_service=rs)
res1 = list(dl)
res2 = list(dl)
# Test worker_init_fn to set sharding
def _expand_fn(res):
result = []
for batch in res:
result.extend(batch)
return result
exp = list(range(100))
self.assertEqual(sorted(_expand_fn(res1)), exp)
self.assertEqual(sorted(_expand_fn(res2)), exp)
        # Test worker_reset_fn to set the same random seed across epochs
self.assertEqual(res1, res2)
@mp_ctx_parametrize
def test_single_branch_non_replicable(self, ctx):
r"""
For single branch pipeline with a non-replicable DataPipe, all ``sharding_filters``
in the pipeline become non-replicable.
"""
def _make_dp():
single_br_dp = IterableWrapper(list(range(10))).shuffle()
map_dp = single_br_dp.map(_x_mult_2)
end_dp = map_dp.map(_x_mult_2).shuffle()
return single_br_dp, map_dp, end_dp
def _assert_deterministic_dl_res(dl, exp):
torch.manual_seed(123)
res = list(dl)
self.assertEqual(sorted(res), exp)
# Second epoch
torch.manual_seed(123)
self.assertEqual(list(dl), res)
# Different seed
torch.manual_seed(321)
self.assertNotEqual(list(dl), res)
# Properly shutdown
dl.shutdown()
        # By default, all replicable
single_br_dp, _, end_dp = _make_dp()
graph = traverse_dps(end_dp)
sf_dp = single_br_dp.sharding_filter()
replace_dp(graph, single_br_dp, sf_dp)
dl = DataLoader2(
end_dp, reading_service=MultiProcessingReadingService(num_workers=2, multiprocessing_context=ctx)
)
# Determinism and dynamic sharding
# _assert_deterministic_dl_res(dl, [i * 4 for i in range(10)])
# Non-replicable before sharding_filter
# shuffle in dispatch process
single_br_dp, map_dp, end_dp = _make_dp()
graph = traverse_dps(end_dp)
round_robin_dispatcher = ShardingRoundRobinDispatcher(single_br_dp, SHARDING_PRIORITIES.MULTIPROCESSING)
replace_dp(graph, single_br_dp, round_robin_dispatcher)
sf_dp = map_dp.sharding_filter()
replace_dp(graph, map_dp, sf_dp)
dl = DataLoader2(
end_dp, reading_service=MultiProcessingReadingService(num_workers=2, multiprocessing_context=ctx)
)
# Determinism for non-replicable pipeline
_assert_deterministic_dl_res(dl, [i * 4 for i in range(10)])
# Non-replicable after sharding_filter
# shuffle in dispatch process
single_br_dp, map_dp, end_dp = _make_dp()
graph = traverse_dps(end_dp)
sf_dp = single_br_dp.sharding_filter()
replace_dp(graph, single_br_dp, sf_dp)
round_robin_dispatcher = ShardingRoundRobinDispatcher(map_dp, SHARDING_PRIORITIES.MULTIPROCESSING)
replace_dp(graph, map_dp, round_robin_dispatcher)
dl = DataLoader2(
end_dp, reading_service=MultiProcessingReadingService(num_workers=2, multiprocessing_context=ctx)
)
# Determinism for non-replicable pipeline
_assert_deterministic_dl_res(dl, [i * 4 for i in range(10)])
@mp_ctx_parametrize
def test_multi_branch_non_replicable(self, ctx) -> None:
r"""
For multi-branch pipeline with a non-replicable DataPipe on one branch,
all ``sharding_filter`` on the other branches should remain replicable.
"""
def _make_dp():
branch1_dp = IterableWrapper(list(range(10))).shuffle()
branch2_dp = IterableWrapper(list(range(10))).shuffle()
map_dp = branch1_dp.map(_x_mult_2)
end_dp = map_dp.zip(branch2_dp)
return branch1_dp, map_dp, branch2_dp, end_dp
def _assert_deterministic_dl_res(dl, exp1, exp2):
torch.manual_seed(123)
res = list(dl)
res1, res2 = list(zip(*res))
self.assertEqual(sorted(res1), exp1)
self.assertEqual(sorted(res2), exp2)
# Second epoch
torch.manual_seed(123)
self.assertEqual(list(dl), res)
# Different seed
torch.manual_seed(321)
self.assertNotEqual(list(dl), res)
# Properly shutdown
dl.shutdown()
        # By default, all replicable
branch1_dp, _, branch2_dp, end_dp = _make_dp()
graph = traverse_dps(end_dp)
sf1_dp = branch1_dp.sharding_filter()
sf2_dp = branch2_dp.sharding_filter()
replace_dp(graph, branch1_dp, sf1_dp)
replace_dp(graph, branch2_dp, sf2_dp)
dl = DataLoader2(
end_dp, reading_service=MultiProcessingReadingService(num_workers=2, multiprocessing_context=ctx)
)
# Determinism and dynamic sharding
_assert_deterministic_dl_res(dl, [i * 2 for i in range(10)], list(range(10)))
# Non-replicable on one branch
# shuffle in dispatch process
branch1_dp, _, branch2_dp, end_dp = _make_dp()
graph = traverse_dps(end_dp)
non_replicable_dp = ShardingRoundRobinDispatcher(branch1_dp, SHARDING_PRIORITIES.MULTIPROCESSING)
replace_dp(graph, branch1_dp, non_replicable_dp)
        # The other branch should have a sharding_filter to keep the data balanced
sf_dp = branch2_dp.sharding_filter()
replace_dp(graph, branch2_dp, sf_dp)
dl = DataLoader2(
end_dp, reading_service=MultiProcessingReadingService(num_workers=2, multiprocessing_context=ctx)
)
# Determinism for non-replicable pipeline
_assert_deterministic_dl_res(dl, [i * 2 for i in range(10)], list(range(10)))
# Non-replicable on both branches
# shuffle in dispatch process
branch1_dp, _, branch2_dp, end_dp = _make_dp()
graph = traverse_dps(end_dp)
non_replicable_dp1 = ShardingRoundRobinDispatcher(branch1_dp, SHARDING_PRIORITIES.MULTIPROCESSING)
replace_dp(graph, branch1_dp, non_replicable_dp1)
non_replicable_dp2 = ShardingRoundRobinDispatcher(branch2_dp, SHARDING_PRIORITIES.MULTIPROCESSING)
replace_dp(graph, branch2_dp, non_replicable_dp2)
dl = DataLoader2(
end_dp, reading_service=MultiProcessingReadingService(num_workers=2, multiprocessing_context=ctx)
)
# Determinism for non-replicable pipeline
_assert_deterministic_dl_res(dl, [i * 2 for i in range(10)], list(range(10)))
@mp_ctx_parametrize
def test_multi_worker_determinism(self, ctx):
dp: IterDataPipe = IterableWrapper(range(100))
dp = dp.shuffle().sharding_filter()
dp = dp.batch(2)
rs = MultiProcessingReadingService(
num_workers=2,
multiprocessing_context=ctx,
)
dl = DataLoader2(dp, reading_service=rs)
torch.manual_seed(123)
res = list(dl) + list(dl)
torch.manual_seed(123)
self.assertEqual(res, list(dl) + list(dl))
torch.manual_seed(321)
self.assertNotEqual(res, list(dl) + list(dl))
# Using seed API for DataLoader2
dl.seed(123)
res = list(dl) + list(dl)
dl.seed(123)
self.assertEqual(res, list(dl) + list(dl))
dl.seed(321)
self.assertNotEqual(res, list(dl) + list(dl))
@mp_ctx_parametrize
def test_dispatching_worker_determinism(self, ctx):
dp: IterDataPipe = IterableWrapper(range(101))
dp = dp.shuffle().sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING)
dp = dp.batch(2)
rs = MultiProcessingReadingService(
num_workers=2,
multiprocessing_context=ctx,
)
dl = DataLoader2(dp, reading_service=rs)
torch.manual_seed(123)
res = list(dl) + list(dl)
torch.manual_seed(123)
self.assertEqual(res, list(dl) + list(dl))
torch.manual_seed(321)
self.assertNotEqual(res, list(dl) + list(dl))
# Using seed API for DataLoader2
dl.seed(123)
res = list(dl) + list(dl)
dl.seed(123)
self.assertEqual(res, list(dl) + list(dl))
dl.seed(321)
self.assertNotEqual(res, list(dl) + list(dl))
@mp_ctx_parametrize
def test_non_replicable_datapipe(self, ctx) -> None:
r"""
For the pipeline with non-replicable DataPipe, make sure
the DataPipe remains in the main process.
"""
dp: IterDataPipe = IterableWrapper(range(100))
dp = dp.shuffle().sharding_filter()
dp = dp.batch(2)
non_rep_dp = NonReplicableDataPipe(dp)
rs = MultiProcessingReadingService(
num_workers=2,
multiprocessing_context=ctx,
)
dl = DataLoader2(non_rep_dp, reading_service=rs)
torch.manual_seed(123)
it = iter(dl)
        # Validate that NonReplicableDataPipe is still in the main process
non_rep_dp = dl.reading_service._end_datapipe
self.assertEqual(type(non_rep_dp), NonReplicableDataPipe)
res = list(it) + list(dl)
torch.manual_seed(123)
self.assertEqual(res, list(dl) + list(dl))
torch.manual_seed(321)
self.assertNotEqual(res, list(dl) + list(dl))
@parametrize("num_workers", [1, 3])
@parametrize("worker_prefetch_cnt", [0, 5, 10])
def test_worker_exception_raised(self, num_workers, worker_prefetch_cnt):
dp = IterableWrapper(range(100)).sharding_filter()
dp = MakeMistakeDataPipe(dp)
rs = MultiProcessingReadingService(num_workers=num_workers, worker_prefetch_cnt=worker_prefetch_cnt)
dl = DataLoader2(dp, reading_service=rs)
it = iter(dl)
for _ in range(EXCEPTION_ITERATION_NUM * num_workers):
next(it)
with self.assertRaises(_CustomException) as cm:
next(it)
exc_msg = str(cm.exception)
self.assertTrue("Caught _CustomException in worker process 0" in exc_msg)
self.assertTrue("Original Traceback" in exc_msg)
self.assertTrue("_CustomException: oops" in exc_msg)
@parametrize("num_workers", [1, 3])
@parametrize("worker_prefetch_cnt", [0, 5, 10])
def test_dispatching_exception_raised(self, num_workers, worker_prefetch_cnt):
dp = IterableWrapper(range(100))
dp = MakeMistakeDataPipe(dp)
dp = dp.sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING)
dp = dp.map(_x_mult_2)
rs = MultiProcessingReadingService(num_workers=num_workers, worker_prefetch_cnt=worker_prefetch_cnt)
dl = DataLoader2(dp, reading_service=rs)
it = iter(dl)
for _ in range(EXCEPTION_ITERATION_NUM):
next(it)
with self.assertRaises(_CustomException) as cm:
next(it)
exc_msg = str(cm.exception)
self.assertTrue("Caught _CustomException in dispatching process" in exc_msg)
self.assertTrue("Original Traceback" in exc_msg)
self.assertTrue("_CustomException: oops" in exc_msg)
TEST_MASTER_ADDR = "127.0.0.1"
DEFAULT_WORLD_SIZE = 2
def _get_open_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
s.close()
return str(port)
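# Note: the chosen port may be reclaimed by another process between close() and the
# subsequent bind; this race is acceptable for tests.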
class TerminateSignal:
pass
def _launch_distributed_training(world_size, *args, fn):
os.environ["MASTER_ADDR"] = TEST_MASTER_ADDR
os.environ["MASTER_PORT"] = _get_open_port()
ctx = mp.get_context("spawn")
q = ctx.Queue()
ps = []
for rank in range(world_size):
p = ctx.Process(
target=fn,
args=(
rank,
world_size,
q,
*args,
),
)
p.start()
ps.append(p)
res = []
while True:
try:
d = q.get()
if isinstance(d, TerminateSignal):
break
res.append(d)
except queue.Empty:
continue
for p in ps:
p.join()
return res
def _dist_one_epoch(dl):
res = []
for d in dl:
res.append(d)
# Simulate training synchronization
dist.barrier()
return res
def _finalize_distributed_queue(rank, q):
r"""
    Synchronize all distributed processes to guarantee that all data has been put into
    the multiprocessing queue.
"""
pg = dist.new_group(backend="gloo")
end_tensor = torch.tensor([rank], dtype=torch.int64)
dist.all_reduce(end_tensor, group=pg)
if rank == 0:
q.put(TerminateSignal())
dist.destroy_process_group(pg)
def _random_fn(data):
r"""
    Used to validate that subprocess-local RNGs are seeded deterministically.
"""
py_random_num = random.randint(0, 2 ** 32)
np_random_num = np.random.randint(0, 2 ** 32)
torch_random_num = torch.randint(0, 2 ** 32, size=[]).item()
return (data, py_random_num, np_random_num, torch_random_num)
def _dist_training_fn(rank, world_size, q, dp_fn, rs_fn, num_workers, ctx):
# Use gloo
dist.init_process_group("gloo", rank=rank, world_size=world_size)
# Uneven shards
data_length = world_size * num_workers * 10 + 1
dp = dp_fn(data_length)
rs = rs_fn(num_workers, ctx)
dl = DataLoader2(dp, reading_service=rs)
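    # Each epoch's results are queued as (epoch index, rank, data) and collated by the parent.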
# No seed
res = _dist_one_epoch(dl)
q.put((0, rank, res))
# Shuffle with seed
for epoch in range(2):
dl.seed(123)
res = _dist_one_epoch(dl)
q.put((epoch + 1, rank, res))
# Different seed
dl.seed(321)
res = _dist_one_epoch(dl)
q.put((3, rank, res))
_finalize_distributed_queue(rank, q)
dl.shutdown()
@skipIfNoDistributed
@unittest.skipIf(IS_WINDOWS, "Remove when https://github.com/pytorch/data/issues/857 is fixed")
class SequentialReadingServiceTest(TestCase):
@staticmethod
def _make_dp(data_length):
data_source = IterableWrapper(list(range(data_length)))
dp = data_source.shuffle().sharding_filter().map(_random_fn)
return dp
@staticmethod
def _make_dispatching_dp(data_length):
data_source = IterableWrapper(list(range(data_length)))
dp = data_source.shuffle().sharding_filter()
dp = dp.sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING).map(_random_fn)
return dp
@staticmethod
def _make_rs(num_workers, ctx):
mp_rs = MultiProcessingReadingService(
num_workers=num_workers,
multiprocessing_context=ctx,
)
dist_rs = DistributedReadingService()
rs = SequentialReadingService(dist_rs, mp_rs)
return rs
@mp_ctx_parametrize
def test_sequential_reading_service_normal_dp(self, ctx):
world_size = DEFAULT_WORLD_SIZE
num_workers = 2
res = _launch_distributed_training(
world_size,
SequentialReadingServiceTest._make_dp,
SequentialReadingServiceTest._make_rs,
num_workers,
ctx,
fn=_dist_training_fn,
)
result = ({}, {}, {}, {})
for epoch, rank, r in res:
d, *ran_nums = list(zip(*r))
result[epoch][rank] = (d, ran_nums)
# Guarantee the same length per rank
for rr in result:
exp_len = num_workers * 10
for _, (d, _) in rr.items():
self.assertEqual(len(d), exp_len)
        # The same seed generates the same order of data and the same random state
self.assertEqual(result[1], result[2])
# Different seeds
for rank in range(world_size):
# Different shuffle order
self.assertNotEqual(result[1][rank][0], result[3][rank][0])
# Different subprocess-local random state
self.assertNotEqual(result[1][rank][1], result[3][rank][1])
@mp_ctx_parametrize
def test_sequential_reading_service_dispatching_dp(self, ctx):
world_size = DEFAULT_WORLD_SIZE
num_workers = 2
res = _launch_distributed_training(
world_size,
SequentialReadingServiceTest._make_dispatching_dp,
SequentialReadingServiceTest._make_rs,
num_workers,
ctx,
fn=_dist_training_fn,
)
result = ({}, {}, {}, {})
for epoch, rank, r in res:
d, *ran_nums = list(zip(*r))
result[epoch][rank] = (d, ran_nums)
# Guarantee the same length per rank
for rr in result:
exp_len = num_workers * 10
for _, (d, _) in rr.items():
self.assertEqual(len(d), exp_len)
        # The same seed generates the same order of data and the same random state
self.assertEqual(result[1], result[2])
# Different seeds
for rank in range(world_size):
# Different shuffle order
self.assertNotEqual(result[1][rank][0], result[3][rank][0])
# Different subprocess-local random state
self.assertNotEqual(result[1][rank][1], result[3][rank][1])
instantiate_parametrized_tests(MultiProcessingReadingServiceTest)
instantiate_parametrized_tests(SequentialReadingServiceTest)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as mp
import unittest
from unittest import TestCase
from torch.testing._internal.common_utils import instantiate_parametrized_tests, parametrize, subtest
from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES
from torchdata.dataloader2 import (
DataLoader2,
DataLoader2Iterator,
InProcessReadingService,
MultiProcessingReadingService,
)
from torchdata.datapipes.iter import IterableWrapper, IterDataPipe
def _add_one(x: int) -> int:
return x + 1
# Test DataPipes
n_elements = 10
dp1 = IterableWrapper(range(n_elements)).shuffle().sharding_filter()
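# Chaining two prefetch buffers exercises pause/resume propagation through multiple
# buffering stages.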
double_pause_dp = dp1.prefetch().prefetch()
test_dps = [dp1, double_pause_dp]
mp_ctx_parametrize = parametrize("ctx", mp.get_all_start_methods())
dp_parametrize = parametrize("dp", test_dps)
class TestInProcessReadingService(TestCase):
r"""
This tests specific functionalities of InProcessReadingService, notably
`pause`, `resume`, `snapshot`.
"""
@dp_parametrize
def test_reading_service_pause_resume(self, dp) -> None:
        # Functional Test: Tests various configurations of DataPipe/ReadingService to ensure the pipeline
        # properly pauses and resumes
rs1 = InProcessReadingService()
dl1: DataLoader2 = DataLoader2(dp, reading_service=rs1)
res = []
for i, x in enumerate(dl1):
res.append(x)
if i in {2, n_elements - 2}:
dl1._pause()
dl1._resume()
self.assertEqual(list(range(n_elements)), sorted(res))
dl1.shutdown()
rs2 = InProcessReadingService(5)
dl2: DataLoader2 = DataLoader2(dp, reading_service=rs2)
res = []
for i, x in enumerate(dl2):
res.append(x)
if i in {2, n_elements - 2}:
dl2._pause()
dl2._resume()
self.assertEqual(list(range(n_elements)), sorted(res))
dl2.shutdown()
@dp_parametrize
def test_reading_service_pause_stop_yield(self, dp) -> None:
# Functional Test: Confirms that `dl` will stop yielding elements after `_pause` is called
rs = InProcessReadingService(5)
dl: DataLoader2 = DataLoader2(dp, reading_service=rs)
res = []
for i, x in enumerate(dl):
res.append(x)
if i in {2}:
dl._pause()
self.assertEqual(3, len(res))
dl.shutdown()
@dp_parametrize
def test_reading_service_limit(self, dp) -> None:
rs = InProcessReadingService(5)
dl: DataLoader2 = DataLoader2(dp, reading_service=rs)
res = []
cumulative_res = []
n_limit = 3
it: DataLoader2Iterator = iter(dl)
it.limit(n_limit)
for x in it:
res.append(x)
        # Functional Test: Verify that the number of elements yielded equals the specified limit
self.assertEqual(n_limit, len(res)) # 3
cumulative_res.extend(res)
# Functional Test: Calling `next` after `limit` will trigger `StopIteration`
with self.assertRaises(StopIteration):
next(it)
# Functional Test: Verify that `limit` persists without the need to set it again
it.resume()
res = []
for x in it:
res.append(x)
self.assertEqual(n_limit, len(res)) # 3
cumulative_res.extend(res)
# Functional Test: Clear the `limit` and yield the rest of the elements
it.limit(None)
it.resume()
res = []
for x in it:
res.append(x)
self.assertEqual(n_elements - 2 * n_limit, len(res)) # 4
cumulative_res.extend(res)
self.assertEqual(list(range(n_elements)), sorted(cumulative_res))
        # Functional Test: Setting `limit` to a different value after each mini-epoch
dl2: DataLoader2 = DataLoader2(double_pause_dp, reading_service=rs)
res = []
it2: DataLoader2Iterator = iter(dl2)
it2.limit(3)
for x in it2:
res.append(x)
# Limit can be set before `resume`
it2.limit(4)
it2.resume()
for x in it2:
res.append(x)
self.assertEqual(7, len(res))
# Limit can also be set after `resume`, but before the next `for` loop
it2.resume()
it2.limit(2)
for x in it2:
res.append(x)
self.assertEqual(9, len(res))
def test_initial_epoch_checkpointing(self):
dp = IterableWrapper(range(20)).shuffle()
rs = InProcessReadingService(5)
# Functional Test: Saving state before iterator is created
dl: DataLoader2 = DataLoader2(datapipe=dp, reading_service=rs)
dl.seed(1)
initial_state = dl.state_dict()
it1 = iter(dl)
restored_dl: DataLoader2 = DataLoader2.from_state(initial_state, rs) # type: ignore[arg-type]
restored_dl._restore_checkpoint_beginning_of_epoch()
self.assertEqual(list(it1), list(restored_dl))
dl.shutdown()
restored_dl.shutdown()
# Functional Test: Saving state after iterator is created
dl = DataLoader2(datapipe=dp, reading_service=rs)
dl.seed(1)
it1 = iter(dl)
initial_state = dl.state_dict()
restored_dl = DataLoader2.from_state(initial_state, rs) # type: ignore[arg-type]
restored_dl._restore_checkpoint_beginning_of_epoch()
self.assertEqual(list(it1), list(restored_dl))
dl.shutdown()
restored_dl.shutdown()
# Functional Test: Saving state after iterator is created and began iterating
dl = DataLoader2(datapipe=dp, reading_service=rs)
dl.seed(1)
it1 = iter(dl)
temp = next(it1) # Starts iterating
initial_state = dl.state_dict()
restored_dl = DataLoader2.from_state(initial_state, rs) # type: ignore[arg-type]
restored_dl._restore_checkpoint_beginning_of_epoch()
        self.assertEqual([temp] + list(it1), list(restored_dl))  # Note: `temp` is prepended since `it1` already consumed the first element
dl.shutdown()
restored_dl.shutdown()
def _non_dispatching_dp(n_elements=1000):
dp = IterableWrapper(list(range(n_elements))).shuffle()
dp = dp.sharding_filter()
dp = dp.map(_add_one).batch(8)
return dp
def _dispatching_dp(n_elements=1000):
dp = IterableWrapper(list(range(n_elements))).shuffle()
dp = dp.prefetch(20)
dp = dp.sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING)
dp = dp.map(_add_one).batch(16)
return dp
class NonShardableDataPipe(IterDataPipe):
def __init__(self, dp: IterDataPipe):
self.dp = dp
def is_replicable(self):
return False
def __iter__(self):
yield from self.dp
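# A minimal sketch (hypothetical helper, not used by the tests below): a DataPipe
# whose `is_replicable()` returns False forces the reading service to keep that
# stage in a single dispatching process, while replicable stages are cloned across
# workers.
def _example_non_replicable_pipeline(n: int = 8) -> IterDataPipe:
    base = IterableWrapper(range(n)).shuffle().sharding_filter()
    return NonShardableDataPipe(base)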
class TestMultiProcessingReadingService(TestCase):
r"""
This tests specific functionalities of MultiProcessingReadingService, notably
`pause`, `resume`, `snapshot`.
"""
@mp_ctx_parametrize
@parametrize("dp_fn", [subtest(_non_dispatching_dp, "non_dispatch"), subtest(_dispatching_dp, "dispatch")])
@parametrize("main_prefetch", [0, 10])
@parametrize("worker_prefetch", [0, 10])
def test_early_exit(self, ctx, dp_fn, main_prefetch, worker_prefetch) -> None:
dp = dp_fn(1000)
rs = MultiProcessingReadingService(
num_workers=2,
main_prefetch_cnt=main_prefetch,
worker_prefetch_cnt=worker_prefetch,
multiprocessing_context=ctx,
)
dl: DataLoader2 = DataLoader2(dp, reading_service=rs)
it = iter(dl)
for _ in range(10):
_ = next(it)
dl.shutdown()
@mp_ctx_parametrize
@parametrize("dp_fn", [subtest(_non_dispatching_dp, "non_dispatch"), subtest(_dispatching_dp, "dispatch")])
@parametrize("main_prefetch", [0, 10])
@parametrize("worker_prefetch", [0, 10])
def test_exit(self, ctx, dp_fn, main_prefetch, worker_prefetch) -> None:
dp = dp_fn(1000)
rs = MultiProcessingReadingService(
num_workers=2,
main_prefetch_cnt=main_prefetch,
worker_prefetch_cnt=worker_prefetch,
multiprocessing_context=ctx,
)
dl: DataLoader2 = DataLoader2(dp, reading_service=rs)
_ = list(dl)
dl.shutdown()
@mp_ctx_parametrize
@dp_parametrize
@parametrize(
"n_workers,worker_prefetch_cnt,main_prefetch_cnt",
[(1, 0, 0), (1, 0, 2), (2, 0, 0), (2, 2, 0), (2, 0, 2), (2, 2, 2)],
)
def test_reading_service_pause_resume(self, ctx, dp, n_workers, worker_prefetch_cnt, main_prefetch_cnt) -> None:
        # Functional Test: Tests various configurations of DataPipe/ReadingService to ensure the pipeline
        # properly pauses and resumes
rs = MultiProcessingReadingService(
num_workers=n_workers,
worker_prefetch_cnt=worker_prefetch_cnt,
main_prefetch_cnt=main_prefetch_cnt,
multiprocessing_context=ctx,
)
dl: DataLoader2 = DataLoader2(dp, reading_service=rs)
res = []
for i, x in enumerate(dl):
res.append(x)
if i in {2, n_elements - 2}:
dl._pause()
dl._resume()
self.assertEqual(
list(range(n_elements)),
sorted(res),
msg=f"The test is failing with '{ctx}', num_workers = {rs.num_workers}, "
f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, "
f"main_prefetch_cnt = {rs.main_prefetch_cnt}",
)
dl.shutdown()
@mp_ctx_parametrize
@dp_parametrize
@parametrize("n_workers,worker_prefetch_cnt,main_prefetch_cnt", [(2, 0, 1), (2, 1, 0), (2, 0, 0)])
def test_reading_service_pause_stop_yield(self, ctx, dp, n_workers, worker_prefetch_cnt, main_prefetch_cnt) -> None:
# Functional Test: Confirms that `dl` will stop yielding elements after `_pause` is called
rs = MultiProcessingReadingService(
num_workers=n_workers,
worker_prefetch_cnt=worker_prefetch_cnt,
main_prefetch_cnt=main_prefetch_cnt,
multiprocessing_context=ctx,
)
dl: DataLoader2 = DataLoader2(dp, reading_service=rs)
res = []
for i, x in enumerate(dl):
res.append(x)
if i in {2}:
dl._pause()
self.assertEqual(
3,
len(res),
msg=f"The test is failing with '{ctx}', num_workers = {rs.num_workers}, "
f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}",
)
dl.shutdown()
@dp_parametrize
@parametrize("n_workers,worker_prefetch_cnt,main_prefetch_cnt", [(1, 0, 0), (1, 0, 2), (2, 0, 0), (2, 2, 2)])
def test_reading_service_limit(self, dp, n_workers, worker_prefetch_cnt, main_prefetch_cnt) -> None:
rs = MultiProcessingReadingService(
num_workers=n_workers, worker_prefetch_cnt=worker_prefetch_cnt, main_prefetch_cnt=main_prefetch_cnt
)
dl: DataLoader2 = DataLoader2(dp, reading_service=rs)
res = []
cumulative_res = []
n_limit = 3
it: DataLoader2Iterator = iter(dl)
it.limit(n_limit)
for x in it:
res.append(x)
        # Functional Test: Verify that the number of elements yielded equals the specified limit
self.assertEqual(
n_limit,
len(res), # 3
msg=f"The test is failing with default multiprocessing method, "
f"num_workers = {rs.num_workers}, "
f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}",
)
cumulative_res.extend(res)
# Functional Test: Calling `next` after `limit` will trigger `StopIteration`
with self.assertRaises(StopIteration):
next(it)
# Functional Test: Verify that `limit` persists without the need to set it again
it.resume()
res = []
for x in it:
res.append(x)
self.assertEqual(
n_limit,
len(res), # 3
msg=f"The test is failing with default multiprocessing method, "
f"num_workers = {rs.num_workers}, "
f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}",
)
cumulative_res.extend(res)
# Functional Test: Clear the `limit` and yield the rest of the elements
it.limit(None)
it.resume()
res = []
for x in it:
res.append(x)
self.assertEqual(
n_elements - 2 * n_limit,
len(res), # 4
msg=f"The test is failing with default multiprocessing method, "
f"num_workers = {rs.num_workers}, "
f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}",
)
cumulative_res.extend(res)
self.assertEqual(list(range(n_elements)), sorted(cumulative_res))
        # Functional Test: Setting `limit` to a different value after each mini-epoch
dl2: DataLoader2 = DataLoader2(double_pause_dp, reading_service=rs)
res = []
it2: DataLoader2Iterator = iter(dl2)
it2.limit(3)
for x in it2:
res.append(x)
# Limit can be set before `resume`
it2.limit(4)
it2.resume()
for x in it2:
res.append(x)
self.assertEqual(7, len(res))
# Limit can also be set after `resume`, but before the next `for` loop
it2.resume()
it2.limit(2)
for x in it2:
res.append(x)
self.assertEqual(9, len(res))
def test_initial_epoch_checkpointing(self):
dp = IterableWrapper(range(20)).shuffle().sharding_filter()
# Note that the second `shuffle` occurs in the main process, which uses a different RNG from
# the `shuffle` done in the worker processes
dp = NonShardableDataPipe(dp).shuffle() # type: ignore[assignment, arg-type]
rs = MultiProcessingReadingService(num_workers=2)
# Functional Test: Saving state before iterator is created
dl: DataLoader2 = DataLoader2(datapipe=dp, reading_service=rs)
dl.seed(1)
initial_state = dl.state_dict()
it1 = iter(dl)
restored_dl: DataLoader2 = DataLoader2.from_state(initial_state, rs) # type: ignore[arg-type]
restored_dl._restore_checkpoint_beginning_of_epoch()
self.assertEqual(list(it1), list(restored_dl))
dl.shutdown()
restored_dl.shutdown()
# Functional Test: Saving state after iterator is created
dl = DataLoader2(datapipe=dp, reading_service=rs)
dl.seed(1)
it1 = iter(dl)
initial_state = dl.state_dict()
restored_dl = DataLoader2.from_state(initial_state, rs) # type: ignore[arg-type]
restored_dl._restore_checkpoint_beginning_of_epoch()
self.assertEqual(list(it1), list(restored_dl))
dl.shutdown()
restored_dl.shutdown()
# Functional Test: Saving state after iterator is created and began iterating
dl = DataLoader2(datapipe=dp, reading_service=rs)
dl.seed(1)
it1 = iter(dl)
temp = next(it1) # Starts iterating
initial_state = dl.state_dict()
restored_dl = DataLoader2.from_state(initial_state, rs) # type: ignore[arg-type]
restored_dl._restore_checkpoint_beginning_of_epoch()
        self.assertEqual([temp] + list(it1), list(restored_dl))  # Note: `temp` is prepended since `it1` already consumed the first element
dl.shutdown()
restored_dl.shutdown()
# TODO: Test cases when there is official support of `pause` and `resume` with round-robin sharding
# Currently, using sharding_round_robin raises a warning
# def test_round_robin_dispatching_pause_limit(self):
# source_dp = IterableWrapper(range(20))
# dp = source_dp.shuffle().sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING)
# dp = dp.map(_add_one)
# TODO: This doesn't work with `num_workers > 1`
# TODO: Try checking if `dp_list`'s elements are _IterateQueueDP or QueueWrapper, we can safely assume
# those DPs belong to a dispatching process and only do pause if worker_id == 0
# There might still be a race condition, need to look into the messages
# rs1 = MultiProcessingReadingService(num_workers=2, worker_prefetch_cnt=0, main_prefetch_cnt=0)
# rs2 = MultiProcessingReadingService(num_workers=2, worker_prefetch_cnt=0, main_prefetch_cnt=2)
# rs3 = MultiProcessingReadingService(num_workers=2, worker_prefetch_cnt=2, main_prefetch_cnt=0)
# rs4 = MultiProcessingReadingService(num_workers=2, worker_prefetch_cnt=2, main_prefetch_cnt=2)
# rss = [rs1, rs2, rs3, rs4]
# for n, rs in enumerate(rss):
# dl = DataLoader2(dp, reading_service=rs)
# res = []
# # cumulative_res = []
# n_limit = 3
#
# it: DataLoader2Iterator = iter(dl)
# it.limit(n_limit) # The `pause` call here doesn't stop
# for x in it:
# res.append(x)
#
# print()
# print(res)
#
# dl.shutdown()
# # Functional Test: Verify that the number of elements yielded equals to the specified limit
# # self.assertEqual(
# # n_limit,
# # len(res), # 3
# # msg=f"The test is failing for rs{n + 1} with default multiprocessing method, "
# # f"num_workers = {rs.num_workers}, "
# # f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}",
# # )
# cumulative_res.extend(res)
#
# # Functional Test: Calling `next` after `limit` will trigger `StopIteration`
# with self.assertRaisesRegex(StopIteration, "pause"):
# next(it)
#
# # Functional Test: Verify that `limit` persists without the need to set it again
# it.resume()
# res = []
# for x in it:
# res.append(x)
# # self.assertEqual(
# # n_limit,
# # len(res), # 3
# # msg=f"The test is failing for rs{n + 1} with default multiprocessing method, "
# # f"num_workers = {rs.num_workers}, "
# # f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}",
# # )
# cumulative_res.extend(res)
#
# # Functional Test: Clear the `limit` and yield the rest of the elements
# it.limit(None)
# it.resume()
# res = []
# for x in it:
# res.append(x)
# # self.assertEqual(
# # n_elements - 2 * n_limit,
# # len(res), # 4
# # msg=f"The test is failing for rs{n + 1} with default multiprocessing method, "
# # f"num_workers = {rs.num_workers}, "
# # f"worker_prefetch_cnt = {rs.worker_prefetch_cnt}, main_prefetch_cnt = {rs.main_prefetch_cnt}",
# # )
#
# cumulative_res.extend(res)
# self.assertEqual(list(range(n_elements)), sorted(cumulative_res))
# TODO: Implemented in an upcoming PR
# def test_reading_service_snapshot(self) -> None:
# pass
#
# def test_dataloader2_snapshot(self) -> None:
# pass
instantiate_parametrized_tests(TestInProcessReadingService)
instantiate_parametrized_tests(TestMultiProcessingReadingService)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
import unittest
from unittest import TestCase
import numpy as np
import torch
from torch.testing._internal.common_utils import instantiate_parametrized_tests, IS_WINDOWS, parametrize
from torchdata.dataloader2 import DataLoader2, InProcessReadingService, MultiProcessingReadingService
from torchdata.dataloader2.graph.settings import set_graph_random_seed
from torchdata.dataloader2.random import SeedGenerator
from torchdata.datapipes.iter import IterableWrapper
def _random_fn(data):
r"""
    Used to validate that subprocess-local RNGs are seeded deterministically.
"""
py_random_num = random.randint(0, 2 ** 32)
np_random_num = np.random.randint(0, 2 ** 32, dtype=np.uint32)
torch_random_num = torch.randint(0, 2 ** 32, size=[]).item()
return (data, py_random_num, np_random_num, torch_random_num)
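# A minimal sketch of the seeding contract exercised below (hypothetical helper):
# seeding the global RNGs before an epoch makes both the shuffle order and the
# worker-local random draws reproducible across runs.
def _example_seeded_epoch(dl, seed=123):
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)
    return [d for d, *_ in dl]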
class DeterminismTest(TestCase):
@unittest.skipIf(IS_WINDOWS, "Remove when https://github.com/pytorch/data/issues/857 is fixed")
@parametrize("num_workers", [1, 8])
def test_mprs_determinism(self, num_workers):
data_length = 64
exp = list(range(data_length))
data_source = IterableWrapper(exp)
dp = data_source.shuffle().sharding_filter().map(_random_fn)
rs = MultiProcessingReadingService(num_workers=num_workers)
dl = DataLoader2(dp, reading_service=rs)
# No seed
res = []
for d, *_ in dl:
res.append(d)
self.assertEqual(sorted(res), exp)
# Shuffle with seed
results = []
for _ in range(2):
res = []
ran_res = []
torch.manual_seed(123)
random.seed(123)
np.random.seed(123)
for d, *ran_nums in dl:
res.append(d)
ran_res.append(ran_nums)
self.assertEqual(sorted(res), exp)
results.append((res, ran_res))
        # The same seed generates the same order of data and the same random state
self.assertEqual(results[0], results[1])
# Different seed
res = []
ran_res = []
torch.manual_seed(321)
random.seed(321)
np.random.seed(321)
for d, *ran_nums in dl:
res.append(d)
ran_res.append(ran_nums)
self.assertEqual(sorted(res), exp)
# Different shuffle order
self.assertNotEqual(results[0][0], res)
# Different subprocess-local random state
self.assertNotEqual(results[0][1], ran_res)
def test_graph_random_settings(self):
def _get_dp_seeds_after_setting(worker_id, seed=123):
data_source = IterableWrapper(list(range(100)))
dp0 = data_source.shuffle()
dp1, dp2, dp3 = dp0.fork(3)
dp1 = dp1.sharding_filter()
dp2 = dp2.shuffle()
dp3 = dp3.shuffle()
dp3_ = dp3.sharding_filter()
dp4 = dp1.zip(dp2, dp3_).shuffle()
sg = SeedGenerator(seed).spawn(worker_id)
set_graph_random_seed(dp4, sg)
# same seeds, different seeds
return (dp0._seed, dp3._seed), (dp2._seed, dp4._seed)
ss_0_123, ds_0_123 = _get_dp_seeds_after_setting(worker_id=0, seed=123)
ss_1_123, ds_1_123 = _get_dp_seeds_after_setting(worker_id=1, seed=123)
self.assertEqual(ss_0_123, ss_1_123)
self.assertNotEqual(ds_0_123, ds_1_123)
ss_0_123_, ds_0_123_ = _get_dp_seeds_after_setting(worker_id=0, seed=123)
self.assertEqual(ss_0_123, ss_0_123_)
self.assertEqual(ds_0_123, ds_0_123_)
ss_0_321, ds_0_321 = _get_dp_seeds_after_setting(worker_id=0, seed=321)
self.assertNotEqual(ss_0_123, ss_0_321)
self.assertNotEqual(ds_0_123, ds_0_321)
def test_sprs_determinism(self):
data_length = 64
exp = list(range(data_length))
data_source = IterableWrapper(exp)
dp = data_source.shuffle().sharding_filter().map(_random_fn)
rs = InProcessReadingService()
dl = DataLoader2(dp, reading_service=rs)
# No seed
res = []
for d, *_ in dl:
res.append(d)
self.assertEqual(sorted(res), exp)
# Shuffle with seed
results = []
for _ in range(2):
res = []
ran_res = []
torch.manual_seed(123)
random.seed(123)
np.random.seed(123)
for d, *ran_nums in dl:
res.append(d)
ran_res.append(ran_nums)
self.assertEqual(sorted(res), exp)
results.append((res, ran_res))
        # The same seed generates the same order of data and the same random state
self.assertEqual(results[0], results[1])
# Different seed
res = []
ran_res = []
torch.manual_seed(321)
random.seed(321)
np.random.seed(321)
for d, *ran_nums in dl:
res.append(d)
ran_res.append(ran_nums)
self.assertEqual(sorted(res), exp)
# Different shuffle order
self.assertNotEqual(results[0][0], res)
# Different subprocess-local random state
self.assertNotEqual(results[0][1], ran_res)
instantiate_parametrized_tests(DeterminismTest)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import pytorch_sphinx_theme
import torchdata
# sys.path.insert(0, os.path.abspath('.'))
current_dir = os.path.dirname(__file__)
target_dir = os.path.abspath(os.path.join(current_dir, "../.."))
sys.path.insert(0, target_dir)
print(target_dir)
# -- Project information -----------------------------------------------------
project = "TorchData"
copyright = "2021 - Present, Torch Contributors"
author = "Torch Contributors"
# The short X.Y version
version = "main (" + torchdata.__version__ + " )"
# The full version, including alpha/beta/rc tags
release = "main"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.napoleon",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.doctest",
"sphinx.ext.graphviz",
]
# Do not execute standard reST doctest blocks so that documentation can
# be successively migrated to sphinx's doctest directive.
doctest_test_doctest_blocks = ""
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
"generated/torchdata.datapipes.iter.Extractor.rst",
"generated/torchdata.datapipes.iter.TarArchiveReader.rst",
"generated/torchdata.datapipes.iter.XzFileReader.rst",
"generated/torchdata.datapipes.iter.ZipArchiveReader.rst",
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "pytorch_sphinx_theme"
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
html_theme_options = {
"collapse_navigation": False,
"display_version": True,
"logo_only": True,
"pytorch_project": "docs",
"navigation_with_keys": True,
"analytics_id": "UA-117752657-2",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
"css/custom.css",
]
# TODO(598): use regex to replace all "T" and "T_co" related signature
signature_replacements = {
"torch.utils.data.datapipes.datapipe.IterDataPipe": "IterDataPipe",
"abc.IterDataPipe": "IterDataPipe",
"torch.utils.data.datapipes.datapipe.MapDataPipe": "MapDataPipe",
"abc.MapDataPipe": "MapDataPipe",
"typing.Type[torch.utils.data.sampler.Sampler]": "torch.utils.data.sampler.Sampler",
"<class 'torch.utils.data.sampler.SequentialSampler'>": "SequentialSampler",
"torch.utils.data.datapipes.iter.combining.T_co": "T_co",
"torch.utils.data.datapipes.iter.combinatorics.T_co": "T_co",
"torchdata.datapipes.iter.transform.bucketbatcher.T_co": "T_co",
"torch.utils.data.datapipes.map.grouping.T": "T",
"torch.utils.data.datapipes.map.combining.T_co": "T_co",
"torch.utils.data.datapipes.map.combinatorics.T_co": "T_co",
"torchdata.datapipes.iter.util.cycler.T_co": "T_co",
"torchdata.datapipes.iter.util.paragraphaggregator.T_co": "T_co",
"torchdata.datapipes.map.util.cacheholder.T_co": "T_co",
"Sequence[torchdata.datapipes.map.util.unzipper.T]": "Sequence[T]",
"torchdata.datapipes.iter.util.samplemultiplexer.T_co": "T_co",
"torchdata.datapipes.iter.util.indexadder.K": "K",
"torchdata.datapipes.iter.util.unzipper.T": "T",
"torch.utils.data.datapipes.iter.grouping.T_co": "T_co",
"torchdata.datapipes.iter.util.dataframemaker.T_co": "T_co",
"torchdata.datapipes.iter.util.cacheholder.T_co": "T_co",
"torchdata.datapipes.iter.util.header.T_co": "T_co",
"<class 'torch.utils.data.datapipes.datapipe.DataChunk'>": "List",
"typing.": "",
"Union[IterDataPipe, MapDataPipe]": "DataPipe",
"Dict[int, Tuple[DataPipe, DataPipeGraph]": "DataPipeGraph",
}
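# For example, once `process_signature` below applies these replacements, a
# rendered signature containing "Union[IterDataPipe, MapDataPipe]" shows up
# simply as "DataPipe".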
def process_signature(app, what, name, obj, options, signature, return_annotation):
"""Replacing long type annotations in signature with more succinct ones."""
if isinstance(signature, str):
for old, new in signature_replacements.items():
if old in signature:
signature = signature.replace(old, new)
return signature, return_annotation
def setup(app):
# Overwrite class name to allow aliasing in documentation generation
import torchdata.datapipes.iter as iter
import torchdata.datapipes.map as map
for mod in (iter, map):
for name, obj in mod.__dict__.items():
if isinstance(obj, type):
obj.__name__ = name
app.connect("autodoc-process-signature", process_signature)
intersphinx_mapping = {
"graphviz": ("https://graphviz.readthedocs.io/en/stable/", None),
}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This file contains the data pipeline to read from a TSV file and output a DataFrame.
"""
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, TypeVar, Union
import numpy as np
import torcharrow as ta
import torcharrow.dtypes as dt
import torcharrow.pytorch as tap
import torcharrow_wrapper # noqa: F401
from common import (
CAT_FEATURE_COUNT,
DEFAULT_CAT_NAMES,
DEFAULT_COLUMN_NAMES,
DEFAULT_INT_NAMES,
INT_FEATURE_COUNT,
safe_cast,
)
from iopath.common.file_io import PathManagerFactory
from torch.utils.data import get_worker_info
from torch.utils.data.datapipes.dataframe.dataframes import CaptureLikeMock
from torcharrow import functional
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService
from torchdata.datapipes.iter import Batcher, CSVParser, IoPathFileOpener, IterableWrapper, IterDataPipe, Mapper
PATH_MANAGER_KEY = "torchrec"
T = TypeVar("T")
COLUMN_TYPE_CASTERS: List[Callable[[Union[int, str]], Union[int, str]]] = [
lambda val: safe_cast(val, int, 0),
*(lambda val: safe_cast(val, int, 0) for _ in range(INT_FEATURE_COUNT)),
*(lambda val: safe_cast(val, str, "") for _ in range(CAT_FEATURE_COUNT)),
]
DTYPE = dt.Struct(
[
dt.Field("labels", dt.int8),
dt.Field(
"dense_features",
dt.Struct([dt.Field(int_name, dt.Int32(nullable=True)) for int_name in DEFAULT_INT_NAMES]),
),
dt.Field(
"sparse_features",
dt.Struct([dt.Field(cat_name, dt.Int32(nullable=True)) for cat_name in DEFAULT_CAT_NAMES]),
),
]
)
def _torcharrow_row_mapper(row: List[str]) -> Tuple[int, Tuple[int, ...], Tuple[int, ...]]:
label = int(safe_cast(row[0], int, 0))
dense = tuple(int(safe_cast(row[i], int, 0)) for i in range(1, 1 + INT_FEATURE_COUNT))
sparse = tuple(
int(safe_cast(row[i], str, "0") or "0", 16)
for i in range(1 + INT_FEATURE_COUNT, 1 + INT_FEATURE_COUNT + CAT_FEATURE_COUNT)
)
# TorchArrow doesn't support uint32, but we can save memory
    # by not using int64. NumPy wraps sparse values >= 2 ** 31 into the signed int32 range.
sparse = tuple(np.array(sparse, dtype=np.int32).tolist())
return label, dense, sparse
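# For example (hypothetical row): ["1", "5", ..., "68fd1e64", ...] maps to
# (1, (5, ...), (int("68fd1e64", 16) narrowed to int32, ...)).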
def criteo_dataframes_from_tsv(
paths: Union[str, Iterable[str]],
*,
batch_size: int = 128,
) -> IterDataPipe:
"""
Load Criteo dataset (Kaggle or Terabyte) as TorchArrow DataFrame streams from TSV file(s)
    This implementation is inefficient and is intended for prototyping and testing only.
Args:
paths (str or Iterable[str]): local paths to TSV files that constitute
the Kaggle or Criteo 1TB dataset.
batch_size (int): number of rows within each DataFrame
Example:
>>> datapipe = criteo_dataframes_from_tsv(
>>> ["/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv"]
>>> )
>>> for df in datapipe:
>>> print(df)
"""
if isinstance(paths, str):
paths = [paths]
datapipe = CriteoIterDataPipe(paths, row_mapper=_torcharrow_row_mapper)
datapipe = Batcher(datapipe, batch_size)
datapipe = Mapper(datapipe, lambda batch: ta.dataframe(batch, dtype=DTYPE))
return datapipe.trace_as_dataframe()
def _default_row_mapper(example: List[str]) -> Dict[str, Union[int, str]]:
column_names = reversed(DEFAULT_COLUMN_NAMES)
column_type_casters = reversed(COLUMN_TYPE_CASTERS)
return {next(column_names): next(column_type_casters)(val) for val in reversed(example)}
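# Iterating from the right pairs each value with its column name even when a row
# is missing leading fields; e.g. a row holding only the final two fields fills
# just the last two entries of DEFAULT_COLUMN_NAMES.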
class CriteoIterDataPipe(IterDataPipe):
"""
IterDataPipe that can be used to stream either the Criteo 1TB Click Logs Dataset
(https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) or the
Kaggle/Criteo Display Advertising Dataset
(https://www.kaggle.com/c/criteo-display-ad-challenge/) from the source TSV
files.
Args:
paths (Iterable[str]): local paths to TSV files that constitute the Criteo
dataset.
row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each
split TSV line.
open_kw: options to pass to underlying invocation of
iopath.common.file_io.PathManager.open.
Example:
>>> datapipe = CriteoIterDataPipe(
>>> ("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
>>> )
>>> datapipe = dp.iter.Batcher(datapipe, 100)
>>> datapipe = dp.iter.Collator(datapipe)
>>> batch = next(iter(datapipe))
"""
def __init__(
self,
paths: Iterable[str],
*,
# pyre-ignore[2]
row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
) -> None:
self.paths = paths
self.row_mapper = row_mapper
# pyre-ignore[3]
def __iter__(self) -> Iterator[Any]:
worker_info = get_worker_info()
paths = self.paths
if worker_info is not None:
paths = (path for (idx, path) in enumerate(paths) if idx % worker_info.num_workers == worker_info.id)
paths = IterableWrapper(paths)
datapipe = IoPathFileOpener(paths, mode="r", pathmgr=PathManagerFactory().get(PATH_MANAGER_KEY))
datapipe = CSVParser(datapipe, delimiter="\t")
if self.row_mapper:
datapipe = Mapper(datapipe, self.row_mapper)
yield from datapipe
# Creating DataFrame from TSV File
df = criteo_dataframes_from_tsv("day_11_first_3k_rows_original.tsv")
df = df.shuffle()
df["dense_features"] = df["dense_features"].fill_null(0)
df["sparse_features"] = df["sparse_features"].fill_null(0)
# Remove CaptureLikeMock when torcharrow.functional accepts StreamDataFrame
with CaptureLikeMock("torcharrow.functional.array_constructor"):
for field in df["sparse_features"].columns:
df["sparse_features"][field] = functional.array_constructor(df["sparse_features"][field])
df["dense_features"] = (df["dense_features"] + 3).log()
df["labels"] = df["labels"].cast(dt.int32)
df = df.batch(10)
conversion = {
"dense_features": tap.rec.Dense(),
"sparse_features": tap.rec.Dense(), # Sparse not implemented yet in torcharrow
    # Because "labels" is not listed here, it behaves as if "labels": tap.rec.Default() were specified,
}
df = df.collate(conversion=conversion)
reading_service = MultiProcessingReadingService(num_workers=0)
dl = DataLoader2(df, reading_service=reading_service)
print("Iterating DataLoader now")
for item in dl:
labels, dense_features, sparse_features = item
print(labels)
print(dense_features)
print(sparse_features)
break
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# TODO(597): This file can be moved to the dataframe parent directory once Torcharrow
# is open sourced
from typing import Iterable, List, Optional, Union
import torcharrow as ta
from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
class TorcharrowWrapper:
@classmethod
def create_dataframe(cls, data: Iterable, columns: Optional[List[str]] = None):
columnar_data = list(zip(*data))
        # generate default column names if the `columns` arg is not provided
column_names = columns
if not columns or len(columns) == 0:
column_names = [f"col{i}" for i in range(len(columnar_data))]
return ta.dataframe({column_name: ta.Column(value) for column_name, value in zip(column_names, columnar_data)})
@classmethod
def is_dataframe(cls, data: Union[ta.DataFrame, ta.Column]):
return isinstance(data, ta.DataFrame)
@classmethod
def is_column(cls, data: Union[ta.DataFrame, ta.Column]):
return isinstance(data, ta.Column)
@classmethod
def iterate(cls, df):
yield from df
@classmethod
def concat(cls, buffer: List[ta.DataFrame]):
concat_buffer = []
for b in buffer:
concat_buffer += list(b)
return ta.dataframe(concat_buffer, dtype=buffer[0].dtype)
@classmethod
def get_item(cls, df: ta.DataFrame, idx):
return df[idx : idx + 1]
@classmethod
def get_len(cls, df: ta.DataFrame):
return len(df)
@classmethod
def get_columns(cls, df):
return list(df.columns)
df_wrapper.set_df_wrapper(TorcharrowWrapper)
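# A minimal usage sketch (hypothetical helper): the wrapper turns row-wise tuples
# into a TorchArrow DataFrame; column names default to "col0", "col1", ... when
# `columns` is omitted.
def _example_create_dataframe():
    rows = [(1, "a"), (2, "b")]
    return TorcharrowWrapper.create_dataframe(rows, columns=["id", "name"])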
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This file contains the data pipeline to read from a Parquet file and output a DataFrame.
"""
import torcharrow.dtypes as dt
from common import DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES
from torchdata.datapipes.iter import FileLister, ParquetDataFrameLoader
DTYPE = dt.Struct(
[dt.Field("label", dt.int64)]
+ [dt.Field(int_name, dt.Float64(nullable=True)) for int_name in DEFAULT_INT_NAMES]
+ [dt.Field(cat_name, dt.Float64(nullable=True)) for cat_name in DEFAULT_CAT_NAMES]
)
source_dp = FileLister(".", masks="*.parquet")
parquet_df_dp = ParquetDataFrameLoader(source_dp, dtype=DTYPE)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, List, TypeVar
T = TypeVar("T")
# Criteo Data Set Parameters
INT_FEATURE_COUNT = 13
CAT_FEATURE_COUNT = 26
DEFAULT_LABEL_NAME = "label"
DEFAULT_INT_NAMES: List[str] = [f"int_{idx}" for idx in range(INT_FEATURE_COUNT)]
DEFAULT_CAT_NAMES: List[str] = [f"cat_{idx}" for idx in range(CAT_FEATURE_COUNT)]
DEFAULT_COLUMN_NAMES: List[str] = [
DEFAULT_LABEL_NAME,
*DEFAULT_INT_NAMES,
*DEFAULT_CAT_NAMES,
]
def safe_cast(val: T, dest_type: Callable[[T], T], default: T) -> T:
"""
Helper function to safely cast data with default as fallback.
"""
try:
return dest_type(val)
except ValueError:
return default
def safe_hex_to_int(num):
try:
return int(safe_cast(num, str, "0") or "0", 16)
except Exception:
return float("NaN")
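# For example: safe_cast("12", int, 0) -> 12, safe_cast("n/a", int, 0) -> 0,
# and safe_hex_to_int("ff") -> 255.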
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This file pre-processes the source file and saves it as a TSV file and a Parquet file.
You do not need to re-run this file if "day_11_first_3k_rows.parquet" and "day_11_first_3k_rows.tsv" exist locally
"""
import pandas
import pyarrow
import pyarrow.parquet as parquet
from common import DEFAULT_CAT_NAMES, DEFAULT_COLUMN_NAMES, safe_hex_to_int
# Read TSV File with Pandas
tsv_fname = "day_11_first_3k_rows_original.tsv"
df = pandas.read_csv(tsv_fname, sep="\t")
df.columns = DEFAULT_COLUMN_NAMES
# Convert hex strings to integers
for i, row in df.iterrows():
for cat_col in DEFAULT_CAT_NAMES:
df.at[i, cat_col] = safe_hex_to_int(row[cat_col])
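# An equivalent, typically much faster vectorized form (same result):
#     for cat_col in DEFAULT_CAT_NAMES:
#         df[cat_col] = df[cat_col].map(safe_hex_to_int)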
# Convert to PyArrow table and write to disk as parquet file
table = pyarrow.Table.from_pandas(df=df)
parquet_fname = "day_11_first_3k_rows.parquet"
parquet.write_table(table, parquet_fname)
# Write to a new .tsv file
df.to_csv("day_11_first_3k_rows.tsv", sep="\t")
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import http.server
import os
import re
import threading
import torchvision.datasets as datasets
import torchvision.datasets.folder
import torchvision.transforms as transforms
from PIL import Image
from torch.utils.data import DataLoader
from torchdata.datapipes.iter import FileLister, HttpReader, IterDataPipe
IMAGES_ROOT = os.path.join("fakedata", "imagefolder")
USE_FORK_DATAPIPE = False
NUM_WORKERS = 5
BATCH_SIZE = None
data_transform = transforms.Compose(
[
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
# The DataPipes implementation of ImageFolder constructs and executes a graph of DataPipes (aka a DataPipeline)
# FileLister -> ObtainCategories
# |
# V
# FileLister -> AttributeCategories -> LoadAndDecodeImages (using `map`) -> ApplyTorchVisionTransforms (using `map`)
def get_category_name(path):
rel_path = os.path.relpath(path, start=IMAGES_ROOT)
elements = rel_path.split(os.sep)
return elements[0]
class ObtainCategories(IterDataPipe):
def __init__(self, source_dp, parse_category_fn=get_category_name) -> None:
self.source_dp = source_dp
self.parse_category_fn = parse_category_fn
def __iter__(self):
categories = set()
for path in self.source_dp:
categories.add(self.parse_category_fn(path))
cat_to_id = {name: i for i, name in enumerate(sorted(categories))}
yield cat_to_id
class AttributeCategories(IterDataPipe):
def __init__(self, listfiles_dp, categories_dp, parse_category_fn=get_category_name) -> None:
self.listfiles_dp = listfiles_dp
self.categories_dp = categories_dp
self.parse_category_fn = parse_category_fn
def __iter__(self):
for categories in self.categories_dp:
cat_to_dp = categories
for data in self.listfiles_dp:
if isinstance(data, tuple):
category = cat_to_dp[self.parse_category_fn(data[0])]
yield data + (category,)
else:
category = cat_to_dp[self.parse_category_fn(data)]
yield (data, category)
def MyImageFolder(root=IMAGES_ROOT, transform=None):
if not USE_FORK_DATAPIPE:
        # Yes, we have to scan the files twice. Alternatively, it is possible to use
        # the `fork` DataPipe, but it would require a buffer equal to the size of all
        # full file names.
# TODO(125): Make sure that `fork` complains when buffer becomes
# too large
list_files_0 = FileLister(root=IMAGES_ROOT, recursive=True)
list_files_1 = FileLister(root=IMAGES_ROOT, recursive=True).sharding_filter()
else:
list_files_0, list_files_1 = FileLister(root=IMAGES_ROOT, recursive=True).fork(2)
list_files_1 = list_files_1.sharding_filter()
categories = ObtainCategories(list_files_0)
with_categories = AttributeCategories(list_files_1, categories)
using_default_loader = with_categories.map(lambda x: (torchvision.datasets.folder.default_loader(x[0]), x[1]))
transformed = using_default_loader.map(lambda x: (transform(x[0]), x[1]))
return transformed
class ExpandURLPatternDataPipe(IterDataPipe):
def __init__(self, pattern) -> None:
result = re.match(r"(.*?)\{(.*?)}(.*)", pattern)
if result:
self.prefix = result.group(1)
self.pattern = result.group(2)
self.postfix = result.group(3)
result = re.match(r"(\d+)\.\.(\d+)", self.pattern)
if result:
self.start_str = result.group(1)
self.end_str = result.group(2)
else:
raise Exception("Invalid pattern")
else:
raise Exception("Invalid pattern")
def __iter__(self):
current_int = int(self.start_str)
end_int = int(self.end_str)
        for i in range(current_int, end_int + 1):
            # Zero-pad to the width of the range start, e.g. "{01..10}".
            str_i = str(i).zfill(len(self.start_str))
            yield self.prefix + str_i + self.postfix
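# For example, ExpandURLPatternDataPipe("http://localhost:8000/cat/{1..3}.jpg")
# yields ".../cat/1.jpg", ".../cat/2.jpg" and ".../cat/3.jpg"; a zero-padded
# range such as "{01..10}" keeps its width.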
HTTP_PATH_ROOT = "http://localhost:8000/"
HTTP_PATH_CAT = "http://localhost:8000/cat/{1..3}.jpg"
HTTP_PATH_DOG = "http://localhost:8000/dog/{1..3}.jpg"
def get_category_name_url(url):
rel_path = os.path.relpath(url, start=HTTP_PATH_ROOT)
elements = rel_path.split(os.sep)
return elements[0]
def stream_to_pil(stream):
img = Image.open(stream)
return img.convert("RGB")
def MyHTTPImageFolder(transform=None):
    # The HTTP protocol doesn't support listing files, so we have to provide the URLs explicitly
list_files = ExpandURLPatternDataPipe(HTTP_PATH_CAT) + ExpandURLPatternDataPipe(HTTP_PATH_DOG)
list_files_0, list_files_1 = list_files.fork(2)
list_files_1 = list_files_1.sharding_filter().shuffle()
categories = ObtainCategories(list_files_0, parse_category_fn=get_category_name_url)
loaded_files = HttpReader(list_files_1)
with_categories = AttributeCategories(loaded_files, categories, parse_category_fn=get_category_name_url)
pil_images = with_categories.map(lambda x: (x[0], stream_to_pil(x[1]), x[2]))
transformed = pil_images.map(lambda x: (transform(x[1]), x[2]))
return transformed
if __name__ == "__main__":
dataset = datasets.ImageFolder(root=IMAGES_ROOT, transform=data_transform)
dl = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=NUM_WORKERS)
items = list(dl)
assert len(items) == 6
dataset = MyImageFolder(root=IMAGES_ROOT, transform=data_transform)
dl = DataLoader(
dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS,
)
items = list(dl)
assert len(items) == 6
http_handler = http.server.SimpleHTTPRequestHandler
http_handler.log_message = lambda a, b, c, d, e: None
httpd = http.server.HTTPServer(("", 8000), http_handler)
os.chdir(IMAGES_ROOT)
thread = threading.Thread(target=httpd.serve_forever)
thread.start()
dataset = MyHTTPImageFolder(transform=data_transform)
dl = DataLoader(
dataset,
batch_size=BATCH_SIZE,
shuffle=False,
num_workers=NUM_WORKERS,
)
try:
items = list(dl)
assert len(items) == 6
finally:
httpd.shutdown()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from io import BytesIO
import requests
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService
from torchdata.datapipes.iter import HuggingFaceHubReader
try:
import PIL
from PIL import Image
except ImportError:
PIL = None
Image = None
def has_no_watermark(x):
return x["pwatermark"] is not None and x["pwatermark"] < 0.8
def is_sfw(x):
return x["punsafe"] is not None and x["punsafe"] < 0.5
def load_image(url):
try:
r = requests.get(url, timeout=5)
return Image.open(BytesIO(r.content))
except Exception:
return None
def image_was_loaded(x):
return x is not None
# For more information about the dataset see: https://laion.ai/blog/laion-5b/
# name of the dataset to be used
NAME = "laion/laion2B-en-joined"
# As the dataset is too large to store locally, we use a streaming approach
def laion2b_en(name=NAME):
dp = HuggingFaceHubReader(name)
dp = dp.filter(has_no_watermark)
dp = dp.filter(is_sfw)
dp = dp.shuffle().sharding_filter()
dp = dp.slice(index=["TEXT", "URL"])
dp = dp.map(fn=load_image, input_col="URL", output_col="IMAGE") # this needs multithreading
dp = dp.filter(filter_fn=image_was_loaded, input_col="IMAGE")
dp = dp.drop("URL")
dp = dp.batch(20)
return dp
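# A hedged alternative sketch for the "needs multithreading" note above: overlap
# the blocking `load_image` calls with a thread pool (standard library only; no
# extra torchdata operators assumed).
def _load_images_threaded(urls, max_workers=16):
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        return list(executor.map(load_image, urls))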
def print_label_and_copyright(label, image):
try:
try:
exif = image.getexif()
# 0x8298 is the EXIF-tag for copyright
copyright_info = exif.get(0x8298, "no info")
except Exception:
copyright_info = "EXIF data is corrupted"
if copyright_info != "no info" and copyright_info != "EXIF data is corrupted":
print(f"image {i}: {label=}, {copyright_info=} ")
else:
print(f"image {i}: {label=}")
except PIL.UnidentifiedImageError:
print(f"image {i}: corrupted")
if __name__ == "__main__":
i = 0
dp = laion2b_en()
rs = MultiProcessingReadingService(num_workers=4)
dl = DataLoader2(dp, reading_service=rs)
for batch in dl:
for entry in batch:
print_label_and_copyright(entry["TEXT"], entry["IMAGE"])
i += 1
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os.path
import re
import torch
from torch.utils.data.datapipes.utils.decoder import imagehandler, mathandler
from torchdata.datapipes.iter import (
FileOpener,
Filter,
IterableWrapper,
IterKeyZipper,
Mapper,
RoutedDecoder,
TarArchiveLoader,
)
# Download size is ~150 MB so fake data is provided
URL = {
"images": "http://www.vision.caltech.edu/Image_Datasets/Caltech101/101_ObjectCategories.tar.gz",
"annotations": "http://www.vision.caltech.edu/Image_Datasets/Caltech101/Annotations.tar",
}
# We really shouldn't use MD5 anymore and should switch to a more secure hash
# such as SHA256 or SHA512.
MD5 = {
"images": "b224c7392d521a49829488ab0f1120d9",
"annotations": "f83eeb1f24d99cab4eb377263132c91",
}
ROOT = os.path.join("fakedata", "caltech101")
IMAGES_NAME_PATTERN = re.compile(r"image_(?P<id>\d+)[.]jpg")
ANNS_NAME_PATTERN = re.compile(r"annotation_(?P<id>\d+)[.]mat")
ANNS_CLASS_MAP = {
"Faces_2": "Faces",
"Faces_3": "Faces_easy",
"Motorbikes_16": "Motorbikes",
"Airplanes_Side_2": "airplanes",
}
def is_ann(data):
path, _ = data
return bool(ANNS_NAME_PATTERN.match(os.path.basename(path)))
def collate_ann(data):
path, ann = data
cls = os.path.split(os.path.dirname(path))[1]
if cls in ANNS_CLASS_MAP:
cls = ANNS_CLASS_MAP[cls]
return path, {"cls": cls, "contour": torch.as_tensor(ann["obj_contour"])}
def is_not_background_image(data):
path, _ = data
return os.path.split(os.path.dirname(path))[1] != "BACKGROUND_Google"
def is_not_rogue_image(data) -> bool:
path, _ = data
return os.path.basename(path) != "RENAME2"
def extract_file_id(path, *, pattern):
match = pattern.match(os.path.basename(path))
return int(match.group("id"))
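# For example: extract_file_id("image_0042.jpg", pattern=IMAGES_NAME_PATTERN) -> 42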
def images_key_fn(data):
path, _ = data
cls = os.path.split(os.path.dirname(path))[1]
id = extract_file_id(path, pattern=IMAGES_NAME_PATTERN)
return cls, id
def anns_key_fn(data):
path, ann = data
id = extract_file_id(path, pattern=ANNS_NAME_PATTERN)
return ann["cls"], id
def collate_sample(data):
(image_path, image), (ann_path, ann) = data
return dict(ann, image_path=image_path, image=image, ann_path=ann_path)
def Caltech101(root=ROOT):
anns_dp = IterableWrapper([os.path.join(root, "Annotations.tar")])
anns_dp = FileOpener(anns_dp, mode="b")
anns_dp = TarArchiveLoader(anns_dp)
anns_dp = Filter(anns_dp, is_ann)
anns_dp = RoutedDecoder(anns_dp, mathandler())
anns_dp = Mapper(anns_dp, collate_ann)
images_dp = IterableWrapper([os.path.join(root, "101_ObjectCategories.tar.gz")])
images_dp = FileOpener(images_dp, mode="b")
images_dp = TarArchiveLoader(images_dp)
images_dp = Filter(images_dp, is_not_background_image)
images_dp = Filter(images_dp, is_not_rogue_image)
images_dp = RoutedDecoder(images_dp, imagehandler("pil"))
dp = IterKeyZipper(images_dp, anns_dp, images_key_fn, ref_key_fn=anns_key_fn, buffer_size=None)
return Mapper(dp, collate_sample)
if __name__ == "__main__":
for _sample in Caltech101():
pass
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os.path
from torch.utils.data.datapipes.utils.decoder import imagehandler
from torchdata.datapipes.iter import FileOpener, IterableWrapper, Mapper, RoutedDecoder, TarArchiveLoader
# Download size is ~1.2 GB so fake data is provided
URL = "http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar"
ROOT = os.path.join("datasets", "caltech256")
# We really shouldn't use MD5 anymore and should switch to a more secure hash
# such as SHA256 or SHA512.
MD5 = "67b4f42ca05d46448c6bb8ecd2220f6d"
def collate_sample(data):
path, image = data
dir = os.path.split(os.path.dirname(path))[1]
label_str, cls = dir.split(".")
return {"path": path, "image": image, "label": int(label_str), "cls": cls}
def Caltech256(root=ROOT):
dp = IterableWrapper([os.path.join(root, "256_ObjectCategories.tar")])
dp = FileOpener(dp, mode="b")
dp = TarArchiveLoader(dp)
dp = RoutedDecoder(dp, imagehandler("pil"))
return Mapper(dp, collate_sample)
if __name__ == "__main__":
for _sample in Caltech256():
pass
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import functools
import os
from pathlib import Path
from typing import Union
import torchaudio
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
URL = "train-clean-100"
FOLDER_IN_ARCHIVE = "LibriSpeech"
BASE_URL = "http://www.openslr.org/resources/12/"
_CHECKSUMS = {
"dev-clean.tar.gz": "76f87d090650617fca0cac8f88b9416e0ebf80350acb97b343a85fa903728ab3",
"dev-other.tar.gz": "12661c48e8c3fe1de2c1caa4c3e135193bfb1811584f11f569dd12645aa84365",
"test-clean.tar.gz": "39fde525e59672dc6d1551919b1478f724438a95aa55f874b576be21967e6c23",
"test-other.tar.gz": "d09c181bba5cf717b3dee7d4d592af11a3ee3a09e08ae025c5506f6ebe961c29",
"train-clean-100.tar.gz": "d4ddd1d5a6ab303066f14971d768ee43278a5f2a0aa43dc716b0e64ecbbbf6e2",
"train-clean-360.tar.gz": "146a56496217e96c14334a160df97fffedd6e0a04e66b9c5af0d40be3c792ecf",
"train-other-500.tar.gz": "ddb22f27f96ec163645d53215559df6aa36515f26e01dd70798188350adcb6d2",
}
AUDIO_EXT = ".flac"
TXT_EXT = ".trans.txt"
def decompress_filepath_fn(file_path, root_path):
file_path = os.path.normpath(file_path)
if file_path.endswith((AUDIO_EXT, TXT_EXT)):
return os.path.join(root_path, *file_path.split(os.sep)[-4:])
else:
return os.path.join(root_path, os.path.basename(file_path))
def classify_file_fn(filepath):
if filepath.endswith(AUDIO_EXT):
return 0
if filepath.endswith(TXT_EXT):
return 1
return None
def text_split_fn(line):
fileid_text, transcript = line.strip().split(" ", 1)
return (fileid_text, transcript)
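# For example, a transcript line "1089-134686-0000 SOME TRANSCRIPT TEXT" splits
# into ("1089-134686-0000", "SOME TRANSCRIPT TEXT").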
def audio_key_fn(audio_file):
audio_filename = os.path.splitext(os.path.basename(audio_file))[0]
return audio_filename
def load_librispeech_item(data):
audio_file, transcript = data
audio_filename = os.path.splitext(os.path.basename(audio_file))[0]
speaker_id, chapter_id, utterance_id = audio_filename.split("-")
# Load audio
waveform, sample_rate = torchaudio.load(audio_file)
return (
waveform,
sample_rate,
transcript,
int(speaker_id),
int(chapter_id),
int(utterance_id),
)
def LibriSpeech(root: Union[str, Path], url: str = URL, folder_in_archive: str = FOLDER_IN_ARCHIVE):
if url in [
"dev-clean",
"dev-other",
"test-clean",
"test-other",
"train-clean-100",
"train-clean-360",
"train-other-500",
]:
url = BASE_URL + url + ".tar.gz"
# Get string representation of 'root' in case Path object is passed
root = os.fspath(root)
checksum_dict = {os.path.join(root, key): value for key, value in _CHECKSUMS.items()}
url_dp = IterableWrapper([url])
# Cache tar.gz archive
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=lambda url: os.path.join(root, os.path.basename(url)),
hash_dict=checksum_dict,
hash_type="sha256",
)
cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(same_filepath_fn=True)
# Cache decompressed archive into folder_in_archive
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(
filepath_fn=lambda tar_path: os.path.join(root, folder_in_archive, tar_path.split(".")[0])
)
cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode="b").load_from_tar()
cache_decompressed_dp = cache_decompressed_dp.end_caching(
filepath_fn=functools.partial(decompress_filepath_fn, root_path=os.path.join(root, folder_in_archive)),
)
audio_dp, txt_dp = cache_decompressed_dp.demux(2, classify_file_fn, drop_none=True, buffer_size=-1)
txt_dp = FileOpener(txt_dp, mode="t").readlines(return_path=False).map(text_split_fn)
transcript_map_dp = txt_dp.to_map_datapipe()
audio_transcript_dp = audio_dp.zip_with_map(transcript_map_dp, key_fn=audio_key_fn)
return audio_transcript_dp.map(load_librispeech_item)
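# A minimal usage sketch (hypothetical helper; iterating triggers the download
# and on-disk caching under `root`):
def _example_first_utterance(root="./librispeech"):
    for waveform, sample_rate, transcript, speaker_id, chapter_id, utterance_id in LibriSpeech(root):
        return waveform.shape, sample_rate, transcript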
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from functools import partial
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper, IterDataPipe
from .utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument
URL = {
"train": "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json",
"dev": "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json",
}
MD5 = {
"train": "981b29407e0affa3b1b156f72073b945",
"dev": "3e85deb501d4e538b6bc56f786231552",
}
NUM_LINES = {
"train": 87599,
"dev": 10570,
}
DATASET_NAME = "SQuAD1"
def _path_fn(root, path):
return os.path.join(root, os.path.basename(path))
class _ParseSQuADQAData(IterDataPipe):
def __init__(self, source_datapipe) -> None:
self.source_datapipe = source_datapipe
def __iter__(self):
for _, stream in self.source_datapipe:
raw_json_data = stream["data"]
for layer1 in raw_json_data:
for layer2 in layer1["paragraphs"]:
for layer3 in layer2["qas"]:
_context, _question = layer2["context"], layer3["question"]
_answers = [item["text"] for item in layer3["answers"]]
_answer_start = [item["answer_start"] for item in layer3["answers"]]
if len(_answers) == 0:
_answers = [""]
_answer_start = [-1]
yield (_context, _question, _answers, _answer_start)
@_add_docstring_header(num_lines=NUM_LINES)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev"))
def SQuAD1(root, split):
"""Demonstrates use case when more complex processing is needed on data-stream
Here we process dictionary returned by standard JSON reader and write custom
datapipe to orchestrates data samples for Q&A use-case
"""
url_dp = IterableWrapper([URL[split]])
# cache data on-disk with sanity check
cache_dp = url_dp.on_disk_cache(
filepath_fn=partial(_path_fn, root),
hash_dict={_path_fn(root, URL[split]): MD5[split]},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="b")
# stack custom data pipe on top of JSON reader to orchestrate data samples for Q&A dataset
return _ParseSQuADQAData(cache_dp.parse_json_files())
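# A minimal usage sketch (hypothetical helper): each yielded sample is a
# (context, question, answers, answer_start) tuple.
def _example_first_qa_sample():
    dp = SQuAD1(split="dev")
    return next(iter(dp))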
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from functools import partial
from pathlib import Path
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper
from .utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument
URL = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
MD5 = "7c2ac02c03563afcf9b574c7e56c153a"
NUM_LINES = {
"train": 25000,
"test": 25000,
}
_PATH = "aclImdb_v1.tar.gz"
DATASET_NAME = "IMDB"
def _path_fn(root, path):
return os.path.join(root, os.path.basename(path))
def _filter_fn(split, t):
return Path(t[0]).parts[-3] == split and Path(t[0]).parts[-2] in ["pos", "neg"]
def _file_to_sample(t):
return Path(t[0]).parts[-2], t[1].read().decode("utf-8")
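# For example, the archive member "aclImdb/train/pos/0_9.txt" passes
# _filter_fn("train", ...) and is mapped to ("pos", <decoded file contents>).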
@_add_docstring_header(num_lines=NUM_LINES, num_classes=2)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def IMDB(root, split):
"""Demonstrates complex use case where each sample is stored in separate file and compressed in tar file
Here we show some fancy filtering and mapping operations.
Filtering is needed to know which files belong to train/test and neg/pos label
Mapping is needed to yield proper data samples by extracting label from file name
and reading data from file
"""
url_dp = IterableWrapper([URL])
# cache data on-disk
cache_dp = url_dp.on_disk_cache(
filepath_fn=partial(_path_fn, root),
hash_dict={_path_fn(root, URL): MD5},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="b")
# stack TAR extractor on top of load files data pipe
extracted_files = cache_dp.load_from_tar()
# filter the files as applicable to create dataset for given split (train or test)
filter_files = extracted_files.filter(partial(_filter_fn, split))
# map the file to yield proper data samples
return filter_files.map(_file_to_sample)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# The following utility functions are copied from torchtext
# https://github.com/pytorch/text/blob/main/torchtext/data/datasets_utils.py
import functools
import inspect
import os
def _check_default_set(split, target_select, dataset_name):
# Check whether given object split is either a tuple of strings or string
# and represents a valid selection of options given by the tuple of strings
# target_select.
if isinstance(split, str):
split = (split,)
if isinstance(target_select, str):
target_select = (target_select,)
if not isinstance(split, tuple):
raise ValueError("Internal error: Expected split to be of type tuple.")
if not set(split).issubset(set(target_select)):
raise TypeError(
"Given selection {} of splits is not supported for dataset {}. Please choose from {}.".format(
split, dataset_name, target_select
)
)
return split
def _wrap_datasets(datasets, split):
# Wrap return value for _setup_datasets functions to support singular values instead
# of tuples when split is a string.
if isinstance(split, str):
if len(datasets) != 1:
raise ValueError("Internal error: Expected number of datasets is not 1.")
return datasets[0]
return datasets
def _dataset_docstring_header(fn, num_lines=None, num_classes=None):
"""
Returns docstring for a dataset based on function arguments.
Assumes function signature of form (root='.data', split=<some tuple of strings>, **kwargs)
"""
argspec = inspect.getfullargspec(fn)
if not (argspec.args[0] == "root" and argspec.args[1] == "split"):
raise ValueError(f"Internal Error: Given function {fn} did not adhere to standard signature.")
default_split = argspec.defaults[1]
if not (isinstance(default_split, tuple) or isinstance(default_split, str)):
raise ValueError(f"default_split type expected to be of string or tuple but got {type(default_split)}")
header_s = fn.__name__ + " dataset\n"
if isinstance(default_split, tuple):
header_s += "\nSeparately returns the {} split".format("/".join(default_split))
if isinstance(default_split, str):
header_s += f"\nOnly returns the {default_split} split"
if num_lines is not None:
header_s += "\n\nNumber of lines per split:"
for k, v in num_lines.items():
header_s += f"\n {k}: {v}\n"
if num_classes is not None:
header_s += "\n\nNumber of classes"
header_s += f"\n {num_classes}\n"
args_s = "\nArgs:"
args_s += "\n root: Directory where the datasets are saved."
args_s += "\n Default: .data"
    if isinstance(default_split, tuple):
        args_s += "\n split: split or splits to be returned. Can be a string or tuple of strings."
        args_s += f"\n Default: {default_split}"
    if isinstance(default_split, str):
        args_s += f"\n split: Only {default_split} is available."
        args_s += f"\n Default: {default_split}"
return "\n".join([header_s, args_s]) + "\n"
def _add_docstring_header(docstring=None, num_lines=None, num_classes=None):
def docstring_decorator(fn):
old_doc = fn.__doc__
fn.__doc__ = _dataset_docstring_header(fn, num_lines, num_classes)
if docstring is not None:
fn.__doc__ += docstring
if old_doc is not None:
fn.__doc__ += old_doc
return fn
return docstring_decorator
def _wrap_split_argument_with_fn(fn, splits):
"""
Wraps given function of specific signature to extend behavior of split
to support individual strings. The given function is expected to have a split
kwarg that accepts tuples of strings, e.g. ('train', 'valid') and the returned
function will have a split argument that also accepts strings, e.g. 'train', which
    are then turned into single-entry tuples. Furthermore, the return value of the wrapped
function is unpacked if split is only a single string to enable behavior such as
train = AG_NEWS(split='train')
train, valid = AG_NEWS(split=('train', 'valid'))
"""
argspec = inspect.getfullargspec(fn)
if not (
argspec.args[0] == "root"
and argspec.args[1] == "split"
and argspec.varargs is None
and argspec.varkw is None
and len(argspec.kwonlyargs) == 0
and len(argspec.annotations) == 0
):
raise ValueError(f"Internal Error: Given function {fn} did not adhere to standard signature.")
@functools.wraps(fn)
def new_fn(root=os.path.expanduser("~/.torchtext/cache"), split=splits, **kwargs):
result = []
for item in _check_default_set(split, splits, fn.__name__):
result.append(fn(root, item, **kwargs))
return _wrap_datasets(tuple(result), split)
new_sig = inspect.signature(new_fn)
new_sig_params = new_sig.parameters
new_params = []
new_params.append(new_sig_params["root"].replace(default=".data"))
new_params.append(new_sig_params["split"].replace(default=splits))
new_params += [entry[1] for entry in list(new_sig_params.items())[2:]]
new_sig = new_sig.replace(parameters=tuple(new_params))
new_fn.__signature__ = new_sig
return new_fn
def _wrap_split_argument(splits):
def new_fn(fn):
return _wrap_split_argument_with_fn(fn, splits)
return new_fn
def _create_dataset_directory(dataset_name):
def decorator(func):
argspec = inspect.getfullargspec(func)
if not (
argspec.args[0] == "root"
and argspec.args[1] == "split"
and argspec.varargs is None
and argspec.varkw is None
and len(argspec.kwonlyargs) == 0
and len(argspec.annotations) == 0
):
raise ValueError(f"Internal Error: Given function {func} did not adhere to standard signature.")
@functools.wraps(func)
def wrapper(root=os.path.expanduser("~/.torchtext/cache"), *args, **kwargs):
new_root = os.path.join(root, dataset_name)
if not os.path.exists(new_root):
os.makedirs(new_root)
return func(root=new_root, *args, **kwargs)
return wrapper
return decorator
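# Hedged illustration (not part of the original torchtext utilities): wrapping a toy
# function with _wrap_split_argument shows that a string split returns a single value
# while a tuple of splits returns a tuple of values.
if __name__ == "__main__":
    @_wrap_split_argument(("train", "test"))
    def _toy_dataset(root, split):
        return f"{split}-data"
    print(_toy_dataset(split="train"))  # train-data
    print(_toy_dataset(split=("train", "test")))  # ('train-data', 'test-data')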
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from functools import partial
from torchdata.datapipes.iter import FileOpener, GDriveReader, IterableWrapper
from utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument
# URL to the target file that we will be downloading
URL = "https://drive.google.com/uc?export=download&id=0Bz8a_Dbh9QhbaW12WVVZS2drcnM"
# Expected MD5 Hash of the target file, which will be later used to verify that the file we downloaded is authentic
MD5 = "fe39f8b653cada45afd5792e0f0e8f9b"
NUM_LINES = {
"train": 3600000,
"test": 400000,
}
# Path/name where we will be caching the downloaded file
_PATH = "amazon_review_polarity_csv.tar.gz"
# Mapping dataset type (train/test) to the corresponding expected file names.
_EXTRACTED_FILES = {
"train": os.path.join("amazon_review_polarity_csv", "train.csv"),
"test": os.path.join("amazon_review_polarity_csv", "test.csv"),
}
DATASET_NAME = "AmazonReviewPolarity"
def _path_fn(root, _=None):
return os.path.join(root, _PATH)
def _cache_path_fn(root, split, _=None):
return os.path.join(root, _EXTRACTED_FILES[split])
def _filter_fn(split, fname_and_stream):
return _EXTRACTED_FILES[split] in fname_and_stream[0]
def _process_tuple(t):
return int(t[0]), " ".join(t[1:])
@_add_docstring_header(num_lines=NUM_LINES, num_classes=2)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AmazonReviewPolarity(root, split):
"""Demonstrating caching, extraction and sanity check pipelines."""
# Wrapping the URL into a IterDataPipe
url_dp = IterableWrapper([URL])
# `.on_disk_cache` is the functional form of `OnDiskCacheHolder`, which caches the results from the
# subsequent DataPipe operations (until `.end_caching`) onto the disk to the path as specified by `filepath_fn`.
# In addition, since the optional argument `hash_dict` is given, the DataPipe will also check the hashes of
# the files before saving them. `.on_disk_cache` merely indicates that caching will take place, but the
# content of the previous DataPipe is unchanged. Therefore, `cache_compressed_dp` still contains URL(s).
cache_compressed_dp = url_dp.on_disk_cache(
filepath_fn=partial(_path_fn, root), hash_dict={_path_fn(root): MD5}, hash_type="md5"
)
# `GDriveReader` takes in URLs to GDrives files, and yields a tuple of file name and IO stream.
cache_compressed_dp = GDriveReader(cache_compressed_dp)
# `.end_caching` saves the previous DataPipe's outputs onto the disk. In this case,
# the results from GDriveReader (i.e. the downloaded compressed archive) will be saved onto the disk.
# Upon saving the results, the DataPipe returns the paths to the cached files.
cache_compressed_dp = cache_compressed_dp.end_caching(mode="wb", same_filepath_fn=True)
# Again, `.on_disk_cache` is invoked again here and the subsequent DataPipe operations (until `.end_caching`)
# will be saved onto the disk. At this point, `cache_decompressed_dp` contains paths to the cached files.
cache_decompressed_dp = cache_compressed_dp.on_disk_cache(filepath_fn=partial(_cache_path_fn, root, split))
# Opens the cache files using `FileOpener`
cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode="b")
# Loads the content of the TAR archive file, yielding a tuple of file names and streams of the content.
cache_decompressed_dp = cache_decompressed_dp.load_from_tar()
# Filters for specific file based on the file name from the previous DataPipe (either "train.csv" or "test.csv").
cache_decompressed_dp = cache_decompressed_dp.filter(partial(_filter_fn, split))
# ".end_caching" saves the decompressed file onto disks and yields the path to the file.
cache_decompressed_dp = cache_decompressed_dp.end_caching(mode="wb", same_filepath_fn=True)
# Opens the decompressed file.
data_dp = FileOpener(cache_decompressed_dp, mode="b")
# Finally, this parses content of the decompressed CSV file and returns the result line by line.
return data_dp.parse_csv().map(_process_tuple)
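# Minimal usage sketch (an assumption, not part of the original example): each sample is
# an (int label, text) tuple produced by _process_tuple.
if __name__ == "__main__":
    for i, (label, text) in enumerate(AmazonReviewPolarity(split="train")):
        print(label, text[:80])
        if i >= 2:
            break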
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from functools import partial
from torchdata.datapipes.iter import FileOpener, HttpReader, IterableWrapper, IterDataPipe
from .utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument
URL = {
"train": "https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json",
"dev": "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json",
}
MD5 = {
"train": "62108c273c268d70893182d5cf8df740",
"dev": "246adae8b7002f8679c027697b0b7cf8",
}
NUM_LINES = {
"train": 130319,
"dev": 11873,
}
DATASET_NAME = "SQuAD2"
def _path_fn(root, path):
return os.path.join(root, os.path.basename(path))
class _ParseSQuADQAData(IterDataPipe):
def __init__(self, source_datapipe) -> None:
self.source_datapipe = source_datapipe
def __iter__(self):
for _, stream in self.source_datapipe:
raw_json_data = stream["data"]
for layer1 in raw_json_data:
for layer2 in layer1["paragraphs"]:
for layer3 in layer2["qas"]:
_context, _question = layer2["context"], layer3["question"]
_answers = [item["text"] for item in layer3["answers"]]
_answer_start = [item["answer_start"] for item in layer3["answers"]]
if len(_answers) == 0:
_answers = [""]
_answer_start = [-1]
yield (_context, _question, _answers, _answer_start)
@_add_docstring_header(num_lines=NUM_LINES)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "dev"))
def SQuAD2(root, split):
"""Demonstrates use case when more complex processing is needed on data-stream
Here we process dictionary returned by standard JSON reader and write custom
datapipe to orchestrates data samples for Q&A use-case
"""
url_dp = IterableWrapper([URL[split]])
# cache data on-disk with sanity check
cache_dp = url_dp.on_disk_cache(
filepath_fn=partial(_path_fn, root),
hash_dict={_path_fn(root, URL[split]): MD5[split]},
hash_type="md5",
)
cache_dp = HttpReader(cache_dp).end_caching(mode="wb", same_filepath_fn=True)
cache_dp = FileOpener(cache_dp, mode="b")
# stack custom data pipe on top of JSON reader to orchestrate data samples for Q&A dataset
return _ParseSQuADQAData(cache_dp.parse_json_files())
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torchdata.datapipes.iter import HttpReader
from .utils import _add_docstring_header, _create_dataset_directory, _wrap_split_argument
URL = {
"train": "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/train.csv",
"test": "https://raw.githubusercontent.com/mhjabreel/CharCnn_Keras/master/data/ag_news_csv/test.csv",
}
MD5 = {
"train": "b1a00f826fdfbd249f79597b59e1dc12",
"test": "d52ea96a97a2d943681189a97654912d",
}
NUM_LINES = {
"train": 120000,
"test": 7600,
}
DATASET_NAME = "AG_NEWS"
def _process_tuple(t):
return int(t[0]), " ".join(t[1:])
@_add_docstring_header(num_lines=NUM_LINES, num_classes=4)
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def AG_NEWS(root, split):
"""Demonstrating streaming use case
This might be useful when we do not want to cache or download the data.
The limitation is that we do not have any checking mechanism or data sanity check.
"""
# Stack CSV Parser directly on top of web-stream
return HttpReader([URL[split]]).parse_csv().map(_process_tuple)
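# Minimal usage sketch (an assumption, not part of the original example). Iteration
# streams the CSV straight from the URL; nothing is written to disk. Run as a module
# so the relative import resolves.
if __name__ == "__main__":
    for i, (label, text) in enumerate(AG_NEWS(split="test")):
        print(label, text[:80])
        if i >= 2:
            break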
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torchtext
import torchtext.functional as F
import torchtext.transforms as T
from torch.hub import load_state_dict_from_url
from torch.optim import AdamW
from torchdata.dataloader2 import DataLoader2
from torchtext.datasets import SST2
LEARNING_RATE = 1e-5
PADDING_IDX = 1
BOS_IDX = 0
EOS_IDX = 2
MAX_SEQ_LEN = 256
XLMR_VOCAB_PATH = r"https://download.pytorch.org/models/text/xlmr.vocab.pt"
XLMR_SPM_MODEL_PATH = r"https://download.pytorch.org/models/text/xlmr.sentencepiece.bpe.model"
DEVICE = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
text_transform = T.Sequential(
T.SentencePieceTokenizer(XLMR_SPM_MODEL_PATH),
T.VocabTransform(load_state_dict_from_url(XLMR_VOCAB_PATH)),
T.Truncate(MAX_SEQ_LEN - 2),
T.AddToken(token=BOS_IDX, begin=True),
T.AddToken(token=EOS_IDX, begin=False),
)
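# Hedged sketch of what the pipeline above produces (ids shown are placeholders, not
# actual vocabulary values): SentencePiece tokenizes the raw string, VocabTransform maps
# tokens to integer ids, Truncate caps the sequence at MAX_SEQ_LEN - 2, and the AddToken
# transforms wrap it with the BOS/EOS ids, e.g.
#   text_transform("hello world")  ->  [0, <id>, <id>, ..., 2]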
NUM_EPOCHS = 1
BATCH_SIZE = 8
NUM_CLASSES = 2
INPUT_DIM = 768
def apply_transform(x):
return text_transform(x[0]), x[1]
def train_step(input: torch.Tensor, target: torch.Tensor) -> None:
output = model(input)
loss = criteria(output, target)
optim.zero_grad()
loss.backward()
optim.step()
def eval_step(input: torch.Tensor, target: torch.Tensor) -> tuple:
output = model(input)
loss = criteria(output, target).item()
return float(loss), (output.argmax(1) == target).type(torch.float).sum().item()
def evaluate() -> tuple:
model.eval()
total_loss = 0
correct_predictions = 0
total_predictions = 0
counter = 0
with torch.no_grad():
for batch in eval_dataloader:
input = F.to_tensor(batch["token_ids"], padding_value=PADDING_IDX).to(DEVICE)
target = torch.tensor(batch["target"]).to(DEVICE)
loss, predictions = eval_step(input, target)
total_loss += loss
correct_predictions += predictions
total_predictions += len(target)
counter += 1
return total_loss / counter, correct_predictions / total_predictions
if __name__ == "__main__":
train_datapipe = SST2(split="train")
eval_datapipe = SST2(split="dev")
train_datapipe = train_datapipe.map(apply_transform)
train_datapipe = train_datapipe.batch(BATCH_SIZE)
train_datapipe = train_datapipe.rows2columnar(["token_ids", "target"])
train_dataloader = DataLoader2(datapipe=train_datapipe)
print("Created train dataloader")
eval_datapipe = eval_datapipe.map(apply_transform)
eval_datapipe = eval_datapipe.batch(BATCH_SIZE)
eval_datapipe = eval_datapipe.rows2columnar(["token_ids", "target"])
eval_dataloader = DataLoader2(datapipe=eval_datapipe)
print("Created eval dataloader")
classifier_head = torchtext.models.RobertaClassificationHead(num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model = torchtext.models.XLMR_BASE_ENCODER.get_model(head=classifier_head)
model.to(DEVICE)
optim = AdamW(model.parameters(), lr=LEARNING_RATE)
criteria = nn.CrossEntropyLoss()
for epoch in range(NUM_EPOCHS):
for step, batch in enumerate(train_dataloader):
input = F.to_tensor(batch["token_ids"], padding_value=PADDING_IDX).to(DEVICE)
target = torch.tensor(batch["target"]).to(DEVICE)
train_step(input, target)
            # stop early for demonstration purposes
if step == 10:
break
loss, accuracy = evaluate()
print(f"Epoch: {epoch}, loss: {loss}, accuracy: {accuracy}")
print("Finished Training")
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torchdata.dataloader2 import DataLoader2
from torchdata.datapipes.iter import IterableWrapper
class ToyModel(torch.nn.Module):
def __init__(self) -> None:
"""
In the model constructor, we instantiate four parameters and use them
as member parameters.
"""
super().__init__()
self.a = torch.nn.Parameter(torch.randn(()))
self.b = torch.nn.Parameter(torch.randn(()))
self.c = torch.nn.Parameter(torch.randn(()))
self.d = torch.nn.Parameter(torch.randn(()))
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Simple model forward function
"""
return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3
if __name__ == "__main__":
model = ToyModel()
train_features = IterableWrapper([torch.rand(3) for _ in range(20000)])
train_labels = IterableWrapper([torch.rand(3) for _ in range(20000)])
train_data_pipe = train_features.zip(train_labels).shuffle()
# DataLoader2 wraps an iterable around the Datapipe to enable easy access to
# the features and labels.
data_loader = DataLoader2(datapipe=train_data_pipe)
# Construct the loss function and the optimizer.
criterion = torch.nn.MSELoss(reduction="sum")
optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)
# Loop over the dataset multiple times. Here we are doing only 3 training
# epochs - that is, three passes over the training datapipes.
for epoch in range(3):
# Set manual seed per epoch to control the randomness for shuffle.
torch.manual_seed(epoch)
running_loss = 0.0
for step, data in enumerate(data_loader):
# Obtain the inputs and labels from data.
train_feature, train_label = data
# Zero the parameter gradients.
optimizer.zero_grad()
# Train step: forward + backward + optimize.
predicted_outputs = model(train_feature)
loss = criterion(predicted_outputs, train_label)
loss.backward()
optimizer.step()
# Calculate the statistics.
running_loss += loss.item()
# Print the loss every 2000 mini-batches.
if step % 2000 == 1999:
print("[epoch: %d, %5d] loss: %.3f" % (epoch + 1, step + 1, running_loss / 2000))
running_loss = 0.0
print("Finished Training")
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
import torch.distributed as dist
from torch import nn
from torchdata.dataloader2 import DataLoader2, DistributedReadingService
from torchdata.datapipes.iter import IterableWrapper
class ToyModel(nn.Module):
def __init__(self) -> None:
"""
In the model constructor, we instantiate four parameters and use them
as member parameters.
"""
super().__init__()
self.a = nn.Parameter(torch.randn(()))
self.b = nn.Parameter(torch.randn(()))
self.c = nn.Parameter(torch.randn(()))
self.d = nn.Parameter(torch.randn(()))
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Simple model forward function
"""
return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3
if __name__ == "__main__":
model = ToyModel()
os.environ["RANK"] = str(0)
os.environ["WORLD_SIZE"] = str(2)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "0"
dist.init_process_group("gloo")
# Use a prime number to make sure uneven data sharding and let
# DistributedReadingService prevent hanging with the unbalanced data shard
data_length = 19997
train_features = IterableWrapper([torch.rand(3) for _ in range(data_length)])
train_labels = IterableWrapper([torch.rand(3) for _ in range(data_length)])
# sharding_filter will automatically shard the data based on the
# distributed ranks
train_data_pipe = train_features.zip(train_labels).shuffle().sharding_filter()
# Torch Distributed is required to use DistributedReadingService
reading_service = DistributedReadingService()
# Create DataLoader2 with DistributedReadingService
data_loader2 = DataLoader2(
datapipe=train_data_pipe,
reading_service=reading_service,
)
criterion = torch.nn.MSELoss(reduction="sum")
optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)
for epoch in range(5):
# Set manual seed per epoch to control the randomness for shuffle.
torch.manual_seed(epoch)
running_loss = 0.0
for step, data in enumerate(data_loader2):
train_feature, train_label = data
optimizer.zero_grad()
predicted_outputs = model(train_feature)
loss = criterion(predicted_outputs, train_label)
loss.backward()
optimizer.step()
running_loss += loss.item()
if step % 2000 == 1999:
print("[epoch: %d, %5d] loss: %.3f" % (epoch + 1, step + 1, running_loss / 2000))
running_loss = 0.0
print("Finished Training")
"""
Training Output:
[epoch: 1, 2000] loss: 0.860
[epoch: 1, 4000] loss: 0.823
[epoch: 1, 6000] loss: 0.809
[epoch: 1, 8000] loss: 0.778
[epoch: 1, 10000] loss: 0.753
[epoch: 1, 12000] loss: 0.756
[epoch: 1, 14000] loss: 0.730
[epoch: 1, 16000] loss: 0.727
[epoch: 1, 18000] loss: 0.704
[epoch: 1, 20000] loss: 0.703
[epoch: 2, 2000] loss: 0.677
[epoch: 2, 4000] loss: 0.649
[epoch: 2, 6000] loss: 0.648
[epoch: 2, 8000] loss: 0.629
[epoch: 2, 10000] loss: 0.623
[epoch: 2, 12000] loss: 0.593
[epoch: 2, 14000] loss: 0.586
[epoch: 2, 16000] loss: 0.584
[epoch: 2, 18000] loss: 0.571
[epoch: 2, 20000] loss: 0.558
[epoch: 3, 2000] loss: 0.537
[epoch: 3, 4000] loss: 0.540
[epoch: 3, 6000] loss: 0.544
[epoch: 3, 8000] loss: 0.512
[epoch: 3, 10000] loss: 0.496
[epoch: 3, 12000] loss: 0.506
[epoch: 3, 14000] loss: 0.486
[epoch: 3, 16000] loss: 0.489
[epoch: 3, 18000] loss: 0.489
[epoch: 3, 20000] loss: 0.456
[epoch: 4, 2000] loss: 0.474
[epoch: 4, 4000] loss: 0.445
[epoch: 4, 6000] loss: 0.442
[epoch: 4, 8000] loss: 0.440
[epoch: 4, 10000] loss: 0.434
[epoch: 4, 12000] loss: 0.421
[epoch: 4, 14000] loss: 0.415
[epoch: 4, 16000] loss: 0.404
[epoch: 4, 18000] loss: 0.427
[epoch: 4, 20000] loss: 0.410
[epoch: 5, 2000] loss: 0.395
[epoch: 5, 4000] loss: 0.393
[epoch: 5, 6000] loss: 0.389
[epoch: 5, 8000] loss: 0.397
[epoch: 5, 10000] loss: 0.375
[epoch: 5, 12000] loss: 0.375
[epoch: 5, 14000] loss: 0.372
[epoch: 5, 16000] loss: 0.365
[epoch: 5, 18000] loss: 0.371
[epoch: 5, 20000] loss: 0.359
Finished Training
"""
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService
from torchdata.datapipes.iter import IterableWrapper
class ToyModel(torch.nn.Module):
def __init__(self) -> None:
"""
In the model constructor, we instantiate four parameters and use them
as member parameters.
"""
super().__init__()
self.a = torch.nn.Parameter(torch.randn(()))
self.b = torch.nn.Parameter(torch.randn(()))
self.c = torch.nn.Parameter(torch.randn(()))
self.d = torch.nn.Parameter(torch.randn(()))
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Simple model forward function
"""
return self.a + self.b * x + self.c * x ** 2 + self.d * x ** 3
if __name__ == "__main__":
model = ToyModel()
train_features = IterableWrapper([torch.rand(3) for _ in range(20000)])
train_labels = IterableWrapper([torch.rand(3) for _ in range(20000)])
train_data_pipe = train_features.zip(train_labels).shuffle().sharding_filter()
# Create DataLoader2 with MultiProcessingReadingService
data_loader = DataLoader2(
datapipe=train_data_pipe,
reading_service=MultiProcessingReadingService(num_workers=2),
)
criterion = torch.nn.MSELoss(reduction="sum")
optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)
for epoch in range(3):
# Set manual seed per epoch to control the randomness for shuffle.
torch.manual_seed(epoch)
running_loss = 0.0
for step, data in enumerate(data_loader):
train_feature, train_label = data
optimizer.zero_grad()
predicted_outputs = model(train_feature)
loss = criterion(predicted_outputs, train_label)
loss.backward()
optimizer.step()
running_loss += loss.item()
if step % 2000 == 1999:
print("[epoch: %d, %5d] loss: %.3f" % (epoch + 1, step + 1, running_loss / 2000))
running_loss = 0.0
print("Finished Training")
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import hashlib
import os
import time
from functools import partial
from typing import Callable
import pandas as pd
import psutil
from torchdata.dataloader2 import DataLoader2, MultiProcessingReadingService
from torchdata.datapipes.iter import IterableWrapper
def map_read(t):
"""
Read stream and close. Used for tar files.
Args:
t: (path, data_stream) tuple
"""
data = t[1].read()
t[1].close()
return t[0], data
def map_calculate_md5(t, n_md5):
"""
    Calculates the MD5 hash of the data `n_md5` times. Increasing the number of MD5
    calculations increases CPU usage (a proxy for the complexity of data transforms).
Args:
t: (path, data) tuple
n_md5: number of times to compute hash of the data
"""
path, data = t
long_str = ""
for _ in range(n_md5):
long_str += str(hashlib.md5(data).hexdigest())
result = hashlib.md5(long_str.encode()).hexdigest()
size = len(data)
return path, str(result), size
def check_and_output_speed(prefix: str, create_dp_fn: Callable, n_prefetch: int, n_md5: int, n_workers: int):
"""
    Benchmarks the speed of the prefetching setup and prints the results.
    Args:
        prefix: String indicating what is being executed
        create_dp_fn: function that returns a DataPipe
        n_prefetch: number of batches to prefetch
        n_md5: number of times to compute hash of the data
        n_workers: number of worker processes for the reading service
"""
initial_memory_usage = psutil.virtual_memory().used
max_memory_usage = initial_memory_usage
dp = create_dp_fn()
rs_type = "DataLoader2 w/ tar archives"
new_rs = MultiProcessingReadingService(
num_workers=n_workers, worker_prefetch_cnt=n_prefetch, main_prefetch_cnt=n_prefetch
)
dl: DataLoader2 = DataLoader2(dp, reading_service=new_rs)
start = time.time()
items_len = 0 # Number of items processed
total_size = 0 # Number of bytes processed
time_to_first = None
for _name, _md5, size in dl:
if items_len > 10 and time_to_first is None:
time_to_first = time.time() - start
total_size += size
items_len += 1
if psutil.virtual_memory().used > max_memory_usage:
max_memory_usage = psutil.virtual_memory().used
total = time.time() - start
    speed = int(items_len / total)  # items per sec
function_name = create_dp_fn.__name__
io_speed = int(total_size / total / 1024 / 1024) # size MiBs per sec
total_size = int(total_size / 1024 / 1024) # total size in MiBs
total = int(total)
print(
f"{prefix} {function_name} and {rs_type} with n_prefetch {n_prefetch} | "
f"n_md5 {n_md5} results are: total time {total} sec, with {items_len} items at {speed} files per/sec. "
f"{total_size} MiB with io speed at {io_speed} MiBps"
)
change_in_memory_usage = (max_memory_usage - initial_memory_usage) / 1024 / 1024
print(f"initial_memory_usage: {initial_memory_usage / 1024 / 1024:0.1f} MiBs")
print(f"change_in_memory_usage: {change_in_memory_usage:0.1f} MiBs\n")
return (
function_name,
rs_type,
n_prefetch,
total,
items_len,
speed,
total_size,
io_speed,
int(change_in_memory_usage),
)
def append_result(
df,
workers,
n_tar_files,
n_md5,
fs,
iteration,
columns,
fn_name,
rs_type,
prefetch,
total,
items_len,
speed,
total_size,
io_speed,
change_in_memory_usage,
):
return pd.concat(
[
df,
pd.DataFrame(
data=[
[
workers,
fn_name,
rs_type,
prefetch,
n_md5,
total,
n_tar_files,
items_len,
total_size,
speed,
io_speed,
fs,
iteration,
change_in_memory_usage,
]
],
columns=columns,
),
]
)
def save_result(df, csv_name: str, directory: str = ""):
file_path = os.path.join(directory, f"{csv_name}.csv")
df.to_csv(file_path, mode="a") # Append result
def main(args):
def get_datapipe(path, n_items, n_md5, use_source_prefetch, use_s3=False):
if use_s3:
dp = IterableWrapper([path] * n_items).shuffle().sharding_filter()
dp = dp.open_files_by_fsspec(mode="rb", anon=True)
if use_source_prefetch:
dp = dp.prefetch(5)
dp = dp.load_from_tar(mode="r|")
else:
tar_files = [f"{path}/images{i}.tar" for i in range(n_items)]
dp = IterableWrapper(tar_files).shuffle().sharding_filter().open_files(mode="b")
if use_source_prefetch:
dp = dp.prefetch(5)
dp = dp.load_from_tar(mode="r:")
dp = dp.map(map_read)
dp = dp.map(partial(map_calculate_md5, n_md5=n_md5))
return dp
columns = [
"n_workers",
"file_type",
"RS Type",
"n_prefetch",
"n_md5",
"total_time",
"n_tar_files",
"n_items",
"total_size (MB)",
"speed (file/s)",
"io_speed (MB/s)",
"fs",
"iteration",
"change_in_memory_usage",
]
df = pd.DataFrame(columns=columns)
if args.use_s3:
print("Loading data from S3...")
fs_str = "s3"
path = "s3://torchdatabenchmarkdatasets/images0.tar"
dp_fn = partial(get_datapipe, path, args.n_tar_files, args.n_md5, args.use_source_prefetch, args.use_s3)
dp_fn.__name__ = "S3_Tar" # type: ignore[attr-defined]
else:
print("Loading data from disk...")
fs_str = "Local"
path = "/home/ubuntu/source_data/large_images_tars"
dp_fn = partial(get_datapipe, path, args.n_tar_files, args.n_md5, args.use_source_prefetch, args.use_s3)
dp_fn.__name__ = "Tar" # type: ignore[attr-defined]
# print(f"{path = }")
for n_workers in [4, 8, 12]:
for i in range(1 + args.n_epochs): # 1 warm-up + n runs
params = check_and_output_speed(
f"[prefetch is True, {n_workers} workers]",
dp_fn,
n_prefetch=args.n_prefetch,
n_md5=args.n_md5,
n_workers=n_workers,
)
df = append_result(df, n_workers, args.n_tar_files, args.n_md5, fs_str, i, columns, *params)
# Save CSV
print(df)
save_result(df, csv_name=args.output_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--n-epochs", default=3, type=int, help="Number of times to benchmark per setup excluding warm up"
)
parser.add_argument("--n-tar-files", default=200, type=int, help="Number of tar files (~100MB each)")
parser.add_argument("--n-prefetch", default=20, type=int, help="Number of batches to prefetch")
parser.add_argument(
"--n-md5",
default=22,
type=int,
help="Number of times to compute MD5 hash per file, "
"a proxy for transformation complexity "
"(Low ~3ms: 22, Med ~7ms: 54, High ~10ms: 77)",
)
parser.add_argument("--output-file", default="benchmark_result", type=str, help="output csv file name")
parser.add_argument("--use-s3", default=False, action="store_true", help="Load file from S3 instead of local")
parser.add_argument("--use-source-prefetch", default=False, action="store_true", help="Use source prefetch")
args = parser.parse_args()
main(args)
# python ~/data/benchmarks/cloud/aws_s3.py --n-tar-files 500 --n-epochs 1 --n-md5 22 &&
# python ~/data/benchmarks/cloud/aws_s3.py --n-tar-files 500 --n-epochs 1 --n-md5 22 --use-s3 &&
# python ~/data/benchmarks/cloud/aws_s3.py --n-tar-files 500 --n-epochs 1 --n-md5 54 &&
# python ~/data/benchmarks/cloud/aws_s3.py --n-tar-files 500 --n-epochs 1 --n-md5 54 --use-s3 &&
# python ~/data/benchmarks/cloud/aws_s3.py --n-tar-files 500 --n-epochs 1 --n-md5 77 &&
# python ~/data/benchmarks/cloud/aws_s3.py --n-tar-files 500 --n-epochs 1 --n-md5 77 --use-s3
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torchvision.transforms import transforms
class ClassificationPresetTrain:
def __init__(
self,
*,
crop_size,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
hflip_prob=0.5,
):
trans = [transforms.RandomResizedCrop(crop_size)]
if hflip_prob > 0:
trans.append(transforms.RandomHorizontalFlip(hflip_prob))
trans.extend(
[
transforms.PILToTensor(),
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=mean, std=std),
]
)
self.transforms = transforms.Compose(trans)
def __call__(self, img):
return self.transforms(img)
class ClassificationPresetEval:
def __init__(
self,
*,
crop_size,
resize_size=256,
mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225),
):
self.transforms = transforms.Compose(
[
transforms.Resize(resize_size),
transforms.CenterCrop(crop_size),
transforms.PILToTensor(),
transforms.ConvertImageDtype(torch.float),
transforms.Normalize(mean=mean, std=std),
]
)
def __call__(self, img):
return self.transforms(img)
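# Minimal usage sketch (an assumption, not part of the original presets module): both
# presets take a PIL image and return a normalized float tensor of shape [3, crop_size, crop_size].
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    img = Image.fromarray(np.zeros((300, 300, 3), dtype=np.uint8))
    train_t = ClassificationPresetTrain(crop_size=224)
    eval_t = ClassificationPresetEval(crop_size=224)
    print(train_t(img).shape, eval_t(img).shape)  # torch.Size([3, 224, 224]) for both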
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import errno
import os
import time
from collections import defaultdict, deque
import torch
import torch.distributed as dist
class SmoothedValue:
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
t = reduce_across_processes([self.count, self.total])
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
if not self.deque:
return 0
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
if not self.deque:
return 0
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
try:
return self.total / self.count
except ZeroDivisionError:
return 0
@property
def max(self):
if not self.deque:
return 0
return max(self.deque)
@property
def value(self):
if not self.deque:
return 0
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value
)
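# Hedged usage sketch (not part of the original utilities): update with raw values and
# read the smoothed statistics.
#   v = SmoothedValue(window_size=3)
#   for x in (1.0, 2.0, 3.0, 4.0):
#       v.update(x)
#   v.median, v.global_avg  # 3.0 (median of the last 3 values), 2.5 (average of all 4)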
class MetricLogger:
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{attr}'")
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(f"{name}: {str(meter)}")
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
model_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"model: {model}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"]
)
MB = 1024.0 * 1024.0
for obj in iterable:
dtime = time.time() - end
data_time.update(dtime)
yield obj
ttime = time.time() - end
iter_time.update(ttime)
model_time.update(ttime - dtime)
if i % print_freq == 0:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
model=str(model_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(f"{header} Total time: {total_time_str}")
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.inference_mode():
maxk = max(topk)
batch_size = target.size(0)
if target.ndim == 2:
target = target.max(dim=1)[1]
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res
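# Hedged worked example (not part of the original utilities): with the logits below the
# predicted classes are [1, 0], so top-1 accuracy is 50% and top-2 accuracy is 100%.
#   output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
#   target = torch.tensor([1, 1])
#   accuracy(output, target, topk=(1, 2))  # [tensor(50.), tensor(100.)]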
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
elif hasattr(args, "rank"):
pass
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print(f"| distributed init (rank {args.rank}): {args.dist_url}", flush=True)
torch.distributed.init_process_group(
backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
)
torch.distributed.barrier()
if args.data_loader.lower() != "ffcv":
setup_for_distributed(args.rank == 0)
def reduce_across_processes(val):
if not is_dist_avail_and_initialized():
# nothing to sync, but we still convert to tensor for consistency with the distributed case.
return torch.tensor(val)
t = torch.tensor(val, device="cuda")
dist.barrier()
dist.all_reduce(t)
return t
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import os
import time
import warnings
import helpers
import presets
import torch
import torch.utils.data
import torchvision
import utils
from torch import nn
from torchdata.dataloader2 import adapter, DataLoader2, MultiProcessingReadingService
def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value}"))
metric_logger.add_meter("img/s", utils.SmoothedValue(window_size=10, fmt="{value}"))
header = f"Epoch: [{epoch}]"
for i, (image, target) in enumerate(metric_logger.log_every(data_loader, args.print_freq, header)):
if args.data_loading_only:
continue
start_time = time.time()
image, target = image.to(device), target.to(device)
output = model(image)
loss = criterion(output, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
batch_size = image.shape[0]
metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"])
metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
metric_logger.meters["img/s"].update(batch_size / (time.time() - start_time))
def evaluate(model, criterion, data_loader, device, args, print_freq=100, log_suffix=""):
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = f"Test: {log_suffix}"
metric_logger.add_meter("acc1", utils.SmoothedValue())
metric_logger.add_meter("acc5", utils.SmoothedValue())
num_processed_samples = 0
with torch.inference_mode():
for image, target in metric_logger.log_every(data_loader, print_freq, header):
if args.data_loading_only:
continue
image, target = image.to(device), target.to(device)
output = model(image)
loss = criterion(output, target)
acc1, acc5 = utils.accuracy(output, target, topk=(1, 5))
batch_size = image.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters["acc1"].update(acc1.item(), n=batch_size)
metric_logger.meters["acc5"].update(acc5.item(), n=batch_size)
num_processed_samples += batch_size
# gather the stats from all processes
num_processed_samples = utils.reduce_across_processes(num_processed_samples)
if (
hasattr(data_loader, "dataset")
and hasattr(data_loader.dataset, "__len__")
and len(data_loader.dataset) != num_processed_samples
and torch.distributed.get_rank() == 0
):
warnings.warn(
f"It looks like the dataset has {len(data_loader.dataset)} samples, but {num_processed_samples} "
"samples were used for the validation, which might bias the results. "
"Try adjusting the batch size and / or the world size. "
"Setting the world size to 1 is always a safe bet."
)
metric_logger.synchronize_between_processes()
print(f"{header} Acc@1 {metric_logger.acc1.global_avg:.3f} Acc@5 {metric_logger.acc5.global_avg:.3f}")
return metric_logger.acc1.global_avg
def create_data_loaders(args):
print(f"file-system = {args.fs}")
if args.fs == "fsx":
dataset_dir = "/datasets01"
elif args.fs == "fsx_isolated":
dataset_dir = "/fsx_isolated"
elif args.fs == "ontap":
dataset_dir = "/datasets01_ontap"
elif args.fs == "ontap_isolated":
dataset_dir = "/ontap_isolated"
else:
raise ValueError(f"bad args.fs, got {args.fs}")
dataset_dir += "/imagenet_full_size/061417/"
train_dir = os.path.join(dataset_dir, "train")
val_dir = os.path.join(dataset_dir, "val")
val_resize_size, val_crop_size, train_crop_size = args.val_resize_size, args.val_crop_size, args.train_crop_size
if args.no_transforms:
train_preset = val_preset = helpers.no_transforms
else:
train_preset = presets.ClassificationPresetTrain(crop_size=train_crop_size)
val_preset = presets.ClassificationPresetEval(crop_size=val_crop_size, resize_size=val_resize_size)
if args.ds_type == "dp":
builder = helpers.make_pre_loaded_dp if args.preload_ds else helpers.make_dp
train_dataset = builder(train_dir, transforms=train_preset)
val_dataset = builder(val_dir, transforms=val_preset)
train_sampler = val_sampler = None
train_shuffle = True
elif args.ds_type == "iterable":
train_dataset = torchvision.datasets.ImageFolder(train_dir, transform=train_preset)
train_dataset = helpers.MapStyleToIterable(train_dataset, shuffle=True)
val_dataset = torchvision.datasets.ImageFolder(val_dir, transform=val_preset)
val_dataset = helpers.MapStyleToIterable(val_dataset, shuffle=False)
train_sampler = val_sampler = None
train_shuffle = None # but actually True
elif args.ds_type == "mapstyle":
builder = helpers.PreLoadedMapStyle if args.preload_ds else torchvision.datasets.ImageFolder
train_dataset = builder(train_dir, transform=train_preset)
val_dataset = builder(val_dir, transform=val_preset)
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False)
train_shuffle = None # but actually True
else:
raise ValueError(f"Invalid value for args.ds_type ({args.ds_type})")
data_loader_arg = args.data_loader.lower()
if data_loader_arg == "v1":
train_data_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=train_shuffle,
sampler=train_sampler,
num_workers=args.workers,
pin_memory=True,
drop_last=True,
)
val_data_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size,
sampler=val_sampler,
num_workers=args.workers,
pin_memory=True,
)
elif data_loader_arg == "v2":
if args.ds_type != "dp":
raise ValueError("DataLoader2 only works with datapipes.")
# Note: we are batching and collating here *after the transforms*, which is consistent with DLV1.
# But maybe it would be more efficient to do that before, so that the transforms can work on batches??
train_dataset = train_dataset.batch(args.batch_size, drop_last=True).collate()
train_data_loader = DataLoader2(
train_dataset,
datapipe_adapter_fn=adapter.Shuffle(),
reading_service=MultiProcessingReadingService(num_workers=args.workers),
)
val_dataset = val_dataset.batch(args.batch_size, drop_last=True).collate() # TODO: Do we need drop_last here?
val_data_loader = DataLoader2(
val_dataset,
reading_service=MultiProcessingReadingService(num_workers=args.workers),
)
else:
raise ValueError(f"invalid data-loader param. Got {args.data_loader}")
return train_data_loader, val_data_loader, train_sampler
def main(args):
if args.output_dir:
utils.mkdir(args.output_dir)
utils.init_distributed_mode(args)
print("\n".join(f"{k}: {str(v)}" for k, v in sorted(dict(vars(args)).items())))
device = torch.device(args.device)
if args.use_deterministic_algorithms:
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True)
else:
torch.backends.cudnn.benchmark = True
train_data_loader, val_data_loader, train_sampler = create_data_loaders(args)
num_classes = 1000 # I'm lazy. TODO change this
print("Creating model")
model = torchvision.models.__dict__[args.model](weights=args.weights, num_classes=num_classes)
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
if args.test_only:
# We disable the cudnn benchmarking because it can noticeably affect the accuracy
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
evaluate(model, criterion, val_data_loader, device=device, args=args)
return
print("Start training")
start_time = time.time()
for epoch in range(args.epochs):
if args.distributed and train_sampler is not None:
train_sampler.set_epoch(epoch)
train_one_epoch(model, criterion, optimizer, train_data_loader, device, epoch, args)
lr_scheduler.step()
evaluate(model, criterion, val_data_loader, device=device, args=args)
if args.output_dir:
checkpoint = {
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"args": args,
}
utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth"))
utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth"))
if epoch == 0:
first_epoch_time = time.time() - start_time
total_time = time.time() - start_time
print(f"Training time: {datetime.timedelta(seconds=int(total_time))}")
print(f"Training time (w/o 1st epoch): {datetime.timedelta(seconds=int(total_time - first_epoch_time))}")
def get_args_parser(add_help=True):
import argparse
parser = argparse.ArgumentParser(description="PyTorch Classification Training", add_help=add_help)
parser.add_argument("--fs", default="fsx", type=str)
parser.add_argument("--model", default="resnet18", type=str, help="model name")
parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
parser.add_argument(
"-b", "--batch-size", default=32, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
)
parser.add_argument("--epochs", default=90, type=int, metavar="N", help="number of total epochs to run")
parser.add_argument(
"-j", "--workers", default=12, type=int, metavar="N", help="number of data loading workers (default: 16)"
)
parser.add_argument("--lr", default=0.1, type=float, help="initial learning rate")
parser.add_argument("--lr-step-size", default=30, type=int, help="decrease lr every step-size epochs")
parser.add_argument("--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma")
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
parser.add_argument(
"--test-only",
dest="test_only",
help="Only test the model",
action="store_true",
)
# distributed training parameters
parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
parser.add_argument(
"--use-deterministic-algorithms", action="store_true", help="Forces the use of deterministic algorithms only."
)
parser.add_argument(
"--val-resize-size", default=256, type=int, help="the resize size used for validation (default: 256)"
)
parser.add_argument(
"--val-crop-size", default=224, type=int, help="the central crop size used for validation (default: 224)"
)
parser.add_argument(
"--train-crop-size", default=224, type=int, help="the random crop size used for training (default: 224)"
)
parser.add_argument("--weights", default=None, type=str, help="the weights enum name to load")
parser.add_argument(
"--ds-type",
default="mapstyle",
type=str,
help="'dp' or 'iterable' or 'mapstyle' (for regular indexable datasets)",
)
parser.add_argument(
"--preload-ds",
action="store_true",
help="whether to use a fake dataset where all images are pre-loaded in RAM and already transformed. "
"Mostly useful to benchmark how fast a model training would be without data-loading bottlenecks."
"Acc results are irrevant because we don't cache the entire dataset, only a very small fraction of it.",
)
parser.add_argument(
"--data-loading-only",
action="store_true",
help="When on, we bypass the model's forward and backward passes. So mostly only the dataloading happens",
)
parser.add_argument(
"--no-transforms",
action="store_true",
help="Whether to apply transforms to the images. No transforms means we "
"load and decode PIL images as usual, but we don't transform them. Instead we discard them "
"and the dataset will produce random tensors instead. We "
"need to create random tensors because without transforms, the images would still be PIL images "
"and they wouldn't be of the required size."
"Obviously, Acc resuts will not be relevant.",
)
parser.add_argument(
"--data-loader",
default="V1",
type=str,
help="'V1' or 'V2'. V2 only works for datapipes",
)
return parser
if __name__ == "__main__":
args = get_args_parser().parse_args()
main(args)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import os
import random
from functools import partial
from pathlib import Path
import torch
import torch.distributed as dist
import torchvision
from PIL import Image
from torchdata.datapipes.iter import FileLister, IterDataPipe
# TODO: maybe infinite buffer can / is already natively supported by torchdata?
INFINITE_BUFFER_SIZE = 1_000_000_000
IMAGENET_TRAIN_LEN = 1_281_167
IMAGENET_TEST_LEN = 50_000
class _LenSetter(IterDataPipe):
    # TODO: Ideally, we wouldn't need this extra class
def __init__(self, dp, root):
self.dp = dp
if "train" in str(root):
self.size = IMAGENET_TRAIN_LEN
elif "val" in str(root):
self.size = IMAGENET_TEST_LEN
else:
raise ValueError("oops?")
def __iter__(self):
yield from self.dp
def __len__(self):
# TODO The // world_size part shouldn't be needed. See https://github.com/pytorch/data/issues/533
return self.size // dist.get_world_size()
def _decode(path, root, category_to_int):
category = Path(path).relative_to(root).parts[0]
image = Image.open(path).convert("RGB")
    label = category_to_int[category]
return image, label
def _apply_transforms(img_and_label, transforms):
img, label = img_and_label
return transforms(img), label
def make_dp(root, transforms):
root = Path(root).expanduser().resolve()
categories = sorted(entry.name for entry in os.scandir(root) if entry.is_dir())
category_to_int = {category: i for (i, category) in enumerate(categories)}
dp = FileLister(str(root), recursive=True, masks=["*.JPEG"])
dp = dp.shuffle(buffer_size=INFINITE_BUFFER_SIZE).set_shuffle(False).sharding_filter()
dp = dp.map(partial(_decode, root=root, category_to_int=category_to_int))
    dp = dp.map(partial(_apply_transforms, transforms=transforms))
dp = _LenSetter(dp, root=root)
return dp
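# Hedged usage sketch (not part of the original helpers): each element is a
# (transformed_image, int_label) pair. Note that len(dp) goes through _LenSetter and
# calls dist.get_world_size(), so it only works after torch.distributed is initialized.
#   dp = make_dp("/path/to/imagenet/train", transforms=my_transform)
#   image, label = next(iter(dp))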
class PreLoadedMapStyle:
# All the data is pre-loaded and transformed in __init__, so the DataLoader should be crazy fast.
# This is just to assess how fast a model could theoretically be trained if there was no data bottleneck at all.
def __init__(self, dir, transform, buffer_size=100):
dataset = torchvision.datasets.ImageFolder(dir, transform=transform)
self.size = len(dataset)
self.samples = [dataset[torch.randint(0, len(dataset), size=(1,)).item()] for i in range(buffer_size)]
def __len__(self):
return self.size
def __getitem__(self, idx):
return self.samples[idx % len(self.samples)]
class _PreLoadedDP(IterDataPipe):
# Same as above, but this is a DataPipe
def __init__(self, root, transforms, buffer_size=100):
dataset = torchvision.datasets.ImageFolder(root, transform=transforms)
self.size = len(dataset)
self.samples = [dataset[torch.randint(0, len(dataset), size=(1,)).item()] for i in range(buffer_size)]
# Note: the rng might be different across DDP workers so they'll all have different samples.
# But we don't care about accuracy here so whatever.
def __iter__(self):
for idx in range(self.size):
yield self.samples[idx % len(self.samples)]
def make_pre_loaded_dp(root, transforms):
dp = _PreLoadedDP(root=root, transforms=transforms)
dp = dp.shuffle(buffer_size=INFINITE_BUFFER_SIZE).set_shuffle(False).sharding_filter()
dp = _LenSetter(dp, root=root)
return dp
class MapStyleToIterable(torch.utils.data.IterableDataset):
# This converts a MapStyle dataset into an iterable one.
# Not sure this kind of Iterable dataset is actually useful to benchmark. It
# was necessary when benchmarking async-io stuff, but not anymore.
# If anything, it shows how tricky Iterable datasets are to implement.
def __init__(self, dataset, shuffle):
self.dataset = dataset
self.shuffle = shuffle
self.size = len(self.dataset)
self.seed = 0 # has to be hard-coded for all DDP workers to have the same shuffling
def __len__(self):
return self.size // dist.get_world_size()
def __iter__(self):
worker_info = torch.utils.data.get_worker_info()
num_dl_workers = worker_info.num_workers
dl_worker_id = worker_info.id
num_ddp_workers = dist.get_world_size()
ddp_worker_id = dist.get_rank()
num_total_workers = num_ddp_workers * num_dl_workers
current_worker_id = ddp_worker_id + (num_ddp_workers * dl_worker_id)
indices = range(self.size)
if self.shuffle:
rng = random.Random(self.seed)
indices = rng.sample(indices, k=self.size)
indices = itertools.islice(indices, current_worker_id, None, num_total_workers)
samples = (self.dataset[i] for i in indices)
yield from samples
# TODO: maybe only generate these when --no-transforms is passed?
_RANDOM_IMAGE_TENSORS = [torch.randn(3, 224, 224) for _ in range(300)]
def no_transforms(_):
# see --no-transforms doc
return random.choice(_RANDOM_IMAGE_TENSORS)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# This file is adapted from PyTorch Core
# https://github.com/pytorch/pytorch/blob/master/scripts/release_notes/common.py
import json
import locale
import os
import re
import subprocess
from collections import namedtuple
import requests
topics = [
"bc_breaking",
"deprecations",
"new_features",
"improvements",
"bug_fixes",
"performance",
"docs",
"devs",
"Untopiced",
]
Features = namedtuple(
"Features",
[
"title",
"body",
"pr_number",
"files_changed",
"labels",
],
)
def dict_to_features(dct):
return Features(
title=dct["title"],
body=dct["body"],
pr_number=dct["pr_number"],
files_changed=dct["files_changed"],
labels=dct["labels"],
)
def features_to_dict(features):
return dict(features._asdict())
def run(command):
"""Returns (return-code, stdout, stderr)"""
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, err = p.communicate()
rc = p.returncode
enc = locale.getpreferredencoding()
output = output.decode(enc)
err = err.decode(enc)
return rc, output.strip(), err.strip()
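# Minimal usage sketch for run() (the command below is just an example):
# rc, out, err = run("git rev-parse HEAD")
# if rc == 0:
#     print(out)  # current commit hash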
def commit_body(commit_hash):
cmd = f"git log -n 1 --pretty=format:%b {commit_hash}"
ret, out, err = run(cmd)
return out if ret == 0 else None
def commit_title(commit_hash):
cmd = f"git log -n 1 --pretty=format:%s {commit_hash}"
ret, out, err = run(cmd)
return out if ret == 0 else None
def commit_files_changed(commit_hash):
cmd = f"git diff-tree --no-commit-id --name-only -r {commit_hash}"
ret, out, err = run(cmd)
return out.split("\n") if ret == 0 else None
def parse_pr_number(body, commit_hash, title):
regex = r"Pull Request resolved: https://github.com/pytorch/data/pull/([0-9]+)"
matches = re.findall(regex, body)
if len(matches) == 0:
if "revert" not in title.lower() and "updating submodules" not in title.lower():
print(f"[{commit_hash}: {title}] Could not parse PR number, ignoring PR")
return None
    if len(matches) > 1:
        print(f"[{commit_hash}: {title}] Got two PR numbers, using the first one")
return matches[0]
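# Example (hypothetical commit body): a body containing the line
# "Pull Request resolved: https://github.com/pytorch/data/pull/123"
# yields "123". Bodies without such a line return None, logging a message
# unless the title looks like a revert or a submodule update.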
def get_ghstack_token():
pattern = "github_oauth = (.*)"
with open(os.path.expanduser("~/.ghstackrc"), "r+") as f:
config = f.read()
matches = re.findall(pattern, config)
if len(matches) == 0:
raise RuntimeError("Can't find a github oauth token")
return matches[0]
token = get_ghstack_token()
headers = {"Authorization": f"token {token}"}
def run_query(query):
request = requests.post("https://api.github.com/graphql", json={"query": query}, headers=headers)
if request.status_code == 200:
return request.json()
else:
raise Exception(f"Query failed to run by returning code of {request.status_code}. {query}")
def gh_labels(pr_number):
query = f"""
{{
repository(owner: "pytorch", name: "data") {{
pullRequest(number: {pr_number}) {{
labels(first: 10) {{
edges {{
node {{
name
}}
}}
}}
}}
}}
}}
"""
query = run_query(query)
edges = query["data"]["repository"]["pullRequest"]["labels"]["edges"]
return [edge["node"]["name"] for edge in edges]
def get_features(commit_hash, return_dict=False):
title, body, files_changed = (
commit_title(commit_hash),
commit_body(commit_hash),
commit_files_changed(commit_hash),
)
pr_number = parse_pr_number(body, commit_hash, title)
labels = []
if pr_number is not None:
labels = gh_labels(pr_number)
result = Features(title, body, pr_number, files_changed, labels)
if return_dict:
return features_to_dict(result)
return result
class CommitDataCache:
def __init__(self, path="results/data.json"):
self.path = path
self.data = {}
if os.path.exists(path):
self.data = self.read_from_disk()
def get(self, commit):
if commit not in self.data.keys():
# Fetch and cache the data
self.data[commit] = get_features(commit)
self.write_to_disk()
return self.data[commit]
def read_from_disk(self):
with open(self.path) as f:
data = json.load(f)
data = {commit: dict_to_features(dct) for commit, dct in data.items()}
return data
def write_to_disk(self):
data = {commit: features._asdict() for commit, features in self.data.items()}
with open(self.path, "w") as f:
json.dump(data, f)
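# Minimal usage sketch (assumes a git checkout of pytorch/data and a
# ~/.ghstackrc oauth token; the commit hash below is hypothetical):
# cdc = CommitDataCache(path="results/data.json")
# features = cdc.get("abc1234")  # fetched once, then served from the JSON cache
# print(features.title, features.labels)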
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# This file is adapted from PyTorch Core
# https://github.com/pytorch/pytorch/blob/master/scripts/release_notes/commitlist.py
import argparse
import csv
import os
import pprint
import re
from collections import defaultdict
from common import CommitDataCache, get_features, run, topics
class Commit:
def __init__(self, commit_hash, category, topic, title):
self.commit_hash = commit_hash
self.category = category
self.topic = topic
self.title = title
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (
self.commit_hash == other.commit_hash
and self.category == other.category
and self.topic == other.topic
and self.title == other.title
)
def __repr__(self):
return f"Commit({self.commit_hash}, {self.category}, {self.topic}, {self.title})"
class CommitList:
# NB: Private ctor. Use `from_existing` or `create_new`.
def __init__(self, path, commits):
self.path = path
self.commits = commits
@staticmethod
def from_existing(path):
commits = CommitList.read_from_disk(path)
return CommitList(path, commits)
@staticmethod
def create_new(path, base_version, new_version):
if os.path.exists(path):
raise ValueError("Attempted to create a new commitlist but one exists already!")
commits = CommitList.get_commits_between(base_version, new_version)
return CommitList(path, commits)
@staticmethod
def read_from_disk(path):
with open(path) as csvfile:
reader = csv.reader(csvfile)
rows = list(reader)
assert all(len(row) >= 4 for row in rows)
return [Commit(*row[:4]) for row in rows]
def write_to_disk(self):
path = self.path
rows = self.commits
directory = os.path.dirname(path)
os.makedirs(directory, exist_ok=True)
with open(path, "w") as csvfile:
writer = csv.writer(csvfile)
for commit in rows:
writer.writerow([commit.commit_hash, commit.category, commit.topic, commit.title])
    @staticmethod
    def keywordInFile(file, keywords):
for key in keywords:
if key in file:
return True
return False
@staticmethod
def categorize(commit_hash, title):
features = get_features(commit_hash, return_dict=True)
title = features["title"]
labels = features["labels"]
category = "Uncategorized"
topic = "Untopiced"
        # We ask contributors to label their PRs appropriately
# when they're first landed.
# Check if the labels are there first.
already_categorized = already_topiced = False
for label in labels:
if label.startswith("release notes: "):
category = label.split("release notes: ", 1)[1]
already_categorized = True
if label.startswith("topic: "):
topic = label.split("topic: ", 1)[1]
already_topiced = True
if already_categorized and already_topiced:
return Commit(commit_hash, category, topic, title)
if "deprecation" in title.lower():
topic = "deprecations"
files_changed = features["files_changed"]
for file in files_changed:
if CommitList.keywordInFile(file, ["docker/", ".github", "packaging/"]):
category = "releng"
break
if CommitList.keywordInFile(
file,
[
"torchdata/dataloader2",
],
):
category = "dataloader2"
break
if CommitList.keywordInFile(
file,
[
"torchdata/datapipes",
],
):
category = "datapipe"
break
return Commit(commit_hash, category, topic, title)
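    # Example (hypothetical labels): a PR labeled "release notes: datapipe" and
    # "topic: bug_fixes" is categorized directly from its labels. Without such
    # labels, the changed files decide: e.g. a change under torchdata/datapipes/
    # lands in the "datapipe" category with the default "Untopiced" topic.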
@staticmethod
def get_commits_between(base_version, new_version):
cmd = f"git merge-base {base_version} {new_version}"
rc, merge_base, _ = run(cmd)
assert rc == 0
# Returns a list of something like
# b33e38ec47 Allow a higher-precision step type for Vec256::arange (#34555)
cmd = f"git log --reverse --oneline {merge_base}..{new_version}"
rc, commits, _ = run(cmd)
assert rc == 0
log_lines = commits.split("\n")
hashes, titles = zip(*[log_line.split(" ", 1) for log_line in log_lines])
return [CommitList.categorize(commit_hash, title) for commit_hash, title in zip(hashes, titles)]
def filter(self, *, category=None, topic=None):
commits = self.commits
if category is not None:
commits = [commit for commit in commits if commit.category == category]
if topic is not None:
commits = [commit for commit in commits if commit.topic == topic]
return commits
def update_to(self, new_version):
last_hash = self.commits[-1].commit_hash
new_commits = CommitList.get_commits_between(last_hash, new_version)
self.commits += new_commits
def stat(self):
counts = defaultdict(lambda: defaultdict(int))
for commit in self.commits:
counts[commit.category][commit.topic] += 1
return counts
def create_new(path, base_version, new_version):
commits = CommitList.create_new(path, base_version, new_version)
commits.write_to_disk()
def update_existing(path, new_version):
commits = CommitList.from_existing(path)
commits.update_to(new_version)
commits.write_to_disk()
def to_markdown(commit_list, category):
def cleanup_title(commit):
match = re.match(r"(.*) \(#\d+\)", commit.title)
if match is None:
return commit.title
return match.group(1)
cdc = CommitDataCache()
lines = [f"\n## {category}\n"]
for topic in topics:
lines.append(f"### {topic}\n")
commits = commit_list.filter(category=category, topic=topic)
for commit in commits:
result = cleanup_title(commit)
maybe_pr_number = cdc.get(commit.commit_hash).pr_number
if maybe_pr_number is None:
result = f"- {result} ({commit.commit_hash})\n"
else:
result = f"- {result} ([#{maybe_pr_number}](https://github.com/pytorch/data/pull/{maybe_pr_number}))\n"
lines.append(result)
return lines
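# Each commit renders as one markdown bullet under its topic heading, e.g.
# (hypothetical): "- Fix Shuffler reset ([#123](https://github.com/pytorch/data/pull/123))",
# or "- Fix Shuffler reset (abc1234)" when no PR number could be parsed.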
def get_markdown_header(category):
header = f"""
# Release Notes worksheet {category}
The main goal of this process is to rephrase all the commit messages below to make them clear and easy for the end user to read. Follow these instructions to do so:
* **Please clean up and format commit titles to be readable by the general pytorch user.** [Detailed instructions here](https://fb.quip.com/OCRoAbEvrRD9#HdaACARZZvo)
* Please sort commits into the following categories (you should not rename the categories!). I tried to pre-sort these to ease your work; feel free to move commits around if the current categorization is not good.
* Please drop any commits that are not user-facing.
* If anything is from another domain, leave it in the UNTOPICED section at the end and I'll come and take care of it.
The categories below are as follows:
* BC breaking: All commits that are BC-breaking. These are the most important commits. If any pre-sorted commit is actually BC-breaking, do move it to this section. Each commit should contain a paragraph explaining the rationale behind the change as well as an example of how to update user code (guidelines here: https://quip.com/OCRoAbEvrRD9)
* Deprecations: All commits introducing deprecation. Each commit should include a small example explaining what should be done to update user code.
* new_features: All commits introducing a new feature (new functions, new submodule, new supported platform etc)
* improvements: All commits providing improvements to existing feature should be here (new backend for a function, new argument, better numerical stability)
* bug fixes: All commits that fix bugs and behaviors that do not match the documentation
* performance: All commits that are added mainly for performance (we separate this from improvements above to make it easier for users to look for it)
* documentation: All commits that add/update documentation
* Developers: All commits that are not end-user facing but still impact people that compile from source, develop into pytorch, extend pytorch, etc
"""
return [
header,
]
def main():
"""
Example Usages
Create a new commitlist.
Said commitlist contains commits between v1.5.0 and f5bc91f851.
python commitlist.py --create_new tags/v1.5.0 f5bc91f851
Update the existing commitlist to commit bfcb687b9c.
python commitlist.py --update_to bfcb687b9c
"""
parser = argparse.ArgumentParser(description="Tool to create a commit list")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("--create_new", nargs=2)
group.add_argument("--update_to")
group.add_argument("--stat", action="store_true")
group.add_argument("--export_markdown", action="store_true")
parser.add_argument("--path", default="results/commitlist.csv")
args = parser.parse_args()
if args.create_new:
create_new(args.path, args.create_new[0], args.create_new[1])
return
if args.update_to:
update_existing(args.path, args.update_to)
return
if args.stat:
commits = CommitList.from_existing(args.path)
stats = commits.stat()
pprint.pprint(stats)
return
if args.export_markdown:
commits = CommitList.from_existing(args.path)
categories = list(commits.stat().keys())
for category in categories:
print(f"Exporting {category}...")
lines = get_markdown_header(category)
lines += to_markdown(commits, category)
filename = f"results/export/result_{category}.md"
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as f:
f.writelines(lines)
return
if __name__ == "__main__":
main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
def collect_init_dps(init_file_location):
init_dps = set()
with open(init_file_location) as init_file:
while (line := init_file.readline()) != "":
if line.startswith("__all__ "):
while (line := init_file.readline()) != "" and (stripped_line := line.strip()).startswith('"'):
init_dps.add(stripped_line.replace(",", "").replace('"', ""))
break
return init_dps
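# Sketch of the __init__.py layout this parser expects (hypothetical names):
# __all__ = [
#     "Batcher",
#     "Mapper",
# ]
# For such a file, collect_init_dps returns {"Batcher", "Mapper"}.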
def collect_rst_dps(rst_file_location):
rst_dps = set()
with open(rst_file_location) as rst_file:
while (line := rst_file.readline()) != "":
if line.count("class_template.rst") > 0 or line.count("function.rst") > 0:
rst_file.readline()
while (line := rst_file.readline()) != "" and len(stripped_line := line.strip()) > 1:
rst_dps.add(stripped_line)
return rst_dps
def compare_sets(set_a, set_b, ignore_set=None):
res = set_a.difference(set_b)
if ignore_set is not None:
res.difference_update(ignore_set)
return res
def main():
datapipes_folder = os.path.join("torchdata", "datapipes")
init_file = "__init__.py"
docs_source_folder = os.path.join("docs", "source")
exit_code = 0
    for target, ignore_set in zip(["iter", "map", "utils"], [{"IterDataPipe", "Extractor"}, {"MapDataPipe"}, set()]):
init_path = os.path.join(datapipes_folder, target, init_file)
rst_path = os.path.join(docs_source_folder, "torchdata.datapipes." + target + ".rst")
init_set = collect_init_dps(init_path)
rst_set = collect_rst_dps(rst_path)
dif_init = compare_sets(init_set, rst_set, ignore_set)
dif_rst = compare_sets(rst_set, init_set)
for elem in dif_init:
print(f"Please add {elem} to {rst_path}")
exit_code = 1
for elem in dif_rst:
print(f"{elem} is present in {rst_path} but not in {init_path}")
exit_code = 1
sys.exit(exit_code)
if __name__ == "__main__":
main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Use the same timeout as PyTorch Distributed
default_timeout_in_s = 30 * 60
default_dl2_worker_join_timeout_in_s = 20
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torchdata import _extension # noqa: F401
from . import datapipes
janitor = datapipes.utils.janitor
try:
from .version import __version__ # noqa: F401
except ImportError:
pass
__all__ = [
"datapipes",
"janitor",
]
# Please keep this list sorted
assert __all__ == sorted(__all__)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import importlib.machinery
import os
from pathlib import Path
_LIB_DIR = Path(__file__).parent
def _init_extension():
lib_dir = os.path.dirname(__file__)
    # TODO(631): If any extension has a dependency on a shared library,
    # then, in order to load these shared libraries dynamically,
    # we need to add logic to load the DLL path on Windows
# See: https://github.com/pytorch/pytorch/blob/master/torch/__init__.py#L56-L140
loader_details = (importlib.machinery.ExtensionFileLoader, importlib.machinery.EXTENSION_SUFFIXES)
extfinder = importlib.machinery.FileFinder(lib_dir, loader_details) # type: ignore[arg-type]
ext_specs = extfinder.find_spec("_torchdata")
if ext_specs is None:
return
from torchdata import _torchdata as _torchdata
_init_extension()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import sys
import traceback
class KeyErrorMessage(str):
r"""str subclass that returns itself in repr"""
def __repr__(self):
return self
class ExceptionWrapper:
r"""
Wraps an exception with traceback to communicate across threads/processes
"""
def __init__(self, exc_info=None, where: str = "in background"):
if exc_info is None:
exc_info = sys.exc_info()
self.exc_type = exc_info[0]
self.exc_msg = "".join(traceback.format_exception(*exc_info))
self.where = where
def reraise(self):
r"""
Reraises the wrapped exception in the current thread/process
"""
# Format a message such as: "Caught ValueError in DataLoader worker
# process 2. Original Traceback:", followed by the traceback.
msg = f"Caught {self.exc_type.__name__} {self.where}.\nOriginal {self.exc_msg}"
if self.exc_type == KeyError:
# KeyError calls repr() on its argument (usually a dict key). This
# makes stack traces unreadable. It will not be changed in Python
# (https://bugs.python.org/issue2651), so we work around it.
msg = KeyErrorMessage(msg)
elif getattr(self.exc_type, "message", None):
        # Some exceptions have a non-str first argument but explicitly
        # provide a message field
raise self.exc_type(message=msg)
try:
exception = self.exc_type(msg)
except TypeError:
# If the exception takes multiple arguments, don't try to
# instantiate since we don't know how to
raise RuntimeError(msg) from None
raise exception
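# Minimal usage sketch: capture an exception where it occurs and re-raise it
# elsewhere with the original traceback attached (names below are hypothetical):
# try:
#     risky_work()
# except Exception:
#     wrapper = ExceptionWrapper(where="in worker process 0")
# ...send `wrapper` over a queue, then in the consumer call:
# wrapper.reraise()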
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pickle
import warnings
from typing import Any, Dict, Generic, Iterable, Iterator, Optional, TypeVar, Union
from torchdata.dataloader2.adapter import Adapter
from torchdata.dataloader2.error import PauseIteration
from torchdata.dataloader2.graph._serialization import (
clone,
DataPipe,
deserialize_datapipe,
MapDataPipe,
serialize_datapipe,
)
from torchdata.dataloader2.random import SeedGenerator
from torchdata.dataloader2.random.seed_generator import _UINT64_UPPER_BOUND
from torchdata.dataloader2.reading_service import CheckpointableReadingServiceInterface, ReadingServiceInterface
T_co = TypeVar("T_co", covariant=True)
SERIALIZED_DATAPIPE_KEY_NAME = "serialized_datapipe"
READING_SERVICE_STATE_KEY_NAME = "reading_service_state"
RANDOMNESS_STATE_KEY_NAME = "randomness_state"
class DataLoader2Iterator(Iterator[T_co]):
r"""
    An iterator wrapper returned by ``DataLoader2``'s ``__iter__`` method. It delegates method/attribute calls
to the DataPipe iterator object.
The purpose of this wrapper object is to track the validity of an iterator to enforce the single iterator per
``DataLoader2`` constraint, and to finalize iteration/shutdown when necessary.
"""
def __init__(self, dataloader: "DataLoader2", iterator_id: int):
self.dataloader = dataloader
self.iterator_id = iterator_id
self.limit_counter: Optional[int] = None
self.limit_threshold: Optional[int] = None
def __next__(self) -> T_co:
if self.iterator_id == self.dataloader.valid_iterator_id:
self.dataloader._reset_iter = True
try:
if self.dataloader._is_paused:
raise PauseIteration("DataLoader2 has been paused. `resume` must be called before continuing.")
else:
next_val = next(self.dataloader._datapipe_iter) # type: ignore[arg-type]
if self.limit_threshold is not None:
self.limit_counter = self.limit_counter + 1 # type: ignore[operator]
return next_val
except PauseIteration: # This can be used for raising `StopIteration` without `finalize_iteration`
raise StopIteration
except StopIteration:
if self.dataloader.reading_service is not None:
self.dataloader.reading_service.finalize_iteration()
raise
except Exception:
if self.dataloader:
self.dataloader.shutdown()
raise
finally:
# Call `pause` if threshold is reached
if (
not self.dataloader._is_paused
and self.limit_threshold is not None
and self.limit_counter >= self.limit_threshold # type: ignore[operator]
):
self._pause()
else: # `iterator_id` is not valid
if self.dataloader.reading_service is not None:
self.dataloader.reading_service.finalize_iteration()
raise RuntimeError(
"This iterator has been invalidated because another iterator has been created "
"from the same DataLoader2.\n"
"This may be caused multiple references to the same DataLoader2. "
"For feedback regarding this single iterator per DataLoader2 constraint, feel free "
"to comment on this issue: https://github.com/pytorch/data/issues/45."
)
def _pause(self) -> None:
r"""
        Pauses ``DataLoader2`` by halting its threads and ensuring that its state remains unchanged,
allowing ``DataLoader2`` to safely perform snapshotting and similar operations afterwards.
The ``limit_counter`` is also reset to ``0``.
"""
self.dataloader._pause()
self.limit_counter = 0
def resume(self) -> None:
r"""
Restarts the threads within ``DataLoader2`` and allows it to yield additional batches.
"""
self.dataloader._resume()
def limit(self, num_batches: Optional[int]) -> None:
"""
Pauses ``DataLoader2`` from yielding additional batches after ``num_batches`` has been yielded. The count
begins after this method is invoked (i.e. previously yielded batches do not count towards the threshold).
While paused, ``DataLoader2``'s threads are halted and its state remains unchanged,
allowing ``DataLoader2`` to safely perform snapshotting and similar operations.
After ``DataLoader2`` is paused, ``resume()`` must be called before it can start yielding again.
Note:
- ``limit_threshold`` persists after ``pause`` and ``resume``. Use ``.limit(None)`` to remove it.
            - If a dispatching process is present, in order to make sure the limit is in sync across processes,
please place 1-to-N ``DataPipes`` in the dispatching process (before ``sharding_round_robin_dispatch``)
Args:
num_batches: Number of batches after which the DataLoader2 will pause, use ``None`` to remove the limit
"""
self.limit_counter = 0
self.limit_threshold = num_batches
self.dataloader._limit(num_batches)
def __getattr__(self, name):
"""
To delegate operations to ``dataloader._datapipe_iter``.
"""
if "dataloader" not in self.__dict__ or self.dataloader._datapipe_iter is None:
raise AttributeError
return getattr(self.dataloader._datapipe_iter, name)
class DataLoader2(Generic[T_co]):
r"""
``DataLoader2`` is used to optimize and execute the given ``DataPipe`` graph
based on ``ReadingService`` and ``Adapter`` functions, with support for
- Dynamic sharding for multiprocess and distributed data loading
- Multiple backend ``ReadingServices``
- ``DataPipe`` graph in-place modification like shuffle control, memory pinning, etc.
- Snapshot the state of data-preprocessing pipeline (WIP)
Args:
datapipe (``IterDataPipe`` or ``MapDataPipe``): ``DataPipe`` from which to load the data. A deepcopy of this
datapipe will be made during initialization, allowing the input to be re-used in a different ``DataLoader2``
without sharing states. Input ``None`` can only be used if ``load_state_dict`` is called
right after the creation of the DataLoader.
datapipe_adapter_fn (``Iterable[Adapter]`` or ``Adapter``, optional): ``Adapter`` function(s) that
will be applied to the DataPipe (default: ``None``).
reading_service (ReadingServiceInterface, optional): defines how ``DataLoader2`` should execute operations over
the ``DataPipe``, e.g. multiprocessing/distributed (default: ``None``). A deepcopy of this will be
created during initialization, allowing the ReadingService to be re-used in a different
``DataLoader2`` without sharing states.
Note:
When a ``MapDataPipe`` is passed into ``DataLoader2``, in order to iterate through
the data, ``DataLoader2`` will attempt to create an iterator via ``iter(datapipe)``.
        If the object has non-zero-indexed indices, this may fail.
Consider using ``.shuffle()`` (which converts ``MapDataPipe`` to ``IterDataPipe``)
or ``datapipe.to_iter_datapipe(custom_indices)``.
"""
def __init__(
self,
datapipe: Optional[DataPipe],
datapipe_adapter_fn: Optional[Union[Iterable[Adapter], Adapter]] = None,
reading_service: Optional[ReadingServiceInterface] = None,
) -> None:
if isinstance(datapipe, MapDataPipe):
datapipe = datapipe.to_iter_datapipe()
self.datapipe = clone(datapipe) if datapipe is not None else None
self._adapted: bool = False
self._datapipe_iter: Optional[Iterator[T_co]] = None
self._reset_iter: bool = True # Sets to `False` when `__iter__` runs, and `True` when `__next__` is called
# TODO(630): Some ReadingServices might want to validate adapters, we can add this feature
if datapipe_adapter_fn is None:
self.datapipe_adapter_fns = None
elif isinstance(datapipe_adapter_fn, Iterable):
self.datapipe_adapter_fns = datapipe_adapter_fn
else:
self.datapipe_adapter_fns = [datapipe_adapter_fn]
self.reading_service = clone(reading_service)
self.reading_service_state: Optional[bytes] = None # is not `None` when `load_state_dict` is called
self._terminated: bool = False
self.valid_iterator_id: Optional[int] = None
self._is_paused = False
if self.datapipe is not None and self.datapipe_adapter_fns is not None:
for adapter_fn in self.datapipe_adapter_fns:
self.datapipe = adapter_fn(self.datapipe)
self._datapipe_before_reading_service_adapt: DataPipe = clone(self.datapipe)
self._seed_generator: SeedGenerator = SeedGenerator()
self._seed: Optional[int] = None
self._reset_seed: bool = True
# Seed generator as of beginning of each epoch
self._initial_seed_generator: SeedGenerator = clone(self._seed_generator)
def __iter__(self) -> DataLoader2Iterator[T_co]:
r"""
Return a singleton iterator from the ``DataPipe`` graph adapted by ``ReadingService``.
``DataPipe`` will be restored if the serialized state is provided to construct
        ``DataLoader2``. ``initialize_iteration`` and ``finalize_iteration`` will be
        invoked at the beginning and end of the iteration, respectively.
"""
if self.datapipe is None:
raise RuntimeError("Please provide datapipe or use load_state_dict to load datapipe from state")
if self._terminated:
raise RuntimeError("Cannot iterate over the DataLoader as it has already been shut down")
if self._reset_iter:
if self._seed is not None:
if self._reset_seed:
self._seed_generator.seed(self._seed)
self._reset_seed = False
else:
self._seed_generator.seed()
# Saving initial seed generator state
self._initial_seed_generator = clone(self._seed_generator)
if not self._adapted and self.reading_service is not None:
if self.reading_service_state is None:
self.datapipe = self.reading_service.initialize(self.datapipe)
else:
if not isinstance(self.reading_service, CheckpointableReadingServiceInterface):
raise TypeError("Cannot restore from non-checkpointable reading service")
self.datapipe = self.reading_service.restore(self.datapipe, self.reading_service_state)
self._adapted = True
if self.reading_service is not None:
iter_reset_fn = self.reading_service.initialize_iteration(self._seed_generator)
if iter_reset_fn:
self.datapipe = iter_reset_fn(self.datapipe)
self._datapipe_iter = iter(self.datapipe)
self._reset_iter = False
self.valid_iterator_id = 0 if self.valid_iterator_id is None else self.valid_iterator_id + 1
return DataLoader2Iterator(self, self.valid_iterator_id)
def seed(self, seed: int) -> None:
r"""
Set random seed for DataLoader2 to control determinism.
Args:
seed: Random uint64 seed
"""
if seed >= _UINT64_UPPER_BOUND:
raise ValueError(f"Expected an uint64 seed, but got {seed}.")
self._seed = seed
self._reset_seed = True
def __del__(self) -> None:
self.shutdown()
def shutdown(self) -> None:
r"""
        Shuts down ``ReadingService`` and cleans up the iterator.
"""
try:
if not self._terminated:
self._terminated = True
if self.reading_service is not None:
self.reading_service.finalize_iteration()
self.reading_service.finalize()
if not self._reset_iter:
self._reset_iter = True
self._datapipe_iter = None
# Ignore AttributeError in case any attribute has been removed before `__del__`
except AttributeError:
pass
def __enter__(self) -> "DataLoader2[T_co]":
return self
def __exit__(self, exc_type, exc_value, traceback) -> None:
self.shutdown()
def state_dict(self) -> Dict[str, Any]:
r"""
        Return a dictionary to represent the state of the data-processing pipeline with keys:
        - ``serialized_datapipe``: Serialized ``DataPipe`` before ``ReadingService`` adaptation.
- ``reading_service_state``: The state of ``ReadingService`` and adapted ``DataPipe``.
"""
reading_service_state = None
if self.reading_service is not None and isinstance(self.reading_service, CheckpointableReadingServiceInterface):
reading_service_state = self.reading_service.checkpoint()
        # Serialize datapipe after applying adapters and before reading service adaptation
serialized_datapipe = serialize_datapipe(self._datapipe_before_reading_service_adapt)
serialized_randomness_state = (
self._seed,
self._reset_seed,
pickle.dumps(self._seed_generator),
pickle.dumps(self._initial_seed_generator),
)
return {
SERIALIZED_DATAPIPE_KEY_NAME: serialized_datapipe,
READING_SERVICE_STATE_KEY_NAME: reading_service_state,
RANDOMNESS_STATE_KEY_NAME: serialized_randomness_state,
}
@classmethod
def from_state(
cls,
state: Dict[str, Any],
reading_service: CheckpointableReadingServiceInterface,
) -> "DataLoader2[T_co]":
"""
Create new ``DataLoader2`` with ``DataPipe`` graph and ``ReadingService`` restored
from the serialized state.
"""
serialized_datapipe = state[SERIALIZED_DATAPIPE_KEY_NAME]
reading_service_state = state[READING_SERVICE_STATE_KEY_NAME]
data_loader: "DataLoader2[T_co]" = DataLoader2(
datapipe=deserialize_datapipe(serialized_datapipe),
datapipe_adapter_fn=None,
reading_service=reading_service,
)
data_loader.reading_service_state = reading_service_state
# This check is needed for backward compatibility of `state_dict` for users loading from older version
if RANDOMNESS_STATE_KEY_NAME in state:
randomness_state = state[RANDOMNESS_STATE_KEY_NAME]
data_loader._seed, data_loader._reset_seed = randomness_state[0], randomness_state[1]
data_loader._seed_generator = pickle.loads(randomness_state[2])
data_loader._initial_seed_generator = pickle.loads(randomness_state[3])
return data_loader
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
"""
For the existing ``DataLoader2``, load serialized state to restore ``DataPipe`` graph
and reset the internal state of ``ReadingService``.
"""
        # Edge case check:
        # the iterator has already been created, either 1) it was just created, or 2) it was created and exhausted
if self._datapipe_iter is not None:
raise RuntimeError(
"DataLoaderV2 iterator has already been created, `load_state_dict()` can’t be called. "
"Please create a new dataloader in order to use load state dict."
)
serialized_datapipe = state_dict[SERIALIZED_DATAPIPE_KEY_NAME]
reading_service_state = state_dict[READING_SERVICE_STATE_KEY_NAME]
# deserialize datapipe
deserialized_datapipe = deserialize_datapipe(serialized_datapipe)
assert deserialized_datapipe is not None
# override existing datapipe and reading service state
self.datapipe = deserialized_datapipe
self.reading_service_state = reading_service_state
# This check is needed for backward compatibility of `state_dict` for users loading from older version
if RANDOMNESS_STATE_KEY_NAME in state_dict:
randomness_state = state_dict[RANDOMNESS_STATE_KEY_NAME]
self._seed, self._reset_seed = randomness_state[0], randomness_state[1]
self._seed_generator = pickle.loads(randomness_state[2])
self._initial_seed_generator = pickle.loads(randomness_state[3])
# re-initialize datapipe_adapter_fn and _datapipe_before_reading_service_adapt
if self.datapipe_adapter_fns is not None:
for adapter_fn in self.datapipe_adapter_fns:
self.datapipe = adapter_fn(self.datapipe)
self._datapipe_before_reading_service_adapt = clone(self.datapipe)
def _restore_checkpoint_beginning_of_epoch(self) -> None:
r"""
At the beginning of each iteration (epoch), the initial state of randomness is automatically saved.
That state is also saved as part of ``state_dict``. This method restores the current DataLoader2 RNG state
to that initial state.
The common use case is to invoke this method after ``DataLoader2``'s state is restored (through
``.from_state(...)`` or ``load_state_dict(...)``) in order to resume from the beginning of the last-ran epoch.
"""
self._seed_generator = self._initial_seed_generator
def _pause(self) -> None:
if hasattr(self.reading_service, "_pause"):
self._is_paused = True
pause_fn = self.reading_service._pause()
if pause_fn is not None:
self.datapipe = pause_fn(self.datapipe)
else:
warnings.warn("ReadingService doesn't support `pause`.")
def _resume(self) -> None:
if hasattr(self.reading_service, "_resume"):
if not self._is_paused:
warnings.warn("Resume is called when `DataLoader2` is not paused. No operation is performed.")
else:
resume_fn = self.reading_service._resume()
if resume_fn is not None:
self.datapipe = resume_fn(self.datapipe)
self._is_paused = False
else:
warnings.warn("ReadingService doesn't support `resume`.")
def _limit(self, num_batches: Optional[int]) -> None:
if hasattr(self.reading_service, "_limit"):
limit_fn = self.reading_service._limit(num_batches)
if limit_fn is not None:
self.datapipe = limit_fn(self.datapipe, num_batches)
else:
warnings.warn("ReadingService doesn't support `limit`.")
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
class PauseIteration(StopIteration):
pass
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from abc import abstractmethod
import torch
from torchdata.dataloader2.graph import DataPipe, traverse_dps
from torchdata.datapipes.iter.util.cacheholder import _WaitPendingCacheItemIterDataPipe
__all__ = [
"Adapter",
"CacheTimeout",
"Shuffle",
]
assert __all__ == sorted(__all__)
class Adapter:
r"""
Adapter Base Class that follows python Callable protocol.
"""
@abstractmethod
def __call__(self, datapipe: DataPipe) -> DataPipe:
r"""
Callable function that either runs in-place modification of
the ``DataPipe`` graph, or returns a new ``DataPipe`` graph.
Args:
datapipe: ``DataPipe`` that needs to be adapted.
Returns:
Adapted ``DataPipe`` or new ``DataPipe``.
"""
pass
class Shuffle(Adapter):
r"""
Shuffle DataPipes adapter allows control over all existing Shuffler (``shuffle``) DataPipes in the graph.
Args:
enable: Optional boolean argument to enable/disable shuffling in the ``DataPipe`` graph. True by default.
- True: Enables all previously disabled ``ShufflerDataPipes``. If none exists, it will add a new ``shuffle`` at the end of the graph.
- False: Disables all ``ShufflerDataPipes`` in the graph.
- None: No-op. Introduced for backward compatibility.
Example:
.. testsetup::
from torchdata.datapipes.iter import IterableWrapper
from torchdata.dataloader2 import DataLoader2
from torchdata.dataloader2.adapter import Shuffle
size = 12
.. testcode::
dp = IterableWrapper(range(size)).shuffle()
dl = DataLoader2(dp, [Shuffle(False)])
assert list(range(size)) == list(dl)
"""
def __init__(self, enable=True):
self.enable = enable
def __call__(self, datapipe: DataPipe) -> DataPipe:
return torch.utils.data.graph_settings.apply_shuffle_settings(datapipe, shuffle=self.enable)
class CacheTimeout(Adapter):
r"""
CacheTimeout DataPipes adapter allows control over timeouts of all existing EndOnDiskCacheHolder (``end_caching``)
    in the graph. Useful when the cached pipeline takes too long to execute (e.g. slow file downloading).
Args:
        timeout: int - number of seconds parallel processes will wait for cached files to appear.
Example:
.. testsetup::
from torchdata.datapipes.iter import IterableWrapper
from torchdata.dataloader2 import DataLoader2
from torchdata.dataloader2.adapter import CacheTimeout
size = 12
.. testcode::
dp = IterableWrapper(range(size)).shuffle()
dl = DataLoader2(dp, [CacheTimeout(600)])
"""
def __init__(self, timeout=None):
if timeout is None:
raise ValueError("timeout should be integer")
self.timeout = timeout
def __call__(self, datapipe: DataPipe) -> DataPipe:
graph = traverse_dps(datapipe)
all_pipes = torch.utils.data.graph_settings.get_all_graph_pipes(graph)
cache_locks = {pipe for pipe in all_pipes if isinstance(pipe, _WaitPendingCacheItemIterDataPipe)}
for cache_lock in cache_locks:
cache_lock.set_timeout(self.timeout)
return datapipe
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
class ShuffleSpec(abc.ABC):
"""Defines a shuffle specification."""
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torchdata.dataloader2.dataloader2 import DataLoader2, DataLoader2Iterator
from torchdata.dataloader2.error import PauseIteration
from torchdata.dataloader2.reading_service import (
CheckpointableReadingServiceInterface,
DistributedReadingService,
InProcessReadingService,
MultiProcessingReadingService,
PrototypeMultiProcessingReadingService,
ReadingServiceInterface,
SequentialReadingService,
)
from torchdata.dataloader2.shuffle_spec import ShuffleSpec
__all__ = [
"CheckpointableReadingServiceInterface",
"DataLoader2",
"DataLoader2Iterator",
"DistributedReadingService",
"InProcessReadingService",
"MultiProcessingReadingService",
"PauseIteration",
"PrototypeMultiProcessingReadingService",
"ReadingServiceInterface",
"SequentialReadingService",
"ShuffleSpec",
]
assert __all__ == sorted(__all__)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torchdata.dataloader2.graph import DataPipe, DataPipeGraph, traverse_dps
from torchdata.datapipes.iter import ShardingFilter, Shuffler
def _check_shuffle_before_sharding(datapipe: DataPipe) -> bool:
"""
    This function will check if a ``shuffle`` operation is present before each
``sharding_filter`` operation for every single path in the ``DataPipe`` graph.
"""
graph: DataPipeGraph = traverse_dps(datapipe) # type: ignore[arg-type]
return _check_shuffler_before_sharding_helper(graph)
def _check_shuffler_before_sharding_helper(graph: DataPipeGraph) -> bool:
if not graph:
return True
if len(graph) > 1:
for dp, sub_graph in graph.values():
if isinstance(dp, ShardingFilter):
if not _has_shuffler(sub_graph):
return False
else:
if not _check_shuffler_before_sharding_helper(sub_graph):
return False
return True
dp, dp_graph = list(graph.values())[0]
if isinstance(dp, ShardingFilter):
return _has_shuffler(dp_graph)
return _check_shuffler_before_sharding_helper(dp_graph)
def _has_shuffler(graph: DataPipeGraph) -> bool:
if not graph:
return False
if len(graph) > 1:
for dp, sub_graph in graph.values():
if not (isinstance(dp, Shuffler) or _has_shuffler(sub_graph)):
return False
return True
dp, dp_graph = list(graph.values())[0]
if isinstance(dp, Shuffler):
return True
return _has_shuffler(dp_graph)
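# Example (hypothetical graph): for
#   dp = IterableWrapper(range(8)).shuffle().sharding_filter()
# _check_shuffle_before_sharding(dp) returns True, while for
#   dp = IterableWrapper(range(8)).sharding_filter()
# it returns False, since no Shuffler precedes the ShardingFilter.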
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing as py_mp
import pickle
import warnings
from abc import ABC, abstractmethod
from datetime import timedelta
from functools import partial
from multiprocessing.queues import Queue
from typing import Callable, List, Optional, Tuple
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES
from torchdata._constants import default_dl2_worker_join_timeout_in_s, default_timeout_in_s
from torchdata.dataloader2 import communication
from torchdata.dataloader2.graph import DataPipe, list_dps, replace_dp, set_graph_random_seed, traverse_dps
from torchdata.dataloader2.graph._serialization import attach_wrapper
from torchdata.dataloader2.graph.utils import _find_replicable_branches
from torchdata.dataloader2.random import dist_share_seed, SeedGenerator
from torchdata.dataloader2.utils import process_init_fn, WorkerInfo
from torchdata.dataloader2.utils.dispatch import _DummyIterDataPipe, find_lca_round_robin_sharding_dp
from torchdata.datapipes.iter import FullSync
class ReadingServiceInterface(ABC):
r"""
Interface for ``ReadingService``. Please extend custom ``ReadingService`` based on this interface class.
ReadingService must be picklable prior to ``initialize`` being called. This is because a copy of it will be
created by ``DataLoader2`` to avoid the situation where the same ReadingService object is used by
    multiple ``DataLoader2`` instances, with its internal state modified by each of them.
As a result of this constraint, certain initialization steps may need to take place within the
``initialize`` method rather than ``__init__`` of the ReadingService class.
"""
@abstractmethod
def initialize(self, datapipe: DataPipe) -> DataPipe:
r"""
``ReadingService`` takes a ``DataPipe`` graph, adapts it into a new ``DataPipe`` graph based on the custom need.
        Called once when the ``DataLoader2`` iterator is first created. Prior to calling this method,
the ``ReadingService`` object must be picklable.
Args:
datapipe: Original ``DataPipe`` graph.
Return:
An adapted or a new ``DataPipe`` graph.
"""
pass
def finalize(self) -> None:
r"""
``ReadingService`` cleans up internal states and fully shuts down the service.
Called in ``DataLoader2``'s ``shutdown`` and ``__del__``.
"""
pass
def initialize_iteration(
self, seed_generator: SeedGenerator, iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]] = None
) -> Optional[Callable[[DataPipe], DataPipe]]:
r"""
        ``ReadingService`` spins up the service for an epoch. Called at the beginning
        of every epoch, each time the ``DataLoader2`` iterator is obtained.
Args:
seed_generator: SeedGenerator object created and managed by DataLoader2. As the single
source of randomness, it will govern the determinism for all of random operations
with the graph of DataPipes.
            iter_reset_fn: Optional reset function from the prior ``ReadingService``
when ``SequentialReadingService`` chains multiple ``ReadingServices``
Returns:
            A new ``iter_reset_fn`` to be used by subsequent ``ReadingService``
Example:
MultiProcessingReadingService starts setting worker seeds per process and prefetching
items from the graph.
"""
pass
def finalize_iteration(self) -> None:
r"""
``ReadingService`` ends service after an epoch is finished. Called when
the iterator of ``DataLoader2`` is depleted.
"""
pass
def __del__(self):
# Due to non-deterministic order of destruction, by the time `finalize` is called,
# some objects may already be `None`.
try:
self.finalize()
except AttributeError:
pass
class CheckpointableReadingServiceInterface(ReadingServiceInterface):
r"""
Extend ``ReadingServiceInterface`` with two additional methods to save/restore the state of the data-processing graph.
"""
@abstractmethod
def checkpoint(self) -> bytes:
"""
``ReadingService`` serializes the internal states. Called in ``DataLoader2.state_dict``.
"""
pass
@abstractmethod
def restore(self, datapipe: DataPipe, serialized_state: bytes) -> DataPipe:
"""
``ReadingService`` adapts ``DataPipe`` graph based on the serialized state.
        Called once when the ``DataLoader2`` iterator is first created.
        Counterpart of ``initialize``, which adapts the ``DataPipe`` graph from scratch.
Args:
datapipe: original ``DataPipe`` graph before adapted by ``ReadingService``
serialized_state: The serialized state of internal state used to restore the state
of the adapted ``DataPipe`` graph.
Returns:
Adapted ``DataPipe`` generated from the serialized state.
"""
pass
def _collate_no_op(batch):
return batch[0]
class PrototypeMultiProcessingReadingService(ReadingServiceInterface):
def __new__(cls, *args, **kwargs):
warnings.warn(
"`PrototypeMultiProcessingReadingService` is deprecated and will be removed in TorchData 0.8. "
"Please use `MultiProcessingReadingService`."
)
return MultiProcessingReadingService(*args, **kwargs)
class InProcessReadingService(ReadingServiceInterface):
r"""
    Default ReadingService to serve the ``DataPipe`` graph in the main process,
and apply graph settings like determinism control to the graph.
Args:
        prefetch_cnt: (int, 0 by default): Number of items to be prefetched in the main process.
init_fn: (Callable, optional): Custom function to be called when the main
process starts to iterate over ``DataPipe`` graph.
reset_fn: (Callable, optional): Custom function to be called at the beginning
of each epoch with ``DataPipe``, ``WorkerInfo`` and ``SeedGenerator``
as the expected arguments.
"""
_prefetch_cnt: int
_init_fn: Optional[Callable[[DataPipe, WorkerInfo], DataPipe]]
_reset_fn: Optional[Callable[[DataPipe, WorkerInfo, SeedGenerator], DataPipe]]
_end_datapipe: Optional[DataPipe]
def __init__(
self,
prefetch_cnt: int = 0,
init_fn: Optional[Callable[[DataPipe, WorkerInfo], DataPipe]] = None,
reset_fn: Optional[Callable[[DataPipe, WorkerInfo, SeedGenerator], DataPipe]] = None,
) -> None:
self._prefetch_cnt = prefetch_cnt
self._init_fn = init_fn
self._reset_fn = reset_fn
self._end_datapipe = None
def initialize(self, datapipe: DataPipe) -> DataPipe:
worker_info = WorkerInfo(1, 0)
datapipe = process_init_fn(datapipe, worker_info, self._init_fn)
self._end_datapipe = datapipe
return datapipe
def initialize_iteration(
self, seed_generator: SeedGenerator, iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]] = None
) -> Optional[Callable[[DataPipe], DataPipe]]:
assert self._end_datapipe is not None
# Set random seeds for DataPipe that are in the main process (NOT those in worker processes)
# Worker seeds are set in `process_reset_fn`
set_graph_random_seed(self._end_datapipe, seed_generator)
return None
def _pause(
self, pause_fn: Optional[Callable[[DataPipe], DataPipe]] = None
) -> Optional[Callable[[DataPipe], DataPipe]]:
"""
Pauses DataPipes' activities in the main process in order to collect state.
"""
assert self._end_datapipe is not None
dp_list = list_dps(traverse_dps(self._end_datapipe))
for dp in dp_list:
if hasattr(dp, "pause") and callable(dp.pause):
dp.pause()
return None
def _resume(
self, resume_fn: Optional[Callable[[DataPipe], DataPipe]] = None
) -> Optional[Callable[[DataPipe], DataPipe]]:
"""
Resumes DataPipes' activities. This is required to be called after `_pause` before
the DataLoader can keep yielding elements.
"""
assert self._end_datapipe is not None
dp_list = list_dps(traverse_dps(self._end_datapipe))
# Reversed order
for dp in dp_list[::-1]:
if hasattr(dp, "resume") and callable(dp.resume):
dp.resume()
return None
def _limit(
self, num_batches: Optional[int], limit_fn: Optional[Callable[[DataPipe, Optional[int]], DataPipe]] = None
) -> Optional[Callable[[DataPipe, Optional[int]], DataPipe]]:
r"""
Apply limit_fn to the DataPipe graph.
"""
if limit_fn is not None:
# TODO: Remove when flexible checkpoint is supported
limit_fn(self._end_datapipe, num_batches) # type: ignore[arg-type]
return None
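# Minimal usage sketch (hypothetical datapipe): the service runs the graph in
# the calling process, making it the lightweight counterpart of
# MultiProcessingReadingService for single-process workloads:
# rs = InProcessReadingService(prefetch_cnt=2)
# dl = DataLoader2(datapipe, reading_service=rs)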
class MultiProcessingReadingService(ReadingServiceInterface):
r"""
Spawns multiple worker processes to load data from the ``DataPipe`` graph.
    If any non-replicable ``DataPipe`` (``sharding_round_robin_dispatch``) is present in the graph,
    a separate dispatching process will be created to load data from the lowest common ancestor
    of all non-replicable ``DataPipes`` and distribute data to each worker process in a round-robin manner.
Then, the subsequent ``DataPipe`` graph in each worker process will process the data from the dispatching
process and eventually return the result to the main process.
Args:
num_workers (int): How many subprocesses to use for data loading.
multiprocessing_context (str, optional): Multiprocessing starting method.
            If method is None, the default context is used.
            Otherwise, method should be 'fork' or 'spawn'.
        worker_prefetch_cnt: (int, 10 by default): Number of items to be prefetched at
            the end of each worker process.
        main_prefetch_cnt: (int, 10 by default): Number of items to be prefetched
            at the end of the whole pipeline in the main process.
worker_init_fn: (Callable, optional): Function to be called when each worker
process launches with ``DataPipe`` and ``WorkerInfo`` as the expected arguments.
worker_reset_fn: (Callable, optional): Function to be called at the beginning
of each epoch in each worker process with ``DataPipe``, ``WorkerInfo``
and ``SeedGenerator`` as the expected arguments.
"""
num_workers: int
multiprocessing_context: Optional[str]
worker_prefetch_cnt: int
main_prefetch_cnt: int
worker_init_fn: Optional[Callable[[DataPipe, WorkerInfo], DataPipe]]
worker_reset_fn: Optional[Callable[[DataPipe, WorkerInfo, SeedGenerator], DataPipe]]
_worker_processes: List[Tuple[py_mp.process.BaseProcess, Queue, Queue]]
_dispatch_process: Optional[Tuple[py_mp.process.BaseProcess, List[Queue], List[Queue]]]
_worker_datapipes: List[DataPipe]
_worker_consumer_datapipe: Optional[DataPipe]
_main_prefetch_datapipe: Optional[DataPipe]
_end_datapipe: Optional[DataPipe]
_mp: bool
_finalized: bool = False
def __init__(
self,
num_workers: int = 0,
multiprocessing_context: Optional[str] = None,
worker_prefetch_cnt: int = 10,
main_prefetch_cnt: int = 10,
worker_init_fn: Optional[Callable[[DataPipe, WorkerInfo], DataPipe]] = None,
worker_reset_fn: Optional[Callable[[DataPipe, WorkerInfo, SeedGenerator], DataPipe]] = None,
) -> None:
if num_workers == 0:
warnings.warn("Please use `InProcessReadingService` for num_workers=0")
self.num_workers = num_workers
if multiprocessing_context is not None:
_all_start_methods = mp.get_all_start_methods()
assert (
multiprocessing_context in _all_start_methods
), f"Please choose one available multiprocessing context from {_all_start_methods}"
self.multiprocessing_context = multiprocessing_context
self.worker_prefetch_cnt = worker_prefetch_cnt
self.main_prefetch_cnt = main_prefetch_cnt
self.worker_init_fn = worker_init_fn
self.worker_reset_fn = worker_reset_fn
self._worker_processes = []
self._dispatch_process = None
self._worker_datapipes = []
self._worker_consumer_datapipe = None
self._main_prefetch_datapipe = None
self._end_datapipe = None
self._mp = num_workers > 0
def initialize(self, datapipe: DataPipe) -> DataPipe:
r"""
        ``MultiProcessingReadingService`` finds information about sharding,
        separates the graph into multiple pieces, reconnects them using queues,
        and creates subprocesses.
"""
if not self._mp:
# TODO(616): Warn and recommend usage of InProcessReadingService
worker_info = WorkerInfo(1, 0)
datapipe = process_init_fn(datapipe, worker_info, self.worker_init_fn)
self._end_datapipe = datapipe
return datapipe
ctx = mp.get_context(self.multiprocessing_context)
# Launch dispatching process for the lowest common ancestor of non-replicable DataPipes
graph = traverse_dps(datapipe)
dispatching_dp = find_lca_round_robin_sharding_dp(graph)
        # TODO(ejguan): When the last DataPipe is round_robin_sharding, use InProcessReadingService
if dispatching_dp is not None:
dummy_dp = _DummyIterDataPipe()
graph = replace_dp(graph, dispatching_dp, dummy_dp) # type: ignore[arg-type]
datapipe = list(graph.values())[0][0]
# TODO(ejguan): Determine buffer_size at runtime or use unlimited buffer
round_robin_dps = dispatching_dp.round_robin_demux(num_instances=self.num_workers)
# TODO(ejguan): Benchmark if we need to prefetch in dispatching process
worker_info = WorkerInfo(self.num_workers, 0)
process, req_queues, res_queues = communication.eventloop.CreateProcessForMultipleDataPipelines(
ctx,
round_robin_dps,
process_name="dispatching process",
worker_info=worker_info,
custom_reset_fn=self.worker_reset_fn,
)
assert len(req_queues) == self.num_workers and len(res_queues) == self.num_workers
for req_queue in req_queues:
req_queue.cancel_join_thread()
for res_queue in res_queues:
res_queue.cancel_join_thread()
process.daemon = True
process.start()
self._dispatch_process = (process, req_queues, res_queues)
# Find replicable branches for worker processes
# The rest of non-replicable DataPipes will remain in the main process
replicable_dps = _find_replicable_branches(graph)
assert (
len(replicable_dps) == 1
), "MultiProcessingReadingService only supports single replicable branch currently"
replicable_dp = replicable_dps[0]
replicable_dp = attach_wrapper(replicable_dp)
for worker_id in range(self.num_workers):
worker_info = WorkerInfo(self.num_workers, worker_id)
# Dispatching process for non-replicable DataPipes exists
dispatching_req_queue = None if self._dispatch_process is None else self._dispatch_process[1][worker_id]
dispatching_res_queue = None if self._dispatch_process is None else self._dispatch_process[2][worker_id]
call_on_process_init = partial(
process_init_fn,
worker_info=worker_info,
custom_init_fn=self.worker_init_fn,
worker_prefetch_cnt=self.worker_prefetch_cnt,
dispatching_req_queue=dispatching_req_queue,
dispatching_res_queue=dispatching_res_queue,
)
(process, req_queue, res_queue) = communication.eventloop.CreateProcessForDataPipeline(
ctx,
replicable_dp,
process_name="worker process",
worker_info=worker_info,
call_on_process_init=call_on_process_init,
custom_reset_fn=self.worker_reset_fn,
)
req_queue.cancel_join_thread()
process.daemon = True
process.start()
self._worker_processes.append((process, req_queue, res_queue)) # These queues are independent
local_datapipe = communication.iter.QueueWrapper(
communication.protocol.IterDataPipeQueueProtocolClient(req_queue, res_queue)
)
self._worker_datapipes.append(local_datapipe)
end_datapipe = communication.iter._IterateQueueDataPipes(self._worker_datapipes) # type: ignore[assignment]
self._worker_consumer_datapipe = end_datapipe
if self.main_prefetch_cnt > 0:
end_datapipe = self._worker_consumer_datapipe.prefetch(self.main_prefetch_cnt) # type: ignore[union-attr]
self._main_prefetch_datapipe = end_datapipe
# Attach non-replicable DataPipes
if replicable_dps[0] is not datapipe:
graph = replace_dp(graph, replicable_dps[0], end_datapipe)
end_datapipe = datapipe # type: ignore[assignment]
self._end_datapipe = end_datapipe
assert self._end_datapipe is not None
return self._end_datapipe # type: ignore[return-value]
def initialize_iteration(
self, seed_generator: SeedGenerator, iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]] = None
) -> Optional[Callable[[DataPipe], DataPipe]]:
assert self._end_datapipe is not None
# Set random seeds for DataPipe that are in the main process (NOT those in worker processes)
# Worker seeds are set in `process_reset_fn`
set_graph_random_seed(self._end_datapipe, seed_generator)
if self._mp:
if self.main_prefetch_cnt > 0:
# Stop prefetching first
self._main_prefetch_datapipe.reset() # type: ignore[union-attr]
# Send the shared seed to subprocesses
assert self._worker_consumer_datapipe is not None
self._worker_consumer_datapipe.reset_epoch(seed_generator, iter_reset_fn)
# In-process (num_workers == 0)
else:
# Technically speaking, we should call `_process_reset_fn` to reset global RNGs
# for data-related operations. However, it would pollute the state of global RNGs
# (random, torch and numpy), if users have already seeded them in the main process
# TODO(ejguan): This should be fixed by adding a method to isolate global RNGs
pass
return None
def finalize(self) -> None:
r"""
        ``MultiProcessingReadingService`` invalidates states and properly exits all subprocesses.
"""
if self._finalized:
return
self._finalized = True
        # TODO(618): Check if anyone is stuck with messages
# Clean up worker processes
if self.num_workers > 0:
self._worker_consumer_datapipe.request_terminate() # type: ignore[union-attr]
for process, req_queue, _ in self._worker_processes:
try:
process.join(default_dl2_worker_join_timeout_in_s)
except TimeoutError:
pass
req_queue.close()
# Clean up dispatching process
if self._dispatch_process is not None:
try:
self._dispatch_process[0].join(default_dl2_worker_join_timeout_in_s)
except TimeoutError:
pass
for req_queue in self._dispatch_process[1]:
req_queue.close()
self._worker_processes = []
self._dispatch_process = None
def _pause(
self, pause_fn: Optional[Callable[[DataPipe], DataPipe]] = None
) -> Optional[Callable[[DataPipe], DataPipe]]:
r"""
Pauses DataPipes' activities such as prefetching within main/worker/dispatching processes,
in order to collect state. The provided ``pause_fn`` will be executed in
worker/dispatching processes.
"""
if self.num_workers == 0:
raise RuntimeError(
"If you would like to use `pause` with `MultiProcessingReadingService`, "
"please use more than 0 worker."
)
assert self._end_datapipe is not None
# Call pause for DataPipes in the main process (e.g. prefetch, fullsync)
dp_list = list_dps(traverse_dps(self._end_datapipe))
for dp in dp_list:
if hasattr(dp, "pause") and callable(dp.pause):
dp.pause()
self._worker_consumer_datapipe.request_pause(pause_fn) # type: ignore[union-attr]
return None
def _resume(
self, resume_fn: Optional[Callable[[DataPipe], DataPipe]] = None
) -> Optional[Callable[[DataPipe], DataPipe]]:
r"""
Resumes DataPipes' activities. This is required to be called after `_pause` before
the DataLoader can keep yielding elements.
"""
if self.num_workers > 0:
self._worker_consumer_datapipe.request_resume(resume_fn) # type: ignore[union-attr]
else:
raise RuntimeError(
"If you would like to use `resume` with `MultiProcessingReadingService`, "
"please use more than 0 worker."
)
assert self._end_datapipe is not None
# Call resume for DataPipes in the main process (e.g. prefetch, fullsync)
dp_list = list_dps(traverse_dps(self._end_datapipe))
for dp in dp_list[::-1]:
if hasattr(dp, "resume") and callable(dp.resume):
dp.resume()
return None
def _limit(
self, num_batches: Optional[int], limit_fn: Optional[Callable[[DataPipe, Optional[int]], DataPipe]] = None
) -> Optional[Callable[[DataPipe, Optional[int]], DataPipe]]:
r"""
        Sends ``limit_fn`` to the worker/dispatching process to set the limit on the specified DataPipes.
"""
if limit_fn is not None:
            # Only propagate the limit when a dispatching process exists
num_batches = None if self._dispatch_process is None else num_batches
self._worker_consumer_datapipe.request_limit(num_batches, limit_fn) # type: ignore[union-attr]
# TODO: Remove when flexible checkpoint is supported
limit_fn(self._end_datapipe, num_batches) # type: ignore[arg-type]
return None
class DistributedReadingService(ReadingServiceInterface):
r"""
    ``DistributedReadingService`` handles distributed sharding on the graph of ``DataPipe`` and
    guarantees randomness by sharing the same seed across the distributed processes.
Args:
timeout: Timeout for operations executed against the process group in seconds.
Default value equals 30 minutes.
"""
def __init__(self, timeout: int = default_timeout_in_s):
if not dist.is_available():
raise RuntimeError("Torch Distributed is required to be available")
self._world_size: int = 1
self._rank: int = 0
self._datapipe: Optional[DataPipe] = None
self._timeout: int = timeout
self._pg: Optional[dist.ProcessGroup] = None
def initialize(self, datapipe: DataPipe) -> DataPipe:
r"""
Launches the ``gloo``-backend distributed process group. Carries out distributed sharding
on the graph of ``DataPipe`` and returns the graph attached with a ``FullSyncIterDataPipe``
at the end.
"""
if not (dist.is_available() and dist.is_initialized()):
raise RuntimeError("Torch Distributed is required to be initialized")
self._world_size = dist.get_world_size()
self._rank = dist.get_rank()
self._pg = dist.new_group(backend="gloo", timeout=timedelta(seconds=self._timeout))
torch.utils.data.graph_settings.apply_sharding(
datapipe, self._world_size, self._rank, SHARDING_PRIORITIES.DISTRIBUTED
)
        # Only append FullSyncIterDataPipe if it's not present at the end of the pipeline
if not isinstance(datapipe, FullSync):
datapipe = datapipe.fullsync(self._timeout)
self._datapipe = datapipe
return datapipe
def initialize_iteration(
self, seed_generator: SeedGenerator, iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]] = None
) -> Optional[Callable[[DataPipe], DataPipe]]:
r"""
        Shares the same seed from rank 0 to other ranks across the distributed processes
        and applies the random seed to the ``DataPipe`` graph.
"""
assert self._datapipe is not None
shared_seed = dist_share_seed(seed_generator.generate_shared_seed(), self._pg)
seed_generator.seed(shared_seed)
seed_generator = seed_generator.spawn(self._rank, inplace=True)
set_graph_random_seed(self._datapipe, seed_generator)
return None
def finalize(self) -> None:
r"""
Clean up the distributed process group.
"""
if self._pg is not None:
dist.destroy_process_group(self._pg)
self._pg = None
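def _demo_distributed_reading_service():
    # Illustrative sketch, not part of the original module: attach
    # ``DistributedReadingService`` to a ``DataLoader2`` after the distributed
    # process group has been initialized. The pipeline below is an assumption.
    import torch.distributed as dist
    from torchdata.dataloader2 import DataLoader2  # deferred import to avoid a circular import
    from torchdata.datapipes.iter import IterableWrapper
    dist.init_process_group(backend="gloo")
    # ``sharding_filter`` marks the point where distributed sharding is applied.
    dp = IterableWrapper(range(100)).shuffle().sharding_filter()
    dl = DataLoader2(dp, reading_service=DistributedReadingService())
    for _item in dl:
        pass
    dl.shutdown()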
class SequentialReadingService(CheckpointableReadingServiceInterface):
def __init__(self, *reading_services):
self.reading_services = reading_services
# Sequential Order
def initialize(self, datapipe: DataPipe) -> DataPipe:
for rs in self.reading_services:
datapipe = rs.initialize(datapipe)
return datapipe
# Reversed Order
def finalize(self) -> None:
for rs in reversed(self.reading_services):
rs.finalize()
# Sequential Order
def initialize_iteration(
self, seed_generator: SeedGenerator, iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]] = None
) -> Optional[Callable[[DataPipe], DataPipe]]:
chained_iter_reset_fn = iter_reset_fn
for rs in self.reading_services:
chained_iter_reset_fn = rs.initialize_iteration(
seed_generator=seed_generator, iter_reset_fn=chained_iter_reset_fn
)
return chained_iter_reset_fn
# Reversed Order
def finalize_iteration(self) -> None:
for rs in reversed(self.reading_services):
rs.finalize_iteration()
# Sequential Order
def checkpoint(self) -> bytes:
states = []
for rs in self.reading_services:
if hasattr(rs, "checkpoint") and callable(rs.checkpoint):
states.append(rs.checkpoint())
else:
warnings.warn(f"{rs} doesn't support `checkpoint`, skipping...")
states.append(b"")
return pickle.dumps(states)
# Sequential Order, to align with initialize
def restore(self, datapipe, serialized_state: bytes) -> DataPipe:
states = pickle.loads(serialized_state)
assert len(states) == len(self.reading_services)
for rs, state in zip(self.reading_services, states):
if hasattr(rs, "restore") and callable(rs.restore):
datapipe = rs.restore(datapipe, state)
else:
warnings.warn(f"{rs} doesn't support `restore` from state, initialize from scratch")
datapipe = rs.initialize(datapipe)
return datapipe
def _pause(
self, pause_fn: Optional[Callable[[DataPipe], DataPipe]] = None
) -> Optional[Callable[[DataPipe], DataPipe]]:
r"""
        Pauses the ``DataPipe`` graph defined in all ``ReadingServices``. For example,
        ``MultiProcessingReadingService`` would accept a ``pause_fn`` from a prior ``ReadingService``
        to execute custom pause logic within worker/dispatching processes.
"""
for rs in self.reading_services:
if hasattr(rs, "_pause"):
pause_fn = rs._pause(pause_fn)
return pause_fn
def _resume(
self, resume_fn: Optional[Callable[[DataPipe], DataPipe]] = None
) -> Optional[Callable[[DataPipe], DataPipe]]:
r"""
        Resumes the ``DataPipe`` graph defined in all ``ReadingServices``. For example,
        ``MultiProcessingReadingService`` would accept a ``resume_fn`` from a prior ``ReadingService``
        to execute custom resume logic within worker/dispatching processes.
"""
for rs in self.reading_services:
if hasattr(rs, "_resume"):
resume_fn = rs._resume(resume_fn)
return resume_fn
def _limit(
self, num_batches: Optional[int], limit_fn: Optional[Callable[[DataPipe, Optional[int]], DataPipe]] = None
) -> Optional[Callable[[DataPipe, Optional[int]], DataPipe]]:
r"""
        Limits the ``DataPipe`` graph defined in all ``ReadingServices``. For example,
        ``MultiProcessingReadingService`` would accept a ``limit_fn`` from a prior ``ReadingService``
        to set a limit on the ``DataPipes`` within worker/dispatching processes.
"""
for rs in self.reading_services:
if hasattr(rs, "_limit"):
limit_fn = rs._limit(num_batches, limit_fn)
return limit_fn
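def _demo_sequential_reading_service():
    # Illustrative sketch, not part of the original module: compose distributed
    # sharding with multiprocessing workers. ``SequentialReadingService``
    # initializes the graph with each service in order, so the distributed
    # service shards first and the workers split the remainder. The pipeline
    # and worker count below are assumptions.
    from torchdata.dataloader2 import DataLoader2  # deferred import to avoid a circular import
    from torchdata.datapipes.iter import IterableWrapper
    dp = IterableWrapper(range(1000)).shuffle().sharding_filter()
    rs = SequentialReadingService(
        DistributedReadingService(),
        MultiProcessingReadingService(num_workers=2),
    )
    dl = DataLoader2(dp, reading_service=rs)
    for _item in dl:
        pass
    dl.shutdown()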
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pickle
from torch.utils.data.datapipes.datapipe import (
_DataPipeSerializationWrapper,
_IterDataPipeSerializationWrapper,
_MapDataPipeSerializationWrapper,
)
from torchdata.dataloader2.graph import DataPipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.map import MapDataPipe
try:
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
HAS_DILL = True
except ImportError:
HAS_DILL = False
__all__ = [
"attach_wrapper",
"clone",
"deserialize_datapipe",
"extract_wrapper",
"serialize_datapipe",
]
def serialize_datapipe(datapipe: DataPipe) -> bytes:
datapipe = attach_wrapper(datapipe)
try:
return pickle.dumps(datapipe)
except pickle.PickleError as e:
raise NotImplementedError(f"Prototype only support pickle-able datapipes for checkpoint: {e}")
def deserialize_datapipe(serialized_state: bytes) -> DataPipe:
try:
datapipe = pickle.loads(serialized_state)
except pickle.PickleError as e:
raise NotImplementedError(f"Prototype only support pickle-able datapipes for checkpoint: {e}")
return extract_wrapper(datapipe)
def attach_wrapper(datapipe: DataPipe) -> DataPipe:
r"""
Wraps the ``DataPipe`` with the corresponding serialization wrapper.
"""
wrapped_dp: DataPipe = datapipe
if not isinstance(datapipe, _DataPipeSerializationWrapper):
if isinstance(datapipe, IterDataPipe):
wrapped_dp = _IterDataPipeSerializationWrapper(datapipe)
elif isinstance(datapipe, MapDataPipe):
wrapped_dp = _MapDataPipeSerializationWrapper(datapipe)
return wrapped_dp
def extract_wrapper(datapipe: DataPipe) -> DataPipe:
r"""
Extracts the ``DataPipe`` from the serialization wrapper.
"""
if isinstance(datapipe, _DataPipeSerializationWrapper):
datapipe = datapipe._datapipe
return datapipe
def clone(obj):
r"""
Standardized way to copy an object when needed, such as for DataPipe/ReadingService.
This uses `pickle` to serialize/deserialize to create the copy.
"""
use_dill = False
try:
states = pickle.dumps(obj)
except Exception:
if HAS_DILL:
states = dill.dumps(obj)
use_dill = True
else:
raise
if use_dill:
return dill.loads(states)
else:
return pickle.loads(states)
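def _demo_serialization_round_trip():
    # Illustrative sketch, not part of the original module: round-trip a
    # DataPipe through the checkpoint helpers and copy one with ``clone``.
    # The pipeline below is an assumption.
    from torchdata.datapipes.iter import IterableWrapper
    dp = IterableWrapper(range(10))
    state = serialize_datapipe(dp)  # attaches the wrapper, then pickles
    restored = deserialize_datapipe(state)  # unpickles, then extracts the wrapper
    assert list(restored) == list(range(10))
    copied = clone(dp)  # pickle-based deep copy, falling back to dill when pickling fails
    assert list(copied) == list(dp)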
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data.graph import DataPipe, DataPipeGraph, traverse_dps
from torchdata.dataloader2.graph.settings import set_datapipes_seed, set_graph_random_seed
from torchdata.dataloader2.graph.utils import find_dps, list_dps, remove_dp, replace_dp
__all__ = [
"DataPipe",
"DataPipeGraph",
"find_dps",
"list_dps",
"remove_dp",
"replace_dp",
"set_datapipes_seed",
"set_graph_random_seed",
"traverse_dps",
]
assert __all__ == sorted(__all__)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from collections import deque
from typing import Deque, Dict, List, Optional, Set, Type, Union
from torchdata.dataloader2.graph import DataPipe, DataPipeGraph, traverse_dps
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.map import MapDataPipe
def find_dps(graph: DataPipeGraph, dp_type: Type[DataPipe]) -> List[DataPipe]:
r"""
Given the graph of DataPipe generated by ``traverse_dps`` function, return DataPipe
instances with the provided DataPipe type.
"""
dps: List[DataPipe] = []
cache: Set[int] = set()
def helper(g) -> None: # pyre-ignore
for dp_id, (dp, src_graph) in g.items():
if dp_id in cache:
continue
cache.add(dp_id)
            if type(dp) is dp_type:  # Please do not use `isinstance`; there is a known bug with it.
dps.append(dp)
helper(src_graph)
helper(graph)
return dps
def list_dps(graph: DataPipeGraph, exclude_dps: Optional[Union[DataPipe, List[DataPipe]]] = None) -> List[DataPipe]:
r"""
Given the graph of DataPipe generated by ``traverse_dps`` function, return a list
of all DataPipe instances without duplication. If ``exclude_dps`` is provided,
the provided ``DataPipes`` and their predecessors will be ignored.
Note:
- The returned list is in the order of breadth first search of the graph
"""
dps: List[DataPipe] = []
cache: Set[int] = set()
if exclude_dps is not None:
if isinstance(exclude_dps, (IterDataPipe, MapDataPipe)):
exclude_dps = [
exclude_dps,
]
for exclude_dp in exclude_dps: # type: ignore[union-attr]
assert isinstance(exclude_dp, (IterDataPipe, MapDataPipe))
# Skip DataPipe that has already been excluded
if id(exclude_dp) in cache:
continue
for dp in list_dps(traverse_dps(exclude_dp)): # type: ignore[arg-type]
cache.add(id(dp))
q: Deque = deque()
# Initialization
for dp_id, (dp, subgraph) in graph.items():
if dp_id not in cache:
q.append((dp_id, dp, subgraph))
cache.add(dp_id)
while len(q) > 0:
dp_id, dp, subgraph = q.popleft()
dps.append(dp)
for parent_dp_id, (parent_dp, parent_subgraph) in subgraph.items():
if parent_dp_id not in cache:
q.append((parent_dp_id, parent_dp, parent_subgraph))
cache.add(parent_dp_id)
return dps
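def _demo_graph_inspection():
    # Illustrative sketch, not part of the original module: inspect a two-node
    # graph with ``find_dps`` and ``list_dps``. The pipeline is an assumption;
    # ``ShardingFilter`` is the class created by ``.sharding_filter()``.
    from torchdata.datapipes.iter import IterableWrapper, ShardingFilter
    dp = IterableWrapper(range(10)).sharding_filter()
    graph = traverse_dps(dp)
    sharding_dps = find_dps(graph, ShardingFilter)  # exact type match only
    all_dps = list_dps(graph)  # BFS order, deduplicated
    assert len(sharding_dps) == 1 and len(all_dps) == 2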
# Given the DataPipe needs to be replaced and the expected DataPipe, return a new graph
def replace_dp(graph: DataPipeGraph, old_datapipe: DataPipe, new_datapipe: DataPipe) -> DataPipeGraph:
r"""
Given the graph of DataPipe generated by ``traverse_dps`` function and the DataPipe to be replaced and
the new DataPipe, return the new graph of DataPipe.
"""
assert len(graph) == 1
if id(old_datapipe) in graph:
graph = traverse_dps(new_datapipe)
final_datapipe = list(graph.values())[0][0]
for recv_dp, send_graph in graph.values():
_replace_dp(recv_dp, send_graph, old_datapipe, new_datapipe)
return traverse_dps(final_datapipe)
def remove_dp(graph: DataPipeGraph, datapipe: DataPipe) -> DataPipeGraph:
r"""
Given the graph of DataPipe generated by ``traverse_dps`` function and the DataPipe to be removed,
return the new graph of DataPipe.
Note:
        - This function cannot remove a DataPipe that takes multiple DataPipes as input.
"""
assert len(graph) == 1
dp_graph = traverse_dps(datapipe)
dp_id = id(datapipe)
if len(dp_graph[dp_id][1]) == 0:
raise RuntimeError("Cannot remove the source DataPipe from the graph of DataPipe")
if len(dp_graph[dp_id][1]) > 1:
raise RuntimeError("Cannot remove the receiving DataPipe having multiple sending DataPipes")
if dp_id in graph:
graph = graph[dp_id][1]
for recv_dp, send_graph in graph.values():
_remove_dp(recv_dp, send_graph, datapipe)
# Get the last DataPipe in graph
assert len(graph) == 1
datapipe = list(graph.values())[0][0]
return traverse_dps(datapipe)
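def _demo_graph_rewrite():
    # Illustrative sketch, not part of the original module: swap the source of
    # a small pipeline with ``replace_dp``. The pipeline is an assumption.
    from torchdata.datapipes.iter import IterableWrapper
    source = IterableWrapper(range(5))
    pipe = source.sharding_filter()
    graph = traverse_dps(pipe)
    new_source = IterableWrapper(range(100))
    graph = replace_dp(graph, source, new_source)
    # ``pipe`` now reads from ``new_source``; with no sharding applied,
    # it simply passes the new elements through.
    assert list(pipe) == list(range(100))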
def _find_replicable_branches(graph: DataPipeGraph) -> List[DataPipe]:
r"""
Given the graph of DataPipe generated by ``traverse_dps`` function, return DataPipe
    instances for which all prior DataPipes are replicable (``dp.is_replicable() == True``).
"""
assert len(graph) == 1, "DataPipeGraph should only contain a single output DataPipe"
dps: List[DataPipe] = []
dp_ids: Set[int] = set()
branch_is_replicable: Dict[int, bool] = {}
root_dp_id = list(graph.keys())[0]
root_dp, root_graph = graph[root_dp_id]
def _is_replicable(root_dp_id, root_dp, root_graph) -> bool: # pyre-ignore
if root_dp_id in branch_is_replicable:
return branch_is_replicable[root_dp_id]
# Temporarily set to True
branch_is_replicable[root_dp_id] = True
if hasattr(root_dp, "is_replicable") and not root_dp.is_replicable():
branch_is_replicable[root_dp_id] = False
for dp_id, (dp, src_graph) in root_graph.items():
if not _is_replicable(dp_id, dp, src_graph):
branch_is_replicable[root_dp_id] = False
# Do not break to go through all children
if not branch_is_replicable[root_dp_id]:
# All children should have been added to branch_is_replicable already
for dp_id, (dp, _) in root_graph.items():
if dp_id in dp_ids:
continue
if branch_is_replicable[dp_id]:
# Guarantee returning the frontmost replicable DataPipe
prior_dps = list_dps(traverse_dps(dp))
if all(id(p_dp) not in dp_ids for p_dp in prior_dps):
dps.append(dp)
dp_ids.add(dp_id)
return branch_is_replicable[root_dp_id]
if _is_replicable(root_dp_id, root_dp, root_graph):
if root_dp_id not in dp_ids:
# Guarantee returning the frontmost replicable DataPipe
prior_dps = list_dps(traverse_dps(root_dp))
if all(id(p_dp) not in dp_ids for p_dp in prior_dps):
dps.append(root_dp)
dp_ids.add(root_dp_id)
return dps
# For each `recv_dp`, find if the source_datapipe needs to be replaced by the new one.
# If found, find where the `old_dp` is located in `recv_dp` and switch it to the `new_dp`
def _replace_dp(recv_dp, send_graph: DataPipeGraph, old_dp: DataPipe, new_dp: DataPipe) -> None:
old_dp_id = id(old_dp)
for send_id in send_graph:
if send_id == old_dp_id:
_assign_attr(recv_dp, old_dp, new_dp, inner_dp=True)
else:
send_dp, sub_send_graph = send_graph[send_id]
_replace_dp(send_dp, sub_send_graph, old_dp, new_dp)
# For each `recv_dp`, find if `datapipe` needs to be removed from the graph.
# If found, reconnect `recv_dp` to the source of `datapipe`, bypassing it
def _remove_dp(recv_dp, send_graph: DataPipeGraph, datapipe: DataPipe) -> None:
dp_id = id(datapipe)
for send_dp_id in send_graph:
if send_dp_id == dp_id:
send_dp, sub_send_graph = send_graph[send_dp_id]
# if len(sub_send_graph) == 0:
# raise RuntimeError("Cannot remove the source DataPipe from the graph of DataPipe")
# if len(sub_send_graph) > 1:
# raise RuntimeError("Cannot remove the receiving DataPipe having multiple sending DataPipes")
src_dp = list(sub_send_graph.values())[0][0]
_assign_attr(recv_dp, send_dp, src_dp, inner_dp=True)
else:
send_dp, sub_send_graph = send_graph[send_dp_id]
_remove_dp(send_dp, sub_send_graph, datapipe)
# Recursively re-assign datapipe for the sake of nested data structure
# `inner_dp` is used to prevent recursive call if we have already met a `DataPipe`
def _assign_attr(obj, old_dp, new_dp, inner_dp: bool = False):
if obj is old_dp:
return new_dp
elif isinstance(obj, (IterDataPipe, MapDataPipe)):
# Prevent recursive call for DataPipe
if not inner_dp:
return None
for k in list(obj.__dict__.keys()):
new_obj = _assign_attr(obj.__dict__[k], old_dp, new_dp)
if new_obj is not None:
obj.__dict__[k] = new_obj
break
return None
elif isinstance(obj, dict):
for k in list(obj.keys()):
new_obj = _assign_attr(obj[k], old_dp, new_dp)
if new_obj is not None:
obj[k] = new_obj
break
return None
    # Tuples are immutable, so a new tuple has to be created
elif isinstance(obj, tuple):
temp_list = []
flag = False
for o in obj:
new_obj = _assign_attr(o, old_dp, new_dp, inner_dp)
if new_obj is not None:
flag = True
                temp_list.append(new_obj)  # Use the rebuilt object so nested tuples are handled
else:
temp_list.append(o)
if flag:
return tuple(temp_list) # Special case
else:
return None
elif isinstance(obj, list):
for i in range(len(obj)):
new_obj = _assign_attr(obj[i], old_dp, new_dp, inner_dp)
if new_obj is not None:
obj[i] = new_obj
break
return None
elif isinstance(obj, set):
new_obj = None
for o in obj:
if _assign_attr(o, old_dp, new_dp, inner_dp) is not None:
new_obj = new_dp
break
if new_obj is not None:
obj.remove(old_dp)
obj.add(new_dp)
return None
else:
return None
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import inspect
from typing import List
from torchdata.dataloader2.graph.utils import DataPipe, find_dps, list_dps, traverse_dps
from torchdata.dataloader2.random import SeedGenerator
from torchdata.datapipes.iter import ShardingFilter
def _is_random_datapipe(datapipe: DataPipe) -> bool:
if hasattr(datapipe, "set_seed") and inspect.ismethod(datapipe.set_seed):
return True
return False
def set_datapipes_seed(datapipes: List[DataPipe], seed_generator: SeedGenerator, distributed_shared: bool) -> None:
for dp in datapipes:
if _is_random_datapipe(dp):
if distributed_shared:
dp.set_seed(seed_generator.generate_shared_seed())
else:
dp.set_seed(seed_generator.generate_seed())
def set_graph_random_seed(datapipe: DataPipe, seed_generator: SeedGenerator) -> DataPipe:
r"""
    Set seeds to the graph of ``DataPipes`` based on a Seed Generator. All random ``DataPipes``
    prior to ``ShardingFilter`` are seeded by the same Seed Generator to preserve the same random
    state across distributed/non-distributed workers, and the random ``DataPipes`` after
    ``ShardingFilter`` are seeded by a worker-local Seed Generator deterministically created
    based on ``worker_id``.
    Args:
        datapipe: DataPipe graph to be seeded
        seed_generator: Seed Generator used to produce the seeds
"""
graph = traverse_dps(datapipe)
sharding_filter_dps = find_dps(graph, ShardingFilter)
# Set the same seed before sharding_filter
    # Using a cache to exclude potential duplicate DataPipes
cache = set()
dps_before_sharding = []
for sf_dp in sharding_filter_dps:
dps = list_dps(traverse_dps(sf_dp))
for dp in dps:
if id(dp) not in cache:
cache.add(id(dp))
dps_before_sharding.append(dp)
set_datapipes_seed(dps_before_sharding, seed_generator, distributed_shared=True)
# Set different seeds after sharding_filter
dps_after_sharding = list_dps(graph, exclude_dps=sharding_filter_dps)
set_datapipes_seed(dps_after_sharding, seed_generator, distributed_shared=False)
return datapipe
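def _demo_set_graph_random_seed():
    # Illustrative sketch, not part of the original module: seed a graph so
    # that shuffling before ``sharding_filter`` stays identical across
    # workers. The pipeline below is an assumption.
    from torchdata.datapipes.iter import IterableWrapper
    dp = IterableWrapper(range(10)).shuffle().sharding_filter()
    seed_generator = SeedGenerator()
    seed_generator.seed(0)  # same call the distributed reading service uses
    set_graph_random_seed(dp, seed_generator)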
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
from dataclasses import dataclass
from multiprocessing.queues import Queue
from typing import Callable, Optional
import torch
from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES
from torchdata.dataloader2 import communication
from torchdata.dataloader2.graph import (
DataPipe,
find_dps,
list_dps,
replace_dp,
set_datapipes_seed,
set_graph_random_seed,
traverse_dps,
)
from torchdata.dataloader2.random import SeedGenerator
from torchdata.dataloader2.utils.dispatch import _DummyIterDataPipe, find_non_dispatching_branches
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.map import MapDataPipe
try:
import numpy
HAS_NUMPY = True
except ModuleNotFoundError:
HAS_NUMPY = False
@dataclass(frozen=True)
class WorkerInfo:
r"""
Message class for keeping track of worker information.
Args:
num_workers (int): Total number of worker processes
worker_id (int): Worker ID for the current worker process
"""
num_workers: int
worker_id: int
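# For example, a pool of four workers would construct, in each subprocess,
# ``WorkerInfo(num_workers=4, worker_id=i)`` with ``i`` in ``range(4)``; the
# dataclass is frozen, so the fields are read-only once created.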
def process_init_fn(
datapipe: DataPipe,
worker_info: WorkerInfo,
custom_init_fn: Optional[Callable[[DataPipe, WorkerInfo], DataPipe]] = None,
worker_prefetch_cnt: int = 0,
dispatching_req_queue: Optional[Queue] = None,
dispatching_res_queue: Optional[Queue] = None,
) -> DataPipe:
r"""
Based on the worker information, shard the ``DataPipe`` graph dynamically.
"""
# Find if there is non-replicable DataPipe
graph = traverse_dps(datapipe)
non_replicable_dp = find_dps(graph, _DummyIterDataPipe) # type: ignore
# There are two cases for DataPipe graph in terms of mp sharding:
# 1) All DataPipes are replicable, apply mp sharding to the whole graph
if len(non_replicable_dp) == 0:
torch.utils.data.graph_settings.apply_sharding(
datapipe, worker_info.num_workers, worker_info.worker_id, SHARDING_PRIORITIES.MULTIPROCESSING
)
assert dispatching_req_queue is None and dispatching_res_queue is None
    # 2) There is a non-replicable DataPipe. Since we have replaced the lowest common
    #    ancestor by a `_DummyIterDataPipe`, we only apply mp sharding
    #    to the replicable branches that don't contain `_DummyIterDataPipe`.
else:
assert len(non_replicable_dp) == 1
assert not (dispatching_req_queue is None and dispatching_res_queue is None)
dispatching_req_queue.cancel_join_thread() # type: ignore[union-attr]
non_dispatching_branches = find_non_dispatching_branches(graph)
for dp in non_dispatching_branches:
torch.utils.data.graph_settings.apply_sharding(
dp, worker_info.num_workers, worker_info.worker_id, SHARDING_PRIORITIES.MULTIPROCESSING
)
queue_wrapper = communication.iter.QueueWrapper(
communication.protocol.IterDataPipeQueueProtocolClient(dispatching_req_queue, dispatching_res_queue)
)
dispatch_process_dp = communication.iter._IterateQueueDataPipes([queue_wrapper])
graph = replace_dp(graph, non_replicable_dp[0], dispatch_process_dp)
datapipe = list(graph.values())[0][0]
if custom_init_fn is not None:
datapipe = custom_init_fn(datapipe, worker_info)
assert isinstance(datapipe, (IterDataPipe, MapDataPipe))
if worker_prefetch_cnt > 0:
datapipe = datapipe.prefetch(worker_prefetch_cnt)
return datapipe
def _set_global_random_state(seed_generator: SeedGenerator, distributed_shared: bool = False) -> None:
py_seed = seed_generator.generate_shared_seed() if distributed_shared else seed_generator.generate_seed()
random.seed(py_seed)
torch_seed = seed_generator.generate_shared_seed() if distributed_shared else seed_generator.generate_seed()
torch.manual_seed(torch_seed)
if HAS_NUMPY:
# Convert uint64 to uint32 for Numpy
np_seed = seed_generator.generate_shared_seed() if distributed_shared else seed_generator.generate_seed()
np_seed = np_seed >> 32
numpy.random.seed(np_seed)
def process_reset_fn(
datapipe: DataPipe,
worker_info: WorkerInfo,
seed_generator: SeedGenerator,
distributed_shared_seed: bool = False,
iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]] = None,
custom_reset_fn: Optional[Callable[[DataPipe, WorkerInfo, SeedGenerator], DataPipe]] = None,
) -> DataPipe:
r"""
Based on the distributed shared random seed and worker id, this function is used to
reset the random state of the ``DataPipe`` graph and the global random states for ``torch``,
``random`` and ``numpy``.
"""
# Set global random states
_set_global_random_state(seed_generator, distributed_shared=distributed_shared_seed)
if distributed_shared_seed:
graph = traverse_dps(datapipe)
dps = list_dps(graph)
set_datapipes_seed(dps, seed_generator=seed_generator, distributed_shared=distributed_shared_seed)
else:
set_graph_random_seed(datapipe, seed_generator)
if iter_reset_fn is not None:
datapipe = iter_reset_fn(datapipe)
assert isinstance(datapipe, (IterDataPipe, MapDataPipe))
if custom_reset_fn is not None:
datapipe = custom_reset_fn(datapipe, worker_info, seed_generator)
assert isinstance(datapipe, (IterDataPipe, MapDataPipe))
return datapipe
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# from multiprocessing.queues import Queue
from typing import Dict, List, Optional, Set
from torchdata.dataloader2.graph import DataPipe, DataPipeGraph, list_dps, traverse_dps
from torchdata.datapipes.iter import IterDataPipe, ShardingRoundRobinDispatcher
__all__ = ["_DummyIterDataPipe", "find_lca_round_robin_sharding_dp", "find_non_dispatching_branches"]
class _DummyIterDataPipe(IterDataPipe):
r"""
    This DataPipe is a placeholder to be replaced by the ``QueueWrapper``
    that connects the worker process to the dispatching process for non-replicable DataPipes.
"""
# TODO: Revert `_DummyIterDataPipe` as the placeholder when `_SerializationWrapper`
# can handle mp.Queue. See: https://github.com/pytorch/data/issues/934
# req_queue: Queue
# res_queue: Queue
def find_lca_round_robin_sharding_dp(graph: DataPipeGraph) -> Optional[DataPipe]:
r"""
Given the graph of DataPipe generated by ``traverse_dps`` function, return the
DataPipe instance that is the lowest common ancestor of all ``sharding_round_robin_dispatch`` DataPipes
Note:
- If multiple branches share the same source DataPipe and any branch contains a
non-replicable DataPipe, the lowest common ancestor of all branches is returned.
- If there is any non-replicable DataPipe in a circular-referenced (sub)graph, the
whole (sub)graph is treated as non-replicable and the last DataPipe is returned.
"""
assert len(graph) == 1, "DataPipeGraph should only contain a single output DataPipe"
def _is_round_robin_sharding(dp: DataPipe) -> bool:
return type(dp) == ShardingRoundRobinDispatcher
dps = list_dps(graph)
non_replicable_dps: Set[int] = set()
for dp in dps:
# Skip when it has been visited
if id(dp) in non_replicable_dps:
continue
if _is_round_robin_sharding(dp):
parent_dps = list_dps(traverse_dps(dp))
for par_dp in parent_dps:
non_replicable_dps.add(id(par_dp))
root_dp_id = list(graph.keys())[0]
root_dp, root_graph = graph[root_dp_id]
lca_for_subgraph: Dict[int, Optional[DataPipe]] = {}
def _get_lca_from_graph(root_dp_id, root_dp, root_graph) -> Optional[DataPipe]: # pyre-ignore
if root_dp_id in lca_for_subgraph:
return lca_for_subgraph[root_dp_id]
if root_dp_id in non_replicable_dps:
lca_for_subgraph[root_dp_id] = root_dp
return root_dp
lca_for_subgraph[root_dp_id] = None
non_replicable_parents = []
for dp_id, (dp, src_graph) in root_graph.items():
res = _get_lca_from_graph(dp_id, dp, src_graph)
if res is not None:
non_replicable_parents.append(res)
        # `root_dp` becomes the lowest common ancestor of this branch
        # if there is more than one unique non-replicable DataPipe prior to it.
if len(non_replicable_parents) > 0:
# One unique non-replicable DataPipe
if len(non_replicable_parents) == 1 or all(
dp == non_replicable_parents[0] for dp in non_replicable_parents
):
lca_for_subgraph[root_dp_id] = non_replicable_parents[0]
# Multiple non-replicable DataPipes
else:
lca_for_subgraph[root_dp_id] = root_dp
return lca_for_subgraph[root_dp_id]
return _get_lca_from_graph(root_dp_id, root_dp, root_graph)
def find_non_dispatching_branches(graph: DataPipeGraph) -> List[DataPipe]:
r"""
Given the graph of DataPipe generated by ``traverse_dps`` function, return the DataPipe
    instances that don't have a ``_DummyIterDataPipe`` (dispatching process) in their prior graph.
"""
assert len(graph) == 1, "DataPipeGraph should only contain a single output DataPipe"
dps: List[DataPipe] = []
non_dispatching_branches: Dict[int, bool] = {}
root_dp_id = list(graph.keys())[0]
root_dp, root_graph = graph[root_dp_id]
def _is_non_dispatching(root_dp_id, root_dp, root_graph) -> bool: # pyre-ignore
if root_dp_id in non_dispatching_branches:
return non_dispatching_branches[root_dp_id]
if type(root_dp) == _DummyIterDataPipe:
non_dispatching_branches[root_dp_id] = False
return False
non_dispatching_branches[root_dp_id] = True
for dp_id, (dp, src_graph) in root_graph.items():
if not _is_non_dispatching(dp_id, dp, src_graph):
non_dispatching_branches[root_dp_id] = False
# Do not break to go through all children
if not non_dispatching_branches[root_dp_id]:
# All children should have been added to non_dispatching_branches already
for dp_id, (dp, _) in root_graph.items():
if non_dispatching_branches[dp_id]:
dps.append(dp)
return non_dispatching_branches[root_dp_id]
if _is_non_dispatching(root_dp_id, root_dp, root_graph):
dps.append(root_dp)
return dps
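def _demo_find_non_dispatching_branches():
    # Illustrative sketch, not part of the original module: a graph containing
    # no ``_DummyIterDataPipe`` has exactly one non-dispatching branch, its
    # output DataPipe. The pipeline below is an assumption.
    from torchdata.datapipes.iter import IterableWrapper
    dp = IterableWrapper(range(3)).sharding_filter()
    branches = find_non_dispatching_branches(traverse_dps(dp))
    assert branches == [dp]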
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torchdata.dataloader2.utils.worker import process_init_fn, process_reset_fn, WorkerInfo
__all__ = [
"WorkerInfo",
"process_init_fn",
"process_reset_fn",
]
assert __all__ == sorted(__all__)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import threading
import time
class LocalQueue:
ops = 0
stored = 0
uid = 0
empty = 0
def __init__(self, name="unnamed"):
self.items = []
self.name = name
self.uid = LocalQueue.uid
LocalQueue.uid += 1
def put(self, item, block=True):
LocalQueue.ops += 1
LocalQueue.stored += 1
self.items.append(item)
def get(self, block=True, timeout=0):
# TODO(622): Add support of block and timeout arguments
LocalQueue.ops += 1
if not len(self.items):
LocalQueue.empty += 1
raise Exception("LocalQueue is empty")
LocalQueue.stored -= 1
return self.items.pop()
class ThreadingQueue:
def __init__(self, name="unnamed"):
self.lock = threading.Lock()
self.items = []
self.name = name
def put(self, item, block=True):
with self.lock:
self.items.append(item)
def get(self, block=True, timeout=0):
# TODO(623): Add support of block and timeout arguments
while True:
with self.lock:
if len(self.items) > 0:
return self.items.pop()
if not block:
raise Exception("Not available")
# TODO(624): Figure out what to do if nothing in the queue
time.sleep(0.000001)
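def _demo_queues():
    # Illustrative sketch, not part of the original module. Note that both
    # queues pop from the tail of the internal list, so multiple pending
    # items come back in LIFO rather than FIFO order.
    q = ThreadingQueue("demo")
    q.put("a")
    q.put("b")
    assert q.get() == "b"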
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import time
import types
import warnings
from collections import deque
from itertools import cycle
from typing import Callable, Deque, List, Optional
from torch.utils.data import IterDataPipe
from torchdata._utils import ExceptionWrapper
from torchdata.dataloader2 import communication
from torchdata.dataloader2.graph import DataPipe, find_dps, list_dps, traverse_dps
from torchdata.dataloader2.random import SeedGenerator
from torchdata.dataloader2.utils import process_reset_fn
DEFAULT_NON_BLOCKING_SLEEP = 0.001
__all__ = [
"DataPipeBehindQueues",
"EnsureNonBlockingDataPipe",
"InvalidStateResetRequired",
"NonBlocking",
"NotAvailable",
"QueueWrapper",
"default_not_available_hook",
]
def default_not_available_hook():
time.sleep(DEFAULT_NON_BLOCKING_SLEEP)
class NotAvailable(Exception):
pass
class InvalidStateResetRequired(Exception):
"""
    Raised by a DataPipe when it expects a reset request,
    for example ``RouterDataPipe`` expecting all workers to request a reset.
"""
pass
class TerminateRequired(Exception):
"""
    Raised by a DataPipe when it expects a terminate request, for example when it
    has received a terminate request from another source and is in the process
    of stopping.
"""
pass
class NonBlocking(IterDataPipe):
not_available_hook = default_not_available_hook
def __iter__(self):
self.reset_iterator()
return self
def __next__(self):
while True:
try:
return self.nonblocking_next()
except NotAvailable:
if NonBlocking.not_available_hook is not None:
NonBlocking.not_available_hook()
def nonblocking_next(self):
raise NotImplementedError("nonblocking_next is not implemented for %s" % self.__class__)
def reset_iterator(self):
raise NotImplementedError("reset_iterator is not implemented for %s" % self.__class__)
@staticmethod
def register_not_available_hook(hook_function):
NonBlocking.not_available_hook = hook_function
def EnsureNonBlockingDataPipe(validated_datapipe):
if not isinstance(validated_datapipe, IterDataPipe):
raise Exception("Not Iterable DataPipe " + str(validated_datapipe.__class__))
if isinstance(validated_datapipe, NonBlocking):
return validated_datapipe
if not hasattr(validated_datapipe, "_as_iterator"):
validated_datapipe._as_iterator = None # type: ignore[attr-defined]
if not hasattr(validated_datapipe, "nonblocking_next"):
def nonblocking_next(self):
if self._as_iterator is None:
self._as_iterator = iter(self)
return next(self._as_iterator)
validated_datapipe.nonblocking_next = types.MethodType( # type: ignore[attr-defined]
nonblocking_next, validated_datapipe
)
if not hasattr(validated_datapipe, "reset_iterator"):
def reset_iterator(self):
self._as_iterator = None
validated_datapipe.reset_iterator = types.MethodType( # type: ignore[attr-defined]
reset_iterator, validated_datapipe
)
return validated_datapipe
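def _demo_ensure_non_blocking():
    # Illustrative sketch, not part of the original module: a plain
    # ``IterDataPipe`` gains ``nonblocking_next``/``reset_iterator`` through
    # the monkey-patching above. The pipeline below is an assumption.
    from torchdata.datapipes.iter import IterableWrapper
    dp = EnsureNonBlockingDataPipe(IterableWrapper([1, 2, 3]))
    assert dp.nonblocking_next() == 1
    dp.reset_iterator()  # discards the cached iterator
    assert dp.nonblocking_next() == 1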
def _sync_recv(request_counter, msg):
if request_counter is not None:
request_counter.increment(msg)
# Make sure all loops have reached
while not request_counter.is_reached(msg):
yield True
def _sync_resp(request_counter, msg):
if request_counter is not None:
request_counter.reset(msg)
while request_counter.is_reached(msg):
yield True
def DataPipeBehindQueues(
source_datapipe,
protocol,
process_name,
loop_id,
worker_info,
custom_reset_fn,
blocking_request_get=False,
request_counter=None,
):
"""
    Indefinitely iterates over ``req_queue``, passing values from ``source_datapipe`` to ``res_queue``.
Request Types:
`ResetEpoch` - Call the `reset_epoch_fn` on the protocol's DataPipe and reset DataPipe iterator
`Terminate` - exits the infinite while loop
`GetNext` - returns the value from the DataPipe, and handles exceptions such as `StopIteration` as appropriate
`Limit` - Set limit to the DataPipe graph
    `Pause` - Pause the DataPipe graph
`Resume` - Resume the DataPipe graph
Args:
source_datapipe: DataPipe
protocol: ``IterDataPipeQueueProtocolServer`` that contains ``req_queue`` and ``res_queue``
process_name: Process name
loop_id: Loop ID
        worker_info: Worker info including worker id and number of workers
        custom_reset_fn: Optional function to further reset the DataPipe graph during each epoch reset
        blocking_request_get: determines if ``protocol.get_new_request`` will block
        request_counter: Optional counter to synchronize all loops that have received requests for
            reset/limit/pause/resume within the dispatching process. It guarantees that
            all loops start to reset the iterator and fetch the next element at the same time.
"""
if not isinstance(protocol, communication.protocol.IterDataPipeQueueProtocolServer):
raise Exception("Expecting IterDataPipeQueueProtocolServer, got", protocol)
source_datapipe = EnsureNonBlockingDataPipe(source_datapipe)
forever = True
while forever:
try:
# TODO: Non-blocking call is extremely slow here for python.mp, need to figure out a good workaround
request = protocol.get_new_request(block=blocking_request_get)
except communication.protocol.EmptyQueue:
yield True
continue
# TODO: Handle Error caused by requests other than GetNext and send it to main process
if isinstance(request, communication.messages.ResetEpochRequest):
yield from _sync_recv(request_counter, "reset_epoch")
distributed_shared_seed = request_counter is not None
if request_counter is None or loop_id == 0:
seed_generator = request.seed_generator
iter_reset_fn = request.iter_reset_fn
dispatching_dps = find_dps(traverse_dps(source_datapipe), _IterateQueueDataPipes)
for dp in dispatching_dps:
dp.reset_epoch(seed_generator, iter_reset_fn)
source_datapipe = process_reset_fn(
source_datapipe,
worker_info,
seed_generator,
distributed_shared_seed,
iter_reset_fn,
custom_reset_fn,
)
source_datapipe.reset_iterator()
yield from _sync_resp(request_counter, "reset_epoch")
protocol.response_reset_epoch()
yield True # Returns control
elif isinstance(request, communication.messages.LimitRequest):
yield from _sync_recv(request_counter, "limit")
if request_counter is None or loop_id == 0:
num_batches = request.num_batches
limit_fn = request.limit_fn
worker_num_batches = num_batches if request.worker_num_batches is None else request.worker_num_batches
# Send limit to the worker/dispatching process
dispatching_dps = find_dps(traverse_dps(source_datapipe), _IterateQueueDataPipes)
for dp in dispatching_dps:
dp.request_limit(num_batches, limit_fn, worker_num_batches)
if limit_fn is not None:
# Set limit to the DataPipe graph in worker/dispatching process
source_datapipe = limit_fn(source_datapipe, worker_num_batches)
yield from _sync_resp(request_counter, "limit")
protocol.response_limit()
yield True # Returns control
elif isinstance(request, communication.messages.PauseRequest):
yield from _sync_recv(request_counter, "pause")
if request_counter is None or loop_id == 0:
graph = traverse_dps(source_datapipe)
dp_list = list_dps(graph)
for dp in dp_list:
if hasattr(dp, "pause") and callable(dp.pause):
dp.pause()
dispatching_dps = find_dps(graph, _IterateQueueDataPipes)
for dp in dispatching_dps:
dp.request_pause(request.pause_fn)
if request.pause_fn is not None:
source_datapipe = request.pause_fn(source_datapipe)
yield from _sync_resp(request_counter, "pause")
protocol.response_pause()
yield True # Returns control
elif isinstance(request, communication.messages.ResumeRequest):
yield from _sync_recv(request_counter, "resume")
if request_counter is None or loop_id == 0:
if request.resume_fn is not None:
source_datapipe = request.resume_fn(source_datapipe)
graph = traverse_dps(source_datapipe)
# Send resume to the dispatching process
dispatching_dps = find_dps(graph, _IterateQueueDataPipes)
for dp in dispatching_dps:
dp.request_resume(request.resume_fn)
for dp in reversed(list_dps(graph)):
if hasattr(dp, "resume") and callable(dp.resume):
dp.resume()
yield from _sync_resp(request_counter, "resume")
protocol.response_resume()
yield True # Returns control
elif isinstance(request, communication.messages.TerminateRequest):
forever = False
dispatch_dps = find_dps(traverse_dps(source_datapipe), _IterateQueueDataPipes)
for dispatch_dp in dispatch_dps:
dispatch_dp.request_terminate()
protocol.response_terminate()
yield True # Returns control
elif isinstance(request, communication.messages.GetNextRequest):
while forever:
if protocol.is_paused():
protocol.response_stop_iteration()
warnings.warn(
"Cannot `GetNext` after `Pause` has been called. "
"`Resume` must be called first before additional elements can be yielded."
)
yield True
break
try:
value = source_datapipe.nonblocking_next()
except NotAvailable:
yield True
continue
except StopIteration:
protocol.response_stop_iteration()
yield True
break
except InvalidStateResetRequired:
protocol.response_invalid_state()
yield True
break
except Exception:
exc = ExceptionWrapper(where=f"in {process_name} {loop_id}")
protocol.response_worker_exception(exc)
return
protocol.response_next(value)
yield True # Returns control
break
else:
raise Exception("Unrecognized type of request received", request)
class QueueWrapper(NonBlocking):
"""
    Creates an IterDataPipe which sends requests to, and reads responses from, the DataLoader queues.
    The input is a ``ProtocolClient`` that contains the request queue and the response queue.
"""
def __init__(self, protocol, response_wait_time=0.00001):
if not isinstance(protocol, communication.protocol.IterDataPipeQueueProtocolClient):
raise Exception("Got", protocol)
self.protocol = protocol
self.counter = 0
self._stop_iteration = False
self._response_wait_time = response_wait_time
def request_reset_epoch(self, seed_generator, iter_reset_fn):
self._stop_iteration = False
self.counter = 0
self.protocol.request_reset_epoch(seed_generator, iter_reset_fn)
def _get_response(self, fn_name) -> None:
assert hasattr(self.protocol, fn_name) and callable(getattr(self.protocol, fn_name))
get_response_fn = getattr(self.protocol, fn_name)
while True:
try:
get_response_fn()
break
except communication.protocol.EmptyQueue:
if NonBlocking.not_available_hook is not None:
NonBlocking.not_available_hook()
def get_reset_epoch_response(self) -> None:
self._get_response("get_response_reset_epoch")
def request_limit(
self,
num_batches: Optional[int],
limit_fn: Optional[Callable[[DataPipe, Optional[int]], DataPipe]] = None,
worker_num_batches: Optional[int] = None,
) -> None:
self.protocol.request_limit(num_batches, limit_fn, worker_num_batches)
def get_limit_response(self) -> None:
self._get_response("get_response_limit")
def request_pause(self, pause_fn: Optional[Callable[[DataPipe], DataPipe]] = None) -> None:
self.protocol.request_pause(pause_fn)
def get_pause_response(self) -> None:
self._get_response("get_response_pause")
def request_resume(self, resume_fn: Optional[Callable[[DataPipe], DataPipe]] = None) -> None:
self.protocol.request_resume(resume_fn)
def get_resume_response(self) -> None:
self._get_response("get_response_resume")
def nonblocking_next(self):
if self._stop_iteration:
raise Exception("`next` or `nonblocking_next` called after receiving StopIteration")
if self.protocol.can_take_request():
self.protocol.request_next()
try:
response = self.protocol.get_response_next(block=True, timeout=self._response_wait_time)
except communication.protocol.EmptyQueue:
raise NotAvailable
if isinstance(response, communication.messages.StopIterationResponse):
self._stop_iteration = True
raise StopIteration
if isinstance(response, communication.messages.InvalidStateResponse):
raise NotAvailable
return response.value
class _IterateQueueDataPipes(IterDataPipe):
r"""
Takes in ``QueueWrapper``s and iterates through them in a round-robin manner to get batches one-by-one.
Typically, each worker has one ``QueueWrapper``.
"""
def __init__(self, datapipes):
# TODO(VitalyFedyunin): Consider combining _IterateQueueDataPipes and QueueWrapper
# into one class, which supports any number of queues.
for dp in datapipes:
if not isinstance(dp, communication.iter.QueueWrapper):
raise Exception("Source datapipes should be an instance of iter.QueueWrapper")
self.datapipes = datapipes
self._num_processes = len(datapipes)
self.res_buffers: List[Deque] = [deque() for _ in range(len(datapipes))]
self._terminated: bool = False
self._limit: Optional[int] = None
self._request_cnt: int = 0
def __iter__(self):
disabled_pipe = [False] * len(self.datapipes)
cnt_disabled_pipes = 0
total_req_cnt = 0
req_idx_cycle = cycle(range(self._num_processes))
req_idx = next(req_idx_cycle)
total_res_cnt = 0
res_idx_cycle = cycle(range(self._num_processes))
res_idx = next(res_idx_cycle)
while cnt_disabled_pipes < self._num_processes and not self._terminated:
            # Send a round of requests until the limit is reached (the limit may be smaller than the number of pipes)
for _ in range(self._num_processes):
if not disabled_pipe[req_idx]:
self.datapipes[req_idx].protocol.request_next()
self._request_cnt += 1
total_req_cnt += 1
req_idx = next(req_idx_cycle)
if self._limit is not None and self._request_cnt == self._limit:
break
# Receive responses from each of the workers with pending requests
while total_res_cnt < total_req_cnt and cnt_disabled_pipes < self._num_processes:
disabled = disabled_pipe[res_idx]
if not disabled:
if len(self.res_buffers[res_idx]):
response = self.res_buffers[res_idx].popleft()
else:
while not self._terminated:
try:
                                # Use a non-blocking get so that a termination request can still be noticed
response = self.datapipes[res_idx].protocol.get_response_next(block=False)
break
except communication.protocol.EmptyQueue:
time.sleep(DEFAULT_NON_BLOCKING_SLEEP)
if isinstance(response, communication.messages.InvalidStateResponse):
raise communication.iter.InvalidStateResetRequired
if isinstance(response, communication.messages.TerminateResponse):
raise communication.iter.TerminateRequired
if isinstance(response, communication.messages.WorkerExceptionResponse):
response.exc.reraise()
if self._terminated:
break
if isinstance(response, communication.messages.StopIterationResponse):
disabled_pipe[res_idx] = True
cnt_disabled_pipes += 1
disabled = True
req_idx = next(req_idx_cycle)
else:
# Only request if buffer is empty and has not reached the limit
if len(self.res_buffers[res_idx]) == 0 and (
self._limit is None or self._request_cnt < self._limit
):
self.datapipes[req_idx].protocol.request_next()
req_idx = next(req_idx_cycle)
self._request_cnt += 1
total_req_cnt += 1
total_res_cnt += 1
res_idx = next(res_idx_cycle)
if not disabled:
yield response.value
def reset_epoch(
self,
seed_generator: SeedGenerator,
iter_reset_fn: Optional[Callable[[DataPipe], DataPipe]],
):
self._request_cnt = 0
for dp in self.datapipes:
dp.protocol.discard_existing_request()
for worker_id, dp in enumerate(self.datapipes):
worker_seed_generator = seed_generator.spawn(worker_id)
dp.request_reset_epoch(worker_seed_generator, iter_reset_fn)
for dp in self.datapipes:
dp.get_reset_epoch_response()
def request_pause(self, pause_fn: Optional[Callable[[DataPipe], DataPipe]] = None) -> None:
# Store results of pending requests
for idx, dp in enumerate(self.datapipes):
if dp.protocol.waiting_for_response():
res = dp.protocol.get_response_next(block=True)
self.res_buffers[idx].append(res)
for dp in self.datapipes:
dp.request_pause(pause_fn)
for dp in self.datapipes:
dp.get_pause_response()
def request_resume(self, resume_fn: Optional[Callable[[DataPipe], DataPipe]] = None) -> None:
for dp in self.datapipes:
dp.request_resume(resume_fn)
for dp in self.datapipes:
dp.get_resume_response()
self._request_cnt = 0
def request_limit(
self,
num_batches: Optional[int],
limit_fn: Optional[Callable[[DataPipe, Optional[int]], DataPipe]] = None,
worker_num_batches: Optional[int] = None,
) -> None:
self._limit = num_batches if worker_num_batches is None else worker_num_batches
avg_num_batches = num_batches if num_batches is None else num_batches // self._num_processes
batch_remainder = 0 if num_batches is None else num_batches % self._num_processes
for idx, dp in enumerate(self.datapipes):
ext_batch = 1 if batch_remainder > idx else 0
wnb = None if avg_num_batches is None or worker_num_batches is not None else avg_num_batches + ext_batch
dp.request_limit(num_batches, limit_fn, wnb)
for dp in self.datapipes:
dp.get_limit_response()
def request_terminate(self):
self._terminated = True
for dp in self.datapipes:
dp.protocol.discard_existing_request()
for dp in self.datapipes:
dp.protocol.request_terminate()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from queue import Empty as EmptyException
from torchdata.dataloader2 import communication
class Protocol:
__slots__ = ("request_queue", "response_queue")
def __init__(self, request_queue, response_queue):
self.request_queue = request_queue
self.response_queue = response_queue
class ProtocolClient(Protocol):
"""
ProtocolClient takes charge of putting requests into req_queue and returning results from res_queue.
"""
_req_sent = None
def __init__(self, request_queue, response_queue):
self.request_queue = request_queue
self.response_queue = response_queue
self._req_sent = None
def can_take_request(self):
return self._req_sent is None
def waiting_for_response(self):
return self._req_sent is not None
def request_sent(self, request=True):
if not self.can_take_request():
raise Exception("Protocol only supports one request in the Queue")
self._req_sent = request
def request_served(self, result=None):
if not self.waiting_for_response():
raise Exception("Expected no pending requests, but something got served", result)
self._req_sent = None
def discard_existing_request(self):
if self.waiting_for_response():
response = self.response_queue.get(block=True)
self.request_served(response)
def request_limit(self, num_batches, limit_fn=None, worker_num_batches=None):
if not self.can_take_request():
raise Exception("Can not `limit` while we are still waiting response for previous request")
request = communication.messages.LimitRequest(num_batches, limit_fn, worker_num_batches)
self.request_queue.put(request)
self.request_sent(request)
def request_pause(self, pause_fn=None):
if not self.can_take_request():
raise Exception("Can not `pause` while we are still waiting response for previous request")
request = communication.messages.PauseRequest(pause_fn)
self.request_queue.put(request)
self.request_sent(request)
def request_resume(self, resume_fn=None):
if not self.can_take_request():
raise Exception("Can not `resume` while we are still waiting response for previous request")
request = communication.messages.ResumeRequest(resume_fn)
self.request_queue.put(request)
self.request_sent(request)
def request_terminate(self):
r"""
Drop the existing request and send TerminateRequest directly
"""
if not self.can_take_request():
self._req_sent = None
request = communication.messages.TerminateRequest()
self.request_queue.put(request)
self.request_sent(request)
class ProtocolServer(Protocol):
"""
ProtocolServer takes charge of getting requests from req_queue and fetching data from source datapipe.
"""
# TODO(966): Update the exceptions raised in this class to be more specific
_req_received = None
_paused = False # When `True`, prevents `GetNext` in `DataPipeBehindQueues`.
def __init__(self, request_queue, response_queue):
self.request_queue = request_queue
self.response_queue = response_queue
self._req_received = None
self._paused = False
def is_paused(self):
return self._paused
def have_pending_request(self):
return self._req_received is not None
def get_new_request(self, block=False):
if self.have_pending_request():
raise Exception("Trying to get next request, while having one un-served")
try:
response = self.request_queue.get(block=block)
except EmptyException:
raise EmptyQueue("queue is empty")
self._req_received = response
return response
# TODO(626): Validate supported requests
def response_terminate(self):
        if not self.have_pending_request():
            raise Exception("Attempting to reply without a pending request")
        if not isinstance(self._req_received, communication.messages.TerminateRequest):
            raise Exception("Replying with `terminate` status to another type of message")
self.response_queue.put(communication.messages.TerminateResponse())
self._req_received = None
def response_reset_epoch(self):
        if not self.have_pending_request():
            raise Exception("Attempting to reply without a pending request")
        if not isinstance(self._req_received, communication.messages.ResetEpochRequest):
            raise Exception("Replying with `reset_epoch` status to another type of message")
self.response_queue.put(communication.messages.ResetEpochResponse())
self._req_received = None
def response_limit(self):
        if not self.have_pending_request():
            raise Exception("Attempting to reply without a pending request")
        if not isinstance(self._req_received, communication.messages.LimitRequest):
            raise Exception("Replying with `limit` status to another type of message")
self.response_queue.put(communication.messages.LimitResponse())
self._req_received = None
def response_pause(self):
        if not self.have_pending_request():
            raise Exception("Attempting to reply without a pending request")
        if not isinstance(self._req_received, communication.messages.PauseRequest):
            raise Exception("Replying with `pause` status to another type of message")
self._paused = True
self.response_queue.put(communication.messages.PauseResponse())
self._req_received = None
def response_resume(self):
        if not self.have_pending_request():
            raise Exception("Attempting to reply without a pending request")
        if not isinstance(self._req_received, communication.messages.ResumeRequest):
            raise Exception("Replying with `resume` status to another type of message")
self._paused = False
self.response_queue.put(communication.messages.ResumeResponse())
self._req_received = None
def response_worker_exception(self, exception):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.WorkerExceptionResponse(exception))
self._req_received = None
class MapDataPipeQueueProtocolServer(ProtocolServer):
def response_item(self, key, value):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.GetItemResponse(key, value))
self._req_received = None
def response_len(self, size):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.LenResponse(size))
self._req_received = None
def response_index_out_of_bound(self):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.StopIterationResponse())
self._req_received = None
class MapDataPipeQueueProtocolClient(ProtocolClient):
def request_len(self):
if not self.can_take_request():
raise Exception("Can not request len while we are still waiting response for previous request")
request = communication.messages.LenRequest()
self.request_queue.put(request)
self.request_sent(request)
def request_reset_epoch(self, seed_generator, iter_reset_fn):
if not self.can_take_request():
raise Exception("Can not reset while we are still waiting response for previous request")
request = communication.messages.ResetEpochRequest(seed_generator, iter_reset_fn)
self.request_queue.put(request)
self.request_sent(request)
def request_item(self, index):
if not self.can_take_request():
raise Exception("Can not request item while we are still waiting response for previous request")
request = communication.messages.GetItemRequest(index)
self.request_queue.put(request)
self.request_sent(request)
def get_response_len(self, block=False, timeout=None):
if not self.waiting_for_response():
raise Exception("Can not expect any response without submitted request")
try:
response = self.response_queue.get(block=block, timeout=timeout)
        except EmptyException:  # `queue.get` raises `queue.Empty` on timeout, not `TimeoutError`
raise EmptyQueue("queue is empty")
self.request_served(response)
if not isinstance(response, communication.messages.LenResponse):
raise Exception("Invalid response received")
return response
def get_response_item(self, block=False, timeout=None):
if not self.waiting_for_response():
raise Exception("Can not expect any response without submitted request")
try:
response = self.response_queue.get(block=block, timeout=timeout)
        except EmptyException:  # `queue.get` raises `queue.Empty` on timeout, not `TimeoutError`
raise EmptyQueue("queue is empty")
self.request_served(response)
# if not isinstance(response, communication.messages.GetItemResponse):
# raise Exception('Invalid response received')
return response
class EmptyQueue(Exception):
pass
class IterDataPipeQueueProtocolServer(ProtocolServer):
def response_next(self, value):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.GetNextResponse(value))
self._req_received = None
def response_stop_iteration(self):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.StopIterationResponse())
self._req_received = None
def response_invalid_state(self):
if not self.have_pending_request():
raise Exception("Attempting to reply with pending request")
self.response_queue.put(communication.messages.InvalidStateResponse())
self._req_received = None
class IterDataPipeQueueProtocolClient(ProtocolClient):
def request_reset_epoch(self, seed_generator, iter_reset_fn):
if not self.can_take_request():
raise Exception("Can not reset while we are still waiting response for previous request")
request = communication.messages.ResetEpochRequest(seed_generator, iter_reset_fn)
self.request_queue.put(request)
self.request_sent(request)
def request_next(self):
if not self.can_take_request():
raise Exception("Can not request next item while we are still waiting response for previous request")
request = communication.messages.GetNextRequest()
self.request_queue.put(request)
self.request_sent(request)
def get_response_reset_epoch(self, block=False):
try:
response = self.response_queue.get(block=block)
except EmptyException:
raise EmptyQueue("queue is empty")
self.request_served(response)
if not isinstance(response, communication.messages.ResetEpochResponse):
raise Exception("Invalid response received")
def get_response_limit(self, block=False):
try:
response = self.response_queue.get(block=block)
except EmptyException:
raise EmptyQueue("queue is empty")
self.request_served(response)
if not isinstance(response, communication.messages.LimitResponse):
raise Exception("Invalid response received when expecting `LimitResponse`")
def get_response_pause(self, block=False):
try:
response = self.response_queue.get(block=block)
except EmptyException:
raise EmptyQueue("queue is empty")
self.request_served(response)
if not isinstance(response, communication.messages.PauseResponse):
raise Exception("Invalid response received when expecting `PauseResponse`")
def get_response_resume(self, block=False):
try:
response = self.response_queue.get(block=block)
except EmptyException:
raise EmptyQueue("queue is empty")
self.request_served(response)
if not isinstance(response, communication.messages.ResumeResponse):
raise Exception("Invalid response received when expecting `ResumeResponse`")
def get_response_next(self, block=False, timeout=None):
if not self.waiting_for_response():
raise Exception("Can not expect any response without submitted request")
try:
response = self.response_queue.get(block=block, timeout=timeout)
except EmptyException:
raise EmptyQueue("queue is empty")
self.request_served(response)
# TODO(629): Add possible response types validation here
return response
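def _demo_iter_protocol_roundtrip():
    # Editor's illustrative sketch (hypothetical helper, not part of this
    # module): one GetNext round trip, polling for the response the way the
    # client-side datapipes do. That GetNextResponse exposes ``.value`` is an
    # assumption; the response object is built by ``response_next`` above.
    import queue
    req_q, res_q = queue.Queue(), queue.Queue()
    client = IterDataPipeQueueProtocolClient(req_q, res_q)
    server = IterDataPipeQueueProtocolServer(req_q, res_q)
    client.request_next()               # puts GetNextRequest on req_q
    server.get_new_request(block=True)  # server marks it as pending
    server.response_next(123)           # puts GetNextResponse on res_q
    while True:
        try:
            response = client.get_response_next(block=True, timeout=0.001)
            break
        except EmptyQueue:
            pass                        # nothing yet; poll again
    assert response.value == 123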
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from . import eventloop, iter, map, messages, protocol, queue
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import time
from itertools import zip_longest
from typing import Dict, List
import torch
from torch.utils.data import IterDataPipe, MapDataPipe
from torchdata.dataloader2 import communication
from torchdata.dataloader2.graph._serialization import extract_wrapper
try:
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
HAS_DILL = True
except ImportError:
HAS_DILL = False
__all__ = [
"DataPipeToQueuesLoop",
"CreateProcessForDataPipeline",
"CreateProcessForMultipleDataPipelines",
]
class _RequestCounter:
r"""
_RequestCounter is used to synchronize between eventloops within the dispatching
process. It guarantees to only handle the limit/pause/reset_epoch/resume request
util all loops have received the same message.
"""
exp_cnt: int
_keys: List[str] = ["limit", "pause", "reset_epoch", "resume"]
_cnt: Dict[str, int]
_reached: Dict[str, bool]
def __init__(self, exp_cnt: int):
self.exp_cnt = exp_cnt
self._cnt = {k: 0 for k in self._keys}
self._reached = {k: False for k in self._keys}
def increment(self, key: str) -> None:
assert key in self._reached
self._cnt[key] += 1
assert self._cnt[key] <= self.exp_cnt
if self._cnt[key] == self.exp_cnt:
self._reached[key] = True
def is_reached(self, key: str) -> bool:
assert key in self._reached
return self._reached[key]
def reset(self, key: str) -> None:
assert key in self._reached and self._reached[key]
assert self._cnt[key] >= 1
self._cnt[key] -= 1
if self._cnt[key] == 0:
self._reached[key] = False
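def _demo_request_counter():
    # Editor's illustrative sketch (hypothetical helper, not used by the
    # library): two loops coordinating a "pause" request. ``is_reached`` only
    # flips to True once every loop has incremented, and each loop resets its
    # own contribution after acting on the request.
    counter = _RequestCounter(exp_cnt=2)
    counter.increment("pause")              # loop 0 received PauseRequest
    assert not counter.is_reached("pause")  # loop 1 has not checked in yet
    counter.increment("pause")              # loop 1 received PauseRequest
    assert counter.is_reached("pause")      # both loops may now pause
    counter.reset("pause")
    counter.reset("pause")
    assert not counter.is_reached("pause")  # ready for the next request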
def MultipleDataPipesToQueuesLoop(
source_datapipes, req_queues, res_queues, process_name, worker_info, call_on_process_init=None, custom_reset_fn=None
):
r"""
Set the appropriate pipes and protocol server type, and create a loop over multiple datapipes
with the protocol server in a non-blocking manner.
    Args:
        source_datapipes: DataPipes being iterated in the dispatching process
        req_queues: Multiprocessing queues providing requests from the worker processes
        res_queues: Multiprocessing queues sending results to the worker processes
        process_name: The name of the process (used for logging and exception handling)
        worker_info: Worker information (worker id and number of workers)
        call_on_process_init: Not supported by the dispatching process for now
custom_reset_fn: Optional callable function to reset the DataPipe.
"""
assert call_on_process_init is None, "``MultipleDataPipesToQueuesLoop`` does not support call_on_process_init"
num_loops = len(source_datapipes)
assert num_loops == len(req_queues) and num_loops == len(
res_queues
), "``MultipleDataPipesToQueuesLoop`` requires the same number of datapipes, request queues and response queues"
torch.set_num_threads(1)
loops = []
request_counter = _RequestCounter(num_loops)
loop_id = 0
for source_datapipe, req_queue, res_queue in zip(source_datapipes, req_queues, res_queues):
loops.append(
_create_datapipe_queue_loop(
source_datapipe,
req_queue,
res_queue,
process_name,
loop_id,
worker_info,
custom_reset_fn,
blocking_request_get=False,
request_counter=request_counter,
)
) # Non-blocking request with reset counters
loop_id += 1
# Using `zip_longest` to guarantee the process is terminated only when
# all loops have received `TerminateRequest`
for _ in zip_longest(*loops):
# time.sleep to make Python switch context to get/send message in mp.Queue
        # TODO(ejguan): Microbenchmarks on a synthetic non-replicable case show that
        # sleep performs similarly to pass. A more comprehensive benchmark in a
        # real-world scenario is needed.
time.sleep(0)
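def _demo_zip_longest_drain():
    # Editor's sketch of why ``zip_longest`` (rather than ``zip``) is used
    # above: exhausted loops pad with the fillvalue while longer ones keep
    # yielding, so the dispatching process only exits once *every* loop has
    # finished, i.e. has received TerminateRequest.
    from itertools import zip_longest
    short_loop, long_loop = iter([1, 2]), iter([10, 20, 30])
    assert list(zip_longest(short_loop, long_loop)) == [(1, 10), (2, 20), (None, 30)]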
def DataPipeToQueuesLoop(
source_datapipe, req_queue, res_queue, process_name, worker_info, call_on_process_init=None, custom_reset_fn=None
):
r"""
    Initialize the DataPipe with the given ``call_on_process_init`` function, set the
    appropriate pipe and protocol server type, and create a loop with the protocol server.
Args:
source_datapipe: DataPipe being iterated in the worker process
req_queue: Multiprocessing queue providing requests from the main process
res_queue: Multiprocessing queue sending results to the main process
        process_name: The name of the process (used for logging and exception handling)
        worker_info: Worker information (worker id and number of workers)
        call_on_process_init: Optional callable invoked at worker process initialization time.
            Users can provide it to modify the DataPipe graph in the worker process.
custom_reset_fn: Optional callable function to reset the DataPipe.
"""
# Extract Serialization Wrapper
source_datapipe = extract_wrapper(source_datapipe)
if call_on_process_init is not None:
source_datapipe = call_on_process_init(source_datapipe)
torch.set_num_threads(1)
loop = _create_datapipe_queue_loop(
source_datapipe,
req_queue,
res_queue,
process_name,
worker_info.worker_id,
worker_info,
custom_reset_fn,
blocking_request_get=True,
)
for _ in loop:
pass
def _create_datapipe_queue_loop(
source_datapipe,
req_queue,
res_queue,
process_name,
loop_id,
worker_info,
custom_reset_fn=None,
blocking_request_get=True,
request_counter=None,
):
if isinstance(source_datapipe, IterDataPipe):
pipe_type = communication.iter
protocol_type = communication.protocol.IterDataPipeQueueProtocolServer
elif isinstance(source_datapipe, MapDataPipe):
pipe_type = communication.map # type: ignore[misc]
protocol_type = communication.protocol.MapDataPipeQueueProtocolServer # type: ignore[assignment]
else:
raise Exception("Only supports IterDataPipe or MapDataPipe, got", source_datapipe)
return pipe_type.DataPipeBehindQueues(
source_datapipe,
protocol_type(req_queue, res_queue),
process_name=process_name,
loop_id=loop_id,
worker_info=worker_info,
custom_reset_fn=custom_reset_fn,
blocking_request_get=blocking_request_get,
request_counter=request_counter,
)
def CreateProcessForDataPipeline(
multiprocessing_ctx, datapipe, process_name, worker_info, call_on_process_init=None, custom_reset_fn=None
):
r"""
Given a DataPipe, creates a new process with ``DataPipeToQueuesLoop`` as target,
and returns ``(process, req_queue, res_queue)``.
"""
req_queue = multiprocessing_ctx.Queue()
res_queue = multiprocessing_ctx.Queue()
process = multiprocessing_ctx.Process(
target=DataPipeToQueuesLoop,
args=(datapipe, req_queue, res_queue, process_name, worker_info, call_on_process_init, custom_reset_fn),
)
return process, req_queue, res_queue
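def _demo_create_process(datapipe, worker_info):
    # Editor's usage sketch (hypothetical helper): spawn a worker process for a
    # single DataPipe, then shut it down via TerminateRequest. A full client
    # would also drain ``res_q`` and wait for TerminateResponse; this shows the
    # minimal happy path only.
    import multiprocessing as mp
    ctx = mp.get_context("spawn")
    process, req_q, res_q = CreateProcessForDataPipeline(ctx, datapipe, "demo_worker", worker_info)
    process.start()
    req_q.put(communication.messages.TerminateRequest())
    process.join()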
def CreateProcessForMultipleDataPipelines(
multiprocessing_ctx, datapipes, process_name, worker_info, custom_reset_fn=None
):
r"""
Given a DataPipe, creates a new process with ``MultipleDataPipesToQueuesLoop`` as target,
and returns ``(process, [req_queue_0, ...], [res_queue_0, ...])``.
"""
req_queues = []
res_queues = []
for _ in datapipes:
req_queues.append(multiprocessing_ctx.Queue())
res_queues.append(multiprocessing_ctx.Queue())
process = multiprocessing_ctx.Process(
target=MultipleDataPipesToQueuesLoop,
args=(datapipes, req_queues, res_queues, process_name, worker_info, custom_reset_fn),
)
return process, req_queues, res_queues
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import time
import types
from torch.utils.data import MapDataPipe
from torchdata._utils import ExceptionWrapper
from torchdata.dataloader2 import communication
from torchdata.dataloader2.utils import process_reset_fn
DEFAULT_NON_BLOCKING_SLEEP = 0.001
__all__ = [
"DataPipeBehindQueues",
"EnsureNonBlockingMapDataPipe",
"NonBlockingMap",
"NotAvailable",
"QueueWrapperForMap",
"default_not_available_hook",
]
def default_not_available_hook():
time.sleep(DEFAULT_NON_BLOCKING_SLEEP)
class NotAvailable(Exception):
pass
class NonBlockingMap(MapDataPipe):
not_available_hook = default_not_available_hook
def __getitem__(self, index):
while True:
try:
return self.nonblocking_getitem(index)
except NotAvailable:
if NonBlockingMap.not_available_hook is not None:
NonBlockingMap.not_available_hook()
    def __len__(self):
        # Retry like __getitem__ does; otherwise a NotAvailable would silently
        # make __len__ return None.
        while True:
            try:
                return self.nonblocking_len()
            except NotAvailable:
                if NonBlockingMap.not_available_hook is not None:
                    NonBlockingMap.not_available_hook()
def nonblocking_len(self):
raise NotImplementedError("nonblocking_len is not implemented for %s" % self.__class__)
def nonblocking_getitem(self, index):
raise NotImplementedError("nonblocking_getitem is not implemented for %s" % self.__class__)
@staticmethod
def register_not_available_hook(hook_function):
NonBlockingMap.not_available_hook = hook_function
def EnsureNonBlockingMapDataPipe(validated_datapipe):
if not isinstance(validated_datapipe, MapDataPipe):
raise Exception(f"Not Map DataPipe - got {validated_datapipe.__class__}")
if isinstance(validated_datapipe, NonBlockingMap):
return validated_datapipe
if not hasattr(validated_datapipe, "nonblocking_len"):
def nonblocking_len(self):
return self.__len__()
validated_datapipe.nonblocking_len = types.MethodType( # type: ignore[attr-defined]
nonblocking_len, validated_datapipe
)
if not hasattr(validated_datapipe, "nonblocking_getitem"):
def nonblocking_getitem(self, index):
return self.__getitem__(index)
validated_datapipe.nonblocking_getitem = types.MethodType( # type: ignore[attr-defined]
nonblocking_getitem, validated_datapipe
)
return validated_datapipe
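def _demo_ensure_nonblocking_map():
    # Editor's illustrative sketch (hypothetical helper): a plain MapDataPipe
    # gains ``nonblocking_len``/``nonblocking_getitem`` methods that delegate
    # to ``__len__``/``__getitem__``, so the queue loop below can treat every
    # datapipe uniformly. ``_TinyMap`` is a stand-in defined just for the demo.
    class _TinyMap(MapDataPipe):
        def __init__(self, data):
            self.data = data
        def __getitem__(self, index):
            return self.data[index]
        def __len__(self):
            return len(self.data)
    dp = EnsureNonBlockingMapDataPipe(_TinyMap(["a", "b", "c"]))
    assert dp.nonblocking_len() == 3
    assert dp.nonblocking_getitem(1) == "b"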
def DataPipeBehindQueues(
source_datapipe,
protocol,
process_name,
loop_id,
worker_info,
custom_reset_fn,
blocking_request_get=False,
request_counter=None,
):
"""
    Indefinitely iterates over the ``req_queue``, passing values from ``source_datapipe`` to the ``res_queue``.
    Args:
        source_datapipe: DataPipe
        protocol: ``MapDataPipeQueueProtocolServer`` that contains ``req_queue`` and ``res_queue``
        process_name: Process name
        loop_id: Loop ID
        worker_info: Worker information (worker id and number of workers)
        custom_reset_fn: Optional callable function to reset the DataPipe
blocking_request_get: determines if ``protocol.get_new_request`` will block
"""
if not isinstance(protocol, communication.protocol.MapDataPipeQueueProtocolServer):
raise Exception("Expecting MapDataPipeQueueProtocolServer, got", protocol)
source_datapipe = EnsureNonBlockingMapDataPipe(source_datapipe)
forever = True
while forever:
try:
# TODO: non-blocking call is extremely slow here for python.mp, need to figure out a good workaround
request = protocol.get_new_request(block=blocking_request_get)
except communication.protocol.EmptyQueue:
yield True
continue
if isinstance(request, communication.messages.ResetEpochRequest):
distributed_shared_seed = request_counter is not None
source_datapipe = process_reset_fn(
source_datapipe,
worker_info,
request.seed_generator,
distributed_shared_seed,
request.iter_reset_fn,
custom_reset_fn,
)
protocol.response_reset_epoch()
elif isinstance(request, communication.messages.TerminateRequest):
forever = False
protocol.response_terminate()
elif isinstance(request, communication.messages.LenRequest):
size = source_datapipe.nonblocking_len()
protocol.response_len(size)
elif isinstance(request, communication.messages.GetItemRequest):
while forever:
try:
value = source_datapipe.nonblocking_getitem(request.key)
except NotAvailable:
yield True
continue
except IndexError:
# Alternatively, we can just allow the underlying DataPipe to throw an exception?
protocol.response_index_out_of_bound()
yield True
break
except Exception:
exc = ExceptionWrapper(where=f"in {process_name} {loop_id}")
protocol.response_worker_exception(exc)
break
protocol.response_item(request.key, value)
yield True # Returns control
break
else:
raise Exception("Unrecognized type of request received", request)
class QueueWrapperForMap(NonBlockingMap):
"""
Creates map.DataPipe which reads data from the DataLoader.Queue
"""
def __init__(self, protocol, response_wait_time=0.00001):
if not isinstance(protocol, communication.protocol.MapDataPipeQueueProtocolClient):
raise Exception("Got", protocol)
self.protocol = protocol
self.counter = 0
self._stop_iteration = False
self._response_wait_time = response_wait_time
def nonblocking_getitem(self, index):
if self._stop_iteration:
raise Exception("`getitem` or `nonblocking_getitem` called after receiving StopIteration")
if self.protocol.can_take_request():
self.protocol.request_item(index)
try:
response = self.protocol.get_response_item(block=True, timeout=self._response_wait_time)
except communication.protocol.EmptyQueue:
raise NotAvailable
if isinstance(response, communication.messages.StopIterationResponse):
self._stop_iteration = True
            raise IndexError(f"Index {index} is out of bounds.")
if isinstance(response, communication.messages.WorkerExceptionResponse):
self._stop_iteration = True
response.exc.reraise()
return response.key, response.value
def nonblocking_len(self):
if self._stop_iteration:
raise Exception("`len` or `nonblocking_len` called after receiving StopIteration")
if self.protocol.can_take_request():
self.protocol.request_len()
try:
response = self.protocol.get_response_len(block=True, timeout=self._response_wait_time)
except communication.protocol.EmptyQueue:
raise NotAvailable
return response.len
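def _demo_queue_wrapper_for_map():
    # Editor's illustrative sketch (hypothetical helper): QueueWrapperForMap
    # paired with an inline server loop. ``nonblocking_getitem`` raises
    # NotAvailable until the server has responded, so we step the loop between
    # retries -- the same dance NonBlockingMap.__getitem__ performs via its
    # not_available_hook. ``_TinyMap`` is a stand-in datapipe for the demo.
    import queue
    class _TinyMap(MapDataPipe):
        def __init__(self, data):
            self.data = data
        def __getitem__(self, index):
            return self.data[index]
        def __len__(self):
            return len(self.data)
    req_q, res_q = queue.Queue(), queue.Queue()
    server = communication.protocol.MapDataPipeQueueProtocolServer(req_q, res_q)
    client = communication.protocol.MapDataPipeQueueProtocolClient(req_q, res_q)
    loop = DataPipeBehindQueues(
        _TinyMap(["a", "b"]), server,
        process_name="inline", loop_id=0, worker_info=None, custom_reset_fn=None,
    )
    dp = QueueWrapperForMap(client)
    while True:
        try:
            key, value = dp.nonblocking_getitem(0)
            break
        except NotAvailable:
            next(loop)  # let the server consume the request and respond
    assert (key, value) == (0, "a")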
|