# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torchdata._utils import ExceptionWrapper
class DataLoaderQueueMessage:
pass
class Request(DataLoaderQueueMessage):
pass
class Response(DataLoaderQueueMessage):
pass
class ResetEpochRequest(Request):
__slots__ = ("seed_generator", "iter_reset_fn")
def __init__(self, seed_generator, iter_reset_fn):
self.seed_generator = seed_generator
self.iter_reset_fn = iter_reset_fn
class ResetEpochResponse(Response):
pass
class LimitRequest(Request):
__slots__ = ("num_batches", "limit_fn", "worker_num_batches")
def __init__(self, num_batches, limit_fn, worker_num_batches=None):
self.num_batches = num_batches
self.limit_fn = limit_fn
self.worker_num_batches = worker_num_batches
class LimitResponse(Response):
pass
class PauseRequest(Request):
__slots__ = "pause_fn"
def __init__(self, pause_fn):
self.pause_fn = pause_fn
class PauseResponse(Response):
pass
class ResumeRequest(Request):
__slots__ = "resume_fn"
def __init__(self, resume_fn):
self.resume_fn = resume_fn
class ResumeResponse(Response):
pass
class TerminateRequest(Request):
pass
class TerminateResponse(Response):
pass
class LenRequest(Request):
pass
class LenResponse(Response):
__slots__ = "len"
def __init__(self, len):
self.len = len
class GetItemRequest(Request):
__slots__ = "key"
def __init__(self, key):
self.key = key
class GetItemResponse(Response):
__slots__ = ("key", "value")
def __init__(self, key, value):
self.key = key
self.value = value
class GetNextRequest(Request):
pass
class GetNextResponse(Response):
__slots__ = "value"
def __init__(self, value):
self.value = value
class StopIterationResponse(Response):
pass
class InvalidStateResponse(Response):
"""
Returned by a DataPipe when it is expecting to receive a reset request,
for example, RouterDataPipe expecting all workers to request a reset
"""
pass
class WorkerExceptionResponse(Response):
__slots__ = "exc"
def __init__(self, exc: ExceptionWrapper):
self.exc: ExceptionWrapper = exc
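# --- Hedged illustration, not part of the original module ---
# A minimal sketch of the request/response protocol these messages are designed
# for: a worker loop reads a Request from its queue and puts back the matching
# Response. The `request` and `it` arguments here are assumptions for illustration.
def _example_worker_step(request, it):
    if isinstance(request, GetNextRequest):
        try:
            return GetNextResponse(next(it))
        except StopIteration:
            return StopIterationResponse()
    if isinstance(request, TerminateRequest):
        return TerminateResponse()
    return InvalidStateResponse()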
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Tuple
# Note [Philox Engine implementation]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Refer to: http://www.thesalmons.org/john/random123/papers/random123sc11.pdf for details regarding the engine.
# Using Philox4×32-10 for the sake of performance, randomness and crush-resistance.
# The following code could be optimized into C++ bindings
# Philox Constants
kPhilox10A = 0x9E3779B9
kPhilox10B = 0xBB67AE85
kPhiloxSA = 0xD2511F53
kPhiloxSB = 0xCD9E8D57
MASK_32b = 0xFFFFFFFF
MASK_64b = 0xFFFFFFFFFFFFFFFF
HALF_UINT64 = 0x8000000000000000
def mulhilo32(a: int, b: int) -> Tuple[int, int]:
product = a * b
return product & MASK_32b, (product >> 32) & MASK_32b
def single_round(key: List[int], ctr: List[int]) -> List[int]:
lo0, hi0 = mulhilo32(kPhiloxSA, ctr[0])
lo1, hi1 = mulhilo32(kPhiloxSB, ctr[2])
res = [0] * 4
res[0] = hi1 ^ ctr[1] ^ key[0]
res[1] = lo1
res[2] = hi0 ^ ctr[3] ^ key[1]
res[3] = lo0
return res
def philox_10_round(key: Tuple[int, int], ctr: List[int]) -> List[int]:
_key = list(key)
_ctr = list(ctr)
for _ in range(9):
_ctr = single_round(_key, _ctr)
_key[0] = (_key[0] + kPhilox10A) & MASK_32b
_key[1] = (_key[1] + kPhilox10B) & MASK_32b
return single_round(_key, _ctr)
class PhiloxEngine:
r"""
Philox is a counter-based RNG with certain properties:
- High performance
- Statistical randomness
- Crush resistance
- Bijection
Generate new seeds or spawn parallel seeds for worker processes.
"""
def __init__(self, seed: Optional[int] = None) -> None:
self._seed: Tuple[int, int] = (-1, -1)
self._ctr: List[int] = [0] * 4
self._generated_seeds: Optional[List[int]] = None
self._spawn_seed: Tuple[int, int] = (-1, -1)
if seed is not None:
self.seed(seed)
def _incr_ctr(self) -> None:
for i in range(3):
self._ctr[i] += 1
if self._ctr[i] <= MASK_32b:
return
self._ctr[i] = 0
self._ctr[3] += 1
# if overflow (2^128) has occurred during addition, back to the initial counter
if self._ctr[3] > MASK_32b:
self._ctr[3] = 0
self._incr_ctr()
def seed(self, seed: int) -> "PhiloxEngine":
seed = seed & MASK_64b
# Convert seed from int64 to uint64
if seed < 0:
seed = seed + HALF_UINT64
lo = seed & MASK_32b
hi = (seed >> 32) & MASK_32b
self._seed = (lo, hi)
# Reset counter and cached seed
self._ctr = [0] * 4
self._generated_seeds = None
# Generate the spawn seed
self._spawn_seed = tuple(philox_10_round(self._seed, self._ctr)[:2]) # type: ignore[assignment]
self._incr_ctr()
return self
def generate(self) -> int:
assert self._seed != (-1, -1), "Please provide seed to PhiloxEngine"
if self._generated_seeds is None:
self._generated_seeds = philox_10_round(self._seed, self._ctr)
self._incr_ctr()
res = self._generated_seeds[:2]
else:
res = self._generated_seeds[2:]
self._generated_seeds = None
return (res[1] << 32) + res[0]
def clone(self) -> "PhiloxEngine":
new_engine = PhiloxEngine(None)
new_engine._seed = self._seed # immutable tuple
new_engine._ctr = self._ctr.copy()
new_engine._generated_seeds = None if self._generated_seeds is None else self._generated_seeds.copy()
new_engine._spawn_seed = self._spawn_seed # immutable tuple
return new_engine
def spawn(self, index: int) -> "PhiloxEngine":
assert index >= 0, f"Expected a non-negative value for spawn, but found {index}"
assert self._spawn_seed != (-1, -1), "Please provide seed to PhiloxEngine"
offset = index % 2
val = index if offset == 0 else index - 1
ctr = []
for _ in range(4):
ctr.append(val & MASK_32b)
val = val >> 32
res = philox_10_round(self._spawn_seed, ctr)[offset * 2 : offset * 2 + 2]
sub_seed = (res[1] << 32) + res[0]
return PhiloxEngine(sub_seed)
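# --- Hedged usage sketch, not part of the original module ---
# Illustrates the intended flow: seed the engine, draw deterministic uint64
# values, and spawn an independent per-worker engine. Printed values are not claimed.
if __name__ == "__main__":
    engine = PhiloxEngine(seed=2023)
    first = engine.generate()              # first uint64 derived from the seed
    same_next = engine.clone().generate()  # a clone yields the engine's next value
    worker_engine = engine.spawn(0)        # independent stream for worker 0
    print(first, same_next, worker_engine.generate())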
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torchdata.dataloader2.random.distributed import dist_share_seed
from torchdata.dataloader2.random.seed_generator import SeedGenerator
__all__ = ["SeedGenerator", "dist_share_seed"]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
import torch.distributed as dist
_HALF_UINT64 = 0x8000000000000000
def dist_share_seed(seed: int, process_group: Optional[dist.ProcessGroup] = None) -> int:
# Convert uint64 to int64 to prevent overflow for integer Tensor
seed -= _HALF_UINT64
shared_seed = torch.tensor(seed, dtype=torch.int64)
dist.broadcast(shared_seed, src=0, group=process_group)
# Revert int64 back to uint64
return int(shared_seed.item()) + _HALF_UINT64
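# --- Hedged usage sketch, not part of the original module ---
# Assumes a torch.distributed process group has already been initialized
# (e.g. via dist.init_process_group). Every rank calls dist_share_seed with its
# local seed; the value from rank 0 is returned on all ranks, keeping them in sync.
def _example_share_seed() -> int:
    local_seed = 12345 if dist.get_rank() == 0 else 0  # hypothetical per-rank seed
    return dist_share_seed(local_seed)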
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Tuple
import torch
from torchdata.dataloader2.random._philox import PhiloxEngine
_UINT64_UPPER_BOUND = 2 ** 64
def _get_torch_random_seed():
iinfo = torch.iinfo(torch.int64)
seed = torch.randint(iinfo.min, iinfo.max, ()).item()
# Convert int64 to uint64
seed += 2 ** 63
return seed
class SeedGenerator:
r"""
``SeedGenerator`` is used to generate seeds in a deterministic and randomized manner
based on a user-provided initial seed. Internally, it utilizes a counter-based PRNG
called Philox to generate random seeds.
Args:
seed: The base seed to generate random seeds
"""
_shared_rng: PhiloxEngine
_worker_rng: PhiloxEngine
def __init__(self, seed: Optional[int] = None, _rngs: Optional[Tuple[PhiloxEngine, PhiloxEngine]] = None) -> None:
if seed is not None and _rngs is not None:
raise ValueError("SeedGenerator doesn't allow both seed and _rng specified at the same time")
if _rngs is None:
self._shared_rng = PhiloxEngine()
self._worker_rng = PhiloxEngine()
self.seed(seed)
else:
assert len(_rngs) == 2
self._shared_rng, self._worker_rng = _rngs
def seed(self, seed: Optional[int] = None) -> None:
r"""
Re-seed the ``SeedGenerator``. When ``None`` is provided, a random seed is generated
by the default PyTorch RNG.
"""
if seed is None:
seed = _get_torch_random_seed()
if seed >= _UINT64_UPPER_BOUND:
raise ValueError(f"Expected an uint64 seed, but got {seed}.")
self._shared_rng.seed(seed)
self._worker_rng.seed(seed)
def generate_shared_seed(self) -> int:
r"""
Generate one uint64 random seed that is supposed to be the same across
distributed processes.
"""
return self._shared_rng.generate()
def generate_seed(self) -> int:
r"""
Generate one unique uint64 random seed based on distributed and multiprocessing
information.
"""
return self._worker_rng.generate()
def spawn(self, worker_id: int, inplace: bool = False) -> "SeedGenerator":
r"""
Spawn a sub-``SeedGenerator`` based on the provided ``worker_id``. If ``inplace`` is turned on,
the ``SeedGenerator`` will evolve itself rather than spawning a new one.
"""
if worker_id < 0:
raise ValueError(f"Expected `rank` equal or larger than 0, but got {worker_id}.")
if inplace:
self._worker_rng = self._worker_rng.spawn(worker_id)
return self
return SeedGenerator(seed=None, _rngs=(self._shared_rng.clone(), self._worker_rng.spawn(worker_id)))
def __getstate__(self):
state = (
self._shared_rng,
self._worker_rng,
)
return state
def __setstate__(self, state):
self._shared_rng, self._worker_rng = state
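# --- Hedged usage sketch, not part of the original module ---
# Shows the intended flow: one shared seed (identical across ranks for the same
# base seed) plus per-worker seeds obtained by spawning. The worker id is illustrative.
if __name__ == "__main__":
    seed_gen = SeedGenerator(seed=123)
    shared = seed_gen.generate_shared_seed()  # same on every rank seeded with 123
    worker_gen = seed_gen.spawn(worker_id=0)  # independent generator for worker 0
    print(shared, worker_gen.generate_seed())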
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data import DataChunk, functional_datapipe
from . import iter, map, utils
__all__ = ["DataChunk", "functional_datapipe", "iter", "map", "utils"]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
###############################################################################
# Reference From PyTorch Core
###############################################################################
from torch.utils.data import IterDataPipe
from torch.utils.data.datapipes.iter import (
Batcher,
Collator,
Concater,
Demultiplexer,
FileLister,
FileOpener,
Filter,
Forker,
Grouper,
IterableWrapper,
Mapper,
Multiplexer,
RoutedDecoder,
Sampler,
ShardingFilter,
Shuffler,
StreamReader,
UnBatcher,
Zipper,
)
from torchdata.datapipes.iter.load.aisio import (
AISFileListerIterDataPipe as AISFileLister,
AISFileLoaderIterDataPipe as AISFileLoader,
)
###############################################################################
# TorchData
###############################################################################
from torchdata.datapipes.iter.load.fsspec import (
FSSpecFileListerIterDataPipe as FSSpecFileLister,
FSSpecFileOpenerIterDataPipe as FSSpecFileOpener,
FSSpecSaverIterDataPipe as FSSpecSaver,
)
from torchdata.datapipes.iter.load.huggingface import HuggingFaceHubReaderIterDataPipe as HuggingFaceHubReader
from torchdata.datapipes.iter.load.iopath import (
IoPathFileListerIterDataPipe as IoPathFileLister,
IoPathFileOpenerIterDataPipe as IoPathFileOpener,
IoPathSaverIterDataPipe as IoPathSaver,
)
from torchdata.datapipes.iter.load.online import (
GDriveReaderDataPipe as GDriveReader,
HTTPReaderIterDataPipe as HttpReader,
OnlineReaderIterDataPipe as OnlineReader,
)
from torchdata.datapipes.iter.load.s3io import (
S3FileListerIterDataPipe as S3FileLister,
S3FileLoaderIterDataPipe as S3FileLoader,
)
from torchdata.datapipes.iter.transform.bucketbatcher import (
BucketBatcherIterDataPipe as BucketBatcher,
InBatchShufflerIterDataPipe as InBatchShuffler,
MaxTokenBucketizerIterDataPipe as MaxTokenBucketizer,
)
from torchdata.datapipes.iter.transform.callable import (
BatchAsyncMapperIterDataPipe as BatchAsyncMapper,
BatchMapperIterDataPipe as BatchMapper,
DropperIterDataPipe as Dropper,
FlatMapperIterDataPipe as FlatMapper,
FlattenIterDataPipe as Flattener,
ShuffledFlatMapperIterDataPipe as ShuffledFlatMapper,
SliceIterDataPipe as Slicer,
ThreadPoolMapperIterDataPipe as ThreadPoolMapper,
)
from torchdata.datapipes.iter.util.bz2fileloader import Bz2FileLoaderIterDataPipe as Bz2FileLoader
from torchdata.datapipes.iter.util.cacheholder import (
EndOnDiskCacheHolderIterDataPipe as EndOnDiskCacheHolder,
InMemoryCacheHolderIterDataPipe as InMemoryCacheHolder,
OnDiskCacheHolderIterDataPipe as OnDiskCacheHolder,
)
from torchdata.datapipes.iter.util.combining import (
IterKeyZipperIterDataPipe as IterKeyZipper,
MapKeyZipperIterDataPipe as MapKeyZipper,
RoundRobinDemultiplexerIterDataPipe as RoundRobinDemultiplexer,
UnZipperIterDataPipe as UnZipper,
)
from torchdata.datapipes.iter.util.cycler import CyclerIterDataPipe as Cycler, RepeaterIterDataPipe as Repeater
from torchdata.datapipes.iter.util.dataframemaker import (
DataFrameMakerIterDataPipe as DataFrameMaker,
ParquetDFLoaderIterDataPipe as ParquetDataFrameLoader,
)
from torchdata.datapipes.iter.util.decompressor import (
DecompressorIterDataPipe as Decompressor,
ExtractorIterDataPipe as Extractor,
)
from torchdata.datapipes.iter.util.distributed import FullSyncIterDataPipe as FullSync
from torchdata.datapipes.iter.util.hashchecker import HashCheckerIterDataPipe as HashChecker
from torchdata.datapipes.iter.util.header import HeaderIterDataPipe as Header, LengthSetterIterDataPipe as LengthSetter
from torchdata.datapipes.iter.util.indexadder import (
EnumeratorIterDataPipe as Enumerator,
IndexAdderIterDataPipe as IndexAdder,
)
from torchdata.datapipes.iter.util.jsonparser import JsonParserIterDataPipe as JsonParser
from torchdata.datapipes.iter.util.mux_longest import MultiplexerLongestIterDataPipe as MultiplexerLongest
from torchdata.datapipes.iter.util.paragraphaggregator import ParagraphAggregatorIterDataPipe as ParagraphAggregator
from torchdata.datapipes.iter.util.plain_text_reader import (
CSVDictParserIterDataPipe as CSVDictParser,
CSVParserIterDataPipe as CSVParser,
LineReaderIterDataPipe as LineReader,
)
from torchdata.datapipes.iter.util.prefetcher import (
PinMemoryIterDataPipe as PinMemory,
PrefetcherIterDataPipe as Prefetcher,
)
from torchdata.datapipes.iter.util.randomsplitter import RandomSplitterIterDataPipe as RandomSplitter
from torchdata.datapipes.iter.util.rararchiveloader import RarArchiveLoaderIterDataPipe as RarArchiveLoader
from torchdata.datapipes.iter.util.rows2columnar import Rows2ColumnarIterDataPipe as Rows2Columnar
from torchdata.datapipes.iter.util.samplemultiplexer import SampleMultiplexerDataPipe as SampleMultiplexer
from torchdata.datapipes.iter.util.saver import SaverIterDataPipe as Saver
from torchdata.datapipes.iter.util.shardexpander import ShardExpanderIterDataPipe as ShardExpander
from torchdata.datapipes.iter.util.sharding import (
ShardingRoundRobinDispatcherIterDataPipe as ShardingRoundRobinDispatcher,
)
from torchdata.datapipes.iter.util.tararchiveloader import TarArchiveLoaderIterDataPipe as TarArchiveLoader
from torchdata.datapipes.iter.util.tfrecordloader import (
TFRecordExample,
TFRecordExampleSpec,
TFRecordLoaderIterDataPipe as TFRecordLoader,
)
from torchdata.datapipes.iter.util.webdataset import WebDatasetIterDataPipe as WebDataset
from torchdata.datapipes.iter.util.xzfileloader import XzFileLoaderIterDataPipe as XzFileLoader
from torchdata.datapipes.iter.util.zip_longest import ZipperLongestIterDataPipe as ZipperLongest
from torchdata.datapipes.iter.util.ziparchiveloader import ZipArchiveLoaderIterDataPipe as ZipArchiveLoader
from torchdata.datapipes.map.util.converter import MapToIterConverterIterDataPipe as MapToIterConverter
__all__ = [
"AISFileLister",
"AISFileLoader",
"BatchAsyncMapper",
"BatchMapper",
"Batcher",
"BucketBatcher",
"Bz2FileLoader",
"CSVDictParser",
"CSVParser",
"Collator",
"Concater",
"Cycler",
"DataFrameMaker",
"Decompressor",
"Demultiplexer",
"Dropper",
"EndOnDiskCacheHolder",
"Enumerator",
"Extractor",
"FSSpecFileLister",
"FSSpecFileOpener",
"FSSpecSaver",
"FileLister",
"FileOpener",
"Filter",
"FlatMapper",
"Flattener",
"Forker",
"FullSync",
"GDriveReader",
"Grouper",
"HashChecker",
"Header",
"HttpReader",
"HuggingFaceHubReader",
"InBatchShuffler",
"InMemoryCacheHolder",
"IndexAdder",
"IoPathFileLister",
"IoPathFileOpener",
"IoPathSaver",
"IterDataPipe",
"IterKeyZipper",
"IterableWrapper",
"JsonParser",
"LengthSetter",
"LineReader",
"MapKeyZipper",
"MapToIterConverter",
"Mapper",
"MaxTokenBucketizer",
"Multiplexer",
"MultiplexerLongest",
"OnDiskCacheHolder",
"OnlineReader",
"ParagraphAggregator",
"ParquetDataFrameLoader",
"PinMemory",
"Prefetcher",
"RandomSplitter",
"RarArchiveLoader",
"Repeater",
"RoundRobinDemultiplexer",
"RoutedDecoder",
"Rows2Columnar",
"S3FileLister",
"S3FileLoader",
"SampleMultiplexer",
"Sampler",
"Saver",
"ShardExpander",
"ShardingFilter",
"ShardingRoundRobinDispatcher",
"ShuffledFlatMapper",
"Shuffler",
"Slicer",
"StreamReader",
"TFRecordLoader",
"TarArchiveLoader",
"ThreadPoolMapper",
"UnBatcher",
"UnZipper",
"WebDataset",
"XzFileLoader",
"ZipArchiveLoader",
"Zipper",
"ZipperLongest",
]
# Please keep this list sorted
assert __all__ == sorted(__all__)
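# --- Hedged usage sketch, not part of the original module ---
# A tiny pipeline built from a few of the DataPipes re-exported above, to show
# how the functional forms chain together.
if __name__ == "__main__":
    dp = IterableWrapper(range(6)).map(lambda x: x * 2).batch(2)
    print(list(dp))  # expected: three batches [[0, 2], [4, 6], [8, 10]]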
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterator, Optional, TypeVar
from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
T_co = TypeVar("T_co", covariant=True)
@functional_datapipe("sharding_round_robin_dispatch")
class ShardingRoundRobinDispatcherIterDataPipe(IterDataPipe):
r"""
Wrapper that indicates the prior section of the ``DataPipe`` graph is non-replicable and will be
iterated in a separate, single dispatching process to distribute data to worker processes
in a round-robin manner when multiprocessing is being used.
(functional name: ``sharding_round_robin_dispatch``).
Args:
source_datapipe: Iterable DataPipe that will be sharded
sharding_group_filter: Optional ``SHARDING_PRIORITIES`` value
Note:
- ``sharding_group_filter`` only accepts ``SHARDING_PRIORITIES.MULTIPROCESSING`` for now
- When using distributed training, you can add a ``sharding_filter()`` prior to this DataPipe
to distribute samples among worker nodes.
Examples:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES
>>> dp = IterableWrapper(range(10))
>>> # `.shuffle()` will be executed in a single dispatching process, then the samples are distributed
>>> # to worker processes
>>> dp = dp.shuffle().sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING)
>>> # `.map()` will be executed within each worker process
>>> dp = dp.map(lambda x: x + 1)
>>> # Distributed case: the 10 samples will be distributed among the nodes
>>> dp = IterableWrapper(range(10)).sharding_filter()
>>> # `.map()` will be executed in a single dispatching process on each node
>>> # You may apply further transformations afterwards within each worker process
>>> dp = dp.map(lambda x: x + 1).sharding_round_robin_dispatch(SHARDING_PRIORITIES.MULTIPROCESSING)
"""
def __init__(self, source_datapipe: IterDataPipe, sharding_group_filter: Optional[SHARDING_PRIORITIES] = None):
self.source_datapipe = source_datapipe
if sharding_group_filter != SHARDING_PRIORITIES.MULTIPROCESSING:
raise NotImplementedError(
"`sharding_round_robin_dispatch` currently only supports `SHARDING_PRIORITIES.MULTIPROCESSING`."
"Please open issue on github for your feature request."
)
self.sharding_group_filter = sharding_group_filter
def __iter__(self) -> Iterator[T_co]:
yield from self.source_datapipe
def __len__(self) -> int:
return len(self.source_datapipe)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import re
from typing import Any, Dict, Iterator, List, Union
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
def pathsplit(p):
"""Split a path into a WebDataset prefix and suffix.
The prefix is used for grouping files into samples,
the suffix is used as key in the output dictionary.
The suffix consists of all components after the first
"." in the filename.
In torchdata, the prefix consists of the .tar file
path followed by the file name inside the archive.
Any backslash in the prefix is replaced by a forward
slash to make Windows prefixes consistent with POSIX
paths.
"""
# convert Windows pathnames to UNIX pathnames, otherwise
# we get an inconsistent mix of the Windows path to the tar
# file followed by the POSIX path inside that tar file
p = p.replace("\\", "/")
if "." not in p:
return p, ""
# we need to use a regular expression because os.path is
# platform specific, but tar files always contain POSIX paths
match = re.search(r"^(.*?)(\.[^/]*)$", p)
if not match:
return p, ""
prefix, suffix = match.groups()
return prefix, suffix
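# --- Hedged illustration, not part of the original module ---
# For archive members, everything from the first "." of the file name onwards
# becomes the suffix (used as the sample key); backslashes are normalized first.
if __name__ == "__main__":
    assert pathsplit("data/wds0.tar/sample1.cls") == ("data/wds0.tar/sample1", ".cls")
    assert pathsplit("data\\wds0.tar\\sample1.txt") == ("data/wds0.tar/sample1", ".txt")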
@functional_datapipe("webdataset")
class WebDatasetIterDataPipe(IterDataPipe[Dict]):
r"""
Iterable DataPipe that accepts a stream of (path, data) tuples, usually
representing the pathnames and files of a tar archive (functional name:
``webdataset``). This aggregates consecutive items with the same basename
into a single dictionary, using the extensions as keys (WebDataset file
convention). Any text after the first "." in the filename is used as
a key/extension.
File names that do not have an extension are ignored.
Args:
source_datapipe: a DataPipe yielding a stream of (path, data) pairs
Returns:
a DataPipe yielding a stream of dictionaries
Examples:
>>> from torchdata.datapipes.iter import FileLister, FileOpener
>>>
>>> def decode(item):
>>> key, value = item
>>> if key.endswith(".txt"):
>>> return key, value.read().decode("utf-8")
>>> if key.endswith(".bin"):
>>> return key, value.read().decode("utf-8")
>>>
>>> datapipe1 = FileLister("test/_fakedata", "wds*.tar")
>>> datapipe2 = FileOpener(datapipe1, mode="b")
>>> dataset = datapipe2.load_from_tar().map(decode).webdataset()
>>> for obj in dataset:
>>> print(obj)
"""
def __init__(self, source_datapipe: IterDataPipe[List[Union[Dict, List]]]) -> None:
self.source_datapipe: IterDataPipe[List[Union[Dict, List]]] = source_datapipe
def __iter__(self) -> Iterator[Dict]:
sample: Dict[str, Any] = {}
current = ""
for path, data in self.source_datapipe:
assert isinstance(path, str), path
prefix, suffix = pathsplit(path)
if suffix == "":
# files with empty suffixes can be used for metadata
# they cannot be used for data since they wouldn't have a key
continue
if prefix != current:
if current != "":
yield sample
sample = {}
current = prefix
sample["__key__"] = current
sample[suffix] = data
if sample != {}:
yield sample
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import lzma
import warnings
from io import BufferedIOBase
from typing import Iterable, Iterator, Tuple
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
from torchdata.datapipes.utils.common import validate_pathname_binary_tuple
@functional_datapipe("load_from_xz")
class XzFileLoaderIterDataPipe(IterDataPipe[Tuple[str, BufferedIOBase]]):
r"""
Decompresses xz (lzma) binary streams from an Iterable DataPipe which contains tuples of
path name and xz binary streams, and yields a tuple of path name and extracted binary
stream (functional name: ``load_from_xz``).
Args:
datapipe: Iterable DataPipe that provides tuples of path name and xz binary stream
length: Nominal length of the DataPipe
Note:
The opened file handles will be closed automatically if the default ``DecoderDataPipe``
is attached. Otherwise, the user is responsible for closing the file handles explicitly
or letting Python's GC close them periodically.
Example:
>>> from torchdata.datapipes.iter import FileLister, FileOpener
>>> datapipe1 = FileLister(".", "*.xz")
>>> datapipe2 = FileOpener(datapipe1, mode="b")
>>> xz_loader_dp = datapipe2.load_from_xz()
>>> for _, stream in xz_loader_dp:
>>> print(stream.read())
b'0123456789abcdef'
"""
def __init__(self, datapipe: Iterable[Tuple[str, BufferedIOBase]], length: int = -1) -> None:
super().__init__()
self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe
self.length: int = length
def __iter__(self) -> Iterator[Tuple[str, BufferedIOBase]]:
for data in self.datapipe:
validate_pathname_binary_tuple(data)
pathname, data_stream = data
try:
extracted_fobj = lzma.open(data_stream, mode="rb") # type: ignore[call-overload]
new_pathname = pathname.rstrip(".xz")
yield new_pathname, StreamWrapper(extracted_fobj, data_stream, name=pathname) # type: ignore[misc]
except Exception as e:
warnings.warn(f"Unable to extract files from corrupted xz/lzma stream {pathname} due to: {e}, abort!")
raise e
finally:
if isinstance(data_stream, StreamWrapper):
data_stream.autoclose()
def __len__(self) -> int:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import csv
from typing import IO, Iterator, Tuple, TypeVar, Union
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
D = TypeVar("D")
Str_Or_Bytes = Union[str, bytes]
class PlainTextReaderHelper:
def __init__(
self,
*,
skip_lines: int = 0,
strip_newline: bool = True,
decode: bool = True,
encoding="utf-8",
errors: str = "ignore",
return_path: bool = False,
as_tuple: bool = False,
) -> None:
if skip_lines < 0:
raise ValueError("'skip_lines' is required to be a positive integer.")
self._skip_lines = skip_lines
self._strip_newline = strip_newline
self._decode = decode
self._encoding = encoding
self._errors = errors
self._return_path = return_path
self._as_tuple = as_tuple
def skip_lines(self, file: IO) -> Union[Iterator[bytes], Iterator[str]]:
with contextlib.suppress(StopIteration):
for _ in range(self._skip_lines):
next(file)
try:
yield from file
finally:
file.close()
def strip_newline(self, stream: Union[Iterator[bytes], Iterator[str]]) -> Union[Iterator[bytes], Iterator[str]]:
if not self._strip_newline:
yield from stream
return
for line in stream:
if isinstance(line, str):
yield line.strip("\r\n")
else:
yield line.strip(b"\r\n")
def decode(self, stream: Union[Iterator[bytes], Iterator[str]]) -> Union[Iterator[bytes], Iterator[str]]:
if not self._decode:
yield from stream
else:
for line in stream:
yield line.decode(self._encoding, self._errors) if isinstance(line, bytes) else line
def return_path(self, stream: Iterator[D], *, path: str) -> Iterator[Union[D, Tuple[str, D]]]:
if not self._return_path:
yield from stream
return
for data in stream:
yield path, data
def as_tuple(self, stream: Iterator[D]) -> Iterator[Union[D, Tuple]]:
if not self._as_tuple:
yield from stream
return
for data in stream:
if isinstance(data, list):
yield tuple(data)
else:
yield data
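# --- Hedged illustration, not part of the original module ---
# Sketch of how the reader DataPipes below compose the helper's generator stages
# over an open file object; `path` and `file` are illustrative names.
def _example_read_lines(path, file):
    helper = PlainTextReaderHelper(skip_lines=1, return_path=True)
    stream = helper.skip_lines(file)       # drop leading lines, close file when exhausted
    stream = helper.strip_newline(stream)  # strip trailing "\r\n"
    stream = helper.decode(stream)         # bytes -> str when decode=True
    yield from helper.return_path(stream, path=path)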
@functional_datapipe("readlines")
class LineReaderIterDataPipe(IterDataPipe[Union[Str_Or_Bytes, Tuple[str, Str_Or_Bytes]]]):
r"""
Accepts a DataPipe consisting of tuples of file name and string data stream, and for each line in the
stream, yields a tuple of file name and the line (functional name: ``readlines``).
Args:
source_datapipe: a DataPipe with tuples of file name and string data stream
skip_lines: number of lines to skip at the beginning of each file
strip_newline: if ``True``, the new line character will be stripped
decode: if ``True``, this will decode the contents of the file based on the specified ``encoding``
encoding: the character encoding of the files (`default='utf-8'`)
errors: the error handling scheme used while decoding
return_path: if ``True``, each line will return a tuple of path and contents, rather
than just the contents
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> import io
>>> text1 = "Line1\nLine2"
>>> text2 = "Line2,1\r\nLine2,2\r\nLine2,3"
>>> source_dp = IterableWrapper([("file1", io.StringIO(text1)), ("file2", io.StringIO(text2))])
>>> line_reader_dp = source_dp.readlines()
>>> list(line_reader_dp)
[('file1', 'Line1'), ('file1', 'Line2'), ('file2', 'Line2,1'), ('file2', 'Line2,2'), ('file2', 'Line2,3')]
"""
def __init__(
self,
source_datapipe: IterDataPipe[Tuple[str, IO]],
*,
skip_lines: int = 0,
strip_newline: bool = True,
decode: bool = False,
encoding="utf-8",
errors: str = "ignore",
return_path: bool = True,
) -> None:
self.source_datapipe = source_datapipe
self._helper = PlainTextReaderHelper(
skip_lines=skip_lines,
strip_newline=strip_newline,
decode=decode,
encoding=encoding,
errors=errors,
return_path=return_path,
)
def __iter__(self) -> Iterator[Union[Str_Or_Bytes, Tuple[str, Str_Or_Bytes]]]:
for path, file in self.source_datapipe:
stream = self._helper.skip_lines(file)
stream = self._helper.strip_newline(stream)
stream = self._helper.decode(stream)
yield from self._helper.return_path(stream, path=path) # type: ignore[misc]
class _CSVBaseParserIterDataPipe(IterDataPipe):
def __init__(
self,
source_datapipe,
csv_reader,
*,
skip_lines: int = 0,
decode: bool = False,
encoding="utf-8",
errors: str = "ignore",
return_path: bool = True,
as_tuple: bool = False,
**fmtparams,
) -> None:
self.source_datapipe = source_datapipe
self._csv_reader = csv_reader
self._helper = PlainTextReaderHelper(
skip_lines=skip_lines,
decode=decode,
encoding=encoding,
errors=errors,
return_path=return_path,
as_tuple=as_tuple,
)
self.fmtparams = fmtparams
def __iter__(self) -> Iterator[Union[D, Tuple[str, D]]]:
for path, file in self.source_datapipe:
stream = self._helper.skip_lines(file)
stream = self._helper.decode(stream)
stream = self._csv_reader(stream, **self.fmtparams)
stream = self._helper.as_tuple(stream) # type: ignore[assignment]
yield from self._helper.return_path(stream, path=path) # type: ignore[misc]
@functional_datapipe("parse_csv")
class CSVParserIterDataPipe(_CSVBaseParserIterDataPipe):
r"""
Accepts a DataPipe that consists of tuples of file name and CSV data stream,
reads and returns the contents within the CSV files one row at a time (functional name: ``parse_csv``).
Each output is a `List` by default, but it depends on ``fmtparams``.
Args:
source_datapipe: source DataPipe with tuples of file name and CSV data stream
skip_lines: number of lines to skip at the beginning of each file
strip_newline: if ``True``, the new line character will be stripped
decode: if ``True``, this will decode the contents of the file based on the specified ``encoding``
encoding: the character encoding of the files (`default='utf-8'`)
errors: the error handling scheme used while decoding
return_path: if ``True``, each line will return a tuple of path and contents, rather
than just the contents
as_tuple: if ``True``, each line will return a tuple instead of a list
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, FileOpener
>>> import os
>>> def get_name(path_and_stream):
>>> return os.path.basename(path_and_stream[0]), path_and_stream[1]
>>> datapipe1 = IterableWrapper(["1.csv", "empty.csv", "empty2.csv"])
>>> datapipe2 = FileOpener(datapipe1, mode="b")
>>> datapipe3 = datapipe2.map(get_name)
>>> csv_parser_dp = datapipe3.parse_csv()
>>> list(csv_parser_dp)
[['key', 'item'], ['a', '1'], ['b', '2'], []]
"""
def __init__(
self,
source_datapipe: IterDataPipe[Tuple[str, IO]],
*,
skip_lines: int = 0,
decode: bool = True,
encoding: str = "utf-8",
errors: str = "ignore",
return_path: bool = False,
as_tuple: bool = False,
**fmtparams,
) -> None:
super().__init__(
source_datapipe,
csv.reader,
skip_lines=skip_lines,
decode=decode,
encoding=encoding,
errors=errors,
return_path=return_path,
as_tuple=as_tuple,
**fmtparams,
)
@functional_datapipe("parse_csv_as_dict")
class CSVDictParserIterDataPipe(_CSVBaseParserIterDataPipe):
r"""
Accepts a DataPipe that consists of tuples of file name and CSV data stream, reads and returns the contents
within the CSV files one row at a time (functional name: ``parse_csv_as_dict``).
Each output is a `Dict` by default, but it depends on ``fmtparams``. The first row of each file, unless skipped,
will be used as the header; the contents of the header row will be used as keys for the `Dict`\s
generated from the remaining rows.
Args:
source_datapipe: source DataPipe with tuples of file name and CSV data stream
skip_lines: number of lines to skip at the beginning of each file
strip_newline: if ``True``, the new line character will be stripped
decode: if ``True``, this will decode the contents of the file based on the specified ``encoding``
encoding: the character encoding of the files (`default='utf-8'`)
errors: the error handling scheme used while decoding
return_path: if ``True``, each line will return a tuple of path and contents, rather
than just the contents
Example:
>>> from torchdata.datapipes.iter import FileLister, FileOpener
>>> import os
>>> def get_name(path_and_stream):
>>> return os.path.basename(path_and_stream[0]), path_and_stream[1]
>>> datapipe1 = FileLister(".", "*.csv")
>>> datapipe2 = FileOpener(datapipe1, mode="b")
>>> datapipe3 = datapipe2.map(get_name)
>>> csv_dict_parser_dp = datapipe3.parse_csv_as_dict()
>>> list(csv_dict_parser_dp)
[{'key': 'a', 'item': '1'}, {'key': 'b', 'item': '2'}]
"""
def __init__(
self,
source_datapipe: IterDataPipe[Tuple[str, IO]],
*,
skip_lines: int = 0,
decode: bool = True,
encoding: str = "utf-8",
errors: str = "ignore",
return_path: bool = False,
**fmtparams,
) -> None:
super().__init__(
source_datapipe,
csv.DictReader,
skip_lines=skip_lines,
decode=decode,
encoding=encoding,
errors=errors,
return_path=return_path,
**fmtparams,
)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterator, Optional, TypeVar
from warnings import warn
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
T_co = TypeVar("T_co", covariant=True)
@functional_datapipe("header")
class HeaderIterDataPipe(IterDataPipe[T_co]):
r"""
Yields elements from the source DataPipe from the start, up to the specified limit (functional name: ``header``).
If you would like to manually set the length of a DataPipe to a certain value, we recommend you
use :class:`.LengthSetter`.
Args:
source_datapipe: the DataPipe from which elements will be yielded
limit: the number of elements to yield before stopping
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(10))
>>> header_dp = dp.header(3)
>>> list(header_dp)
[0, 1, 2]
"""
def __init__(self, source_datapipe: IterDataPipe[T_co], limit: Optional[int] = 10) -> None:
self.source_datapipe: IterDataPipe[T_co] = source_datapipe
self.limit: Optional[int] = limit
def __iter__(self) -> Iterator[T_co]:
i: int = 0
for value in self.source_datapipe:
i += 1
if self.limit is None or i <= self.limit:
yield value
else:
break
def __len__(self) -> int:
try:
source_len = len(self.source_datapipe)
return source_len if self.limit is None else min(source_len, self.limit)
except TypeError as error:
if self.limit is None:
raise TypeError("The length of this HeaderIterDataPipe cannot be determined.") from error
warn(
"The length of this HeaderIterDataPipe is inferred to be equal to its limit."
"The actual value may be smaller if the actual length of source_datapipe is smaller than the limit."
)
return self.limit
@functional_datapipe("set_length")
class LengthSetterIterDataPipe(IterDataPipe[T_co]):
r"""
Set the length attribute of the DataPipe, which is returned by ``__len__`` (functional name: ``set_length``).
This can be used after DataPipes whose final length cannot be known in advance (e.g. ``filter``). If you
know the final length with certainty, you can manually set it, which can then be used by
DataLoader or other DataPipes.
Note:
This DataPipe differs from :class:`.Header` in that this doesn't restrict the number of elements that
can be yielded from the DataPipe; this is strictly used for setting an attribute so that it can be used later.
Args:
source_datapipe: a DataPipe
length: the integer value that will be set as the length
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(10)).filter(lambda x: x < 5).set_length(3)
>>> list(dp) # Notice that the number of elements yielded is unchanged
[0, 1, 2, 3, 4]
>>> len(dp)
3
>>> header_dp = IterableWrapper(range(10)).filter(lambda x: x < 5).header(3)
>>> list(header_dp) # Use `.header()` if you want to limit the number of elements yielded
[0, 1, 2]
>>> len(header_dp)
3
"""
def __init__(self, source_datapipe: IterDataPipe[T_co], length: int) -> None:
self.source_datapipe: IterDataPipe[T_co] = source_datapipe
assert length >= 0
self.length: int = length
def __iter__(self) -> Iterator[T_co]:
yield from self.source_datapipe
def __len__(self) -> int:
return self.length
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from typing import Dict, Iterator, List, Union
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
@functional_datapipe("rows2columnar")
class Rows2ColumnarIterDataPipe(IterDataPipe[Dict]):
r"""
Accepts an input DataPipe with batches of data, and processes one batch
at a time and yields a Dict for each batch, with ``column_names`` as keys and lists of
corresponding values from each row as values (functional name: ``rows2columnar``).
Within the input DataPipe, each row within a batch must either be a `Dict` or a `List`
Note:
If ``column_names`` are not given and each row is a `Dict`, the keys of that Dict will be used as column names.
Args:
source_datapipe: a DataPipe where each item is a batch. Within each batch,
there are rows and each row is a `List` or `Dict`
column_names: if each element in a batch contains `Dict`, ``column_names`` act as a filter for matching keys;
otherwise, these are used as keys for the generated `Dict` of each batch
Example:
>>> # Each element in a batch is a `Dict`
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper([[{'a': 1}, {'b': 2, 'a': 1}], [{'a': 1, 'b': 200}, {'b': 2, 'c': 3, 'a': 100}]])
>>> row2col_dp = dp.rows2columnar()
>>> list(row2col_dp)
[defaultdict(<class 'list'>, {'a': [1, 1], 'b': [2]}),
defaultdict(<class 'list'>, {'a': [1, 100], 'b': [200, 2], 'c': [3]})]
>>> row2col_dp = dp.rows2columnar(column_names=['a'])
>>> list(row2col_dp)
[defaultdict(<class 'list'>, {'a': [1, 1]}),
defaultdict(<class 'list'>, {'a': [1, 100]})]
>>> # Each element in a batch is a `List`
>>> dp = IterableWrapper([[[0, 1, 2, 3], [4, 5, 6, 7]]])
>>> row2col_dp = dp.rows2columnar(column_names=["1st_in_batch", "2nd_in_batch", "3rd_in_batch", "4th_in_batch"])
>>> list(row2col_dp)
[defaultdict(<class 'list'>, {'1st_in_batch': [0, 4], '2nd_in_batch': [1, 5],
'3rd_in_batch': [2, 6], '4th_in_batch': [3, 7]})]
"""
column_names: List[str]
def __init__(self, source_datapipe: IterDataPipe[List[Union[Dict, List]]], column_names: List[str] = None) -> None:
self.source_datapipe: IterDataPipe[List[Union[Dict, List]]] = source_datapipe
self.column_names: List[str] = [] if column_names is None else column_names
def __iter__(self) -> Iterator[Dict]:
for batch in self.source_datapipe:
columnar = defaultdict(list)
for list_or_dict_row in batch:
if isinstance(list_or_dict_row, dict):
# if column_names provided, we use it as a filter
if len(self.column_names) > 0:
for column_name in self.column_names:
# this line will raise a KeyError if column_name
# is not within list_or_dict_row which is the
# expected behavior
columnar[column_name].append(list_or_dict_row[column_name])
else:
for k, v in list_or_dict_row.items():
columnar[k].append(v)
else:
for i, v in enumerate(list_or_dict_row):
columnar[self.column_names[i]].append(v)
yield columnar
def __len__(self) -> int:
return len(self.source_datapipe)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import warnings
import zipfile
from io import BufferedIOBase
from typing import cast, IO, Iterable, Iterator, Tuple
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
from torchdata.datapipes.utils.common import validate_pathname_binary_tuple
@functional_datapipe("load_from_zip")
class ZipArchiveLoaderIterDataPipe(IterDataPipe[Tuple[str, BufferedIOBase]]):
r"""
Opens/decompresses zip binary streams from an Iterable DataPipe which contains a tuple of path name and
zip binary stream, and yields a tuple of path name and extracted binary stream (functional name: ``load_from_zip``).
Args:
datapipe: Iterable DataPipe that provides tuples of path name and zip binary stream
length: Nominal length of the DataPipe
Note:
The opened file handles will be closed automatically if the default ``DecoderDataPipe``
is attached. Otherwise, the user is responsible for closing the file handles explicitly
or letting Python's GC close them periodically. Due to how `zipfile` implements its ``open()`` method,
the data_stream variable below cannot be closed within the scope of this function.
Example:
>>> from torchdata.datapipes.iter import FileLister, FileOpener
>>> datapipe1 = FileLister(".", "*.zip")
>>> datapipe2 = FileOpener(datapipe1, mode="b")
>>> zip_loader_dp = datapipe2.load_from_zip()
>>> for _, stream in zip_loader_dp:
>>> print(stream.read())
b'0123456789abcdef'
"""
def __init__(self, datapipe: Iterable[Tuple[str, BufferedIOBase]], length: int = -1) -> None:
super().__init__()
self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe
self.length: int = length
def __iter__(self) -> Iterator[Tuple[str, BufferedIOBase]]:
for data in self.datapipe:
validate_pathname_binary_tuple(data)
pathname, data_stream = data
try:
# typing.cast is used here to silence mypy's type checker
zips = zipfile.ZipFile(cast(IO[bytes], data_stream))
for zipinfo in zips.infolist():
# major version should always be 3 here.
if sys.version_info[1] >= 6:
if zipinfo.is_dir():
continue
elif zipinfo.filename.endswith("/"):
continue
extracted_fobj = zips.open(zipinfo)
inner_pathname = os.path.normpath(os.path.join(pathname, zipinfo.filename))
yield inner_pathname, StreamWrapper(extracted_fobj, data_stream, name=inner_pathname) # type: ignore[misc]
except Exception as e:
warnings.warn(f"Unable to extract files from corrupted zipfile stream {pathname} due to: {e}, abort!")
raise e
finally:
if isinstance(data_stream, StreamWrapper):
data_stream.autoclose()
# We are unable to close 'data_stream' here, because it needs to be available to use later
def __len__(self) -> int:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Iterator, Tuple, TypeVar
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
K = TypeVar("K")
@functional_datapipe("enumerate")
class EnumeratorIterDataPipe(IterDataPipe[Tuple[int, K]]):
r"""
Adds an index to an existing DataPipe through enumeration, with
the index starting from 0 by default (functional name: ``enumerate``).
Args:
source_datapipe: Iterable DataPipe being indexed
starting_index: Index from which enumeration will start
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(['a', 'b', 'c'])
>>> enum_dp = dp.enumerate()
>>> list(enum_dp)
[(0, 'a'), (1, 'b'), (2, 'c')]
"""
def __init__(self, source_datapipe: IterDataPipe[K], starting_index: int = 0) -> None:
self.source_datapipe: IterDataPipe[K] = source_datapipe
self.starting_index = starting_index
def __iter__(self):
yield from enumerate(self.source_datapipe, self.starting_index)
def __len__(self):
return len(self.source_datapipe)
@functional_datapipe("add_index")
class IndexAdderIterDataPipe(IterDataPipe[Dict]):
r"""
Adds an index to an existing Iterable DataPipe (functional name: ``add_index``). The row or batch
within the DataPipe must have the type `Dict`; otherwise, a `NotImplementedError` will be thrown. The index
of the data is set to the provided ``index_name``.
Args:
source_datapipe: Iterable DataPipe being indexed, its row/batch must be of type `Dict`
index_name: Name of the key to store data index
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper([{'a': 1, 'b': 2}, {'c': 3, 'a': 1}])
>>> index_dp = dp.add_index("order")
>>> list(index_dp)
[{'a': 1, 'b': 2, 'order': 0}, {'c': 3, 'a': 1, 'order': 1}]
"""
def __init__(self, source_datapipe: IterDataPipe[Dict], index_name: str = "index") -> None:
self.source_datapipe = source_datapipe
self.index_name = index_name
def __iter__(self) -> Iterator[Dict]:
for i, row_or_batch in enumerate(self.source_datapipe):
if isinstance(row_or_batch, dict):
row_or_batch[self.index_name] = i
yield row_or_batch
else:
raise NotImplementedError("We only support adding index to row or batch in dict type")
def __len__(self) -> int:
return len(self.source_datapipe)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import Dict, Iterator, Optional, Sized, TypeVar
from torchdata.datapipes.iter import IterDataPipe
T_co = TypeVar("T_co", covariant=True)
class SampleMultiplexerDataPipe(IterDataPipe[T_co]):
"""
Takes a `Dict` of (IterDataPipe, Weight), and yields items by sampling from these
DataPipes with respect to their weights. When individual DataPipes are exhausted, continues to sample from
the remaining DataPipes according to their relative weights.
If you wish to maintain the same ratio of weights indefinitely, you need to ensure that the
inputs are never exhausted, by, for instance, applying ``cycle`` to them.
Sampling is controlled by the provided random ``seed``. If you don't provide it, the sampling
will not be deterministic.
Args:
pipes_to_weights_dict: a `Dict` of IterDataPipes and Weights. The total weight of
unexhausted DataPipes will be normalized to 1 for the purpose of sampling.
seed: random seed to initialize the random number generator
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, SampleMultiplexer
>>> source_dp1 = IterableWrapper([0] * 10)
>>> source_dp2 = IterableWrapper([1] * 10)
>>> d = {source_dp1: 99999999, source_dp2: 0.0000001}
>>> sample_mul_dp = SampleMultiplexer(pipes_to_weights_dict=d, seed=0)
>>> list(sample_mul_dp)
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
"""
def __init__(
self,
pipes_to_weights_dict: Dict[IterDataPipe[T_co], float],
seed: Optional[int] = None,
):
if not pipes_to_weights_dict:
raise ValueError("Empty dictionary passed to SampleMultiplexerDataPipe")
total_weight: float = 0
for v in pipes_to_weights_dict.values():
if v <= 0:
raise ValueError(f"Expecting a positive and non-zero weight, got {v}")
total_weight += v
self.pipes_and_weights = [(k, v / total_weight) for k, v in pipes_to_weights_dict.items()]
if seed is None:
self.random = random.Random()
else:
self.random = random.Random(seed)
def __iter__(self) -> Iterator[T_co]:
pipes_and_weights = [(iter(k), v) for k, v in self.pipes_and_weights]
while len(pipes_and_weights) > 1:
r = self.random.random()
s: float = 0
for it, weight in pipes_and_weights:
s += weight
if r < s:
try:
item = next(it)
yield item
except StopIteration:
# remove the current stream
new_total = 1 - weight
assert new_total > 0
pipes_and_weights = [(k, v / new_total) for k, v in pipes_and_weights if k != it]
break
# only one stream left
for item in pipes_and_weights[0][0]:
yield item
def __len__(self) -> int:
if all(isinstance(dp, Sized) for dp, _ in self.pipes_and_weights):
return sum(len(dp) for dp, _ in self.pipes_and_weights)
else:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
from io import IOBase
from typing import Dict, Iterator, Tuple, Union
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
D_type = Union[str, bytes, bytearray]
U = Union[D_type, StreamWrapper]
@functional_datapipe("check_hash")
class HashCheckerIterDataPipe(IterDataPipe[Tuple[str, U]]):
r"""
Computes and checks the hash of each file, from an input DataPipe of tuples of file name and
data/stream (functional name: ``check_hash``). If the computed hash matches the given hash
in the dictionary, it yields a tuple of file name and data/stream. Otherwise, it will raise an error.
Args:
source_datapipe: IterDataPipe with tuples of file name and data/stream
hash_dict: Dictionary that maps file names to their corresponding hashes
hash_type: The type of hash function to apply
rewind: Rewind the stream after using the stream to compute the hash (this
does not work with non-seekable stream, e.g. HTTP)
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, FileOpener
>>> expected_MD5_hash = "bb9675028dd39d2dd2bf71002b93e66c"
>>> # File is from "https://raw.githubusercontent.com/pytorch/data/main/LICENSE"
>>> file_dp = FileOpener(IterableWrapper(["LICENSE.txt"]), mode='rb')
>>> # An exception is only raised when the hash doesn't match, otherwise (path, stream) is returned
>>> check_hash_dp = file_dp.check_hash({"LICENSE.txt": expected_MD5_hash}, "md5", rewind=True)
>>> reader_dp = check_hash_dp.readlines()
>>> it = iter(reader_dp)
>>> path, line = next(it)
>>> path
LICENSE.txt
>>> line
b'BSD 3-Clause License'
"""
def __init__(
self,
source_datapipe: IterDataPipe[Tuple[str, IOBase]],
hash_dict: Dict[str, str],
hash_type: str = "sha256",
rewind: bool = True,
) -> None:
self.source_datapipe: IterDataPipe[Tuple[str, IOBase]] = source_datapipe
self.hash_dict: Dict[str, str] = hash_dict
self.hash_type: str = hash_type
self.rewind: bool = rewind
if self.hash_type not in ["sha256", "md5"]:
raise ValueError("Invalid hash_type requested, should be one of {}".format(["sha256", "md5"]))
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for file_name, data in self.source_datapipe:
if self.hash_type == "sha256":
hash_func = hashlib.sha256()
else:
hash_func = hashlib.md5()
if isinstance(data, (str, bytes, bytearray)):
if isinstance(data, str):
data = data.encode()  # encode str to bytes, as hash functions only accept bytes-like input
hash_func.update(data)
# File Stream
else:
# Not all streams have `read(bytes)` method.
# `__iter__` method is chosen because it is a common interface for IOBase.
for d in data:
hash_func.update(d)
# TODO(133): this will not work (or work crappy for non-seekable steams like http)
if self.rewind:
data.seek(0)
if file_name not in self.hash_dict:
raise RuntimeError(f"Unspecified hash for file {file_name}")
if hash_func.hexdigest() != self.hash_dict[file_name]:
raise RuntimeError(
f"The computed hash {hash_func.hexdigest()} of {file_name} does not match the expected"
f"hash {self.hash_dict[file_name]}. Delete the file manually and retry."
)
if isinstance(data, (str, bytes, bytearray)):
yield file_name, data
else:
yield file_name, StreamWrapper(data)
def __len__(self) -> int:
return len(self.source_datapipe)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Callable, Dict, Optional
from torch.utils.data import IterDataPipe, MapDataPipe
from torch.utils.data.datapipes.utils.common import _check_unpickable_fn, DILL_AVAILABLE
if DILL_AVAILABLE:
import dill
dill.extend(use_dill=False)
# @functional_datapipe("to_map_datapipe") # This line must be kept for .pyi signature parser
class IterToMapConverterMapDataPipe(MapDataPipe):
r"""
Lazily load data from ``IterDataPipe`` to construct a ``MapDataPipe`` with
the key-value pair generated by ``key_value_fn`` (functional name: ``to_map_datapipe``).
If ``key_value_fn`` is not given, each data from the source IterDataPipe must itself be an iterable
with exactly two objects. The first object of each item becomes a key in
the new dictionary, and the second object the corresponding value.
For the opposite converter, use :class:`.MapToIterConverter`.
Args:
datapipe: Source IterDataPipe
key_value_fn: Function being applied over each data to generate key-value pair
Note:
If a key being added is already present, the corresponding value
will be replaced by the new value.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper([(i, i) for i in range(10)])
>>> map_dp = source_dp.to_map_datapipe()
>>> list(map_dp)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> source_dp2 = IterableWrapper([('a', 1), ('b', 2), ('c', 1)])
>>> map_dp2 = source_dp2.to_map_datapipe()
>>> map_dp2['a']
1
>>> def row_to_tuple(row):
>>> label = row[0]
>>> data = row[1:]
>>> return label, data
>>> source_dp3 = IterableWrapper([('a', 1, 1, 1, 1, 1, 1), ('b', 2, 2, 2, 2, 2, 2), ('c', 3, 3, 3, 3, 3, 3)])
>>> map_dp3 = source_dp3.to_map_datapipe(key_value_fn=row_to_tuple)
>>> map_dp3['a']
(1, 1, 1, 1, 1, 1)
"""
datapipe: IterDataPipe
key_value_fn: Optional[Callable]
_map: Optional[Dict]
_length: int
def __init__(self, datapipe: IterDataPipe, key_value_fn: Optional[Callable] = None):
if not isinstance(datapipe, IterDataPipe):
raise TypeError(f"IterToMapConverter can only apply on IterDataPipe, but found {type(datapipe)}")
self.datapipe = datapipe
if key_value_fn is not None:
_check_unpickable_fn(key_value_fn)
self.key_value_fn = key_value_fn # type: ignore[assignment]
self._map = None
def _load_map(self):
self._map = {}
for d in self.datapipe:
inp = d if self.key_value_fn is None else self.key_value_fn(d)
try:
length = len(inp)
except TypeError:
raise TypeError(f"Cannot convert dictionary update element {type(inp)} ({inp}) to a sequence")
if length != 2:
raise ValueError(f"dictionary update sequence element has length {length}, 2 is required")
key, value = inp
if key in self._map:
warnings.warn(f"Found duplicate key {key}. Please check your `key_value_fn`")
self._map[key] = value
def __getitem__(self, index):
try:
if self._map is None:
self._load_map()
return self._map[index] # type: ignore[index]
except KeyError:
raise IndexError(f"Index {index} is invalid for IterToMapConverter.")
def __len__(self):
if self._map is not None:
return len(self._map) # type: ignore[arg-type]
try:
return len(self.datapipe)
except (TypeError, NotImplementedError):
pass
warnings.warn(
"Data from prior DataPipe are loaded to get length of"
"IterToMapConverter before execution of the pipeline."
"Please consider removing len()."
)
self._load_map()
return len(self._map) # type: ignore[arg-type]
def __getstate__(self):
if DILL_AVAILABLE:
dill_key_value_fn = dill.dumps(self.key_value_fn)
else:
dill_key_value_fn = self.key_value_fn
return (
self.datapipe,
dill_key_value_fn,
self._map,
)
def __setstate__(self, state):
(self.datapipe, dill_key_value_fn, self._map) = state
if DILL_AVAILABLE:
self.key_value_fn = dill.loads(dill_key_value_fn) # type: ignore[assignment]
else:
self.key_value_fn = dill_key_value_fn # type: ignore[assignment]
# Register for functional API
# See https://github.com/pytorch/data/issues/200
IterDataPipe.register_datapipe_as_function("to_map_datapipe", IterToMapConverterMapDataPipe)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from collections import OrderedDict
from typing import Callable, final, Iterator, List, Optional, Sequence, TypeVar
from torch.utils.data import functional_datapipe, IterDataPipe, MapDataPipe
from torch.utils.data.datapipes.iter.combining import _ChildDataPipe, _DemultiplexerIterDataPipe, _ForkerIterDataPipe
from torch.utils.data.datapipes.utils.common import _check_unpickable_fn
from torchdata.datapipes.utils.janitor import janitor
T_co = TypeVar("T_co", covariant=True)
T = TypeVar("T")
@functional_datapipe("zip_with_iter")
class IterKeyZipperIterDataPipe(IterDataPipe[T_co]):
r"""
Zips two IterDataPipes together based on the matching key (functional name: ``zip_with_iter``). The keys
are computed by ``key_fn`` and ``ref_key_fn`` for the two IterDataPipes, respectively. When there isn't a match
between the elements of the two IterDataPipes, the element from ``ref_datapipe`` is stored in a buffer. Then, the
next element from ``ref_datapipe`` is tried. After a match is found, the ``merge_fn`` determines how they will
be combined and returned (a tuple is generated by default).
Args:
source_datapipe: IterKeyZipper will yield data based on the order of this IterDataPipe
ref_datapipe: Reference IterDataPipe from which IterKeyZipper will find items
with matching key for ``source_datapipe``
key_fn: Callable function that will compute keys using elements from ``source_datapipe``
ref_key_fn: Callable function that will compute keys using elements from ``ref_datapipe``
If it's not specified, the ``key_fn`` will also be applied to elements from ``ref_datapipe``
keep_key: Option to yield the matching key along with the items in a tuple,
resulting in `(key, merge_fn(item1, item2))`.
buffer_size: The size of buffer used to hold key-data pairs from reference DataPipe until a match is found.
If it's specified as ``None``, the buffer size is set as infinite.
merge_fn: Function that combines the item from ``source_datapipe`` and the item from ``ref_datapipe``,
by default a tuple is created
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> from operator import itemgetter
>>> def merge_fn(t1, t2):
>>> return t1[1] + t2[1]
>>> dp1 = IterableWrapper([('a', 100), ('b', 200), ('c', 300)])
>>> dp2 = IterableWrapper([('a', 1), ('b', 2), ('c', 3), ('d', 4)])
>>> res_dp = dp1.zip_with_iter(dp2, key_fn=itemgetter(0),
>>> ref_key_fn=itemgetter(0), keep_key=True, merge_fn=merge_fn)
>>> list(res_dp)
[('a', 101), ('b', 202), ('c', 303)]
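>>> # An illustrative sketch of the default behavior: without ``merge_fn`` and ``keep_key``,
>>> # each matched pair is yielded as a tuple of the two original items.
>>> list(dp1.zip_with_iter(dp2, key_fn=itemgetter(0)))
[(('a', 100), ('a', 1)), (('b', 200), ('b', 2)), (('c', 300), ('c', 3))]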
"""
def __init__(
self,
source_datapipe: IterDataPipe,
ref_datapipe: IterDataPipe,
key_fn: Callable,
ref_key_fn: Optional[Callable] = None,
keep_key: bool = False,
buffer_size: int = 10000,
merge_fn: Optional[Callable] = None,
) -> None:
if not isinstance(ref_datapipe, IterDataPipe):
raise TypeError(f"ref_datapipe must be a IterDataPipe, but its type is {type(ref_datapipe)} instead.")
self.source_datapipe = source_datapipe
self.ref_datapipe = ref_datapipe
_check_unpickable_fn(key_fn)
self.key_fn = key_fn
if ref_key_fn is not None:
_check_unpickable_fn(ref_key_fn)
self.ref_key_fn = key_fn if ref_key_fn is None else ref_key_fn
self.keep_key = keep_key
if merge_fn is not None:
_check_unpickable_fn(merge_fn)
self.merge_fn = merge_fn
if buffer_size is not None and buffer_size <= 0:
raise ValueError("'buffer_size' is required to be either None or a positive integer.")
self.buffer_size: int = buffer_size
self.buffer: OrderedDict = OrderedDict()
def __iter__(self) -> Iterator:
ref_it = iter(self.ref_datapipe)
warn_once_flag = True
try:
for data in self.source_datapipe:
key = self.key_fn(data)
while key not in self.buffer:
try:
ref_data = next(ref_it)
except StopIteration:
raise BufferError(
f"No matching key can be found from reference DataPipe for the data {data}. "
"Please consider increasing the buffer size."
)
ref_key = self.ref_key_fn(ref_data)
if ref_key in self.buffer:
raise ValueError("Duplicate key is found in reference DataPipe")
if self.buffer_size is not None and len(self.buffer) > self.buffer_size:
if warn_once_flag:
warn_once_flag = False
warnings.warn(
"Buffer reaches the upper limit, so reference key-data pair begins to "
"be removed from buffer in FIFO order. Please consider increase buffer size."
)
self.buffer.popitem(last=False)
self.buffer[ref_key] = ref_data
res = self.merge_fn(data, self.buffer.pop(key)) if self.merge_fn else (data, self.buffer.pop(key))
if self.keep_key:
yield key, res
else:
yield res
finally:
del ref_it
# TODO(633): This should be Exception or warn when debug mode is enabled
if self.buffer:
for _, v in self.buffer.items():
janitor(v)
self.buffer.clear()
def __len__(self) -> int:
return len(self.source_datapipe)
@final
def reset(self) -> None:
self.buffer = OrderedDict()
def __getstate__(self):
state = (
self.source_datapipe,
self.ref_datapipe,
self.key_fn,
self.ref_key_fn,
self.keep_key,
self.merge_fn,
self.buffer_size,
)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(
self.source_datapipe,
self.ref_datapipe,
self.key_fn,
self.ref_key_fn,
self.keep_key,
self.merge_fn,
self.buffer_size,
) = state
self.buffer = OrderedDict()
def __del__(self):
if self.buffer:
for _, v in self.buffer.items():
janitor(v)
self.buffer.clear()
@functional_datapipe("zip_with_map")
class MapKeyZipperIterDataPipe(IterDataPipe[T_co]):
r"""
Joins the items from the source IterDataPipe with items from a MapDataPipe (functional name: ``zip_with_map``).
The matching is done by the provided ``key_fn``, which maps an item from ``source_iterdatapipe`` to
a key that should exist in the ``map_datapipe``. The return value is created by the ``merge_fn``, which returns
a tuple of the two items by default.
Args:
source_iterdatapipe: IterDataPipe from which items are yielded and will be combined with an item
from ``map_datapipe``
map_datapipe: MapDataPipe that takes a key from ``key_fn``, and returns an item
key_fn: Function that maps each item from ``source_iterdatapipe`` to a key that exists in ``map_datapipe``
keep_key: Option to yield the matching key along with the items in a tuple,
resulting in ``(key, merge_fn(item1, item2))``.
merge_fn: Function that combines the item from ``source_iterdatapipe`` and the matching item
from ``map_datapipe``, by default a tuple is created
Example:
.. testsetup::
from operator import itemgetter
.. testcode::
from torchdata.datapipes.iter import IterableWrapper
from torchdata.datapipes.map import SequenceWrapper
def merge_fn(tuple_from_iter, value_from_map):
return tuple_from_iter[0], tuple_from_iter[1] + value_from_map
dp1 = IterableWrapper([('a', 1), ('b', 2), ('c', 3)])
mapdp = SequenceWrapper({'a': 100, 'b': 200, 'c': 300, 'd': 400})
res_dp = dp1.zip_with_map(map_datapipe=mapdp, key_fn=itemgetter(0), merge_fn=merge_fn)
print(list(res_dp))
Output:
.. testoutput::
[('a', 101), ('b', 202), ('c', 303)]
"""
def __init__(
self,
source_iterdatapipe: IterDataPipe,
map_datapipe: MapDataPipe,
key_fn: Callable,
merge_fn: Optional[Callable] = None,
keep_key: bool = False,
):
if not isinstance(map_datapipe, MapDataPipe):
raise TypeError(f"map_datapipe must be a MapDataPipe, but its type is {type(map_datapipe)} instead.")
self.source_iterdatapipe: IterDataPipe = source_iterdatapipe
self.map_datapipe: MapDataPipe = map_datapipe
_check_unpickable_fn(key_fn)
self.key_fn: Callable = key_fn
if merge_fn is not None:
_check_unpickable_fn(merge_fn)
self.merge_fn: Optional[Callable] = merge_fn
self.keep_key = keep_key
def __iter__(self) -> Iterator:
for item in self.source_iterdatapipe:
key = self.key_fn(item)
try:
map_item = self.map_datapipe[key]
except (KeyError, IndexError):
raise KeyError(f"key_fn maps {item} to {key}, which is not a valid key in the given MapDataPipe.")
res = self.merge_fn(item, map_item) if self.merge_fn else (item, map_item)
if self.keep_key:
yield key, res
else:
yield res
def __len__(self) -> int:
return len(self.source_iterdatapipe)
def _drop_index(idx_data):
_, data = idx_data
return data
@functional_datapipe("round_robin_demux")
class RoundRobinDemultiplexerIterDataPipe(IterDataPipe):
r"""
Splits the input DataPipe into multiple child DataPipes in the round-robin order (functional name: ``round_robin_demux``).
A list of the child DataPipes is returned from this operation.
Args:
datapipe: Iterable DataPipe being filtered
num_instances: number of instances of the DataPipe to create
buffer_size: this defines the maximum number of inputs that the buffer can hold across all child
DataPipes while waiting for their values to be yielded.
Defaults to ``1000``. Use ``-1`` for the unlimited buffer.
Examples:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper(range(5))
>>> dp1, dp2 = source_dp.round_robin_demux(2)
>>> list(dp1)
[0, 2, 4]
>>> len(dp1)
3
>>> list(dp2)
[1, 3]
>>> len(dp2)
2
"""
def __new__(cls, datapipe: IterDataPipe, num_instances: int, buffer_size: int = 1000):
if num_instances < 1:
raise ValueError(f"Expected `num_instaces` larger than 0, but {num_instances} is found")
if num_instances == 1:
warnings.warn(
"The operation of `round_robin_demux` with `num_instances=1` is an no-op and returns the provided `datapipe` in a list directly"
)
return [datapipe]
datapipe = datapipe.enumerate()
container = _RoundRobinDemultiplexerIterDataPipe(datapipe, num_instances, buffer_size=buffer_size)
return [_ChildDataPipe(container, i).map(_drop_index) for i in range(num_instances)]
class _RoundRobinDemultiplexerIterDataPipe(_DemultiplexerIterDataPipe):
def __init__(self, datapipe: IterDataPipe[T_co], num_instances: int, buffer_size: int):
super().__init__(datapipe, num_instances, self._round_robin_fn, drop_none=False, buffer_size=buffer_size)
def _round_robin_fn(self, idx_data) -> int:
idx, _ = idx_data
return idx % self.num_instances
def get_length_by_instance(self, instance_id: int) -> int:
n = len(self.main_datapipe)
avg_length = n // self.num_instances
return avg_length + 1 if n - avg_length * self.num_instances > instance_id else avg_length
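# A worked illustration of the split arithmetic above (hypothetical numbers): with n = 5
# elements and num_instances = 2, avg_length = 2 and the remainder is 1, so instance 0
# reports a length of 3 and instance 1 reports a length of 2, matching the
# ``round_robin_demux`` docstring example.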
@functional_datapipe("unzip")
class UnZipperIterDataPipe(IterDataPipe[T]):
r"""
Takes in a DataPipe of Sequences, unpacks each Sequence, and returns the elements in separate DataPipes
based on their position in the Sequence (functional name: ``unzip``). The number of instances produced equals
the sequence length minus the number of columns to skip.
Note:
Each sequence within the DataPipe should have the same length, specified by
the input argument `sequence_length`.
Args:
source_datapipe: Iterable DataPipe with sequences of data
sequence_length: Length of the sequence within the source_datapipe. All elements should have the same length.
buffer_size: this restricts how far ahead the leading child DataPipe can read relative
to the slowest child DataPipe. Use -1 for the unlimited buffer.
columns_to_skip: optional indices of columns that the DataPipe should skip (each index should be
an integer from 0 to sequence_length - 1)
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper([(i, i + 10, i + 20) for i in range(3)])
>>> dp1, dp2, dp3 = source_dp.unzip(sequence_length=3)
>>> list(dp1)
[0, 1, 2]
>>> list(dp2)
[10, 11, 12]
>>> list(dp3)
[20, 21, 22]
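>>> # An illustrative sketch of ``columns_to_skip``: skipping column 1 yields two child
>>> # DataPipes that carry columns 0 and 2, respectively.
>>> dp_first, dp_last = source_dp.unzip(sequence_length=3, columns_to_skip=[1])
>>> list(dp_last)
[20, 21, 22]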
"""
def __new__(
cls,
source_datapipe: IterDataPipe[Sequence[T]],
sequence_length: int,
buffer_size: int = 1000,
columns_to_skip: Optional[Sequence[int]] = None,
):
if columns_to_skip is None:
instance_ids = list(range(sequence_length))
else:
skips = set(columns_to_skip)
instance_ids = [i for i in range(sequence_length) if i not in skips]
if len(instance_ids) == 0:
raise RuntimeError(
"All instances are being filtered out in UnZipperIterDataPipe. Please check"
"the input `sequence_length` and `columns_to_skip`."
)
# The implementation basically uses Forker but only yields a specific element within the sequence
container = _UnZipperIterDataPipe(source_datapipe, instance_ids, buffer_size) # type: ignore[arg-type]
return [_ChildDataPipe(container, i) for i in range(len(instance_ids))]
class _UnZipperIterDataPipe(_ForkerIterDataPipe):
def __init__(self, datapipe: IterDataPipe, instance_ids: List[int], buffer_size: int = 1000):
super().__init__(datapipe, len(instance_ids), buffer_size) # type: ignore[arg-type]
self.instance_ids = instance_ids
def get_next_element_by_instance(self, instance_id: int):
r"""
Note:
Each element returned from the source datapipe is required to be a sequence that can
be subscripted with a column index
"""
for return_val in super().get_next_element_by_instance(instance_id):
yield return_val[self.instance_ids[instance_id]]
def __getstate__(self):
state = super().__getstate__()
return (*state, self.instance_ids)
def __setstate__(self, state):
super().__setstate__(state[:-1])
self.instance_ids = state[-1]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import bz2
import gzip
import lzma
import os
import pathlib
import tarfile
import zipfile
from enum import Enum
from io import IOBase
from typing import Iterator, Optional, Tuple, Union
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
class CompressionType(Enum):
GZIP = "gzip"
LZMA = "lzma"
TAR = "tar"
ZIP = "zip"
BZIP2 = "bz2"
@functional_datapipe("decompress")
class DecompressorIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Takes tuples of path and compressed stream of data, and returns tuples of
path and decompressed stream of data (functional name: ``decompress``). The input compression format can be specified
or automatically detected based on the files' file extensions.
Args:
source_datapipe: IterDataPipe containing tuples of path and compressed stream of data
file_type: Optional `string` or ``CompressionType`` that represents the compression format of the inputs
Example:
>>> from torchdata.datapipes.iter import FileLister, FileOpener
>>> tar_file_dp = FileLister(self.temp_dir.name, "*.tar")
>>> tar_load_dp = FileOpener(tar_file_dp, mode="b")
>>> tar_decompress_dp = Decompressor(tar_load_dp, file_type="tar")
>>> for _, stream in tar_decompress_dp:
>>> print(stream.read())
b'0123456789abcdef'
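>>> # An illustrative sketch of automatic detection: when ``file_type`` is omitted, the
>>> # compression format is inferred from the file extension (e.g. ".gz", ".bz2", ".xz", ".zip", ".tar.gz").
>>> gz_decompress_dp = FileOpener(FileLister(".", "*.gz"), mode="b").decompress()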
"""
types = CompressionType
_DECOMPRESSORS = {
types.GZIP: lambda file: gzip.GzipFile(fileobj=file),
types.LZMA: lambda file: lzma.LZMAFile(file),
types.TAR: lambda file: tarfile.open(fileobj=file, mode="r:*"),
types.ZIP: lambda file: zipfile.ZipFile(file=file),
types.BZIP2: lambda file: bz2.BZ2File(filename=file),
}
def __init__(
self, source_datapipe: IterDataPipe[Tuple[str, IOBase]], file_type: Optional[Union[str, CompressionType]] = None
) -> None:
self.source_datapipe: IterDataPipe[Tuple[str, IOBase]] = source_datapipe
if isinstance(file_type, str):
file_type = self.types(file_type.lower())
self.file_type: Optional[CompressionType] = file_type
def _detect_compression_type(self, path: str) -> CompressionType:
if self.file_type:
return self.file_type
ext = "".join(pathlib.Path(path).suffixes)
if ext in {".tar.gz", ".tar.xz"}:
return self.types.TAR
else:
ext = os.path.splitext(path)[1]
if ext == ".tar":
return self.types.TAR
elif ext == ".xz":
return self.types.LZMA
elif ext == ".gz":
return self.types.GZIP
elif ext == ".zip":
return self.types.ZIP
elif ext == ".bz2":
return self.types.BZIP2
else:
raise RuntimeError(
f"File at {path} has file extension {ext}, which does not match what are supported by"
f"ExtractorIterDataPipe."
)
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for path, file in self.source_datapipe:
try:
file_type = self._detect_compression_type(path)
decompressor = self._DECOMPRESSORS[file_type]
yield path, StreamWrapper(decompressor(file), file, name=path)
finally:
if isinstance(file, StreamWrapper):
file.autoclose()
@functional_datapipe("extract")
class ExtractorIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Deprecated: please use ``Decompressor`` or ``.decompress`` instead.
"""
def __new__(
cls, source_datapipe: IterDataPipe[Tuple[str, IOBase]], file_type: Optional[Union[str, CompressionType]] = None
):
return DecompressorIterDataPipe(source_datapipe, file_type)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import threading
import time
from collections import deque
from typing import Deque, final, Optional, Sized
import torch
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import pin_memory_fn
PRODUCER_SLEEP_INTERVAL = 0.0001 # Interval between buffer fulfillment checks
CONSUMER_SLEEP_INTERVAL = 0.0001 # Interval between checking items availability in buffer
class _PrefetchData:
def __init__(self, source_datapipe, buffer_size: int):
self.run_prefetcher: bool = True
self.prefetch_buffer: Deque = deque()
self.buffer_size: int = buffer_size
self.source_datapipe = source_datapipe
self.stop_iteration: bool = False
self.paused: bool = False
@functional_datapipe("prefetch")
class PrefetcherIterDataPipe(IterDataPipe):
r"""
Prefetches elements from the source DataPipe and puts them into a buffer (functional name: ``prefetch``).
Prefetching performs the operations (e.g. I/O, computations) of the DataPipes up to this one ahead of time
and stores the result in the buffer, ready to be consumed by the subsequent DataPipe. It has no effect aside
from getting the sample ready ahead of time.
This is used by ``MultiProcessingReadingService`` when the arguments
``worker_prefetch_cnt`` (for prefetching at each worker process) or
``main_prefetch_cnt`` (for prefetching at the main loop) are greater than 0.
Beyond the built-in use cases, this can be useful to put after I/O DataPipes that have
expensive I/O operations (e.g. takes a long time to request a file from a remote server).
Args:
source_datapipe: IterDataPipe from which samples are prefetched
buffer_size: the size of the buffer which stores the prefetched samples
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(file_paths).open_files().prefetch(5)
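>>> # An illustrative sketch: ``buffer_size`` bounds how many samples are fetched ahead
>>> # of consumption by the downstream DataPipe.
>>> dp = IterableWrapper(range(100)).prefetch(buffer_size=10)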
"""
def __init__(self, source_datapipe, buffer_size: int = 10):
self.source_datapipe = source_datapipe
if buffer_size <= 0:
raise ValueError("'buffer_size' is required to be a positive integer.")
self.buffer_size = buffer_size
self.thread: Optional[threading.Thread] = None
self.prefetch_data: Optional[_PrefetchData] = None
@staticmethod
def thread_worker(prefetch_data: _PrefetchData):
itr = iter(prefetch_data.source_datapipe)
while not prefetch_data.stop_iteration:
# Run if not paused
while prefetch_data.run_prefetcher:
if len(prefetch_data.prefetch_buffer) < prefetch_data.buffer_size:
try:
item = next(itr)
prefetch_data.prefetch_buffer.append(item)
except Exception as e:
prefetch_data.run_prefetcher = False
prefetch_data.stop_iteration = True
prefetch_data.prefetch_buffer.append(e)
else: # Buffer is full, waiting for main thread to consume items
# TODO: Calculate sleep interval based on previous consumption speed
time.sleep(PRODUCER_SLEEP_INTERVAL)
prefetch_data.paused = True
# Sleep longer when this prefetcher thread is paused
time.sleep(PRODUCER_SLEEP_INTERVAL * 10)
def __iter__(self):
try:
prefetch_data = _PrefetchData(self.source_datapipe, self.buffer_size)
self.prefetch_data = prefetch_data
thread = threading.Thread(target=PrefetcherIterDataPipe.thread_worker, args=(prefetch_data,), daemon=True)
thread.start()
self.thread = thread
# Lazily import to prevent circular import
from torchdata.dataloader2 import communication
while not prefetch_data.stop_iteration or len(prefetch_data.prefetch_buffer) > 0:
if len(prefetch_data.prefetch_buffer) > 0:
data = prefetch_data.prefetch_buffer.popleft()
if isinstance(data, Exception):
if isinstance(data, (StopIteration, communication.iter.TerminateRequired)):
break
raise data
yield data
else:
time.sleep(CONSUMER_SLEEP_INTERVAL)
finally:
if "prefetch_data" in locals():
prefetch_data.run_prefetcher = False
prefetch_data.stop_iteration = True
prefetch_data.paused = False
if "thread" in locals():
thread.join()
def __getstate__(self):
"""
Getting state in a threading environment requires the following operations:
1) Stopping of the producer thread.
2) Saving buffer.
3) Adding a lazy restart of the producer thread when __next__ is called again
(this guarantees that the state of the source_datapipe only changes
after the entire state of the graph is saved).
"""
# TODO: Update __getstate__ and __setstate__ to support snapshotting and restoration
return {"source_datapipe": self.source_datapipe, "buffer_size": self.buffer_size}
def __setstate__(self, state):
self.source_datapipe = state["source_datapipe"]
self.buffer_size = state["buffer_size"]
self.thread = None
@final
def reset(self):
self.shutdown()
def pause(self):
if self.thread is not None:
assert self.prefetch_data is not None
self.prefetch_data.run_prefetcher = False
if self.thread.is_alive():
# Blocking until the thread is paused
while not self.prefetch_data.paused:
time.sleep(PRODUCER_SLEEP_INTERVAL * 10)
@final
def resume(self):
if (
self.thread is not None
and self.prefetch_data is not None
and (not self.prefetch_data.stop_iteration or len(self.prefetch_data.prefetch_buffer) > 0)
):
self.prefetch_data.run_prefetcher = True
self.prefetch_data.paused = False
@final
def shutdown(self):
if hasattr(self, "prefetch_data") and self.prefetch_data is not None:
self.prefetch_data.run_prefetcher = False
self.prefetch_data.stop_iteration = True
self.prefetch_data.paused = False
self.prefetch_data = None
if hasattr(self, "thread") and self.thread is not None:
self.thread.join()
self.thread = None
def __del__(self):
self.shutdown()
def __len__(self) -> int:
if isinstance(self.source_datapipe, Sized):
return len(self.source_datapipe)
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
@functional_datapipe("pin_memory")
class PinMemoryIterDataPipe(PrefetcherIterDataPipe):
r"""
Prefetches one element from the source DataPipe and moves it to pinned memory (functional name: ``pin_memory``).
When used with ``MultiProcessingReadingService``, this DataPipe would be kept in the main process to prevent
duplicated CUDA context creation.
Args:
source_datapipe: IterDataPipe from which samples are moved to pinned memory.
device: The device to pin samples.
pin_memory_fn: Optional callable function to move data to pinned memory.
A ``pin_memory_fn`` to handle general objects is provided by default.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(file_paths).open_files().readlines().map(tokenize_fn).pin_memory()
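>>> # An illustrative sketch (assuming a valid CUDA device): the target device can be
>>> # passed explicitly and is forwarded to ``pin_memory_fn``.
>>> dp = IterableWrapper(file_paths).open_files().readlines().map(tokenize_fn).pin_memory(device=torch.device("cuda:0"))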
"""
def __init__(self, source_datapipe, device=None, pin_memory_fn=pin_memory_fn):
if not torch.cuda.is_available():
raise RuntimeError("``pin_memory`` can only be used when CUDA is available.")
# TODO: Add support for dynamic buffer based on the available size of pinned memory
super().__init__(source_datapipe, buffer_size=2)
if device is None:
device = torch.cuda.current_device()
self.device = device
self.pin_memory_fn = pin_memory_fn
def is_replicable(self) -> bool:
return False
@staticmethod
def thread_worker(prefetch_data: _PrefetchData, pin_memory_fn, device): # type: ignore[override]
itr = iter(prefetch_data.source_datapipe)
while not prefetch_data.stop_iteration:
# Run if not paused
while prefetch_data.run_prefetcher:
if len(prefetch_data.prefetch_buffer) < prefetch_data.buffer_size:
try:
item = pin_memory_fn(next(itr), device)
prefetch_data.prefetch_buffer.append(item)
except Exception as e:
prefetch_data.run_prefetcher = False
prefetch_data.stop_iteration = True
prefetch_data.prefetch_buffer.append(e)
else: # Buffer is full, waiting for main thread to consume items
# TODO: Calculate sleep interval based on previous consumption speed
time.sleep(PRODUCER_SLEEP_INTERVAL)
# Sleep longer when this prefetcher thread is paused
time.sleep(PRODUCER_SLEEP_INTERVAL * 10)
def __iter__(self):
try:
prefetch_data = _PrefetchData(self.source_datapipe, self.buffer_size)
self.prefetch_data = prefetch_data
thread = threading.Thread(
target=PinMemoryIterDataPipe.thread_worker,
args=(prefetch_data, self.pin_memory_fn, self.device),
daemon=True,
)
thread.start()
self.thread = thread
# Lazily import to prevent circular import
from torchdata.dataloader2 import communication
while not prefetch_data.stop_iteration or len(prefetch_data.prefetch_buffer) > 0:
if len(prefetch_data.prefetch_buffer) > 0:
data = prefetch_data.prefetch_buffer.popleft()
if isinstance(data, Exception):
if isinstance(data, (StopIteration, communication.iter.TerminateRequired)):
break
raise data
yield data
else:
time.sleep(CONSUMER_SLEEP_INTERVAL)
finally:
if "prefetch_data" in locals():
prefetch_data.run_prefetcher = False
prefetch_data.stop_iteration = True
prefetch_data.paused = False
if "thread" in locals():
thread.join()
def __getstate__(self):
state = super().__getstate__()
state["pin_memory_fn"] = self.pin_memory_fn
state["device"] = self.device
return state
def __setstate__(self, state):
super().__setstate__(state)
self.pin_memory_fn = state["pin_memory_fn"]
self.device = state["device"]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Set, Sized
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
@functional_datapipe("mux_longest")
class MultiplexerLongestIterDataPipe(IterDataPipe):
r"""
Yields one element at a time from each of the input Iterable DataPipes (functional name: ``mux_longest``). As in,
one element from the 1st input DataPipe, then one element from the 2nd DataPipe in the next iteration,
and so on. It skips over DataPipes that are exhausted, and ends when all input DataPipes are exhausted.
Args:
datapipes: Iterable DataPipes that will take turn to yield their elements, until they are all exhausted
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25))
>>> list(dp1.mux_longest(dp2, dp3))
[0, 10, 20, 1, 11, 21, 2, 12, 22, 3, 13, 23, 4, 14, 24]
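>>> # An illustrative sketch with uneven inputs: exhausted DataPipes are skipped and the
>>> # remaining ones keep yielding until everything is exhausted.
>>> short_dp = IterableWrapper(range(2))
>>> list(short_dp.mux_longest(dp2))
[0, 10, 1, 11, 12, 13, 14]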
"""
def __init__(self, *datapipes):
self.datapipes = datapipes
def __iter__(self):
iterators = [iter(x) for x in self.datapipes]
finished: Set[int] = set()
while len(finished) < len(iterators):
for i in range(len(iterators)):
if i not in finished:
try:
value = next(iterators[i])
yield value
except StopIteration:
finished.add(i)
def __len__(self):
if all(isinstance(dp, Sized) for dp in self.datapipes):
return sum(len(dp) for dp in self.datapipes)
else:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import bz2
import warnings
from io import BufferedIOBase
from typing import Iterable, Iterator, Tuple
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
from torchdata.datapipes.utils.common import validate_pathname_binary_tuple
@functional_datapipe("load_from_bz2")
class Bz2FileLoaderIterDataPipe(IterDataPipe[Tuple[str, BufferedIOBase]]):
r"""
Decompresses bz2 binary streams from an Iterable DataPipe which contains tuples of
path name and bz2 binary streams, and yields a tuple of path name and extracted binary
stream (functional name: ``load_from_bz2``).
Args:
datapipe: Iterable DataPipe that provides tuples of path name and bz2 binary stream
length: Nominal length of the DataPipe
Note:
The opened file handles will be closed automatically if the default ``DecoderDataPipe``
is attached. Otherwise, the user is responsible for closing file handles explicitly
or letting Python's GC close them periodically.
Example:
>>> from torchdata.datapipes.iter import FileLister, FileOpener
>>> datapipe1 = FileLister(".", "*.bz2")
>>> datapipe2 = FileOpener(datapipe1, mode="b")
>>> bz2_loader_dp = datapipe2.load_from_bz2()
>>> for _, stream in bz2_loader_dp:
>>> print(stream.read())
b'0123456789abcdef'
"""
def __init__(self, datapipe: Iterable[Tuple[str, BufferedIOBase]], length: int = -1) -> None:
super().__init__()
self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe
self.length: int = length
def __iter__(self) -> Iterator[Tuple[str, BufferedIOBase]]:
for data in self.datapipe:
validate_pathname_binary_tuple(data)
pathname, data_stream = data
try:
extracted_fobj = bz2.open(data_stream, mode="rb") # type: ignore[call-overload]
# Strip the ".bz2" suffix rather than a trailing character set (rstrip would over-strip names ending in 'b', 'z', or '2')
new_pathname = pathname[: -len(".bz2")] if pathname.endswith(".bz2") else pathname
yield new_pathname, StreamWrapper(extracted_fobj, data_stream, name=new_pathname) # type: ignore[misc]
except Exception as e:
warnings.warn(f"Unable to extract files from corrupted bzip2 stream {pathname} due to: {e}, abort!")
raise e
finally:
if isinstance(data_stream, StreamWrapper):
data_stream.autoclose()
def __len__(self) -> int:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, final, Iterator, List, Tuple, TypeVar
from torch.utils.data.datapipes.utils.common import _check_unpickable_fn
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
T_co = TypeVar("T_co", covariant=True)
def _default_line_join(lines: List[str]) -> str:
return "\n".join(lines)
@functional_datapipe("lines_to_paragraphs")
class ParagraphAggregatorIterDataPipe(IterDataPipe[Tuple[str, str]]):
r"""
Aggregates lines of text from the same file into a single paragraph (functional name: ``lines_to_paragraphs``).
Specifically, this accepts a DataPipe consisting of tuples of a file name and a line. For each tuple,
it checks if the file name matches the file name from the previous tuple. If yes, it joins the current line
with the existing paragraph. If the file names do not match, the existing paragraph is yielded and a new
paragraph starts.
Args:
source_datapipe: a DataPipe with tuples of a file name and a line
joiner: a function that joins a list of lines together
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper(
>>> [("file1", "Line1"), ("file1", "Line2"), ("file2", "Line2,1"), ("file2", "Line2,2"), ("file2", "Line2,3")]
>>> )
>>> para_agg_dp = source_dp.lines_to_paragraphs(joiner=lambda ls: " ".join(ls))
>>> list(para_agg_dp)
[('file1', 'Line1 Line2'), ('file2', 'Line2,1 Line2,2 Line2,3')]
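>>> # An illustrative sketch of the default joiner, which joins lines with newlines:
>>> list(source_dp.lines_to_paragraphs())
[('file1', 'Line1\nLine2'), ('file2', 'Line2,1\nLine2,2\nLine2,3')]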
"""
def __init__(self, source_datapipe: IterDataPipe[Tuple[str, T_co]], joiner: Callable = _default_line_join) -> None:
self.source_datapipe: IterDataPipe[Tuple[str, T_co]] = source_datapipe
_check_unpickable_fn(joiner)
self.joiner: Callable = joiner
self.buffer: List = []
def __iter__(self) -> Iterator[Tuple[str, str]]:
prev_filename = None
for filename, line in self.source_datapipe:
if prev_filename is None:
prev_filename = filename
if line and prev_filename == filename:
self.buffer.append(line)
else:
if self.buffer:
yield prev_filename, self.joiner(self.buffer) # type: ignore[misc]
if line:
self.buffer = [line]
else:
self.buffer = []
prev_filename = filename
if self.buffer:
yield prev_filename, self.joiner(self.buffer) # type: ignore[misc]
@final
def reset(self) -> None:
self.buffer = []
def __getstate__(self):
state = (self.source_datapipe, self.joiner)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(self.source_datapipe, self.joiner) = state
self.buffer = []
def __del__(self):
self.buffer.clear()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import threading
import time
from collections import deque
from concurrent.futures import Future, ThreadPoolExecutor, TimeoutError
from dataclasses import dataclass
from functools import partial
from typing import Callable, Deque, final, Iterator, Optional, TypeVar
import torch
import torch.distributed as dist
from torchdata._constants import default_timeout_in_s
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.iter.util.prefetcher import PRODUCER_SLEEP_INTERVAL
T_co = TypeVar("T_co", covariant=True)
__all__ = ["Expected", "FullSyncIterDataPipe", "PrefetchTimeoutError"]
class PrefetchTimeoutError(RuntimeError):
def __init__(self, timeout: int) -> None:
super().__init__(f"Fail to fetch data within {timeout} seconds")
self.timeout = timeout
class _EndOfPrefetch:
...
@dataclass
class Expected:
r"""
Expected data provided to callback function in ``_PrefetchExecutor``.
"""
index: int
error: Optional[BaseException] = None
def has_error(self) -> bool:
return self.error is not None
class _PrefetchExecutor:
# TODO: Improvement - merge with the `_PrefetchData` class of prefetcher.py
# May not be possible right now due to circular import
def __init__(
self,
datapipe_iterator: Iterator,
prefetch_size: int = 1,
callback_fn: Optional[Callable[[Expected], None]] = None,
timeout: int = default_timeout_in_s,
) -> None:
self.datapipe_iterator = datapipe_iterator
self.prefetch_size = prefetch_size
self.callback_fn = callback_fn
self.timeout = timeout
# Use max_workers as 1 to guarantee the order of data fetched from iterator
self._executor = ThreadPoolExecutor(max_workers=1)
self._futures: Deque[Future] = deque()
self._lock = threading.RLock()
# `_end_flag` indicates the end of epoch or an exception has been raised,
# with the exception being handled by `callback_fn`
self._end_flag: bool = False
self._paused: bool = False
self._is_shutdown: bool = False # indicates if `_executor` has been shutdown by `shutdown` method
self._idx = 0
for _ in range(prefetch_size):
with self._lock:
if self._end_flag:
break
fetch_future: Future = self._executor.submit(self.fetch_next)
fetch_future.add_done_callback(partial(self._done_callback_fn, self._idx))
self._futures.append(fetch_future)
with self._lock:
self._idx += 1
def fetch_next(self):
while self._paused:
time.sleep(PRODUCER_SLEEP_INTERVAL * 10)
return next(self.datapipe_iterator)
def _done_callback_fn(self, index: int, f: Future):
if f.exception():
with self._lock:
self._end_flag = True
if self.callback_fn is not None:
# Invoke `callback_fn` in order to set `FullSyncDP._done_callback` to `True`
self.callback_fn(Expected(index, f.exception()))
def return_next(self):
if self._futures:
fetch_future = self._futures.popleft()
try:
data = fetch_future.result(timeout=self.timeout)
except TimeoutError:
raise PrefetchTimeoutError(self.timeout)
with self._lock:
if not self._end_flag and not self._is_shutdown:
next_future = self._executor.submit(self.fetch_next)
next_future.add_done_callback(partial(self._done_callback_fn, self._idx))
self._futures.append(next_future)
self._idx += 1
else:
data = _EndOfPrefetch()
return data
def shutdown(self):
self._paused = False
self._is_shutdown = True
while self._futures:
self._futures.popleft().cancel()
self._executor.shutdown(wait=True)
def pause(self):
self._paused = True
def resume(self):
self._paused = False
@functional_datapipe("fullsync")
class FullSyncIterDataPipe(IterDataPipe[T_co]):
r"""
Synchronizes data across distributed processes to prevent hanging during training,
which is caused by unevenly sharded data (functional name: ``fullsync``). It stops
when the shortest distributed shard is exhausted. It is appended to the end
of the ``DataPipe`` graph by ``DistributedReadingService`` automatically.
Args:
datapipe: IterDataPipe that needs to be synchronized
timeout: Timeout for prefetching data in seconds. Default value equals 30 minutes
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> # Distributed training with world size 2
>>> world_size = 2
>>> dp = IterableWrapper(list(range(23))).sharding_filter()
>>> torch.utils.data.graph_settings.apply_sharding(dp, world_size, rank)
>>> # Rank 0 has 12 elements; Rank 1 has 11 elements
>>> for d in dp:
... model(d) # Hanging at the end of epoch due to uneven sharding
>>> dp = dp.fullsync()
>>> # Both ranks have 11 elements
>>> for d in dp:
... model(d) # Not hanging anymore
"""
def __init__(self, datapipe: IterDataPipe, timeout=default_timeout_in_s):
if not dist.is_available():
raise RuntimeError("Torch Distributed is required to be available")
self.datapipe = datapipe
self.timeout: int = timeout
self._process_group: Optional[dist.ProcessGroup] = None
self._world_size: int = 1
self._lock = threading.RLock()
self._cv = threading.Condition(lock=self._lock)
self._executor: Optional[_PrefetchExecutor] = None
# Use single values rather than deques for the following variables
# because fullsync only prefetches 1 element
self._error = None
self._sync_counter = torch.tensor([0], dtype=torch.int32)
self._done_callback = False
def _callback_fn(self, exp: Expected) -> None:
with self._cv:
if exp.has_error():
if not isinstance(exp.error, StopIteration):
self._error = exp.error # type: ignore[assignment]
self._sync_counter = torch.tensor([0], dtype=torch.int32)
else:
self._sync_counter = torch.tensor([1], dtype=torch.int32)
dist.all_reduce(
tensor=self._sync_counter,
op=dist.ReduceOp.SUM,
group=self._process_group,
)
self._done_callback = True
self._cv.notify()
def __iter__(self) -> Iterator[T_co]:
assert self._executor is None
if not (dist.is_available() and dist.is_initialized()):
raise RuntimeError("Torch Distributed is required to be initialized to use `FullSync`.")
if self._process_group is None:
self._process_group = dist.new_group(backend="gloo")
self._world_size = dist.get_world_size()
if self._world_size == 1: # The below functionalities are not needed if `_world_size == 1`
yield from self.datapipe
return
self._executor = _PrefetchExecutor(iter(self.datapipe), 1, self._callback_fn, self.timeout)
while True:
with self._cv:
is_success = self._cv.wait_for(
lambda: self._done_callback is True,
self.timeout,
)
if not is_success:
raise PrefetchTimeoutError(self.timeout)
if self._error is not None:
raise self._error
if bool(self._sync_counter < self._world_size):
break
self._done_callback = False
data = self._executor.return_next() # type: ignore[attr-defined]
if isinstance(data, _EndOfPrefetch):
break
yield data
@final
def reset(self):
if self._executor is not None:
self._executor.shutdown()
self._executor = None
self._world_size = 1
with self._cv:
self._error = None
self._sync_counter = torch.tensor([0], dtype=torch.int32)
self._done_callback = False
def is_replicable(self):
return False
def __getstate__(self):
state = (
self.datapipe,
self.timeout,
)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
self.datapipe, self.timeout = state
self._process_group = None
self._world_size = 1
self._lock = threading.RLock()
self._cv = threading.Condition(lock=self._lock)
self._executor = None
self._error = None
self._sync_counter = torch.tensor([0], dtype=torch.int32)
self._done_callback = False
@final
def pause(self):
if self._executor is not None:
self._executor.pause()
@final
def resume(self):
if self._executor is not None:
self._executor.resume()
@final
def shutdown(self):
if self._executor is not None:
self._executor.shutdown()
self._executor = None
def __del__(self):
self.shutdown()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Callable, Iterator, Optional, Tuple, Union
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
U = Union[bytes, bytearray, str]
@functional_datapipe("save_to_disk")
class SaverIterDataPipe(IterDataPipe[str]):
r"""
Takes in a DataPipe of tuples of metadata and data, saves the data
to the target path generated by the ``filepath_fn`` and metadata, and yields file path on local file
system (functional name: ``save_to_disk``).
Args:
source_datapipe: Iterable DataPipe with tuples of metadata and data
mode: Mode in which the file will be opened to write the data (``"w"`` by default)
filepath_fn: Function that takes in metadata and returns the target path of the new file
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> import os
>>> def filepath_fn(name: str) -> str:
>>> return os.path.join(".", os.path.basename(name))
>>> name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"}
>>> source_dp = IterableWrapper(sorted(name_to_data.items()))
>>> saver_dp = source_dp.save_to_disk(filepath_fn=filepath_fn, mode="wb")
>>> res_file_paths = list(saver_dp)
>>> res_file_paths
['./1.txt', './2.txt', './3.txt']
"""
def __init__(
self,
source_datapipe: IterDataPipe[Tuple[Any, U]],
mode: str = "w",
filepath_fn: Optional[Callable] = None,
):
self.source_datapipe: IterDataPipe[Tuple[Any, U]] = source_datapipe
self.mode: str = mode if "w" in mode else "w" + mode
self.fn: Optional[Callable] = filepath_fn
def __iter__(self) -> Iterator[str]:
for filepath, data in self.source_datapipe:
if self.fn is not None:
filepath = self.fn(filepath)
dirname = os.path.dirname(filepath)
if not os.path.exists(dirname):
os.makedirs(dirname)
# with portalocker.Lock(filepath, self.mode, flags=portalocker.LockFlags.EXCLUSIVE) as f:
# TODO(639): Enabling line above will require all read sites to be updated (Win).
with open(filepath, self.mode) as f:
f.write(data)
yield filepath
def __len__(self) -> int:
return len(self.source_datapipe)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import io
import os.path
from typing import Iterator, Tuple
from unittest.mock import patch
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
from torchdata.datapipes.utils.common import validate_pathname_binary_tuple
class RarfilePatcher:
def __init__(self):
from rarfile import DirectReader
unpatched_read = DirectReader._read
def patched_read(self, cnt=-1):
self._fd.seek(self._inf.header_offset, 0)
self._cur = self._parser._parse_header(self._fd)
self._cur_avail = self._cur.add_size
return unpatched_read(self, cnt)
self._patch = patch("rarfile.DirectReader._read", new=patched_read)
def start(self):
self._patch.start()
def stop(self):
self._patch.stop()
_PATCHED = False
@functional_datapipe("load_from_rar")
class RarArchiveLoaderIterDataPipe(IterDataPipe[Tuple[str, io.BufferedIOBase]]):
r"""
Decompresses rar binary streams from input Iterable Datapipes which contains tuples of path name and rar
binary stream, and yields a tuple of path name and extracted binary stream (functional name: ``load_from_rar``).
Note:
The nested RAR archive is not supported by this DataPipe
due to the limitation of the archive type. Please extract
outer RAR archive before reading the inner archive.
Args:
datapipe: Iterable DataPipe that provides tuples of path name and rar binary stream
length: Nominal length of the DataPipe
Example:
>>> from torchdata.datapipes.iter import FileLister, FileOpener
>>> datapipe1 = FileLister(".", "*.rar")
>>> datapipe2 = FileOpener(datapipe1, mode="b")
>>> rar_loader_dp = datapipe2.load_from_rar()
>>> for _, stream in rar_loader_dp:
>>> print(stream.read())
b'0123456789abcdef'
"""
def __init__(self, datapipe: IterDataPipe[Tuple[str, io.BufferedIOBase]], *, length: int = -1):
try:
import rarfile
except ImportError as error:
raise ModuleNotFoundError(
"Package `rarfile` is required to be installed to use this datapipe. "
"Please use `pip install rarfile` or `conda -c conda-forge install rarfile` to install it."
) from error
# check if at least one system library for reading rar archives is available to be used by rarfile
rarfile.tool_setup()
self.datapipe = datapipe
self.length = length
def __iter__(self) -> Iterator[Tuple[str, io.BufferedIOBase]]:
import rarfile
global _PATCHED
if not _PATCHED:
patcher = RarfilePatcher()
patcher.start()
_PATCHED = True
for data in self.datapipe:
try:
validate_pathname_binary_tuple(data)
path, stream = data
if isinstance(stream, rarfile.RarExtFile) or (
isinstance(stream, StreamWrapper) and isinstance(stream.file_obj, rarfile.RarExtFile)
):
raise ValueError(
f"Nested RAR archive is not supported by {type(self).__name__}. Please extract outer archive first."
)
rar = rarfile.RarFile(stream)
for info in rar.infolist():
if info.is_dir():
continue
inner_path = os.path.join(path, info.filename)
file_obj = rar.open(info)
yield inner_path, StreamWrapper(file_obj, stream, name=path) # type: ignore[misc]
finally:
if isinstance(stream, StreamWrapper):
stream.autoclose()
def __len__(self) -> int:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Iterator, List, Optional, Set, Sized, Tuple
from torch.utils.data.datapipes._decorator import functional_datapipe
from torch.utils.data.datapipes.datapipe import IterDataPipe
@functional_datapipe("zip_longest")
class ZipperLongestIterDataPipe(IterDataPipe):
r"""
Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip_longest``).
The output does not stop until all input DataPipes are exhausted. If any input DataPipe is exhausted,
its missing values are filled in with ``fill_value`` (default value is None).
Args:
*datapipes: Iterable DataPipes being aggregated
fill_value: Value used to fill in missing values from exhausted DataPipes. Default value is None.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp1, dp2, dp3 = IterableWrapper(range(3)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25))
>>> list(dp1.zip_longest(dp2, dp3))
[(0, 10, 20), (1, 11, 21), (2, 12, 22), (None, 13, 23), (None, 14, 24)]
>>> list(dp1.zip_longest(dp2, dp3, fill_value=-1))
[(0, 10, 20), (1, 11, 21), (2, 12, 22), (-1, 13, 23), (-1, 14, 24)]
"""
datapipes: Tuple[IterDataPipe]
length: Optional[int]
fill_value: Any
def __init__(
self,
*datapipes: IterDataPipe,
fill_value: Any = None,
):
if not all(isinstance(dp, IterDataPipe) for dp in datapipes):
raise TypeError("All inputs are required to be `IterDataPipe` " "for `ZipperLongestIterDataPipe`.")
super().__init__()
self.datapipes = datapipes # type: ignore[assignment]
self.fill_value = fill_value
def __iter__(self) -> Iterator[Tuple]:
iterators = [iter(x) for x in self.datapipes]
finished: Set[int] = set()
while len(finished) < len(iterators):
values: List[Any] = []
for i in range(len(iterators)):
value = self.fill_value
if i not in finished:
try:
value = next(iterators[i])
except StopIteration:
finished.add(i)
if len(finished) == len(iterators):
return
values.append(value)
yield tuple(values)
def __len__(self) -> int:
if all(isinstance(dp, Sized) for dp in self.datapipes):
return max(len(dp) for dp in self.datapipes)
else:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
from typing import Dict, IO, Iterator, Tuple
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
@functional_datapipe("parse_json_files")
class JsonParserIterDataPipe(IterDataPipe[Tuple[str, Dict]]):
r"""
Reads from JSON data streams and yields a tuple of file name and JSON data (functional name: ``parse_json_files``).
Args:
source_datapipe: a DataPipe with tuples of file name and JSON data stream
kwargs: keyword arguments that will be passed through to ``json.loads``
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, FileOpener
>>> import os
>>> def get_name(path_and_stream):
>>> return os.path.basename(path_and_stream[0]), path_and_stream[1]
>>> datapipe1 = IterableWrapper(["empty.json", "1.json", "2.json"])
>>> datapipe2 = FileOpener(datapipe1, mode="b")
>>> datapipe3 = datapipe2.map(get_name)
>>> json_dp = datapipe3.parse_json_files()
>>> list(json_dp)
[('1.json', ['foo', {'bar': ['baz', None, 1.0, 2]}]), ('2.json', {'__complex__': True, 'real': 1, 'imag': 2})]
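>>> # An illustrative sketch: keyword arguments are forwarded to ``json.loads``, e.g.
>>> # parsing floats as ``Decimal``.
>>> from decimal import Decimal
>>> json_dp2 = datapipe3.parse_json_files(parse_float=Decimal)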
"""
def __init__(self, source_datapipe: IterDataPipe[Tuple[str, IO]], **kwargs) -> None:
self.source_datapipe: IterDataPipe[Tuple[str, IO]] = source_datapipe
self.kwargs = kwargs
def __iter__(self) -> Iterator[Tuple[str, Dict]]:
for file_name, stream in self.source_datapipe:
data = stream.read()
stream.close()
yield file_name, json.loads(data, **self.kwargs)
def __len__(self) -> int:
return len(self.source_datapipe)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import inspect
import os.path
import sys
import time
import uuid
import warnings
from collections import deque
from functools import partial
from typing import Any, Callable, Deque, Dict, Iterator, List, Optional, Tuple, TypeVar
try:
import portalocker
except ImportError:
portalocker = None
from torch.utils.data.datapipes.utils.common import _check_unpickable_fn, DILL_AVAILABLE
from torch.utils.data.graph import traverse_dps
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterableWrapper, IterDataPipe
if DILL_AVAILABLE:
import dill
dill.extend(use_dill=False)
def _assert_portalocker() -> None:
try:
import portalocker # noqa: F401
except ImportError as e:
if os.name == "nt" and str(e).startswith("DLL load failed while importing"):
print(
"Please take a look at FAQ in https://github.com/pytorch/data#frequently-asked-questions-faq"
"for the solution of this Error."
)
raise
else:
raise ModuleNotFoundError(
"Package `portalocker` is required to be installed to use this datapipe."
"Please use `pip install 'portalocker>=2.0.0'` or"
"`conda install -c conda-forge 'portalocker>=2/0.0'`"
"to install the package"
)
T_co = TypeVar("T_co", covariant=True)
PROMISE_FILE_DELETE_TIMEOUT = 30
PROMISE_FILE_DELETE_RETRY_INTERVAL = 0.005
from enum import IntEnum
class CacheState(IntEnum):
UNCACHED = 0
CACHED_SINGLE_ENTITY = 1
CACHED_MULTIPLE_ENTITIES = 2
@functional_datapipe("in_memory_cache")
class InMemoryCacheHolderIterDataPipe(IterDataPipe[T_co]):
r"""
Stores elements from the source DataPipe in memory, up to a size limit
if specified (functional name: ``in_memory_cache``). This cache is FIFO - once the cache is full,
further elements will not be added to the cache until the previous ones are yielded and popped off from the cache.
Args:
source_dp: source DataPipe from which elements are read and stored in memory
size: The maximum size (in megabytes) that this DataPipe can hold in memory. This defaults to unlimited.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper(range(10))
>>> cache_dp = source_dp.in_memory_cache(size=5)
>>> list(cache_dp)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
size: Optional[int] = None
idx: int
def __init__(self, source_dp: IterDataPipe[T_co], size: Optional[int] = None) -> None:
self.source_dp: IterDataPipe[T_co] = source_dp
# cache size in MB
if size is not None:
self.size = size * 1024 * 1024
self.cache: Optional[Deque] = None
self.idx: int = 0
def __iter__(self) -> Iterator[T_co]:
if self.cache:
if self.idx > 0:
for idx, data in enumerate(self.source_dp):
if idx < self.idx:
yield data
else:
break
yield from self.cache
else:
# Local cache
cache: Deque = deque()
idx = 0
for data in self.source_dp:
cache.append(data)
# Cache reaches limit
if self.size is not None and sys.getsizeof(cache) > self.size:
cache.popleft()
idx += 1
yield data
self.cache = cache
self.idx = idx
def __len__(self) -> int:
try:
return len(self.source_dp)
except TypeError:
if self.cache:
return self.idx + len(self.cache)
else:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length until the cache is loaded.")
def _generator_to_list(gen_fn):
def list_fn(*args, **kwargs):
gen = gen_fn(*args, **kwargs)
return list(gen)
return list_fn
def _hash_check(filepath, hash_dict, hash_type):
if filepath not in hash_dict:
return False
if hash_type == "sha256":
hash_func = hashlib.sha256()
else:
hash_func = hashlib.md5()
# with portalocker.Lock(filepath, "rb", flags=portalocker.LockFlags.SHARED) as f:
# TODO(634): Line above will require all readers (Win) to obtain proper locks,
# I'm putting it on hold as we need to modify PyTorch core codebase heavily.
with open(filepath, "rb") as f:
chunk = f.read(1024 ** 2)
while chunk:
hash_func.update(chunk)
chunk = f.read(1024 ** 2)
return hash_func.hexdigest() == hash_dict[filepath]
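# A minimal illustration (hypothetical values) of the ``hash_dict`` layout expected above:
# keys are file paths and values are the hex digests to compare against, e.g.
#   hash_dict = {"/tmp/archive.tar.gz": "9b7d6f..."}
#   _hash_check("/tmp/archive.tar.gz", hash_dict, "sha256")  # True only when the digests match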
def _promise_filename(filename, cache_uuid):
return filename + ".promise." + str(cache_uuid)
@functional_datapipe("on_disk_cache")
class OnDiskCacheHolderIterDataPipe(IterDataPipe):
"""
Caches the outputs of multiple DataPipe operations to local files, which are
typically performance bottlenecks, such as downloading and decompression (functional name: ``on_disk_cache``).
Must use ``.end_caching()`` to stop tracing the sequence of DataPipe operations and save the results to local files.
Args:
source_datapipe: IterDataPipe
filepath_fn: Given data from ``source_datapipe``, returns file path(s) on local file system.
Only a single file path is allowed as the output of the function.
If the resulting file name differs from the one generated by the filename function of ``end_caching``,
the original file name is used to store the list of yielded files (and to check cached item availability).
hash_dict: A Dictionary mapping file names to their corresponding hashes. If ``hash_dict`` is specified,
the extra hash check will be performed before saving data to the local file system. If the data
doesn't match the hash, the pipeline will raise an error.
hash_type: The type of hash function to apply
extra_check_fn: Optional function to carry out extra validation on
the given file path from ``filepath_fn``.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, HttpReader
>>> url = IterableWrapper(["https://path/to/filename", ])
>>> def _filepath_fn(url):
>>> temp_dir = tempfile.gettempdir()
>>> return os.path.join(temp_dir, os.path.basename(url))
>>> hash_dict = {"expected_filepath": expected_MD5_hash}
>>> cache_dp = url.on_disk_cache(filepath_fn=_filepath_fn, hash_dict=_hash_dict, hash_type="md5")
>>> # You must call ``.end_caching`` at a later point to stop tracing and save the results to local files.
>>> cache_dp = HttpReader(cache_dp).end_caching(mode="wb", filepath_fn=_filepath_fn)
"""
_temp_dict: Dict = {}
def __init__(
self,
source_datapipe: IterDataPipe,
filepath_fn: Optional[Callable] = None,
hash_dict: Optional[Dict[str, str]] = None,
hash_type: str = "sha256",
extra_check_fn: Optional[Callable[[str], bool]] = None,
):
_assert_portalocker()
self.source_datapipe = source_datapipe
if filepath_fn is not None:
_check_unpickable_fn(filepath_fn)
assert not inspect.isgeneratorfunction(filepath_fn) # BC breaking, now only str is accepted as return
if hash_dict is not None and hash_type not in ("sha256", "md5"):
raise ValueError("Invalid hash_type requested, should be one of {}".format(("sha256", "md5")))
# TODO(VitalyFedyunin): We need some way to generate pipe uuids which will have similar result for
# same graph but different nodes of distributed system
self._uuid = uuid.uuid4()
OnDiskCacheHolderIterDataPipe._temp_dict[self] = (filepath_fn, hash_dict, hash_type, extra_check_fn, self._uuid)
self._end_caching_flag: bool = False
self._download_everything = False  # This is an internal field used for load testing only
def __iter__(self):
if self._end_caching_flag:
yield from self.source_datapipe
else:
# In case of BC breaking, use RuntimeError for now. Warning is another option
raise RuntimeError("Please call `end_caching()` before iteration.")
def __add__(self, other_datapipe):
raise RuntimeError("`OnDiskCacheHolder` doesn't support add operation")
# Since Demux is using this function, we should not attach it to OnDiskCacheHolder instance.
# Otherwise, it would cause infinite recursion in graph traversal
@staticmethod
def _cache_check_fn(data, filepath_fn, hash_dict, hash_type, extra_check_fn, cache_uuid):
filepath = data if filepath_fn is None else filepath_fn(data)
assert not isinstance(filepath, (list, tuple)) # BC breaking, now only str is accepted as return
result = CacheState.CACHED_SINGLE_ENTITY
cached_file_exists = True
if os.path.exists(_get_list_filename(filepath)):
return int(CacheState.CACHED_MULTIPLE_ENTITIES)
if not os.path.exists(filepath):
cached_file_exists = False
elif hash_dict is not None and not _hash_check(filepath, hash_dict, hash_type):
# TODO: It is safer to assume that entire cache is compromised and require user to wipe it
cached_file_exists = False
elif extra_check_fn is not None and not extra_check_fn(filepath):
# TODO: It is safer to assume that entire cache is compromised and require user to wipe it
cached_file_exists = False
if not cached_file_exists:
promise_filepath = _promise_filename(filepath, cache_uuid)
dirname = os.path.dirname(promise_filepath)
if not os.path.exists(dirname):
os.makedirs(dirname)
with portalocker.Lock(promise_filepath, "a+", flags=portalocker.LockFlags.EXCLUSIVE) as promise_fh:
promise_fh.seek(0)
data = promise_fh.read()
# TODO(635): Potentially there is an old .promise file from a previous failed run; we
# need to somehow propagate a unique session id for the dataloader, save and compare it here,
# raising an error on mismatch
file_exists = len(data) > 0
if not file_exists:
result = CacheState.UNCACHED
promise_fh.seek(0)
promise_fh.write("[dataloader session uid]")
promise_fh.truncate()
promise_fh.flush()
return int(result)
def _end_caching(self):
filepath_fn, hash_dict, hash_type, extra_check_fn, cache_uuid = OnDiskCacheHolderIterDataPipe._temp_dict.pop(
self
)
todo_dp: Any
cached_dp: Any
one_many_cached_dp: Any
if self._download_everything:
todo_dp = self.source_datapipe
cached_dp = IterableWrapper([])
one_many_cached_dp = IterableWrapper([])
else:
todo_dp, cached_dp, one_many_cached_dp = self.source_datapipe.demux(
3,
partial(
OnDiskCacheHolderIterDataPipe._cache_check_fn,
filepath_fn=filepath_fn,
hash_dict=hash_dict,
hash_type=hash_type,
extra_check_fn=extra_check_fn,
cache_uuid=cache_uuid,
),
)
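# The classifier routes each element by its cache state: uncached items go to ``todo_dp``,
# items cached as a single file go to ``cached_dp``, and items whose cache is stored as a
# list of files (one-to-many) go to ``one_many_cached_dp``.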
# Cached: keep filepath(s)
cached_dp = cached_dp.map(fn=filepath_fn)
one_many_cached_dp = one_many_cached_dp.map(fn=filepath_fn)
one_many_cached_dp = _ExtractFilesFromList(one_many_cached_dp)
self.source_datapipe = todo_dp.memory_cell()
self._end_caching_flag = True
return cached_dp, one_many_cached_dp
def _read_bytes(fd):
return b"".join(fd)
def _read_str(fd):
return "".join(fd)
def _is_promise_pending(promise_filename):
return os.path.exists(promise_filename)
class _WaitPendingCacheItemIterDataPipe(IterDataPipe):
def __init__(self, source_datapipe, timeout=300, input_col=None, cache_uuid=None):
self.source_datapipe = source_datapipe
self.timeout = timeout
self.input_col = input_col
self._cache_uuid = cache_uuid
def set_timeout(self, timeout):
self.timeout = timeout
def __iter__(self):
for data in self.source_datapipe:
if self.input_col is not None:
filename = data[self.input_col]
else:
filename = data
promise_filename = _promise_filename(filename, self._cache_uuid)
start = time.time()
while _is_promise_pending(promise_filename):
time.sleep(0.01)
if time.time() - start > self.timeout:
raise Exception(
f"OnDiskCache Exception: {filename} expected to be written by different process, "
+ f"but file is not ready in {self.timeout} seconds."
)
yield data
@functional_datapipe("memory_cell")
class _MemoryCellIterDataPipe(IterDataPipe):
def __init__(self, source_datapipe, remember_elements=1000):
self.source_datapipe = source_datapipe
self.buffer: List[Optional[Tuple[Any, Any]]] = [None for i in range(remember_elements)]
self.remember_elements = remember_elements
self.buffer_pos = -1
# TODO(VitalyFedyunin): Make it friendly to save/restore state
def __iter__(self):
for item in self.source_datapipe:
item_id = uuid.uuid4()
self.buffer_pos = (self.buffer_pos + 1) % self.remember_elements
self.buffer[self.buffer_pos] = (item_id, item)
yield item
def get_last(self):
# Returns a tuple (autogenerated id, value) for the most recently yielded row
return self.buffer[self.buffer_pos]
def get_buffer(self):
# Returns (id, value) tuples, starting with the most recently yielded and going back to the oldest remembered one.
result = []
for i in range(self.remember_elements):
idx = (self.buffer_pos - i) % self.remember_elements
if self.buffer[idx] is not None:
result.append(self.buffer[idx])
return result
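# Hedged usage sketch (hypothetical values, not part of the module): the memory cell passes
# items through unchanged while remembering the last ``remember_elements`` of them together
# with autogenerated ids.
def _example_memory_cell():
    from torchdata.datapipes.iter import IterableWrapper
    dp = IterableWrapper(range(5)).memory_cell(remember_elements=3)
    assert list(dp) == [0, 1, 2, 3, 4]
    _last_id, last_value = dp.get_last()  # last_value == 4
    buffered_values = [value for _, value in dp.get_buffer()]  # [4, 3, 2], newest first
    return last_value, buffered_values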
def _get_list_filename(file_name):
return file_name + ".torchdata_list"
class _ExtractFilesFromList(IterDataPipe):
def __init__(self, source_datapipe):
self.source_datapipe = source_datapipe
def __iter__(self):
for filename in self.source_datapipe:
with open(_get_list_filename(filename)) as fh:
for line in fh:
inner_file_name = line.rstrip()
yield filename, inner_file_name
class _FulfilledPromisesIterDataPipe(IterDataPipe):
def __init__(self, source_datapipe, memory_cell_dp, first_filepath_fn, cache_uuid):
self.source_datapipe = source_datapipe
self.memory_cell_dp = memory_cell_dp
self.first_filepath_fn = first_filepath_fn
self._cache_uuid = cache_uuid
@staticmethod
def _del_promise_file(promise_filename, filename):
if os.path.exists(promise_filename):
retry = True
start = time.time()
while retry:
retry = False
try:
os.unlink(promise_filename)
except Exception as e:
# Workaround for Windows not allowing a file to be deleted while it is open in another process
retry = True
if time.time() - start > PROMISE_FILE_DELETE_TIMEOUT:
raise Exception("Timeout while trying to recover from the ", type(e), e)
time.sleep(PROMISE_FILE_DELETE_RETRY_INTERVAL)
else:
warnings.warn(
f"Attempt to mark {promise_filename} promise (base of file {filename}) as fulfilled failed. Potentially missmatching filename functions of on_disk_cache and end_cache."
)
def __iter__(self):
last_record_uuid = None
one_to_many_detected = False
one_to_one_detected = False
def fulfill_old_promises(buffer, last_record_uuid, first_filepath_fn, cache_uuid):
for old_rec_uuid, old_rec in buffer:
original_file_name = first_filepath_fn(old_rec)
old_promise_filename = _promise_filename(original_file_name, cache_uuid)
self._del_promise_file(old_promise_filename, original_file_name)
if old_rec_uuid == last_record_uuid:
break
# TODO(VitalyFedyunin): If no match is found, that means we exceeded the length of memory_cell
# and there is an excessive number of 1-to-zero cases; raise an error and explain how to fix it
try:
for filename in self.source_datapipe:
rec_uuid, record = self.memory_cell_dp.get_last()
original_file_name = self.first_filepath_fn(record)
# TODO(VitalyFedyunin): For debug mode we can detect duplicate-key situations here and warn the user
if original_file_name != filename:
# Situations where every archive unpacks to only a single file are also considered 1-to-many
one_to_many_detected = True
if one_to_one_detected:
raise Exception("Disovered different keys when one-to-one mode previously assumed")
# We are dealing with one-to-many situation now
with open(_get_list_filename(original_file_name), "a") as fh:
fh.write(f"{filename}\n")
else:
one_to_one_detected = True
if one_to_many_detected:
# Keys should always be the same (1-to-1 situation) or always different (1-to-many situation)
raise Exception("Discovered identical keys when one-to-many mode was previously assumed")
if rec_uuid != last_record_uuid:
fulfill_old_promises(
self.memory_cell_dp.get_buffer()[1:], last_record_uuid, self.first_filepath_fn, self._cache_uuid
)
last_record_uuid = rec_uuid
yield filename
finally:
if last_record_uuid is not None:
fulfill_old_promises(
self.memory_cell_dp.get_buffer(), last_record_uuid, self.first_filepath_fn, self._cache_uuid
)
def _leave_second(x):
return x[1]
@functional_datapipe("end_caching")
class EndOnDiskCacheHolderIterDataPipe(IterDataPipe):
"""
Indicates that the result of the prior DataPipe will be saved to local files specified
by ``filepath_fn`` (functional name: ``end_caching``). The result of the source DataPipe
is required to be a tuple of metadata and data, or a tuple of metadata and file handle.
Args:
datapipe: IterDataPipe with at least one ``OnDiskCacheHolder`` in the graph.
mode: Mode in which the cached files are opened to write the data on disk. This is needed
to be aligned with the type of data or file handle from ``datapipe``. ``"wb"`` is used by default.
filepath_fn: Optional function to extract filepath from the metadata from ``datapipe``.
By default, the metadata is used directly as the file path.
same_filepath_fn: Set to ``True`` to use same ``filepath_fn`` from the ``OnDiskCacheHolder``.
skip_read: Boolean value to skip reading the file handle from ``datapipe``.
By default, reading is enabled and reading function is created based on the ``mode``.
timeout: Integer number of seconds to wait for an uncached item to be written to disk
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, HttpReader
>>> url = IterableWrapper(["https://path/to/filename", ])
>>> def _filepath_fn(url):
>>> temp_dir = tempfile.gettempdir()
>>> return os.path.join(temp_dir, os.path.basename(url))
>>> hash_dict = {"expected_filepath": expected_MD5_hash}
>>> # You must call ``.on_disk_cache`` at some point before ``.end_caching``
>>> cache_dp = url.on_disk_cache(filepath_fn=_filepath_fn, hash_dict=hash_dict, hash_type="md5")
>>> # You must call ``.end_caching`` at a later point to stop tracing and save the results to local files.
>>> cache_dp = HttpReader(cache_dp).end_caching(mode="wb", filepath_fn=_filepath_fn)
"""
def __new__(cls, datapipe, mode="wb", filepath_fn=None, *, same_filepath_fn=False, skip_read=False, timeout=300):
if filepath_fn is not None and same_filepath_fn:
raise ValueError("`filepath_fn` is mutually exclusive with `same_filepath_fn`")
graph = traverse_dps(datapipe)
# Get the last CacheHolder
cache_holder = EndOnDiskCacheHolderIterDataPipe._recursive_search(graph)
if cache_holder is None:
raise RuntimeError("Expected `OnDiskCacheHolder` existing in pipeline when `end_caching` is invoked")
if cache_holder._end_caching_flag:
raise RuntimeError("`end_caching` can only be invoked once per `OnDiskCacheHolder`")
first_filepath_fn, _hash_dict, _hash_type, _, cache_uuid = OnDiskCacheHolderIterDataPipe._temp_dict[
cache_holder
]
cached_dp, one_many_cached_dp = cache_holder._end_caching()
cached_dp = _WaitPendingCacheItemIterDataPipe(cached_dp, timeout=timeout, cache_uuid=cache_uuid)
one_many_cached_dp = _WaitPendingCacheItemIterDataPipe(
one_many_cached_dp, timeout=timeout, cache_uuid=cache_uuid, input_col=0
)
one_many_cached_dp = one_many_cached_dp.map(_leave_second)
memory_cell_dp = cache_holder.source_datapipe
if same_filepath_fn:
filepath_fn = first_filepath_fn
todo_dp = datapipe
if not skip_read:
if "t" in mode:
todo_dp = todo_dp.map(fn=_read_str, input_col=1)
else:
todo_dp = todo_dp.map(fn=_read_bytes, input_col=1)
if filepath_fn is not None:
todo_dp = todo_dp.map(fn=filepath_fn, input_col=0)
# Extra hash check here when a hash is provided; raise an error if the data returned
# from prior operations doesn't match the hash
if _hash_dict is not None:
todo_dp = todo_dp.check_hash(_hash_dict, _hash_type)
todo_dp = todo_dp.save_to_disk(mode=mode)
todo_dp = _FulfilledPromisesIterDataPipe(todo_dp, memory_cell_dp, first_filepath_fn, cache_uuid=cache_uuid)
# TODO(VitalyFedyunin): This impacts determinism for partial cache situations
return todo_dp.concat(cached_dp).concat(one_many_cached_dp)
@staticmethod
def _recursive_search(graph):
for dp, _ in graph.values():
# Find the closest CacheHolder
if isinstance(dp, OnDiskCacheHolderIterDataPipe):
return dp
for _, sub_graph in graph.values():
res = EndOnDiskCacheHolderIterDataPipe._recursive_search(sub_graph)
if res is not None:
return res
return None
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterator, Optional, TypeVar
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
T_co = TypeVar("T_co", covariant=True)
@functional_datapipe("cycle")
class CyclerIterDataPipe(IterDataPipe[T_co]):
"""
Cycles the specified input in perpetuity by default, or for the specified number
of times (functional name: ``cycle``).
If the ordering does not matter (e.g. because you plan to ``shuffle`` later) or if you would like to
repeat an element multiple times before moving onto the next element, use :class:`.Repeater`.
Args:
source_datapipe: source DataPipe that will be cycled through
count: the number of times to read through ``source_datapipe`` (if ``None``, it will cycle in perpetuity)
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(3))
>>> dp = dp.cycle(2)
>>> list(dp)
[0, 1, 2, 0, 1, 2]
"""
def __init__(self, source_datapipe: IterDataPipe[T_co], count: Optional[int] = None) -> None:
self.source_datapipe: IterDataPipe[T_co] = source_datapipe
self.count: Optional[int] = count
if count is not None and count < 0:
raise ValueError(f"Expected non-negative count, got {count}")
def __iter__(self) -> Iterator[T_co]:
i = 0
while self.count is None or i < self.count:
yield from self.source_datapipe
i += 1
def __len__(self) -> int:
if self.count is None:
raise TypeError(
f"This {type(self).__name__} instance cycles forever, and therefore doesn't have valid length"
)
else:
return self.count * len(self.source_datapipe)
@functional_datapipe("repeat")
class RepeaterIterDataPipe(IterDataPipe[T_co]):
"""
Repeatedly yield each element of source DataPipe for the specified number of times before
moving onto the next element (functional name: ``repeat``). Note that no copy is made in this DataPipe,
the same element is yielded repeatedly.
If you would like to yield the whole DataPipe in order multiple times, use :class:`.Cycler`.
Args:
source_datapipe: source DataPipe that will be iterated through
times: the number of times an element of ``source_datapipe`` will be yielded before moving onto the next element
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(3))
>>> dp = dp.repeat(2)
>>> list(dp)
[0, 0, 1, 1, 2, 2]
"""
def __init__(self, source_datapipe: IterDataPipe[T_co], times: int) -> None:
self.source_datapipe: IterDataPipe[T_co] = source_datapipe
self.times: int = times
if times <= 1:
raise ValueError(f"The number of repetition must be > 1, got {times}")
def __iter__(self) -> Iterator[T_co]:
for element in self.source_datapipe:
for _ in range(self.times):
yield element
def __len__(self) -> int:
return self.times * len(self.source_datapipe)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
from typing import List, Optional, TypeVar
from torch.utils.data.datapipes.utils.common import DILL_AVAILABLE
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
try: # TODO(637): Create dependency on TorchArrow?
import pyarrow.parquet as parquet
import torcharrow
except ImportError:
torcharrow = None
parquet = None
if DILL_AVAILABLE:
import dill
dill.extend(use_dill=False)
T_co = TypeVar("T_co")
def _construct_dataframe(data, dtype=None, dtype_generator=None, columns=None, device=None):
if dtype is None:
dtype = dtype_generator()
return torcharrow.dataframe(data, dtype=dtype, columns=columns, device=device)
@functional_datapipe("dataframe")
class DataFrameMakerIterDataPipe(IterDataPipe): # IterDataPipe[torcharrow.IDataFrame[T_co]]
r"""
Takes rows of data, batches a number of them together and creates `TorchArrow`
DataFrames (functional name: ``dataframe``).
Note:
There is a trade-off between having a large number of rows within a DataFrame and usage of memory. Please
choose a value carefully.
Args:
source_dp: IterDataPipe containing rows of data
dataframe_size: number of rows of data within each DataFrame (effectively the page size)
dtype: specify the `TorchArrow` dtype for the DataFrame, use ``torcharrow.dtypes.DType``
dtype_generator: function with no input argument that generates a torcharrow.dtypes.DType,
which overrides dtype if both are given. This is useful for when the desired dtype is
not serializable.
columns: List of str that specifies the column names of the DataFrame
device: specify the device on which the DataFrame will be stored
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> import torcharrow.dtypes as dt
>>> source_data = [(i,) for i in range(3)]
>>> source_dp = IterableWrapper(source_data)
>>> DTYPE = dt.Struct([dt.Field("Values", dt.int32)])
>>> df_dp = source_dp.dataframe(dtype=DTYPE)
>>> list(df_dp)[0]
index Values
------- --------
0 0
1 1
2 2
dtype: Struct([Field('Values', int32)]), count: 3, null_count: 0
"""
def __new__(
cls,
source_dp: IterDataPipe[T_co],
dataframe_size: int = 1000,
dtype=None,
dtype_generator=None,
columns: Optional[List[str]] = None,
device: str = "",
):
if torcharrow is None:
raise ImportError(
"The library 'torcharrow' is necessary for this DataPipe but it is not available."
"Please visit https://github.com/facebookresearch/torcharrow/ to install it."
)
# In this version, DF tracing is not available, which would allow DataPipe to run DataFrame operations
batch_dp = source_dp.batch(dataframe_size)
df_dp = batch_dp.map(
partial(_construct_dataframe, dtype=dtype, dtype_generator=dtype_generator, columns=columns, device=device)
)
return df_dp
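# Hedged sketch (hypothetical field name): when the desired dtype itself is not picklable,
# a module-level ``dtype_generator`` can be passed instead of ``dtype`` so that the dtype is
# constructed lazily when each batch is mapped, e.g.
# ``df_dp = source_dp.dataframe(dtype_generator=_example_values_dtype)``.
def _example_values_dtype():
    import torcharrow.dtypes as dt  # mirrors the dtype used in the docstring example above
    return dt.Struct([dt.Field("Values", dt.int32)])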
@functional_datapipe("load_parquet_as_df")
class ParquetDFLoaderIterDataPipe(IterDataPipe): # IterDataPipe[torcharrow.IDataFrame[T_co]]
r"""
Takes in paths to Parquet files and returns a `TorchArrow` DataFrame for each row group
within a Parquet file (functional name: ``load_parquet_as_df``).
Args:
source_dp: source DataPipe containing paths to the Parquet files
columns: List of `str` that specifies the column names of the DataFrame
use_threads: if ``True``, Parquet reader will perform multi-threaded column reads
dtype: specify the `TorchArrow` dtype for the DataFrame, use ``torcharrow.dtypes.DType``
device: specify the device on which the DataFrame will be stored
Example:
>>> from torchdata.datapipes.iter import FileLister
>>> import torcharrow.dtypes as dt
>>> DTYPE = dt.Struct([dt.Field("Values", dt.int32)])
>>> source_dp = FileLister(".", masks="df*.parquet")
>>> parquet_df_dp = source_dp.load_parquet_as_df(dtype=DTYPE)
>>> list(parquet_df_dp)[0]
index Values
------- --------
0 0
1 1
2 2
dtype: Struct([Field('Values', int32)]), count: 3, null_count: 0
"""
def __init__(
self,
source_dp: IterDataPipe[str],
dtype=None,
columns: Optional[List[str]] = None,
device: str = "",
use_threads: bool = False,
):
if torcharrow is None:
raise ImportError(
"The library 'torcharrow' is necessary for this DataPipe but it is not available."
"Please visit https://github.com/facebookresearch/torcharrow/ to install it."
)
if parquet is None:
raise ImportError("The library 'parquet' is necessary for this DataPipe but it is not available.")
self.source_dp = source_dp
self.columns = columns
self.use_threads = use_threads
self.dtype = dtype
self.device = device
def __iter__(self):
for path in self.source_dp:
parquet_file = parquet.ParquetFile(path)
num_row_groups = parquet_file.num_row_groups
for i in range(num_row_groups):
# TODO(638): More fine-grain control over the number of rows or row group per DataFrame
row_group = parquet_file.read_row_group(i, columns=self.columns, use_threads=self.use_threads)
yield torcharrow.from_arrow(row_group, dtype=self.dtype)
def __getstate__(self):
if DILL_AVAILABLE:
dill_dtype = dill.dumps(self.dtype)
else:
dill_dtype = self.dtype
state = (self.source_dp, dill_dtype, self.columns, self.device, self.use_threads)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(self.source_dp, dill_dtype, self.columns, self.device, self.use_threads) = state
if DILL_AVAILABLE:
self.dtype = dill.loads(dill_dtype) # type: ignore[assignment]
else:
self.dtype = dill_dtype # type: ignore[assignment]
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import struct
import warnings
from functools import partial
from io import BufferedIOBase
from typing import Any, cast, Dict, Iterable, Iterator, List, NamedTuple, Optional, Tuple, Union
import torch
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils.common import validate_pathname_binary_tuple
try:
from math import prod # type: ignore
except ImportError:
# Implementation for older Python
# NOTE: this is not supported by mypy yet
# https://github.com/python/mypy/issues/1393
import operator
from functools import reduce
def prod(xs): # type: ignore[no-redef]
return reduce(operator.mul, xs, 1)
try:
import google.protobuf as _protobuf
del _protobuf
HAS_PROTOBUF = True
except ImportError:
HAS_PROTOBUF = False
U = Union[bytes, bytearray, str]
TFRecordFeatureSpec = Tuple[Tuple[int, ...], torch.dtype]
TFRecordExampleSpec = Dict[str, TFRecordFeatureSpec]
# Note: recursive types are not supported by mypy at the moment
# TODO(640): uncomment as soon as it becomes supported
# https://github.com/python/mypy/issues/731
# BinaryData = Union[str, List['BinaryData']]
TFRecordBinaryData = Union[str, List[str], List[List[str]], List[List[List[Any]]]]
TFRecordExampleFeature = Union[torch.Tensor, List[torch.Tensor], TFRecordBinaryData]
TFRecordExample = Dict[str, TFRecordExampleFeature]
class SequenceExampleSpec(NamedTuple):
context: TFRecordExampleSpec
feature_lists: TFRecordExampleSpec
def _assert_protobuf() -> None:
if not HAS_PROTOBUF:
raise ModuleNotFoundError(
"Package `protobuf` is required to be installed to use this datapipe."
"Please use `pip install protobuf` or `conda install -c conda-forge protobuf`"
"to install the package"
)
def iterate_tfrecord_file(data: BufferedIOBase) -> Iterator[memoryview]:
length_bytes = bytearray(8)
crc_bytes = bytearray(4)
data_bytes = bytearray(1024)
while True:
bytes_read = data.readinto(length_bytes)
if bytes_read == 0:
break
elif bytes_read != 8:
raise RuntimeError("Invalid tfrecord file: failed to read the record size.")
if data.readinto(crc_bytes) != 4:
raise RuntimeError("Invalid tfrecord file: failed to read the start token.")
(length,) = struct.unpack("<Q", length_bytes)
if length > len(data_bytes):
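# Grow the reusable buffer; ``zfill`` left-pads with b"0", and the padding is irrelevant
# because ``readinto`` below overwrites the first ``length`` bytes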
data_bytes = data_bytes.zfill(int(length * 1.5))
data_bytes_view = memoryview(data_bytes)[:length]
if data.readinto(data_bytes_view) != length:
raise RuntimeError("Invalid tfrecord file: failed to read the record.")
if data.readinto(crc_bytes) != 4:
raise RuntimeError("Invalid tfrecord file: failed to read the end token.")
# TODO(641): check CRC
yield data_bytes_view
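# Hedged sketch (not part of the loader): writing a single record in the framing that
# ``iterate_tfrecord_file`` expects: a little-endian uint64 length, a 4-byte CRC field,
# the payload, then a trailing 4-byte CRC field. The CRC fields are zeroed here, which the
# reader above tolerates only because it does not verify CRCs yet (see TODO(641)).
def _example_write_tfrecord_record(fh, payload: bytes) -> None:
    fh.write(struct.pack("<Q", len(payload)))  # record length
    fh.write(struct.pack("<I", 0))  # masked CRC32 of the length (not computed in this sketch)
    fh.write(payload)
    fh.write(struct.pack("<I", 0))  # masked CRC32 of the payload (not computed in this sketch)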
def process_feature(feature) -> torch.Tensor:
# NOTE: We assume that each key in the example has only one field
# (either "bytes_list", "float_list", or "int64_list")!
field = feature.ListFields()[0]
inferred_typename, value = field[0].name, field[1].value
if inferred_typename == "bytes_list":
pass
elif inferred_typename == "float_list":
value = torch.tensor(value, dtype=torch.float32)
elif inferred_typename == "int64_list":
value = torch.tensor(value, dtype=torch.int64)
return value
def _reshape_list(value, shape):
# Flatten list
flat_list = []
def flatten(value):
if isinstance(value, (str, bytes)):
flat_list.append(value)
else:
for x in value:
flatten(x)
flatten(value)
# Compute correct shape
common_divisor = prod(x for x in shape if x != -1)
if sum(1 for x in shape if x == -1) > 1:
raise RuntimeError("Shape can contain at most one dynamic dimension (-1).")
if len(flat_list) % max(common_divisor, 1) != 0:
raise RuntimeError(f"Cannot reshape {len(flat_list)} values into shape {shape}")
shape = [x if x != -1 else (len(flat_list) // common_divisor) for x in shape]
# Reshape list into the correct shape
def _reshape(value, shape):
if len(shape) == 0:
assert len(value) == 1
return value[0]
elif len(shape) == 1:  # To make the recursion faster
assert len(value) == shape[0]
return value
dim_size = len(value) // shape[0]
return [_reshape(value[i * dim_size : (i + 1) * dim_size], shape[1:]) for i in range(shape[0])]
return _reshape(flat_list, shape)
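# Hedged illustration (hypothetical values): reshaping a flat list with one dynamic (-1)
# dimension, as done for byte features that cannot be stacked into tensors.
def _example_reshape_list():
    nested = _reshape_list(["a", "b", "c", "d", "e", "f"], (2, -1))
    assert nested == [["a", "b", "c"], ["d", "e", "f"]]
    return nested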
def _apply_feature_spec(value, feature_spec):
if feature_spec is not None:
shape, dtype = feature_spec
if isinstance(dtype, torch.dtype):
if shape is not None:
value = value.reshape(shape)
value = value.to(dtype)
elif shape is not None:
# Manual list reshape
value = _reshape_list(value, shape)
return value
def _parse_tfrecord_features(features, spec: Optional[TFRecordExampleSpec]) -> Dict[str, torch.Tensor]:
result = dict()
features = features.feature
for key in features.keys():
if spec is not None and key not in spec:
continue
feature_spec = None if spec is None else spec[key]
feature = features[key]
result[key] = _apply_feature_spec(process_feature(feature), feature_spec)
return result
def parse_tfrecord_sequence_example(example, spec: Optional[TFRecordExampleSpec]) -> TFRecordExample:
# Parse context features
result = cast(TFRecordExample, _parse_tfrecord_features(example.context, spec))
# Parse feature lists
feature_lists_keys = None if spec is None else set(spec.keys()) - set(result.keys())
features = example.feature_lists.feature_list
for key in features.keys():
if feature_lists_keys is not None and key not in feature_lists_keys:
continue
feature_spec = None if spec is None else spec[key]
feature = features[key].feature
if key in result:
raise RuntimeError(
f"TFRecord example's key {key} is contained in both the context and feature lists. This is not supported."
)
value: Union[torch.Tensor, List[Any]] = list(map(partial(process_feature), feature))
# For known torch dtypes, we stack the list features
if feature_spec is not None and isinstance(feature_spec[1], torch.dtype):
value = torch.stack(cast(List[torch.Tensor], value), 0)
value = _apply_feature_spec(value, feature_spec)
result[key] = value
if spec is not None and len(result.keys()) != len(spec.keys()):
raise RuntimeError(f"Example is missing some required keys: {sorted(result.keys())} != {sorted(spec.keys())}")
return result
@functional_datapipe("load_from_tfrecord")
class TFRecordLoaderIterDataPipe(IterDataPipe[TFRecordExample]):
r"""
Opens/decompresses tfrecord binary streams from an Iterable DataPipe which contains tuples of path name and
tfrecord binary stream, and yields the stored records (functional name: ``load_from_tfrecord``).
Args:
datapipe: Iterable DataPipe that provides tuples of path name and tfrecord binary stream
length: a nominal length of the DataPipe
Note:
The opened file handles will be closed automatically if the default ``DecoderDataPipe``
is attached. Otherwise, the user is responsible for closing file handles explicitly
or letting Python's GC close them periodically.
Example:
>>> from torchdata.datapipes.iter import FileLister, FileOpener
>>> datapipe1 = FileLister(".", "*.tfrecord")
>>> datapipe2 = FileOpener(datapipe1, mode="b")
>>> tfrecord_loader_dp = datapipe2.load_from_tfrecord()
>>> for example in tfrecord_loader_dp:
>>> print(example)
"""
def __init__(
self,
datapipe: Iterable[Tuple[str, BufferedIOBase]],
spec: Optional[TFRecordExampleSpec] = None,
length: int = -1,
) -> None:
super().__init__()
_assert_protobuf()
self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe
self.length: int = length
self.spec = spec
def __iter__(self) -> Iterator[TFRecordExample]:
# We assume that the "example.proto" and "feature.proto"
# stays the same for future TensorFlow versions.
# If it changed, newer TensorFlow versions would
# not be able to load older tfrecord datasets.
from .protobuf_template import _tfrecord_example_pb2 as example_pb2
for data in self.datapipe:
validate_pathname_binary_tuple(data)
pathname, data_stream = data
try:
for example_bytes in iterate_tfrecord_file(data_stream):
example = example_pb2.SequenceExample() # type: ignore
example.ParseFromString(example_bytes) # type: ignore
yield parse_tfrecord_sequence_example(example, self.spec)
except RuntimeError as e:
warnings.warn(f"Unable to read from corrupted tfrecord stream {pathname} due to: {e}, abort!")
raise e
def __len__(self) -> int:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
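# Hedged illustration of a ``spec`` argument (the keys below are hypothetical): each entry
# maps a feature key to ``(shape, dtype)``; parsed tensors are reshaped to ``shape`` (``-1``
# marks at most one dynamic dimension) and cast to ``dtype``.
_EXAMPLE_TFRECORD_SPEC: TFRecordExampleSpec = {
    "label": ((), torch.int64),
    "embedding": ((-1,), torch.float32),
}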
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import re
from typing import Iterator, List
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
def _shard_expand(s: str) -> List[str]:
expansion = r"[{][0-9]+[.][.][0-9]+[}]"
m = re.search(expansion, s)
if not m:
return [s]
prefix = s[: m.start()]
rest = _shard_expand(s[m.end() :])
rng = s[m.start() + 1 : m.end() - 1]
lohi = rng.split("..")
if len(lohi[0]) == len(lohi[1]) and lohi[0].startswith("0"):
fmt = "{prefix}{i:0>{l}d}{r}"
elif len(lohi[0]) <= len(lohi[1]):
if lohi[0].startswith("0") and lohi[0] != "0":
raise ValueError("shard_expand: low bound must not start with 0 if low bound is shorter")
fmt = "{prefix}{i}{r}"
else:
raise ValueError("shard_expand: low bound must be shorter than high bound")
lo, hi = (int(x) for x in lohi)
if lo >= hi:
raise ValueError(f"shard_expand: bad range in in shard spec {s}.")
result = []
for i in range(lo, hi + 1):
for r in rest:
expanded: str = fmt.format(prefix=prefix, i=i, r=r, l=len(lohi[1]))
result.append(expanded)
return result
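# Hedged illustration (hypothetical shard names): brace ranges expand with or without zero
# padding depending on the width of the bounds.
def _example_shard_expand():
    assert _shard_expand("shard-{1..3}.tar") == ["shard-1.tar", "shard-2.tar", "shard-3.tar"]
    assert _shard_expand("shard-{00..02}.tar") == ["shard-00.tar", "shard-01.tar", "shard-02.tar"]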
@functional_datapipe("shard_expand")
class ShardExpanderIterDataPipe(IterDataPipe[str]):
r"""
Expands incoming shard strings into shards.
Sharded data files are named using shell-like brace notation. For example,
an ImageNet dataset sharded into 1200 shards and stored on a web server
might be named `imagenet-{000000..001199}.tar`.
Note that shard names can be expanded without any server transactions;
this makes `shard_expand` reproducible and storage system independent
(unlike :class:`.FileLister` etc.).
Args:
source_datapipe: a DataPipe yielding a stream of path strings, possibly containing brace notation
Returns:
a DataPipe yielding a stream of expanded pathnames.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper(["ds-{00..05}.tar"])
>>> expand_dp = source_dp.shard_expand()
>>> list(expand_dp)
['ds-00.tar', 'ds-01.tar', 'ds-02.tar', 'ds-03.tar', 'ds-04.tar', 'ds-05.tar']
>>> source_dp = IterableWrapper(["imgs_{00..05}.tar", "labels_{00..05}.tar"])
>>> expand_dp = source_dp.shard_expand()
>>> list(expand_dp)
['imgs_00.tar', 'imgs_01.tar', 'imgs_02.tar', 'labels_00.tar', 'labels_01.tar', 'labels_02.tar']
"""
def __init__(self, source_datapipe: IterDataPipe[str]) -> None:
super().__init__()
self.source_datapipe: IterDataPipe[str] = source_datapipe
def __iter__(self) -> Iterator[str]:
for path in self.source_datapipe:
yield from _shard_expand(path)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import tarfile
import warnings
from io import BufferedIOBase
from typing import cast, IO, Iterable, Iterator, Optional, Tuple
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
from torchdata.datapipes.utils.common import validate_pathname_binary_tuple
@functional_datapipe("load_from_tar")
class TarArchiveLoaderIterDataPipe(IterDataPipe[Tuple[str, BufferedIOBase]]):
r"""
Opens/decompresses tar binary streams from an Iterable DataPipe which contains tuples of path name and
tar binary stream, and yields a tuple of path name and extracted binary stream (functional name: ``load_from_tar``).
Args:
datapipe: Iterable DataPipe that provides tuples of path name and tar binary stream
mode: File mode used by `tarfile.open` to read file object.
Mode has to be a string of the form `'filemode[:compression]'`
length: a nominal length of the DataPipe
Note:
The opened file handles will be closed automatically if the default ``DecoderDataPipe``
is attached. Otherwise, the user is responsible for closing file handles explicitly
or letting Python's GC close them periodically.
Example:
>>> from torchdata.datapipes.iter import FileLister, FileOpener
>>> datapipe1 = FileLister(".", "*.tar")
>>> datapipe2 = FileOpener(datapipe1, mode="b")
>>> tar_loader_dp = datapipe2.load_from_tar()
>>> for _, stream in tar_loader_dp:
>>> print(stream.read())
b'0123456789abcdef'
"""
def __init__(self, datapipe: Iterable[Tuple[str, BufferedIOBase]], mode: str = "r:*", length: int = -1) -> None:
super().__init__()
self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe
self.mode: str = mode
self.length: int = length
def __iter__(self) -> Iterator[Tuple[str, BufferedIOBase]]:
for data in self.datapipe:
validate_pathname_binary_tuple(data)
pathname, data_stream = data
try:
if isinstance(data_stream, StreamWrapper) and isinstance(data_stream.file_obj, tarfile.TarFile):
tar = data_stream.file_obj
else:
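# Streams that cannot seek are incompatible with random-access "r:" modes,
# so fall back to tarfile's streaming "r|" modes by replacing ":" with "|"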
reading_mode = (
self.mode
if hasattr(data_stream, "seekable") and data_stream.seekable()
else self.mode.replace(":", "|")
)
# typing.cast is used here to silence mypy's type checker
tar = tarfile.open(fileobj=cast(Optional[IO[bytes]], data_stream), mode=reading_mode)
for tarinfo in tar:
if not tarinfo.isfile():
continue
extracted_fobj = tar.extractfile(tarinfo)
if extracted_fobj is None:
warnings.warn(f"failed to extract file {tarinfo.name} from source tarfile {pathname}")
raise tarfile.ExtractError
inner_pathname = os.path.normpath(os.path.join(pathname, tarinfo.name))
yield inner_pathname, StreamWrapper(extracted_fobj, data_stream, name=inner_pathname) # type: ignore[misc]
except Exception as e:
warnings.warn(f"Unable to extract files from corrupted tarfile stream {pathname} due to: {e}, abort!")
raise e
finally:
if isinstance(data_stream, StreamWrapper):
data_stream.autoclose()
def __len__(self) -> int:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import Dict, final, List, Optional, TypeVar, Union
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
T = TypeVar("T")
@functional_datapipe("random_split")
class RandomSplitterIterDataPipe(IterDataPipe):
r"""
Randomly split samples from a source DataPipe into groups (functional name: ``random_split``).
Since there is no buffer, only ONE group of samples (i.e. one child DataPipe) can be iterated through
at any time. Attempts to iterate through multiple of them simultaneously will fail.
Note that by default, multiple iterations of this DataPipe will yield the same split for consistency across epochs.
You can invoke ``override_seed`` on the output(s) to update the seed whenever needed (such as per epoch to
get a different split per epoch).
Args:
source_datapipe: Iterable DataPipe being split
weights: Dict of weights; the number of keys in this dict determines how many output DataPipes there will be.
It is recommended to provide integer weights that sum up to ``total_length``, which allows
resulting DataPipes' length values to be known in advance.
seed: random seed used to determine the randomness of the split
total_length: Length of the ``source_datapipe``; optional, but providing an integer is highly encouraged
because not every ``IterDataPipe`` has a ``len`` that can be easily known in advance.
target: Optional key (that must exist in ``weights``) to indicate the specific group to return.
If set to the default ``None``, returns ``List[IterDataPipe]``.
If target is specified, returns ``IterDataPipe``.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(10))
>>> train, valid = dp.random_split(total_length=10, weights={"train": 0.5, "valid": 0.5}, seed=0)
>>> list(train)
[2, 3, 5, 7, 8]
>>> list(valid)
[0, 1, 4, 6, 9]
>>> # You can also specify a target key if you only need a specific group of samples
>>> train = dp.random_split(total_length=10, weights={"train": 0.5, "valid": 0.5}, seed=0, target='train')
>>> list(train)
[2, 3, 5, 7, 8]
>>> # Be careful to use the same seed as before when specifying `target` to get the correct split.
>>> valid = dp.random_split(total_length=10, weights={"train": 0.5, "valid": 0.5}, seed=0, target='valid')
>>> list(valid)
[0, 1, 4, 6, 9]
"""
def __new__(
cls,
source_datapipe: IterDataPipe,
weights: Dict[T, Union[int, float]],
seed,
total_length: Optional[int] = None,
target: Optional[T] = None,
):
if total_length is None:
try:
# TODO: This is an issue for DataPipes which only have runtime lengths. Revisit to see if this
# is problematic.
total_length = len(source_datapipe)
except TypeError:
raise TypeError(
"RandomSplitter needs `total_length`, but it is unable to infer it from "
f"the `source_datapipe`: {source_datapipe}."
)
container = _RandomSplitterIterDataPipe(source_datapipe, total_length, weights, seed)
if target is None:
return [SplitterIterator(container, k) for k in list(weights.keys())]
else:
if target in weights.keys():
return SplitterIterator(container, target)
else:
raise KeyError(f"`target={target}` does not match any key in `weights`.")
class _RandomSplitterIterDataPipe(IterDataPipe):
def __init__(
self,
source_datapipe: IterDataPipe,
total_length: int,
weights: Dict[T, Union[int, float]],
seed,
):
self.source_datapipe: IterDataPipe = source_datapipe
self.total_length: int = total_length
self.remaining_length: int = total_length
self._seed = seed
self.keys: List[T] = list(weights.keys())
self.key_to_index: Dict[T, int] = {k: i for i, k in enumerate(self.keys)}
self.norm_weights: List[float] = self.normalize_weights([weights[k] for k in self.keys], total_length)
self.weights: List[float] = self.norm_weights.copy()
self._rng = random.Random(self._seed)
self._lengths: List[int] = []
def draw(self) -> T:
selected_key = self._rng.choices(self.keys, self.weights)[0]
index = self.key_to_index[selected_key]
self.weights[index] -= 1
self.remaining_length -= 1
if self.weights[index] < 0:
self.weights[index] = 0
self.weights = self.normalize_weights(self.weights, self.remaining_length)
return selected_key
@staticmethod
def normalize_weights(weights: List[float], total_length: int) -> List[float]:
"""
Given a ``List`` of weights, normalize them according to ``total_length``.
"""
total_weight = sum(weights)
return [float(w) * total_length / total_weight for w in weights]
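# Worked illustration (hypothetical numbers): with ``weights={"train": 0.5, "valid": 0.5}`` and
# ``total_length=10``, ``normalize_weights([0.5, 0.5], 10)`` returns ``[5.0, 5.0]``. Each ``draw()``
# decrements the chosen weight by one, so once a group has been drawn 5 times its weight reaches
# zero and the remaining samples all go to the other group, giving exactly 5 samples per split.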
@final
def reset(self) -> None:
self._rng = random.Random(self._seed)
self.weights = self.norm_weights.copy()
self.remaining_length = self.total_length
def override_seed(self, seed):
"""
Update the `seed`. The new `seed` will be used in the next iteration.
"""
self._seed = seed
return self
def __getstate__(self):
state = (
self.source_datapipe,
self.total_length,
self._seed,
self.norm_weights,
self.keys,
self.key_to_index,
self.weights,
self._rng.getstate(),
)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(
self.source_datapipe,
self.total_length,
self._seed,
self.norm_weights,
self.keys,
self.key_to_index,
self.weights,
rng_state,
) = state
self._rng = random.Random()
self._rng.setstate(rng_state)
def get_length(self, target: T) -> int:
if not self._lengths:
if all(w.is_integer() for w in self.norm_weights) and sum(self.norm_weights) == self.total_length:
self._lengths = [int(w) for w in self.norm_weights]
else:
raise TypeError(
"Lengths of the split cannot be known in advance. Please supply "
"integer `weights` that sum up to `total_length`.\nAlternatively, "
"use `datapipe.set_length(LENGTH)` to manually set the desired length."
)
index = self.key_to_index[target]
return self._lengths[index]
class SplitterIterator(IterDataPipe):
def __init__(self, main_datapipe: _RandomSplitterIterDataPipe, target: T):
self.main_datapipe = main_datapipe
self.target = target
def __iter__(self):
self.main_datapipe.reset()
for sample in self.main_datapipe.source_datapipe:
if self.main_datapipe.draw() == self.target:
yield sample
def override_seed(self, seed):
"""
Update the `seed`. The new `seed` will be used in the next iteration. For use cases that require a different
split for each epoch, call this method before or after each epoch as necessary.
"""
self.main_datapipe.override_seed(seed)
return self
def __len__(self):
return self.main_datapipe.get_length(self.target)
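# Hedged usage sketch (hypothetical seeds, not part of the module): obtaining a different
# split for every epoch by overriding the seed before iterating over the split.
def _example_resplit_per_epoch(train_dp: "SplitterIterator", num_epochs: int = 3) -> None:
    for epoch in range(num_epochs):
        train_dp.override_seed(epoch)
        for _sample in train_dp:
            pass  # consume / train on the sample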
|
# type: ignore
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: example.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1"))
from google.protobuf import (
descriptor as _descriptor,
descriptor_pb2,
message as _message,
reflection as _reflection,
symbol_database as _symbol_database,
)
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="example.proto",
package="tfrecord",
syntax="proto3",
serialized_pb=_b(
'\n\rexample.proto\x12\x08tfrecord"\x1a\n\tBytesList\x12\r\n\x05value\x18\x01 \x03(\x0c"\x1e\n\tFloatList\x12\x11\n\x05value\x18\x01 \x03(\x02\x42\x02\x10\x01"\x1e\n\tInt64List\x12\x11\n\x05value\x18\x01 \x03(\x03\x42\x02\x10\x01"\x92\x01\n\x07\x46\x65\x61ture\x12)\n\nbytes_list\x18\x01 \x01(\x0b\x32\x13.tfrecord.BytesListH\x00\x12)\n\nfloat_list\x18\x02 \x01(\x0b\x32\x13.tfrecord.FloatListH\x00\x12)\n\nint64_list\x18\x03 \x01(\x0b\x32\x13.tfrecord.Int64ListH\x00\x42\x06\n\x04kind"\x7f\n\x08\x46\x65\x61tures\x12\x30\n\x07\x66\x65\x61ture\x18\x01 \x03(\x0b\x32\x1f.tfrecord.Features.FeatureEntry\x1a\x41\n\x0c\x46\x65\x61tureEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.tfrecord.Feature:\x02\x38\x01"1\n\x0b\x46\x65\x61tureList\x12"\n\x07\x66\x65\x61ture\x18\x01 \x03(\x0b\x32\x11.tfrecord.Feature"\x98\x01\n\x0c\x46\x65\x61tureLists\x12=\n\x0c\x66\x65\x61ture_list\x18\x01 \x03(\x0b\x32\'.tfrecord.FeatureLists.FeatureListEntry\x1aI\n\x10\x46\x65\x61tureListEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.tfrecord.FeatureList:\x02\x38\x01"/\n\x07\x45xample\x12$\n\x08\x66\x65\x61tures\x18\x01 \x01(\x0b\x32\x12.tfrecord.Features"e\n\x0fSequenceExample\x12#\n\x07\x63ontext\x18\x01 \x01(\x0b\x32\x12.tfrecord.Features\x12-\n\rfeature_lists\x18\x02 \x01(\x0b\x32\x16.tfrecord.FeatureListsB\x03\xf8\x01\x01\x62\x06proto3'
),
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_BYTESLIST = _descriptor.Descriptor(
name="BytesList",
full_name="tfrecord.BytesList",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="value",
full_name="tfrecord.BytesList.value",
index=0,
number=1,
type=12,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=27,
serialized_end=53,
)
_FLOATLIST = _descriptor.Descriptor(
name="FloatList",
full_name="tfrecord.FloatList",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="value",
full_name="tfrecord.FloatList.value",
index=0,
number=1,
type=2,
cpp_type=6,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b("\020\001")),
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=55,
serialized_end=85,
)
_INT64LIST = _descriptor.Descriptor(
name="Int64List",
full_name="tfrecord.Int64List",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="value",
full_name="tfrecord.Int64List.value",
index=0,
number=1,
type=3,
cpp_type=2,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b("\020\001")),
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=87,
serialized_end=117,
)
_FEATURE = _descriptor.Descriptor(
name="Feature",
full_name="tfrecord.Feature",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="bytes_list",
full_name="tfrecord.Feature.bytes_list",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="float_list",
full_name="tfrecord.Feature.float_list",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="int64_list",
full_name="tfrecord.Feature.int64_list",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="kind", full_name="tfrecord.Feature.kind", index=0, containing_type=None, fields=[]
),
],
serialized_start=120,
serialized_end=266,
)
_FEATURES_FEATUREENTRY = _descriptor.Descriptor(
name="FeatureEntry",
full_name="tfrecord.Features.FeatureEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="tfrecord.Features.FeatureEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="value",
full_name="tfrecord.Features.FeatureEntry.value",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=330,
serialized_end=395,
)
_FEATURES = _descriptor.Descriptor(
name="Features",
full_name="tfrecord.Features",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="feature",
full_name="tfrecord.Features.feature",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[
_FEATURES_FEATUREENTRY,
],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=268,
serialized_end=395,
)
_FEATURELIST = _descriptor.Descriptor(
name="FeatureList",
full_name="tfrecord.FeatureList",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="feature",
full_name="tfrecord.FeatureList.feature",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=397,
serialized_end=446,
)
_FEATURELISTS_FEATURELISTENTRY = _descriptor.Descriptor(
name="FeatureListEntry",
full_name="tfrecord.FeatureLists.FeatureListEntry",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="key",
full_name="tfrecord.FeatureLists.FeatureListEntry.key",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="value",
full_name="tfrecord.FeatureLists.FeatureListEntry.value",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001")),
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=528,
serialized_end=601,
)
_FEATURELISTS = _descriptor.Descriptor(
name="FeatureLists",
full_name="tfrecord.FeatureLists",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="feature_list",
full_name="tfrecord.FeatureLists.feature_list",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[
_FEATURELISTS_FEATURELISTENTRY,
],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=449,
serialized_end=601,
)
_EXAMPLE = _descriptor.Descriptor(
name="Example",
full_name="tfrecord.Example",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="features",
full_name="tfrecord.Example.features",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=603,
serialized_end=650,
)
_SEQUENCEEXAMPLE = _descriptor.Descriptor(
name="SequenceExample",
full_name="tfrecord.SequenceExample",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="context",
full_name="tfrecord.SequenceExample.context",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
_descriptor.FieldDescriptor(
name="feature_lists",
full_name="tfrecord.SequenceExample.feature_lists",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None,
),
],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=652,
serialized_end=753,
)
_FEATURE.fields_by_name["bytes_list"].message_type = _BYTESLIST
_FEATURE.fields_by_name["float_list"].message_type = _FLOATLIST
_FEATURE.fields_by_name["int64_list"].message_type = _INT64LIST
_FEATURE.oneofs_by_name["kind"].fields.append(_FEATURE.fields_by_name["bytes_list"])
_FEATURE.fields_by_name["bytes_list"].containing_oneof = _FEATURE.oneofs_by_name["kind"]
_FEATURE.oneofs_by_name["kind"].fields.append(_FEATURE.fields_by_name["float_list"])
_FEATURE.fields_by_name["float_list"].containing_oneof = _FEATURE.oneofs_by_name["kind"]
_FEATURE.oneofs_by_name["kind"].fields.append(_FEATURE.fields_by_name["int64_list"])
_FEATURE.fields_by_name["int64_list"].containing_oneof = _FEATURE.oneofs_by_name["kind"]
_FEATURES_FEATUREENTRY.fields_by_name["value"].message_type = _FEATURE
_FEATURES_FEATUREENTRY.containing_type = _FEATURES
_FEATURES.fields_by_name["feature"].message_type = _FEATURES_FEATUREENTRY
_FEATURELIST.fields_by_name["feature"].message_type = _FEATURE
_FEATURELISTS_FEATURELISTENTRY.fields_by_name["value"].message_type = _FEATURELIST
_FEATURELISTS_FEATURELISTENTRY.containing_type = _FEATURELISTS
_FEATURELISTS.fields_by_name["feature_list"].message_type = _FEATURELISTS_FEATURELISTENTRY
_EXAMPLE.fields_by_name["features"].message_type = _FEATURES
_SEQUENCEEXAMPLE.fields_by_name["context"].message_type = _FEATURES
_SEQUENCEEXAMPLE.fields_by_name["feature_lists"].message_type = _FEATURELISTS
DESCRIPTOR.message_types_by_name["BytesList"] = _BYTESLIST
DESCRIPTOR.message_types_by_name["FloatList"] = _FLOATLIST
DESCRIPTOR.message_types_by_name["Int64List"] = _INT64LIST
DESCRIPTOR.message_types_by_name["Feature"] = _FEATURE
DESCRIPTOR.message_types_by_name["Features"] = _FEATURES
DESCRIPTOR.message_types_by_name["FeatureList"] = _FEATURELIST
DESCRIPTOR.message_types_by_name["FeatureLists"] = _FEATURELISTS
DESCRIPTOR.message_types_by_name["Example"] = _EXAMPLE
DESCRIPTOR.message_types_by_name["SequenceExample"] = _SEQUENCEEXAMPLE
BytesList = _reflection.GeneratedProtocolMessageType(
"BytesList",
(_message.Message,),
dict(
DESCRIPTOR=_BYTESLIST,
__module__="example_pb2"
# @@protoc_insertion_point(class_scope:tfrecord.BytesList)
),
)
_sym_db.RegisterMessage(BytesList)
FloatList = _reflection.GeneratedProtocolMessageType(
"FloatList",
(_message.Message,),
dict(
DESCRIPTOR=_FLOATLIST,
__module__="example_pb2"
# @@protoc_insertion_point(class_scope:tfrecord.FloatList)
),
)
_sym_db.RegisterMessage(FloatList)
Int64List = _reflection.GeneratedProtocolMessageType(
"Int64List",
(_message.Message,),
dict(
DESCRIPTOR=_INT64LIST,
__module__="example_pb2"
# @@protoc_insertion_point(class_scope:tfrecord.Int64List)
),
)
_sym_db.RegisterMessage(Int64List)
Feature = _reflection.GeneratedProtocolMessageType(
"Feature",
(_message.Message,),
dict(
DESCRIPTOR=_FEATURE,
__module__="example_pb2"
# @@protoc_insertion_point(class_scope:tfrecord.Feature)
),
)
_sym_db.RegisterMessage(Feature)
Features = _reflection.GeneratedProtocolMessageType(
"Features",
(_message.Message,),
dict(
FeatureEntry=_reflection.GeneratedProtocolMessageType(
"FeatureEntry",
(_message.Message,),
dict(
DESCRIPTOR=_FEATURES_FEATUREENTRY,
__module__="example_pb2"
# @@protoc_insertion_point(class_scope:tfrecord.Features.FeatureEntry)
),
),
DESCRIPTOR=_FEATURES,
__module__="example_pb2"
# @@protoc_insertion_point(class_scope:tfrecord.Features)
),
)
_sym_db.RegisterMessage(Features)
_sym_db.RegisterMessage(Features.FeatureEntry)
FeatureList = _reflection.GeneratedProtocolMessageType(
"FeatureList",
(_message.Message,),
dict(
DESCRIPTOR=_FEATURELIST,
__module__="example_pb2"
# @@protoc_insertion_point(class_scope:tfrecord.FeatureList)
),
)
_sym_db.RegisterMessage(FeatureList)
FeatureLists = _reflection.GeneratedProtocolMessageType(
"FeatureLists",
(_message.Message,),
dict(
FeatureListEntry=_reflection.GeneratedProtocolMessageType(
"FeatureListEntry",
(_message.Message,),
dict(
DESCRIPTOR=_FEATURELISTS_FEATURELISTENTRY,
__module__="example_pb2"
# @@protoc_insertion_point(class_scope:tfrecord.FeatureLists.FeatureListEntry)
),
),
DESCRIPTOR=_FEATURELISTS,
__module__="example_pb2"
# @@protoc_insertion_point(class_scope:tfrecord.FeatureLists)
),
)
_sym_db.RegisterMessage(FeatureLists)
_sym_db.RegisterMessage(FeatureLists.FeatureListEntry)
Example = _reflection.GeneratedProtocolMessageType(
"Example",
(_message.Message,),
dict(
DESCRIPTOR=_EXAMPLE,
__module__="example_pb2"
# @@protoc_insertion_point(class_scope:tfrecord.Example)
),
)
_sym_db.RegisterMessage(Example)
SequenceExample = _reflection.GeneratedProtocolMessageType(
"SequenceExample",
(_message.Message,),
dict(
DESCRIPTOR=_SEQUENCEEXAMPLE,
__module__="example_pb2"
# @@protoc_insertion_point(class_scope:tfrecord.SequenceExample)
),
)
_sym_db.RegisterMessage(SequenceExample)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b("\370\001\001"))
_FLOATLIST.fields_by_name["value"].has_options = True
_FLOATLIST.fields_by_name["value"]._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b("\020\001"))
_INT64LIST.fields_by_name["value"].has_options = True
_INT64LIST.fields_by_name["value"]._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b("\020\001"))
_FEATURES_FEATUREENTRY.has_options = True
_FEATURES_FEATUREENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001"))
_FEATURELISTS_FEATURELISTENTRY.has_options = True
_FEATURELISTS_FEATURELISTENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b("8\001"))
# @@protoc_insertion_point(module_scope)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from io import BytesIO
from typing import Iterator, List, Tuple, Union
import torchdata
from torch.utils.data.datapipes.utils.common import match_masks
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
@functional_datapipe("list_files_by_s3")
class S3FileListerIterDataPipe(IterDataPipe[str]):
r"""
Iterable DataPipe that lists Amazon S3 file URLs with the given prefixes (functional name: ``list_files_by_s3``).
Acceptable prefixes include ``s3://bucket-name``, ``s3://bucket-name/``, ``s3://bucket-name/folder``.
Note:
1. ``source_datapipe`` **must** contain a list of valid S3 URLs
2. ``length`` is `-1` by default, and any call to ``__len__()`` is invalid, because the length is unknown
until all files are iterated.
3. ``request_timeout_ms`` and ``region`` will overwrite settings in the configuration file or
environment variables.
4. The lack of proper AWS configuration can lead to an empty response. For more details related to S3 IO DataPipe
setup and AWS config, please see the `README file`_.
.. _README file:
https://github.com/pytorch/data/tree/main/torchdata/datapipes/iter/load#s3-io-datapipe-documentation
Args:
source_datapipe: a DataPipe that contains URLs/URL prefixes to s3 files
length: Nominal length of the datapipe
request_timeout_ms: timeout setting for each request (3,000ms by default)
region: region for access files (inferred from credentials by default)
Example:
.. testsetup::
from unittest import mock
from torchdata.datapipes.iter import IterableWrapper, S3FileLister
file_lister_patch = mock.patch.object(S3FileLister, "__iter__", return_value=iter([]))
file_lister_patch.start()
.. testcode::
from torchdata.datapipes.iter import IterableWrapper, S3FileLister
s3_prefixes = IterableWrapper(['s3://bucket-name/folder/', ...])
dp_s3_urls = S3FileLister(s3_prefixes)
for d in dp_s3_urls:
pass
# Functional API
dp_s3_urls = s3_prefixes.list_files_by_s3(request_timeout_ms=100)
for d in dp_s3_urls:
pass
.. testcleanup::
file_lister_patch.stop()
"""
def __init__(
self,
source_datapipe: IterDataPipe[str],
length: int = -1,
request_timeout_ms=-1,
region="",
masks: Union[str, List[str]] = "",
) -> None:
if not hasattr(torchdata, "_torchdata") or not hasattr(torchdata._torchdata, "S3Handler"):
raise ModuleNotFoundError("TorchData must be built with BUILD_S3=1 to use this datapipe.")
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.length: int = length
self.handler = torchdata._torchdata.S3Handler(request_timeout_ms, region)
self.masks = masks
def __iter__(self) -> Iterator[str]:
for prefix in self.source_datapipe:
while True:
urls = self.handler.list_files(prefix)
for url in urls:
if match_masks(url, self.masks):
yield url
if not urls:
break
self.handler.clear_marker()
def __len__(self) -> int:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
@functional_datapipe("load_files_by_s3")
class S3FileLoaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Iterable DataPipe that loads Amazon S3 files from the given S3 URLs (functional name: ``load_files_by_s3``).
``S3FileLoader`` iterates all given S3 URLs in ``BytesIO`` format with ``(url, BytesIO)`` tuples.
Note:
1. ``source_datapipe`` **must** contain a list of valid S3 URLs.
2. ``request_timeout_ms`` and ``region`` will overwrite settings in the
configuration file or environment variables.
3. The lack of proper AWS configuration can lead to an empty response. For more details related to S3 IO DataPipe
setup and AWS config, please see the `README file`_.
.. _README file:
https://github.com/pytorch/data/tree/main/torchdata/datapipes/iter/load#s3-io-datapipe-documentation
Args:
source_datapipe: a DataPipe that contains URLs to s3 files
request_timeout_ms: timeout setting for each request (3,000ms by default)
region: region for access files (inferred from credentials by default)
buffer_size: buffer size of each chunk to download large files progressively (128 MB by default)
multi_part_download: flag to split each chunk into small packets and download those packets in parallel (enabled by default)
Example:
.. testsetup::
from unittest import mock
from torchdata.datapipes.iter import S3FileLister
file_lister_patch = mock.patch.object(S3FileLister, "__iter__", return_value=iter([]))
file_lister_patch.start()
.. testcode::
from torchdata.datapipes.iter import IterableWrapper, S3FileLoader
dp_s3_urls = IterableWrapper(['s3://bucket-name/folder/', ...]).list_files_by_s3()
# In order to make sure data are shuffled and sharded in the
# distributed environment, `shuffle` and `sharding_filter`
# are required. For detail, please check our tutorial in:
# https://pytorch.org/data/main/tutorial.html#working-with-dataloader
sharded_s3_urls = dp_s3_urls.shuffle().sharding_filter()
dp_s3_files = S3FileLoader(sharded_s3_urls)
for url, fd in dp_s3_files: # Start loading data
data = fd.read()
# Functional API
dp_s3_files = sharded_s3_urls.load_files_by_s3(buffer_size=256)
for url, fd in dp_s3_files:
data = fd.read()
.. testcleanup::
file_lister_patch.stop()
"""
def __init__(
self,
source_datapipe: IterDataPipe[str],
request_timeout_ms=-1,
region="",
buffer_size=None,
multi_part_download=None,
) -> None:
if not hasattr(torchdata, "_torchdata") or not hasattr(torchdata._torchdata, "S3Handler"):
raise ModuleNotFoundError("TorchData must be built with BUILD_S3=1 to use this datapipe.")
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.handler = torchdata._torchdata.S3Handler(request_timeout_ms, region)
if buffer_size:
self.handler.set_buffer_size(buffer_size)
if multi_part_download:
self.handler.set_multi_part_download(multi_part_download)
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for url in self.source_datapipe:
yield url, StreamWrapper(BytesIO(self.handler.s3_read(url)))
def __len__(self) -> int:
return len(self.source_datapipe)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import re
import urllib
import warnings
from typing import Any, Dict, Iterator, Optional, Tuple
import requests
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
# TODO(642): Remove this helper function when https://bugs.python.org/issue42627 is resolved
def _get_proxies() -> Optional[Dict[str, str]]:
import os
if os.name == "nt":
proxies = urllib.request.getproxies()
address = proxies.get("https")
# The default proxy type of Windows is HTTP
if address and address.startswith("https"):
address = "http" + address[5:]
proxies["https"] = address
return proxies
return None
def _get_response_from_http(
url: str, *, timeout: Optional[float], **query_params: Optional[Dict[str, Any]]
) -> Tuple[str, StreamWrapper]:
with requests.Session() as session:
proxies = _get_proxies()
r = session.get(url, timeout=timeout, proxies=proxies, stream=True, **query_params) # type: ignore[attr-defined]
r.raise_for_status()
return url, StreamWrapper(r.raw)
@functional_datapipe("read_from_http")
class HTTPReaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Takes file URLs (HTTP URLs pointing to files), and yields tuples of file URL and
IO stream (functional name: ``read_from_http``).
Args:
source_datapipe: a DataPipe that contains URLs
timeout: timeout in seconds for HTTP request
skip_on_error: whether to skip over URLs causing problems; otherwise, an exception is raised
**kwargs: a Dictionary to pass optional arguments that requests takes. For the full list check out https://docs.python-requests.org/en/master/api/
Example:
.. testcode::
from torchdata.datapipes.iter import IterableWrapper, HttpReader
file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE"
query_params = {"auth" : ("fake_username", "fake_password"), "allow_redirects" : True}
timeout = 120
http_reader_dp = HttpReader(IterableWrapper([file_url]), timeout=timeout, **query_params)
reader_dp = http_reader_dp.readlines()
it = iter(reader_dp)
path, line = next(it)
print((path, line))
Output:
.. testoutput::
('https://raw.githubusercontent.com/pytorch/data/main/LICENSE', b'BSD 3-Clause License')
"""
def __init__(
self,
source_datapipe: IterDataPipe[str],
timeout: Optional[float] = None,
skip_on_error: bool = False,
**kwargs: Optional[Dict[str, Any]],
) -> None:
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.timeout = timeout
self.skip_on_error = skip_on_error
self.query_params = kwargs
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for url in self.source_datapipe:
try:
yield _get_response_from_http(url, timeout=self.timeout, **self.query_params)
except Exception as e:
if self.skip_on_error:
warnings.warn(f"{e}, skipping...")
else:
raise
def __len__(self) -> int:
return len(self.source_datapipe)
def _extract_gdrive_api_response(content: str) -> Optional[str]:
match = re.search("<title>Google Drive - (?P<api_response>.+?)</title>", content)
return match["api_response"] if match is not None else None
def _get_response_from_google_drive(
url: str, *, timeout: Optional[float], **query_params: Optional[Dict[str, Any]]
) -> Tuple[str, StreamWrapper]:
confirm_token = None
with requests.Session() as session:
response = session.get(url, timeout=timeout, stream=True, **query_params) # type: ignore[attr-defined]
response.raise_for_status()
for k, v in response.cookies.items():
if k.startswith("download_warning"):
confirm_token = v
break
else:
api_response = _extract_gdrive_api_response(response.text)
if api_response == "Virus scan warning":
confirm_token = "t"
elif api_response == "Quota exceeded":
raise RuntimeError(f"Google drive link {url} is currently unavailable, because the quota was exceeded.")
if confirm_token:
url = url + "&confirm=" + confirm_token
response = session.get(url, timeout=timeout, stream=True, **query_params) # type: ignore[attr-defined]
response.raise_for_status()
if "content-disposition" not in response.headers:
raise RuntimeError(
f"Google drive link {url} internal error: "
"headers don't contain content-disposition. This is usually caused by "
"using a sharing/viewing link instead of a download link. Click 'Download' on the "
"Google Drive page, which should redirect you to a download page, and use the link "
"of that page."
)
filename = re.findall('filename="(.+)"', response.headers["content-disposition"])
if not filename:
raise RuntimeError(f"Google drive link {url}: filename could not be autodetected")
return filename[0], StreamWrapper(response.raw)
@functional_datapipe("read_from_gdrive")
class GDriveReaderDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Takes URLs pointing at GDrive files, and yields tuples of file name and
IO stream (functional name: ``read_from_gdrive``).
Args:
source_datapipe: a DataPipe that contains URLs to GDrive files
timeout: timeout in seconds for HTTP request
skip_on_error: whether to skip over URLs causing problems; otherwise, an exception is raised
**kwargs: a Dictionary to pass optional arguments that requests takes. For the full list check out https://docs.python-requests.org/en/master/api/
Example:
.. testsetup::
from torchdata.datapipes.iter import GDriveReader
GDriveReader.readlines = lambda self: [
("https://drive.google.com/uc?export=download&id=SomeIDToAGDriveFile", b"<First line from the GDrive File>")
]
.. testcode::
from torchdata.datapipes.iter import IterableWrapper, GDriveReader
gdrive_file_url = "https://drive.google.com/uc?export=download&id=SomeIDToAGDriveFile"
gdrive_reader_dp = GDriveReader(IterableWrapper([gdrive_file_url]))
reader_dp = gdrive_reader_dp.readlines()
it = iter(reader_dp)
path, line = next(it)
print((path, line))
Output:
.. testoutput::
('https://drive.google.com/uc?export=download&id=SomeIDToAGDriveFile', b'<First line from the GDrive File>')
"""
source_datapipe: IterDataPipe[str]
def __init__(
self,
source_datapipe: IterDataPipe[str],
*,
timeout: Optional[float] = None,
skip_on_error: bool = False,
**kwargs: Optional[Dict[str, Any]],
) -> None:
self.source_datapipe = source_datapipe
self.timeout = timeout
self.skip_on_error = skip_on_error
self.query_params = kwargs
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for url in self.source_datapipe:
try:
yield _get_response_from_google_drive(url, timeout=self.timeout, **self.query_params)
except Exception as e:
if self.skip_on_error:
warnings.warn(f"{e}, skipping...")
else:
raise
def __len__(self) -> int:
return len(self.source_datapipe)
@functional_datapipe("read_from_remote")
class OnlineReaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Takes file URLs (can be HTTP URLs pointing to files or URLs to GDrive files), and
yields tuples of file URL and IO stream (functional name: ``read_from_remote``).
Args:
source_datapipe: a DataPipe that contains URLs
timeout: timeout in seconds for HTTP request
skip_on_error: whether to skip over URLs causing problems; otherwise, an exception is raised
**kwargs: a Dictionary to pass optional arguments that requests takes. For the full list check out https://docs.python-requests.org/en/master/api/
Example:
.. testcode::
from torchdata.datapipes.iter import IterableWrapper, OnlineReader
file_url = "https://raw.githubusercontent.com/pytorch/data/main/LICENSE"
online_reader_dp = OnlineReader(IterableWrapper([file_url]))
reader_dp = online_reader_dp.readlines()
it = iter(reader_dp)
path, line = next(it)
print((path, line))
Output:
.. testoutput::
('https://raw.githubusercontent.com/pytorch/data/main/LICENSE', b'BSD 3-Clause License')
"""
source_datapipe: IterDataPipe[str]
def __init__(
self,
source_datapipe: IterDataPipe[str],
*,
timeout: Optional[float] = None,
skip_on_error: bool = False,
**kwargs: Optional[Dict[str, Any]],
) -> None:
self.source_datapipe = source_datapipe
self.timeout = timeout
self.skip_on_error = skip_on_error
self.query_params = kwargs
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for url in self.source_datapipe:
parts = urllib.parse.urlparse(url)
if re.match(r"(drive|docs)[.]google[.]com", parts.netloc):
try:
yield _get_response_from_google_drive(url, timeout=self.timeout, **self.query_params)
except Exception as e:
if self.skip_on_error:
warnings.warn(f"{e}, skipping...")
else:
raise
else:
try:
yield _get_response_from_http(url, timeout=self.timeout, **self.query_params)
except Exception as e:
if self.skip_on_error:
warnings.warn(f"{e}, skipping...")
else:
raise
def __len__(self) -> int:
return len(self.source_datapipe)
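# A minimal sketch (not part of the original module, assuming network access):
# with ``skip_on_error=True`` a failing URL only emits a warning instead of
# aborting the whole pipeline. The second URL below is a hypothetical,
# intentionally unreachable address used purely for illustration.
def _example_online_reader_skip_on_error():
    from torchdata.datapipes.iter import IterableWrapper, OnlineReader

    urls = [
        "https://raw.githubusercontent.com/pytorch/data/main/LICENSE",
        "https://example.invalid/missing-file",  # hypothetical bad URL
    ]
    dp = OnlineReader(IterableWrapper(urls), skip_on_error=True)
    return [url for url, _ in dp]  # the unreachable URL is skipped with a warning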
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterator, Tuple
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
try:
from aistore.client import Client
from aistore.pytorch.utils import parse_url, unparse_url
HAS_AIS = True
except ImportError:
HAS_AIS = False
try:
import aistore
from packaging.version import parse as parse_version
AIS_VERSION_CHECK = parse_version(aistore.__version__) >= parse_version("1.0.2")
except (ImportError, AttributeError):
AIS_VERSION_CHECK = False
def _assert_aistore() -> None:
if not HAS_AIS:
raise ModuleNotFoundError(
"Package `aistore` (>=1.0.2) is required to be installed to use this datapipe."
"Please run `pip install --upgrade aistore` or `conda install aistore` to install the package"
"For more info visit: https://github.com/NVIDIA/aistore/blob/master/sdk/python/"
)
def _assert_aistore_version() -> None:
if not AIS_VERSION_CHECK:
raise ImportError(
"AIStore version >= 1.0.2 required"
"Please run `pip install --upgrade aistore` or `conda update aistore` to install the latest version"
)
@functional_datapipe("list_files_by_ais")
class AISFileListerIterDataPipe(IterDataPipe[str]):
"""
Iterable Datapipe that lists files from the AIStore backends with the given URL prefixes
(functional name: ``list_files_by_ais``).
Acceptable prefixes include, but are not limited to, `ais://bucket-name` and `ais://bucket-name/`
Note:
- This function also supports files from multiple backends (`aws://..`, `gcp://..`, `azure://..`, etc)
- Input must be a list and direct URLs are not supported.
- length is -1 by default; all calls to len() are invalid because
not all items are iterated at the start.
- This internally uses AIStore Python SDK.
Args:
source_datapipe(IterDataPipe[str]): a DataPipe that contains URLs/URL
prefixes to objects on AIS
url(str): AIStore endpoint
length(int): length of the datapipe
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, AISFileLister
>>> ais_prefixes = IterableWrapper(['gcp://bucket-name/folder/', 'aws:bucket-name/folder/', 'ais://bucket-name/folder/', ...])
>>> dp_ais_urls = AISFileLister(url='localhost:8080', source_datapipe=ais_prefixes)
>>> for url in dp_ais_urls:
... pass
>>> # Functional API
>>> dp_ais_urls = ais_prefixes.list_files_by_ais(url='localhost:8080')
>>> for url in dp_ais_urls:
... pass
"""
def __init__(self, source_datapipe: IterDataPipe[str], url: str, length: int = -1) -> None:
_assert_aistore()
_assert_aistore_version()
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.length: int = length
self.client = Client(url)
def __iter__(self) -> Iterator[str]:
for prefix in self.source_datapipe:
provider, bck_name, prefix = parse_url(prefix)
obj_iter = self.client.bucket(bck_name, provider).list_objects_iter(prefix=prefix)
for entry in obj_iter:
yield unparse_url(provider=provider, bck_name=bck_name, obj_name=entry.name)
def __len__(self) -> int:
if self.length == -1:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
return self.length
@functional_datapipe("load_files_by_ais")
class AISFileLoaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
"""
Iterable DataPipe that loads files from AIStore with the given URLs (functional name: ``load_files_by_ais``).
Iterates all files in BytesIO format and returns a tuple (url, BytesIO).
Note:
- This function also supports files from multiple backends (`aws://..`, `gcp://..`, `azure://..`, etc)
- Input must be a list and direct URLs are not supported.
- This internally uses AIStore Python SDK.
Args:
source_datapipe(IterDataPipe[str]): a DataPipe that contains URLs/URL prefixes to objects
url(str): AIStore endpoint
length(int): length of the datapipe
Example:
>>> from torchdata.datapipes.iter import IterableWrapper, AISFileLister,AISFileLoader
>>> ais_prefixes = IterableWrapper(['gcp://bucket-name/folder/', 'aws:bucket-name/folder/', 'ais://bucket-name/folder/', ...])
>>> dp_ais_urls = AISFileLister(url='localhost:8080', source_datapipe=ais_prefixes)
>>> dp_cloud_files = AISFileLoader(url='localhost:8080', source_datapipe=dp_ais_urls)
>>> for url, file in dp_cloud_files:
... pass
>>> # Functional API
>>> dp_cloud_files = dp_ais_urls.load_files_by_ais(url='localhost:8080')
>>> for url, file in dp_cloud_files:
... pass
"""
def __init__(self, source_datapipe: IterDataPipe[str], url: str, length: int = -1) -> None:
_assert_aistore()
_assert_aistore_version()
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.length = length
self.client = Client(url)
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for url in self.source_datapipe:
provider, bck_name, obj_name = parse_url(url)
yield url, StreamWrapper(
self.client.bucket(bck_name=bck_name, provider=provider).object(obj_name=obj_name).get().raw()
)
def __len__(self) -> int:
return len(self.source_datapipe)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import posixpath
from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union
from torch.utils.data.datapipes.utils.common import match_masks
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterableWrapper, IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
try:
import fsspec
except ImportError:
fsspec = None
U = Union[bytes, bytearray, str]
def _assert_fsspec() -> None:
if fsspec is None:
raise ModuleNotFoundError(
"Package `fsspec` is required to be installed to use this datapipe."
"Please use `pip install fsspec` or `conda install -c conda-forge fsspec`"
"to install the package"
)
@functional_datapipe("list_files_by_fsspec")
class FSSpecFileListerIterDataPipe(IterDataPipe[str]):
r"""
Lists the contents of the directory at the provided ``root`` pathname or URL,
and yields the full pathname or URL for each file within the
directory (functional name: ``list_files_by_fsspec``).
Args:
root: The root `fsspec` path directory or list of path directories to list files from
masks: Unix style filter string or string list for filtering file name(s)
kwargs: Extra options that make sense to a particular storage connection,
e.g. host, port, username, password, etc.
Example:
.. testsetup::
dir_path = "path"
.. testcode::
from torchdata.datapipes.iter import FSSpecFileLister
datapipe = FSSpecFileLister(root=dir_path)
"""
def __init__(
self,
root: Union[str, Sequence[str], IterDataPipe],
masks: Union[str, List[str]] = "",
**kwargs,
) -> None:
_assert_fsspec()
if isinstance(root, str):
root = [
root,
]
if not isinstance(root, IterDataPipe):
self.datapipe: IterDataPipe = IterableWrapper(root) # type: ignore[assignment]
else:
self.datapipe = root
self.masks = masks
self.kwargs_for_connection = kwargs
def __iter__(self) -> Iterator[str]:
for root in self.datapipe:
fs, path = fsspec.core.url_to_fs(root, **self.kwargs_for_connection)
if isinstance(fs.protocol, str):
protocol_list = [fs.protocol]
else:
protocol_list = fs.protocol
# fsspec.core.url_to_fs will return "abfs" for both "az://" and "abfs://" urls
if "abfs" in protocol_list:
protocol_list.append("az")
is_local = fs.protocol == "file" or not any(root.startswith(protocol) for protocol in protocol_list)
if fs.isfile(path):
yield root
else:
for file_name in fs.ls(path, detail=False): # Ensure it returns List[str], not List[Dict]
if not match_masks(file_name, self.masks):
continue
# ensure the file name has the full fsspec protocol path
if any(file_name.startswith(protocol) for protocol in protocol_list):
yield file_name
else:
if is_local:
abs_path = os.path.join(path, file_name)
elif not file_name.startswith(path):
abs_path = posixpath.join(path, file_name)
else:
abs_path = file_name
starts_with = False
for protocol in protocol_list:
if root.startswith(protocol):
starts_with = True
yield protocol + "://" + abs_path
break
if not starts_with:
yield abs_path
@functional_datapipe("open_files_by_fsspec")
class FSSpecFileOpenerIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Opens files from input datapipe which contains `fsspec` paths and yields a tuple of
pathname and opened file stream (functional name: ``open_files_by_fsspec``).
Args:
source_datapipe: Iterable DataPipe that provides the pathnames or URLs
mode: An optional string that specifies the mode in which the file is opened (``"r"`` by default)
kwargs_for_open: Optional Dict to specify kwargs for opening files (``fs.open()``)
kwargs: Extra options that are used to establish a particular storage connection,
e.g. host, port, username, password, etc.
Example:
.. testsetup::
dir_path = "path"
.. testcode::
from torchdata.datapipes.iter import FSSpecFileLister
datapipe = FSSpecFileLister(root=dir_path)
file_dp = datapipe.open_files_by_fsspec()
"""
def __init__(
self, source_datapipe: IterDataPipe[str], mode: str = "r", *, kwargs_for_open: Optional[Dict] = None, **kwargs
) -> None:
_assert_fsspec()
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.mode: str = mode
self.kwargs_for_open = kwargs_for_open if kwargs_for_open is not None else {}
self.kwargs_for_connection = kwargs
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for file_uri in self.source_datapipe:
fs, path = fsspec.core.url_to_fs(file_uri, **self.kwargs_for_connection)
file = fs.open(path, self.mode, **self.kwargs_for_open)
yield file_uri, StreamWrapper(file)
def __len__(self) -> int:
return len(self.source_datapipe)
@functional_datapipe("save_by_fsspec")
class FSSpecSaverIterDataPipe(IterDataPipe[str]):
r"""
Takes in a DataPipe of tuples of metadata and data, saves the data to the target
path (generated by the filepath_fn and metadata), and yields the resulting `fsspec`
path (functional name: ``save_by_fsspec``).
Args:
source_datapipe: Iterable DataPipe with tuples of metadata and data
mode: Mode in which the file will be opened to write the data (``"w"`` by default)
filepath_fn: Function that takes in metadata and returns the target path of the new file
kwargs_for_open: Optional Dict to specify kwargs for opening files (``fs.open()``)
kwargs: Extra options that are used to establish a particular storage connection,
e.g. host, port, username, password, etc.
Example:
.. testsetup::
file_prefix = "file"
.. testcode::
from torchdata.datapipes.iter import IterableWrapper
def filepath_fn(name: str) -> str:
return file_prefix + name
name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
fsspec_saver_dp = source_dp.save_by_fsspec(filepath_fn=filepath_fn, mode="wb")
res_file_paths = list(fsspec_saver_dp)
.. testcleanup::
import os
for name in name_to_data.keys():
os.remove(file_prefix + name)
"""
def __init__(
self,
source_datapipe: IterDataPipe[Tuple[Any, U]],
mode: str = "w",
filepath_fn: Optional[Callable] = None,
*,
kwargs_for_open: Optional[Dict] = None,
**kwargs,
):
_assert_fsspec()
self.source_datapipe: IterDataPipe[Tuple[Any, U]] = source_datapipe
self.mode: str = mode
self.filepath_fn: Optional[Callable] = filepath_fn
self.kwargs_for_open = kwargs_for_open if kwargs_for_open is not None else {}
self.kwargs_for_connection = kwargs
def __iter__(self) -> Iterator[str]:
for meta, data in self.source_datapipe:
filepath = meta if self.filepath_fn is None else self.filepath_fn(meta)
fs, path = fsspec.core.url_to_fs(filepath, **self.kwargs_for_connection)
with fs.open(path, self.mode, **self.kwargs_for_open) as f:
f.write(data)
yield filepath
def __len__(self) -> int:
return len(self.source_datapipe)
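# A minimal sketch (not part of the original module, assuming ``fsspec`` is
# installed): chaining the fsspec lister and opener on a local directory.
# ``/tmp/example_dir`` is a hypothetical path used purely for illustration.
def _example_fsspec_list_and_open(root="/tmp/example_dir"):
    from torchdata.datapipes.iter import FSSpecFileLister

    lister = FSSpecFileLister(root=root, masks="*.txt")
    opener = lister.open_files_by_fsspec(mode="r")
    # Each element is a (path, StreamWrapper) tuple; read() is forwarded to the
    # underlying file object.
    return [(path, stream.read()) for path, stream in opener]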
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Iterator, Tuple
from torchdata.datapipes.iter import IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
try:
import datasets
except ImportError:
datasets = None
def _get_response_from_huggingface_hub(
dataset: str,
streaming: bool = True,
**config_kwargs,
) -> Iterator[Any]:
hf_dataset = datasets.load_dataset(path=dataset, streaming=streaming, **config_kwargs)
return iter(hf_dataset)
class HuggingFaceHubReaderIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Takes in dataset names and returns an Iterable HuggingFace dataset.
Please refer to https://huggingface.co/docs/datasets/loading for the meaning and type of each argument.
Contrary to the underlying ``datasets`` implementation, the default behavior here differs in the following:
* ``streaming`` is set to ``True``
Args:
dataset: path or name of the dataset
**config_kwargs: additional arguments for ``datasets.load_dataset()``
Example:
.. testsetup::
import datasets
from torchdata.datapipes.iter import IterableWrapper, HuggingFaceHubReader
from unittest.mock import MagicMock
datasets.load_dataset = MagicMock(return_value=datasets.Dataset.from_dict(
{"id": ["7bd227d9-afc9-11e6-aba1-c4b301cdf627", "7bd22905-afc9-11e6-a5dc-c4b301cdf627" ], "package_name": ["com.mantz_it.rfanalyzer"] * 2}
))
.. testcode::
huggingface_reader_dp = HuggingFaceHubReader("lhoestq/demo1", revision="main")
elem = next(iter(huggingface_reader_dp))
assert elem["package_name"] == "com.mantz_it.rfanalyzer"
"""
def __init__(
self,
dataset: str,
**config_kwargs,
) -> None:
if datasets is None:
raise ModuleNotFoundError(
"Package `datasets` is required to be installed to use this datapipe."
"Please use `pip install datasets` or `conda install -c conda-forge datasets`"
"to install the package"
)
self.dataset = dataset
self.config_kwargs = config_kwargs
def __iter__(self) -> Iterator[Any]:
return _get_response_from_huggingface_hub(dataset=self.dataset, **self.config_kwargs)
def __len__(self) -> int:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Callable, Iterator, List, Optional, Sequence, Tuple, Union
from torch.utils.data.datapipes.utils.common import match_masks
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterableWrapper, IterDataPipe
from torchdata.datapipes.utils import StreamWrapper
try:
import iopath
except ImportError:
iopath = None
U = Union[bytes, bytearray, str]
def _create_default_pathmanager():
from iopath.common.file_io import HTTPURLHandler, OneDrivePathHandler, PathManager
pathmgr = PathManager()
pathmgr.register_handler(HTTPURLHandler(), allow_override=True)
pathmgr.register_handler(OneDrivePathHandler(), allow_override=True)
# S3PathHandler is not included in 0.1.8
try:
from iopath.common.s3 import S3PathHandler
pathmgr.register_handler(S3PathHandler(), allow_override=True)
except ImportError:
pass
return pathmgr
@functional_datapipe("list_files_by_iopath")
class IoPathFileListerIterDataPipe(IterDataPipe[str]):
r"""
Lists the contents of the directory at the provided ``root`` pathname or URL,
and yields the full pathname or URL for each file within the directory (functional name: ``list_files_by_iopath``).
Args:
root: The root local filepath or URL directory or list of roots to list files from
masks: Unix style filter string or string list for filtering file name(s)
pathmgr: Custom ``iopath.PathManager``. If not specified, a default ``PathManager`` is created.
Note:
Default ``PathManager`` currently supports local file path, normal HTTP URL and OneDrive URL.
S3 URL is supported only with ``iopath``>=0.1.9.
Example:
.. testsetup::
s3_url = "path"
.. testcode::
from torchdata.datapipes.iter import IoPathFileLister
datapipe = IoPathFileLister(root=s3_url)
"""
def __init__(
self,
root: Union[str, Sequence[str], IterDataPipe],
masks: Union[str, List[str]] = "",
*,
pathmgr=None,
handler=None,
) -> None:
if iopath is None:
raise ModuleNotFoundError(
"Package `iopath` is required to be installed to use this datapipe."
"Please use `pip install iopath` or `conda install -c conda-forge iopath`"
"to install the package"
)
if isinstance(root, str):
root = [
root,
]
if not isinstance(root, IterDataPipe):
self.datapipe: IterDataPipe = IterableWrapper(root) # type: ignore[assignment]
else:
self.datapipe = root
self.pathmgr = _create_default_pathmanager() if pathmgr is None else pathmgr
self.masks = masks
if handler is not None:
self.register_handler(handler, allow_override=True)
def register_handler(self, handler, allow_override=False):
self.pathmgr.register_handler(handler, allow_override=allow_override)
def __iter__(self) -> Iterator[str]:
for path in self.datapipe:
if self.pathmgr.isfile(path):
yield path
else:
for file_name in self.pathmgr.ls(path):
if match_masks(file_name, self.masks):
yield os.path.join(path, file_name)
@functional_datapipe("open_files_by_iopath")
class IoPathFileOpenerIterDataPipe(IterDataPipe[Tuple[str, StreamWrapper]]):
r"""
Opens files from input datapipe which contains pathnames or URLs,
and yields a tuple of pathname and opened file stream (functional name: ``open_files_by_iopath``).
Args:
source_datapipe: Iterable DataPipe that provides the pathnames or URLs
mode: An optional string that specifies the mode in which the file is opened (``"r"`` by default)
pathmgr: Custom ``iopath.PathManager``. If not specified, a default ``PathManager`` is created.
Note:
Default ``PathManager`` currently supports local file path, normal HTTP URL and OneDrive URL.
S3 URL is supported only with `iopath`>=0.1.9.
Example:
.. testsetup::
s3_url = "path"
.. testcode::
from torchdata.datapipes.iter import IoPathFileLister
datapipe = IoPathFileLister(root=s3_url)
file_dp = datapipe.open_files_by_iopath()
"""
def __init__(self, source_datapipe: IterDataPipe[str], mode: str = "r", pathmgr=None, handler=None) -> None:
if iopath is None:
raise ModuleNotFoundError(
"Package `iopath` is required to be installed to use this datapipe."
"Please use `pip install iopath` or `conda install -c conda-forge iopath`"
"to install the package"
)
self.source_datapipe: IterDataPipe[str] = source_datapipe
self.pathmgr = _create_default_pathmanager() if pathmgr is None else pathmgr
self.mode: str = mode
if handler is not None:
self.register_handler(handler, allow_override=True)
def register_handler(self, handler, allow_override=False):
self.pathmgr.register_handler(handler, allow_override=allow_override)
def __iter__(self) -> Iterator[Tuple[str, StreamWrapper]]:
for file_uri in self.source_datapipe:
file = self.pathmgr.open(file_uri, self.mode)
yield file_uri, StreamWrapper(file)
def __len__(self) -> int:
return len(self.source_datapipe)
@functional_datapipe("save_by_iopath")
class IoPathSaverIterDataPipe(IterDataPipe[str]):
r"""
Takes in a DataPipe of tuples of metadata and data, saves the data
to the target path which is generated by the ``filepath_fn`` and metadata, and yields the resulting path
in `iopath` format (functional name: ``save_by_iopath``).
Args:
source_datapipe: Iterable DataPipe with tuples of metadata and data
mode: Mode in which the file will be opened to write the data (``"w"`` by default)
filepath_fn: Function that takes in metadata and returns the target path of the new file
pathmgr: Custom ``iopath.PathManager``. If not specified, a default ``PathManager`` is created.
Note:
Default ``PathManager`` currently supports local file path, normal HTTP URL and OneDrive URL.
S3 URL is supported only with `iopath`>=0.1.9.
Example:
.. testsetup::
s3_url = "url"
.. testcode::
from torchdata.datapipes.iter import IterableWrapper
def filepath_fn(name: str) -> str:
return s3_url + name
name_to_data = {"1.txt": b"DATA1", "2.txt": b"DATA2", "3.txt": b"DATA3"}
source_dp = IterableWrapper(sorted(name_to_data.items()))
iopath_saver_dp = source_dp.save_by_iopath(filepath_fn=filepath_fn, mode="wb")
res_file_paths = list(iopath_saver_dp)
.. testcleanup::
import os
for file in ["1.txt", "1.txt.lock", "2.txt", "2.txt.lock", "3.txt", "3.txt.lock"]:
os.remove(s3_url + file)
"""
def __init__(
self,
source_datapipe: IterDataPipe[Tuple[Any, U]],
mode: str = "w",
filepath_fn: Optional[Callable] = None,
*,
pathmgr=None,
handler=None,
):
if iopath is None:
raise ModuleNotFoundError(
"Package `iopath` is required to be installed to use this datapipe."
"Please use `pip install iopath` or `conda install -c conda-forge iopath`"
"to install the package"
)
self.source_datapipe: IterDataPipe[Tuple[Any, U]] = source_datapipe
self.mode: str = mode
self.filepath_fn: Optional[Callable] = filepath_fn
self.pathmgr = _create_default_pathmanager() if pathmgr is None else pathmgr
if handler is not None:
self.register_handler(handler, allow_override=True)
def __iter__(self) -> Iterator[str]:
for meta, data in self.source_datapipe:
filepath = meta if self.filepath_fn is None else self.filepath_fn(meta)
with iopath.file_lock(filepath):
if not os.path.exists(filepath):
with self.pathmgr.open(filepath, self.mode) as f:
f.write(data)
yield filepath
def register_handler(self, handler, allow_override=False):
self.pathmgr.register_handler(handler, allow_override=allow_override)
def __len__(self) -> int:
return len(self.source_datapipe)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import inspect
import random
import warnings
from collections import deque
from concurrent import futures
from typing import Callable, Hashable, Iterator, List, Optional, Set, Sized, TypeVar, Union
import torch
from torch.utils.data.datapipes.utils.common import _check_unpickable_fn, validate_input_col
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
T_co = TypeVar("T_co", covariant=True)
def _no_op_fn(*args):
"""
No-operation function, returns passed arguments.
"""
if len(args) == 1:
return args[0]
return args
@functional_datapipe("map_batches")
class BatchMapperIterDataPipe(IterDataPipe[T_co]):
r"""
Combines elements from the source DataPipe to batches and applies a function
over each batch, then flattens the outputs to a single, unnested IterDataPipe
(functional name: ``map_batches``).
Args:
datapipe: Source IterDataPipe
fn: The function to be applied to each batch of data
batch_size: The size of batch to be aggregated from ``datapipe``
input_col: Index or indices of data to which ``fn`` is applied, such as:
- ``None`` as default to apply ``fn`` to the data directly.
- Integer(s) is used for list/tuple.
- Key(s) is used for dict.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> def fn(batch):
>>> return [d + 1 for d in batch]
>>> source_dp = IterableWrapper(list(range(5)))
>>> mapped_dp = source_dp.map_batches(fn, batch_size=3)
>>> list(mapped_dp)
[1, 2, 3, 4, 5]
Notes:
Compared with ``map``, ``map_batches`` doesn't take an ``output_col`` argument because
the size of the ``fn`` output is not guaranteed to be the same as the input batch.
With different sizes, this operation cannot assign data back to the original data structure.
This operation was introduced based on a use case from `TorchText`.
A pybinded C++ vectorized function can be applied for efficiency.
"""
datapipe: IterDataPipe
fn: Callable
batch_size: int
def __init__(
self,
datapipe: IterDataPipe,
fn: Callable,
batch_size: int,
input_col=None,
) -> None:
self.datapipe = datapipe
_check_unpickable_fn(fn)
self.fn = fn # type: ignore[assignment]
assert batch_size > 0, "Batch size is required to be larger than 0!"
self.batch_size = batch_size
self.input_col = input_col
def _apply_fn(self, batch):
if self.input_col is None:
return self.fn(batch)
if isinstance(self.input_col, (list, tuple)):
args = [[data[idx] for idx in self.input_col] for data in batch]
else:
args = [data[self.input_col] for data in batch]
return self.fn(args)
def __iter__(self) -> Iterator[T_co]:
batch: List = []
for d in self.datapipe:
batch.append(d)
if len(batch) == self.batch_size:
yield from self._apply_fn(batch)
batch = []
if batch:
yield from self._apply_fn(batch)
def __len__(self) -> int:
raise TypeError(f"{type(self).__name__}'s length relies on the output of its function.")
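# A minimal sketch (not part of the original module) illustrating why
# ``map_batches`` has no ``output_col``: the hypothetical batch function below
# returns fewer items than it receives, so results cannot be written back into
# the original rows one-to-one.
def _example_dedup(batch):
    # Output size differs from the input batch size.
    return sorted(set(batch))

def _example_map_batches_dedup():
    from torchdata.datapipes.iter import IterableWrapper

    dp = IterableWrapper([1, 1, 2, 3, 3, 3]).map_batches(_example_dedup, batch_size=3)
    return list(dp)  # -> [1, 2, 3]  (batches [1, 1, 2] -> [1, 2] and [3, 3, 3] -> [3])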
@functional_datapipe("flatmap")
class FlatMapperIterDataPipe(IterDataPipe[T_co]):
r"""
Applies a function over each item from the source DataPipe, then
flattens the outputs to a single, unnested IterDataPipe (functional name: ``flatmap``).
Note:
The output from ``fn`` must be a Sequence. Otherwise, an error will be raised.
If ``fn`` is ``None``, source DataPipe will be just flattened vertically, provided that items can be unpacked.
Args:
datapipe: Source IterDataPipe
fn: the function to be applied to each element in the DataPipe; the output must be a Sequence
input_col: Index or indices of data to which ``fn`` is applied, such as:
- ``None`` as default to apply ``fn`` to the data directly.
- Integer(s) is/are used for list/tuple.
- Key(s) is/are used for dict.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> def fn(e):
>>> return [e, e * 10]
>>> source_dp = IterableWrapper(list(range(5)))
>>> flatmapped_dp = source_dp.flatmap(fn)
>>> list(flatmapped_dp)
[0, 0, 1, 10, 2, 20, 3, 30, 4, 40]
>>>
>>> source_dp = IterableWrapper([[1, 2, 3], [4, 5, 6]])
>>> flatmapped_dp = source_dp.flatmap()
>>> list(flatmapped_dp)
[1, 2, 3, 4, 5, 6]
"""
datapipe: IterDataPipe
fn: Optional[Callable]
def __init__(self, datapipe: IterDataPipe, fn: Optional[Callable] = None, input_col=None) -> None:
self.datapipe = datapipe
if fn is None:
fn = _no_op_fn
_check_unpickable_fn(fn)
self.fn = fn # type: ignore[assignment]
self.input_col = input_col
validate_input_col(fn, input_col)
def _apply_fn(self, data):
if self.input_col is None:
return self.fn(data) # type: ignore[misc]
elif isinstance(self.input_col, (list, tuple)):
args = tuple(data[col] for col in self.input_col)
return self.fn(*args) # type: ignore[misc]
else:
return self.fn(data[self.input_col]) # type: ignore[misc]
def __iter__(self) -> Iterator[T_co]:
for d in self.datapipe:
yield from self._apply_fn(d)
def __len__(self) -> int:
raise TypeError(f"{type(self).__name__}'s length relies on the output of its function.")
@functional_datapipe("shuffled_flatmap")
class ShuffledFlatMapperIterDataPipe(IterDataPipe):
r"""
Applies a function over each item from the source DataPipe,
then collects the iterables returned in a buffer,
then, at every iteration, chooses at random one of the iterables in the buffer
and yields one item from this iterable (functional name: ``shuffled_flatmap``).
When the buffer is full, the DataPipe will begin to yield elements from iterables within the buffer.
New iterables will be added to the buffer once the existing ones run out of elements.
Note:
The output from ``fn`` must be an Iterable. Otherwise, an error will be raised.
If ``fn`` is ``None``, source DataPipe will be just flattened vertically, provided that items can be unpacked.
Args:
datapipe: Source IterDataPipe
fn: the function to be applied to each element in the DataPipe; the output must be an Iterable
input_col: Index or indices of data to which ``fn`` is applied, such as:
- ``None`` as default to apply ``fn`` to the data directly.
- Integer(s) is/are used for list/tuple.
- Key(s) is/are used for dict.
buffer_size: the max number of iterables this DataPipe can hold at a time (defaults to ``100``)
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper([[1, 2, 3, 4], 'abcd', 'ABCD'])
>>> shuffled_flatmapped_dp = source_dp.shuffled_flatmap(buffer_size=2)
>>> list(shuffled_flatmapped_dp)
['a', 'b', 'c', 1, 'd', 'A', 'B', 'C', 2, 'D', 3, 4]
>>>
>>> # To shuffle all the elements, you can combine `shuffled_flatmap` with `in_batch_shuffle` like this:
>>> fully_shuffled_flatmapped_dp = source_dp.in_batch_shuffle()
>>> fully_shuffled_flatmapped_dp = fully_shuffled_flatmapped_dp.shuffled_flatmap()
>>> list(fully_shuffled_flatmapped_dp)
['b', 3, 'c', 'd', 'C', 'A', 'a', 2, 'B', 'D', 4, 1]
"""
datapipe: IterDataPipe
fn: Optional[Callable]
buffer_size: int
_buffer: List[Iterator]
_enabled: bool
_seed: Optional[int]
_rng: random.Random
_no_op_fn: bool = False
def __init__(
self, datapipe: IterDataPipe, fn: Optional[Callable] = None, input_col=None, buffer_size: int = 100
) -> None:
super().__init__()
self._buffer = []
self.datapipe = datapipe
if fn is None:
fn = _no_op_fn
self._no_op_fn = True
_check_unpickable_fn(fn)
self.fn = fn # type: ignore[assignment]
self.input_col = input_col
validate_input_col(fn, input_col)
assert buffer_size > 0, "buffer_size should be larger than 0"
self.buffer_size = buffer_size
self._enabled = True
self._seed = None
self._rng = random.Random()
def set_shuffle(self, shuffle=True):
self._enabled = shuffle
return self
def set_seed(self, seed: int):
self._seed = seed
return self
def reset(self) -> None:
self._buffer = []
if self._enabled:
if self._seed is None:
self._seed = int(torch.empty((), dtype=torch.int64).random_().item())
self._rng.seed(self._seed)
self._seed = None
def _apply_fn(self, data):
if self.input_col is None:
return self.fn(data) # type: ignore[misc]
elif isinstance(self.input_col, (list, tuple)):
args = tuple(data[col] for col in self.input_col)
return self.fn(*args) # type: ignore[misc]
else:
return self.fn(data[self.input_col]) # type: ignore[misc]
def __iter__(self) -> Iterator[T_co]:
if not self._enabled: # equivalent to flatmap
for x in self.datapipe:
yield from self._apply_fn(x)
else:
idx = self._rng.randint(0, self.buffer_size - 1)
for x in self.datapipe:
while len(self._buffer) == self.buffer_size:
try:
yield next(self._buffer[idx])
idx = self._rng.randint(0, self.buffer_size - 1)
except StopIteration:
self._buffer.pop(idx)
self._buffer.append(iter(self._apply_fn(x)))
while self._buffer:
try:
idx = self._rng.randint(0, len(self._buffer) - 1)
yield next(self._buffer[idx])
except StopIteration:
self._buffer.pop(idx)
def __len__(self) -> int:
if self._no_op_fn:
return sum(map(len, self.datapipe))
raise TypeError(f"{type(self).__name__}'s length relies on the output of its function.")
def __getstate__(self):
state = (
self.datapipe,
self.fn,
self.input_col,
self.buffer_size,
self._buffer,
self._enabled,
self._seed,
self._rng.getstate(),
self._valid_iterator_id,
self._number_of_samples_yielded,
)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(
self.datapipe,
self.fn,
self.input_col,
self.buffer_size,
self._buffer,
self._enabled,
self._seed,
rng_state,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self._rng = random.Random()
self._rng.setstate(rng_state)
def __del__(self):
self._buffer.clear()
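# A minimal sketch (not part of the original module): with shuffling disabled
# via ``set_shuffle(False)``, ``shuffled_flatmap`` degenerates into a plain
# flatmap, which can help when debugging order-sensitive pipelines.
def _example_shuffled_flatmap_disabled():
    from torchdata.datapipes.iter import IterableWrapper

    dp = IterableWrapper([[1, 2], [3, 4]]).shuffled_flatmap().set_shuffle(False)
    return list(dp)  # -> [1, 2, 3, 4]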
@functional_datapipe("drop")
class DropperIterDataPipe(IterDataPipe[T_co]):
r"""
Drop columns/elements in input DataPipe via its indices (functional name: ``drop``).
Args:
datapipe: IterDataPipe with columns to be dropped
indices: a single column index to be dropped or a list of indices
- Integer(s) is/are used for list/tuple.
- Key(s) is/are used for dict.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp1 = IterableWrapper(range(5))
>>> dp2 = IterableWrapper(range(10, 15))
>>> dp = dp1.zip(dp2)
>>> list(dp)
[(0, 10), (1, 11), (2, 12), (3, 13), (4, 14)]
>>> drop_dp = dp.drop(1)
>>> list(drop_dp)
[(0,), (1,), (2,), (3,), (4,)]
"""
datapipe: IterDataPipe
def __init__(
self,
datapipe: IterDataPipe,
indices: Union[Hashable, List[Hashable]],
) -> None:
super().__init__()
self.datapipe = datapipe
if isinstance(indices, list):
self.indices = set(indices)
else:
self.indices = {indices}
def __iter__(self) -> Iterator[T_co]:
for old_item in self.datapipe:
if isinstance(old_item, tuple):
new_item = tuple(x for i, x in enumerate(old_item) if i not in self.indices) # type: ignore[assignment]
elif isinstance(old_item, list):
new_item = [x for i, x in enumerate(old_item) if i not in self.indices] # type: ignore[assignment]
elif isinstance(old_item, dict):
new_item = {k: v for (k, v) in old_item.items() if k not in self.indices} # type: ignore[assignment]
else:
new_item = old_item
warnings.warn(
"The next item was not an iterable and cannot be filtered, "
"please be aware that no filter was done or new item created."
)
# check to make sure all indices requested were in the item. warn if not
try:
for i in self.indices:
old_item[i]
except (IndexError, KeyError):
warnings.warn(
"At least one index in the filter is not present in the item being returned,"
" please be aware that expected columns/keys may be missing."
)
yield new_item # type: ignore[misc]
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
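# A minimal sketch (not part of the original module): ``drop`` also works on
# dict items, removing entries by key rather than by positional index.
def _example_drop_dict_key():
    from torchdata.datapipes.iter import IterableWrapper

    dp = IterableWrapper([{"id": 0, "label": "a"}, {"id": 1, "label": "b"}]).drop("label")
    return list(dp)  # -> [{"id": 0}, {"id": 1}]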
@functional_datapipe("slice")
class SliceIterDataPipe(IterDataPipe[T_co]):
r"""
Returns a slice of elements from the input DataPipe via start/stop/step or indices (functional name: ``slice``).
Args:
datapipe: IterDataPipe with iterable elements
index: a single start index for the slice or a list of indices to be returned instead of a start/stop slice
- Integer(s) is/are used for list/tuple.
- Key(s) is/are used for dict.
stop: the slice stop. Ignored if index is a list or if the element is a dict
step: step to be taken from start to stop. Ignored if index is a list or if the element is a dict
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper([(0, 10, 100), (1, 11, 111), (2, 12, 122), (3, 13, 133), (4, 14, 144)])
>>> slice_dp = dp.slice(0, 2)
>>> list(slice_dp)
[(0, 10), (1, 11), (2, 12), (3, 13), (4, 14)]
"""
datapipe: IterDataPipe
def __init__(
self,
datapipe: IterDataPipe,
index: Union[int, List[Hashable]],
stop: Optional[int] = None,
step: Optional[int] = None,
) -> None:
super().__init__()
self.datapipe = datapipe
self.index = index
self.stop = stop
self.step = step
if isinstance(index, list):
if stop or step:
warnings.warn(
"A list of indices was passed as well as a stop or step for the slice, "
"these arguments can't be used together so only the indices list will be used."
)
def __iter__(self) -> Iterator[T_co]:
for old_item in self.datapipe:
if isinstance(old_item, tuple):
if isinstance(self.index, list):
new_item = tuple(x for i, x in enumerate(old_item) if i in self.index) # type: ignore[assignment]
else:
new_item = old_item[self.index : self.stop : self.step] # type: ignore[assignment]
elif isinstance(old_item, list):
if isinstance(self.index, list):
new_item = [x for i, x in enumerate(old_item) if i in self.index] # type: ignore[assignment]
else:
new_item = old_item[self.index : self.stop : self.step] # type: ignore[assignment]
elif isinstance(old_item, dict):
if isinstance(self.index, list):
new_item = {k: v for (k, v) in old_item.items() if k in self.index} # type: ignore[assignment]
elif self.index in old_item.keys():
new_item = {self.index: old_item.get(self.index)} # type: ignore[assignment]
else:
new_item = old_item # type: ignore[assignment]
warnings.warn(
"Dictionaries are not sliced by steps, only direct index. "
"Please be aware that no filter was done or new item created."
)
else:
new_item = old_item # type: ignore[assignment]
warnings.warn(
"The next item was not an iterable and cannot be filtered, "
"please be aware that no filter was done or new item created."
)
if isinstance(self.index, list):
# check to make sure all indices requested were in the item. warn if not
try:
for i in self.index:
old_item[i]
except (IndexError, KeyError):
warnings.warn(
"At least one index in the filter is not present in the item being returned,"
" please be aware that expected columns/keys may be missing."
)
yield new_item # type: ignore[misc]
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
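# A minimal sketch (not part of the original module): passing a list of keys to
# ``slice`` selects those keys from dict items instead of using start/stop/step.
def _example_slice_dict_keys():
    from torchdata.datapipes.iter import IterableWrapper

    dp = IterableWrapper([{"a": 1, "b": 2, "c": 3}]).slice(["a", "c"])
    return list(dp)  # -> [{"a": 1, "c": 3}]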
@functional_datapipe("flatten")
class FlattenIterDataPipe(IterDataPipe[T_co]):
r"""
Returns a flattened copy of the input DataPipe at the per-sample/element level based on the provided indices (functional name: ``flatten``).
Note:
With no ``indices`` given, each item in the DataPipe is flattened by one level.
Args:
datapipe: IterDataPipe with iterable elements
indices: a single index/key for the item to flatten from an iterator item or a list of indices/keys to be flattened
- Integer(s) is/are used for list/tuple.
- Key(s) is/are used for dict.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper([(0, 10, (100, 1000)), (1, 11, (111, 1001)), (2, 12, (122, 1002)), (3, 13, (133, 1003)), (4, 14, (144, 1004))])
>>> flatten_dp = dp.flatten(2)
>>> list(flatten_dp)
[(0, 10, 100, 1000), (1, 11, 111, 1001), (2, 12, 122, 1002), (3, 13, 133, 1003), (4, 14, 144, 1004)]
>>>
>>> dp = IterableWrapper([(0, (1, 2)), (3, (4, 5)), (6, (7, 8))])
>>> flatten_dp = dp.flatten()
>>> list(flatten_dp)
[(0, 1, 2), (3, 4, 5), (6, 7, 8)]
"""
datapipe: IterDataPipe
indices: Set[Hashable] = set()
def __init__(
self,
datapipe: IterDataPipe,
indices: Optional[Union[Hashable, List[Hashable]]] = None,
) -> None:
super().__init__()
self.datapipe = datapipe
if indices:
if isinstance(indices, list):
self.indices = set(indices)
else:
self.indices = {indices}
def __iter__(self) -> Iterator[T_co]:
flatten_all = False
if not self.indices:
flatten_all = True
for old_item in self.datapipe:
if isinstance(old_item, dict):
new_item = {} # type: ignore[assignment]
for k, v in old_item.items():
if k in self.indices:
pass
if (flatten_all or (k in self.indices)) and isinstance(v, dict):
for k_sub, v_sub in v.items():
if k_sub not in old_item:
new_item[k_sub] = v_sub
else:
warnings.warn(
"Flattener tried to insert the same key twice into the dict item,"
"the second key,value pair has been dropped."
)
else:
if k not in new_item:
new_item[k] = v
else:
warnings.warn(
"Flattener tried to insert the same key twice into the dict item,"
"the second key,value pair has been dropped."
)
else:
is_tuple = False
new_item = [] # type: ignore[assignment]
if isinstance(old_item, tuple):
is_tuple = True
old_item = list(old_item)
for i, item in enumerate(old_item):
if (flatten_all or (i in self.indices)) and isinstance(item, (list, tuple)):
new_item.extend(list(item)) # type: ignore[attr-defined]
else:
new_item.append(item) # type: ignore[attr-defined]
if is_tuple:
new_item = tuple(new_item) # type: ignore[assignment]
# check to make sure all indices requested were in the item. warn if not
try:
if self.indices:
for index in self.indices:
old_item[index]
except (IndexError, KeyError):
warnings.warn(
"At least one index in the filter is not present in the item being returned,"
" please be aware that expected columns/keys may be missing."
)
yield new_item # type: ignore[misc]
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
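# A minimal sketch (not part of the original module): flattening a nested dict
# one level by key, so the inner keys are merged into the outer item.
def _example_flatten_nested_dict():
    from torchdata.datapipes.iter import IterableWrapper

    dp = IterableWrapper([{"meta": {"id": 1, "split": "train"}, "x": 0}]).flatten("meta")
    return list(dp)  # -> [{"id": 1, "split": "train", "x": 0}]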
class _BatchAsyncMapperIterDataPipe(IterDataPipe):
datapipe: IterDataPipe
async_fn: Callable
def __init__(
self,
source_datapipe: IterDataPipe,
async_fn: Callable,
input_col=None,
output_col=None,
max_concurrency: int = 32,
):
self.source_datapipe = source_datapipe
if not inspect.iscoroutinefunction(async_fn):
raise ValueError(f"Expected a corotine function with an async def syntax, but got a {type(async_fn)}")
self.async_fn = async_fn # type: ignore[assignment]
if input_col is None and output_col is not None:
raise ValueError("`output_col` must be None when `input_col` is None.")
self.input_col = input_col
if isinstance(output_col, (list, tuple)):
if len(output_col) > 1:
raise ValueError("`output_col` must be a single-element list or tuple")
output_col = output_col[0]
self.output_col = output_col
self.max_concurrency = max_concurrency
def __iter__(self):
policy = asyncio.get_event_loop_policy()
loop = policy.new_event_loop()
try:
for batch in self.source_datapipe:
policy.set_event_loop(loop)
new_batch = loop.run_until_complete(self.processbatch(batch))
yield new_batch
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
async def processbatch(self, batch):
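# A semaphore caps the number of `async_fn` calls that are awaited concurrently at
# `max_concurrency`; `asyncio.gather` then collects the results in input order.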
sem = asyncio.Semaphore(self.max_concurrency)
async def controlled_async_fn(async_fn, *data):
async with sem:
return await async_fn(*data)
coroutines = []
if self.input_col is None:
for data in batch:
coroutines.append(controlled_async_fn(self.async_fn, data))
results = await asyncio.gather(*coroutines)
return results
for data in batch:
if isinstance(self.input_col, (list, tuple)):
args = tuple(data[col] for col in self.input_col)
coroutines.append(controlled_async_fn(self.async_fn, *args))
else:
coroutines.append(controlled_async_fn(self.async_fn, data[self.input_col]))
results = await asyncio.gather(*coroutines)
new_batch = []
for data, res in zip(batch, results):
t_flag = isinstance(data, tuple)
if t_flag:
data = list(data)
if self.output_col is None:
if isinstance(self.input_col, (list, tuple)):
data[self.input_col[0]] = res
for idx in sorted(self.input_col[1:], reverse=True):
del data[idx]
else:
data[self.input_col] = res
elif self.output_col == -1:
data.append(res)
else:
data[self.output_col] = res
if t_flag:
data = tuple(data)
new_batch.append(data)
return new_batch
def __len__(self):
return len(self.source_datapipe)
@functional_datapipe("async_map_batches")
class BatchAsyncMapperIterDataPipe(IterDataPipe):
r"""
Combines elements from the source DataPipe into batches and applies a coroutine function
over each element within the batch concurrently, then flattens the outputs into a
single, unnested IterDataPipe (functional name: ``async_map_batches``).
Args:
source_datapipe: Source IterDataPipe
async_fn: The coroutine function to be applied to each batch of data
batch_size: The size of batch to be aggregated from ``source_datapipe``
input_col: Index or indices of data to which ``async_fn`` is applied, such as:
- ``None`` as default to apply ``async_fn`` to the data directly.
- Integer(s) is used for list/tuple.
- Key(s) is used for dict.
output_col: Index of data where the result of ``async_fn`` is placed. ``output_col`` can be specified
only when ``input_col`` is not ``None``.
- ``None`` as default to replace the index that ``input_col`` specifies; for ``input_col`` with
multiple indices, the left-most one is used, and the other indices will be removed.
- Integer is used for list/tuple. ``-1`` represents appending the result at the end.
- Key is used for dict. A new key is acceptable.
max_concurrency: Maximum concurrency to call async functions. (Default: ``32``)
flatten: Determines whether the batches get flattened at the end (Default: ``True``).
If ``False``, outputs will remain in batches of size ``batch_size``.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> async def mul_ten(x):
... await asyncio.sleep(1)
... return x * 10
>>> dp = IterableWrapper(range(50))
>>> dp = dp.async_map_batches(mul_ten, 16)
>>> list(dp)
[0, 10, 20, 30, ...]
>>> dp = IterableWrapper([(i, i) for i in range(50)])
>>> dp = dp.async_map_batches(mul_ten, 16, input_col=1)
>>> list(dp)
[(0, 0), (1, 10), (2, 20), (3, 30), ...]
>>> dp = IterableWrapper([(i, i) for i in range(50)])
>>> dp = dp.async_map_batches(mul_ten, 16, input_col=1, output_col=-1)
>>> list(dp)
[(0, 0, 0), (1, 1, 10), (2, 2, 20), (3, 3, 30), ...]
# Async fetching html from remote
>>> from aiohttp import ClientSession
>>> async def fetch_html(url: str, **kwargs):
... async with ClientSession() as session:
... resp = await session.request(method="GET", url=url, **kwargs)
... resp.raise_for_status()
... html = await resp.text()
... return html
>>> dp = IterableWrapper(urls)
>>> dp = dp.async_map_batches(fetch_html, 16)
"""
def __new__(
self,
source_datapipe,
async_fn: Callable,
batch_size: int,
input_col=None,
output_col=None,
max_concurrency: int = 32,
flatten: bool = True,
):
dp = source_datapipe.batch(batch_size)
dp = _BatchAsyncMapperIterDataPipe(dp, async_fn, input_col, output_col, max_concurrency)
if flatten:
dp = dp.flatmap()
try:
source_length = len(source_datapipe)
if isinstance(source_length, int) and source_length >= 0:
dp = dp.set_length(source_length)
except (TypeError, NotImplementedError):
pass
return dp
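# Illustrative sketch (hypothetical helper, not part of the original module): with
# ``flatten=False`` the outputs stay grouped in batches of ``batch_size`` instead of being
# flattened back into individual elements.
def _example_async_map_batches_unflattened():
    import asyncio

    from torchdata.datapipes.iter import IterableWrapper

    async def mul_ten(x):
        await asyncio.sleep(0)
        return x * 10

    dp = IterableWrapper(range(6)).async_map_batches(mul_ten, batch_size=3, flatten=False)
    # Expected to yield something like [[0, 10, 20], [30, 40, 50]]
    return list(dp)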
@functional_datapipe("threadpool_map")
class ThreadPoolMapperIterDataPipe(IterDataPipe[T_co]):
r"""
Applies a function over each item from the source DataPipe concurrently
using ``ThreadPoolExecutor`` (functional name: ``threadpool_map``).
The function can be any regular Python function or partial object. Lambda
function is not recommended as it is not supported by pickle.
Args:
source_datapipe: Source IterDataPipe
fn: Function being applied over each item
input_col: Index or indices of data to which ``fn`` is applied, such as:
- ``None`` as default to apply ``fn`` to the data directly.
- Integer(s) is used for list/tuple.
- Key(s) is used for dict.
output_col: Index of data where the result of ``fn`` is placed. ``output_col`` can be specified
only when ``input_col`` is not ``None``.
- ``None`` as default to replace the index that ``input_col`` specifies; for ``input_col`` with
multiple indices, the left-most one is used, and the other indices will be removed.
- Integer is used for list/tuple. ``-1`` represents appending the result at the end.
- Key is used for dict. A new key is acceptable.
scheduled_tasks: How many tasks will be scheduled at any given time (Default value: 128)
max_workers: Maximum number of threads to execute function calls
**threadpool_kwargs: additional arguments to be given to the ``ThreadPoolExecutor``
Note:
For more information about ``max_workers`` and additional arguments for the ``ThreadPoolExecutor``
please refer to: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor
Note:
For optimal use of all threads, ``scheduled_tasks`` > ``max_workers`` is strongly recommended. The higher the
variance of the time needed to finish execution of the given ``fn``, the higher the value
of ``scheduled_tasks`` needs to be to avoid threads sitting idle while waiting
for the next result (as results are returned in the correct order).
However, too high a value of ``scheduled_tasks`` might lead to a long waiting period until the first element is yielded,
as ``next`` is called ``scheduled_tasks`` times on ``source_datapipe`` before anything is yielded.
We encourage you to try out different values of ``max_workers`` and ``scheduled_tasks``
in search for optimal values for your use-case.
Example:
.. testsetup::
from torchdata.datapipes.iter import IterableWrapper
import requests
import time
from unittest.mock import MagicMock
requests.get = MagicMock()
urls = []
.. testcode::
# fetching html from remote
def fetch_html(url: str, **kwargs):
r = requests.get(url, **kwargs)
r.raise_for_status()
return r.content
dp = IterableWrapper(urls)
dp = dp.threadpool_map(fetch_html, max_workers=16)
.. testcode::
def mul_ten(x):
time.sleep(0.1)
return x * 10
dp = IterableWrapper([(i, i) for i in range(50)])
dp = dp.threadpool_map(mul_ten, input_col=1)
print(list(dp))
.. testoutput::
[(0, 0), (1, 10), (2, 20), (3, 30), ...]
.. testcode::
dp = IterableWrapper([(i, i) for i in range(50)])
dp = dp.threadpool_map(mul_ten, input_col=1, output_col=-1)
print(list(dp))
.. testoutput::
[(0, 0, 0), (1, 1, 10), (2, 2, 20), (3, 3, 30), ...]
"""
datapipe: IterDataPipe
fn: Callable
def __init__(
self,
source_datapipe: IterDataPipe,
fn: Callable,
input_col=None,
output_col=None,
scheduled_tasks: int = 128,
max_workers: Optional[int] = None,
**threadpool_kwargs,
) -> None:
super().__init__()
self.datapipe = source_datapipe
_check_unpickable_fn(fn)
self.fn = fn # type: ignore[assignment]
if scheduled_tasks <= 0:
raise ValueError("'scheduled_tasks' is required to be a positive integer.")
self.scheduled_tasks = scheduled_tasks
if max_workers is not None and max_workers <= 0:
raise ValueError("'max_workers' is required to be a positive integer.")
self.max_workers = max_workers
self.threadpool_kwargs = threadpool_kwargs
self.input_col = input_col
if input_col is None and output_col is not None:
raise ValueError("`output_col` must be None when `input_col` is None.")
if isinstance(output_col, (list, tuple)):
if len(output_col) > 1:
raise ValueError("`output_col` must be a single-element list or tuple")
output_col = output_col[0]
self.output_col = output_col
validate_input_col(fn, input_col)
def _apply_fn(self, data):
if self.input_col is None and self.output_col is None:
return self.fn(data)
if self.input_col is None:
res = self.fn(data)
elif isinstance(self.input_col, (list, tuple)):
args = tuple(data[col] for col in self.input_col)
res = self.fn(*args)
else:
res = self.fn(data[self.input_col])
# Copy tuple to list and run in-place modification because tuple is immutable.
if isinstance(data, tuple):
t_flag = True
data = list(data)
else:
t_flag = False
if self.output_col is None:
if isinstance(self.input_col, (list, tuple)):
data[self.input_col[0]] = res
for idx in sorted(self.input_col[1:], reverse=True):
del data[idx]
else:
data[self.input_col] = res
else:
if self.output_col == -1:
data.append(res)
else:
data[self.output_col] = res
# Convert list back to tuple
return tuple(data) if t_flag else data
def __iter__(self) -> Iterator[T_co]:
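# Pre-submit up to `scheduled_tasks` items to the executor, then keep the pipeline full by
# submitting one new task each time the oldest future is popped; results are yielded in
# input order because futures are consumed from the left of the deque.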
with futures.ThreadPoolExecutor(max_workers=self.max_workers, **self.threadpool_kwargs) as executor:
futures_deque: deque = deque()
has_next = True
itr = iter(self.datapipe)
for _ in range(self.scheduled_tasks):
try:
futures_deque.append(executor.submit(self._apply_fn, next(itr)))
except StopIteration:
has_next = False
break
while len(futures_deque) > 0:
if has_next:
try:
futures_deque.append(executor.submit(self._apply_fn, next(itr)))
except StopIteration:
has_next = False
yield futures_deque.popleft().result()
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import heapq
import random
from dataclasses import dataclass, field
from functools import partial
from typing import Callable, final, Generic, Iterator, List, Optional, TypeVar
import torch
from torchdata.datapipes import DataChunk, functional_datapipe
from torchdata.datapipes.iter import IterDataPipe
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
@functional_datapipe("in_batch_shuffle")
class InBatchShufflerIterDataPipe(IterDataPipe[DataChunk[T_co]]):
r"""
Shuffles each mini-batch from the prior DataPipe (functional name: ``in_batch_shuffle``).
Args:
datapipe: Iterable DataPipe with batched data
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper(range(10))
>>> batch_dp = source_dp.batch(batch_size=3, drop_last=True)
>>> list(batch_dp)
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
>>> in_batch_shuffle_dp = batch_dp.in_batch_shuffle()
>>> list(in_batch_shuffle_dp)
[[2, 0, 1], [3, 5, 4], [7, 8, 6]]
"""
def __init__(self, datapipe: IterDataPipe[DataChunk[T_co]]):
self.datapipe = datapipe
self._enabled = True
self._seed: Optional[int] = None
self._rng = random.Random()
def set_shuffle(self, shuffle=True):
self._enabled = shuffle
return self
def set_seed(self, seed: int):
self._seed = seed
return self
def __iter__(self) -> Iterator[DataChunk[T_co]]:
if not self._enabled:
for batch in self.datapipe:
yield batch
else:
for batch in self.datapipe:
new_batch = self._rng.sample(batch, len(batch))
yield DataChunk(new_batch)
@final
def reset(self) -> None:
if self._enabled:
if self._seed is None:
self._seed = int(torch.empty((), dtype=torch.int64).random_().item())
self._rng.seed(self._seed)
self._seed = None
def __len__(self) -> int:
return len(self.datapipe)
def __getstate__(self):
state = (
self.datapipe,
self._enabled,
self._seed,
self._rng.getstate(),
self._valid_iterator_id,
self._number_of_samples_yielded,
)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(
self.datapipe,
self._enabled,
self._seed,
rng_state,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self._rng = random.Random()
self._rng.setstate(rng_state)
@functional_datapipe("bucketbatch")
class BucketBatcherIterDataPipe(IterDataPipe[DataChunk[T_co]]):
r"""
Creates mini-batches of data from a sorted bucket (functional name: ``bucketbatch``). An outer
dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``,
or ``length % batch_size`` for the last batch if ``drop_last`` is set to ``False``.
The purpose of this DataPipe is to batch samples with some similarity according to the sorting function
being passed. For an example in the text domain, it may be batching examples with a similar number of tokens
to minimize padding and to increase throughput.
Args:
datapipe: Iterable DataPipe being batched
batch_size: The size of each batch
drop_last: Option to drop the last batch if it's not full
batch_num: Number of batches within a bucket (i.e. `bucket_size = batch_size * batch_num`)
bucket_num: Number of buckets that make up a pool for shuffling (i.e. `pool_size = bucket_size * bucket_num`)
sort_key: Callable to sort a bucket (list)
use_in_batch_shuffle: if True, do in-batch shuffle; if False, buffer shuffle
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper(range(10))
>>> batch_dp = source_dp.bucketbatch(batch_size=3, drop_last=True)
>>> list(batch_dp)
[[5, 6, 7], [9, 0, 1], [4, 3, 2]]
>>> def sort_bucket(bucket):
>>> return sorted(bucket)
>>> batch_dp = source_dp.bucketbatch(
>>> batch_size=3, drop_last=True, batch_num=100,
>>> bucket_num=1, use_in_batch_shuffle=False, sort_key=sort_bucket
>>> )
>>> list(batch_dp)
[[3, 4, 5], [6, 7, 8], [0, 1, 2]]
"""
datapipe: IterDataPipe[T_co]
batch_size: int
drop_last: bool
batch_num: int
bucket_num: int
sort_key: Optional[Callable]
use_in_batch_shuffle: bool
def __new__(
cls,
datapipe: IterDataPipe[T_co],
batch_size: int,
drop_last: bool = False,
batch_num: int = 100,
bucket_num: int = 1,
sort_key: Optional[Callable] = None,
use_in_batch_shuffle: bool = True,
):
assert batch_size > 0, "Batch size is required to be larger than 0!"
assert batch_num > 0, "Number of batches is required to be larger than 0!"
assert bucket_num > 0, "Number of buckets is required to be larger than 0!"
bucket_size = batch_size * batch_num
pool_size = bucket_size * bucket_num
# Shuffle by pool_size
if bucket_num > 1 or sort_key is None:
if use_in_batch_shuffle:
datapipe = datapipe.batch(batch_size=pool_size, drop_last=False).in_batch_shuffle().unbatch()
else:
datapipe = datapipe.shuffle(buffer_size=pool_size)
# Sort by bucket_size if sort_key is given
if sort_key is not None:
datapipe = datapipe.batch(bucket_size).map(fn=sort_key).unbatch()
# Batch and drop last (if needed)
datapipe = datapipe.batch(batch_size, drop_last=drop_last)
# Shuffle the batched data
if sort_key is not None:
# In-batch shuffling each bucket doesn't seem that useful and may be misleading since .batch is called just prior.
if use_in_batch_shuffle:
datapipe = datapipe.batch(batch_size=bucket_num, drop_last=False).in_batch_shuffle().unbatch()
else:
datapipe = datapipe.shuffle(buffer_size=bucket_size)
return datapipe
def _default_len_fn(token):
return len(token)
@dataclass(order=True, frozen=True)
class PrioritizedItem(Generic[T_co]):
length: int
data: T_co = field(compare=False)
def _token_len_fn(token: T, len_fn: Callable) -> PrioritizedItem[T]:
return PrioritizedItem(length=len_fn(token), data=token)
def _token_filter_fn(data, *, min_len, max_len):
return data.length >= min_len and data.length <= max_len
@functional_datapipe("max_token_bucketize")
class MaxTokenBucketizerIterDataPipe(IterDataPipe[DataChunk[T_co]]):
r"""
Creates mini-batches of data from a min-heap with limited size, and the total length of samples
returned by ``len_fn`` within each batch will be limited by ``max_token_count``
(functional name: ``max_token_bucketize``). If ``min_len`` or ``max_len`` is set, samples whose
length falls outside ``[min_len, max_len]`` will be filtered out.
The purpose of this DataPipe is to batch samples with similar length according to ``len_fn``.
A min-heap is used here to make sure the samples are sorted incrementally based on length, and
the total length of samples in each batch is guaranteed to be smaller than ``max_token_count``.
For an example in the audio domain, it may be batching samples with similar length. Then, given the
``max_token_count``, each batch may be concatenated to a Tensor with the same size and minimum padding.
If ``include_padding`` is set to ``True``, the token count of each batch includes the padding a succeeding
DataPipe could add. This guarantees that even after the batch is padded, ``max_token_count`` will not be exceeded.
This can prevent out-of-memory issues for data with large variations in length.
Note that batches are bucketized starting from the smallest size in a buffer.
This can limit the variability of batches if ``buffer_size`` is large.
To increase variability, apply ``torchdata.datapipes.iter.Shuffler`` before and after this DataPipe,
and keep ``buffer_size`` small.
Args:
datapipe: Iterable DataPipe being batched
max_token_count: Maximum total length of data in each batch
len_fn: Function to be applied to each element to get lengths. ``len(data)`` is used by default.
min_len: Optional minimum length to be included into each batch
max_len: Optional maximum length to be included into each batch.
buffer_size: This restricts how many samples are taken from the prior DataPipe to bucketize
include_padding: If True, the size of each batch includes the extra padding up to the largest length in the batch.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> source_dp = IterableWrapper(['1', '11', '1', '1111', '111', '1', '11', '11', '111'])
>>> # Using default len_fn to sort samples based on length (string length in this case)
>>> batch_dp = source_dp.max_token_bucketize(max_token_count=5)
>>> list(batch_dp)
[['1', '1', '1', '11'], ['11', '11'], ['111'], ['111'], ['1111']]
>>> batch_dp = source_dp.max_token_bucketize(max_token_count=4, buffer_size=4)
>>> list(batch_dp)
[['1', '1', '1'], ['11', '11'], ['11'], ['111'], ['111'], ['1111']]
"""
datapipe: IterDataPipe[PrioritizedItem[T_co]]
max_token_count: int
len_fn: Callable
min_len: int
max_len: Optional[int]
buffer_size: int
def __init__(
self,
datapipe: IterDataPipe[T_co],
max_token_count: int,
len_fn: Callable = _default_len_fn,
min_len: int = 0,
max_len: Optional[int] = None,
buffer_size: int = 1000,
include_padding: bool = False,
) -> None:
if max_len is None:
max_len = max_token_count
if min_len < 0 or min_len > max_len:
raise ValueError("``min_len`` should be larger than 0 and equal to or smaller than ``max_len``.")
if max_len > max_token_count:
raise ValueError("``max_token_count`` must be equal to or greater than ``max_len``.")
if buffer_size <= 0:
raise ValueError("'buffer_size' is required to be a positive integer.")
self.datapipe = datapipe.map(partial(_token_len_fn, len_fn=len_fn))
self.datapipe = self.datapipe.filter(partial(_token_filter_fn, min_len=min_len, max_len=max_len))
self.max_token_count = max_token_count
self.buffer_size = buffer_size
self.include_padding = include_padding
def __iter__(self) -> Iterator[DataChunk[T_co]]:
buffer: List[PrioritizedItem[T_co]] = []
batch: List[T_co] = []
batch_size: int = 0
max_length: int = 0
for d in self.datapipe:
heapq.heappush(buffer, d)
if len(buffer) == self.buffer_size:
buffer, batch, batch_size, max_length, data_chunk = self._pop_buffer(
buffer, batch, batch_size, max_length
)
if data_chunk is not None:
yield data_chunk
while buffer:
buffer, batch, batch_size, max_length, data_chunk = self._pop_buffer(buffer, batch, batch_size, max_length)
if data_chunk is not None:
yield data_chunk
if batch:
yield DataChunk(batch)
def _pop_buffer(self, buffer: List[PrioritizedItem[T_co]], batch: List[T_co], batch_size: int, max_length: int):
data_chunk_to_yield = None
d: PrioritizedItem[T_co] = heapq.heappop(buffer)
length = d.length
token = d.data
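# With `include_padding`, the batch size is accounted for as
# (number of samples) * (longest sample seen so far), i.e. as if every sample were padded
# up to the current maximum length; otherwise sample lengths are simply summed.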
if self.include_padding:
max_length = max(length, max_length)
new_batch_size = (len(batch) + 1) * max_length
else:
new_batch_size = batch_size + length
if new_batch_size > self.max_token_count:
data_chunk_to_yield = DataChunk(batch)
batch = [token]
batch_size = length
max_length = length
else:
batch.append(token)
batch_size = new_batch_size
return buffer, batch, batch_size, max_length, data_chunk_to_yield
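# Illustrative sketch (hypothetical helper, not part of the original module): the docstring
# examples above do not show ``include_padding``, which budgets each batch as if all samples
# were padded to the longest one, typically producing smaller batches.
def _example_max_token_bucketize_with_padding():
    from torchdata.datapipes.iter import IterableWrapper

    source_dp = IterableWrapper(['1', '11', '1', '1111', '111', '1', '11', '11', '111'])
    batch_dp = source_dp.max_token_bucketize(max_token_count=5, include_padding=True)
    # Expected to yield something like
    # [['1', '1', '1'], ['11', '11'], ['11'], ['111'], ['111'], ['1111']]
    return list(batch_dp)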
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import collections
def pin_memory_fn(data, device=None):
r"""
Utility function to move data to pinned memory. If special treatment is needed to move
the input data to pinned memory, please attach a ``pin_memory`` method to the expected
data class.
"""
if hasattr(data, "pin_memory"): # Including torch.Tensor
return data.pin_memory(device)
elif isinstance(data, (str, bytes)):
return data
elif isinstance(data, collections.abc.Mapping):
pinned_data = {k: pin_memory_fn(sample, device) for k, sample in data.items()}
try:
return type(data)(pinned_data) # type: ignore[call-arg]
except TypeError:
# The mapping type may not support `__init__(iterable)`.
return pinned_data
elif isinstance(data, collections.abc.Sequence):
pinned_data = [pin_memory_fn(sample, device) for sample in data] # type: ignore[assignment]
try:
return type(data)(pinned_data) # type: ignore[call-arg]
except TypeError:
# The sequence type may not support `__init__(iterable)` (e.g., `range`).
return pinned_data
else:
return data
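# Illustrative sketch (hypothetical helper and class, not part of the original module): a
# custom container can opt in to pinning by exposing its own ``pin_memory`` method, which
# ``pin_memory_fn`` calls with the target device. Actually pinning requires a CUDA-capable
# runtime.
def _example_custom_pin_memory():
    import torch

    class PackedSample:
        def __init__(self, tensor):
            self.tensor = tensor

        def pin_memory(self, device=None):
            # Pin the wrapped tensor and return a new container.
            return PackedSample(self.tensor.pin_memory())

    sample = {"features": torch.randn(4), "meta": PackedSample(torch.randn(2)), "id": "abc"}
    # Tensors are pinned directly, PackedSample via its own method, and the string passes through.
    return pin_memory_fn(sample)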
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data.datapipes.utils.common import StreamWrapper
from torchdata.datapipes.utils._visualization import to_graph
from torchdata.datapipes.utils.janitor import janitor
from torchdata.datapipes.utils.pin_memory import pin_memory_fn
__all__ = [
"StreamWrapper",
"janitor",
"pin_memory_fn",
"to_graph",
]
# Please keep this list sorted
assert __all__ == sorted(__all__)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from io import IOBase
from typing import Tuple
from torchdata.datapipes.utils import StreamWrapper
def validate_pathname_binary_tuple(data: Tuple[str, IOBase]):
if not isinstance(data, tuple):
raise TypeError(f"pathname binary data should be tuple type, but it is type {type(data)}")
if len(data) != 2:
raise TypeError(f"pathname binary stream tuple length should be 2, but got {len(data)}")
if not isinstance(data[0], str):
raise TypeError(f"pathname within the tuple should have string type pathname, but it is type {type(data[0])}")
if not isinstance(data[1], IOBase) and not isinstance(data[1], StreamWrapper):
raise TypeError(
f"binary stream within the tuple should have IOBase or"
f"its subclasses as type, but it is type {type(data[1])}"
)
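# Illustrative sketch (hypothetical helper, not part of the original module): the expected
# input is a (pathname, binary stream) tuple such as those produced by file-opening
# DataPipes; anything else raises a ``TypeError``.
def _example_validate_tuple():
    import io

    validate_pathname_binary_tuple(("archive/member.txt", io.BytesIO(b"payload")))  # passes
    try:
        validate_pathname_binary_tuple(("only-a-pathname",))  # wrong tuple length
    except TypeError as err:
        return err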
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from collections import defaultdict
from typing import Optional, Set, TYPE_CHECKING
from torch.utils.data.datapipes.iter.combining import _ChildDataPipe, IterDataPipe
from torch.utils.data.graph import traverse_dps
if TYPE_CHECKING:
import graphviz
class Node:
def __init__(self, dp, *, name=None):
self.dp = dp
self.name = name or type(dp).__name__.replace("IterDataPipe", "")
self.childs = set()
self.parents = set()
def add_child(self, child):
self.childs.add(child)
child.parents.add(self)
def remove_child(self, child):
self.childs.remove(child)
child.parents.remove(self)
def add_parent(self, parent):
self.parents.add(parent)
parent.childs.add(self)
def remove_parent(self, parent):
self.parents.remove(parent)
parent.childs.remove(self)
def __eq__(self, other):
if not isinstance(other, Node):
return NotImplemented
return hash(self) == hash(other)
def __hash__(self):
return hash(self.dp)
def __str__(self):
return self.name
def __repr__(self):
return f"{self}-{hash(self)}"
def to_nodes(dp, *, debug: bool) -> Set[Node]:
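# Builds one Node per DataPipe by recursing over the traversal graph, merges duplicates that
# were reached along different paths, and (unless debug=True) hides the _ChildDataPipe
# wrappers created by fork/demux behind a single synthetic parent node.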
def recurse(dp_graph, child=None):
for _dp_id, (dp_node, dp_parents) in dp_graph.items():
node = Node(dp_node)
if child is not None:
node.add_child(child)
yield node
yield from recurse(dp_parents, child=node)
def aggregate(nodes):
groups = defaultdict(list)
for node in nodes:
groups[node].append(node)
nodes = set()
for node, group in groups.items():
if len(group) == 1:
nodes.add(node)
continue
aggregated_node = Node(node.dp)
for duplicate_node in group:
for child in duplicate_node.childs.copy():
duplicate_node.remove_child(child)
aggregated_node.add_child(child)
for parent in duplicate_node.parents.copy():
duplicate_node.remove_parent(parent)
aggregated_node.add_parent(parent)
nodes.add(aggregated_node)
if debug:
return nodes
child_dp_nodes = set(
itertools.chain.from_iterable(node.parents for node in nodes if isinstance(node.dp, _ChildDataPipe))
)
if not child_dp_nodes:
return nodes
for node in child_dp_nodes:
fixed_parent_node = Node(
type(str(node).lstrip("_"), (IterDataPipe,), dict(dp=node.dp, childs=node.childs))()
)
nodes.remove(node)
nodes.add(fixed_parent_node)
for parent in node.parents.copy():
node.remove_parent(parent)
fixed_parent_node.add_parent(parent)
for child in node.childs:
nodes.remove(child)
for actual_child in child.childs.copy():
actual_child.remove_parent(child)
actual_child.add_parent(fixed_parent_node)
return nodes
return aggregate(recurse(traverse_dps(dp)))
def to_graph(dp, *, debug: bool = False) -> "graphviz.Digraph":
"""Visualizes a DataPipe by returning a :class:`graphviz.Digraph`, which is a graph of the data pipeline.
This allows you to visually inspect all the transformations that take place in your DataPipes.
.. note::
The package :mod:`graphviz` is required to use this function.
.. note::
The most common interfaces for the returned graph object are:
- :meth:`~graphviz.Digraph.render`: Save the graph to a file.
- :meth:`~graphviz.Digraph.view`: Open the graph in a viewer.
Args:
dp: DataPipe that you would like to visualize (generally the last one in a chain of DataPipes).
debug (bool): If ``True``, renders internal datapipes that are usually hidden from the user
(such as ``ChildDataPipe`` of `demux` and `fork`). Defaults to ``False``.
Example:
>>> from torchdata.datapipes.iter import IterableWrapper
>>> from torchdata.datapipes.utils import to_graph
>>> dp = IterableWrapper(range(10))
>>> dp1, dp2 = dp.demux(num_instances=2, classifier_fn=lambda x: x % 2)
>>> dp1 = dp1.map(lambda x: x + 1)
>>> dp2 = dp2.filter(lambda _: True)
>>> dp3 = dp1.zip(dp2).map(lambda t: t[0] + t[1])
>>> g = to_graph(dp3)
>>> g.view() # This will open the graph in a viewer
"""
try:
import graphviz
except ModuleNotFoundError:
raise ModuleNotFoundError(
"The package `graphviz` is required to be installed to use this function. "
"Please `pip install graphviz` or `conda install -c conda-forge graphviz`."
) from None
# The graph style as well as the color scheme below was copied from https://github.com/szagoruyko/pytorchviz/
# https://github.com/szagoruyko/pytorchviz/blob/0adcd83af8aa7ab36d6afd139cabbd9df598edb7/torchviz/dot.py#L78-L85
node_attr = dict(
style="filled",
shape="box",
align="left",
fontsize="10",
ranksep="0.1",
height="0.2",
fontname="monospace",
)
graph = graphviz.Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
for node in to_nodes(dp, debug=debug):
fillcolor: Optional[str]
if not node.parents:
fillcolor = "lightblue"
elif not node.childs:
fillcolor = "darkolivegreen1"
else:
fillcolor = None
graph.node(name=repr(node), label=str(node), fillcolor=fillcolor)
for child in node.childs:
graph.edge(repr(node), repr(child))
return graph
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torchdata.datapipes.utils import StreamWrapper
def janitor(obj):
"""
Invokes various `obj` cleanup procedures such as:
- Closing streams
"""
# TODO(632): We can also release caching locks here to allow filtering
StreamWrapper.close_streams(obj)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data import MapDataPipe
from torch.utils.data.datapipes.map import Batcher, Concater, Mapper, SequenceWrapper, Shuffler, Zipper
from torchdata.datapipes.iter.util.converter import IterToMapConverterMapDataPipe as IterToMapConverter
from torchdata.datapipes.map.util.cacheholder import InMemoryCacheHolderMapDataPipe as InMemoryCacheHolder
from torchdata.datapipes.map.util.unzipper import UnZipperMapDataPipe as UnZipper
__all__ = [
"Batcher",
"Concater",
"InMemoryCacheHolder",
"IterToMapConverter",
"MapDataPipe",
"Mapper",
"SequenceWrapper",
"Shuffler",
"UnZipper",
"Zipper",
]
# Please keep this list sorted
assert __all__ == sorted(__all__)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional
from torch.utils.data import IterDataPipe, MapDataPipe
# @functional_datapipe("to_iter_datapipe") # This line must be kept for .pyi signature parser
class MapToIterConverterIterDataPipe(IterDataPipe):
"""
Convert a ``MapDataPipe`` to an ``IterDataPipe`` (functional name: ``to_iter_datapipe``). It uses ``indices`` to
iterate through the ``MapDataPipe``, defaulting to ``range(len(mapdatapipe))`` if not given.
For the opposite converter, use :class:`.IterToMapConverter`.
Args:
datapipe: source MapDataPipe with data
indices: optional list of indices that will dictate how the datapipe will be iterated over
Example:
>>> from torchdata.datapipes.map import SequenceWrapper
>>> source_dp = SequenceWrapper(range(10))
>>> iter_dp = source_dp.to_iter_datapipe()
>>> list(iter_dp)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> source_dp2 = SequenceWrapper({'a': 1, 'b': 2, 'c': 3})
>>> iter_dp2 = source_dp2.to_iter_datapipe(indices=['a', 'b', 'c'])
>>> list(iter_dp2)
[1, 2, 3]
"""
# Note that ``indices`` has ``Optional[List]`` instead of ``Optional[Iterable]`` as type because a generator
# can be passed in as an iterable, which will complicate the serialization process as we will have
# to materialize ``indices`` and store it.
def __init__(self, datapipe: MapDataPipe, indices: Optional[List] = None):
if not isinstance(datapipe, MapDataPipe):
raise TypeError(f"MapToIterConverter can only apply on MapDataPipe, but found {type(datapipe)}")
self.datapipe: MapDataPipe = datapipe
self.indices = indices if indices else range(len(datapipe))
def __iter__(self):
for idx in self.indices:
yield self.datapipe[idx]
def __len__(self):
return len(self.indices)
MapDataPipe.register_datapipe_as_function("to_iter_datapipe", MapToIterConverterIterDataPipe)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, TypeVar
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.map import MapDataPipe
T_co = TypeVar("T_co", covariant=True)
@functional_datapipe("in_memory_cache")
class InMemoryCacheHolderMapDataPipe(MapDataPipe[T_co]):
r"""
Stores elements from the source DataPipe in memory (functional name: ``in_memory_cache``). Once an item is
stored, it will remain unchanged and subsequent retrievals will return the same element. Since items from
``MapDataPipe`` are lazily computed, this can be used to store the results from previous ``MapDataPipe`` and
reduce the number of duplicate computations.
Note:
The default ``cache`` is a ``Dict``. If another data structure is more suitable as a cache for your
use case, you can subclass this DataPipe and replace ``self.cache`` with your own container
(see the sketch after this class).
Args:
source_dp: source DataPipe from which elements are read and stored in memory
Example:
>>> from torchdata.datapipes.map import SequenceWrapper
>>> source_dp = SequenceWrapper(range(10))
>>> cache_dp = source_dp.in_memory_cache()
>>> list(cache_dp)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
def __init__(self, source_dp: MapDataPipe[T_co]) -> None:
self.source_dp: MapDataPipe[T_co] = source_dp
self.cache: Dict[Any, T_co] = {}
def __getitem__(self, index) -> T_co:
if index not in self.cache:
self.cache[index] = self.source_dp[index] # type: ignore[index]
return self.cache[index] # type: ignore[index]
# We can potentially remove `self.source_dp` to save memory once `len(self.cache) == len(self.source_dp)`
# Be careful about how that may interact with graph traversal and other features
def __len__(self) -> int:
return len(self.source_dp)
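# Illustrative sketch (hypothetical subclass, not part of the original module): swapping the
# default ``Dict`` cache for a bounded container with FIFO eviction by subclassing and
# replacing ``self.cache``.
def _example_bounded_cache():
    from collections import OrderedDict

    from torchdata.datapipes.map import SequenceWrapper

    class _BoundedCacheHolder(InMemoryCacheHolderMapDataPipe):
        def __init__(self, source_dp, max_items=2):
            super().__init__(source_dp)
            self.cache = OrderedDict()
            self.max_items = max_items

        def __getitem__(self, index):
            if index not in self.cache:
                self.cache[index] = self.source_dp[index]
                if len(self.cache) > self.max_items:
                    self.cache.popitem(last=False)  # evict the oldest entry
            return self.cache[index]

    dp = _BoundedCacheHolder(SequenceWrapper(range(10)), max_items=2)
    return [dp[i] for i in range(10)]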
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional, Sequence, TypeVar
from torchdata.datapipes import functional_datapipe
from torchdata.datapipes.map import MapDataPipe
T = TypeVar("T")
@functional_datapipe("unzip")
class UnZipperMapDataPipe(MapDataPipe):
"""
Takes in a DataPipe of Sequences, unpacks each Sequence, and returns the elements in separate DataPipes
based on their position in the Sequence (functional name: ``unzip``). The number of instances produced
equals the ``sequence_length`` minus the number of columns to skip.
Note:
Each sequence within the DataPipe should have the same length, specified by
the input argument `sequence_length`.
Args:
source_datapipe: Iterable DataPipe with sequences of data
sequence_length: Length of the sequence within the source_datapipe. All elements should have the same length.
columns_to_skip: optional indices of columns that the DataPipe should skip (each index should be
an integer from 0 to sequence_length - 1)
Example:
>>> from torchdata.datapipes.map import SequenceWrapper
>>> source_dp = SequenceWrapper([(i, i + 10, i + 20) for i in range(3)])
>>> dp1, dp2, dp3 = source_dp.unzip(sequence_length=3)
>>> list(dp1)
[0, 1, 2]
>>> list(dp2)
[10, 11, 12]
>>> list(dp3)
[20, 21, 22]
"""
def __new__(
cls,
source_datapipe: MapDataPipe[Sequence[T]],
sequence_length: int,
columns_to_skip: Optional[Sequence[int]] = None,
):
if sequence_length < 1:
raise ValueError(f"Expected `sequence_length` larger than 0, but {sequence_length} is found")
if columns_to_skip is None:
instance_ids = list(range(sequence_length))
else:
skips = set(columns_to_skip)
instance_ids = [i for i in range(sequence_length) if i not in skips]
if len(instance_ids) == 0:
raise RuntimeError(
f"All instances are being filtered out in {cls.__name__}. Please check"
"the input `sequence_length` and `columns_to_skip`."
)
return [_UnZipperMapDataPipe(source_datapipe, i) for i in instance_ids]
class _UnZipperMapDataPipe(MapDataPipe[T]):
def __init__(self, main_datapipe: MapDataPipe[Sequence[T]], instance_id: int):
self.main_datapipe = main_datapipe
self.instance_id = instance_id
def __getitem__(self, index) -> T:
return self.main_datapipe[index][self.instance_id]
def __len__(self) -> int:
return len(self.main_datapipe)
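# Illustrative sketch (hypothetical helper, not part of the original module): using
# ``columns_to_skip`` drops the corresponding positions, so fewer DataPipes are returned
# than ``sequence_length``.
def _example_unzip_with_skip():
    from torchdata.datapipes.map import SequenceWrapper

    source_dp = SequenceWrapper([(i, i + 10, i + 20) for i in range(3)])
    # Skip column 1, so only two DataPipes (columns 0 and 2) come back.
    dp0, dp2 = source_dp.unzip(sequence_length=3, columns_to_skip=[1])
    # Expected to yield something like ([0, 1, 2], [20, 21, 22])
    return list(dp0), list(dp2)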
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
import typing
from typing import Any, Callable, cast, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr import (
DeepLift,
GradientShap,
GuidedBackprop,
IntegratedGradients,
Saliency,
)
from captum.metrics import sensitivity_max
from captum.metrics._core.sensitivity import default_perturb_func
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel2,
BasicModel4_MultiArgs,
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
)
from torch import Tensor
@typing.overload
def _perturb_func(inputs: Tensor) -> Tensor:
...
@typing.overload
def _perturb_func(inputs: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]:
...
def _perturb_func(
inputs: TensorOrTupleOfTensorsGeneric,
) -> Union[Tensor, Tuple[Tensor, ...]]:
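# Adds a small deterministic ramp to each input (and to the second input, if a tuple is
# given) and returns the perturbed inputs, so repeated runs perturb identically.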
def perturb_ratio(input):
return (
torch.arange(-torch.numel(input[0]) // 2, torch.numel(input[0]) // 2)
.view(input[0].shape)
.float()
/ 100
)
input2 = None
if isinstance(inputs, tuple):
input1 = inputs[0]
input2 = inputs[1]
else:
input1 = cast(Tensor, inputs)
perturbed_input1 = input1 + perturb_ratio(input1)
if input2 is None:
return perturbed_input1
return perturbed_input1, input2 + perturb_ratio(input2)
class Test(BaseTest):
def test_basic_sensitivity_max_single(self) -> None:
model = BasicModel2()
sa = Saliency(model)
input1 = torch.tensor([3.0])
input2 = torch.tensor([1.0])
self.sensitivity_max_assert(
sa.attribute,
(input1, input2),
torch.zeros(1),
perturb_func=default_perturb_func,
)
def test_basic_sensitivity_max_multiple(self) -> None:
model = BasicModel2()
sa = Saliency(model)
input1 = torch.tensor([3.0] * 20)
input2 = torch.tensor([1.0] * 20)
self.sensitivity_max_assert(
sa.attribute, (input1, input2), torch.zeros(20), max_examples_per_batch=21
)
self.sensitivity_max_assert(
sa.attribute, (input1, input2), torch.zeros(20), max_examples_per_batch=60
)
def test_basic_sensitivity_max_multiple_gradshap(self) -> None:
model = BasicModel2()
gs = GradientShap(model)
input1 = torch.tensor([0.0] * 5)
input2 = torch.tensor([0.0] * 5)
baseline1 = torch.arange(0, 2).float() / 1000
baseline2 = torch.arange(0, 2).float() / 1000
self.sensitivity_max_assert(
gs.attribute,
(input1, input2),
torch.zeros(5),
baselines=(baseline1, baseline2),
max_examples_per_batch=2,
)
self.sensitivity_max_assert(
gs.attribute,
(input1, input2),
torch.zeros(5),
baselines=(baseline1, baseline2),
max_examples_per_batch=20,
)
def test_convnet_multi_target(self) -> None:
r"""
Another test with Saliency, local sensitivity and a more
complex model with a higher dimensional input.
"""
model = BasicModel_ConvNet_One_Conv()
sa = Saliency(model)
input = torch.stack([torch.arange(1, 17).float()] * 20, dim=0).view(20, 1, 4, 4)
self.sensitivity_max_assert(
sa.attribute,
input,
torch.zeros(20),
target=torch.tensor([1] * 20),
n_perturb_samples=10,
max_examples_per_batch=40,
)
def test_convnet_multi_target_and_default_pert_func(self) -> None:
r"""
Similar to the previous example, but here we also test the default
perturbation function.
"""
model = BasicModel_ConvNet_One_Conv()
gbp = GuidedBackprop(model)
input = torch.stack([torch.arange(1, 17).float()] * 20, dim=0).view(20, 1, 4, 4)
sens1 = self.sensitivity_max_assert(
gbp.attribute,
input,
torch.zeros(20),
perturb_func=default_perturb_func,
target=torch.tensor([1] * 20),
n_perturb_samples=10,
max_examples_per_batch=40,
)
sens2 = self.sensitivity_max_assert(
gbp.attribute,
input,
torch.zeros(20),
perturb_func=default_perturb_func,
target=torch.tensor([1] * 20),
n_perturb_samples=10,
max_examples_per_batch=5,
)
assertTensorAlmostEqual(self, sens1, sens2)
def test_sensitivity_max_multi_dim(self) -> None:
model = BasicModel_MultiLayer()
input = torch.arange(1.0, 13.0).view(4, 3)
additional_forward_args = (None, True)
targets: List = [(0, 1, 1), (0, 1, 1), (1, 1, 1), (0, 1, 1)]
ig = IntegratedGradients(model)
self.sensitivity_max_assert(
ig.attribute,
input,
torch.tensor([0.006, 0.01, 0.001, 0.008]),
n_perturb_samples=1,
max_examples_per_batch=4,
perturb_func=_perturb_func,
target=targets,
additional_forward_args=additional_forward_args,
)
def test_sensitivity_max_multi_dim_batching(self) -> None:
model = BasicModel_MultiLayer()
input = torch.arange(1.0, 16.0).view(5, 3)
additional_forward_args = (torch.ones(5, 3).float(), False)
targets: List = [0, 0, 0, 0, 0]
sa = Saliency(model)
sensitivity1 = self.sensitivity_max_assert(
sa.attribute,
input,
torch.zeros(5),
n_perturb_samples=1,
max_examples_per_batch=None,
perturb_func=_perturb_func,
target=targets,
additional_forward_args=additional_forward_args,
)
sensitivity2 = self.sensitivity_max_assert(
sa.attribute,
input,
torch.zeros(5),
n_perturb_samples=10,
max_examples_per_batch=10,
perturb_func=_perturb_func,
target=targets,
additional_forward_args=additional_forward_args,
)
assertTensorAlmostEqual(self, sensitivity1, sensitivity2, 0.0)
def test_sensitivity_additional_forward_args_multi_args(self) -> None:
model = BasicModel4_MultiArgs()
input1 = torch.tensor([[1.5, 2.0, 3.3]])
input2 = torch.tensor([[3.0, 3.5, 2.2]])
args = torch.tensor([[1.0, 3.0, 4.0]])
ig = DeepLift(model)
sensitivity1 = self.sensitivity_max_assert(
ig.attribute,
(input1, input2),
torch.zeros(1),
additional_forward_args=args,
n_perturb_samples=1,
max_examples_per_batch=1,
perturb_func=_perturb_func,
)
sensitivity2 = self.sensitivity_max_assert(
ig.attribute,
(input1, input2),
torch.zeros(1),
additional_forward_args=args,
n_perturb_samples=4,
max_examples_per_batch=2,
perturb_func=_perturb_func,
)
assertTensorAlmostEqual(self, sensitivity1, sensitivity2, 0.0)
def test_classification_sensitivity_tpl_target_w_baseline(self) -> None:
model = BasicModel_MultiLayer()
input = torch.arange(1.0, 13.0).view(4, 3)
baseline = torch.ones(4, 3)
additional_forward_args = (torch.arange(1, 13).view(4, 3).float(), True)
targets: List = [(0, 1, 1), (0, 1, 1), (1, 1, 1), (0, 1, 1)]
dl = DeepLift(model)
sens1 = self.sensitivity_max_assert(
dl.attribute,
input,
torch.tensor([0.01, 0.003, 0.001, 0.001]),
additional_forward_args=additional_forward_args,
baselines=baseline,
target=targets,
n_perturb_samples=10,
perturb_func=_perturb_func,
)
sens2 = self.sensitivity_max_assert(
dl.attribute,
input,
torch.zeros(4),
additional_forward_args=additional_forward_args,
baselines=baseline,
target=targets,
n_perturb_samples=10,
perturb_func=_perturb_func,
max_examples_per_batch=30,
)
assertTensorAlmostEqual(self, sens1, sens2)
def sensitivity_max_assert(
self,
expl_func: Callable,
inputs: TensorOrTupleOfTensorsGeneric,
expected_sensitivity: Tensor,
perturb_func: Callable = _perturb_func,
n_perturb_samples: int = 5,
max_examples_per_batch: int = None,
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
) -> Tensor:
if baselines is None:
sens = sensitivity_max(
expl_func,
inputs,
perturb_func=perturb_func,
target=target,
additional_forward_args=additional_forward_args,
n_perturb_samples=n_perturb_samples,
max_examples_per_batch=max_examples_per_batch,
)
else:
sens = sensitivity_max(
expl_func,
inputs,
perturb_func=perturb_func,
baselines=baselines,
target=target,
additional_forward_args=additional_forward_args,
n_perturb_samples=n_perturb_samples,
max_examples_per_batch=max_examples_per_batch,
)
assertTensorAlmostEqual(self, sens, expected_sensitivity, 0.05)
return sens
|
#!/usr/bin/env python3
import typing
from typing import Any, Callable, cast, List, Tuple, Union
import torch
from captum._utils.typing import BaselineType, TargetType, TensorOrTupleOfTensorsGeneric
from captum.attr import (
Attribution,
DeepLift,
FeatureAblation,
IntegratedGradients,
Saliency,
)
from captum.metrics import infidelity, infidelity_perturb_func_decorator
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel2,
BasicModel4_MultiArgs,
BasicModel_ConvNet_One_Conv,
BasicModel_MultiLayer,
)
from torch import Tensor
from torch.nn import Module
@infidelity_perturb_func_decorator(False)
def _local_perturb_func_default(
inputs: TensorOrTupleOfTensorsGeneric,
) -> TensorOrTupleOfTensorsGeneric:
return _local_perturb_func(inputs)[1]
@typing.overload
def _local_perturb_func(inputs: Tensor) -> Tuple[Tensor, Tensor]:
...
@typing.overload
def _local_perturb_func(
inputs: Tuple[Tensor, ...]
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
def _local_perturb_func(
inputs: TensorOrTupleOfTensorsGeneric,
) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Union[Tensor, Tuple[Tensor, ...]]]:
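# Returns a tuple of (perturbations, perturbed inputs): each input is shifted by a small
# constant, and that same constant is reported as the corresponding perturbation.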
input2 = None
if isinstance(inputs, tuple):
input1 = inputs[0]
input2 = inputs[1]
else:
input1 = cast(Tensor, inputs)
perturb1 = 0.0009 * torch.ones_like(input1)
if input2 is None:
return perturb1, input1 - perturb1
perturb2 = 0.0121 * torch.ones_like(input2)
return (perturb1, perturb2), (input1 - perturb1, input2 - perturb2)
@infidelity_perturb_func_decorator(True)
def _global_perturb_func1_default(
inputs: TensorOrTupleOfTensorsGeneric,
) -> TensorOrTupleOfTensorsGeneric:
return _global_perturb_func1(inputs)[1]
@typing.overload
def _global_perturb_func1(inputs: Tensor) -> Tuple[Tensor, Tensor]:
...
@typing.overload
def _global_perturb_func1(
inputs: Tuple[Tensor, ...]
) -> Tuple[Tuple[Tensor, ...], Tuple[Tensor, ...]]:
...
# sensitivity-N, N = #input features
def _global_perturb_func1(
inputs: TensorOrTupleOfTensorsGeneric,
) -> Tuple[Union[Tensor, Tuple[Tensor, ...]], Union[Tensor, Tuple[Tensor, ...]]]:
input2 = None
if isinstance(inputs, tuple):
input1 = inputs[0]
input2 = inputs[1]
else:
input1 = cast(Tensor, inputs)
pert1 = torch.ones(input1.shape)
if input2 is None:
return pert1, torch.zeros(input1.shape)
pert2 = torch.ones(input2.shape)
return (pert1, pert2), (torch.zeros(input1.shape), torch.zeros(input2.shape))
class Test(BaseTest):
def test_basic_infidelity_single(self) -> None:
input1 = torch.tensor([3.0])
input2 = torch.tensor([1.0])
inputs = (input1, input2)
expected = torch.zeros(1)
self.basic_model_assert(BasicModel2(), inputs, expected)
def test_basic_infidelity_multiple(self) -> None:
input1 = torch.tensor([3.0] * 3)
input2 = torch.tensor([1.0] * 3)
inputs = (input1, input2)
expected = torch.zeros(3)
infid = self.basic_model_assert(BasicModel2(), inputs, expected)
infid_w_common_func = self.basic_model_assert(
BasicModel2(),
inputs,
expected,
perturb_func=_local_perturb_func_default,
multiply_by_inputs=False,
)
assertTensorAlmostEqual(self, infid, infid_w_common_func)
def test_basic_infidelity_multiple_with_batching(self) -> None:
input1 = torch.tensor([3.0] * 20)
input2 = torch.tensor([1.0] * 20)
expected = torch.zeros(20)
infid1 = self.basic_model_assert(
BasicModel2(),
(input1, input2),
expected,
n_perturb_samples=5,
max_batch_size=21,
)
infid2 = self.basic_model_assert(
BasicModel2(),
(input1, input2),
expected,
n_perturb_samples=5,
max_batch_size=60,
)
assertTensorAlmostEqual(self, infid1, infid2, delta=0.01, mode="max")
def test_basic_infidelity_additional_forward_args1(self) -> None:
model = BasicModel4_MultiArgs()
input1 = torch.tensor([[1.5, 2.0, 3.3]])
input2 = torch.tensor([[3.0, 3.5, 2.2]])
args = torch.tensor([[1.0, 3.0, 4.0]])
ig = IntegratedGradients(model)
infidelity1 = self.basic_model_global_assert(
ig,
model,
(input1, input2),
torch.zeros(1),
additional_args=args,
n_perturb_samples=1,
max_batch_size=1,
perturb_func=_global_perturb_func1,
)
infidelity2 = self.basic_model_global_assert(
ig,
model,
(input1, input2),
torch.zeros(1),
additional_args=args,
n_perturb_samples=5,
max_batch_size=2,
perturb_func=_global_perturb_func1,
)
infidelity2_w_custom_pert_func = self.basic_model_global_assert(
ig,
model,
(input1, input2),
torch.zeros(1),
additional_args=args,
n_perturb_samples=5,
max_batch_size=2,
perturb_func=_global_perturb_func1_default,
)
assertTensorAlmostEqual(self, infidelity1, infidelity2, 0.0)
assertTensorAlmostEqual(self, infidelity2_w_custom_pert_func, infidelity2, 0.0)
def test_classification_infidelity_convnet_multi_targets(self) -> None:
model = BasicModel_ConvNet_One_Conv()
dl = DeepLift(model)
input = torch.stack([torch.arange(1, 17).float()] * 20, dim=0).view(20, 1, 4, 4)
self.infidelity_assert(
model,
dl.attribute(input, target=torch.tensor([1] * 20)) / input,
input,
torch.zeros(20),
target=torch.tensor([1] * 20),
multi_input=False,
n_perturb_samples=500,
max_batch_size=120,
)
def test_classification_infidelity_tpl_target(self) -> None:
model = BasicModel_MultiLayer()
input = torch.arange(1.0, 13.0).view(4, 3)
additional_forward_args = (torch.arange(1, 13).view(4, 3).float(), True)
targets: List = [(0, 1, 1), (0, 1, 1), (1, 1, 1), (0, 1, 1)]
sa = Saliency(model)
infid1 = self.infidelity_assert(
model,
sa.attribute(
input, target=targets, additional_forward_args=additional_forward_args
),
input,
torch.zeros(4),
additional_args=additional_forward_args,
target=targets,
multi_input=False,
)
infid2 = self.infidelity_assert(
model,
sa.attribute(
input, target=targets, additional_forward_args=additional_forward_args
),
input,
torch.zeros(4),
additional_args=additional_forward_args,
target=targets,
max_batch_size=2,
multi_input=False,
)
assertTensorAlmostEqual(self, infid1, infid2, delta=1e-05, mode="max")
def test_classification_infidelity_tpl_target_w_baseline(self) -> None:
model = BasicModel_MultiLayer()
input = torch.arange(1.0, 13.0).view(4, 3)
baseline = torch.ones(4, 3)
additional_forward_args = (torch.arange(1, 13).view(4, 3).float(), True)
targets: List = [(0, 1, 1), (0, 1, 1), (1, 1, 1), (0, 1, 1)]
ig = IntegratedGradients(model)
def perturbed_func2(inputs, baselines):
return torch.ones(baselines.shape), baselines
@infidelity_perturb_func_decorator(True)
def perturbed_func3(inputs, baselines):
return baselines
attr, delta = ig.attribute(
input,
target=targets,
additional_forward_args=additional_forward_args,
baselines=baseline,
return_convergence_delta=True,
)
infid = self.infidelity_assert(
model,
attr,
input,
torch.tensor([0.10686, 0.0, 0.0, 0.0]),
additional_args=additional_forward_args,
baselines=baseline,
target=targets,
multi_input=False,
n_perturb_samples=3,
perturb_func=perturbed_func3,
)
infid2 = self.infidelity_assert(
model,
attr,
input,
torch.tensor([0.10686, 0.0, 0.0, 0.0]),
additional_args=additional_forward_args,
baselines=baseline,
target=targets,
multi_input=False,
n_perturb_samples=3,
perturb_func=perturbed_func2,
)
assertTensorAlmostEqual(self, infid, delta * delta)
assertTensorAlmostEqual(self, infid, infid2)
def test_basic_infidelity_multiple_with_normalize(self) -> None:
input1 = torch.tensor([3.0] * 3)
input2 = torch.tensor([1.0] * 3)
inputs = (input1, input2)
expected = torch.zeros(3)
model = BasicModel2()
ig = IntegratedGradients(model)
attrs = ig.attribute(inputs)
scaled_attrs = tuple(attr * 100 for attr in attrs)
infid = self.infidelity_assert(model, attrs, inputs, expected, normalize=True)
scaled_infid = self.infidelity_assert(
model,
scaled_attrs,
inputs,
expected,
normalize=True,
)
# scaling attr should not change normalized infidelity
assertTensorAlmostEqual(self, infid, scaled_infid)
def test_sensitivity_n_ig(self) -> None:
model = BasicModel_MultiLayer()
ig = IntegratedGradients(model)
self.basic_multilayer_sensitivity_n(ig, model)
def test_sensitivity_n_fa(self) -> None:
model = BasicModel_MultiLayer()
fa = FeatureAblation(model)
self.basic_multilayer_sensitivity_n(fa, model)
def basic_multilayer_sensitivity_n(
self, attr_algo: Attribution, model: Module
) -> None:
# sensitivity-2
def _global_perturb_func2(input):
pert = torch.tensor([[0, 1, 1], [1, 1, 0], [1, 0, 1]]).float()
return pert, (1 - pert) * input
# sensitivity-1
def _global_perturb_func3(input):
pert = torch.tensor([[0, 0, 1], [1, 0, 0], [0, 1, 0]]).float()
return pert, (1 - pert) * input
@infidelity_perturb_func_decorator(True)
def _global_perturb_func3_custom(input):
return _global_perturb_func3(input)[1]
input = torch.tensor([[1.0, 2.5, 3.3]])
# infidelity for sensitivity-1
infid = self.basic_model_global_assert(
attr_algo,
model,
input,
torch.zeros(1),
additional_args=None,
target=0,
n_perturb_samples=3,
max_batch_size=None,
perturb_func=_global_perturb_func3,
)
infid_w_default = self.basic_model_global_assert(
attr_algo,
model,
input,
torch.zeros(1),
additional_args=None,
target=0,
n_perturb_samples=3,
max_batch_size=None,
perturb_func=_global_perturb_func3_custom,
)
assertTensorAlmostEqual(self, infid, infid_w_default)
# infidelity for sensitivity-2
self.basic_model_global_assert(
attr_algo,
model,
input,
torch.zeros(1),
additional_args=None,
target=0,
n_perturb_samples=3,
max_batch_size=None,
perturb_func=_global_perturb_func2,
)
# infidelity for sensitivity-3
self.basic_model_global_assert(
attr_algo,
model,
input,
torch.zeros(1),
additional_args=None,
target=0,
n_perturb_samples=3,
max_batch_size=None,
perturb_func=_global_perturb_func1,
)
def basic_model_assert(
self,
model: Module,
inputs: TensorOrTupleOfTensorsGeneric,
expected: Tensor,
n_perturb_samples: int = 10,
max_batch_size: int = None,
perturb_func: Callable = _local_perturb_func,
multiply_by_inputs: bool = False,
normalize: bool = False,
) -> Tensor:
ig = IntegratedGradients(model)
if multiply_by_inputs:
attrs = cast(
TensorOrTupleOfTensorsGeneric,
tuple(
attr / input for input, attr in zip(inputs, ig.attribute(inputs))
),
)
else:
attrs = ig.attribute(inputs)
return self.infidelity_assert(
model,
attrs,
inputs,
expected,
n_perturb_samples=n_perturb_samples,
max_batch_size=max_batch_size,
perturb_func=perturb_func,
normalize=normalize,
)
def basic_model_global_assert(
self,
attr_algo: Attribution,
model: Module,
inputs: TensorOrTupleOfTensorsGeneric,
expected: Tensor,
additional_args: Any = None,
target: TargetType = None,
n_perturb_samples: int = 10,
max_batch_size: int = None,
perturb_func: Callable = _global_perturb_func1,
normalize: bool = False,
) -> Tensor:
attrs = attr_algo.attribute(
inputs, additional_forward_args=additional_args, target=target
)
infid = self.infidelity_assert(
model,
attrs,
inputs,
expected,
additional_args=additional_args,
perturb_func=perturb_func,
target=target,
n_perturb_samples=n_perturb_samples,
max_batch_size=max_batch_size,
normalize=normalize,
)
return infid
def infidelity_assert(
self,
model: Module,
attributions: TensorOrTupleOfTensorsGeneric,
inputs: TensorOrTupleOfTensorsGeneric,
expected: Tensor,
additional_args: Any = None,
baselines: BaselineType = None,
n_perturb_samples: int = 10,
target: TargetType = None,
max_batch_size: int = None,
multi_input: bool = True,
perturb_func: Callable = _local_perturb_func,
normalize: bool = False,
**kwargs: Any,
) -> Tensor:
infid = infidelity(
model,
perturb_func,
inputs,
attributions,
additional_forward_args=additional_args,
target=target,
baselines=baselines,
n_perturb_samples=n_perturb_samples,
max_examples_per_batch=max_batch_size,
normalize=normalize,
)
assertTensorAlmostEqual(self, infid, expected, 0.05)
return infid
|
from unittest.mock import patch
import torch
from captum.insights.attr_vis.features import (
_convert_figure_base64,
EmptyFeature,
FeatureOutput,
GeneralFeature,
ImageFeature,
TextFeature,
)
from matplotlib.figure import Figure
from tests.helpers.basic import BaseTest
class TestTextFeature(BaseTest):
FEATURE_NAME = "question"
def test_text_feature_returns_text_as_visualization_type(self) -> None:
feature = TextFeature(self.FEATURE_NAME, None, None, None)
self.assertEqual(feature.visualization_type(), "text")
def test_text_feature_uses_visualization_transform_if_provided(self) -> None:
input_data = torch.rand(2, 2)
transformed_data = torch.rand(1, 1)
def mock_transform(data):
return transformed_data
feature = TextFeature(
name=self.FEATURE_NAME,
baseline_transforms=None,
input_transforms=None,
visualization_transform=mock_transform,
)
feature_output = feature.visualize(
attribution=torch.rand(1, 1), data=input_data, contribution_frac=1.0
)
# has transformed data
self.assertEqual(feature_output.base, transformed_data)
feature = TextFeature(
name=self.FEATURE_NAME,
baseline_transforms=None,
input_transforms=None,
visualization_transform=None,
)
feature_output = feature.visualize(
attribution=torch.rand(1, 1), data=input_data, contribution_frac=1.0
)
# has original data
self.assertIs(feature_output.base, input_data)
def test_text_feature_generates_correct_visualization_output(self) -> None:
attribution = torch.tensor([0.1, 0.2, 0.3, 0.4])
input_data = torch.rand(1, 2)
expected_modified = [100 * x for x in (attribution / attribution.max())]
contribution_frac = torch.rand(1).item()
feature = TextFeature(
name=self.FEATURE_NAME,
baseline_transforms=None,
input_transforms=None,
visualization_transform=None,
)
feature_output = feature.visualize(attribution, input_data, contribution_frac)
expected_feature_output = FeatureOutput(
name=self.FEATURE_NAME,
base=input_data,
modified=expected_modified,
type="text",
contribution=contribution_frac,
)
self.assertEqual(expected_feature_output, feature_output)
class TestEmptyFeature(BaseTest):
def test_empty_feature_should_generate_fixed_output(self) -> None:
feature = EmptyFeature()
contribution = torch.rand(1).item()
expected_output = FeatureOutput(
name="empty",
base=None,
modified=None,
type="empty",
contribution=contribution,
)
self.assertEqual(expected_output, feature.visualize(None, None, contribution))
class TestImageFeature(BaseTest):
    def test_image_feature_generates_correct_output(self) -> None:
attribution = torch.zeros(1, 3, 4, 4)
data = torch.ones(1, 3, 4, 4)
contribution = 1.0
name = "photo"
orig_fig = Figure(figsize=(4, 4))
attr_fig = Figure(figsize=(4, 4))
def mock_viz_attr(*args, **kwargs):
if kwargs["method"] == "original_image":
return orig_fig, None
else:
return attr_fig, None
feature = ImageFeature(
name=name,
baseline_transforms=None,
input_transforms=None,
visualization_transform=None,
)
with patch(
"captum.attr._utils.visualization.visualize_image_attr", mock_viz_attr
):
feature_output = feature.visualize(attribution, data, contribution)
expected_feature_output = FeatureOutput(
name=name,
base=_convert_figure_base64(orig_fig),
modified=_convert_figure_base64(attr_fig),
type="image",
contribution=contribution,
)
self.assertEqual(expected_feature_output, feature_output)
class TestGeneralFeature(BaseTest):
def test_general_feature_generates_correct_output(self) -> None:
name = "general_feature"
categories = ["cat1", "cat2", "cat3", "cat4"]
attribution = torch.Tensor(1, 4)
attribution.fill_(0.5)
data = torch.rand(1, 4)
contribution = torch.rand(1).item()
attr_squeezed = attribution.squeeze(0)
expected_modified = [
x * 100 for x in (attr_squeezed / attr_squeezed.norm()).tolist()
]
expected_base = [
f"{c}: {d:.2f}" for c, d in zip(categories, data.squeeze().tolist())
]
feature = GeneralFeature(name, categories)
feature_output = feature.visualize(
attribution=attribution, data=data, contribution_frac=contribution
)
expected_feature_output = FeatureOutput(
name=name,
base=expected_base,
modified=expected_modified,
type="general",
contribution=contribution,
)
self.assertEqual(expected_feature_output, feature_output)
|
#!/usr/bin/env python3
import unittest
from typing import Callable, List, Union
import torch
import torch.nn as nn
from captum.insights import AttributionVisualizer, Batch
from captum.insights.attr_vis.app import FilterConfig
from captum.insights.attr_vis.features import BaseFeature, FeatureOutput, ImageFeature
from tests.helpers.basic import BaseTest
class RealFeature(BaseFeature):
def __init__(
self,
name: str,
baseline_transforms: Union[Callable, List[Callable]],
input_transforms: Union[Callable, List[Callable]],
visualization_transforms: Union[None, Callable, List[Callable]] = None,
) -> None:
super().__init__(
name,
baseline_transforms=baseline_transforms,
input_transforms=input_transforms,
visualization_transform=None,
)
def visualization_type(self) -> str:
return "real"
def visualize(self, attribution, data, contribution_frac) -> FeatureOutput:
return FeatureOutput(
name=self.name,
base=data,
modified=data,
type=self.visualization_type(),
contribution=contribution_frac,
)
def _get_classes():
classes = [
"Plane",
"Car",
"Bird",
"Cat",
"Deer",
"Dog",
"Frog",
"Horse",
"Ship",
"Truck",
]
return classes
class TinyCnn(nn.Module):
def __init__(self, feature_extraction=False) -> None:
super().__init__()
self.feature_extraction = feature_extraction
self.conv1 = nn.Conv2d(3, 3, 5)
self.relu1 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2, 2)
if not self.feature_extraction:
self.conv2 = nn.Conv2d(3, 10, 2)
def forward(self, x):
x = self.pool1(self.relu1(self.conv1(x)))
if not self.feature_extraction:
x = self.conv2(x)
x = x.view(-1, 10)
else:
x = x.view(-1, 12)
return x
class TinyMultiModal(nn.Module):
def __init__(self, input_size=256, pretrained=False) -> None:
super().__init__()
if pretrained:
self.img_model = _get_cnn(feature_extraction=True)
else:
self.img_model = TinyCnn(feature_extraction=True)
self.misc_model = nn.Sequential(nn.Linear(input_size, 12), nn.ReLU())
self.fc = nn.Linear(12 * 2, 10)
def forward(self, img, misc):
img = self.img_model(img)
misc = self.misc_model(misc)
x = torch.cat((img, misc), dim=-1)
return self.fc(x)
def _labelled_img_data(num_samples=10, width=8, height=8, depth=3, num_labels=10):
for _ in range(num_samples):
yield torch.empty(depth, height, width).uniform_(0, 1), torch.randint(
num_labels, (1,)
)
def _multi_modal_data(img_dataset, feature_size=256):
def misc_data(length, feature_size=None):
for _ in range(length):
yield torch.randn(feature_size)
misc_dataset = misc_data(length=len(img_dataset), feature_size=feature_size)
# re-arrange dataset
for (img, label), misc in zip(img_dataset, misc_dataset):
yield ((img, misc), label)
def _get_cnn(feature_extraction=False):
return TinyCnn(feature_extraction=feature_extraction)
def _get_multimodal(input_size=256):
return TinyMultiModal(input_size=input_size, pretrained=True)
def to_iter(data_loader):
# TODO: not sure how to make this cleaner
for x, y in data_loader:
# if it's not multi input
# NOTE: torch.utils.data.DataLoader returns a list in this case
if not isinstance(x, list):
x = (x,)
yield Batch(inputs=tuple(x), labels=y)
class Test(BaseTest):
def test_one_feature(self) -> None:
batch_size = 2
classes = _get_classes()
dataset = list(
_labelled_img_data(num_labels=len(classes), num_samples=batch_size)
)
# NOTE: using DataLoader to batch the inputs
# since AttributionVisualizer requires the input to be of size `B x ...`
data_loader = torch.utils.data.DataLoader(
list(dataset), batch_size=batch_size, shuffle=False, num_workers=0
)
visualizer = AttributionVisualizer(
models=[_get_cnn()],
classes=classes,
features=[
ImageFeature(
"Photo",
input_transforms=[lambda x: x],
baseline_transforms=[lambda x: x * 0],
)
],
dataset=to_iter(data_loader),
score_func=None,
)
visualizer._config = FilterConfig(attribution_arguments={"n_steps": 2})
outputs = visualizer.visualize()
for output in outputs:
total_contrib = sum(abs(f.contribution) for f in output[0].feature_outputs)
self.assertAlmostEqual(total_contrib, 1.0, places=6)
def test_multi_features(self) -> None:
batch_size = 2
classes = _get_classes()
img_dataset = list(
_labelled_img_data(num_labels=len(classes), num_samples=batch_size)
)
misc_feature_size = 2
dataset = _multi_modal_data(
img_dataset=img_dataset, feature_size=misc_feature_size
)
# NOTE: using DataLoader to batch the inputs since
# AttributionVisualizer requires the input to be of size `N x ...`
data_loader = torch.utils.data.DataLoader(
list(dataset), batch_size=batch_size, shuffle=False, num_workers=0
)
visualizer = AttributionVisualizer(
models=[_get_multimodal(input_size=misc_feature_size)],
classes=classes,
features=[
ImageFeature(
"Photo",
input_transforms=[lambda x: x],
baseline_transforms=[lambda x: x * 0],
),
RealFeature(
"Random",
input_transforms=[lambda x: x],
baseline_transforms=[lambda x: x * 0],
),
],
dataset=to_iter(data_loader),
score_func=None,
)
visualizer._config = FilterConfig(attribution_arguments={"n_steps": 2})
outputs = visualizer.visualize()
for output in outputs:
total_contrib = sum(abs(f.contribution) for f in output[0].feature_outputs)
self.assertAlmostEqual(total_contrib, 1.0, places=6)
# TODO: add test for multiple models (related to TODO in captum/insights/api.py)
#
# TODO: add test to make the attribs == 0 -- error occurs
# I know (through manual testing) that this breaks some existing code
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
import collections
from typing import List
import torch
from captum.robust import AttackComparator, FGSM
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel, BasicModel_MultiLayer
from torch import Tensor
def float_metric(model_out: Tensor, target: int):
return model_out[:, target]
ModelResult = collections.namedtuple("ModelResult", "accuracy output")
def tuple_metric(model_out: Tensor, target: int, named_tuple=False):
_, pred = torch.max(model_out, dim=1)
acc = (pred == target).float()
output = model_out[:, target]
if named_tuple:
return ModelResult(
accuracy=acc.item() if acc.numel() == 1 else acc,
output=output.item() if output.numel() == 1 else output,
)
return (acc, output)
def drop_column_perturb(inp: Tensor, column: int) -> Tensor:
mask = torch.ones_like(inp)
mask[:, column] = 0.0
return mask * inp
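# The helper below encodes each string as the ordinal value of its first character,
# producing a 1 x len(inp) float tensor; e.g. ["abc", "zyd", "ghi"] becomes
# tensor([[97., 122., 103.]]).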
def text_preproc_fn(inp: List[str]) -> Tensor:
return torch.tensor([float(ord(elem[0])) for elem in inp]).unsqueeze(0)
def batch_text_preproc_fn(inp: List[List[str]]) -> Tensor:
return torch.cat([text_preproc_fn(elem) for elem in inp])
def string_perturb(inp: List[str]) -> List[str]:
return ["a" + elem for elem in inp]
def string_batch_perturb(inp: List[List[str]]) -> List[List[str]]:
return [string_perturb(elem) for elem in inp]
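# SamplePerturb (below) zeroes out one column per call, cycling through the columns
# in order: for a 1 x 3 input, successive calls mask column 0, then 1, then 2, then
# column 0 again. The "Sequence Column Perturb" attacks below rely on this cycling
# when run with `num_attempts=5`.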
class SamplePerturb:
def __init__(self) -> None:
self.count = 0
def perturb(self, inp: Tensor) -> Tensor:
mask = torch.ones_like(inp)
mask[:, self.count % mask.shape[1]] = 0.0
self.count += 1
return mask * inp
class Test(BaseTest):
def test_attack_comparator_basic(self) -> None:
model = BasicModel()
inp = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
attack_comp = AttackComparator(
forward_func=lambda x: model(x)
+ torch.tensor([[0.000001, 0.0, 0.0, 0.0, 0.0]]),
metric=tuple_metric,
)
attack_comp.add_attack(
drop_column_perturb,
name="first_column_perturb",
attack_kwargs={"column": 0},
)
attack_comp.add_attack(
drop_column_perturb,
name="last_column_perturb",
attack_kwargs={"column": -1},
)
attack_comp.add_attack(
FGSM(model),
attack_kwargs={"epsilon": 0.5},
additional_attack_arg_names=["target"],
)
batch_results = attack_comp.evaluate(inp, target=0, named_tuple=True)
expected_first_results = {
"Original": (1.0, 1.0),
"first_column_perturb": {"mean": (0.0, 0.0)},
"last_column_perturb": {"mean": (1.0, 1.0)},
"FGSM": {"mean": (1.0, 1.0)},
}
self._compare_results(batch_results, expected_first_results)
alt_inp = torch.tensor([[1.0, 2.0, -3.0, 4.0, -5.0]])
second_batch_results = attack_comp.evaluate(alt_inp, target=4, named_tuple=True)
expected_second_results = {
"Original": (0.0, -5.0),
"first_column_perturb": {"mean": (0.0, -5.0)},
"last_column_perturb": {"mean": (0.0, 0.0)},
"FGSM": {"mean": (0.0, -4.5)},
}
self._compare_results(second_batch_results, expected_second_results)
expected_summary_results = {
"Original": {"mean": (0.5, -2.0)},
"first_column_perturb": {"mean": (0.0, -2.5)},
"last_column_perturb": {"mean": (0.5, 0.5)},
"FGSM": {"mean": (0.5, -1.75)},
}
self._compare_results(attack_comp.summary(), expected_summary_results)
def test_attack_comparator_with_preproc(self) -> None:
model = BasicModel_MultiLayer()
text_inp = ["abc", "zyd", "ghi"]
attack_comp = AttackComparator(
forward_func=model, metric=tuple_metric, preproc_fn=text_preproc_fn
)
attack_comp.add_attack(
SamplePerturb().perturb,
name="Sequence Column Perturb",
num_attempts=5,
apply_before_preproc=False,
)
attack_comp.add_attack(
string_perturb,
name="StringPerturb",
apply_before_preproc=True,
)
batch_results = attack_comp.evaluate(
text_inp, target=0, named_tuple=True, perturbations_per_eval=3
)
expected_first_results = {
"Original": (0.0, 1280.0),
"Sequence Column Perturb": {
"mean": (0.0, 847.2),
"max": (0.0, 892.0),
"min": (0.0, 792.0),
},
"StringPerturb": {"mean": (0.0, 1156.0)},
}
self._compare_results(batch_results, expected_first_results)
expected_summary_results = {
"Original": {"mean": (0.0, 1280.0)},
"Sequence Column Perturb Mean Attempt": {"mean": (0.0, 847.2)},
"Sequence Column Perturb Min Attempt": {"mean": (0.0, 792.0)},
"Sequence Column Perturb Max Attempt": {"mean": (0.0, 892.0)},
"StringPerturb": {"mean": (0.0, 1156.0)},
}
self._compare_results(attack_comp.summary(), expected_summary_results)
def test_attack_comparator_with_additional_args(self) -> None:
model = BasicModel_MultiLayer()
text_inp = [["abc", "zyd", "ghi"], ["mnop", "qrs", "Tuv"]]
additional_forward_args = torch.ones((2, 3)) * -97
attack_comp = AttackComparator(
forward_func=model, metric=tuple_metric, preproc_fn=batch_text_preproc_fn
)
attack_comp.add_attack(
SamplePerturb().perturb,
name="Sequence Column Perturb",
num_attempts=5,
apply_before_preproc=False,
)
attack_comp.add_attack(
string_batch_perturb,
name="StringPerturb",
apply_before_preproc=True,
)
batch_results = attack_comp.evaluate(
text_inp,
additional_forward_args=additional_forward_args,
target=0,
named_tuple=True,
perturbations_per_eval=2,
)
expected_first_results = {
"Original": ([0.0, 0.0], [116.0, 52.0]),
"Sequence Column Perturb": {
"mean": ([0.0, 0.0], [-1.0, -1.0]),
"max": ([0.0, 0.0], [-1.0, -1.0]),
"min": ([0.0, 0.0], [-1.0, -1.0]),
},
"StringPerturb": {"mean": ([0.0, 0.0], [2.0, 2.0])},
}
self._compare_results(batch_results, expected_first_results)
expected_summary_results = {
"Original": {
"mean": (0.0, 84.0),
},
"Sequence Column Perturb Mean Attempt": {"mean": (0.0, -1.0)},
"Sequence Column Perturb Min Attempt": {"mean": (0.0, -1.0)},
"Sequence Column Perturb Max Attempt": {"mean": (0.0, -1.0)},
"StringPerturb": {"mean": (0.0, 2.0)},
}
self._compare_results(attack_comp.summary(), expected_summary_results)
attack_comp.reset()
self.assertEqual(len(attack_comp.summary()), 0)
def _compare_results(self, obtained, expected) -> None:
if isinstance(expected, dict):
self.assertIsInstance(obtained, dict)
for key in expected:
self._compare_results(obtained[key], expected[key])
elif isinstance(expected, tuple):
self.assertIsInstance(obtained, tuple)
for i in range(len(expected)):
self._compare_results(obtained[i], expected[i])
else:
assertTensorAlmostEqual(self, obtained, expected)
|
#!/usr/bin/env python3
from typing import Any, Callable, List, Optional, Tuple, Union
import torch
from captum._utils.typing import TensorLikeList, TensorOrTupleOfTensorsGeneric
from captum.robust import FGSM
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel, BasicModel2, BasicModel_MultiLayer
from torch import Tensor
from torch.nn import CrossEntropyLoss
class Test(BaseTest):
def test_attack_nontargeted(self) -> None:
model = BasicModel()
input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
self._FGSM_assert(model, input, 1, 0.1, [[2.0, -8.9, 9.0, 1.0, -3.0]])
def test_attack_targeted(self) -> None:
model = BasicModel()
input = torch.tensor([[9.0, 10.0, -6.0, -1.0]])
self._FGSM_assert(
model, input, 3, 0.2, [[9.0, 10.0, -6.0, -1.2]], targeted=True
)
def test_attack_multiinput(self) -> None:
model = BasicModel2()
input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
self._FGSM_assert(
model,
(input1, input2),
0,
0.25,
([[3.75, -1.0], [2.75, 10.0]], [[2.25, -5.0], [-2.0, 1.0]]),
)
def test_attack_label_list(self) -> None:
model = BasicModel2()
input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
self._FGSM_assert(
model,
(input1, input2),
[0, 1],
0.1,
([[3.9, -1.0], [3.0, 9.9]], [[2.1, -5.0], [-2.0, 1.1]]),
)
def test_attack_label_tensor(self) -> None:
model = BasicModel2()
input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
labels = torch.tensor([0, 1])
self._FGSM_assert(
model,
(input1, input2),
labels,
0.1,
([[4.1, -1.0], [3.0, 10.1]], [[1.9, -5.0], [-2.0, 0.9]]),
targeted=True,
)
def test_attack_label_tuple(self) -> None:
model = BasicModel()
input = torch.tensor(
[[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]], requires_grad=True
)
labels = (0, 1)
self._FGSM_assert(
model,
input,
labels,
0.1,
[[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -3.9], [10.0, 5.0]]],
)
def test_attack_label_listtuple(self) -> None:
model = BasicModel()
input = torch.tensor(
[[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]], requires_grad=True
)
labels: List[Tuple[int, ...]] = [(1, 1), (0, 1)]
self._FGSM_assert(
model,
input,
labels,
0.1,
[[[4.0, 2.0], [-1.0, -1.9]], [[3.0, -3.9], [10.0, 5.0]]],
)
def test_attack_additional_inputs(self) -> None:
model = BasicModel_MultiLayer()
add_input = torch.tensor([[-1.0, 2.0, 2.0]], requires_grad=True)
input = torch.tensor([[1.0, 6.0, -3.0]], requires_grad=True)
self._FGSM_assert(
model, input, 0, 0.2, [[0.8, 5.8, -3.2]], additional_inputs=(add_input,)
)
self._FGSM_assert(
model, input, 0, 0.2, [[0.8, 5.8, -3.2]], additional_inputs=add_input
)
def test_attack_loss_defined(self) -> None:
model = BasicModel_MultiLayer()
add_input = torch.tensor([[-1.0, 2.0, 2.0]])
input = torch.tensor([[1.0, 6.0, -3.0]])
labels = torch.tensor([0])
loss_func = CrossEntropyLoss(reduction="none")
adv = FGSM(model, loss_func)
perturbed_input = adv.perturb(
input, 0.2, labels, additional_forward_args=(add_input,)
)
assertTensorAlmostEqual(
self, perturbed_input, [[1.0, 6.0, -3.0]], delta=0.01, mode="max"
)
def test_attack_bound(self) -> None:
model = BasicModel()
input = torch.tensor([[9.0, 10.0, -6.0, -1.0]])
self._FGSM_assert(
model,
input,
3,
0.2,
[[5.0, 5.0, -5.0, -1.2]],
targeted=True,
lower_bound=-5.0,
upper_bound=5.0,
)
def test_attack_masked_tensor(self) -> None:
model = BasicModel()
input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]], requires_grad=True)
mask = torch.tensor([[1, 0, 0, 1, 1]])
self._FGSM_assert(
model, input, 1, 0.1, [[2.0, -9.0, 9.0, 1.0, -3.0]], mask=mask
)
def test_attack_masked_multiinput(self) -> None:
model = BasicModel2()
input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
mask1 = torch.tensor([[1, 0], [1, 0]])
mask2 = torch.tensor([[0, 0], [0, 0]])
self._FGSM_assert(
model,
(input1, input2),
0,
0.25,
([[3.75, -1.0], [2.75, 10.0]], [[2.0, -5.0], [-2.0, 1.0]]),
mask=(mask1, mask2),
)
def test_attack_masked_loss_defined(self) -> None:
model = BasicModel_MultiLayer()
add_input = torch.tensor([[-1.0, 2.0, 2.0]])
input = torch.tensor([[1.0, 6.0, -3.0]])
labels = torch.tensor([0])
mask = torch.tensor([[0, 0, 1]])
loss_func = CrossEntropyLoss(reduction="none")
adv = FGSM(model, loss_func)
perturbed_input = adv.perturb(
input, 0.2, labels, additional_forward_args=(add_input,), mask=mask
)
assertTensorAlmostEqual(
self, perturbed_input, [[1.0, 6.0, -3.0]], delta=0.01, mode="max"
)
def test_attack_masked_bound(self) -> None:
model = BasicModel()
input = torch.tensor([[9.0, 10.0, -6.0, -1.0]])
mask = torch.tensor([[1, 0, 1, 0]])
self._FGSM_assert(
model,
input,
3,
0.2,
[[5.0, 5.0, -5.0, -1.0]],
targeted=True,
lower_bound=-5.0,
upper_bound=5.0,
mask=mask,
)
def _FGSM_assert(
self,
model: Callable,
inputs: TensorOrTupleOfTensorsGeneric,
target: Any,
epsilon: float,
answer: Union[TensorLikeList, Tuple[TensorLikeList, ...]],
targeted=False,
additional_inputs: Any = None,
lower_bound: float = float("-inf"),
upper_bound: float = float("inf"),
mask: Optional[TensorOrTupleOfTensorsGeneric] = None,
) -> None:
adv = FGSM(model, lower_bound=lower_bound, upper_bound=upper_bound)
perturbed_input = adv.perturb(
inputs, epsilon, target, additional_inputs, targeted, mask
)
if isinstance(perturbed_input, Tensor):
assertTensorAlmostEqual(
self, perturbed_input, answer, delta=0.01, mode="max"
)
else:
for i in range(len(perturbed_input)):
assertTensorAlmostEqual(
self, perturbed_input[i], answer[i], delta=0.01, mode="max"
)
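# A minimal sketch of the untargeted FGSM step exercised by the tests above,
# assuming a scalar-valued `loss_fn`; the parameter names are illustrative. The
# real `captum.robust.FGSM` additionally supports targeted attacks, masks,
# lower/upper bounds and tuple inputs.
def _fgsm_step_sketch(model, x, target, epsilon, loss_fn):
    x = x.clone().detach().requires_grad_(True)
    loss = loss_fn(model(x), target)
    loss.backward()
    # Untargeted: step in the direction that increases the loss.
    return (x + epsilon * x.grad.sign()).detach()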
|
#!/usr/bin/env python3
from typing import cast, List
import torch
from captum.robust import MinParamPerturbation
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel, BasicModel_MultiLayer
from torch import Tensor
def inp_subtract(inp: Tensor, ind: int = 0, add_arg: int = 0) -> Tensor:
inp_repeat = 1.0 * inp
inp_repeat[0][ind] -= add_arg
return inp_repeat
def add_char(inp: List[str], ind: int = 0, char_val: int = 0) -> List[str]:
list_copy = list(inp)
list_copy[ind] = chr(122 - char_val) + list_copy[ind]
return list_copy
def add_char_batch(inp: List[List[str]], ind: int, char_val: int) -> List[List[str]]:
return [add_char(elem, ind, char_val) for elem in inp]
def text_preproc_fn(inp: List[str]) -> Tensor:
return torch.tensor([float(ord(elem[0])) for elem in inp]).unsqueeze(0)
def batch_text_preproc_fn(inp: List[List[str]]) -> Tensor:
return torch.cat([text_preproc_fn(elem) for elem in inp])
def alt_correct_fn(model_out: Tensor, target: int, threshold: float) -> bool:
if all(model_out[:, target] > threshold):
return True
return False
class Test(BaseTest):
def test_minimal_pert_basic_linear(self) -> None:
model = BasicModel()
inp = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
minimal_pert = MinParamPerturbation(
forward_func=lambda x: model(x)
+ torch.tensor([[0.000001, 0.0, 0.0, 0.0, 0.0]]),
attack=inp_subtract,
arg_name="add_arg",
arg_min=0.0,
arg_max=1000.0,
arg_step=1.0,
)
target_inp, pert = minimal_pert.evaluate(
inp, target=0, attack_kwargs={"ind": 0}
)
self.assertAlmostEqual(cast(float, pert), 2.0)
assertTensorAlmostEqual(
self, target_inp, torch.tensor([[0.0, -9.0, 9.0, 1.0, -3.0]])
)
def test_minimal_pert_basic_binary(self) -> None:
model = BasicModel()
inp = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
minimal_pert = MinParamPerturbation(
forward_func=lambda x: model(x)
+ torch.tensor([[0.000001, 0.0, 0.0, 0.0, 0.0]]),
attack=inp_subtract,
arg_name="add_arg",
arg_min=0.0,
arg_max=1000.0,
arg_step=1.0,
mode="binary",
)
target_inp, pert = minimal_pert.evaluate(
inp,
target=0,
attack_kwargs={"ind": 0},
perturbations_per_eval=10,
)
self.assertAlmostEqual(cast(float, pert), 2.0)
assertTensorAlmostEqual(
self, target_inp, torch.tensor([[0.0, -9.0, 9.0, 1.0, -3.0]])
)
def test_minimal_pert_preproc(self) -> None:
model = BasicModel_MultiLayer()
text_inp = ["abc", "zyd", "ghi"]
minimal_pert = MinParamPerturbation(
forward_func=model,
attack=add_char,
arg_name="char_val",
arg_min=0,
arg_max=26,
arg_step=1,
preproc_fn=text_preproc_fn,
apply_before_preproc=True,
)
target_inp, pert = minimal_pert.evaluate(
text_inp, target=1, attack_kwargs={"ind": 1}
)
self.assertEqual(pert, None)
self.assertEqual(target_inp, None)
def test_minimal_pert_alt_correct(self) -> None:
model = BasicModel_MultiLayer()
text_inp = ["abc", "zyd", "ghi"]
minimal_pert = MinParamPerturbation(
forward_func=model,
attack=add_char,
arg_name="char_val",
arg_min=0,
arg_max=26,
arg_step=1,
preproc_fn=text_preproc_fn,
apply_before_preproc=True,
correct_fn=alt_correct_fn,
num_attempts=5,
)
expected_list = ["abc", "ezyd", "ghi"]
target_inp, pert = minimal_pert.evaluate(
text_inp,
target=1,
attack_kwargs={"ind": 1},
correct_fn_kwargs={"threshold": 1200},
perturbations_per_eval=5,
)
self.assertEqual(pert, 21)
self.assertListEqual(target_inp, expected_list)
target_inp_single, pert_single = minimal_pert.evaluate(
text_inp,
target=1,
attack_kwargs={"ind": 1},
correct_fn_kwargs={"threshold": 1200},
)
self.assertEqual(pert_single, 21)
self.assertListEqual(target_inp_single, expected_list)
def test_minimal_pert_additional_forward_args(self) -> None:
model = BasicModel_MultiLayer()
text_inp = [["abc", "zyd", "ghi"], ["abc", "uyd", "ghi"]]
additional_forward_args = torch.ones((2, 3)) * -97
model = BasicModel_MultiLayer()
minimal_pert = MinParamPerturbation(
forward_func=model,
attack=add_char_batch,
arg_name="char_val",
arg_min=0,
arg_max=26,
arg_step=1,
preproc_fn=batch_text_preproc_fn,
apply_before_preproc=True,
correct_fn=alt_correct_fn,
)
expected_list = [["abc", "uzyd", "ghi"], ["abc", "uuyd", "ghi"]]
target_inp, pert = minimal_pert.evaluate(
text_inp,
target=1,
attack_kwargs={"ind": 1},
correct_fn_kwargs={"threshold": 100},
perturbations_per_eval=15,
additional_forward_args=(additional_forward_args,),
)
self.assertEqual(pert, 5)
self.assertListEqual(target_inp, expected_list)
target_inp_single, pert_single = minimal_pert.evaluate(
text_inp,
target=1,
attack_kwargs={"ind": 1},
correct_fn_kwargs={"threshold": 100},
additional_forward_args=(additional_forward_args,),
)
self.assertEqual(pert_single, 5)
self.assertListEqual(target_inp_single, expected_list)
def test_minimal_pert_tuple_test(self) -> None:
model = BasicModel_MultiLayer()
text_inp = (
[["abc", "zyd", "ghi"], ["abc", "uyd", "ghi"]],
torch.ones((2, 3)) * -97,
)
model = BasicModel_MultiLayer()
minimal_pert = MinParamPerturbation(
forward_func=lambda x: model(*x),
attack=lambda x, ind, char_val: (add_char_batch(x[0], ind, char_val), x[1]),
arg_name="char_val",
arg_min=0,
arg_max=26,
arg_step=1,
preproc_fn=lambda x: (batch_text_preproc_fn(x[0]), x[1]),
apply_before_preproc=True,
correct_fn=alt_correct_fn,
)
expected_list = [["abc", "uzyd", "ghi"], ["abc", "uuyd", "ghi"]]
target_inp, pert = minimal_pert.evaluate(
text_inp,
target=1,
attack_kwargs={"ind": 1},
correct_fn_kwargs={"threshold": 100},
perturbations_per_eval=15,
)
self.assertEqual(pert, 5)
self.assertListEqual(target_inp[0], expected_list)
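# A rough sketch of the linear-mode search that MinParamPerturbation performs in
# the tests above: walk `arg_name` from `arg_min` to `arg_max` in steps of
# `arg_step` and return the first perturbed input that the correctness check
# rejects, together with the argument value. Names below are illustrative;
# preprocessing, batched evaluation and binary-search mode are omitted.
def _min_param_search_sketch(attack, is_correct, inp, arg_min, arg_max, arg_step):
    arg = arg_min
    while arg <= arg_max:
        perturbed = attack(inp, arg)
        if not is_correct(perturbed):
            return perturbed, arg
        arg += arg_step
    return None, None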
|
#!/usr/bin/env python3
import torch
from captum.robust import PGD
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicModel, BasicModel2, BasicModel_MultiLayer
from torch.nn import CrossEntropyLoss
class Test(BaseTest):
def test_attack_nontargeted(self) -> None:
model = BasicModel()
input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
adv = PGD(model)
perturbed_input = adv.perturb(input, 0.25, 0.1, 2, 4)
assertTensorAlmostEqual(
self,
perturbed_input,
[[2.0, -9.0, 9.0, 1.0, -2.8]],
delta=0.01,
mode="max",
)
def test_attack_targeted(self) -> None:
model = BasicModel()
input = torch.tensor([[9.0, 10.0, -6.0, -1.0]], requires_grad=True)
adv = PGD(model)
perturbed_input = adv.perturb(input, 0.2, 0.1, 3, 3, targeted=True)
assertTensorAlmostEqual(
self,
perturbed_input,
[[9.0, 10.0, -6.0, -1.2]],
delta=0.01,
mode="max",
)
def test_attack_l2norm(self) -> None:
model = BasicModel()
input = torch.tensor([[9.0, 10.0, -6.0, -1.0]], requires_grad=True)
adv = PGD(model)
perturbed_input = adv.perturb(input, 0.2, 0.1, 3, 2, targeted=True, norm="L2")
assertTensorAlmostEqual(
self,
perturbed_input,
[[9.0, 10.0, -6.2, -1.0]],
delta=0.01,
mode="max",
)
def test_attack_multiinput(self) -> None:
model = BasicModel2()
input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
adv = PGD(model)
perturbed_input = adv.perturb((input1, input2), 0.25, 0.1, 3, 0, norm="L2")
answer = ([[3.75, -1.0], [2.75, 10.0]], [[2.25, -5.0], [-2.0, 1.0]])
for i in range(len(perturbed_input)):
assertTensorAlmostEqual(
self,
perturbed_input[i],
answer[i],
delta=0.01,
mode="max",
)
def test_attack_3dimensional_input(self) -> None:
model = BasicModel()
input = torch.tensor(
[[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]], requires_grad=True
)
adv = PGD(model)
perturbed_input = adv.perturb(input, 0.25, 0.1, 3, (0, 1))
assertTensorAlmostEqual(
self,
perturbed_input,
[[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -3.75], [10.0, 5.0]]],
delta=0.01,
mode="max",
)
def test_attack_loss_defined(self) -> None:
model = BasicModel_MultiLayer()
add_input = torch.tensor([[-1.0, 2.0, 2.0]])
input = torch.tensor([[1.0, 6.0, -3.0]])
labels = torch.tensor([0])
loss_func = CrossEntropyLoss(reduction="none")
adv = PGD(model, loss_func)
perturbed_input = adv.perturb(
input, 0.25, 0.1, 3, labels, additional_forward_args=(add_input,)
)
assertTensorAlmostEqual(
self, perturbed_input, [[1.0, 6.0, -3.0]], delta=0.01, mode="max"
)
def test_attack_random_start(self) -> None:
model = BasicModel()
input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
adv = PGD(model)
perturbed_input = adv.perturb(input, 0.25, 0.1, 0, 4, random_start=True)
assertTensorAlmostEqual(
self,
perturbed_input,
[[2.0, -9.0, 9.0, 1.0, -3.0]],
delta=0.25,
mode="max",
)
perturbed_input = adv.perturb(
input, 0.25, 0.1, 0, 4, norm="L2", random_start=True
)
norm = torch.norm((perturbed_input - input).squeeze()).numpy()
self.assertLessEqual(norm, 0.25)
def test_attack_masked_nontargeted(self) -> None:
model = BasicModel()
input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
mask = torch.tensor([[1, 1, 0, 0, 0]])
adv = PGD(model)
perturbed_input = adv.perturb(input, 0.25, 0.1, 2, 4, mask=mask)
assertTensorAlmostEqual(
self,
perturbed_input,
[[2.0, -9.0, 9.0, 1.0, -3.0]],
delta=0.01,
mode="max",
)
def test_attack_masked_targeted(self) -> None:
model = BasicModel()
input = torch.tensor([[9.0, 10.0, -6.0, -1.0]], requires_grad=True)
mask = torch.tensor([[1, 1, 1, 0]])
adv = PGD(model)
perturbed_input = adv.perturb(input, 0.2, 0.1, 3, 3, targeted=True, mask=mask)
assertTensorAlmostEqual(
self,
perturbed_input,
[[9.0, 10.0, -6.0, -1.0]],
delta=0.01,
mode="max",
)
def test_attack_masked_multiinput(self) -> None:
model = BasicModel2()
input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
mask1 = torch.tensor([[1, 1], [0, 0]])
mask2 = torch.tensor([[0, 1], [0, 1]])
adv = PGD(model)
perturbed_input = adv.perturb(
(input1, input2), 0.25, 0.1, 3, 0, norm="L2", mask=(mask1, mask2)
)
answer = ([[3.75, -1.0], [3.0, 10.0]], [[2.0, -5.0], [-2.0, 1.0]])
for i in range(len(perturbed_input)):
assertTensorAlmostEqual(
self,
perturbed_input[i],
answer[i],
delta=0.01,
mode="max",
)
def test_attack_masked_random_start(self) -> None:
model = BasicModel()
input = torch.tensor([[2.0, -9.0, 9.0, 1.0, -3.0]])
mask = torch.tensor([[1, 0, 1, 0, 1]])
adv = PGD(model)
perturbed_input = adv.perturb(
input, 0.25, 0.1, 0, 4, random_start=True, mask=mask
)
assertTensorAlmostEqual(
self,
perturbed_input,
[[2.0, -9.0, 9.0, 1.0, -3.0]],
delta=0.25,
mode="max",
)
perturbed_input = adv.perturb(
input, 0.25, 0.1, 0, 4, norm="L2", random_start=True, mask=mask
)
norm = torch.norm((perturbed_input - input).squeeze()).numpy()
self.assertLessEqual(norm, 0.25)
def test_attack_masked_3dimensional_input(self) -> None:
model = BasicModel()
input = torch.tensor(
[[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]], requires_grad=True
)
mask = torch.tensor([[[1, 0], [0, 1]], [[1, 0], [1, 1]]])
adv = PGD(model)
perturbed_input = adv.perturb(input, 0.25, 0.1, 3, (0, 1), mask=mask)
assertTensorAlmostEqual(
self,
perturbed_input,
[[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]],
delta=0.01,
mode="max",
)
def test_attack_masked_loss_defined(self) -> None:
model = BasicModel_MultiLayer()
add_input = torch.tensor([[-1.0, 2.0, 2.0]])
input = torch.tensor([[1.0, 6.0, -3.0]])
mask = torch.tensor([[0, 1, 0]])
labels = torch.tensor([0])
loss_func = CrossEntropyLoss(reduction="none")
adv = PGD(model, loss_func)
perturbed_input = adv.perturb(
input, 0.25, 0.1, 3, labels, additional_forward_args=(add_input,), mask=mask
)
assertTensorAlmostEqual(
self, perturbed_input, [[1.0, 6.0, -3.0]], delta=0.01, mode="max"
)
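# A compact sketch of the projected-gradient-descent loop exercised above, assuming
# an L-inf threat model and a scalar-valued `loss_fn`: repeat an FGSM-like step of
# size `step_size` and project back into the `radius`-ball around the original
# input after every step. The real `captum.robust.PGD` also supports L2 projection,
# random starts, masks and targeted attacks.
def _pgd_sketch(model, x, target, radius, step_size, num_steps, loss_fn):
    x_orig = x.clone().detach()
    x_adv = x_orig.clone()
    for _ in range(num_steps):
        x_adv = x_adv.clone().detach().requires_grad_(True)
        loss = loss_fn(model(x_adv), target)
        loss.backward()
        x_adv = x_adv + step_size * x_adv.grad.sign()
        # Project back into the L-inf ball of the given radius.
        x_adv = x_orig + (x_adv - x_orig).clamp(-radius, radius)
    return x_adv.detach()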
|
import inspect
import os
import unittest
from functools import partial
from typing import Callable, Iterator, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from captum.influence import DataInfluence
from captum.influence._core.tracincp_fast_rand_proj import (
TracInCPFast,
TracInCPFastRandProj,
)
from parameterized import parameterized
from parameterized.parameterized import param
from torch import Tensor
from torch.nn import Module
from torch.utils.data import DataLoader, Dataset
def _isSorted(x, key=lambda x: x, descending=True):
if descending:
return all([key(x[i]) >= key(x[i + 1]) for i in range(len(x) - 1)])
else:
return all([key(x[i]) <= key(x[i + 1]) for i in range(len(x) - 1)])
def _wrap_model_in_dataparallel(net):
alt_device_ids = [0] + [x for x in range(torch.cuda.device_count() - 1, 0, -1)]
net = net.cuda()
return torch.nn.DataParallel(net, device_ids=alt_device_ids)
def _move_sample_to_cuda(samples):
return [s.cuda() for s in samples]
class ExplicitDataset(Dataset):
def __init__(self, samples, labels, use_gpu=False) -> None:
self.samples, self.labels = samples, labels
if use_gpu:
self.samples = (
_move_sample_to_cuda(self.samples)
if isinstance(self.samples, list)
else self.samples.cuda()
)
self.labels = self.labels.cuda()
def __len__(self) -> int:
return len(self.samples)
def __getitem__(self, idx):
return (self.samples[idx], self.labels[idx])
class UnpackDataset(Dataset):
def __init__(self, samples, labels, use_gpu=False) -> None:
self.samples, self.labels = samples, labels
if use_gpu:
self.samples = (
_move_sample_to_cuda(self.samples)
if isinstance(self.samples, list)
else self.samples.cuda()
)
self.labels = self.labels.cuda()
def __len__(self) -> int:
return len(self.samples[0])
def __getitem__(self, idx):
"""
        The returned item is a list containing each input's sample at `idx`
        followed by the label: [sample_0[idx], sample_1[idx], ...] + [labels[idx]].
"""
return [lst[idx] for lst in self.samples] + [self.labels[idx]]
class IdentityDataset(ExplicitDataset):
def __init__(self, num_features, use_gpu=False) -> None:
self.samples = torch.diag(torch.ones(num_features))
self.labels = torch.zeros(num_features).unsqueeze(1)
if use_gpu:
self.samples = self.samples.cuda()
self.labels = self.labels.cuda()
class RangeDataset(ExplicitDataset):
def __init__(self, low, high, num_features, use_gpu=False) -> None:
self.samples = (
torch.arange(start=low, end=high, dtype=torch.float)
.repeat(num_features, 1)
.transpose(1, 0)
)
self.labels = torch.arange(start=low, end=high, dtype=torch.float).unsqueeze(1)
if use_gpu:
self.samples = self.samples.cuda()
self.labels = self.labels.cuda()
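# BinaryDataset (below) is a small XOR-style dataset: the first 12 normalized 2-D
# points lie in the quadrants where both coordinates share a sign and are labeled
# +1, while the remaining 12 lie in the mixed-sign quadrants and are labeled -1.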
class BinaryDataset(ExplicitDataset):
def __init__(self, use_gpu=False) -> None:
self.samples = F.normalize(
torch.stack(
(
torch.Tensor([1, 1]),
torch.Tensor([2, 1]),
torch.Tensor([1, 2]),
torch.Tensor([1, 5]),
torch.Tensor([0.01, 1]),
torch.Tensor([5, 1]),
torch.Tensor([1, 0.01]),
torch.Tensor([-1, -1]),
torch.Tensor([-2, -1]),
torch.Tensor([-1, -2]),
torch.Tensor([-1, -5]),
torch.Tensor([-5, -1]),
torch.Tensor([1, -1]),
torch.Tensor([2, -1]),
torch.Tensor([1, -2]),
torch.Tensor([1, -5]),
torch.Tensor([0.01, -1]),
torch.Tensor([5, -1]),
torch.Tensor([-1, 1]),
torch.Tensor([-2, 1]),
torch.Tensor([-1, 2]),
torch.Tensor([-1, 5]),
torch.Tensor([-5, 1]),
torch.Tensor([-1, 0.01]),
)
)
)
self.labels = torch.cat(
(
torch.Tensor([1]).repeat(12, 1),
torch.Tensor([-1]).repeat(12, 1),
)
)
super().__init__(self.samples, self.labels, use_gpu)
class CoefficientNet(nn.Module):
def __init__(self, in_features=1) -> None:
super().__init__()
self.fc1 = nn.Linear(in_features, 1, bias=False)
self.fc1.weight.data.fill_(0.01)
def forward(self, x):
x = self.fc1(x)
return x
class BasicLinearNet(nn.Module):
def __init__(self, in_features, hidden_nodes, out_features) -> None:
super().__init__()
self.linear1 = nn.Linear(in_features, hidden_nodes)
self.linear2 = nn.Linear(hidden_nodes, out_features)
def forward(self, input):
x = torch.tanh(self.linear1(input))
return torch.tanh(self.linear2(x))
class MultLinearNet(nn.Module):
def __init__(self, in_features, hidden_nodes, out_features, num_inputs) -> None:
super().__init__()
self.pre = nn.Linear(in_features * num_inputs, in_features)
self.linear1 = nn.Linear(in_features, hidden_nodes)
self.linear2 = nn.Linear(hidden_nodes, out_features)
def forward(self, *inputs):
"""
        `inputs` is a sequence of tensors, each with dimensions
        [batch_size x in_features]. The forward pass first concatenates them along
        the feature dimension and applies a linear layer to reduce the dimension
        back to `in_features`.
"""
inputs = self.pre(torch.cat(inputs, dim=1))
x = torch.tanh(self.linear1(inputs))
return torch.tanh(self.linear2(x))
def get_random_model_and_data(
tmpdir, unpack_inputs, return_test_data=True, use_gpu=False
):
in_features, hidden_nodes, out_features = 5, 4, 3
num_inputs = 2
net = (
BasicLinearNet(in_features, hidden_nodes, out_features)
if not unpack_inputs
else MultLinearNet(in_features, hidden_nodes, out_features, num_inputs)
).double()
num_checkpoints = 5
for i in range(num_checkpoints):
net.linear1.weight.data = torch.normal(
3, 4, (hidden_nodes, in_features)
).double()
net.linear2.weight.data = torch.normal(
5, 6, (out_features, hidden_nodes)
).double()
if unpack_inputs:
net.pre.weight.data = torch.normal(
3, 4, (in_features, in_features * num_inputs)
)
if hasattr(net, "pre"):
net.pre.weight.data = net.pre.weight.data.double()
checkpoint_name = "-".join(["checkpoint-reg", str(i + 1) + ".pt"])
net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
torch.save(net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name))
num_samples = 50
num_train = 32
all_labels = torch.normal(1, 2, (num_samples, out_features)).double()
train_labels = all_labels[:num_train]
test_labels = all_labels[num_train:]
if unpack_inputs:
all_samples = [
torch.normal(0, 1, (num_samples, in_features)).double()
for _ in range(num_inputs)
]
train_samples = [ts[:num_train] for ts in all_samples]
test_samples = [ts[num_train:] for ts in all_samples]
else:
all_samples = torch.normal(0, 1, (num_samples, in_features)).double()
train_samples = all_samples[:num_train]
test_samples = all_samples[num_train:]
dataset = (
ExplicitDataset(train_samples, train_labels, use_gpu)
if not unpack_inputs
else UnpackDataset(train_samples, train_labels, use_gpu)
)
if return_test_data:
return (
_wrap_model_in_dataparallel(net) if use_gpu else net,
dataset,
_move_sample_to_cuda(test_samples)
if isinstance(test_samples, list) and use_gpu
else test_samples.cuda()
if use_gpu
else test_samples,
test_labels.cuda() if use_gpu else test_labels,
)
else:
return _wrap_model_in_dataparallel(net) if use_gpu else net, dataset
class DataInfluenceConstructor:
name: str = ""
data_influence_class: type
def __init__(
self,
data_influence_class: type,
name: Optional[str] = None,
duplicate_loss_fn: bool = False,
**kwargs,
) -> None:
"""
        If `duplicate_loss_fn` is True, the provided `loss_fn` is explicitly passed
        as `test_loss_fn` when constructing the TracInCPBase instance.
"""
self.data_influence_class = data_influence_class
self.name = name if name else data_influence_class.__name__
self.duplicate_loss_fn = duplicate_loss_fn
self.kwargs = kwargs
def __repr__(self) -> str:
return self.name
def __call__(
self,
net: Module,
dataset: Union[Dataset, DataLoader],
tmpdir: Union[str, List[str], Iterator],
batch_size: Union[int, None],
loss_fn: Optional[Union[Module, Callable]],
**kwargs,
) -> DataInfluence:
constructor_kwargs = self.kwargs.copy()
constructor_kwargs.update(kwargs)
# if `self.duplicate_loss_fn`, explicitly pass in `loss_fn` as `test_loss_fn`
# when constructing the instance. Doing so should not affect the behavior of
# the returned tracincp instance, since if `test_loss_fn` is not passed in,
# the constructor sets `test_loss_fn` to be the same as `loss_fn`
if self.duplicate_loss_fn:
constructor_kwargs["test_loss_fn"] = loss_fn
if self.data_influence_class is TracInCPFastRandProj:
self.check_annoy()
if self.data_influence_class in [TracInCPFast, TracInCPFastRandProj]:
return self.data_influence_class(
net,
list(net.children())[-1],
dataset,
tmpdir,
loss_fn=loss_fn,
batch_size=batch_size,
**constructor_kwargs,
)
else:
return self.data_influence_class(
net,
dataset,
tmpdir,
batch_size=batch_size,
loss_fn=loss_fn,
**constructor_kwargs,
)
def check_annoy(self) -> None:
try:
import annoy # noqa
except ImportError:
raise unittest.SkipTest(
(
f"Skipping tests for {self.data_influence_class.__name__}, "
"because it requires the Annoy module."
)
)
def generate_test_name(
testcase_func: Callable,
param_num: str,
param: param,
args_to_skip: Optional[List[str]] = None,
) -> str:
"""
Creates human readable names for parameterized tests
"""
if args_to_skip is None:
args_to_skip = []
param_strs = []
func_param_names = list(inspect.signature(testcase_func).parameters)
# skip the first 'self' parameter
if func_param_names[0] == "self":
func_param_names = func_param_names[1:]
for i, arg in enumerate(param.args):
if func_param_names[i] in args_to_skip:
continue
if isinstance(arg, bool):
if arg:
param_strs.append(func_param_names[i])
else:
args_str = str(arg)
if args_str.isnumeric():
param_strs.append(func_param_names[i])
param_strs.append(args_str)
return "%s_%s" % (
testcase_func.__name__,
parameterized.to_safe_name("_".join(param_strs)),
)
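# For example, a test method `test_foo(self, reduction, constr, unpack_inputs)`
# parameterized with ("none", DataInfluenceConstructor(TracInCP), True) and
# `args_to_skip=["reduction"]` would roughly be named "test_foo_TracInCP_unpack_inputs".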
def build_test_name_func(args_to_skip: Optional[List[str]] = None):
"""
Returns function to generate human readable names for parameterized tests
"""
return partial(generate_test_name, args_to_skip=args_to_skip)
def _format_batch_into_tuple(
inputs: Union[Tuple, Tensor], targets: Tensor, unpack_inputs: bool
):
if unpack_inputs:
return (*inputs, targets)
else:
return (inputs, targets)
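# A schematic sketch of the TracInCP influence score the tests in this package
# check: for each saved checkpoint, take the dot product of the loss gradients of
# a training example and a test example, and sum over checkpoints (optionally
# weighted by the learning rate at each checkpoint). `grad_fn` is an assumed
# helper returning a flattened gradient vector; the real implementations batch
# these computations and support layer selection and random projections.
def _tracin_score_sketch(grad_fn, checkpoints, train_example, test_example):
    score = 0.0
    for checkpoint in checkpoints:
        train_grad = grad_fn(checkpoint, train_example)
        test_grad = grad_fn(checkpoint, test_example)
        score = score + torch.dot(train_grad, test_grad)
    return score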
|
import tempfile
from typing import Callable
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import (
TracInCPFast,
TracInCPFastRandProj,
)
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
_format_batch_into_tuple,
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
from torch.utils.data import DataLoader
class TestTracInDataLoader(BaseTest):
"""
    This tests that feeding a Dataset to `self.tracin_constructor` and feeding a
    DataLoader constructed from the same Dataset to `self.tracin_constructor`
    yield the same influence scores.
"""
@parameterized.expand(
[
(
reduction,
constr,
unpack_inputs,
)
for unpack_inputs in [False, True]
for reduction, constr in [
("none", DataInfluenceConstructor(TracInCP)),
("sum", DataInfluenceConstructor(TracInCPFast)),
("sum", DataInfluenceConstructor(TracInCPFastRandProj)),
(
"sum",
DataInfluenceConstructor(
TracInCPFastRandProj,
name="TracInCPFastRandProj_1DProj",
projection_dim=1,
),
),
]
],
name_func=build_test_name_func(args_to_skip=["reduction"]),
)
def test_tracin_dataloader(
self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 5
(
net,
train_dataset,
test_samples,
test_labels,
) = get_random_model_and_data(tmpdir, unpack_inputs, return_test_data=True)
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=reduction)
self.assertTrue(callable(tracin_constructor))
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
train_scores = tracin.influence(
_format_batch_into_tuple(test_samples, test_labels, unpack_inputs),
k=None,
)
tracin_dataloader = tracin_constructor(
net,
DataLoader(train_dataset, batch_size=batch_size, shuffle=False),
tmpdir,
None,
criterion,
)
train_scores_dataloader = tracin_dataloader.influence(
_format_batch_into_tuple(test_samples, test_labels, unpack_inputs),
k=None,
)
assertTensorAlmostEqual(
self,
train_scores,
train_scores_dataloader,
delta=0.0,
mode="max",
)
|
import tempfile
from typing import Callable
import torch
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
_format_batch_into_tuple,
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
class TestTracInGetKMostInfluential(BaseTest):
use_gpu_list = (
[True, False]
if torch.cuda.is_available() and torch.cuda.device_count() != 0
else [False]
)
param_list = []
for (batch_size, k) in [(4, 7), (7, 4), (40, 5), (5, 40), (40, 45)]:
for unpack_inputs in [True, False]:
for proponents in [True, False]:
for use_gpu in use_gpu_list:
for reduction, constr in [
(
"none",
DataInfluenceConstructor(
TracInCP, name="TracInCP_all_layers"
),
),
(
"none",
DataInfluenceConstructor(
TracInCP,
name="linear2",
layers=["module.linear2"] if use_gpu else ["linear2"],
),
),
]:
if not (
"sample_wise_grads_per_batch" in constr.kwargs
and constr.kwargs["sample_wise_grads_per_batch"]
and use_gpu
):
param_list.append(
(
reduction,
constr,
unpack_inputs,
proponents,
batch_size,
k,
use_gpu,
)
)
@parameterized.expand(
param_list,
name_func=build_test_name_func(),
)
def test_tracin_k_most_influential(
self,
reduction: str,
tracin_constructor: Callable,
unpack_inputs: bool,
proponents: bool,
batch_size: int,
k: int,
use_gpu: bool,
) -> None:
"""
        This test constructs a random BasicLinearNet and checks that the proponents
        obtained by calling `influence` and sorting are equal to the proponents
        obtained by calling `_k_most_influential`. Both calls are made through
        the wrapper method `influence`.
"""
with tempfile.TemporaryDirectory() as tmpdir:
(
net,
train_dataset,
test_samples,
test_labels,
) = get_random_model_and_data(
tmpdir,
unpack_inputs,
True,
use_gpu,
)
self.assertTrue(isinstance(reduction, str))
self.assertTrue(callable(tracin_constructor))
criterion = nn.MSELoss(reduction=reduction)
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
train_scores = tracin.influence(
_format_batch_into_tuple(test_samples, test_labels, unpack_inputs),
k=None,
)
sort_idx = torch.argsort(train_scores, dim=1, descending=proponents)[:, 0:k]
idx, _train_scores = tracin.influence(
_format_batch_into_tuple(test_samples, test_labels, unpack_inputs),
k=k,
proponents=proponents,
)
for i in range(len(idx)):
# check that idx[i] is correct
assertTensorAlmostEqual(
self,
train_scores[i, idx[i]],
train_scores[i, sort_idx[i]],
delta=0.0,
mode="max",
)
# check that _train_scores[i] is correct
assertTensorAlmostEqual(
self,
_train_scores[i],
train_scores[i, sort_idx[i]],
delta=0.001,
mode="max",
)
|
import tempfile
from typing import Callable
import torch
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import TracInCPFast
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
_format_batch_into_tuple,
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
from torch.utils.data import DataLoader
class TestTracInSelfInfluence(BaseTest):
use_gpu_list = (
[True, False]
if torch.cuda.is_available() and torch.cuda.device_count() != 0
else [False]
)
param_list = []
for unpack_inputs in [True, False]:
for use_gpu in use_gpu_list:
for (reduction, constructor) in [
(
"none",
DataInfluenceConstructor(TracInCP, name="TracInCP_all_layers"),
),
(
"none",
DataInfluenceConstructor(
TracInCP,
name="TracInCP_linear1",
layers=["module.linear1"] if use_gpu else ["linear1"],
),
),
(
"none",
DataInfluenceConstructor(
TracInCP,
name="TracInCP_linear1_linear2",
layers=["module.linear1", "module.linear2"]
if use_gpu
else ["linear1", "linear2"],
),
),
(
"sum",
DataInfluenceConstructor(
TracInCP,
name="TracInCP_sample_wise_grads_per_batch_all_layers",
sample_wise_grads_per_batch=True,
),
),
(
"sum",
DataInfluenceConstructor(
TracInCPFast, "TracInCPFast_last_fc_layer"
),
),
(
"mean",
DataInfluenceConstructor(
TracInCPFast, "TracInCPFast_last_fc_layer"
),
),
]:
if not (
"sample_wise_grads_per_batch" in constructor.kwargs
and constructor.kwargs["sample_wise_grads_per_batch"]
and use_gpu
):
param_list.append((reduction, constructor, unpack_inputs, use_gpu))
@parameterized.expand(
param_list,
name_func=build_test_name_func(),
)
def test_tracin_self_influence(
self,
reduction: str,
tracin_constructor: Callable,
unpack_inputs: bool,
use_gpu: bool,
) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
(net, train_dataset,) = get_random_model_and_data(
tmpdir,
unpack_inputs,
False,
use_gpu,
)
# compute tracin_scores of training data on training data
criterion = nn.MSELoss(reduction=reduction)
batch_size = 5
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
train_scores = tracin.influence(
_format_batch_into_tuple(
train_dataset.samples, train_dataset.labels, unpack_inputs
),
k=None,
)
# calculate self_tracin_scores
self_tracin_scores = tracin.self_influence(
outer_loop_by_checkpoints=False,
)
# check that self_tracin scores equals the diagonal of influence scores
assertTensorAlmostEqual(
self,
torch.diagonal(train_scores),
self_tracin_scores,
delta=0.01,
mode="max",
)
# check that setting `outer_loop_by_checkpoints=False` and
# `outer_loop_by_checkpoints=True` gives the same self influence scores
self_tracin_scores_by_checkpoints = tracin.self_influence(
DataLoader(train_dataset, batch_size=batch_size),
outer_loop_by_checkpoints=True,
)
assertTensorAlmostEqual(
self,
self_tracin_scores_by_checkpoints,
self_tracin_scores,
delta=0.01,
mode="max",
)
@parameterized.expand(
[
(reduction, constructor, unpack_inputs)
for unpack_inputs in [True, False]
for (reduction, constructor) in [
("none", DataInfluenceConstructor(TracInCP)),
(
"sum",
DataInfluenceConstructor(
TracInCP,
sample_wise_grads_per_batch=True,
),
),
("sum", DataInfluenceConstructor(TracInCPFast)),
("mean", DataInfluenceConstructor(TracInCPFast)),
]
],
name_func=build_test_name_func(),
)
def test_tracin_self_influence_dataloader_vs_single_batch(
self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
) -> None:
# tests that the result of calling the public method `self_influence` for a
# DataLoader of batches is the same as when the batches are collated into a
# single batch
with tempfile.TemporaryDirectory() as tmpdir:
(
net,
train_dataset,
) = get_random_model_and_data(tmpdir, unpack_inputs, return_test_data=False)
# create a single batch representing the entire dataset
single_batch = next(
iter(DataLoader(train_dataset, batch_size=len(train_dataset)))
)
# create a dataloader that yields batches from the dataset
dataloader = DataLoader(train_dataset, batch_size=5)
# create tracin instance
criterion = nn.MSELoss(reduction=reduction)
batch_size = 5
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
# compute self influence using `self_influence` when passing in a single
# batch
single_batch_self_influence = tracin.self_influence(single_batch)
# compute self influence using `self_influence` when passing in a
# dataloader with the same examples
dataloader_self_influence = tracin.self_influence(dataloader)
# the two self influences should be equal
assertTensorAlmostEqual(
self,
single_batch_self_influence,
dataloader_self_influence,
delta=0.01, # due to numerical issues, we can't set this to 0.0
mode="max",
)
|
import tempfile
from typing import Callable
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import TracInCPFast
from parameterized import parameterized
from tests.helpers.basic import BaseTest
from tests.influence._utils.common import (
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
class TestTracinValidator(BaseTest):
param_list = []
for reduction, constr in [
(
"none",
DataInfluenceConstructor(TracInCP, name="TracInCP"),
),
(
"mean",
DataInfluenceConstructor(
TracInCPFast,
name="TracInCpFast",
),
),
]:
param_list.append((reduction, constr))
@parameterized.expand(
param_list,
name_func=build_test_name_func(),
)
def test_tracin_require_inputs_dataset(
self,
reduction,
tracin_constructor: Callable,
) -> None:
"""
        This test verifies that the TracInCP and TracInCPFast
        influence methods require `inputs_dataset`.
"""
with tempfile.TemporaryDirectory() as tmpdir:
(
net,
train_dataset,
test_samples,
test_labels,
) = get_random_model_and_data(tmpdir, unpack_inputs=False)
criterion = nn.MSELoss(reduction=reduction)
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
loss_fn=criterion,
batch_size=1,
)
with self.assertRaisesRegex(AssertionError, "required."):
tracin.influence(None, k=None)
|
import os
import tempfile
from collections import OrderedDict
from typing import Callable, cast, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from captum.influence._core.tracincp import TracInCP
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
_wrap_model_in_dataparallel,
BasicLinearNet,
BinaryDataset,
build_test_name_func,
DataInfluenceConstructor,
)
class TestTracInXOR(BaseTest):
# TODO: Move test setup to use setUp and tearDown method overrides.
def _test_tracin_xor_setup(self, tmpdir: str, use_gpu: bool = False):
net = BasicLinearNet(2, 2, 1)
state = OrderedDict(
[
(
"linear1.weight",
torch.Tensor([[-1.2956, -1.4465], [-0.3890, -0.7420]]),
),
("linear1.bias", torch.Tensor([1.2924, 0.0021])),
("linear2.weight", torch.Tensor([[-1.2013, 0.7174]])),
("linear2.bias", torch.Tensor([0.5880])),
]
)
net.load_state_dict(state)
net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
checkpoint_name = "-".join(["checkpoint", "class", "0" + ".pt"])
torch.save(net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name))
state = OrderedDict(
[
(
"linear1.weight",
torch.Tensor([[-1.3238, -1.4899], [-0.4544, -0.7448]]),
),
("linear1.bias", torch.Tensor([1.3185, -0.0317])),
("linear2.weight", torch.Tensor([[-1.2342, 0.7741]])),
("linear2.bias", torch.Tensor([0.6234])),
]
)
net.load_state_dict(state)
net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
checkpoint_name = "-".join(["checkpoint", "class", "1" + ".pt"])
torch.save(net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name))
state = OrderedDict(
[
(
"linear1.weight",
torch.Tensor([[-1.3546, -1.5288], [-0.5250, -0.7591]]),
),
("linear1.bias", torch.Tensor([1.3432, -0.0684])),
("linear2.weight", torch.Tensor([[-1.2490, 0.8534]])),
("linear2.bias", torch.Tensor([0.6749])),
]
)
net.load_state_dict(state)
net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
checkpoint_name = "-".join(["checkpoint", "class", "2" + ".pt"])
torch.save(net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name))
state = OrderedDict(
[
(
"linear1.weight",
torch.Tensor([[-1.4022, -1.5485], [-0.5688, -0.7607]]),
),
("linear1.bias", torch.Tensor([1.3740, -0.1571])),
("linear2.weight", torch.Tensor([[-1.3412, 0.9013]])),
("linear2.bias", torch.Tensor([0.6468])),
]
)
net.load_state_dict(state)
net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
checkpoint_name = "-".join(["checkpoint", "class", "3" + ".pt"])
torch.save(net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name))
state = OrderedDict(
[
(
"linear1.weight",
torch.Tensor([[-1.4464, -1.5890], [-0.6348, -0.7665]]),
),
("linear1.bias", torch.Tensor([1.3791, -0.2008])),
("linear2.weight", torch.Tensor([[-1.3818, 0.9586]])),
("linear2.bias", torch.Tensor([0.6954])),
]
)
net.load_state_dict(state)
net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
checkpoint_name = "-".join(["checkpoint", "class", "4" + ".pt"])
torch.save(net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name))
state = OrderedDict(
[
(
"linear1.weight",
torch.Tensor([[-1.5217, -1.6242], [-0.6644, -0.7842]]),
),
("linear1.bias", torch.Tensor([1.3500, -0.2418])),
("linear2.weight", torch.Tensor([[-1.4304, 0.9980]])),
("linear2.bias", torch.Tensor([0.7567])),
]
)
net.load_state_dict(state)
net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
checkpoint_name = "-".join(["checkpoint", "class", "5" + ".pt"])
torch.save(net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name))
state = OrderedDict(
[
(
"linear1.weight",
torch.Tensor([[-1.5551, -1.6631], [-0.7420, -0.8025]]),
),
("linear1.bias", torch.Tensor([1.3508, -0.2618])),
("linear2.weight", torch.Tensor([[-1.4272, 1.0772]])),
("linear2.bias", torch.Tensor([0.8427])),
]
)
net.load_state_dict(state)
net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
checkpoint_name = "-".join(["checkpoint", "class", "6" + ".pt"])
torch.save(net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name))
state = OrderedDict(
[
(
"linear1.weight",
torch.Tensor([[-1.5893, -1.6656], [-0.7863, -0.8369]]),
),
("linear1.bias", torch.Tensor([1.3949, -0.3215])),
("linear2.weight", torch.Tensor([[-1.4555, 1.1600]])),
("linear2.bias", torch.Tensor([0.8730])),
]
)
net.load_state_dict(state)
net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
checkpoint_name = "-".join(["checkpoint", "class", "7" + ".pt"])
torch.save(net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name))
dataset = BinaryDataset(use_gpu)
return net_adjusted, dataset
parametrized_list = [
(
"none",
DataInfluenceConstructor(
TracInCP, name="TracInCP_linear1", layers=["linear1"]
),
"check_idx",
False,
),
(
"none",
DataInfluenceConstructor(TracInCP, name="TracInCP_all_layers"),
"check_idx",
False,
),
(
None,
DataInfluenceConstructor(TracInCP, name="TracInCP_all_layers"),
"sample_wise_trick",
False,
),
(
None,
DataInfluenceConstructor(
TracInCP, name="TracInCP_linear1_linear2", layers=["linear1", "linear2"]
),
"sample_wise_trick",
False,
),
]
if torch.cuda.is_available() and torch.cuda.device_count() != 0:
parametrized_list.extend(
[
(
"none",
DataInfluenceConstructor(TracInCP, name="TracInCP_all_layers"),
"check_idx",
True,
),
(
"none",
DataInfluenceConstructor(
TracInCP,
name="TracInCP_linear1_linear2",
layers=["module.linear1", "module.linear2"],
),
"check_idx",
True,
),
],
)
@parameterized.expand(
parametrized_list,
name_func=build_test_name_func(args_to_skip=["reduction"]),
)
def test_tracin_xor(
self,
reduction: Optional[str],
tracin_constructor: Callable,
mode: str,
use_gpu: bool,
) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 4
net, dataset = self._test_tracin_xor_setup(tmpdir, use_gpu)
testset = F.normalize(torch.empty(100, 2).normal_(mean=0, std=0.5), dim=1)
mask = ~torch.logical_xor(testset[:, 0] > 0, testset[:, 1] > 0)
testlabels = (
torch.where(mask, torch.tensor(1), torch.tensor(-1))
.unsqueeze(1)
.float()
)
if use_gpu:
testset = testset.cuda()
testlabels = testlabels.cuda()
self.assertTrue(callable(tracin_constructor))
if mode == "check_idx":
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=cast(str, reduction))
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
)
test_scores = tracin.influence((testset, testlabels))
idx = torch.argsort(test_scores, dim=1, descending=True)
# check that top 5 influences have matching binary classification
for i in range(len(idx)):
influence_labels = dataset.labels[idx[i][0:5], 0]
self.assertTrue(torch.all(testlabels[i, 0] == influence_labels))
if mode == "sample_wise_trick":
criterion = nn.MSELoss(reduction="none")
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
sample_wise_grads_per_batch=False,
)
# With sample-wise trick
criterion = nn.MSELoss(reduction="sum")
tracin_sample_wise_trick = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
sample_wise_grads_per_batch=True,
)
test_scores = tracin.influence((testset, testlabels))
test_scores_sample_wise_trick = tracin_sample_wise_trick.influence(
(testset, testlabels)
)
assertTensorAlmostEqual(
self, test_scores, test_scores_sample_wise_trick
)
|
import os
import tempfile
from typing import Callable, cast, Optional
import torch
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import (
TracInCPFast,
TracInCPFastRandProj,
)
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
_isSorted,
_wrap_model_in_dataparallel,
build_test_name_func,
CoefficientNet,
DataInfluenceConstructor,
IdentityDataset,
RangeDataset,
)
class TestTracInRegression(BaseTest):
def _test_tracin_regression_setup(
self, tmpdir: str, features: int, use_gpu: bool = False
):
low = 1
high = 17
dataset = RangeDataset(low, high, features, use_gpu)
net = CoefficientNet(in_features=features)
checkpoint_name = "-".join(["checkpoint-reg", "0" + ".pt"])
torch.save(net.state_dict(), os.path.join(tmpdir, checkpoint_name))
weights = [0.4379, 0.1653, 0.5132, 0.3651, 0.9992]
for i, weight in enumerate(weights):
net.fc1.weight.data.fill_(weight)
net_adjusted = _wrap_model_in_dataparallel(net) if use_gpu else net
checkpoint_name = "-".join(["checkpoint-reg", str(i + 1) + ".pt"])
torch.save(net_adjusted.state_dict(), os.path.join(tmpdir, checkpoint_name))
return dataset, net_adjusted
use_gpu_list = (
[True, False]
if torch.cuda.is_available() and torch.cuda.device_count() != 0
else [False]
)
param_list = []
for use_gpu in use_gpu_list:
for dim in [1, 20]:
for (mode, reduction, constructor) in [
(
"check_idx",
"none",
DataInfluenceConstructor(TracInCP, name="TracInCP_all_layers"),
),
(
"check_idx",
"none",
DataInfluenceConstructor(
TracInCP,
name="TracInCP_fc1",
layers=["module.fc1"] if use_gpu else ["fc1"],
),
),
(
"sample_wise_trick",
None,
DataInfluenceConstructor(TracInCP, name="TracInCP_fc1"),
),
(
"check_idx",
"sum",
DataInfluenceConstructor(
TracInCPFast, name="TracInCPFast_last_fc_layer"
),
),
(
"check_idx",
"sum",
DataInfluenceConstructor(
                        TracInCPFastRandProj, name="TracInCPFastRandProj_last_fc_layer"
),
),
(
"check_idx",
"mean",
DataInfluenceConstructor(
TracInCPFast, name="TracInCPFast_last_fc_layer"
),
),
(
"check_idx",
"mean",
DataInfluenceConstructor(
TracInCPFastRandProj, name="TracInCPFastRandProj_last_fc_layer"
),
),
(
"check_idx",
"sum",
DataInfluenceConstructor(
TracInCPFastRandProj,
name="TracInCPFastRandProj1DimensionalProjection_last_fc_layer",
projection_dim=1,
),
),
(
"check_idx",
"mean",
DataInfluenceConstructor(
TracInCPFast,
name="TracInCPFastDuplicateLossFn",
duplicate_loss_fn=True,
),
), # add a test where `duplicate_loss_fn` is True
(
"check_idx",
"mean",
DataInfluenceConstructor(
TracInCPFastRandProj,
name="TracInCPFastRandProjDuplicateLossFn",
duplicate_loss_fn=True,
), # add a test where `duplicate_loss_fn` is True
),
]:
if not (mode == "sample_wise_trick" and use_gpu):
param_list.append((reduction, constructor, mode, dim, use_gpu))
@parameterized.expand(
param_list,
name_func=build_test_name_func(args_to_skip=["reduction"]),
)
def test_tracin_regression(
self,
reduction: Optional[str],
tracin_constructor: Callable,
mode: str,
features: int,
use_gpu: bool,
) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 4
dataset, net = self._test_tracin_regression_setup(
tmpdir,
features,
use_gpu,
            )
# check influence scores of training data
train_inputs = dataset.samples
train_labels = dataset.labels
test_inputs = (
torch.arange(17, 33, dtype=torch.float).unsqueeze(1).repeat(1, features)
)
if use_gpu:
test_inputs = test_inputs.cuda()
test_labels = test_inputs
self.assertTrue(callable(tracin_constructor))
if mode == "check_idx":
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=cast(str, reduction))
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
)
train_scores = tracin.influence((train_inputs, train_labels))
idx, _ = tracin.influence(
(train_inputs, train_labels), k=len(dataset), proponents=True
)
                # check that the top influence for every test point is the
                # training example with the maximal value (index 15, i.e. the
                # sample with value 16), which also has the largest gradient
for i in range(len(idx)):
self.assertEqual(idx[i][0], 15)
# check influence scores of test data
test_scores = tracin.influence((test_inputs, test_labels))
idx, _ = tracin.influence(
(test_inputs, test_labels), k=len(test_inputs), proponents=True
)
                # check that influence scores are ordered by the training
                # example's value (and hence gradient magnitude), i.e. the
                # proponent indices are sorted
for i in range(len(idx)):
self.assertTrue(_isSorted(idx[i]))
if mode == "sample_wise_trick":
criterion = nn.MSELoss(reduction="none")
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
sample_wise_grads_per_batch=False,
)
# With sample-wise trick
criterion = nn.MSELoss(reduction="sum")
tracin_sample_wise_trick = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
sample_wise_grads_per_batch=True,
)
train_scores = tracin.influence((train_inputs, train_labels))
train_scores_sample_wise_trick = tracin_sample_wise_trick.influence(
(train_inputs, train_labels)
)
assertTensorAlmostEqual(
self, train_scores, train_scores_sample_wise_trick
)
test_scores = tracin.influence((test_inputs, test_labels))
test_scores_sample_wise_trick = tracin_sample_wise_trick.influence(
(test_inputs, test_labels)
)
assertTensorAlmostEqual(
self, test_scores, test_scores_sample_wise_trick
)
@parameterized.expand(
[
(
"sum",
DataInfluenceConstructor(TracInCP, sample_wise_grads_per_batch=True),
),
("sum", DataInfluenceConstructor(TracInCPFast)),
("sum", DataInfluenceConstructor(TracInCPFastRandProj)),
("mean", DataInfluenceConstructor(TracInCPFast)),
("mean", DataInfluenceConstructor(TracInCPFastRandProj)),
],
name_func=build_test_name_func(),
)
def test_tracin_regression_1D_numerical(
self, reduction: str, tracin_constructor: Callable
) -> None:
low = 1
high = 17
features = 1
dataset = RangeDataset(low, high, features)
net = CoefficientNet()
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=cast(str, reduction))
batch_size = 4
weights = [0.4379, 0.1653, 0.5132, 0.3651, 0.9992]
train_inputs = dataset.samples
train_labels = dataset.labels
with tempfile.TemporaryDirectory() as tmpdir:
for i, weight in enumerate(weights):
net.fc1.weight.data.fill_(weight)
checkpoint_name = "-".join(["checkpoint-reg", str(i + 1) + ".pt"])
torch.save(net.state_dict(), os.path.join(tmpdir, checkpoint_name))
self.assertTrue(callable(tracin_constructor))
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
)
train_scores = tracin.influence((train_inputs, train_labels), k=None)
r"""
Derivation for gradient / resulting TracIn score:
For each checkpoint: $y = Wx,$ and $loss = (y - label)^2.$ Recall for this
test case, there is no activation on y. For this example, $label = x.$
Fast Rand Proj gives $\nabla_W loss = \nabla_y loss (x^T).$ We have $x$ and
y as scalars so we can simply multiply. So then,
\[\nabla_y loss * x = 2(y-x)*x = 2(Wx -x)*x = 2x^2 (w - 1).\]
And we simply multiply these for x, x'. In this case, $x, x' \in [1..16]$.
"""
for i in range(train_scores.shape[0]):
for j in range(len(train_scores[0])):
_weights = torch.Tensor(weights)
num = 2 * (i + 1) * (i + 1) * (_weights - 1)
num *= 2 * (j + 1) * (j + 1) * (_weights - 1)
assertTensorAlmostEqual(
self, torch.sum(num), train_scores[i][j], delta=0.1
)
def _test_tracin_identity_regression_setup(self, tmpdir: str):
num_features = 7
dataset = IdentityDataset(num_features)
net = CoefficientNet()
num_checkpoints = 5
for i in range(num_checkpoints):
net.fc1.weight.data = torch.rand((1, num_features))
checkpoint_name = "-".join(["checkpoint-reg", str(i) + ".pt"])
torch.save(net.state_dict(), os.path.join(tmpdir, checkpoint_name))
return dataset, net
@parameterized.expand(
[
("check_idx", "none", DataInfluenceConstructor(TracInCP)),
("check_idx", "none", DataInfluenceConstructor(TracInCP, layers=["fc1"])),
("sample_wise_trick", None, DataInfluenceConstructor(TracInCP)),
(
"sample_wise_trick",
None,
DataInfluenceConstructor(TracInCP, layers=["fc1"]),
),
("check_idx", "sum", DataInfluenceConstructor(TracInCPFast)),
("check_idx", "sum", DataInfluenceConstructor(TracInCPFastRandProj)),
("check_idx", "mean", DataInfluenceConstructor(TracInCPFast)),
("check_idx", "mean", DataInfluenceConstructor(TracInCPFastRandProj)),
],
name_func=build_test_name_func(),
)
def test_tracin_identity_regression(
self, mode: str, reduction: Optional[str], tracin_constructor: Callable
) -> None:
"""
        This test uses a linear model with positive coefficients, where the input
        feature matrix is the identity matrix. Since the dot product between two
        different training instances is always 0, only self-influence scores are
        nonzero when computing influence scores on the training data. Because the
        linear model has positive coefficients, the self-influence scores are
        positive. Thus, the training instance with the largest influence on any
        training instance is that instance itself.
"""
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 4
dataset, net = self._test_tracin_identity_regression_setup(tmpdir)
train_inputs = dataset.samples
train_labels = dataset.labels
self.assertTrue(callable(tracin_constructor))
if mode == "check_idx":
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=cast(str, reduction))
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
)
# check influence scores of training data
train_scores = tracin.influence((train_inputs, train_labels))
idx, _ = tracin.influence(
(train_inputs, train_labels), k=len(dataset), proponents=True
)
# check that top influence for an instance is itself
for i in range(len(idx)):
self.assertEqual(idx[i][0], i)
if mode == "sample_wise_trick":
criterion = nn.MSELoss(reduction="none")
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
sample_wise_grads_per_batch=False,
)
# With sample-wise trick
criterion = nn.MSELoss(reduction="sum")
tracin_sample_wise_trick = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
sample_wise_grads_per_batch=True,
)
train_scores = tracin.influence((train_inputs, train_labels))
train_scores_tracin_sample_wise_trick = (
tracin_sample_wise_trick.influence((train_inputs, train_labels))
)
assertTensorAlmostEqual(
self, train_scores, train_scores_tracin_sample_wise_trick
)
@parameterized.expand(
[
("none", "none", DataInfluenceConstructor(TracInCP)),
(
"mean",
"mean",
DataInfluenceConstructor(TracInCP, sample_wise_grads_per_batch=True),
),
("sum", "sum", DataInfluenceConstructor(TracInCPFast)),
("mean", "mean", DataInfluenceConstructor(TracInCPFast)),
("sum", "sum", DataInfluenceConstructor(TracInCPFastRandProj)),
("mean", "mean", DataInfluenceConstructor(TracInCPFastRandProj)),
],
name_func=build_test_name_func(),
)
def test_tracin_constant_test_loss_fn(
self,
reduction: Optional[str],
test_reduction: Optional[str],
tracin_constructor: Callable,
) -> None:
"""
        All implementations of `TracInCPBase` accept `test_loss_fn` at
        initialization, which sets the loss function applied to test examples and
        can therefore differ from the loss function applied to training examples.
        This test passes a constant function as `test_loss_fn`. The influence
        scores should then all be 0, because gradients w.r.t. `test_loss_fn` are all 0.
        It reuses the dataset and model from `test_tracin_identity_regression`.
        The reduction for the `loss_fn` and `test_loss_fn` initialization arguments is
        the same for all parameterized tests, for simplicity, and also because for
        `TracInCP`, both loss functions must either be reduction loss functions (i.e.
        reduction is "mean" or "sum") or both be per-example loss functions (i.e.
        reduction is "none"). Recall that for `TracInCP`, the
        `sample_wise_grads_per_batch` initialization argument determines which of
        those cases holds.
"""
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 4
dataset, net = self._test_tracin_identity_regression_setup(tmpdir)
train_inputs = dataset.samples
train_labels = dataset.labels
self.assertTrue(callable(tracin_constructor))
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=cast(str, reduction))
# the output of `net`, i.e. `input` for the loss functions below, is a
# batch_size x 1 2D tensor
if test_reduction == "none":
# loss function returns 1D tensor of all 0's, so is constant
def test_loss_fn(input, target):
return input.squeeze() * 0.0
elif test_reduction in ["sum", "mean"]:
# loss function returns scalar tensor of all 0's, so is constant
def test_loss_fn(input, target):
return input.mean() * 0.0
tracin = tracin_constructor(
net,
dataset,
tmpdir,
batch_size,
criterion,
test_loss_fn=test_loss_fn,
)
# check influence scores of training data. they should all be 0
train_scores = tracin.influence((train_inputs, train_labels), k=None)
assertTensorAlmostEqual(self, train_scores, torch.zeros(train_scores.shape))
|
import io
import tempfile
import unittest
import unittest.mock
from typing import Callable
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import TracInCPFast
from parameterized import parameterized
from tests.helpers.basic import BaseTest
from tests.influence._utils.common import (
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
from torch.utils.data import DataLoader
class TestTracInShowProgress(BaseTest):
"""
This tests that the progress bar correctly shows a "100%" message at some point in
the relevant computations. Progress bars are shown for calls to the `influence`
method for all 3 modes. This is why 3 different modes are tested, and the mode
    being tested is a parameter of the test. `TracInCPFastRandProj.influence` is not
    tested, because none of its modes involve computations over the entire training
    dataset, so no progress bar is shown (the computation is instead done in
    `TracInCPFastRandProj.__init__`; TODO: add a progress bar for the computations
    done in `TracInCPFastRandProj.__init__`).
"""
def _check_error_msg_multiplicity(
self,
mock_stderr: io.StringIO,
msg: str,
msg_multiplicity: int,
greater_than: bool = True,
):
"""
        Checks how many times the message `msg` occurs in `mock_stderr`. If
        `greater_than` is True, it checks that `msg` occurs at least
        `msg_multiplicity` times. Otherwise, it checks that `msg` occurs exactly
        `msg_multiplicity` times. The reason `greater_than` defaults to True is
        that tqdm sometimes displays "100%" more than once per progress bar when
        it corrects its estimate of it/s: it may remove the original "100%" and
        then re-display "100%" with the updated estimate.
"""
output = mock_stderr.getvalue()
actual_msg_multiplicity = output.count(msg)
assert isinstance(actual_msg_multiplicity, int)
error_msg = (
f"Error in progress of batches with output looking for '{msg}'"
f" at least {msg_multiplicity} times"
f"(found {actual_msg_multiplicity}) in {repr(output)}"
)
if greater_than:
self.assertGreaterEqual(
actual_msg_multiplicity, msg_multiplicity, error_msg
)
else:
self.assertEqual(
actual_msg_multiplicity,
msg_multiplicity,
error_msg,
)
@parameterized.expand(
[
(
reduction,
constr,
mode,
)
for reduction, constr in [
(
"none",
DataInfluenceConstructor(TracInCP),
),
(
"sum",
DataInfluenceConstructor(TracInCPFast),
),
]
for mode in [
"self influence by checkpoints",
"self influence by batches",
"influence",
"k-most",
]
],
name_func=build_test_name_func(args_to_skip=["reduction"]),
)
def test_tracin_show_progress(
self,
reduction: str,
tracin_constructor: Callable,
mode: str,
) -> None:
with unittest.mock.patch("sys.stderr", new_callable=io.StringIO) as mock_stderr:
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 5
(
net,
train_dataset,
test_samples,
test_labels,
) = get_random_model_and_data(
tmpdir, unpack_inputs=False, return_test_data=True
)
self.assertTrue(isinstance(reduction, str))
criterion = nn.MSELoss(reduction=reduction)
self.assertTrue(callable(tracin_constructor))
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
if mode == "self influence by checkpoints":
# this tests progress for computing self influence scores, when
# `outer_loop_by_checkpoints` is True. In this case, we should see a
# single outer progress bar over checkpoints, and for every
                    # checkpoint, a separate progress bar over batches
tracin.self_influence(
DataLoader(train_dataset, batch_size=batch_size),
show_progress=True,
outer_loop_by_checkpoints=True,
)
# We are showing nested progress bars for the `self_influence`
# method, with the outer progress bar over checkpoints, and
# the inner progress bar over batches. First, we check that
# the outer progress bar reaches 100% once
self._check_error_msg_multiplicity(
mock_stderr,
(
f"Using {tracin.get_name()} to compute self influence. "
"Processing checkpoint: 100%"
),
1,
)
# Second, we check that the inner progress bar reaches 100%
# once for each checkpoint in `tracin.checkpoints`
self._check_error_msg_multiplicity(
mock_stderr,
(
f"Using {tracin.get_name()} to compute self influence. "
"Processing batch: 100%"
),
len(tracin.checkpoints),
)
elif mode == "self influence by batches":
# This tests progress for computing self influence scores, when
# `outer_loop_by_checkpoints` is False. In this case, we should see
# a single outer progress bar over batches.
tracin.self_influence(
DataLoader(train_dataset, batch_size=batch_size),
show_progress=True,
outer_loop_by_checkpoints=False,
)
self._check_error_msg_multiplicity(
mock_stderr,
(
f"Using {tracin.get_name()} to compute self influence. "
"Processing batch: 100%"
),
1,
)
elif mode == "influence":
tracin.influence(
(test_samples, test_labels),
k=None,
show_progress=True,
)
# Since the computation iterates once over training batches, we
# check that the progress bar over batches reaches 100% once
self._check_error_msg_multiplicity(
mock_stderr,
(
f"Using {tracin.get_name()} to compute influence "
"for training batches: 100%"
),
1,
)
elif mode == "k-most":
tracin.influence(
(test_samples, test_labels),
k=2,
proponents=True,
show_progress=True,
)
# Since the computation iterates once over training batches, we
# check that the progress bar over batches reaches 100% once, and
# that the message is specific for finding proponents.
self._check_error_msg_multiplicity(
mock_stderr,
(
f"Using {tracin.get_name()} to perform computation for "
"getting proponents. Processing training batches: 100%"
),
1,
)
mock_stderr.seek(0)
mock_stderr.truncate(0)
tracin.influence(
(test_samples, test_labels),
k=2,
proponents=False,
show_progress=True,
)
# Since the computation iterates once over training batches, we
# check that the progress bar over batches reaches 100% once, and
# that the message is specific for finding opponents.
self._check_error_msg_multiplicity(
mock_stderr,
(
f"Using {tracin.get_name()} to perform computation for "
"getting opponents. Processing training batches: 100%"
),
1,
)
else:
raise Exception("unknown test mode")
mock_stderr.seek(0)
mock_stderr.truncate(0)
|
import tempfile
from typing import Callable
import torch
import torch.nn as nn
from captum.influence._core.tracincp import TracInCP
from captum.influence._core.tracincp_fast_rand_proj import (
TracInCPFast,
TracInCPFastRandProj,
)
from parameterized import parameterized
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.influence._utils.common import (
_format_batch_into_tuple,
build_test_name_func,
DataInfluenceConstructor,
get_random_model_and_data,
)
from torch.utils.data import DataLoader
class TestTracInIntermediateQuantities(BaseTest):
@parameterized.expand(
[
(reduction, constructor, unpack_inputs)
for unpack_inputs in [True, False]
for (reduction, constructor) in [
("none", DataInfluenceConstructor(TracInCP)),
]
],
name_func=build_test_name_func(),
)
def test_tracin_intermediate_quantities_aggregate(
self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
) -> None:
"""
        tests that calling `compute_intermediate_quantities` with `aggregate=True`
        gives the same result as calling it with `aggregate=False` and then
        summing over examples
"""
with tempfile.TemporaryDirectory() as tmpdir:
(net, train_dataset,) = get_random_model_and_data(
tmpdir,
unpack_inputs,
return_test_data=False,
)
# create a dataloader that yields batches from the dataset
train_dataset = DataLoader(train_dataset, batch_size=5)
# create tracin instance
criterion = nn.MSELoss(reduction=reduction)
batch_size = 5
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
intermediate_quantities = tracin.compute_intermediate_quantities(
train_dataset, aggregate=False
)
aggregated_intermediate_quantities = tracin.compute_intermediate_quantities(
train_dataset, aggregate=True
)
assertTensorAlmostEqual(
self,
torch.sum(intermediate_quantities, dim=0, keepdim=True),
aggregated_intermediate_quantities,
delta=1e-4, # due to numerical issues, we can't set this to 0.0
mode="max",
)
@parameterized.expand(
[
(reduction, constructor, unpack_inputs)
for unpack_inputs in [True, False]
for (reduction, constructor) in [
("sum", DataInfluenceConstructor(TracInCPFastRandProj)),
("none", DataInfluenceConstructor(TracInCP)),
]
],
name_func=build_test_name_func(),
)
def test_tracin_intermediate_quantities_api(
self, reduction: str, tracin_constructor: Callable, unpack_inputs: bool
) -> None:
"""
tests that the result of calling the public method
`compute_intermediate_quantities` for a DataLoader of batches is the same as
when the batches are collated into a single batch
"""
with tempfile.TemporaryDirectory() as tmpdir:
(net, train_dataset,) = get_random_model_and_data(
tmpdir,
unpack_inputs,
return_test_data=False,
)
# create a single batch representing the entire dataset
single_batch = next(
iter(DataLoader(train_dataset, batch_size=len(train_dataset)))
)
# create a dataloader that yields batches from the dataset
dataloader = DataLoader(train_dataset, batch_size=5)
# create tracin instance
criterion = nn.MSELoss(reduction=reduction)
batch_size = 5
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
# compute intermediate quantities using `compute_intermediate_quantities`
# when passing in a single batch
single_batch_intermediate_quantities = (
tracin.compute_intermediate_quantities(single_batch)
)
# compute intermediate quantities using `compute_intermediate_quantities`
# when passing in a dataloader with the same examples
dataloader_intermediate_quantities = tracin.compute_intermediate_quantities(
dataloader,
)
            # the two sets of intermediate quantities should be equal
assertTensorAlmostEqual(
self,
single_batch_intermediate_quantities,
dataloader_intermediate_quantities,
delta=0.01, # due to numerical issues, we can't set this to 0.0
mode="max",
)
@parameterized.expand(
[
(
reduction,
constructor,
intermediate_quantities_tracin_constructor,
unpack_inputs,
)
for unpack_inputs in [True, False]
for (
reduction,
constructor,
intermediate_quantities_tracin_constructor,
) in [
(
"sum",
DataInfluenceConstructor(TracInCPFast),
DataInfluenceConstructor(TracInCPFastRandProj),
),
(
"none",
DataInfluenceConstructor(TracInCP),
DataInfluenceConstructor(TracInCP),
),
]
],
name_func=build_test_name_func(),
)
def test_tracin_intermediate_quantities_consistent(
self,
reduction: str,
tracin_constructor: Callable,
intermediate_quantities_tracin_constructor: Callable,
unpack_inputs: bool,
) -> None:
"""
        Since the influence score of a test batch on training data should be the dot
        product of their intermediate quantities, this checks that this is the case
        by computing the influence scores in 2 different ways and verifying they give
        the same results: 1) with the `influence` method, and 2) by calling the
        `compute_intermediate_quantities` method on the test and training data and
        taking the dot product. No projection should be done; otherwise the
        projection would introduce error. For 1), we use an implementation that does
        not use intermediate quantities, i.e. `TracInCPFast`. For 2), we use an
        implementation that does use intermediate quantities, i.e.
        `TracInCPFastRandProj`. Since the implementations for the 2 cases are
        different, we parametrize the test with 2 different tracin constructors:
        `tracin_constructor` is the constructor for case 1, and
        `intermediate_quantities_tracin_constructor` is the constructor for case 2.
"""
with tempfile.TemporaryDirectory() as tmpdir:
(
net,
train_dataset,
test_features,
test_labels,
) = get_random_model_and_data(tmpdir, unpack_inputs, return_test_data=True)
# create a dataloader that yields batches from the dataset
train_dataset = DataLoader(train_dataset, batch_size=5)
# create tracin instance
criterion = nn.MSELoss(reduction=reduction)
batch_size = 5
tracin = tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
# create tracin instance which exposes `intermediate_quantities`
intermediate_quantities_tracin = intermediate_quantities_tracin_constructor(
net,
train_dataset,
tmpdir,
batch_size,
criterion,
)
# compute influence scores without using `compute_intermediate_quantities`
test_batch = _format_batch_into_tuple(
test_features, test_labels, unpack_inputs
)
scores = tracin.influence(
test_batch,
)
# the influence score is the dot product of intermediate quantities
intermediate_quantities_scores = torch.matmul(
intermediate_quantities_tracin.compute_intermediate_quantities(
test_batch
),
intermediate_quantities_tracin.compute_intermediate_quantities(
train_dataset
).T,
)
# the scores computed using the two methods should be the same
assertTensorAlmostEqual(
self,
scores,
intermediate_quantities_scores,
delta=0.01, # due to numerical issues, we can't set this to 0.0
mode="max",
)
@parameterized.expand(
[
(reduction, constructor, projection_dim, unpack_inputs)
for unpack_inputs in [False]
for (reduction, constructor, projection_dim) in [
("sum", DataInfluenceConstructor(TracInCPFastRandProj), None),
("sum", DataInfluenceConstructor(TracInCPFastRandProj), 2),
("sum", DataInfluenceConstructor(TracInCPFastRandProj), 4),
("sum", DataInfluenceConstructor(TracInCPFastRandProj), 9),
("sum", DataInfluenceConstructor(TracInCPFastRandProj), 10),
("sum", DataInfluenceConstructor(TracInCPFastRandProj), 12),
]
],
name_func=build_test_name_func(),
)
def test_tracin_intermediate_quantities_projection_consistency(
self,
reduction: str,
tracin_constructor: Callable,
projection_dim: int,
unpack_inputs: bool,
) -> None:
"""
        tests that calling the public method `compute_intermediate_quantities` on
        TracInCPFastRandProj, with or without `projection_dim`, gives an embedding
        of the correct size. If `projection_dim` is None, the size should be
        (dim of input to final layer) * (num classes) * (num checkpoints);
        otherwise it should be "at most" projection_dim * num checkpoints.
        See the inline comments for the "at most" caveat.
"""
with tempfile.TemporaryDirectory() as tmpdir:
(net, train_dataset,) = get_random_model_and_data(
tmpdir,
unpack_inputs,
return_test_data=False,
)
# create a single batch
batch_size = 1
single_batch = next(iter(DataLoader(train_dataset, batch_size=batch_size)))
# NOW add projection_dim as a parameter passed in
kwargs = {"projection_dim": projection_dim}
# create tracin instance
criterion = nn.MSELoss(reduction=reduction)
tracin = tracin_constructor(
net, train_dataset, tmpdir, batch_size, criterion, **kwargs
)
# compute intermediate quantities using `compute_intermediate_quantities`
# when passing in a single batch
single_batch_intermediate_quantities = (
tracin.compute_intermediate_quantities(single_batch)
)
"""
net has
in_features = 5,
hidden_nodes (layer_input_dim) = 4,
out_features (jacobian_dim) = 3
and 5 checkpoints
projection only happens
(A) if project_dim < layer_input_dim * jacobian_dim ( 4 * 3 = 12 here )
also if jacobian_dim < int(sqrt(projection dim)),
then jacobian_dim is not projected down
similarly if layer_input_dim < int(sqrt(projection dim)),
then it is not projected down
in other words,
jacobian_dim_post = min(jacobian_dim, int(sqrt(projection dim)))
layer_input_dim_post = min(layer_input_dim, int(sqrt(projection dim)))
and if not None and projection_dim < layer_input_dim * jacobian_dim
(B) final_projection_dim =
jacobian_dim_post * layer_input_dim_post * num_checkpoints
if project dim = None we expect final dimension size of
layer_input * jacobian_dim * num checkpoints = 4 * 3 * 5 = 60 dimension
otherwise using (B) if
project dim = 2 we expect 1 * 1 * 5 = 5
project dim = 4 we expect 2 * 2 * 5 = 20
project dim = 9 we expect 3 * 3 * 5 = 45
project dim = 10 we expect 3 * 3 * 5 = 45
project dim = 12 we expect 4 * 3 * 5 = 60 ( don't project since not (A))
"""
expected_dim = {None: 60, 2: 5, 4: 20, 9: 45, 10: 45, 12: 60}
self.assertEqual(
expected_dim[projection_dim],
single_batch_intermediate_quantities.shape[1],
)
|
import tempfile
from typing import List
import torch
import torch.nn as nn
from captum.influence._core.similarity_influence import (
cosine_similarity,
euclidean_distance,
SimilarityInfluence,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from torch.utils.data import Dataset
class BasicLinearNet(nn.Module):
def __init__(self, num_features) -> None:
super().__init__()
self.fc1 = nn.Linear(num_features, 5, bias=False)
self.fc1.weight.data.fill_(0.02)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(5, 1, bias=False)
self.fc2.weight.data.fill_(0.02)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
return x
class RangeDataset(Dataset):
def __init__(self, low, high, num_features) -> None:
self.samples = (
torch.arange(start=low, end=high, dtype=torch.float)
.repeat(num_features, 1)
.transpose(1, 0)
)
def __len__(self) -> int:
return len(self.samples)
def __getitem__(self, idx):
return self.samples[idx]
class Test(BaseTest):
def test_cosine_with_zeros(self) -> None:
a = torch.cat((torch.zeros((1, 3, 16, 16)), torch.rand((1, 3, 16, 16))))
b = torch.rand((2, 3, 16, 16))
similarity = cosine_similarity(a, b)
self.assertFalse(torch.any(torch.isnan(similarity)))
def test_correct_influences_standard(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
batch_size = high // 2
mymodel = BasicLinearNet(num_features)
mydata = RangeDataset(low, high, num_features)
layers = []
for name, _module in mymodel.named_modules():
layers.append(name)
layers: List[str] = list(filter(None, layers))
testlayers = layers[1:]
sim = SimilarityInfluence(
mymodel,
testlayers,
mydata,
tmpdir,
"linear",
batch_size=batch_size,
similarity_metric=euclidean_distance,
similarity_direction="min",
)
inputs = torch.stack((mydata[1], mydata[8], mydata[14]))
influences = sim.influence(inputs, top_k=3)
self.assertEqual(len(influences), len(testlayers))
assertTensorAlmostEqual(
self,
torch.sum(influences[layers[1]][0], 1),
torch.sum(torch.Tensor([[1, 0, 2], [8, 7, 9], [14, 15, 13]]), 1),
)
assertTensorAlmostEqual(
self,
torch.sum(influences[layers[2]][0], 1),
torch.sum(torch.Tensor([[1, 0, 2], [8, 7, 9], [14, 15, 13]]), 1),
)
def test_correct_influences_batch_single(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
batch_size = 1
mymodel = BasicLinearNet(num_features)
mydata = RangeDataset(low, high, num_features)
layers = []
for name, _module in mymodel.named_modules():
layers.append(name)
layers: List[str] = list(filter(None, layers))
testlayers = layers[1:]
sim = SimilarityInfluence(
mymodel,
testlayers,
mydata,
tmpdir,
"linear",
batch_size=batch_size,
similarity_metric=euclidean_distance,
similarity_direction="min",
)
inputs = torch.stack((mydata[1], mydata[8], mydata[14]))
influences = sim.influence(inputs, top_k=3)
self.assertEqual(len(influences), len(testlayers))
assertTensorAlmostEqual(
self,
torch.sum(influences[layers[1]][0], 1),
torch.sum(torch.Tensor([[1, 0, 2], [8, 7, 9], [14, 15, 13]]), 1),
)
assertTensorAlmostEqual(
self,
torch.sum(influences[layers[2]][0], 1),
torch.sum(torch.Tensor([[1, 0, 2], [8, 7, 9], [14, 15, 13]]), 1),
)
def test_correct_influences_batch_overflow(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
batch_size = 12
mymodel = BasicLinearNet(num_features)
mydata = RangeDataset(low, high, num_features)
layers = []
for name, _module in mymodel.named_modules():
layers.append(name)
layers: List[str] = list(filter(None, layers))
testlayers = layers[1:]
sim = SimilarityInfluence(
mymodel,
testlayers,
mydata,
tmpdir,
"linear",
batch_size=batch_size,
similarity_metric=euclidean_distance,
similarity_direction="min",
)
inputs = torch.stack((mydata[1], mydata[8], mydata[14]))
influences = sim.influence(inputs, top_k=3)
self.assertEqual(len(influences), len(testlayers))
assertTensorAlmostEqual(
self,
torch.sum(influences[layers[1]][0], 1),
torch.sum(torch.Tensor([[1, 0, 2], [8, 7, 9], [14, 15, 13]]), 1),
)
assertTensorAlmostEqual(
self,
torch.sum(influences[layers[2]][0], 1),
torch.sum(torch.Tensor([[1, 0, 2], [8, 7, 9], [14, 15, 13]]), 1),
)
def test_zero_activations(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
batch_size = high // 2
mymodel = BasicLinearNet(num_features)
mydata = RangeDataset(low, high, num_features)
layers = []
for name, _module in mymodel.named_modules():
layers.append(name)
layers: List[str] = list(filter(None, layers))
testlayers = layers[1:]
sim1 = SimilarityInfluence(
mymodel, testlayers, mydata, tmpdir, "linear", batch_size=batch_size
)
inputs = torch.stack((mydata[1], mydata[8], mydata[14]))
influences = sim1.influence(inputs, top_k=3)
self.assertEqual(len(influences), len(layers[1:]) + 1) # zero_acts included
self.assertTrue("zero_acts-fc2" in influences)
|
#!/usr/bin/env fbpython
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
import unittest
import torch
from captum.module.gaussian_stochastic_gates import GaussianStochasticGates
from parameterized import parameterized_class
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
@parameterized_class(
[
{"testing_device": "cpu"},
{"testing_device": "cuda"},
]
)
class TestGaussianStochasticGates(BaseTest):
def setUp(self) -> None:
super().setUp()
if self.testing_device == "cuda" and not torch.cuda.is_available():
raise unittest.SkipTest("Skipping GPU test since CUDA not available.")
def test_gstg_1d_input(self) -> None:
dim = 3
gstg = GaussianStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gated_input, reg = gstg(input_tensor)
expected_reg = 2.5213
if self.testing_device == "cpu":
expected_gated_input = [[0.0000, 0.0198, 0.1483], [0.1848, 0.3402, 0.1782]]
elif self.testing_device == "cuda":
expected_gated_input = [[0.0000, 0.0788, 0.0470], [0.0134, 0.0000, 0.1884]]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_gstg_1d_input_with_reg_reduction(self) -> None:
dim = 3
mean_gstg = GaussianStochasticGates(dim, reg_reduction="mean").to(
self.testing_device
)
none_gstg = GaussianStochasticGates(dim, reg_reduction="none").to(
self.testing_device
)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
_, mean_reg = mean_gstg(input_tensor)
_, none_reg = none_gstg(input_tensor)
expected_mean_reg = 0.8404
expected_none_reg = torch.tensor([0.8424, 0.8384, 0.8438])
assertTensorAlmostEqual(self, mean_reg, expected_mean_reg)
assertTensorAlmostEqual(self, none_reg, expected_none_reg)
def test_gstg_1d_input_with_n_gates_error(self) -> None:
dim = 3
gstg = GaussianStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor([0.0, 0.1, 0.2]).to(self.testing_device)
with self.assertRaises(AssertionError):
gstg(input_tensor)
def test_gstg_1d_input_with_mask(self) -> None:
dim = 2
mask = torch.tensor([0, 0, 1]).to(self.testing_device)
gstg = GaussianStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gated_input, reg = gstg(input_tensor)
expected_reg = 1.6849
if self.testing_device == "cpu":
expected_gated_input = [[0.0000, 0.0000, 0.1225], [0.0583, 0.0777, 0.3779]]
elif self.testing_device == "cuda":
expected_gated_input = [[0.0000, 0.0000, 0.1577], [0.0736, 0.0981, 0.0242]]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_gates_values_matching_dim_when_eval(self) -> None:
dim = 3
gstg = GaussianStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gstg.train(False)
gated_input, reg = gstg(input_tensor)
assert gated_input.shape == input_tensor.shape
def test_gstg_2d_input(self) -> None:
dim = 3 * 2
gstg = GaussianStochasticGates(dim).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gated_input, reg = gstg(input_tensor)
expected_reg = 5.0458
if self.testing_device == "cpu":
expected_gated_input = [
[[0.0000, 0.0851], [0.0713, 0.3000], [0.2180, 0.1878]],
[[0.2538, 0.0000], [0.3391, 0.8501], [0.3633, 0.8913]],
]
elif self.testing_device == "cuda":
expected_gated_input = [
[[0.0000, 0.0788], [0.0470, 0.0139], [0.0000, 0.1960]],
[[0.0000, 0.7000], [0.1052, 0.2120], [0.5978, 0.0166]],
]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_gstg_2d_input_with_n_gates_error(self) -> None:
dim = 5
gstg = GaussianStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
]
).to(self.testing_device)
with self.assertRaises(AssertionError):
gstg(input_tensor)
def test_gstg_2d_input_with_mask(self) -> None:
dim = 3
mask = torch.tensor(
[
[0, 1],
[1, 1],
[0, 2],
]
).to(self.testing_device)
gstg = GaussianStochasticGates(dim, mask=mask).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gated_input, reg = gstg(input_tensor)
expected_reg = 2.5213
if self.testing_device == "cpu":
expected_gated_input = [
[[0.0000, 0.0198], [0.0396, 0.0594], [0.2435, 0.3708]],
[[0.3696, 0.5954], [0.6805, 0.7655], [0.6159, 0.3921]],
]
elif self.testing_device == "cuda":
expected_gated_input = [
[[0.0000, 0.0788], [0.1577, 0.2365], [0.0000, 0.1174]],
[[0.0269, 0.0000], [0.0000, 0.0000], [0.0448, 0.4145]],
]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_get_gate_values_1d_input(self) -> None:
dim = 3
gstg = GaussianStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gstg(input_tensor)
gate_values = gstg.get_gate_values()
expected_gate_values = [0.5005, 0.5040, 0.4899]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_1d_input_with_mask(self) -> None:
dim = 2
mask = torch.tensor([0, 1, 1])
gstg = GaussianStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gstg(input_tensor)
gate_values = gstg.get_gate_values()
expected_gate_values = [0.5005, 0.5040]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_2d_input(self) -> None:
dim = 3 * 2
gstg = GaussianStochasticGates(dim).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gstg(input_tensor)
gate_values = gstg.get_gate_values()
expected_gate_values = [0.5005, 0.5040, 0.4899, 0.5022, 0.4939, 0.5050]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_2d_input_with_mask(self) -> None:
dim = 3
mask = torch.tensor(
[
[0, 1],
[1, 1],
[0, 2],
]
)
gstg = GaussianStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gstg(input_tensor)
gate_values = gstg.get_gate_values()
expected_gate_values = [0.5005, 0.5040, 0.4899]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_clamp(self) -> None:
gstg = GaussianStochasticGates._from_pretrained(
torch.tensor([2.0, -2.0, 2.0])
).to(self.testing_device)
clamped_gate_values = gstg.get_gate_values().cpu().tolist()
assert clamped_gate_values == [1.0, 0.0, 1.0]
unclamped_gate_values = gstg.get_gate_values(clamp=False).cpu().tolist()
assert (
unclamped_gate_values[0] > 1
and unclamped_gate_values[1] < 0
and unclamped_gate_values[2] > 1
)
def test_get_gate_active_probs_1d_input(self) -> None:
dim = 3
gstg = GaussianStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gstg(input_tensor)
gate_active_probs = gstg.get_gate_active_probs()
expected_gate_active_probs = [0.8416, 0.8433, 0.8364]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_get_gate_active_probs_1d_input_with_mask(self) -> None:
dim = 2
mask = torch.tensor([0, 1, 1])
gstg = GaussianStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gstg(input_tensor)
gate_active_probs = gstg.get_gate_active_probs()
expected_gate_active_probs = [0.8416, 0.8433]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_get_gate_active_probs_2d_input(self) -> None:
dim = 3 * 2
gstg = GaussianStochasticGates(dim).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gstg(input_tensor)
gate_active_probs = gstg.get_gate_active_probs()
expected_gate_active_probs = [0.8416, 0.8433, 0.8364, 0.8424, 0.8384, 0.8438]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_get_gate_active_probs_2d_input_with_mask(self) -> None:
dim = 3
mask = torch.tensor(
[
[0, 1],
[1, 1],
[0, 2],
]
)
gstg = GaussianStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gstg(input_tensor)
gate_active_probs = gstg.get_gate_active_probs()
expected_gate_active_probs = [0.8416, 0.8433, 0.8364]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_from_pretrained(self) -> None:
mu = torch.tensor([0.1, 0.2, 0.3, 0.4])
kwargs = {
"mask": torch.tensor([0, 1, 1, 0, 2, 3]),
"reg_weight": 0.1,
"std": 0.01,
}
stg = GaussianStochasticGates._from_pretrained(mu, **kwargs)
for key, expected_val in kwargs.items():
val = getattr(stg, key)
if isinstance(expected_val, torch.Tensor):
assertTensorAlmostEqual(self, val, expected_val, mode="max")
else:
assert val == expected_val
|
#!/usr/bin/env python3
import unittest
import torch
from captum.module.binary_concrete_stochastic_gates import BinaryConcreteStochasticGates
from parameterized import parameterized_class
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
@parameterized_class(
[
{"testing_device": "cpu"},
{"testing_device": "cuda"},
]
)
class TestBinaryConcreteStochasticGates(BaseTest):
def setUp(self):
super().setUp()
if self.testing_device == "cuda" and not torch.cuda.is_available():
raise unittest.SkipTest("Skipping GPU test since CUDA not available.")
def test_bcstg_1d_input(self) -> None:
dim = 3
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gated_input, reg = bcstg(input_tensor)
expected_reg = 2.4947
if self.testing_device == "cpu":
expected_gated_input = [[0.0000, 0.0212, 0.1892], [0.1839, 0.3753, 0.4937]]
elif self.testing_device == "cuda":
expected_gated_input = [[0.0000, 0.0985, 0.1149], [0.2329, 0.0497, 0.5000]]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_bcstg_1d_input_with_reg_reduction(self) -> None:
dim = 3
mean_bcstg = BinaryConcreteStochasticGates(dim, reg_reduction="mean").to(
self.testing_device
)
none_bcstg = BinaryConcreteStochasticGates(dim, reg_reduction="none").to(
self.testing_device
)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
mean_gated_input, mean_reg = mean_bcstg(input_tensor)
none_gated_input, none_reg = none_bcstg(input_tensor)
expected_mean_reg = 0.8316
expected_none_reg = torch.tensor([0.8321, 0.8310, 0.8325])
assertTensorAlmostEqual(self, mean_reg, expected_mean_reg)
assertTensorAlmostEqual(self, none_reg, expected_none_reg)
def test_bcstg_1d_input_with_n_gates_error(self) -> None:
dim = 3
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor([0.0, 0.1, 0.2]).to(self.testing_device)
with self.assertRaises(AssertionError):
bcstg(input_tensor)
def test_bcstg_num_mask_not_equal_dim_error(self) -> None:
dim = 3
mask = torch.tensor([0, 0, 1]) # only two distinct masks, but given dim is 3
with self.assertRaises(AssertionError):
BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
def test_gates_values_matching_dim_when_eval(self) -> None:
dim = 3
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
bcstg.train(False)
gated_input, reg = bcstg(input_tensor)
assert gated_input.shape == input_tensor.shape
def test_bcstg_1d_input_with_mask(self) -> None:
dim = 2
mask = torch.tensor([0, 0, 1]).to(self.testing_device)
bcstg = BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
gated_input, reg = bcstg(input_tensor)
expected_reg = 1.6643
if self.testing_device == "cpu":
expected_gated_input = [[0.0000, 0.0000, 0.1679], [0.0000, 0.0000, 0.2223]]
elif self.testing_device == "cuda":
expected_gated_input = [[0.0000, 0.0000, 0.1971], [0.1737, 0.2317, 0.3888]]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_bcstg_2d_input(self) -> None:
dim = 3 * 2
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gated_input, reg = bcstg(input_tensor)
expected_reg = 4.9903
if self.testing_device == "cpu":
expected_gated_input = [
[[0.0000, 0.0990], [0.0261, 0.2431], [0.0551, 0.3863]],
[[0.0476, 0.6177], [0.5400, 0.1530], [0.0984, 0.8013]],
]
elif self.testing_device == "cuda":
expected_gated_input = [
[[0.0000, 0.0985], [0.1149, 0.2331], [0.0486, 0.5000]],
[[0.1840, 0.1571], [0.4612, 0.7937], [0.2975, 0.7393]],
]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_bcstg_2d_input_with_n_gates_error(self) -> None:
dim = 5
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
]
).to(self.testing_device)
with self.assertRaises(AssertionError):
bcstg(input_tensor)
def test_bcstg_2d_input_with_mask(self) -> None:
dim = 3
mask = torch.tensor(
[
[0, 1],
[1, 1],
[0, 2],
]
).to(self.testing_device)
bcstg = BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
gated_input, reg = bcstg(input_tensor)
expected_reg = 2.4947
if self.testing_device == "cpu":
expected_gated_input = [
[[0.0000, 0.0212], [0.0424, 0.0636], [0.3191, 0.4730]],
[[0.3678, 0.6568], [0.7507, 0.8445], [0.6130, 1.0861]],
]
elif self.testing_device == "cuda":
expected_gated_input = [
[[0.0000, 0.0985], [0.1971, 0.2956], [0.0000, 0.2872]],
[[0.4658, 0.0870], [0.0994, 0.1119], [0.7764, 1.1000]],
]
assertTensorAlmostEqual(self, gated_input, expected_gated_input, mode="max")
assertTensorAlmostEqual(self, reg, expected_reg)
def test_get_gate_values_1d_input(self) -> None:
dim = 3
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_values = bcstg.get_gate_values()
expected_gate_values = [0.5001, 0.5012, 0.4970]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_1d_input_with_mask(self) -> None:
dim = 2
mask = torch.tensor([0, 1, 1])
bcstg = BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_values = bcstg.get_gate_values()
expected_gate_values = [0.5001, 0.5012]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_2d_input(self) -> None:
dim = 3 * 2
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_values = bcstg.get_gate_values()
expected_gate_values = [0.5001, 0.5012, 0.4970, 0.5007, 0.4982, 0.5015]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_values_clamp(self) -> None:
        # enlarge the bounds & use extreme log_alpha values to push gate values
        # beyond 0 & 1
bcstg = BinaryConcreteStochasticGates._from_pretrained(
torch.tensor([10.0, -10.0, 10.0]), lower_bound=-2, upper_bound=2
).to(self.testing_device)
clamped_gate_values = bcstg.get_gate_values().cpu().tolist()
assert clamped_gate_values == [1.0, 0.0, 1.0]
unclamped_gate_values = bcstg.get_gate_values(clamp=False).cpu().tolist()
assert (
unclamped_gate_values[0] > 1
and unclamped_gate_values[1] < 0
and unclamped_gate_values[2] > 1
)
def test_get_gate_values_2d_input_with_mask(self) -> None:
dim = 3
mask = torch.tensor(
[
[0, 1],
[1, 1],
[0, 2],
]
)
bcstg = BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_values = bcstg.get_gate_values()
expected_gate_values = [0.5001, 0.5012, 0.4970]
assertTensorAlmostEqual(self, gate_values, expected_gate_values, mode="max")
def test_get_gate_active_probs_1d_input(self) -> None:
dim = 3
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_active_probs = bcstg.get_gate_active_probs()
expected_gate_active_probs = [0.8319, 0.8324, 0.8304]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_get_gate_active_probs_1d_input_with_mask(self) -> None:
dim = 2
mask = torch.tensor([0, 1, 1])
bcstg = BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[0.0, 0.1, 0.2],
[0.3, 0.4, 0.5],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_active_probs = bcstg.get_gate_active_probs()
expected_gate_active_probs = [0.8319, 0.8324]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_get_gate_active_probs_2d_input(self) -> None:
dim = 3 * 2
bcstg = BinaryConcreteStochasticGates(dim).to(self.testing_device)
# shape(2,3,2)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_active_probs = bcstg.get_gate_active_probs()
expected_gate_active_probs = [0.8319, 0.8324, 0.8304, 0.8321, 0.8310, 0.8325]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_get_gate_active_probs_2d_input_with_mask(self) -> None:
dim = 3
mask = torch.tensor(
[
[0, 1],
[1, 1],
[0, 2],
]
)
bcstg = BinaryConcreteStochasticGates(dim, mask=mask).to(self.testing_device)
input_tensor = torch.tensor(
[
[
[0.0, 0.1],
[0.2, 0.3],
[0.4, 0.5],
],
[
[0.6, 0.7],
[0.8, 0.9],
[1.0, 1.1],
],
]
).to(self.testing_device)
bcstg(input_tensor)
gate_active_probs = bcstg.get_gate_active_probs()
expected_gate_active_probs = [0.8319, 0.8324, 0.8304]
assertTensorAlmostEqual(
self, gate_active_probs, expected_gate_active_probs, mode="max"
)
def test_from_pretrained(self) -> None:
log_alpha_param = torch.tensor([0.1, 0.2, 0.3, 0.4])
kwargs = {
"mask": torch.tensor([0, 1, 1, 0, 2, 3]),
"reg_weight": 0.1,
"lower_bound": -0.2,
"upper_bound": 1.2,
}
stg = BinaryConcreteStochasticGates._from_pretrained(log_alpha_param, **kwargs)
for key, expected_val in kwargs.items():
val = getattr(stg, key)
if isinstance(expected_val, torch.Tensor):
assertTensorAlmostEqual(self, val, expected_val, mode="max")
else:
assert val == expected_val
|
#!/usr/bin/env python3
from typing import List, Tuple
import torch
from captum._utils.gradient import (
apply_gradient_requirements,
compute_gradients,
compute_layer_gradients_and_eval,
undo_gradient_requirements,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel,
BasicModel2,
BasicModel4_MultiArgs,
BasicModel5_MultiArgs,
BasicModel6_MultiTensor,
BasicModel_MultiLayer,
)
class Test(BaseTest):
def test_apply_gradient_reqs(self) -> None:
initial_grads = [False, True, False]
test_tensor = torch.tensor([[6.0]], requires_grad=True)
test_tensor.grad = torch.tensor([[7.0]])
test_tensor_tuple = (torch.tensor([[5.0]]), test_tensor, torch.tensor([[7.0]]))
out_mask = apply_gradient_requirements(test_tensor_tuple)
for i in range(len(test_tensor_tuple)):
self.assertTrue(test_tensor_tuple[i].requires_grad)
self.assertEqual(out_mask[i], initial_grads[i])
def test_undo_gradient_reqs(self) -> None:
initial_grads = [False, True, False]
test_tensor = torch.tensor([[6.0]], requires_grad=True)
test_tensor.grad = torch.tensor([[7.0]])
test_tensor_tuple = (
torch.tensor([[6.0]], requires_grad=True),
test_tensor,
torch.tensor([[7.0]], requires_grad=True),
)
undo_gradient_requirements(test_tensor_tuple, initial_grads)
for i in range(len(test_tensor_tuple)):
self.assertEqual(test_tensor_tuple[i].requires_grad, initial_grads[i])
def test_gradient_basic(self) -> None:
model = BasicModel()
input = torch.tensor([[5.0]], requires_grad=True)
input.grad = torch.tensor([[9.0]])
grads = compute_gradients(model, input)[0]
assertTensorAlmostEqual(self, grads, [[0.0]], delta=0.01, mode="max")
# Verify grad attribute is not altered
assertTensorAlmostEqual(self, input.grad, [[9.0]], delta=0.0, mode="max")
def test_gradient_basic_2(self) -> None:
model = BasicModel()
input = torch.tensor([[-3.0]], requires_grad=True)
input.grad = torch.tensor([[14.0]])
grads = compute_gradients(model, input)[0]
assertTensorAlmostEqual(self, grads, [[1.0]], delta=0.01, mode="max")
# Verify grad attribute is not altered
assertTensorAlmostEqual(self, input.grad, [[14.0]], delta=0.0, mode="max")
def test_gradient_multiinput(self) -> None:
model = BasicModel6_MultiTensor()
input1 = torch.tensor([[-3.0, -5.0]], requires_grad=True)
input2 = torch.tensor([[-5.0, 2.0]], requires_grad=True)
grads = compute_gradients(model, (input1, input2))
assertTensorAlmostEqual(self, grads[0], [[0.0, 1.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, grads[1], [[0.0, 1.0]], delta=0.01, mode="max")
def test_gradient_additional_args(self) -> None:
model = BasicModel4_MultiArgs()
input1 = torch.tensor([[10.0]], requires_grad=True)
input2 = torch.tensor([[8.0]], requires_grad=True)
grads = compute_gradients(model, (input1, input2), additional_forward_args=(2,))
assertTensorAlmostEqual(self, grads[0], [[1.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, grads[1], [[-0.5]], delta=0.01, mode="max")
def test_gradient_additional_args_2(self) -> None:
model = BasicModel5_MultiArgs()
input1 = torch.tensor([[-10.0]], requires_grad=True)
input2 = torch.tensor([[6.0]], requires_grad=True)
grads = compute_gradients(
model, (input1, input2), additional_forward_args=([3, -4],)
)
assertTensorAlmostEqual(self, grads[0], [[0.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, grads[1], [[4.0]], delta=0.01, mode="max")
def test_gradient_target_int(self) -> None:
model = BasicModel2()
input1 = torch.tensor([[4.0, -1.0]], requires_grad=True)
input2 = torch.tensor([[2.0, 5.0]], requires_grad=True)
grads0 = compute_gradients(model, (input1, input2), target_ind=0)
grads1 = compute_gradients(model, (input1, input2), target_ind=1)
assertTensorAlmostEqual(self, grads0[0], [[1.0, 0.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, grads0[1], [[-1.0, 0.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, grads1[0], [[0.0, 0.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, grads1[1], [[0.0, 0.0]], delta=0.01, mode="max")
def test_gradient_target_list(self) -> None:
model = BasicModel2()
input1 = torch.tensor([[4.0, -1.0], [3.0, 10.0]], requires_grad=True)
input2 = torch.tensor([[2.0, -5.0], [-2.0, 1.0]], requires_grad=True)
grads = compute_gradients(model, (input1, input2), target_ind=[0, 1])
assertTensorAlmostEqual(
self,
grads[0],
[[1.0, 0.0], [0.0, 1.0]],
delta=0.01,
mode="max",
)
assertTensorAlmostEqual(
self,
grads[1],
[[-1.0, 0.0], [0.0, -1.0]],
delta=0.01,
mode="max",
)
def test_gradient_target_tuple(self) -> None:
model = BasicModel()
input = torch.tensor(
[[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]], requires_grad=True
)
grads = compute_gradients(model, input, target_ind=(0, 1))[0]
assertTensorAlmostEqual(
self,
grads,
[[[0.0, 0.0], [0.0, 0.0]], [[0.0, 1.0], [0.0, 0.0]]],
delta=0.01,
mode="max",
)
def test_gradient_target_listtuple(self) -> None:
model = BasicModel()
input = torch.tensor(
[[[4.0, 2.0], [-1.0, -2.0]], [[3.0, -4.0], [10.0, 5.0]]], requires_grad=True
)
target: List[Tuple[int, ...]] = [(1, 1), (0, 1)]
grads = compute_gradients(model, input, target_ind=target)[0]
assertTensorAlmostEqual(
self,
grads,
[[[0.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [0.0, 0.0]]],
delta=0.01,
mode="max",
)
def test_gradient_inplace(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
input = torch.tensor([[1.0, 6.0, -3.0]], requires_grad=True)
grads = compute_gradients(model, input, target_ind=0)[0]
assertTensorAlmostEqual(self, grads, [[3.0, 3.0, 3.0]], delta=0.01, mode="max")
def test_layer_gradient_linear0(self) -> None:
model = BasicModel_MultiLayer()
input = torch.tensor([[5.0, -11.0, 23.0]], requires_grad=True)
grads, eval = compute_layer_gradients_and_eval(
model, model.linear0, input, target_ind=0
)
assertTensorAlmostEqual(
self, grads[0], [[4.0, 4.0, 4.0]], delta=0.01, mode="max"
)
assertTensorAlmostEqual(
self,
eval[0],
[[5.0, -11.0, 23.0]],
delta=0.01,
mode="max",
)
def test_layer_gradient_linear1(self) -> None:
model = BasicModel_MultiLayer()
input = torch.tensor([[5.0, 2.0, 1.0]], requires_grad=True)
grads, eval = compute_layer_gradients_and_eval(
model, model.linear1, input, target_ind=1
)
assertTensorAlmostEqual(
self,
grads[0],
[[0.0, 1.0, 1.0, 1.0]],
delta=0.01,
mode="max",
)
assertTensorAlmostEqual(
self,
eval[0],
[[-2.0, 9.0, 9.0, 9.0]],
delta=0.01,
mode="max",
)
def test_layer_gradient_linear1_inplace(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
input = torch.tensor([[5.0, 2.0, 1.0]], requires_grad=True)
grads, eval = compute_layer_gradients_and_eval(
model, model.linear1, input, target_ind=1
)
assertTensorAlmostEqual(
self,
grads[0],
[[0.0, 1.0, 1.0, 1.0]],
delta=0.01,
mode="max",
)
assertTensorAlmostEqual(
self,
eval[0],
[[-2.0, 9.0, 9.0, 9.0]],
delta=0.01,
mode="max",
)
def test_layer_gradient_relu_input_inplace(self) -> None:
model = BasicModel_MultiLayer(inplace=True)
input = torch.tensor([[5.0, 2.0, 1.0]], requires_grad=True)
grads, eval = compute_layer_gradients_and_eval(
model, model.relu, input, target_ind=1, attribute_to_layer_input=True
)
assertTensorAlmostEqual(
self,
grads[0],
[[0.0, 1.0, 1.0, 1.0]],
delta=0.01,
mode="max",
)
assertTensorAlmostEqual(
self,
eval[0],
[[-2.0, 9.0, 9.0, 9.0]],
delta=0.01,
mode="max",
)
def test_layer_gradient_output(self) -> None:
model = BasicModel_MultiLayer()
input = torch.tensor([[5.0, 2.0, 1.0]], requires_grad=True)
grads, eval = compute_layer_gradients_and_eval(
model, model.linear2, input, target_ind=1
)
assertTensorAlmostEqual(self, grads[0], [[0.0, 1.0]], delta=0.01, mode="max")
assertTensorAlmostEqual(self, eval[0], [[26.0, 28.0]], delta=0.01, mode="max")
|
#!/usr/bin/env python3
from typing import cast, List, Tuple
import torch
from captum._utils.common import (
_format_feature_mask,
_get_max_feature_index,
_parse_version,
_reduce_list,
_select_targets,
_sort_key_list,
safe_div,
)
from tests.helpers.basic import (
assertTensorAlmostEqual,
assertTensorTuplesAlmostEqual,
BaseTest,
)
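# Behaviour checked below, informally: `safe_div` divides element-wise but substitutes
# the provided default denominator wherever the denominator is zero (e.g.
# safe_div(torch.tensor(4.0), 0, 2) -> 2.0); `_reduce_list` concatenates a list of
# tensors (or tuples of tensors) along the batch dimension; and `_format_feature_mask`
# wraps a given mask into a per-input tuple, or, when given None, assigns each feature
# a distinct id that keeps increasing across inputs.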
class Test(BaseTest):
def test_safe_div_number_denom(self) -> None:
num = torch.tensor(4.0)
assert safe_div(num, 2) == 2.0
assert safe_div(num, 0, 2) == 2.0
assert safe_div(num, 2.0) == 2.0
assert safe_div(num, 0.0, 2.0) == 2.0
def test_safe_div_tensor_denom(self) -> None:
num = torch.tensor([4.0, 6.0])
exp = torch.tensor([2.0, 3.0])
assert (safe_div(num, torch.tensor([2.0, 2.0])) == exp).all()
# tensor default denom
assert (safe_div(num, torch.tensor([0.0, 0.0]), torch.tensor(2.0)) == exp).all()
assert (
safe_div(
num,
torch.tensor([0.0, 0.0]),
torch.tensor([2.0, 2.0]),
)
== exp
).all()
# float default denom
assert (safe_div(num, torch.tensor([0.0, 0.0]), 2.0) == exp).all()
def test_reduce_list_tensors(self) -> None:
tensors = [torch.tensor([[3, 4, 5]]), torch.tensor([[0, 1, 2]])]
reduced = _reduce_list(tensors)
assertTensorAlmostEqual(self, reduced, [[3, 4, 5], [0, 1, 2]])
    def test_reduce_list_tuples(self) -> None:
tensors = [
(torch.tensor([[3, 4, 5]]), torch.tensor([[0, 1, 2]])),
(torch.tensor([[3, 4, 5]]), torch.tensor([[0, 1, 2]])),
]
reduced = _reduce_list(tensors)
assertTensorAlmostEqual(self, reduced[0], [[3, 4, 5], [3, 4, 5]])
assertTensorAlmostEqual(self, reduced[1], [[0, 1, 2], [0, 1, 2]])
def test_sort_key_list(self) -> None:
key_list = [
torch.device("cuda:13"),
torch.device("cuda:17"),
torch.device("cuda:10"),
torch.device("cuda:0"),
]
device_index_list = [0, 10, 13, 17]
sorted_keys = _sort_key_list(key_list, device_index_list)
for i in range(len(key_list)):
self.assertEqual(sorted_keys[i].index, device_index_list[i])
def test_sort_key_list_incomplete(self) -> None:
key_list = [torch.device("cuda:10"), torch.device("cuda:0")]
device_index_list = [0, 10, 13, 17]
sorted_keys = _sort_key_list(key_list, device_index_list)
for i in range(len(key_list)):
self.assertEqual(sorted_keys[i].index, device_index_list[i])
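    # _select_targets semantics exercised below: an int (or 0-dim tensor) picks that
    # column for every example, a 1-D tensor/list picks one column per example, and a
    # tuple indexes into the trailing dimensions of a >2-D output; mismatched target
    # shapes are expected to raise AssertionError.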
def test_select_target_2d(self) -> None:
output_tensor = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
assertTensorAlmostEqual(self, _select_targets(output_tensor, 1), [2, 5, 8])
assertTensorAlmostEqual(
self, _select_targets(output_tensor, torch.tensor(0)), [1, 4, 7]
)
assertTensorAlmostEqual(
self,
_select_targets(output_tensor, torch.tensor([1, 2, 0])),
[[2], [6], [7]],
)
assertTensorAlmostEqual(
self, _select_targets(output_tensor, [1, 2, 0]), [[2], [6], [7]]
)
# Verify error is raised if too many dimensions are provided.
with self.assertRaises(AssertionError):
_select_targets(output_tensor, (1, 2))
def test_select_target_3d(self) -> None:
output_tensor = torch.tensor(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[9, 8, 7], [6, 5, 4], [3, 2, 1]]]
)
assertTensorAlmostEqual(self, _select_targets(output_tensor, (0, 1)), [2, 8])
assertTensorAlmostEqual(
self,
_select_targets(
output_tensor, cast(List[Tuple[int, ...]], [(0, 1), (2, 0)])
),
[2, 3],
)
# Verify error is raised if list is longer than number of examples.
with self.assertRaises(AssertionError):
_select_targets(
output_tensor, cast(List[Tuple[int, ...]], [(0, 1), (2, 0), (3, 2)])
)
# Verify error is raised if too many dimensions are provided.
with self.assertRaises(AssertionError):
_select_targets(output_tensor, (1, 2, 3))
def test_format_feature_mask_of_tensor(self) -> None:
formatted_inputs = (torch.tensor([[0.0, 0.0], [0.0, 0.0]]),)
tensor_mask = torch.tensor([[0, 1]])
formatted_tensor_mask = _format_feature_mask(tensor_mask, formatted_inputs)
self.assertEqual(type(formatted_tensor_mask), tuple)
assertTensorTuplesAlmostEqual(self, formatted_tensor_mask, (tensor_mask,))
def test_format_feature_mask_of_tuple(self) -> None:
formatted_inputs = (
torch.tensor([[0.0, 0.0], [0.0, 0.0]]),
torch.tensor([[0.0, 0.0], [0.0, 0.0]]),
)
tuple_mask = (
torch.tensor([[0, 1], [2, 3]]),
torch.tensor([[4, 5], [6, 6]]),
)
formatted_tuple_mask = _format_feature_mask(tuple_mask, formatted_inputs)
self.assertEqual(type(formatted_tuple_mask), tuple)
assertTensorTuplesAlmostEqual(self, formatted_tuple_mask, tuple_mask)
def test_format_feature_mask_of_none(self) -> None:
formatted_inputs = (
torch.tensor([[0.0, 0.0], [0.0, 0.0]]),
torch.tensor([]), # empty tensor
torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),
)
expected_mask = (
torch.tensor([[0, 1]]),
torch.tensor([]),
torch.tensor([[2, 3, 4]]),
)
formatted_none_mask = _format_feature_mask(None, formatted_inputs)
self.assertEqual(type(formatted_none_mask), tuple)
assertTensorTuplesAlmostEqual(self, formatted_none_mask, expected_mask)
def test_get_max_feature_index(self) -> None:
mask = (
torch.tensor([[0, 1], [2, 3]]),
torch.tensor([]),
torch.tensor([[4, 5], [6, 100]]),
torch.tensor([[0, 1], [2, 3]]),
)
assert _get_max_feature_index(mask) == 100
class TestParseVersion(BaseTest):
def test_parse_version_dev(self) -> None:
version_str = "1.12.0.dev20201109"
output = _parse_version(version_str)
self.assertEqual(output, (1, 12, 0))
def test_parse_version_post(self) -> None:
version_str = "1.3.0.post2"
output = _parse_version(version_str)
self.assertEqual(output, (1, 3, 0))
def test_parse_version_1_12_0(self) -> None:
version_str = "1.12.0"
output = _parse_version(version_str)
self.assertEqual(output, (1, 12, 0))
def test_parse_version_1_12_2(self) -> None:
version_str = "1.12.2"
output = _parse_version(version_str)
self.assertEqual(output, (1, 12, 2))
def test_parse_version_1_6_0(self) -> None:
version_str = "1.6.0"
output = _parse_version(version_str)
self.assertEqual(output, (1, 6, 0))
def test_parse_version_1_12(self) -> None:
version_str = "1.12"
output = _parse_version(version_str)
self.assertEqual(output, (1, 12))
|
#!/usr/bin/env python3
import torch
import torch.nn as nn
from captum._utils.gradient import (
_compute_jacobian_wrt_params,
_compute_jacobian_wrt_params_with_sample_wise_trick,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicLinearModel2, BasicLinearModel_Multilayer
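# Background for the expected values below: for a linear layer out = W @ x, the
# per-sample Jacobian w.r.t. W satisfies d(out_k)/d(W_kj) = x_j, so each row equals the
# input x (scaled by downstream weights in the multilayer models, where the gradient
# w.r.t. the second layer's weights is the first layer's activations). With an MSE loss
# the chain rule contributes an extra factor 2 * (prediction - label), which is why the
# expectations are written as 2 * (pred - label) * a.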
class Test(BaseTest):
def test_jacobian_scores_single_scalar(self) -> None:
model = BasicLinearModel2(5, 1)
model.linear.weight = nn.Parameter(torch.arange(0, 5).float().reshape(1, 5))
a = torch.ones(5).unsqueeze(0)
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], a)
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], a)
def test_jacobian_scores_single_vector(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
a = torch.ones(5).unsqueeze(0)
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.cat((a, a)))
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.cat((a, a)))
def test_jacobian_scores_single_scalar_multilayer(self) -> None:
model = BasicLinearModel_Multilayer(5, 2, 1)
model.linear1.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
model.linear2.weight = nn.Parameter(torch.arange(1, 3).view(1, 2).float())
a = torch.ones(5).unsqueeze(0)
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.cat((a, 2 * a)))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35]]))
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.cat((a, 2 * a)))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35]]))
def test_jacobian_scores_single_vector_multilayer(self) -> None:
model = BasicLinearModel_Multilayer(5, 2, 2)
model.linear1.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
model.linear2.weight = nn.Parameter(torch.arange(0, 4).view(2, 2).float())
a = torch.ones(5).unsqueeze(0)
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.cat((2 * a, 4 * a)))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35], [10, 35]]))
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.cat((2 * a, 4 * a)))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35], [10, 35]]))
def test_jacobian_scores_batch_scalar(self) -> None:
model = BasicLinearModel2(5, 1)
model.linear.weight = nn.Parameter(torch.arange(0, 5).float().reshape(1, 5))
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], a[0:1])
assertTensorAlmostEqual(self, grads[0][1], a[1:2])
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], a[0:1])
assertTensorAlmostEqual(self, grads[0][1], a[1:2])
def test_jacobian_scores_batch_vector(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.stack((a[0], a[0])))
assertTensorAlmostEqual(self, grads[0][1], torch.stack((a[1], a[1])))
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.stack((a[0], a[0])))
assertTensorAlmostEqual(self, grads[0][1], torch.stack((a[1], a[1])))
def test_jacobian_scores_batch_scalar_multilayer(self) -> None:
model = BasicLinearModel_Multilayer(5, 2, 1)
model.linear1.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
model.linear2.weight = nn.Parameter(torch.arange(1, 3).view(1, 2).float())
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.stack((a[0], 2 * a[0])))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35]]))
assertTensorAlmostEqual(self, grads[0][1], torch.stack((a[1], 2 * a[1])))
assertTensorAlmostEqual(self, grads[1][1], torch.Tensor([[20, 70]]))
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.stack((a[0], 2 * a[0])))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35]]))
assertTensorAlmostEqual(self, grads[0][1], torch.stack((a[1], 2 * a[1])))
assertTensorAlmostEqual(self, grads[1][1], torch.Tensor([[20, 70]]))
def test_jacobian_scores_batch_vector_multilayer(self) -> None:
model = BasicLinearModel_Multilayer(5, 2, 2)
model.linear1.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
model.linear2.weight = nn.Parameter(torch.arange(0, 4).view(2, 2).float())
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
grads = _compute_jacobian_wrt_params(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.stack((2 * a[0], 4 * a[0])))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35], [10, 35]]))
assertTensorAlmostEqual(self, grads[0][1], torch.stack((2 * a[1], 4 * a[1])))
assertTensorAlmostEqual(self, grads[1][1], torch.Tensor([[20, 70], [20, 70]]))
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(model, (a,))
assertTensorAlmostEqual(self, grads[0][0], torch.stack((2 * a[0], 4 * a[0])))
assertTensorAlmostEqual(self, grads[1][0], torch.Tensor([[10, 35], [10, 35]]))
assertTensorAlmostEqual(self, grads[0][1], torch.stack((2 * a[1], 4 * a[1])))
assertTensorAlmostEqual(self, grads[1][1], torch.Tensor([[20, 70], [20, 70]]))
def test_jacobian_loss_single_scalar(self) -> None:
model = BasicLinearModel2(5, 1)
model.linear.weight = nn.Parameter(torch.arange(0, 5).view(1, 5).float())
a = torch.ones(5).unsqueeze(0)
label = torch.Tensor([9])
loss_fn = nn.MSELoss(reduction="none")
grads = _compute_jacobian_wrt_params(model, (a,), label, loss_fn)
assertTensorAlmostEqual(self, grads[0][0], 2 * (10 - 9) * a)
loss_fn = nn.MSELoss(reduction="sum")
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
assertTensorAlmostEqual(self, grads[0][0], 2 * (10 - 9) * a)
def test_jacobian_loss_single_vector(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
a = torch.ones(5).unsqueeze(0)
label = torch.Tensor([[9, 38]])
loss_fn = nn.MSELoss(reduction="none")
grads = _compute_jacobian_wrt_params(model, (a,), label, loss_fn)
assertTensorAlmostEqual(
self, grads[0][0], torch.cat((2 * (10 - 9) * a, 2 * (35 - 38) * a))
)
loss_fn = nn.MSELoss(reduction="sum")
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
assertTensorAlmostEqual(
self, grads[0][0], torch.cat((2 * (10 - 9) * a, 2 * (35 - 38) * a))
)
def test_jacobian_loss_batch_scalar(self) -> None:
model = BasicLinearModel2(5, 1)
model.linear.weight = nn.Parameter(torch.arange(0, 5).float().reshape(1, 5))
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9], [18]])
loss_fn = nn.MSELoss(reduction="none")
grads = _compute_jacobian_wrt_params(model, (a,), label, loss_fn)
assertTensorAlmostEqual(self, grads[0][0], 2 * (10 - 9) * a[0:1])
assertTensorAlmostEqual(self, grads[0][1], 2 * (20 - 18) * a[1:2])
loss_fn = nn.MSELoss(reduction="sum")
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
assertTensorAlmostEqual(self, grads[0][0], 2 * (10 - 9) * a[0:1])
assertTensorAlmostEqual(self, grads[0][1], 2 * (20 - 18) * a[1:2])
def test_jacobian_loss_batch_vector(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9, 38], [18, 74]])
loss_fn = nn.MSELoss(reduction="none")
grads = _compute_jacobian_wrt_params(model, (a,), label, loss_fn)
assertTensorAlmostEqual(
self, grads[0][0], torch.stack((2 * (10 - 9) * a[0], 2 * (35 - 38) * a[0]))
)
assertTensorAlmostEqual(
self, grads[0][1], torch.stack((2 * (20 - 18) * a[1], 2 * (70 - 74) * a[1]))
)
loss_fn = nn.MSELoss(reduction="sum")
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
assertTensorAlmostEqual(
self, grads[0][0], torch.stack((2 * (10 - 9) * a[0], 2 * (35 - 38) * a[0]))
)
assertTensorAlmostEqual(
self, grads[0][1], torch.stack((2 * (20 - 18) * a[1], 2 * (70 - 74) * a[1]))
)
def test_jacobian_loss_single_scalar_multilayer(self) -> None:
model = BasicLinearModel_Multilayer(5, 2, 1)
model.linear1.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
model.linear2.weight = nn.Parameter(torch.arange(1, 3).view(1, 2).float())
a = torch.ones(5).unsqueeze(0)
label = torch.Tensor([[78]])
loss_fn = nn.MSELoss(reduction="none")
grads = _compute_jacobian_wrt_params(model, (a,), label, loss_fn)
assertTensorAlmostEqual(
self, grads[0][0], torch.cat((2 * (80 - 78) * a, 2 * 2 * (80 - 78) * a))
)
assertTensorAlmostEqual(
self, grads[1][0], 2 * (80 - 78) * torch.Tensor([[10, 35]])
)
loss_fn = nn.MSELoss(reduction="sum")
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
assertTensorAlmostEqual(
self, grads[0][0], torch.cat((2 * (80 - 78) * a, 2 * 2 * (80 - 78) * a))
)
assertTensorAlmostEqual(
self, grads[1][0], 2 * (80 - 78) * torch.Tensor([[10, 35]])
)
def test_jacobian_loss_batch_vector_multilayer(self) -> None:
model = BasicLinearModel_Multilayer(5, 2, 2)
model.linear1.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
model.linear2.weight = nn.Parameter(torch.arange(0, 4).view(2, 2).float())
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[33, 124], [69, 256]])
loss_fn = nn.MSELoss(reduction="none")
grads = _compute_jacobian_wrt_params(model, (a,), label, loss_fn)
assertTensorAlmostEqual(
self,
grads[0][0],
torch.stack(
(
2 * (0 * (35 - 33) + 2 * (125 - 124)) * a[0],
2 * (1 * (35 - 33) + 3 * (125 - 124)) * a[0],
)
),
)
assertTensorAlmostEqual(
self,
grads[1][0],
torch.Tensor(
[
[2 * (35 - 33) * 10, 2 * (35 - 33) * 35],
[2 * (125 - 124) * 10, 2 * (125 - 124) * 35],
]
),
)
assertTensorAlmostEqual(
self,
grads[0][1],
torch.stack(
(
2 * (0 * (70 - 69) + 2 * (250 - 256)) * a[1],
2 * (1 * (70 - 69) + 3 * (250 - 256)) * a[1],
)
),
)
assertTensorAlmostEqual(
self,
grads[1][1],
torch.Tensor(
[
[2 * (70 - 69) * 10 * 2, 2 * (70 - 69) * 35 * 2],
[2 * (250 - 256) * 10 * 2, 2 * (250 - 256) * 35 * 2],
]
),
)
loss_fn = nn.MSELoss(reduction="sum")
grads_h = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
assertTensorAlmostEqual(self, grads_h[0][0], grads[0][0])
assertTensorAlmostEqual(self, grads_h[1][0], grads[1][0])
assertTensorAlmostEqual(self, grads_h[0][1], grads[0][1])
assertTensorAlmostEqual(self, grads_h[1][1], grads[1][1])
def test_jacobian_loss_custom_correct(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
def my_loss(out, label):
return (out - label).pow(2)
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9, 38], [18, 74]])
grads = _compute_jacobian_wrt_params(model, (a,), label, my_loss)
assertTensorAlmostEqual(
self, grads[0][0], torch.stack((2 * (10 - 9) * a[0], 2 * (35 - 38) * a[0]))
)
assertTensorAlmostEqual(
self, grads[0][1], torch.stack((2 * (20 - 18) * a[1], 2 * (70 - 74) * a[1]))
)
def test_jacobian_loss_custom_wrong(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
def my_loss(out, label):
return torch.sum((out - label).pow(2))
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9, 38], [18, 74]])
with self.assertRaises(AssertionError):
_compute_jacobian_wrt_params(model, (a,), label, my_loss)
def test_jacobian_loss_custom_correct_sample_wise_trick(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
def my_loss(out, label):
return torch.sum((out - label).pow(2))
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9, 38], [18, 74]])
grads = _compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, my_loss # type: ignore
)
assertTensorAlmostEqual(
self, grads[0][0], torch.stack((2 * (10 - 9) * a[0], 2 * (35 - 38) * a[0]))
)
assertTensorAlmostEqual(
self, grads[0][1], torch.stack((2 * (20 - 18) * a[1], 2 * (70 - 74) * a[1]))
)
def test_jacobian_loss_custom_wrong_sample_wise_trick(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
def my_loss(out, label):
return (out - label).pow(2)
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9, 38], [18, 74]])
with self.assertRaises(AssertionError):
_compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, my_loss # type: ignore
)
def test_jacobian_loss_wrong_reduction_sample_wise_trick(self) -> None:
model = BasicLinearModel2(5, 2)
model.linear.weight = nn.Parameter(torch.arange(0, 10).view(2, 5).float())
loss_fn = nn.MSELoss(reduction="none")
a = torch.stack((torch.ones(5), torch.ones(5) * 2))
label = torch.Tensor([[9, 38], [18, 74]])
with self.assertRaises(AssertionError):
_compute_jacobian_wrt_params_with_sample_wise_trick(
model, (a,), label, loss_fn
)
|
#!/usr/bin/env python3
import io
import unittest
import unittest.mock
from captum._utils.progress import NullProgress, progress
from tests.helpers.basic import BaseTest
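# Both tqdm and captum's simple fallback progress bar write to sys.stderr, so these
# tests patch sys.stderr with a StringIO and assert on the captured text.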
class Test(BaseTest):
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_nullprogress(self, mock_stderr) -> None:
count = 0
with NullProgress(["x", "y", "z"]) as np:
for _ in np:
for _ in NullProgress([1, 2, 3]):
count += 1
self.assertEqual(count, 9)
output = mock_stderr.getvalue()
self.assertEqual(output, "")
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_nested_progress_tqdm(self, mock_stderr) -> None:
try:
import tqdm # noqa: F401
except ImportError:
raise unittest.SkipTest("Skipping tqdm test, tqdm not available.")
parent_data = ["x", "y", "z"]
test_data = [1, 2, 3]
with progress(parent_data, desc="parent progress") as parent:
for item in parent:
for _ in progress(test_data, desc=f"test progress {item}"):
pass
output = mock_stderr.getvalue()
self.assertIn("parent progress:", output)
for item in parent_data:
self.assertIn(f"test progress {item}:", output)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_nested_simple_progress(self, mock_stderr) -> None:
parent_data = ["x", "y", "z"]
test_data = [1, 2, 3]
with progress(
parent_data, desc="parent progress", use_tqdm=False, mininterval=0.0
) as parent:
for item in parent:
for _ in progress(
test_data, desc=f"test progress {item}", use_tqdm=False
):
pass
output = mock_stderr.getvalue()
self.assertEqual(
output.count("parent progress:"), 5, "5 'parent' progress bar expected"
)
for item in parent_data:
self.assertIn(f"test progress {item}:", output)
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_progress_tqdm(self, mock_stderr) -> None:
try:
import tqdm # noqa: F401
except ImportError:
raise unittest.SkipTest("Skipping tqdm test, tqdm not available.")
test_data = [1, 3, 5]
progressed = progress(test_data, desc="test progress")
assert list(progressed) == test_data
assert "test progress: " in mock_stderr.getvalue()
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_progress(self, mock_stderr) -> None:
test_data = [1, 3, 5]
desc = "test progress"
progressed = progress(test_data, desc=desc, use_tqdm=False)
assert list(progressed) == test_data
assert mock_stderr.getvalue().startswith(f"\r{desc}: 0% 0/3")
assert mock_stderr.getvalue().endswith(f"\r{desc}: 100% 3/3\n")
# progress iterable without len but explicitly specify total
def gen():
for n in test_data:
yield n
mock_stderr.seek(0)
mock_stderr.truncate(0)
progressed = progress(gen(), desc=desc, total=len(test_data), use_tqdm=False)
assert list(progressed) == test_data
assert mock_stderr.getvalue().startswith(f"\r{desc}: 0% 0/3")
assert mock_stderr.getvalue().endswith(f"\r{desc}: 100% 3/3\n")
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_progress_without_total(self, mock_stderr) -> None:
test_data = [1, 3, 5]
desc = "test progress"
def gen():
for n in test_data:
yield n
progressed = progress(gen(), desc=desc, use_tqdm=False)
assert list(progressed) == test_data
assert mock_stderr.getvalue().startswith(f"\r{desc}: ")
assert mock_stderr.getvalue().endswith(f"\r{desc}: ...\n")
@unittest.mock.patch("sys.stderr", new_callable=io.StringIO)
def test_simple_progress_update_manually(self, mock_stderr) -> None:
desc = "test progress"
p = progress(total=5, desc=desc, use_tqdm=False)
p.update(0)
p.update(2)
p.update(2)
p.update(1)
p.close()
assert mock_stderr.getvalue().startswith(f"\r{desc}: 0% 0/5")
assert mock_stderr.getvalue().endswith(f"\r{desc}: 100% 5/5\n")
|
import glob
import tempfile
from datetime import datetime
from typing import cast, List
import torch
from captum._utils.av import AV
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import BasicLinearReLULinear
from torch.utils.data import DataLoader, Dataset
DEFAULT_IDENTIFIER = "default_identifier"
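# The assertions below rely on AV's on-disk layout, roughly
# <dir>/av/<model_id>/<identifier>/<layer>/<batch>.pt, which is why saved batches are
# counted with three-level glob patterns such as model_path + "/*/*/*".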
class RangeDataset(Dataset):
def __init__(self, low, high, num_features) -> None:
self.samples = (
torch.arange(start=low, end=high, dtype=torch.float)
.repeat(num_features, 1)
.transpose(1, 0)
)
def __len__(self) -> int:
return len(self.samples)
def __getitem__(self, idx):
return self.samples[idx]
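# RangeDataset yields sample i as the value i repeated `num_features` times, giving a
# small deterministic dataset for the activation-generation tests further down.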
class Test(BaseTest):
def test_exists_without_version(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
av_0 = torch.randn(64, 16)
self.assertFalse(AV.exists(tmpdir, "dummy", "layer1.0.conv1"))
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1", av_0, "0")
self.assertTrue(
AV.exists(
tmpdir,
"dummy",
DEFAULT_IDENTIFIER,
"layer1.0.conv1",
)
)
def test_exists_with_version(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
idf1 = str(int(datetime.now().microsecond))
idf2 = "idf2"
av_0 = torch.randn(64, 16)
self.assertFalse(AV.exists(tmpdir, "dummy", "layer1.0.conv1", idf1))
self.assertFalse(AV.exists(tmpdir, "dummy", "layer1.0.conv1", idf2))
AV.save(tmpdir, "dummy", idf1, "layer1.0.conv1", av_0, "0")
self.assertTrue(AV.exists(tmpdir, "dummy", idf1, "layer1.0.conv1"))
self.assertFalse(AV.exists(tmpdir, "dummy", idf2, "layer1.0.conv1"))
AV.save(tmpdir, "dummy", idf2, "layer1.0.conv1", av_0, "0")
self.assertTrue(AV.exists(tmpdir, "dummy", idf2, "layer1.0.conv1"))
def test_av_save_two_layers(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
av_0 = torch.randn(64, 16)
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1", av_0, "0")
self.assertTrue(
AV.exists(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1")
)
self.assertFalse(
AV.exists(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv2")
)
# experimenting with adding to another layer
av_1 = torch.randn(64, 16)
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv2", av_1, "0")
self.assertTrue(
AV.exists(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv2")
)
def test_av_save_multi_layer(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
av_0 = torch.randn(64, 16)
av_1 = torch.randn(64, 16)
av_2 = torch.randn(64, 16)
model_path = AV._assemble_model_dir(tmpdir, "dummy")
# save first layer
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1", av_0, "0")
self.assertEqual(len(glob.glob(model_path + "*")), 1)
# add two new layers at once
AV.save(
tmpdir,
"dummy",
DEFAULT_IDENTIFIER,
["layer1.0.conv2", "layer1.1.conv1"],
[av_1, av_2],
"0",
)
self.assertEqual(len(glob.glob(model_path + "/*/*/*")), 3)
# overwrite the first saved layer
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1", av_0, "0")
self.assertEqual(len(glob.glob(model_path + "/*/*/*")), 3)
# save a new version of the first layer
idf1 = str(int(datetime.now().microsecond))
self.assertFalse(AV.exists(tmpdir, "dummy", idf1, "layer1.0.conv1"))
AV.save(tmpdir, "dummy", idf1, "layer1.0.conv1", av_0, "0")
self.assertTrue(AV.exists(tmpdir, "dummy", idf1, "layer1.0.conv1"))
self.assertEqual(len(glob.glob(model_path + "/*/*/*")), 4)
def test_av_save_multiple_batches_per_layer(self) -> None:
def save_and_assert_batch(layer_path, total_num_batches, batch, n_batch_name):
# save n-th batch and verify the number of saved batches
AV.save(
tmpdir,
model_id,
DEFAULT_IDENTIFIER,
"layer1.0.conv1",
batch,
n_batch_name,
)
self.assertEqual(
len(glob.glob("/".join([layer_path, "*.pt"]))),
total_num_batches,
)
self.assertTrue(
AV.exists(
tmpdir, model_id, DEFAULT_IDENTIFIER, "layer1.0.conv1", n_batch_name
)
)
with tempfile.TemporaryDirectory() as tmpdir:
b0 = torch.randn(64, 16)
b1 = torch.randn(64, 16)
b2 = torch.randn(64, 16)
model_id = "dummy"
model_path = AV._assemble_model_dir(tmpdir, model_id)
layer_path = AV._assemble_file_path(
model_path, DEFAULT_IDENTIFIER, "layer1.0.conv1"
)
# save first batch and verify the number of saved batches
save_and_assert_batch(layer_path, 1, b0, "0")
# save second batch and verify the number of saved batches
save_and_assert_batch(layer_path, 2, b1, "1")
# save third batch and verify the number of saved batches
save_and_assert_batch(layer_path, 3, b2, "2")
def test_av_load_multiple_batches_per_layer(self) -> None:
def save_load_and_assert_batch(
layer_path, total_num_batches, batch, n_batch_name
):
# save n-th batch and verify the number of saved batches
AV.save(
tmpdir,
model_id,
DEFAULT_IDENTIFIER,
"layer1.0.conv1",
batch,
n_batch_name,
)
loaded_dataset = AV.load(
tmpdir, model_id, DEFAULT_IDENTIFIER, "layer1.0.conv1", n_batch_name
)
assertTensorAlmostEqual(self, next(iter(loaded_dataset)), batch, 0.0)
loaded_dataset_for_layer = AV.load(
tmpdir, model_id, DEFAULT_IDENTIFIER, "layer1.0.conv1"
)
self.assertEqual(
loaded_dataset_for_layer.__len__(),
total_num_batches,
)
with tempfile.TemporaryDirectory() as tmpdir:
b0 = torch.randn(64, 16)
b1 = torch.randn(64, 16)
b2 = torch.randn(64, 16)
model_id = "dummy"
model_path = AV._assemble_model_dir(tmpdir, model_id)
layer_path = AV._assemble_file_path(
model_path, DEFAULT_IDENTIFIER, "layer1.0.conv1"
)
# save first batch and verify the number of saved batches
save_load_and_assert_batch(layer_path, 1, b0, "0")
# save second batch and verify the number of saved batches
save_load_and_assert_batch(layer_path, 2, b1, "1")
# save third batch and verify the number of saved batches
save_load_and_assert_batch(layer_path, 3, b2, "2")
def test_av_load_non_saved_layer(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
model_id = "dummy"
with self.assertRaises(RuntimeError) as context:
AV.load(tmpdir, model_id)
self.assertTrue(
(
f"Activation vectors for model {model_id} "
f"was not found at path {tmpdir}"
)
== str(context.exception)
)
def test_av_load_one_batch(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
av_0 = torch.randn(64, 16)
av_1 = torch.randn(36, 16)
avs = [av_0, av_1]
# add av_0 to the list of activations
model_id = "dummy"
with self.assertRaises(RuntimeError) as context:
AV.load(tmpdir, model_id)
self.assertTrue(
(
f"Activation vectors for model {model_id} "
f"was not found at path {tmpdir}"
)
== str(context.exception)
)
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1", av_0, "0")
model_id = "dummy"
dataset = AV.load(tmpdir, model_id, identifier=DEFAULT_IDENTIFIER)
for i, av in enumerate(DataLoader(cast(Dataset, dataset))):
assertTensorAlmostEqual(self, av, avs[i].unsqueeze(0))
# add av_1 to the list of activations
dataloader_2 = DataLoader(
cast(
Dataset,
AV.load(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv2"),
)
)
self.assertEqual(len(dataloader_2), 0)
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv2", av_1, "0")
dataset = AV.load(tmpdir, "dummy", identifier=DEFAULT_IDENTIFIER)
dataloader = DataLoader(cast(Dataset, dataset))
self.assertEqual(len(dataloader), 2)
for i, av in enumerate(dataloader):
assertTensorAlmostEqual(self, av, avs[i].unsqueeze(0))
def test_av_load_all_identifiers_one_layer(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
av_0 = torch.randn(64, 16)
av_1 = torch.randn(36, 16)
av_2 = torch.randn(16, 16)
av_3 = torch.randn(4, 16)
avs = [av_1, av_2, av_3]
idf1, idf2, idf3 = "idf1", "idf2", "idf3"
AV.save(tmpdir, "dummy", DEFAULT_IDENTIFIER, "layer1.0.conv1", av_0, "0")
dataloader = DataLoader(
cast(Dataset, AV.load(tmpdir, "dummy", identifier=DEFAULT_IDENTIFIER))
)
self.assertEqual(len(dataloader), 1)
# add activations for another layer
AV.save(tmpdir, "dummy", idf1, "layer1.0.conv2", av_1, "0")
AV.save(tmpdir, "dummy", idf2, "layer1.0.conv2", av_2, "0")
AV.save(tmpdir, "dummy", idf3, "layer1.0.conv2", av_3, "0")
dataloader_layer = DataLoader(
cast(
Dataset,
AV.load(
tmpdir,
"dummy",
layer="layer1.0.conv2",
),
)
)
self.assertEqual(len(dataloader_layer), 3)
for i, av in enumerate(dataloader_layer):
assertTensorAlmostEqual(self, av, avs[i].unsqueeze(0))
dataloader = DataLoader(cast(Dataset, AV.load(tmpdir, "dummy")))
self.assertEqual(len(dataloader), 4)
def test_av_load_all_layers_one_identifier(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
av_01 = torch.randn(36, 16)
av_02 = torch.randn(16, 16)
av_03 = torch.randn(4, 16)
avs_0 = [av_01, av_02, av_03]
av_11 = torch.randn(36, 16)
av_12 = torch.randn(16, 16)
av_13 = torch.randn(4, 16)
avs_1 = [av_11, av_12, av_13]
idf1, idf2 = "idf1", "idf2"
AV.save(
tmpdir,
"dummy",
idf1,
["layer1.0.conv1", "layer1.0.conv2", "layer1.1.conv1"],
avs_0,
"0",
)
dataloader = DataLoader(cast(Dataset, AV.load(tmpdir, "dummy")))
self.assertEqual(len(dataloader), 3)
AV.save(
tmpdir,
"dummy",
idf2,
["layer1.0.conv1", "layer1.0.conv2", "layer1.1.conv1"],
avs_1,
"0",
)
dataloader = DataLoader(cast(Dataset, AV.load(tmpdir, "dummy")))
self.assertEqual(len(dataloader), 6)
# check activations for idf1
dataloader_layer = DataLoader(
cast(Dataset, AV.load(tmpdir, "dummy", identifier=idf1))
)
self.assertEqual(len(dataloader_layer), 3)
for i, av in enumerate(dataloader_layer):
assertTensorAlmostEqual(self, av, avs_0[i].unsqueeze(0))
# check activations for idf2
dataloader_layer = DataLoader(
cast(Dataset, AV.load(tmpdir, "dummy", identifier=idf2))
)
self.assertEqual(len(dataloader_layer), 3)
for i, av in enumerate(dataloader_layer):
assertTensorAlmostEqual(self, av, avs_1[i].unsqueeze(0))
def test_av_sort_files(self) -> None:
files = ["resnet50-cifar-3000", "resnet50-cifar-1000", "resnet50-cifar-2000"]
exp_files = [
"resnet50-cifar-1000",
"resnet50-cifar-2000",
"resnet50-cifar-3000",
]
files = AV.sort_files(files)
self.assertEqual(files, exp_files)
files = ["resnet50-cifar-0900", "resnet50-cifar-0000", "resnet50-cifar-1000"]
exp_files = [
"resnet50-cifar-0000",
"resnet50-cifar-0900",
"resnet50-cifar-1000",
]
files = AV.sort_files(files)
self.assertEqual(files, exp_files)
files = ["resnet50-cifar-100", "resnet50-cifar-90", "resnet50-cifar-3000"]
exp_files = [
"resnet50-cifar-90",
"resnet50-cifar-100",
"resnet50-cifar-3000",
]
files = AV.sort_files(files)
self.assertEqual(files, exp_files)
files = [
"av/pretrained-net-0/fc1-src10-710935.pt",
"av/pretrained-net-0/fc1-src11-755317.pt",
"av/pretrained-net-0/fc3-src2-655646.pt",
"av/pretrained-net-0/fc1-src9-952381.pt",
"av/pretrained-net-0/conv2-src7-811286.pt",
"av/pretrained-net-0/fc1-src10-176141.pt",
"av/pretrained-net-0/conv11-src9-384927.pt",
]
exp_files = [
"av/pretrained-net-0/conv2-src7-811286.pt",
"av/pretrained-net-0/conv11-src9-384927.pt",
"av/pretrained-net-0/fc1-src9-952381.pt",
"av/pretrained-net-0/fc1-src10-176141.pt",
"av/pretrained-net-0/fc1-src10-710935.pt",
"av/pretrained-net-0/fc1-src11-755317.pt",
"av/pretrained-net-0/fc3-src2-655646.pt",
]
files = AV.sort_files(files)
self.assertEqual(files, exp_files)
def test_generate_activation(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
mymodel = BasicLinearReLULinear(num_features)
mydata = RangeDataset(low, high, num_features)
layers: List[str] = [
value[0] for value in mymodel.named_modules() if value[0]
]
# First AV generation on last 2 layers
inputs = torch.stack((mydata[1], mydata[8], mydata[14]))
AV._compute_and_save_activations(
tmpdir, mymodel, "model_id_1", layers[1:], inputs, "test", "0"
)
av_test = AV._construct_file_search(tmpdir, "model_id_1", identifier="test")
av_test = glob.glob(av_test)
self.assertEqual(len(av_test), len(layers[1:]))
# Second AV generation on first 2 layers.
# Second layer overlaps with existing activations, should be loaded.
inputs = torch.stack((mydata[0], mydata[7], mydata[13]))
AV._compute_and_save_activations(
tmpdir, mymodel, "model_id_1", layers[:2], inputs, "test", "0"
)
av_test = AV._construct_file_search(tmpdir, "model_id_1", identifier="test")
av_test = glob.glob(av_test)
self.assertEqual(len(av_test), len(layers))
def test_generate_dataset_activations(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
batch_size = high // 2
mymodel = BasicLinearReLULinear(num_features)
mydata = RangeDataset(low, high, num_features)
layers: List[str] = [
value[0] for value in mymodel.named_modules() if value[0]
]
# First AV generation on last 2 layers
layer_AVDatasets = AV.generate_dataset_activations(
tmpdir,
mymodel,
"model_id1",
layers[1:],
DataLoader(mydata, batch_size, shuffle=False),
"src",
return_activations=True,
)
av_src = AV._construct_file_search(
tmpdir, model_id="model_id1", identifier="src"
)
av_src = glob.glob(av_src)
self.assertEqual(len(av_src), high / batch_size * len(layers[1:]))
self.assertTrue(isinstance(layer_AVDatasets, list))
layer_AVDatasets = cast(list, layer_AVDatasets)
self.assertEqual(len(layer_AVDatasets), len(layers[1:]))
for layer_AVDataset in layer_AVDatasets:
self.assertEqual(len(layer_AVDataset), high / batch_size)
# Second AV generation on first 2 layers.
# Second layer overlaps with existing activations, should be loaded.
layer_AVDatasets = AV.generate_dataset_activations(
tmpdir,
mymodel,
"model_id1",
layers[:2],
DataLoader(mydata, batch_size, shuffle=False),
"src",
return_activations=True,
)
av_src = AV._construct_file_search(
tmpdir, model_id="model_id1", identifier="src"
)
av_src = glob.glob(av_src)
self.assertEqual(len(av_src), high / batch_size * len(layers))
self.assertTrue(isinstance(layer_AVDatasets, list))
layer_AVDatasets = cast(list, layer_AVDatasets)
self.assertEqual(len(layer_AVDatasets), len(layers[:2]))
for layer_AVDataset in layer_AVDatasets:
self.assertEqual(len(layer_AVDataset), high / batch_size)
# check that if return_activations is False, None is returned
self.assertIsNone(
AV.generate_dataset_activations(
tmpdir,
mymodel,
"model_id1",
layers[:2],
DataLoader(mydata, batch_size, shuffle=False),
"src",
return_activations=False,
)
)
def test_equal_activation(self) -> None:
with tempfile.TemporaryDirectory() as tmpdir:
num_features = 4
low, high = 0, 16
mymodel = BasicLinearReLULinear(num_features)
mydata = RangeDataset(low, high, num_features)
layers: List[str] = [
value[0] for value in mymodel.named_modules() if value[0]
]
# First AV generation on last 2 layers
test_input = mydata[1].unsqueeze(0)
model_id = "id_1"
identifier = "test"
num_id = "0"
AV._compute_and_save_activations(
tmpdir, mymodel, model_id, layers[2], test_input, identifier, num_id
)
act_dataset = AV.load(tmpdir, model_id, identifier, layers[2], num_id)
_layer_act = [act.squeeze(0) for act in DataLoader(act_dataset)]
act = torch.cat(_layer_act)
out = mymodel(test_input)
assertTensorAlmostEqual(self, out, act)
|
#!/usr/bin/env python3
import torch
from captum._utils.models.linear_model.model import (
SGDLasso,
SGDLinearRegression,
SGDRidge,
)
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
def _evaluate(test_data, classifier):
classifier.eval()
l1_loss = 0.0
l2_loss = 0.0
n = 0
l2_losses = []
with torch.no_grad():
for data in test_data:
if len(data) == 2:
x, y = data
w = None
else:
x, y, w = data
out = classifier(x)
y = y.view(x.shape[0], -1)
assert y.shape == out.shape
if w is None:
l1_loss += (out - y).abs().sum(0).to(dtype=torch.float64)
l2_loss += ((out - y) ** 2).sum(0).to(dtype=torch.float64)
l2_losses.append(((out - y) ** 2).to(dtype=torch.float64))
else:
l1_loss += (
(w.view(-1, 1) * (out - y)).abs().sum(0).to(dtype=torch.float64)
)
l2_loss += (
(w.view(-1, 1) * ((out - y) ** 2)).sum(0).to(dtype=torch.float64)
)
l2_losses.append(
(w.view(-1, 1) * ((out - y) ** 2)).to(dtype=torch.float64)
)
n += x.shape[0]
l2_losses = torch.cat(l2_losses, dim=0)
assert n > 0
# just to double check
assert ((l2_losses.mean(0) - l2_loss / n).abs() <= 0.1).all()
classifier.train()
return {"l1": l1_loss / n, "l2": l2_loss / n}
class TestLinearModel(BaseTest):
MAX_POINTS: int = 3
def train_and_compare(
self,
model_type,
xs,
ys,
expected_loss,
expected_reg=0.0,
expected_hyperplane=None,
norm_hyperplane=True,
weights=None,
delta=0.1,
init_scheme="zeros",
objective="lasso",
bias=True,
):
assert objective in ["lasso", "ridge", "ols"]
if weights is None:
train_dataset = torch.utils.data.TensorDataset(xs, ys)
else:
train_dataset = torch.utils.data.TensorDataset(xs, ys, weights)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=len(train_dataset), num_workers=0
)
model = model_type(bias=bias)
model.fit(
train_loader,
init_scheme=init_scheme,
max_epoch=150,
initial_lr=0.1,
patience=5,
)
self.assertTrue(model.bias() is not None if bias else model.bias() is None)
l2_loss = _evaluate(train_loader, model)["l2"]
if objective == "lasso":
reg = model.representation().norm(p=1).view_as(l2_loss)
elif objective == "ridge":
reg = model.representation().norm(p=2).view_as(l2_loss)
else:
assert objective == "ols"
reg = torch.zeros_like(l2_loss)
if not isinstance(expected_loss, torch.Tensor):
expected_loss = torch.tensor([expected_loss], dtype=l2_loss.dtype).view(1)
if not isinstance(expected_reg, torch.Tensor):
expected_reg = torch.tensor([expected_reg], dtype=reg.dtype)
assertTensorAlmostEqual(self, l2_loss, expected_loss, delta=delta)
assertTensorAlmostEqual(self, reg, expected_reg, delta=delta)
if expected_hyperplane is not None:
h = model.representation()
if norm_hyperplane:
h /= h.norm(p=2)
assertTensorAlmostEqual(self, h, expected_hyperplane, delta=delta)
def test_simple_linear_regression(self) -> None:
xs = torch.randn(TestLinearModel.MAX_POINTS, 1)
ys = 3 * xs + 1
self.train_and_compare(
SGDLinearRegression,
xs,
ys,
expected_loss=0,
expected_reg=0,
objective="ols",
)
self.train_and_compare(
SGDLasso,
xs,
ys,
expected_loss=3,
expected_reg=0,
objective="lasso",
delta=0.2,
)
self.train_and_compare(
SGDRidge,
xs,
ys,
expected_loss=3,
expected_reg=0,
objective="ridge",
delta=0.2,
)
def test_simple_multi_output(self) -> None:
xs = torch.randn(TestLinearModel.MAX_POINTS, 1)
y1 = 3 * xs + 1
y2 = -5 * xs
ys = torch.stack((y1, y2), dim=1).squeeze()
self.train_and_compare(
SGDLinearRegression,
xs,
ys,
expected_loss=torch.DoubleTensor([0, 0]),
expected_reg=torch.DoubleTensor([0, 0]),
objective="ols",
)
def test_simple_linear_classification(self) -> None:
xs = torch.tensor([[0.5, 0.5], [-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5]])
ys = torch.tensor([1.0, -1.0, 1.0, -1.0])
self.train_and_compare(
SGDLinearRegression,
xs,
ys,
expected_loss=0,
expected_reg=0,
objective="ols",
)
self.train_and_compare(
SGDLasso, xs, ys, expected_loss=1, expected_reg=0.0, objective="lasso"
)
self.train_and_compare(
SGDRidge, xs, ys, expected_loss=1, expected_reg=0.0, objective="ridge"
)
ys = torch.tensor([1.0, 0.0, 1.0, 0.0])
self.train_and_compare(
SGDLinearRegression,
xs,
ys,
expected_loss=0,
expected_reg=0,
objective="ols",
)
self.train_and_compare(
SGDLasso, xs, ys, expected_loss=0.25, expected_reg=0, objective="lasso"
)
self.train_and_compare(
SGDRidge, xs, ys, expected_loss=0.25, expected_reg=0, objective="ridge"
)
def test_simple_xor_problem(self) -> None:
r"""
^
o | x
---|--->
x | o
"""
xs = torch.tensor([[0.5, 0.5], [-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5]])
ys = torch.tensor([1.0, 1.0, -1.0, -1.0])
expected_hyperplane = torch.Tensor([[0, 0]])
self.train_and_compare(
SGDLinearRegression,
xs,
ys,
expected_loss=1,
expected_reg=0,
objective="ols",
expected_hyperplane=expected_hyperplane,
norm_hyperplane=False,
bias=False,
)
self.train_and_compare(
SGDLasso,
xs,
ys,
expected_loss=1,
expected_reg=0,
objective="lasso",
expected_hyperplane=expected_hyperplane,
norm_hyperplane=False,
bias=False,
)
self.train_and_compare(
SGDRidge,
xs,
ys,
expected_loss=1,
expected_reg=0,
objective="ridge",
expected_hyperplane=expected_hyperplane,
norm_hyperplane=False,
bias=False,
)
def test_weighted_problem(self) -> None:
r"""
^
0 | x
---|--->
0 | o
"""
xs = torch.tensor([[0.5, 0.5], [-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5]])
ys = torch.tensor([1.0, 1.0, -1.0, -1.0])
weights = torch.tensor([1.0, 0.0, 1.0, 0.0])
self.train_and_compare(
SGDLinearRegression,
xs,
ys,
expected_loss=0,
expected_reg=0,
expected_hyperplane=torch.Tensor([[0.0, 1.0]]),
weights=weights,
norm_hyperplane=True,
init_scheme="zeros",
objective="ols",
bias=False,
)
self.train_and_compare(
SGDLasso,
xs,
ys,
expected_loss=0.5,
expected_reg=0,
expected_hyperplane=torch.Tensor([[0.0, 0.0]]),
weights=weights,
norm_hyperplane=False,
init_scheme="zeros",
objective="lasso",
bias=False,
)
self.train_and_compare(
SGDRidge,
xs,
ys,
expected_loss=0.5,
expected_reg=0,
expected_hyperplane=torch.Tensor([[0.0, 0.0]]),
weights=weights,
norm_hyperplane=False,
init_scheme="zeros",
objective="ridge",
bias=False,
)
|
#!/usr/bin/env python3
import torch
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
class HelpersTest(BaseTest):
def test_assert_tensor_almost_equal(self) -> None:
with self.assertRaises(AssertionError) as cm:
assertTensorAlmostEqual(self, [[1.0]], [[1.0]])
self.assertEqual(
cm.exception.args,
("Actual parameter given for comparison must be a tensor.",),
)
with self.assertRaises(AssertionError) as cm:
assertTensorAlmostEqual(self, torch.tensor([[]]), torch.tensor([[1.0]]))
self.assertEqual(
cm.exception.args,
(
"Expected tensor with shape: torch.Size([1, 1]). Actual shape torch.Size([1, 0]).", # noqa: E501
),
)
assertTensorAlmostEqual(self, torch.tensor([[1.0]]), [[1.0]])
with self.assertRaises(AssertionError) as cm:
assertTensorAlmostEqual(self, torch.tensor([[1.0]]), [1.0])
self.assertEqual(
cm.exception.args,
(
"Expected tensor with shape: torch.Size([1]). Actual shape torch.Size([1, 1]).", # noqa: E501
),
)
assertTensorAlmostEqual(
self, torch.tensor([[1.0, 1.0]]), [[1.0, 0.0]], delta=1.0, mode="max"
)
with self.assertRaises(AssertionError) as cm:
assertTensorAlmostEqual(
self, torch.tensor([[1.0, 1.0]]), [[1.0, 0.0]], mode="max"
)
self.assertEqual(
cm.exception.args,
(
"Values at index 0, tensor([1., 1.]) and tensor([1., 0.]), differ more than by 0.0001", # noqa: E501
),
)
assertTensorAlmostEqual(
self, torch.tensor([[1.0, 1.0]]), [[1.0, 0.0]], delta=1.0
)
with self.assertRaises(AssertionError):
assertTensorAlmostEqual(self, torch.tensor([[1.0, 1.0]]), [[1.0, 0.0]])
|
#!/usr/bin/env python3
import unittest
from typing import Callable, Tuple
import torch
from captum._utils.gradient import apply_gradient_requirements
from captum._utils.sample_gradient import (
_reset_sample_grads,
SampleGradientWrapper,
SUPPORTED_MODULES,
)
from packaging import version
from tests.helpers.basic import assertTensorAlmostEqual, BaseTest
from tests.helpers.basic_models import (
BasicModel_ConvNet_One_Conv,
BasicModel_ConvNetWithPaddingDilation,
BasicModel_MultiLayer,
)
from torch import Tensor
from torch.nn import Module
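# SampleGradientWrapper records per-sample parameter gradients for the supported module
# types; the tests compare them against gradients obtained from separate per-example
# backward passes. The "sum"/"mean" loss_type tells the wrapper how the batch loss
# aggregates individual samples so the per-sample gradients can be recovered correctly.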
class Test(BaseTest):
def test_sample_grads_linear_sum(self) -> None:
model = BasicModel_MultiLayer(multi_input_module=True)
inp = (torch.randn(6, 3), torch.randn(6, 3))
self._compare_sample_grads_per_sample(model, inp, lambda x: torch.sum(x), "sum")
def test_sample_grads_linear_mean(self) -> None:
model = BasicModel_MultiLayer(multi_input_module=True)
inp = (20 * torch.randn(6, 3),)
self._compare_sample_grads_per_sample(model, inp, lambda x: torch.mean(x))
def test_sample_grads_conv_sum(self) -> None:
model = BasicModel_ConvNet_One_Conv()
inp = (123 * torch.randn(6, 1, 4, 4),)
self._compare_sample_grads_per_sample(model, inp, lambda x: torch.sum(x), "sum")
def test_sample_grads_conv_mean_multi_inp(self) -> None:
model = BasicModel_ConvNet_One_Conv()
inp = (20 * torch.randn(6, 1, 4, 4), 9 * torch.randn(6, 1, 4, 4))
self._compare_sample_grads_per_sample(model, inp, lambda x: torch.mean(x))
def test_sample_grads_modified_conv_mean(self) -> None:
if version.parse(torch.__version__) < version.parse("1.8.0"):
raise unittest.SkipTest(
"Skipping sample gradient test with 3D linear module"
"since torch version < 1.8"
)
model = BasicModel_ConvNetWithPaddingDilation()
inp = (20 * torch.randn(6, 1, 5, 5),)
self._compare_sample_grads_per_sample(
model, inp, lambda x: torch.mean(x), "mean"
)
def test_sample_grads_modified_conv_sum(self) -> None:
if version.parse(torch.__version__) < version.parse("1.8.0"):
raise unittest.SkipTest(
"Skipping sample gradient test with 3D linear module"
"since torch version < 1.8"
)
model = BasicModel_ConvNetWithPaddingDilation()
inp = (20 * torch.randn(6, 1, 5, 5),)
self._compare_sample_grads_per_sample(model, inp, lambda x: torch.sum(x), "sum")
def _compare_sample_grads_per_sample(
self,
model: Module,
inputs: Tuple[Tensor, ...],
loss_fn: Callable,
loss_type: str = "mean",
):
wrapper = SampleGradientWrapper(model)
wrapper.add_hooks()
apply_gradient_requirements(inputs)
out = model(*inputs)
wrapper.compute_param_sample_gradients(loss_fn(out), loss_type)
batch_size = inputs[0].shape[0]
for i in range(batch_size):
model.zero_grad()
single_inp = tuple(inp[i : i + 1] for inp in inputs)
out = model(*single_inp)
loss_fn(out).backward()
for layer in model.modules():
if isinstance(layer, tuple(SUPPORTED_MODULES.keys())):
assertTensorAlmostEqual(
self,
layer.weight.grad,
layer.weight.sample_grad[i], # type: ignore
mode="max",
)
assertTensorAlmostEqual(
self,
layer.bias.grad,
layer.bias.sample_grad[i], # type: ignore
mode="max",
)
    def test_sample_grads_layer_modules(self) -> None:
        """
        Tests that when the `layer_modules` argument is specified for
        `SampleGradientWrapper`, per-sample gradients are computed only for the
        parameters of the specified layers.
        """
model = BasicModel_ConvNet_One_Conv()
inp = (20 * torch.randn(6, 1, 4, 4), 9 * torch.randn(6, 1, 4, 4))
# possible candidates for `layer_modules`, which are the modules whose
# parameters we want to compute sample grads for
layer_moduless = [[model.conv1], [model.fc1], [model.conv1, model.fc1]]
# hard coded all modules we want to check
all_modules = [model.conv1, model.fc1]
for layer_modules in layer_moduless:
# we will call the wrapper multiple times, so should reset each time
for module in all_modules:
_reset_sample_grads(module)
# compute sample grads
wrapper = SampleGradientWrapper(model, layer_modules)
wrapper.add_hooks()
apply_gradient_requirements(inp)
out = model(*inp)
wrapper.compute_param_sample_gradients(torch.sum(out), "sum")
for module in all_modules:
if module in layer_modules:
                    # If sample grads were calculated for this layer, none of its
                    # parameters' `sample_grad` attributes would still be an int:
                    # they were all reset to 0 at the start of the loop, and computing
                    # sample grads overrides that 0 with a tensor. So this checks that
                    # sample grads were indeed calculated for the desired layers.
for parameter in module.parameters():
assert not isinstance(parameter.sample_grad, int)
else:
# For the layers we do not want sample grads for, their
# `sample_grad` should still be 0, since they should not have been
# over-written.
for parameter in module.parameters():
assert parameter.sample_grad == 0
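def _illustrative_per_sample_grad_check() -> None:
    """
    Hedged illustration, not invoked by the tests above: a minimal sketch of
    the invariant `_compare_sample_grads_per_sample` verifies. For a summed
    loss, per-example gradients obtained from separate backward passes add up
    to the full-batch gradient. A plain `torch.nn.Linear` stands in for the
    Captum test models; every name in this helper is illustrative only.
    """
    lin = torch.nn.Linear(4, 1)
    x = torch.randn(6, 4)
    # full-batch gradient of the summed output w.r.t. the weight
    lin.zero_grad()
    lin(x).sum().backward()
    batch_grad = lin.weight.grad.clone()
    # per-example gradients, one backward pass per sample
    per_sample = [
        torch.autograd.grad(lin(x[i : i + 1]).sum(), lin.weight)[0]
        for i in range(x.shape[0])
    ]
    assert torch.allclose(batch_grad, torch.stack(per_sample).sum(dim=0), atol=1e-5)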
|
import argparse
import random
from typing import Optional
import captum._utils.models.linear_model.model as pytorch_model_module
import numpy as np
import sklearn.datasets as datasets
import torch
from tests.utils.test_linear_model import _evaluate
from torch.utils.data import DataLoader, TensorDataset
def sklearn_dataset_to_loaders(
data, train_prop=0.7, batch_size=64, num_workers=4, shuffle=False, one_hot=False
):
xs, ys = data
    if one_hot and ys.dtype != float:
        # one-hot encode integer class labels
oh = np.zeros((ys.size, ys.max() + 1))
oh[np.arange(ys.size), ys] = 1
ys = oh
dataset = TensorDataset(torch.FloatTensor(xs), torch.FloatTensor(ys))
lens = [int(train_prop * len(xs))]
lens += [len(xs) - lens[0]]
train_dset, val_dset = torch.utils.data.random_split(dataset, lens)
train_loader = DataLoader(
train_dset,
batch_size=min(batch_size, lens[0]),
shuffle=shuffle,
num_workers=num_workers,
)
val_loader = DataLoader(
val_dset,
batch_size=min(batch_size, lens[1]),
num_workers=num_workers,
shuffle=False,
)
return train_loader, val_loader, xs.shape[1], xs.shape[0]
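# Hedged usage sketch for the helper above (illustrative only; `load_diabetes`
# is one of the sklearn loaders listed among the argument parser defaults below):
#
#     train_loader, val_loader, in_features, num_samples = sklearn_dataset_to_loaders(
#         datasets.load_diabetes(return_X_y=True), batch_size=32, num_workers=0
#     )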
def compare_to_sk_learn(
max_epoch: int,
train_loader: DataLoader,
val_loader: DataLoader,
train_prop: float,
sklearn_model_type: str,
pytorch_model_type: str,
norm_type: Optional[str],
objective: str,
alpha: float,
    norm_sklearn: bool = False,
    init_scheme: str = "zeros",
):
if "LinearRegression" not in sklearn_model_type:
sklearn_classifier = getattr(pytorch_model_module, sklearn_model_type)(
alpha=alpha
)
else:
sklearn_classifier = getattr(pytorch_model_module, sklearn_model_type)()
pytorch_classifier = getattr(pytorch_model_module, pytorch_model_type)(
        norm_type=norm_type,
)
sklearn_stats = sklearn_classifier.fit(
train_data=train_loader,
        norm_input=norm_sklearn,
)
pytorch_stats = pytorch_classifier.fit(
train_data=train_loader,
max_epoch=max_epoch,
init_scheme=init_scheme,
alpha=alpha,
)
sklearn_stats.update(_evaluate(val_loader, sklearn_classifier))
pytorch_stats.update(_evaluate(val_loader, pytorch_classifier))
train_stats_pytorch = _evaluate(train_loader, pytorch_classifier)
train_stats_sklearn = _evaluate(train_loader, sklearn_classifier)
o_pytorch = {"l2": train_stats_pytorch["l2"]}
o_sklearn = {"l2": train_stats_sklearn["l2"]}
pytorch_h = pytorch_classifier.representation()
sklearn_h = sklearn_classifier.representation()
if objective == "ridge":
o_pytorch["l2_reg"] = alpha * pytorch_h.norm(p=2, dim=-1)
o_sklearn["l2_reg"] = alpha * sklearn_h.norm(p=2, dim=-1)
elif objective == "lasso":
o_pytorch["l1_reg"] = alpha * pytorch_h.norm(p=1, dim=-1)
o_sklearn["l1_reg"] = alpha * sklearn_h.norm(p=1, dim=-1)
rel_diff = (sum(o_sklearn.values()) - sum(o_pytorch.values())) / abs(
sum(o_sklearn.values())
)
return (
{
"objective_rel_diff": rel_diff.tolist(),
"objective_pytorch": o_pytorch,
"objective_sklearn": o_sklearn,
},
sklearn_stats,
pytorch_stats,
)
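# For reference (a reading of the code above, not a claim about either
# library's internals): the compared quantities are the training l2 loss plus
# alpha * ||w||_2 for the "ridge" objective or alpha * ||w||_1 for "lasso",
# and `objective_rel_diff` is (sklearn - pytorch) / |sklearn| over those totals.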
def main(args):
    if args.seed is not None:
        torch.manual_seed(args.seed)
        random.seed(args.seed)
assert args.norm_type in [None, "layer_norm", "batch_norm"]
print(
"dataset,num_samples,dimensionality,objective_diff,objective_pytorch,"
+ "objective_sklearn,pytorch_time,sklearn_time,pytorch_l2_val,sklearn_l2_val"
)
for dataset in args.datasets:
dataset_fn = getattr(datasets, dataset)
data = dataset_fn(return_X_y=True)
(
train_loader,
val_loader,
in_features,
num_samples,
) = sklearn_dataset_to_loaders(
data,
batch_size=args.batch_size,
num_workers=args.workers,
shuffle=args.shuffle,
one_hot=args.one_hot,
)
similarity, sklearn_stats, pytorch_stats = compare_to_sk_learn(
alpha=args.alpha,
max_epoch=args.max_epoch,
train_loader=train_loader,
val_loader=val_loader,
train_prop=args.training_prop,
pytorch_model_type=args.pytorch_model_type,
sklearn_model_type=args.sklearn_model_type,
                norm_type=args.norm_type,
                norm_sklearn=args.norm_sklearn,
init_scheme=args.init_scheme,
objective=args.objective,
)
print(
f"{dataset},{num_samples},{in_features},{similarity['objective_rel_diff']},"
+ f"{similarity['objective_pytorch']},{similarity['objective_sklearn']},"
+ f"{pytorch_stats['train_time']},{sklearn_stats['train_time']},"
+ f"{pytorch_stats['l2']},{sklearn_stats['l2']}"
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="train & test linear model with SGD + compare to sklearn"
)
parser.add_argument(
"--norm_type",
type=str,
default=None,
)
parser.add_argument(
"--datasets",
type=str,
nargs="+",
default=[
"load_boston",
"load_breast_cancer",
"load_diabetes",
"fetch_california_housing",
],
)
parser.add_argument("--initial_lr", type=float, default=0.01)
parser.add_argument("--alpha", type=float, default=1.0)
parser.add_argument("--max_epoch", type=int, default=100)
parser.add_argument("--seed", type=int, default=None)
parser.add_argument("--shuffle", default=False, action="store_true")
parser.add_argument("--one_hot", default=False, action="store_true")
parser.add_argument("--batch_size", type=int, default=256)
parser.add_argument("--training_prop", type=float, default=0.7)
parser.add_argument("--workers", type=int, default=1)
parser.add_argument("--sklearn_model_type", type=str, default="Lasso")
parser.add_argument("--pytorch_model_type", type=str, default="SGDLasso")
parser.add_argument("--init_scheme", type=str, default="xavier")
parser.add_argument("--norm_sklearn", default=False, action="store_true")
parser.add_argument("--objective", type=str, default="lasso")
args = parser.parse_args()
main(args)
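# Example invocation (hedged: the filename is a placeholder, flag names come
# from the parser above, and the values shown are the parser defaults):
#
#     python compare_linear_models.py --datasets load_diabetes \
#         --objective lasso --alpha 1.0 --max_epoch 100 --seed 0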
|