diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/backcompat/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/backcompat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fdd16eec5aca342c4eab9e3910daac074225d6dc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/backcompat/__init__.py @@ -0,0 +1,21 @@ +from torch._C import _set_backcompat_broadcast_warn +from torch._C import _get_backcompat_broadcast_warn +from torch._C import _set_backcompat_keepdim_warn +from torch._C import _get_backcompat_keepdim_warn + + +class Warning: + def __init__(self, setter, getter): + self.setter = setter + self.getter = getter + + def set_enabled(self, value): + self.setter(value) + + def get_enabled(self): + return self.getter() + + enabled = property(get_enabled, set_enabled) + +broadcast_warning = Warning(_set_backcompat_broadcast_warn, _get_backcompat_broadcast_warn) +keepdim_warning = Warning(_set_backcompat_keepdim_warn, _get_backcompat_keepdim_warn) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/backcompat/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/backcompat/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..80f725b0c9660107aa1b5d505cb48556331ce2ba Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/backcompat/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/bottleneck/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/bottleneck/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/bottleneck/__main__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/bottleneck/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..f7fd209e1438fa130c6351ad50a7c878bb1bdbf2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/bottleneck/__main__.py @@ -0,0 +1,229 @@ +import argparse +import cProfile +import pstats +import sys +import os +from typing import Dict + +import torch +from torch.autograd import profiler +from torch.utils.collect_env import get_env_info + + +def redirect_argv(new_argv): + sys.argv[:] = new_argv[:] + + +def compiled_with_cuda(sysinfo): + if sysinfo.cuda_compiled_version: + return f'compiled w/ CUDA {sysinfo.cuda_compiled_version}' + return 'not compiled w/ CUDA' + + +env_summary = """ +-------------------------------------------------------------------------------- + Environment Summary +-------------------------------------------------------------------------------- +PyTorch {pytorch_version}{debug_str} {cuda_compiled} +Running with Python {py_version} and {cuda_runtime} + +`{pip_version} list` truncated output: +{pip_list_output} +""".strip() + + +def run_env_analysis(): + print('Running environment analysis...') + info = get_env_info() + + result: Dict[str, str] = {} + + debug_str = '' + if info.is_debug_build: + debug_str = ' DEBUG' + + cuda_avail = '' + if info.is_cuda_available: + cuda = info.cuda_runtime_version + if cuda is not None: + cuda_avail = 'CUDA ' + cuda + else: + cuda = 'CUDA unavailable' + + pip_version = info.pip_version + pip_list_output = info.pip_packages + if pip_list_output is None: + pip_list_output = 'Unable to fetch' + + result = { + 'debug_str': debug_str, + 
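+        # NOTE: the keys in this dict must match the placeholders in `env_summary` above.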
'pytorch_version': info.torch_version,
+        'cuda_compiled': compiled_with_cuda(info),
+        'py_version': f'{sys.version_info[0]}.{sys.version_info[1]}',
+        'cuda_runtime': cuda_avail,
+        'pip_version': pip_version,
+        'pip_list_output': pip_list_output,
+    }
+
+    return env_summary.format(**result)
+
+
+def run_cprofile(code, globs, launch_blocking=False):
+    print('Running your script with cProfile')
+    prof = cProfile.Profile()
+    prof.enable()
+    exec(code, globs, None)
+    prof.disable()
+    return prof
+
+
+cprof_summary = """
+--------------------------------------------------------------------------------
+  cProfile output
+--------------------------------------------------------------------------------
+""".strip()
+
+
+def print_cprofile_summary(prof, sortby='tottime', topk=15):
+    print(cprof_summary)
+    cprofile_stats = pstats.Stats(prof).sort_stats(sortby)
+    cprofile_stats.print_stats(topk)
+
+
+def run_autograd_prof(code, globs):
+    def run_prof(use_cuda=False):
+        with profiler.profile(use_cuda=use_cuda) as prof:
+            exec(code, globs, None)
+        return prof
+
+    print('Running your script with the autograd profiler...')
+    result = [run_prof(use_cuda=False)]
+    if torch.cuda.is_available():
+        result.append(run_prof(use_cuda=True))
+    else:
+        result.append(None)
+
+    return result
+
+
+autograd_prof_summary = """
+--------------------------------------------------------------------------------
+  autograd profiler output ({mode} mode)
+--------------------------------------------------------------------------------
+        {description}
+{cuda_warning}
+{output}
+""".strip()
+
+
+def print_autograd_prof_summary(prof, mode, sortby='cpu_time', topk=15):
+    valid_sortby = ['cpu_time', 'cuda_time', 'cpu_time_total', 'cuda_time_total', 'count']
+    if sortby not in valid_sortby:
+        warn = ('WARNING: invalid sorting option for autograd profiler results: {}\n'
+                'Expected one of `cpu_time`, `cuda_time`, `cpu_time_total`, '
+                '`cuda_time_total`, or `count`. Defaulting to `cpu_time`.')
+        print(warn.format(sortby))
+        sortby = 'cpu_time'
+
+    if mode == 'CUDA':
+        cuda_warning = ('\n\tBecause the autograd profiler uses the CUDA event API,\n'
+                        '\tthe CUDA time column reports approximately max(cuda_time, cpu_time).\n'
+                        '\tPlease ignore this output if your code does not use CUDA.\n')
+    else:
+        cuda_warning = ''
+
+    sorted_events = sorted(prof.function_events,
+                           key=lambda x: getattr(x, sortby), reverse=True)
+    topk_events = sorted_events[:topk]
+
+    result = {
+        'mode': mode,
+        'description': f'top {topk} events sorted by {sortby}',
+        'output': torch.autograd.profiler_util._build_table(topk_events),
+        'cuda_warning': cuda_warning
+    }
+
+    print(autograd_prof_summary.format(**result))
+
+
+descript = """
+`bottleneck` is a tool that can be used as an initial step for debugging
+bottlenecks in your program.
+
+It summarizes runs of your script with the Python profiler and PyTorch's
+autograd profiler. Because your script will be profiled, please ensure that it
+exits in a finite amount of time.
+
+For more complicated uses of the profilers, please see
+https://docs.python.org/3/library/profile.html and
+https://pytorch.org/docs/master/autograd.html#profiler for more information.
+""".strip()
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description=descript)
+    parser.add_argument('scriptfile', type=str,
+                        help='Path to the script to be run. '
+                             'Usually run with `python path/to/script`.')
+    parser.add_argument('args', type=str, nargs=argparse.REMAINDER,
+                        help='Command-line arguments to be passed to the script.')
+    return parser.parse_args()
+
+
+def cpu_time_total(autograd_prof):
+    return sum([event.cpu_time_total for event in autograd_prof.function_events])
+
+
+def main():
+    args = parse_args()
+
+    # Customizable constants.
+    scriptfile = args.scriptfile
+    scriptargs = [] if args.args is None else args.args
+    scriptargs.insert(0, scriptfile)
+    cprofile_sortby = 'tottime'
+    cprofile_topk = 15
+    autograd_prof_sortby = 'cpu_time_total'
+    autograd_prof_topk = 15
+
+    redirect_argv(scriptargs)
+
+    sys.path.insert(0, os.path.dirname(scriptfile))
+    with open(scriptfile, 'rb') as stream:
+        code = compile(stream.read(), scriptfile, 'exec')
+    globs = {
+        '__file__': scriptfile,
+        '__name__': '__main__',
+        '__package__': None,
+        '__cached__': None,
+    }
+
+    print(descript)
+
+    env_summary = run_env_analysis()
+
+    if torch.cuda.is_available():
+        torch.cuda.init()
+    cprofile_prof = run_cprofile(code, globs)
+    autograd_prof_cpu, autograd_prof_cuda = run_autograd_prof(code, globs)
+
+    print(env_summary)
+    print_cprofile_summary(cprofile_prof, cprofile_sortby, cprofile_topk)
+
+    if not torch.cuda.is_available():
+        print_autograd_prof_summary(autograd_prof_cpu, 'CPU', autograd_prof_sortby, autograd_prof_topk)
+        return
+
+    # Print both the result of the CPU-mode and CUDA-mode autograd profilers
+    # if their execution times are very different.
+    cuda_prof_exec_time = cpu_time_total(autograd_prof_cuda)
+    if len(autograd_prof_cpu.function_events) > 0:
+        cpu_prof_exec_time = cpu_time_total(autograd_prof_cpu)
+        pct_diff = (cuda_prof_exec_time - cpu_prof_exec_time) / cuda_prof_exec_time
+        if abs(pct_diff) > 0.05:
+            print_autograd_prof_summary(autograd_prof_cpu, 'CPU', autograd_prof_sortby, autograd_prof_topk)
+
+    print_autograd_prof_summary(autograd_prof_cuda, 'CUDA', autograd_prof_sortby, autograd_prof_topk)
+
+if __name__ == '__main__':
+    main()
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fab3911faac5da55d1870531f7c3c72d4376fc23
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__main__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__main__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..63ad4a8a156544654aae8a49d6f718220b11f1dd
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/bottleneck/__pycache__/__main__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..62cfdf91f1ea416cf6ed72f966460ec7602fe68b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__init__.py
@@ -0,0 +1,51 @@
+r"""Utility classes & functions for data loading. Code in this folder is mostly used by ../dataloader.py.
+
+A lot of multiprocessing is used in data loading, which only supports running
+functions defined in the global environment (py2 can't serialize static methods).
+Therefore, for code tidiness, we put these functions into different files in this
+folder.
+"""
+
+import sys
+import atexit
+
+# old private location of the ExceptionWrapper that some users rely on:
+from torch._utils import ExceptionWrapper
+
+
+IS_WINDOWS = sys.platform == "win32"
+
+
+MP_STATUS_CHECK_INTERVAL = 5.0
+r"""Interval (in seconds) to check status of processes to avoid hanging in
+    multiprocessing data loading. This is mainly used in getting data from
+    another process, in which case we need to periodically check whether the
+    sender is alive to prevent hanging."""
+
+
+python_exit_status = False
+r"""Whether Python is shutting down. This flag is guaranteed to be set before
+the Python core library resources are freed, but Python may already be exiting
+for some time when this is set.
+
+The hook that sets this flag is `_set_python_exit_flag`; it is inspired by a similar
+hook in the Python 3.7 multiprocessing library:
+https://github.com/python/cpython/blob/d4d60134b29290049e28df54f23493de4f1824b6/Lib/multiprocessing/util.py#L277-L327
+"""
+
+
+try:
+    import numpy
+    HAS_NUMPY = True
+except ModuleNotFoundError:
+    HAS_NUMPY = False
+
+
+def _set_python_exit_flag():
+    global python_exit_status
+    python_exit_status = True
+
+atexit.register(_set_python_exit_flag)
+
+
+from . import worker, signal_handling, pin_memory, collate, fetch
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c25fa2c7f20e5163cd8b6a196cfac41672f57f45
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/collate.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/collate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d539d4f9e0e5dc0cc16b38fecdc1e59fcbffb479
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/collate.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/fetch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/fetch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..50b61bb155564f8398d7e3047180e6867c9c28f5
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/fetch.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/pin_memory.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/pin_memory.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e5a40c71213877a3568864707e0b4eee4ee67efc
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/pin_memory.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/signal_handling.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/signal_handling.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d75d675db027b20c5a465ab6bfada307465ccfdc
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/signal_handling.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/worker.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/worker.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb87dfe82561efc89e2767d0bc5c2a038a7058e6
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/__pycache__/worker.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py
new file mode 100644
index 0000000000000000000000000000000000000000..6460e2f947d10d456354723073542466db00fde1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/collate.py
@@ -0,0 +1,316 @@
+r"""Contains definitions of the methods used by the _BaseDataLoaderIter workers.
+
+These methods are used to collate samples fetched from the dataset into Tensor(s).
+These **need** to be in global scope since Py2 doesn't support serializing
+static methods.
+
+`default_collate` and `default_convert` are exposed to users via 'dataloader.py'.
+"""
+
+import collections
+import contextlib
+import copy
+import re
+import torch
+
+from typing import Callable, Dict, Optional, Tuple, Type, Union
+
+np_str_obj_array_pattern = re.compile(r'[SaUO]')
+
+
+def default_convert(data):
+    r"""
+    Convert each NumPy array element into a :class:`torch.Tensor`.
+
+    If the input is a `Sequence`, `Collection`, or `Mapping`, it tries to convert each element inside to a :class:`torch.Tensor`.
+    If the input is not a NumPy array, it is left unchanged.
+    This is used as the default function for collation when both `batch_sampler` and `batch_size`
+    are NOT defined in :class:`~torch.utils.data.DataLoader`.
+
+    The general input type to output type mapping is similar to that
+    of :func:`~torch.utils.data.default_collate`. See the description there for more details.
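+    One notable exception: NumPy arrays with a string or object dtype are
+    returned unchanged, since they cannot be represented as tensors.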
+
+    Args:
+        data: a single data point to be converted
+
+    Examples:
+        >>> # xdoctest: +SKIP
+        >>> # Example with `int`
+        >>> default_convert(0)
+        0
+        >>> # Example with NumPy array
+        >>> default_convert(np.array([0, 1]))
+        tensor([0, 1])
+        >>> # Example with NamedTuple
+        >>> Point = namedtuple('Point', ['x', 'y'])
+        >>> default_convert(Point(0, 0))
+        Point(x=0, y=0)
+        >>> default_convert(Point(np.array(0), np.array(0)))
+        Point(x=tensor(0), y=tensor(0))
+        >>> # Example with List
+        >>> default_convert([np.array([0, 1]), np.array([2, 3])])
+        [tensor([0, 1]), tensor([2, 3])]
+    """
+    elem_type = type(data)
+    if isinstance(data, torch.Tensor):
+        return data
+    elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
+            and elem_type.__name__ != 'string_':
+        # array of string classes and object
+        if elem_type.__name__ == 'ndarray' \
+                and np_str_obj_array_pattern.search(data.dtype.str) is not None:
+            return data
+        return torch.as_tensor(data)
+    elif isinstance(data, collections.abc.Mapping):
+        try:
+            if isinstance(data, collections.abc.MutableMapping):
+                # The mapping type may have extra properties, so we can't just
+                # use `type(data)(...)` to create the new mapping.
+                # Create a clone and update it if the mapping type is mutable.
+                clone = copy.copy(data)
+                clone.update({key: default_convert(data[key]) for key in data})
+                return clone
+            else:
+                return elem_type({key: default_convert(data[key]) for key in data})
+        except TypeError:
+            # The mapping type may not support `copy()` / `update(mapping)`
+            # or `__init__(iterable)`.
+            return {key: default_convert(data[key]) for key in data}
+    elif isinstance(data, tuple) and hasattr(data, '_fields'):  # namedtuple
+        return elem_type(*(default_convert(d) for d in data))
+    elif isinstance(data, tuple):
+        return [default_convert(d) for d in data]  # Backwards compatibility.
+    elif isinstance(data, collections.abc.Sequence) and not isinstance(data, (str, bytes)):
+        try:
+            if isinstance(data, collections.abc.MutableSequence):
+                # The sequence type may have extra properties, so we can't just
+                # use `type(data)(...)` to create the new sequence.
+                # Create a clone and update it if the sequence type is mutable.
+                clone = copy.copy(data)  # type: ignore[arg-type]
+                for i, d in enumerate(data):
+                    clone[i] = default_convert(d)
+                return clone
+            else:
+                return elem_type([default_convert(d) for d in data])
+        except TypeError:
+            # The sequence type may not support `copy()` / `__setitem__(index, item)`
+            # or `__init__(iterable)` (e.g., `range`).
+            return [default_convert(d) for d in data]
+    else:
+        return data
+
+
+default_collate_err_msg_format = (
+    "default_collate: batch must contain tensors, numpy arrays, numbers, "
+    "dicts or lists; found {}")
+
+
+def collate(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None):
+    r"""
+    General collate function that handles the collection type of the elements within each batch.
+
+    The function also consults a function registry to deal with specific element types. `default_collate_fn_map`
+    provides default collate functions for tensors, numpy arrays, numbers and strings.
+
+    Args:
+        batch: a single batch to be collated
+        collate_fn_map: Optional dictionary mapping from element type to the corresponding collate function.
+            If the element type isn't present in this dictionary,
+            this function will go through each key of the dictionary in the insertion order to
+            invoke the corresponding collate function if the element type is a subclass of the key.
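+            If no registered type matches, this function falls back to its built-in
+            handling of mappings, namedtuples, and sequences, and raises a `TypeError`
+            for any other element type.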
+
+    Examples:
+        >>> def collate_tensor_fn(batch, *, collate_fn_map):
+        ...     # Extend this function to handle batch of tensors
+        ...     return torch.stack(batch, 0)
+        >>> def custom_collate(batch):
+        ...     collate_map = {torch.Tensor: collate_tensor_fn}
+        ...     return collate(batch, collate_fn_map=collate_map)
+        >>> # Extend `default_collate` by in-place modifying `default_collate_fn_map`
+        >>> default_collate_fn_map.update({torch.Tensor: collate_tensor_fn})
+
+    Note:
+        Each collate function requires a positional argument for batch and a keyword argument
+        for the dictionary of collate functions as `collate_fn_map`.
+    """
+    elem = batch[0]
+    elem_type = type(elem)
+
+    if collate_fn_map is not None:
+        if elem_type in collate_fn_map:
+            return collate_fn_map[elem_type](batch, collate_fn_map=collate_fn_map)
+
+        for collate_type in collate_fn_map:
+            if isinstance(elem, collate_type):
+                return collate_fn_map[collate_type](batch, collate_fn_map=collate_fn_map)
+
+    if isinstance(elem, collections.abc.Mapping):
+        try:
+            if isinstance(elem, collections.abc.MutableMapping):
+                # The mapping type may have extra properties, so we can't just
+                # use `type(data)(...)` to create the new mapping.
+                # Create a clone and update it if the mapping type is mutable.
+                clone = copy.copy(elem)
+                clone.update({key: collate([d[key] for d in batch], collate_fn_map=collate_fn_map) for key in elem})
+                return clone
+            else:
+                return elem_type({key: collate([d[key] for d in batch], collate_fn_map=collate_fn_map) for key in elem})
+        except TypeError:
+            # The mapping type may not support `copy()` / `update(mapping)`
+            # or `__init__(iterable)`.
+            return {key: collate([d[key] for d in batch], collate_fn_map=collate_fn_map) for key in elem}
+    elif isinstance(elem, tuple) and hasattr(elem, '_fields'):  # namedtuple
+        return elem_type(*(collate(samples, collate_fn_map=collate_fn_map) for samples in zip(*batch)))
+    elif isinstance(elem, collections.abc.Sequence):
+        # check to make sure that the elements in batch have consistent size
+        it = iter(batch)
+        elem_size = len(next(it))
+        if not all(len(elem) == elem_size for elem in it):
+            raise RuntimeError('each element in list of batch should be of equal size')
+        transposed = list(zip(*batch))  # It may be accessed twice, so we use a list.
+
+        if isinstance(elem, tuple):
+            return [collate(samples, collate_fn_map=collate_fn_map) for samples in transposed]  # Backwards compatibility.
+        else:
+            try:
+                if isinstance(elem, collections.abc.MutableSequence):
+                    # The sequence type may have extra properties, so we can't just
+                    # use `type(data)(...)` to create the new sequence.
+                    # Create a clone and update it if the sequence type is mutable.
+                    clone = copy.copy(elem)  # type: ignore[arg-type]
+                    for i, samples in enumerate(transposed):
+                        clone[i] = collate(samples, collate_fn_map=collate_fn_map)
+                    return clone
+                else:
+                    return elem_type([collate(samples, collate_fn_map=collate_fn_map) for samples in transposed])
+            except TypeError:
+                # The sequence type may not support `copy()` / `__setitem__(index, item)`
+                # or `__init__(iterable)` (e.g., `range`).
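+                # Fall back to a plain list of collated samples.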
+ return [collate(samples, collate_fn_map=collate_fn_map) for samples in transposed] + + raise TypeError(default_collate_err_msg_format.format(elem_type)) + + +def collate_tensor_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None): + elem = batch[0] + out = None + if elem.is_nested: + raise RuntimeError( + "Batches of nested tensors are not currently supported by the default collate_fn; " + "please provide a custom collate_fn to handle them appropriately." + ) + if elem.layout in {torch.sparse_coo, torch.sparse_csr, torch.sparse_bsr, torch.sparse_csc, torch.sparse_bsc}: + raise RuntimeError( + "Batches of sparse tensors are not currently supported by the default collate_fn; " + "please provide a custom collate_fn to handle them appropriately." + ) + if torch.utils.data.get_worker_info() is not None: + # If we're in a background process, concatenate directly into a + # shared memory tensor to avoid an extra copy + numel = sum(x.numel() for x in batch) + storage = elem._typed_storage()._new_shared(numel, device=elem.device) + out = elem.new(storage).resize_(len(batch), *list(elem.size())) + return torch.stack(batch, 0, out=out) + + +def collate_numpy_array_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None): + elem = batch[0] + # array of string classes and object + if np_str_obj_array_pattern.search(elem.dtype.str) is not None: + raise TypeError(default_collate_err_msg_format.format(elem.dtype)) + + return collate([torch.as_tensor(b) for b in batch], collate_fn_map=collate_fn_map) + + +def collate_numpy_scalar_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None): + return torch.as_tensor(batch) + + +def collate_float_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None): + return torch.tensor(batch, dtype=torch.float64) + + +def collate_int_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None): + return torch.tensor(batch) + + +def collate_str_fn(batch, *, collate_fn_map: Optional[Dict[Union[Type, Tuple[Type, ...]], Callable]] = None): + return batch + + +default_collate_fn_map: Dict[Union[Type, Tuple[Type, ...]], Callable] = {torch.Tensor: collate_tensor_fn} +with contextlib.suppress(ImportError): + import numpy as np + # For both ndarray and memmap (subclass of ndarray) + default_collate_fn_map[np.ndarray] = collate_numpy_array_fn + # See scalars hierarchy: https://numpy.org/doc/stable/reference/arrays.scalars.html + # Skip string scalars + default_collate_fn_map[(np.bool_, np.number, np.object_)] = collate_numpy_scalar_fn +default_collate_fn_map[float] = collate_float_fn +default_collate_fn_map[int] = collate_int_fn +default_collate_fn_map[str] = collate_str_fn +default_collate_fn_map[bytes] = collate_str_fn + + +def default_collate(batch): + r""" + Take in a batch of data and put the elements within the batch into a tensor with an additional outer dimension - batch size. + + The exact output type can be a :class:`torch.Tensor`, a `Sequence` of :class:`torch.Tensor`, a + Collection of :class:`torch.Tensor`, or left unchanged, depending on the input type. + This is used as the default function for collation when + `batch_size` or `batch_sampler` is defined in :class:`~torch.utils.data.DataLoader`. 
+
+    Here is the general input type (based on the type of the element within the batch) to output type mapping:
+
+        * :class:`torch.Tensor` -> :class:`torch.Tensor` (with an added outer dimension batch size)
+        * NumPy Arrays -> :class:`torch.Tensor`
+        * `float` -> :class:`torch.Tensor`
+        * `int` -> :class:`torch.Tensor`
+        * `str` -> `str` (unchanged)
+        * `bytes` -> `bytes` (unchanged)
+        * `Mapping[K, V_i]` -> `Mapping[K, default_collate([V_1, V_2, ...])]`
+        * `NamedTuple[V1_i, V2_i, ...]` -> `NamedTuple[default_collate([V1_1, V1_2, ...]),
+          default_collate([V2_1, V2_2, ...]), ...]`
+        * `Sequence[V1_i, V2_i, ...]` -> `Sequence[default_collate([V1_1, V1_2, ...]),
+          default_collate([V2_1, V2_2, ...]), ...]`
+
+    Args:
+        batch: a single batch to be collated
+
+    Examples:
+        >>> # xdoctest: +SKIP
+        >>> # Example with a batch of `int`s:
+        >>> default_collate([0, 1, 2, 3])
+        tensor([0, 1, 2, 3])
+        >>> # Example with a batch of `str`s:
+        >>> default_collate(['a', 'b', 'c'])
+        ['a', 'b', 'c']
+        >>> # Example with `Map` inside the batch:
+        >>> default_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}])
+        {'A': tensor([  0, 100]), 'B': tensor([  1, 100])}
+        >>> # Example with `NamedTuple` inside the batch:
+        >>> Point = namedtuple('Point', ['x', 'y'])
+        >>> default_collate([Point(0, 0), Point(1, 1)])
+        Point(x=tensor([0, 1]), y=tensor([0, 1]))
+        >>> # Example with `Tuple` inside the batch:
+        >>> default_collate([(0, 1), (2, 3)])
+        [tensor([0, 2]), tensor([1, 3])]
+        >>> # Example with `List` inside the batch:
+        >>> default_collate([[0, 1], [2, 3]])
+        [tensor([0, 2]), tensor([1, 3])]
+        >>> # Two options to extend `default_collate` to handle specific type
+        >>> # Option 1: Write custom collate function and invoke `default_collate`
+        >>> def custom_collate(batch):
+        ...     elem = batch[0]
+        ...     if isinstance(elem, CustomType):  # Some custom condition
+        ...         return ...
+        ...     else:  # Fall back to `default_collate`
+        ...         return default_collate(batch)
+        >>> # Option 2: In-place modify `default_collate_fn_map`
+        >>> def collate_customtype_fn(batch, *, collate_fn_map=None):
+        ...     return ...
+        >>> default_collate_fn_map.update({CustomType: collate_customtype_fn})
+        >>> default_collate(batch)  # Handle `CustomType` automatically
+    """
+    return collate(batch, collate_fn_map=default_collate_fn_map)
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/fetch.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/fetch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5696b401c5a8a7482bbfbcb9631c9df62096364
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/fetch.py
@@ -0,0 +1,54 @@
+r"""Contains definitions of the methods used by the _BaseDataLoaderIter to fetch data from an iterable-style or map-style dataset.
+
+This logic is shared in both single- and multi-processing data loading.
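+
+A fetcher receives either a single index or a list of indices, reads the
+corresponding samples from the dataset, and applies `collate_fn` to the result.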
+""" + + +class _BaseDatasetFetcher: + def __init__(self, dataset, auto_collation, collate_fn, drop_last): + self.dataset = dataset + self.auto_collation = auto_collation + self.collate_fn = collate_fn + self.drop_last = drop_last + + def fetch(self, possibly_batched_index): + raise NotImplementedError() + + +class _IterableDatasetFetcher(_BaseDatasetFetcher): + def __init__(self, dataset, auto_collation, collate_fn, drop_last): + super().__init__(dataset, auto_collation, collate_fn, drop_last) + self.dataset_iter = iter(dataset) + self.ended = False + + def fetch(self, possibly_batched_index): + if self.ended: + raise StopIteration + + if self.auto_collation: + data = [] + for _ in possibly_batched_index: + try: + data.append(next(self.dataset_iter)) + except StopIteration: + self.ended = True + break + if len(data) == 0 or ( + self.drop_last and len(data) < len(possibly_batched_index) + ): + raise StopIteration + else: + data = next(self.dataset_iter) + return self.collate_fn(data) + + +class _MapDatasetFetcher(_BaseDatasetFetcher): + def fetch(self, possibly_batched_index): + if self.auto_collation: + if hasattr(self.dataset, "__getitems__") and self.dataset.__getitems__: + data = self.dataset.__getitems__(possibly_batched_index) + else: + data = [self.dataset[idx] for idx in possibly_batched_index] + else: + data = self.dataset[possibly_batched_index] + return self.collate_fn(data) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/pin_memory.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/pin_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..9de645cd7ee77b2e2e43ac769eba1d59de9a85c9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/pin_memory.py @@ -0,0 +1,98 @@ +r"""Contains definitions of the methods used by the _BaseDataLoaderIter to put fetched tensors into pinned memory. + +These **needs** to be in global scope since Py2 doesn't support serializing +static methods. +""" + +import collections +import copy +import queue + +import torch +from . import MP_STATUS_CHECK_INTERVAL +from torch._utils import ExceptionWrapper + + +def _pin_memory_loop(in_queue, out_queue, device_id, done_event, device): + # This setting is thread local, and prevents the copy in pin_memory from + # consuming all CPU cores. + torch.set_num_threads(1) + + if device == "cuda": + torch.cuda.set_device(device_id) + elif device == "xpu": + torch.xpu.set_device(device_id) # type: ignore[attr-defined] + elif device == torch._C._get_privateuse1_backend_name(): + custom_device_mod = getattr(torch, torch._C._get_privateuse1_backend_name()) + custom_device_mod.set_device(device_id) + + def do_one_step(): + try: + r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL) + except queue.Empty: + return + idx, data = r + if not done_event.is_set() and not isinstance(data, ExceptionWrapper): + try: + data = pin_memory(data, device) + except Exception: + data = ExceptionWrapper( + where=f"in pin memory thread for device {device_id}") + r = (idx, data) + while not done_event.is_set(): + try: + out_queue.put(r, timeout=MP_STATUS_CHECK_INTERVAL) + break + except queue.Full: + continue + + # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the + # logic of this function. 
+    while not done_event.is_set():
+        # Make sure that we don't preserve any object from one iteration
+        # to the next
+        do_one_step()
+
+def pin_memory(data, device=None):
+    if isinstance(data, torch.Tensor):
+        return data.pin_memory(device)
+    elif isinstance(data, (str, bytes)):
+        return data
+    elif isinstance(data, collections.abc.Mapping):
+        try:
+            if isinstance(data, collections.abc.MutableMapping):
+                # The mapping type may have extra properties, so we can't just
+                # use `type(data)(...)` to create the new mapping.
+                # Create a clone and update it if the mapping type is mutable.
+                clone = copy.copy(data)
+                clone.update({k: pin_memory(sample, device) for k, sample in data.items()})
+                return clone
+            else:
+                return type(data)({k: pin_memory(sample, device) for k, sample in data.items()})  # type: ignore[call-arg]
+        except TypeError:
+            # The mapping type may not support `copy()` / `update(mapping)`
+            # or `__init__(iterable)`.
+            return {k: pin_memory(sample, device) for k, sample in data.items()}
+    elif isinstance(data, tuple) and hasattr(data, '_fields'):  # namedtuple
+        return type(data)(*(pin_memory(sample, device) for sample in data))
+    elif isinstance(data, tuple):
+        return [pin_memory(sample, device) for sample in data]  # Backwards compatibility.
+    elif isinstance(data, collections.abc.Sequence):
+        try:
+            if isinstance(data, collections.abc.MutableSequence):
+                # The sequence type may have extra properties, so we can't just
+                # use `type(data)(...)` to create the new sequence.
+                # Create a clone and update it if the sequence type is mutable.
+                clone = copy.copy(data)  # type: ignore[arg-type]
+                for i, item in enumerate(data):
+                    clone[i] = pin_memory(item, device)
+                return clone
+            return type(data)([pin_memory(sample, device) for sample in data])  # type: ignore[call-arg]
+        except TypeError:
+            # The sequence type may not support `copy()` / `__setitem__(index, item)`
+            # or `__init__(iterable)` (e.g., `range`).
+            return [pin_memory(sample, device) for sample in data]
+    elif hasattr(data, "pin_memory"):
+        return data.pin_memory()
+    else:
+        return data
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/signal_handling.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/signal_handling.py
new file mode 100644
index 0000000000000000000000000000000000000000..da8f3780bed253e39a055345febfaad82035b0ed
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/signal_handling.py
@@ -0,0 +1,72 @@
+r"""Signal handling for multiprocessing data loading.
+
+NOTE [ Signal handling in multiprocessing data loading ]
+
+In cases like DataLoader, if a worker process dies due to bus error/segfault
+or just hangs, the main process will hang waiting for data. This is difficult
+to avoid on the PyTorch side as it can be caused by limited shm, or other
+libraries users call in the workers. In this file and `DataLoader.cpp`, we make
+our best effort to provide some error message to users when such unfortunate
+events happen.
+
+When a _BaseDataLoaderIter starts worker processes, their pids are registered in a
+map defined in `DataLoader.cpp`: id(_BaseDataLoaderIter) => Collection[ Worker pids ]
+via `_set_worker_pids`.
+
+When an error happens in a worker process, the main process receives a SIGCHLD,
+and Python will eventually call the handler registered below
+(in `_set_SIGCHLD_handler`). In the handler, the `_error_if_any_worker_fails`
+call checks all registered worker pids and raises a proper error message to
+prevent the main process from hanging while waiting for data from a worker.
+
+Additionally, at the beginning of each worker's `_utils.worker._worker_loop`,
+`_set_worker_signal_handlers` is called to register critical signal handlers
+(e.g., for SIGSEGV, SIGBUS, SIGFPE, SIGTERM) in C, which just prints an error
+message to stderr before triggering the default handler. So a message will also
+be printed from the worker process when it is killed by such signals.
+
+See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for the reasoning behind
+this signal handling design and the other mechanisms we implement to make our
+multiprocessing data loading robust to errors.
+"""
+
+import signal
+import threading
+from . import IS_WINDOWS
+
+# Some of the following imported functions are not used in this file, but are
+# meant to be used as `_utils.signal_handling.XXXXX`.
+from torch._C import _set_worker_pids, _remove_worker_pids  # noqa: F401
+from torch._C import _error_if_any_worker_fails, _set_worker_signal_handlers  # noqa: F401
+
+_SIGCHLD_handler_set = False
+r"""Whether SIGCHLD handler is set for DataLoader worker failures. Only one
+handler needs to be set for all DataLoaders in a process."""
+
+
+def _set_SIGCHLD_handler():
+    # Windows doesn't support SIGCHLD handler
+    if IS_WINDOWS:
+        return
+    # can't set signal in child threads
+    if not isinstance(threading.current_thread(), threading._MainThread):  # type: ignore[attr-defined]
+        return
+    global _SIGCHLD_handler_set
+    if _SIGCHLD_handler_set:
+        return
+    previous_handler = signal.getsignal(signal.SIGCHLD)
+    if not callable(previous_handler):
+        # This doesn't catch default handler, but SIGCHLD default handler is a
+        # no-op.
+        previous_handler = None
+
+    def handler(signum, frame):
+        # The following call uses `waitid` with WNOHANG from the C side. Therefore,
+        # Python can still get and update the process status successfully.
+        _error_if_any_worker_fails()
+        if previous_handler is not None:
+            assert callable(previous_handler)
+            previous_handler(signum, frame)
+
+    signal.signal(signal.SIGCHLD, handler)
+    _SIGCHLD_handler_set = True
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/worker.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/worker.py
new file mode 100644
index 0000000000000000000000000000000000000000..137791c4c43627852048dc38d3ccf915964bc202
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/_utils/worker.py
@@ -0,0 +1,329 @@
+r"""Contains definitions of the methods used by the _BaseDataLoaderIter workers.
+
+These **need** to be in global scope since Py2 doesn't support serializing
+static methods.
+"""
+
+import torch
+import random
+import os
+import queue
+from dataclasses import dataclass
+from torch._utils import ExceptionWrapper
+from typing import Optional, Union, TYPE_CHECKING
+from . import signal_handling, MP_STATUS_CHECK_INTERVAL, IS_WINDOWS, HAS_NUMPY
+if TYPE_CHECKING:
+    from torch.utils.data import Dataset
+
+if IS_WINDOWS:
+    import ctypes
+    from ctypes.wintypes import DWORD, BOOL, HANDLE
+
+    # On Windows, the parent ID of the worker process remains unchanged when the manager process
+    # is gone, and the only way to check it through the OS is to let the worker have a process handle
+    # of the manager and ask if the process status has changed.
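+    # The watchdog below polls the manager's process handle with a zero timeout,
+    # so `is_alive` returns immediately instead of blocking the worker loop.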
+ class ManagerWatchdog: + def __init__(self): + self.manager_pid = os.getppid() + + # mypy cannot detect this code is windows only + self.kernel32 = ctypes.WinDLL('kernel32', use_last_error=True) # type: ignore[attr-defined] + self.kernel32.OpenProcess.argtypes = (DWORD, BOOL, DWORD) + self.kernel32.OpenProcess.restype = HANDLE + self.kernel32.WaitForSingleObject.argtypes = (HANDLE, DWORD) + self.kernel32.WaitForSingleObject.restype = DWORD + + # Value obtained from https://msdn.microsoft.com/en-us/library/ms684880.aspx + SYNCHRONIZE = 0x00100000 + self.manager_handle = self.kernel32.OpenProcess(SYNCHRONIZE, 0, self.manager_pid) + + if not self.manager_handle: + raise ctypes.WinError(ctypes.get_last_error()) # type: ignore[attr-defined] + + self.manager_dead = False + + def is_alive(self): + if not self.manager_dead: + # Value obtained from https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032.aspx + self.manager_dead = self.kernel32.WaitForSingleObject(self.manager_handle, 0) == 0 + return not self.manager_dead +else: + class ManagerWatchdog: # type: ignore[no-redef] + def __init__(self): + self.manager_pid = os.getppid() + self.manager_dead = False + + def is_alive(self): + if not self.manager_dead: + self.manager_dead = os.getppid() != self.manager_pid + return not self.manager_dead + +_worker_info: Optional["WorkerInfo"] = None + + +class WorkerInfo: + id: int + num_workers: int + seed: int + dataset: 'Dataset' + __initialized = False + + def __init__(self, **kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) + self.__keys = tuple(kwargs.keys()) + self.__initialized = True + + def __setattr__(self, key, val): + if self.__initialized: + raise RuntimeError(f"Cannot assign attributes to {self.__class__.__name__} objects") + return super().__setattr__(key, val) + + def __repr__(self): + items = [] + for k in self.__keys: + items.append(f'{k}={getattr(self, k)}') + return f"{self.__class__.__name__}({', '.join(items)})" + + +def get_worker_info() -> Optional[WorkerInfo]: + r"""Returns the information about the current + :class:`~torch.utils.data.DataLoader` iterator worker process. + + When called in a worker, this returns an object guaranteed to have the + following attributes: + + * :attr:`id`: the current worker id. + * :attr:`num_workers`: the total number of workers. + * :attr:`seed`: the random seed set for the current worker. This value is + determined by main process RNG and the worker id. See + :class:`~torch.utils.data.DataLoader`'s documentation for more details. + * :attr:`dataset`: the copy of the dataset object in **this** process. Note + that this will be a different object in a different process than the one + in the main process. + + When called in the main process, this returns ``None``. + + .. note:: + When used in a :attr:`worker_init_fn` passed over to + :class:`~torch.utils.data.DataLoader`, this method can be useful to + set up each worker process differently, for instance, using ``worker_id`` + to configure the ``dataset`` object to only read a specific fraction of a + sharded dataset, or use ``seed`` to seed other libraries used in dataset + code. 
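+
+    A minimal sketch of such a ``worker_init_fn`` (``shard_for_worker`` is a
+    hypothetical helper, shown only for illustration):
+
+        >>> # xdoctest: +SKIP
+        >>> def worker_init_fn(worker_id):
+        ...     info = torch.utils.data.get_worker_info()
+        ...     # Configure this worker's dataset copy to read only its shard.
+        ...     shard_for_worker(info.dataset, info.id, info.num_workers)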
+ """ + return _worker_info + + +r"""Dummy class used to signal the end of an IterableDataset""" +@dataclass(frozen=True) +class _IterableDatasetStopIteration: + worker_id: int + +r"""Dummy class used to resume the fetching when worker reuse is enabled""" +@dataclass(frozen=True) +class _ResumeIteration: + seed: Optional[int] = None + +# The function `_generate_state` is adapted from `numpy.random.SeedSequence` +# from https://github.com/numpy/numpy/blob/main/numpy/random/bit_generator.pyx +# It's MIT licensed, here is the copyright: + +# Copyright (c) 2015 Melissa E. O'Neill +# Copyright (c) 2019 NumPy Developers +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# This function generates an array of int32 as the seed for +# `numpy.random`, in order to prevent state collision due to same +# seed and algorithm for `numpy.random` and `random` modules. +# TODO: Implement `SeedSequence` like object for `torch.random` +def _generate_state(base_seed, worker_id): + INIT_A = 0x43b0d7e5 + MULT_A = 0x931e8875 + INIT_B = 0x8b51f9dd + MULT_B = 0x58f38ded + MIX_MULT_L = 0xca01f9dd + MIX_MULT_R = 0x4973f715 + XSHIFT = 4 * 8 // 2 + MASK32 = 0xFFFFFFFF + + entropy = [worker_id, base_seed & MASK32, base_seed >> 32, 0] + pool = [0] * 4 + + hash_const_A = INIT_A + + def hash(value): + nonlocal hash_const_A + value = (value ^ hash_const_A) & MASK32 + hash_const_A = (hash_const_A * MULT_A) & MASK32 + value = (value * hash_const_A) & MASK32 + value = (value ^ (value >> XSHIFT)) & MASK32 + return value + + def mix(x, y): + result_x = (MIX_MULT_L * x) & MASK32 + result_y = (MIX_MULT_R * y) & MASK32 + result = (result_x - result_y) & MASK32 + result = (result ^ (result >> XSHIFT)) & MASK32 + return result + + # Add in the entropy to the pool. + for i in range(len(pool)): + pool[i] = hash(entropy[i]) + + # Mix all bits together so late bits can affect earlier bits. 
+    for i_src in range(len(pool)):
+        for i_dst in range(len(pool)):
+            if i_src != i_dst:
+                pool[i_dst] = mix(pool[i_dst], hash(pool[i_src]))
+
+    hash_const_B = INIT_B
+    state = []
+    for i_dst in range(4):
+        data_val = pool[i_dst]
+        data_val = (data_val ^ hash_const_B) & MASK32
+        hash_const_B = (hash_const_B * MULT_B) & MASK32
+        data_val = (data_val * hash_const_B) & MASK32
+        data_val = (data_val ^ (data_val >> XSHIFT)) & MASK32
+        state.append(data_val)
+    return state
+
+def _worker_loop(dataset_kind, dataset, index_queue, data_queue, done_event,
+                 auto_collation, collate_fn, drop_last, base_seed, init_fn, worker_id,
+                 num_workers, persistent_workers, shared_seed):
+    # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the
+    # logic of this function.
+
+    try:
+        # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal
+        # module's handlers are executed after Python returns from C low-level
+        # handlers, likely when the same fatal signal had already happened
+        # again.
+        # https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers
+        signal_handling._set_worker_signal_handlers()
+
+        torch.set_num_threads(1)
+        seed = base_seed + worker_id
+        random.seed(seed)
+        torch.manual_seed(seed)
+        if HAS_NUMPY:
+            np_seed = _generate_state(base_seed, worker_id)
+            import numpy as np
+            np.random.seed(np_seed)
+
+        from torch.utils.data import IterDataPipe
+        from torch.utils.data.graph_settings import apply_random_seed
+
+        shared_rng = torch.Generator()
+        if isinstance(dataset, IterDataPipe):
+            assert shared_seed is not None
+            shared_rng.manual_seed(shared_seed)
+            dataset = apply_random_seed(dataset, shared_rng)
+
+        global _worker_info
+        _worker_info = WorkerInfo(id=worker_id, num_workers=num_workers,
+                                  seed=seed, dataset=dataset)
+
+        from torch.utils.data import _DatasetKind
+
+        init_exception = None
+
+        try:
+            if init_fn is not None:
+                init_fn(worker_id)
+
+            fetcher = _DatasetKind.create_fetcher(dataset_kind, dataset, auto_collation, collate_fn, drop_last)
+        except Exception:
+            init_exception = ExceptionWrapper(
+                where=f"in DataLoader worker process {worker_id}")
+
+        # When using Iterable mode, some workers can exit earlier than others due
+        # to the IterableDataset behaving differently for different workers.
+        # When such things happen, an `_IterableDatasetStopIteration` object is
+        # sent over to the main process with the ID of this worker, so that the
+        # main process won't send more tasks to this worker, and will send
+        # `None` to this worker to properly exit it.
+        #
+        # Note that we cannot set `done_event` from a worker as it is shared
+        # among all processes. Instead, we set the `iteration_end` flag to
+        # signify that the iterator is exhausted. When either `done_event` or
+        # `iteration_end` is set, we skip all processing steps and just wait for
+        # `None`.
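+        # `iteration_end` tracks whether this worker's IterableDataset is exhausted.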
+        iteration_end = False
+
+        watchdog = ManagerWatchdog()
+
+        while watchdog.is_alive():
+            try:
+                r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL)
+            except queue.Empty:
+                continue
+            if isinstance(r, _ResumeIteration):
+                # Acknowledge the main process
+                data_queue.put((r, None))
+                iteration_end = False
+
+                if isinstance(dataset, IterDataPipe):
+                    assert r.seed is not None
+                    shared_rng.manual_seed(r.seed)
+                    dataset = apply_random_seed(dataset, shared_rng)
+
+                # Recreate the fetcher for worker-reuse policy
+                fetcher = _DatasetKind.create_fetcher(
+                    dataset_kind, dataset, auto_collation, collate_fn, drop_last)
+                continue
+            elif r is None:
+                # Received the final signal
+                assert done_event.is_set() or iteration_end
+                break
+            elif done_event.is_set() or iteration_end:
+                # `done_event` is set. But I haven't received the final signal
+                # (None) yet. I will keep continuing until I get it, and skip the
+                # processing steps.
+                continue
+            idx, index = r
+            data: Union[_IterableDatasetStopIteration, ExceptionWrapper]
+            if init_exception is not None:
+                data = init_exception
+                init_exception = None
+            else:
+                try:
+                    data = fetcher.fetch(index)  # type: ignore[possibly-undefined]
+                except Exception as e:
+                    if isinstance(e, StopIteration) and dataset_kind == _DatasetKind.Iterable:
+                        data = _IterableDatasetStopIteration(worker_id)
+                        # Set `iteration_end`
+                        #   (1) to save future `next(...)` calls, and
+                        #   (2) to avoid sending multiple `_IterableDatasetStopIteration`s.
+                        iteration_end = True
+                    else:
+                        # It is important that we don't store exc_info in a variable.
+                        # `ExceptionWrapper` does the correct thing.
+                        # See NOTE [ Python Traceback Reference Cycle Problem ]
+                        data = ExceptionWrapper(
+                            where=f"in DataLoader worker process {worker_id}")
+            data_queue.put((idx, data))
+            del data, idx, index, r  # save memory
+    except KeyboardInterrupt:
+        # Main process will raise KeyboardInterrupt anyway.
+ pass + if done_event.is_set(): + data_queue.cancel_join_thread() + data_queue.close() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..08860663bc6eff15634cadc98bd7e4c5441d2d43 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_decorator.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_decorator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60b23cd27e2c990ebac9dc3844d6b0d2cae8e816 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_decorator.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_hook_iterator.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_hook_iterator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16f0ac5b474ab261dda36371b13866525c06446f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_hook_iterator.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_typing.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcfbb56fb2b4d3bb15060b45c9b26b04cedf25a5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/_typing.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/datapipe.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/datapipe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb9eceb81112bd5de83398fa894913767fb59bff Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/datapipe.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/gen_pyi.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/gen_pyi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05ac6cc822cebca3d12b995ae130fdc4cc86bce0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/__pycache__/gen_pyi.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframe_wrapper.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframe_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ebed168a3aec7e5a18404cf221864679e725e2c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframe_wrapper.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframes.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba7c4670919d545ece6575b5e5ac27f4fa3ea3d8
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/__pycache__/dataframes.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..3596cc171e5da567417535cedc4a174cd417cae1
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/dataframe/dataframe_wrapper.py
@@ -0,0 +1,125 @@
+from typing import Any, Optional
+
+_pandas: Any = None
+_WITH_PANDAS: Optional[bool] = None
+
+
+def _try_import_pandas() -> bool:
+    try:
+        import pandas  # type: ignore[import]
+        global _pandas
+        _pandas = pandas
+        return True
+    except ImportError:
+        return False
+
+
+# pandas is used only for prototyping; it will shortly be replaced with TorchArrow
+def _with_pandas() -> bool:
+    global _WITH_PANDAS
+    if _WITH_PANDAS is None:
+        _WITH_PANDAS = _try_import_pandas()
+    return _WITH_PANDAS
+
+
+class PandasWrapper:
+    @classmethod
+    def create_dataframe(cls, data, columns):
+        if not _with_pandas():
+            raise Exception("DataFrames prototype requires pandas to function")
+        return _pandas.DataFrame(data, columns=columns)  # type: ignore[union-attr]
+
+    @classmethod
+    def is_dataframe(cls, data):
+        if not _with_pandas():
+            return False
+        return isinstance(data, _pandas.core.frame.DataFrame)  # type: ignore[union-attr]
+
+    @classmethod
+    def is_column(cls, data):
+        if not _with_pandas():
+            return False
+        return isinstance(data, _pandas.core.series.Series)  # type: ignore[union-attr]
+
+    @classmethod
+    def iterate(cls, data):
+        if not _with_pandas():
+            raise Exception("DataFrames prototype requires pandas to function")
+        yield from data.itertuples(index=False)
+
+    @classmethod
+    def concat(cls, buffer):
+        if not _with_pandas():
+            raise Exception("DataFrames prototype requires pandas to function")
+        return _pandas.concat(buffer)  # type: ignore[union-attr]
+
+    @classmethod
+    def get_item(cls, data, idx):
+        if not _with_pandas():
+            raise Exception("DataFrames prototype requires pandas to function")
+        return data[idx: idx + 1]
+
+    @classmethod
+    def get_len(cls, df):
+        if not _with_pandas():
+            raise Exception("DataFrames prototype requires pandas to function")
+        return len(df.index)
+
+    @classmethod
+    def get_columns(cls, df):
+        if not _with_pandas():
+            raise Exception("DataFrames prototype requires pandas to function")
+        return list(df.columns.values.tolist())
+
+
+# When you build your own implementation, just override it with
+# dataframe_wrapper.set_df_wrapper(new_wrapper_class)
+default_wrapper = PandasWrapper
+
+
+def get_df_wrapper():
+    return default_wrapper
+
+
+def set_df_wrapper(wrapper):
+    global default_wrapper
+    default_wrapper = wrapper
+
+
+def create_dataframe(data, columns=None):
+    wrapper = get_df_wrapper()
+    return wrapper.create_dataframe(data, columns)
+
+
+def is_dataframe(data):
+    wrapper = get_df_wrapper()
+    return wrapper.is_dataframe(data)
+
+
+def get_columns(data):
+    wrapper = get_df_wrapper()
+    return wrapper.get_columns(data)
+
+
+def is_column(data):
+    wrapper = get_df_wrapper()
+    return wrapper.is_column(data)
+
+
+def concat(buffer):
+    wrapper = get_df_wrapper()
+    return wrapper.concat(buffer)
+
+
+def iterate(data):
+    wrapper = get_df_wrapper()
+    return wrapper.iterate(data)
+
+
+def get_item(data, idx):
+    wrapper = get_df_wrapper()
+    return wrapper.get_item(data, idx)
+
+
+def get_len(df):
+    wrapper = get_df_wrapper()
+    return wrapper.get_len(df)
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a775f0be8753677f8255e1201dc8d70649172baf
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__init__.py
@@ -0,0 +1,64 @@
+from torch.utils.data.datapipes.iter.utils import (
+    IterableWrapperIterDataPipe as IterableWrapper,
+)
+from torch.utils.data.datapipes.iter.callable import (
+    CollatorIterDataPipe as Collator,
+    MapperIterDataPipe as Mapper,
+)
+from torch.utils.data.datapipes.iter.combinatorics import (
+    SamplerIterDataPipe as Sampler,
+    ShufflerIterDataPipe as Shuffler,
+)
+from torch.utils.data.datapipes.iter.combining import (
+    ConcaterIterDataPipe as Concater,
+    DemultiplexerIterDataPipe as Demultiplexer,
+    ForkerIterDataPipe as Forker,
+    MultiplexerIterDataPipe as Multiplexer,
+    ZipperIterDataPipe as Zipper,
+)
+from torch.utils.data.datapipes.iter.filelister import (
+    FileListerIterDataPipe as FileLister,
+)
+from torch.utils.data.datapipes.iter.fileopener import (
+    FileOpenerIterDataPipe as FileOpener,
+)
+from torch.utils.data.datapipes.iter.grouping import (
+    BatcherIterDataPipe as Batcher,
+    GrouperIterDataPipe as Grouper,
+    UnBatcherIterDataPipe as UnBatcher,
+)
+from torch.utils.data.datapipes.iter.sharding import (
+    ShardingFilterIterDataPipe as ShardingFilter,
+)
+from torch.utils.data.datapipes.iter.routeddecoder import (
+    RoutedDecoderIterDataPipe as RoutedDecoder,
+)
+from torch.utils.data.datapipes.iter.selecting import (
+    FilterIterDataPipe as Filter,
+)
+from torch.utils.data.datapipes.iter.streamreader import (
+    StreamReaderIterDataPipe as StreamReader,
+)
+
+__all__ = ['Batcher',
+           'Collator',
+           'Concater',
+           'Demultiplexer',
+           'FileLister',
+           'FileOpener',
+           'Filter',
+           'Forker',
+           'Grouper',
+           'IterableWrapper',
+           'Mapper',
+           'Multiplexer',
+           'RoutedDecoder',
+           'Sampler',
+           'ShardingFilter',
+           'Shuffler',
+           'StreamReader',
+           'UnBatcher',
+           'Zipper']
+
+# Please keep this list sorted
+assert __all__ == sorted(__all__)
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21f974f998ed2766c28c460aeed531eacedb8b40
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/callable.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/callable.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f431a87c59136f5465e0924efa092335b93755ca
Binary files /dev/null and
b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/callable.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/combinatorics.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/combinatorics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d380f96005612ba1cebd5b972d206e5227e14667 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/combinatorics.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/combining.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/combining.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c64c03e947145d068a524da15bc2962ec64f320 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/combining.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/filelister.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/filelister.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdefe35bd6b28299882a1174392f72217c2654cd Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/filelister.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/fileopener.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/fileopener.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f235bf942aba1874f5f3d0470c4e525b4c8c4b42 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/fileopener.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/grouping.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/grouping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96a3e83491e31947cd9d9e04d5e7c81bd70ee021 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/grouping.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/routeddecoder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/routeddecoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f207ced7a57619a380e851f8eeb526799bf68dbb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/routeddecoder.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/selecting.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/selecting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e63074cda91c663880f9f6693331662d92f00d5 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/selecting.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/sharding.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/sharding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff97a36a5ab839a7e572a075d07eab5801754a09 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/sharding.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/streamreader.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/streamreader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95eba676bcbc4ddde1cb3034569884e00696f08c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/streamreader.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7762d80c32a84ff94e80841fb4169d2fdc5b7e7c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/callable.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/callable.py new file mode 100644 index 0000000000000000000000000000000000000000..48875e40a68d111042e21d086cf895b24b6e0474 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/callable.py @@ -0,0 +1,237 @@ +import functools +from collections import namedtuple + +from typing import Callable, Iterator, Sized, TypeVar, Optional, Union, Any, Dict, List + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data._utils.collate import default_collate +from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper +from torch.utils.data.datapipes.datapipe import IterDataPipe +from torch.utils.data.datapipes.utils.common import (_check_unpickable_fn, + validate_input_col) + +__all__ = [ + "CollatorIterDataPipe", + "MapperIterDataPipe", +] + +T_co = TypeVar("T_co", covariant=True) + + +@functional_datapipe("map") +class MapperIterDataPipe(IterDataPipe[T_co]): + r""" + Applies a function over each item from the source DataPipe (functional name: ``map``). + + The function can be any regular Python function or partial object. Lambda + function is not recommended as it is not supported by pickle. + + Args: + datapipe: Source Iterable DataPipe + fn: Function being applied over each item + input_col: Index or indices of data which ``fn`` is applied, such as: + + - ``None`` as default to apply ``fn`` to the data directly. + - Integer(s) is used for list/tuple. + - Key(s) is used for dict. + + output_col: Index of data where result of ``fn`` is placed. 
``output_col`` can be specified + only when ``input_col`` is not ``None`` + + - ``None`` as default to replace the index that ``input_col`` specified; For ``input_col`` with + multiple indices, the left-most one is used, and other indices will be removed. + - Integer is used for list/tuple. ``-1`` represents to append result at the end. + - Key is used for dict. New key is acceptable. + + Example: + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper, Mapper + >>> def add_one(x): + ... return x + 1 + >>> dp = IterableWrapper(range(10)) + >>> map_dp_1 = dp.map(add_one) # Invocation via functional form is preferred + >>> list(map_dp_1) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + >>> # We discourage the usage of `lambda` functions as they are not serializable with `pickle` + >>> # Use `functools.partial` or explicitly define the function instead + >>> map_dp_2 = Mapper(dp, lambda x: x + 1) + >>> list(map_dp_2) + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + """ + + datapipe: IterDataPipe + fn: Callable + + def __init__( + self, + datapipe: IterDataPipe, + fn: Callable, + input_col=None, + output_col=None, + ) -> None: + super().__init__() + self.datapipe = datapipe + + _check_unpickable_fn(fn) + self.fn = fn # type: ignore[assignment] + + self.input_col = input_col + if input_col is None and output_col is not None: + raise ValueError("`output_col` must be None when `input_col` is None.") + if isinstance(output_col, (list, tuple)): + if len(output_col) > 1: + raise ValueError("`output_col` must be a single-element list or tuple") + output_col = output_col[0] + self.output_col = output_col + validate_input_col(fn, input_col) + + def _apply_fn(self, data): + if self.input_col is None and self.output_col is None: + return self.fn(data) + + if self.input_col is None: + res = self.fn(data) + elif isinstance(self.input_col, (list, tuple)): + args = tuple(data[col] for col in self.input_col) + res = self.fn(*args) + else: + res = self.fn(data[self.input_col]) + + # Copy tuple to list and run in-place modification because tuple is immutable. 
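+        # Illustrative sketch: with input_col=(0, 1), fn=operator.add and
+        # data=("a", "b", "c"), `res` is "ab" and the steps below yield ("ab", "c").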
+        if isinstance(data, tuple):
+            t_flag = True
+            data = list(data)
+        else:
+            t_flag = False
+
+        if self.output_col is None:
+            if isinstance(self.input_col, (list, tuple)):
+                data[self.input_col[0]] = res
+                for idx in sorted(self.input_col[1:], reverse=True):
+                    del data[idx]
+            else:
+                data[self.input_col] = res
+        else:
+            if self.output_col == -1:
+                data.append(res)
+            else:
+                data[self.output_col] = res
+
+        # Convert list back to tuple
+        return tuple(data) if t_flag else data
+
+    def __iter__(self) -> Iterator[T_co]:
+        for data in self.datapipe:
+            yield self._apply_fn(data)
+
+    def __len__(self) -> int:
+        if isinstance(self.datapipe, Sized):
+            return len(self.datapipe)
+        raise TypeError(
+            f"{type(self).__name__} instance doesn't have valid length"
+        )
+
+
+def _collate_helper(conversion, item):
+    # TODO(VitalyFedyunin): Verify that item is any sort of batch
+    if len(item.items) > 1:
+        # TODO(VitalyFedyunin): Compact all batch dataframes into one
+        raise Exception("Only supports one DataFrame per batch")
+    df = item[0]
+    columns_name = df_wrapper.get_columns(df)
+    tuple_names: List = []
+    tuple_values: List = []
+
+    for name in conversion.keys():
+        if name not in columns_name:
+            raise Exception("Conversion keys mismatch")
+
+    for name in columns_name:
+        if name in conversion:
+            if not callable(conversion[name]):
+                raise Exception('Collate (DF)DataPipe requires callable as dict values')
+            collation_fn = conversion[name]
+        else:
+            # TODO(VitalyFedyunin): Add default collation into df_wrapper
+            try:
+                import torcharrow.pytorch as tap  # type: ignore[import]
+                collation_fn = tap.rec.Default()
+            except Exception as e:
+                raise Exception("Unable to import the default collation function from TorchArrow") from e
+
+        tuple_names.append(str(name))
+        value = collation_fn(df[name])
+        tuple_values.append(value)
+
+    # TODO(VitalyFedyunin): We can dynamically extract types from the tuple_values here
+    # TODO(VitalyFedyunin): Instead of ignoring mypy error, make sure tuple_names is not empty
+    tpl_cls = namedtuple("CollateResult", tuple_names)  # type: ignore[misc]
+    collated = tpl_cls(*tuple_values)  # Named differently to avoid shadowing the built-in `tuple`
+    return collated
+
+
+@functional_datapipe("collate")
+class CollatorIterDataPipe(MapperIterDataPipe):
+    r"""
+    Collates samples from DataPipe to Tensor(s) by a custom collate function (functional name: ``collate``).
+
+    By default, it uses :func:`torch.utils.data.default_collate`.
+
+    .. note::
+        While writing a custom collate function, you can import :func:`torch.utils.data.default_collate` for the
+        default behavior and `functools.partial` to specify any additional arguments.
+
+    Args:
+        datapipe: Iterable DataPipe being collated
+        collate_fn: Customized collate function to collect and combine data or a batch of data.
+            Default function collates to Tensor(s) based on data type.
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> # Convert integer data to float Tensor
+        >>> class MyIterDataPipe(torch.utils.data.IterDataPipe):
+        ...     def __init__(self, start, end):
+        ...         super(MyIterDataPipe).__init__()
+        ...         assert end > start, "this example code only works with end > start"
+        ...         self.start = start
+        ...         self.end = end
+        ...
+        ...     def __iter__(self):
+        ...         return iter(range(self.start, self.end))
+        ...
+        ...     def __len__(self):
+        ...         return self.end - self.start
+        ...
+        >>> ds = MyIterDataPipe(start=3, end=7)
+        >>> print(list(ds))
+        [3, 4, 5, 6]
+        >>> def collate_fn(batch):
+        ...     return torch.tensor(batch, dtype=torch.float)
+        ...
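+        >>> # Note that `collate_fn` is applied to every individual sample yielded by `ds`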
+        >>> collated_ds = CollatorIterDataPipe(ds, collate_fn=collate_fn)
+        >>> print(list(collated_ds))
+        [tensor(3.), tensor(4.), tensor(5.), tensor(6.)]
+    """
+
+    def __init__(
+        self,
+        datapipe: IterDataPipe,
+        conversion: Optional[
+            Union[
+                Callable[..., Any],
+                Dict[Union[str, Any], Union[Callable, Any]],
+            ]
+        ] = default_collate,
+        collate_fn: Optional[Callable] = None,
+    ) -> None:
+        # TODO(VitalyFedyunin): Replace `Callable[..., Any]` with `Callable[[IColumn], Any]`
+        # TODO(VitalyFedyunin): Replace with `Dict[Union[str, IColumn], Union[Callable, Enum]]`
+        if collate_fn is not None:
+            super().__init__(datapipe, fn=collate_fn)
+        else:
+            if callable(conversion):
+                super().__init__(datapipe, fn=conversion)
+            else:
+                # TODO(VitalyFedyunin): Validate passed dictionary
+                collate_fn = functools.partial(_collate_helper, conversion)
+                super().__init__(datapipe, fn=collate_fn)
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combinatorics.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combinatorics.py
new file mode 100644
index 0000000000000000000000000000000000000000..16d2f5444dcd9e46e482b9f06ffcb11d24c71c44
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combinatorics.py
@@ -0,0 +1,183 @@
+import random
+import torch
+
+from torch.utils.data import Sampler, SequentialSampler
+from torch.utils.data.datapipes._decorator import functional_datapipe
+from torch.utils.data.datapipes.datapipe import IterDataPipe
+from typing import Dict, Iterator, List, Optional, Sized, Tuple, Type, TypeVar
+
+__all__ = [
+    "SamplerIterDataPipe",
+    "ShufflerIterDataPipe",
+]
+
+T_co = TypeVar('T_co', covariant=True)
+
+
+class SamplerIterDataPipe(IterDataPipe[T_co]):
+    r"""
+    Generate sample elements using the provided ``Sampler`` (defaults to :class:`SequentialSampler`).
+
+    Args:
+        datapipe: IterDataPipe to sample from
+        sampler: Sampler class to generate sample elements from input DataPipe.
+            Default is :class:`SequentialSampler` for IterDataPipe
+    """
+
+    datapipe: IterDataPipe
+    sampler: Sampler
+
+    def __init__(self,
+                 datapipe: IterDataPipe,
+                 sampler: Type[Sampler] = SequentialSampler,
+                 sampler_args: Optional[Tuple] = None,
+                 sampler_kwargs: Optional[Dict] = None
+                 ) -> None:
+        assert isinstance(datapipe, Sized), \
+            "Sampler class requires the input datapipe to implement `__len__`"
+        super().__init__()
+        self.datapipe = datapipe
+        self.sampler_args = () if sampler_args is None else sampler_args
+        self.sampler_kwargs = {} if sampler_kwargs is None else sampler_kwargs
+        # https://github.com/python/mypy/pull/9629 will solve
+        self.sampler = sampler(*self.sampler_args, data_source=self.datapipe, **self.sampler_kwargs)  # type: ignore[misc]
+
+    def __iter__(self) -> Iterator[T_co]:
+        return iter(self.sampler)
+
+    def __len__(self) -> int:
+        # Dataset has been tested as `Sized`
+        if isinstance(self.sampler, Sized):
+            return len(self.sampler)
+        raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
+
+
+@functional_datapipe('shuffle')
+class ShufflerIterDataPipe(IterDataPipe[T_co]):
+    r"""
+    Shuffle the input DataPipe with a buffer (functional name: ``shuffle``).
+
+    The buffer with ``buffer_size`` is filled with elements from the datapipe first. Then,
+    each item will be yielded from the buffer by reservoir sampling via iterator.
+
+    ``buffer_size`` is required to be larger than ``0``. For ``buffer_size == 1``, the
+    datapipe is not shuffled. In order to fully shuffle all elements from datapipe,
+    ``buffer_size`` is required to be greater than or equal to the size of datapipe.
+
+    When it is used with :class:`torch.utils.data.DataLoader`, the methods to
+    set up random seed are different based on :attr:`num_workers`.
+
+    For single-process mode (:attr:`num_workers == 0`), the random seed is set before
+    the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process
+    mode (:attr:`num_workers > 0`), ``worker_init_fn`` is used to set up a random seed
+    for each worker process.
+
+    Args:
+        datapipe: The IterDataPipe being shuffled
+        buffer_size: The buffer size for shuffling (defaults to ``10000``)
+        unbatch_level: Specifies if it is necessary to unbatch source data before
+            applying the shuffle
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.iter import IterableWrapper
+        >>> dp = IterableWrapper(range(10))
+        >>> shuffle_dp = dp.shuffle()
+        >>> list(shuffle_dp)
+        [0, 4, 1, 6, 3, 2, 9, 5, 7, 8]
+    """
+
+    datapipe: IterDataPipe[T_co]
+    buffer_size: int
+    _buffer: List[T_co]
+    _enabled: bool
+    _seed: Optional[int]
+    _rng: random.Random
+
+    def __init__(self,
+                 datapipe: IterDataPipe[T_co],
+                 *,
+                 buffer_size: int = 10000,
+                 unbatch_level: int = 0
+                 ) -> None:
+        super().__init__()
+        # TODO: Performance optimization
+        #       buffer can be a fixed size and remove expensive `append()` and `len()` operations
+        self._buffer: List[T_co] = []
+        assert buffer_size > 0, "buffer_size should be larger than 0"
+        if unbatch_level == 0:
+            self.datapipe = datapipe
+        else:
+            self.datapipe = datapipe.unbatch(unbatch_level=unbatch_level)
+        self.buffer_size = buffer_size
+        self._enabled = True
+        self._seed = None
+        self._rng = random.Random()
+
+    def set_shuffle(self, shuffle=True):
+        self._enabled = shuffle
+        return self
+
+    def set_seed(self, seed: int):
+        self._seed = seed
+        return self
+
+    def __iter__(self) -> Iterator[T_co]:
+        if not self._enabled:
+            yield from self.datapipe
+        else:
+            for x in self.datapipe:
+                if len(self._buffer) == self.buffer_size:
+                    idx = self._rng.randint(0, len(self._buffer) - 1)
+                    val, self._buffer[idx] = self._buffer[idx], x
+                    yield val
+                else:
+                    self._buffer.append(x)
+            while self._buffer:
+                idx = self._rng.randint(0, len(self._buffer) - 1)
+                yield self._buffer.pop(idx)
+
+    def __len__(self) -> int:
+        if isinstance(self.datapipe, Sized):
+            return len(self.datapipe)
+        raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
+
+    def reset(self) -> None:
+        self._buffer = []
+        if self._enabled:
+            if self._seed is None:
+                self._seed = int(torch.empty((), dtype=torch.int64).random_().item())
+            self._rng.seed(self._seed)
+            self._seed = None
+
+    def __getstate__(self):
+        state = (
+            self.datapipe,
+            self.buffer_size,
+            self._enabled,
+            self._seed,
+            self._buffer,
+            self._rng.getstate(),
+            self._valid_iterator_id,
+            self._number_of_samples_yielded,
+        )
+        if IterDataPipe.getstate_hook is not None:
+            return IterDataPipe.getstate_hook(state)
+        return state
+
+    def __setstate__(self, state):
+        (
+            self.datapipe,
+            self.buffer_size,
+            self._enabled,
+            self._seed,
+            self._buffer,
+            rng_state,
+            self._valid_iterator_id,
+            self._number_of_samples_yielded,
+        ) = state
+        self._rng = random.Random()
+        self._rng.setstate(rng_state)
+
+    def __del__(self):
+        self._buffer.clear()
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combining.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combining.py
new file mode 100644
index
0000000000000000000000000000000000000000..9a4365516a33f3f9bc3a3877db09a78bc72a6289 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/combining.py @@ -0,0 +1,639 @@ +import warnings + +from abc import ABC, abstractmethod +from collections import deque +import copy as copymodule +from typing import Any, Callable, Iterator, List, Literal, Optional, Sized, Tuple, TypeVar, Deque + +from torch.utils.data.datapipes._decorator import functional_datapipe +from torch.utils.data.datapipes._hook_iterator import _SnapshotState +from torch.utils.data.datapipes.datapipe import IterDataPipe +from torch.utils.data.datapipes.utils.common import StreamWrapper, _check_unpickable_fn + +__all__ = [ + "ConcaterIterDataPipe", + "DemultiplexerIterDataPipe", + "ForkerIterDataPipe", + "MultiplexerIterDataPipe", + "ZipperIterDataPipe", +] + +T_co = TypeVar('T_co', covariant=True) + + +@functional_datapipe('concat') +class ConcaterIterDataPipe(IterDataPipe): + r""" + Concatenates multiple Iterable DataPipes (functional name: ``concat``). + + The resulting DataPipe will yield all the elements from the first input DataPipe, before yielding from the subsequent ones. + + Args: + datapipes: Iterable DataPipes being concatenated + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> import random + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1 = IterableWrapper(range(3)) + >>> dp2 = IterableWrapper(range(5)) + >>> list(dp1.concat(dp2)) + [0, 1, 2, 0, 1, 2, 3, 4] + """ + + datapipes: Tuple[IterDataPipe] + + def __init__(self, *datapipes: IterDataPipe): + if len(datapipes) == 0: + raise ValueError("Expected at least one DataPipe, but got nothing") + if not all(isinstance(dp, IterDataPipe) for dp in datapipes): + raise TypeError("Expected all inputs to be `IterDataPipe`") + self.datapipes = datapipes # type: ignore[assignment] + + def __iter__(self) -> Iterator: + for dp in self.datapipes: + yield from dp + + def __len__(self) -> int: + if all(isinstance(dp, Sized) for dp in self.datapipes): + return sum(len(dp) for dp in self.datapipes) + else: + raise TypeError(f"{type(self).__name__} instance doesn't have valid length") + + +@functional_datapipe('fork') +class ForkerIterDataPipe(IterDataPipe): + r""" + Creates multiple instances of the same Iterable DataPipe (functional name: ``fork``). + + Args: + datapipe: Iterable DataPipe being copied + num_instances: number of instances of the datapipe to create + buffer_size: this restricts how far ahead the leading child DataPipe + can read relative to the slowest child DataPipe. + Defaults to ``1000``. Use ``-1`` for the unlimited buffer. + copy: copy strategy to use for items yielded by each branch. Supported + options are ``None`` for no copying, ``"shallow"`` for shallow object + copies, and ``"deep"`` for deep object copies. Defaults to ``None``. + + Note: + All branches of the forked pipeline return the identical object unless + the copy parameter is supplied. If the object is mutable or contains + mutable objects, changing them in one branch will affect all others. 
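+        Passing ``copy="shallow"`` (or ``"deep"``) gives each branch its own copy of
+        every yielded item, at the cost of the extra copying work.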
+
+    Example:
+        >>> # xdoctest: +REQUIRES(module:torchdata)
+        >>> from torchdata.datapipes.iter import IterableWrapper
+        >>> source_dp = IterableWrapper(range(5))
+        >>> dp1, dp2 = source_dp.fork(num_instances=2)
+        >>> list(dp1)
+        [0, 1, 2, 3, 4]
+        >>> list(dp2)
+        [0, 1, 2, 3, 4]
+    """
+
+    def __new__(
+        cls,
+        datapipe: IterDataPipe,
+        num_instances: int,
+        buffer_size: int = 1000,
+        copy: Optional[Literal["shallow", "deep"]] = None
+    ):
+        if num_instances < 1:
+            raise ValueError(f"Expected `num_instances` larger than 0, but found {num_instances}")
+        if num_instances == 1:
+            return datapipe
+        container = _ForkerIterDataPipe(datapipe, num_instances, buffer_size, copy)  # type: ignore[abstract]
+        return [_ChildDataPipe(container, i) for i in range(num_instances)]
+
+
+class _ContainerTemplate(ABC):
+    r"""Abstract class for container ``DataPipes``. The following methods are required."""
+
+    @abstractmethod
+    def get_next_element_by_instance(self, instance_id: int):
+        ...
+
+    @abstractmethod
+    def is_every_instance_exhausted(self) -> bool:
+        ...
+
+    @abstractmethod
+    def reset(self) -> None:
+        ...
+
+    @abstractmethod
+    def get_length_by_instance(self, instance_id: int):
+        r"""Raise TypeError if it's not supposed to be implemented to support `list(datapipe)`."""
+
+
+def _no_op(x):
+    return x
+
+
+class _ForkerIterDataPipe(IterDataPipe, _ContainerTemplate):
+    r"""
+    Container to hold instance-specific information on behalf of ForkerIterDataPipe.
+
+    It tracks the state of its child DataPipes, maintains the buffer, and yields the next value
+    as requested by the child DataPipes.
+    """
+
+    def __init__(
+        self,
+        datapipe: IterDataPipe,
+        num_instances: int,
+        buffer_size: int = 1000,
+        copy: Optional[Literal["shallow", "deep"]] = None
+    ):
+        self.main_datapipe = datapipe
+        self._datapipe_iterator: Optional[Iterator[Any]] = None
+        self.num_instances = num_instances
+        self.buffer: Deque = deque()
+        self.buffer_size = buffer_size
+        if self.buffer_size < 0:
+            warnings.warn(
+                "Unlimited buffer size is set for `fork`, "
+                "please be aware of OOM at random places",
+                UserWarning
+            )
+        if copy is None:
+            self.copy_fn = _no_op
+        elif copy == "shallow":
+            self.copy_fn = copymodule.copy
+        elif copy == "deep":
+            self.copy_fn = copymodule.deepcopy
+        else:
+            raise ValueError(f"Unknown copy method `{copy}` requested, choose one of None, `shallow` or `deep`.")
+
+        self.child_pointers: List[int] = [0] * num_instances  # Indicate the indices of the next element to get
+        self.slowest_ptr = 0  # The index to read by the slowest child
+        self.leading_ptr = 0  # The index to read by the fastest child
+        self.end_ptr: Optional[int] = None  # The index to stop child
+        self._child_stop: List[bool] = [True for _ in range(num_instances)]
+
+    def __len__(self):
+        return len(self.main_datapipe)
+
+    def get_next_element_by_instance(self, instance_id: int):
+        if self._datapipe_iterator is None and self._child_stop[instance_id]:
+            self._datapipe_iterator = iter(self.main_datapipe)
+            self._snapshot_state = _SnapshotState.Iterating
+            for i in range(self.num_instances):
+                self._child_stop[i] = False
+        try:
+            while not self._child_stop[instance_id]:
+                self.child_pointers[instance_id] += 1
+                if self.end_ptr is not None and self.child_pointers[instance_id] == self.end_ptr:
+                    self._child_stop[instance_id] = True
+                    break
+                # Use buffer
+                if self.buffer and self.child_pointers[instance_id] <= self.leading_ptr:
+                    idx = self.child_pointers[instance_id] - self.slowest_ptr - 1
+                    return_val = self.buffer[idx]
+                else:  # Retrieve one element from the main datapipe
+                    self.leading_ptr = self.child_pointers[instance_id]
+                    try:
+                        return_val = next(self._datapipe_iterator)  # type: ignore[arg-type]
+                        self.buffer.append(return_val)
+                    except StopIteration:
+                        self._child_stop[instance_id] = True
+                        self._datapipe_iterator = None
+                        self.end_ptr = self.leading_ptr
+                        continue
+                if self.child_pointers[instance_id] == self.slowest_ptr + 1:
+                    new_min = min(self.child_pointers)  # Can optimize by avoiding the call to min()
+                    if self.slowest_ptr < new_min:
+                        self.slowest_ptr = new_min
+                        self.buffer.popleft()
+                if self.buffer_size >= 0 and self.leading_ptr > self.buffer_size + self.slowest_ptr:
+                    raise BufferError("ForkerIterDataPipe buffer overflow, " +
+                                      f"buffer size {self.buffer_size} is insufficient.")
+
+                yield self.copy_fn(return_val)  # type: ignore[possibly-undefined]
+        finally:
+            self._child_stop[instance_id] = True
+            # Cleanup _datapipe_iterator for the case that fork exits earlier
+            if all(self._child_stop):
+                self._datapipe_iterator = None
+                self._cleanup()
+
+    def is_every_instance_exhausted(self) -> bool:
+        return self.end_ptr is not None and all(self._child_stop)
+
+    def get_length_by_instance(self, instance_id: int) -> int:
+        return len(self.main_datapipe)
+
+    def reset(self) -> None:
+        self._datapipe_iterator = None
+        self.buffer = deque()
+        self.child_pointers = [0] * self.num_instances
+        self.slowest_ptr = 0
+        self.leading_ptr = 0
+        self.end_ptr = None
+        self._child_stop = [True for _ in range(self.num_instances)]
+
+    def __getstate__(self):
+        state = (
+            self.main_datapipe,
+            self.num_instances,
+            self.buffer_size,
+            self.copy_fn,
+            self._valid_iterator_id,
+            self._number_of_samples_yielded,
+        )
+        if IterDataPipe.getstate_hook is not None:
+            return IterDataPipe.getstate_hook(state)
+        return state
+
+    def __setstate__(self, state):
+        (
+            self.main_datapipe,
+            self.num_instances,
+            self.buffer_size,
+            self.copy_fn,
+            self._valid_iterator_id,
+            self._number_of_samples_yielded,
+        ) = state
+        self._datapipe_iterator = None
+        self.buffer = deque()
+        self.child_pointers = [0] * self.num_instances
+        self.slowest_ptr = 0
+        self.leading_ptr = 0
+        self.end_ptr = None
+        self._child_stop = [True for _ in range(self.num_instances)]
+
+    def _cleanup(self):
+        while self.buffer:
+            d = self.buffer.popleft()
+            StreamWrapper.close_streams(d)
+
+    def __del__(self):
+        self._cleanup()
+
+
+class _ChildDataPipe(IterDataPipe):
+    r"""
+    Iterable Datapipe that is a child of a main DataPipe.
+
+    The instance of this class will pass its instance_id to get the next value from its main DataPipe.
+
+    Note:
+        ChildDataPipe, like all other IterDataPipe, follows the single iterator per IterDataPipe constraint.
+        Since ChildDataPipes share a common buffer, when an iterator is created for one of the ChildDataPipes,
+        the previous iterators for all ChildDataPipes must be invalidated, with the exception when a ChildDataPipe
+        hasn't had an iterator created from it since the last invalidation. See the example below.
+
+    Example:
+        >>> # xdoctest: +REQUIRES(module:torchdata)
+        >>> # Single Iterator per IterDataPipe Invalidation
+        >>> from torchdata.datapipes.iter import IterableWrapper
+        >>> source_dp = IterableWrapper(range(10))
+        >>> cdp1, cdp2 = source_dp.fork(num_instances=2)
+        >>> it1, it2 = iter(cdp1), iter(cdp2)
+        >>> it3 = iter(cdp1)
+        >>> # The line above invalidates `it1` and `it2`, and resets `ForkerIterDataPipe`.
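+        >>> # Advancing `it1` or `it2` at this point would raise a RuntimeError, since they are invalidated.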
+ >>> it4 = iter(cdp2) + >>> # The line above doesn't invalidate `it3`, because an iterator for `cdp2` hasn't been created since + >>> # the last invalidation. + + Args: + main_datapipe: Main DataPipe with a method 'get_next_element_by_instance(instance_id)' + instance_id: integer identifier of this instance + """ + + _is_child_datapipe: bool = True + + def __init__(self, main_datapipe: IterDataPipe, instance_id: int): + assert isinstance(main_datapipe, _ContainerTemplate) + + self.main_datapipe: IterDataPipe = main_datapipe + self.instance_id = instance_id + + def __iter__(self): + # Note that the logic behind setting iterator ID and `reset` are handled within `hook_iterator` + # We want to separate the code for reset and yield, so that 'reset' executes before __next__ is called + return self.main_datapipe.get_next_element_by_instance(self.instance_id) + + def __len__(self): + return self.main_datapipe.get_length_by_instance(self.instance_id) + + # This method is called by `hook_iterator` in `_typing.py`. + def _set_main_datapipe_valid_iterator_id(self) -> int: + r""" + Update the valid iterator ID for both this DataPipe object and `main_datapipe`. + + `main_datapipe.reset()` is called when the ID is incremented to a new generation. + """ + # 1. First time any child iterator is created + if self.main_datapipe._valid_iterator_id is None: + self.main_datapipe._valid_iterator_id = 0 # type: ignore[attr-defined] + # 2. This instance was already in the same generation as `main_datapipe`, + # we need to increment the ID further by 1 + elif self.main_datapipe._valid_iterator_id == self._valid_iterator_id: # type: ignore[has-type] + self.main_datapipe._valid_iterator_id += 1 # type: ignore[attr-defined] + # Whenever a new generation of iterator is created, the `main_datapipe` must reset + if not self.main_datapipe.is_every_instance_exhausted(): + warnings.warn("Some child DataPipes are not exhausted when __iter__ is called. We are resetting " + "the buffer and each child DataPipe will read from the start again.", UserWarning) + self.main_datapipe.reset() + # 3. Otherwise, the iterator is behind the others, so it will just need to catch up by setting + # the instance's iterator to match that of `main_datapipe` + self._valid_iterator_id = self.main_datapipe._valid_iterator_id + return self._valid_iterator_id + + # This method is called by `hook_iterator` in `_typing.py`. + def _check_valid_iterator_id(self, iterator_id) -> bool: + r"""Check the valid iterator ID against that of DataPipe object and that of `main_datapipe`.""" + return iterator_id == self._valid_iterator_id and iterator_id == self.main_datapipe._valid_iterator_id + + +@functional_datapipe('demux') +class DemultiplexerIterDataPipe(IterDataPipe): + r""" + Splits the input DataPipe into multiple child DataPipes, using the given classification function (functional name: ``demux``). + + A list of the child DataPipes is returned from this operation. + + Args: + datapipe: Iterable DataPipe being filtered + num_instances: number of instances of the DataPipe to create + classifier_fn: a function that maps values to an integer within the range ``[0, num_instances - 1]`` or ``None`` + drop_none: defaults to ``False``, if ``True``, the function will skip over elements classified as ``None`` + buffer_size: this defines the maximum number of inputs that the buffer can hold across all child + DataPipes while waiting for their values to be yielded. + Defaults to ``1000``. Use ``-1`` for the unlimited buffer. 
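+            If the children are read unevenly, items classified for the slower children
+            accumulate in this buffer in the meantime.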
+
+    Examples:
+        >>> # xdoctest: +REQUIRES(module:torchdata)
+        >>> from torchdata.datapipes.iter import IterableWrapper
+        >>> def odd_or_even(n):
+        ...     return n % 2
+        >>> source_dp = IterableWrapper(range(5))
+        >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even)
+        >>> list(dp1)
+        [0, 2, 4]
+        >>> list(dp2)
+        [1, 3]
+        >>> # It can also filter out any element that gets `None` from the `classifier_fn`
+        >>> def odd_or_even_no_zero(n):
+        ...     return n % 2 if n != 0 else None
+        >>> dp1, dp2 = source_dp.demux(num_instances=2, classifier_fn=odd_or_even_no_zero, drop_none=True)
+        >>> list(dp1)
+        [2, 4]
+        >>> list(dp2)
+        [1, 3]
+    """
+
+    def __new__(cls, datapipe: IterDataPipe, num_instances: int,
+                classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool = False, buffer_size: int = 1000):
+        if num_instances < 1:
+            raise ValueError(f"Expected `num_instances` larger than 0, but found {num_instances}")
+
+        _check_unpickable_fn(classifier_fn)
+
+        # When num_instances == 1, demux can be replaced by filter,
+        # but it is kept as Demultiplexer for the sake of consistency,
+        # e.g. raising an Error when the classification result is out of range
+        container = _DemultiplexerIterDataPipe(datapipe, num_instances, classifier_fn, drop_none, buffer_size)  # type: ignore[abstract]
+        return [_ChildDataPipe(container, i) for i in range(num_instances)]
+
+
+class _DemultiplexerIterDataPipe(IterDataPipe, _ContainerTemplate):
+    r"""
+    Container to hold instance-specific information on behalf of DemultiplexerIterDataPipe.
+
+    It tracks the state of its child DataPipes, maintains the buffer, classifies and yields the next correct value
+    as requested by the child DataPipes.
+    """
+
+    def __init__(self, datapipe: IterDataPipe[T_co], num_instances: int,
+                 classifier_fn: Callable[[T_co], Optional[int]], drop_none: bool, buffer_size: int):
+        self.main_datapipe = datapipe
+        self._datapipe_iterator: Optional[Iterator[Any]] = None
+        self.num_instances = num_instances
+        self.buffer_size = buffer_size
+        if self.buffer_size < 0:
+            warnings.warn(
+                "Unlimited buffer size is set for `demux`, "
+                "please be aware of OOM at random places",
+                UserWarning
+            )
+        self.current_buffer_usage = 0
+        self.child_buffers: List[Deque[T_co]] = [deque() for _ in range(num_instances)]
+        self.classifier_fn = classifier_fn
+        self.drop_none = drop_none
+        self.main_datapipe_exhausted = False
+        self._child_stop: List[bool] = [True for _ in range(num_instances)]
+
+    def _find_next(self, instance_id: int) -> T_co:  # type: ignore[type-var]
+        while True:
+            if self.main_datapipe_exhausted or self._child_stop[instance_id]:
+                raise StopIteration
+            if self._datapipe_iterator is None:
+                raise ValueError(
+                    "_datapipe_iterator has not been set, likely because this private method is called directly "
+                    "without invoking get_next_element_by_instance() first.")
+            value = next(self._datapipe_iterator)
+            classification = self.classifier_fn(value)
+            if classification is None and self.drop_none:
+                StreamWrapper.close_streams(value)
+                continue
+            if classification is None or classification >= self.num_instances or classification < 0:
+                raise ValueError(f"Output of the classification fn should be between 0 and {self.num_instances - 1}. 
" + + f"{classification} is returned.") + if classification == instance_id: + return value + self.child_buffers[classification].append(value) + self.current_buffer_usage += 1 + if self.buffer_size >= 0 and self.current_buffer_usage > self.buffer_size: + raise BufferError( + f"DemultiplexerIterDataPipe buffer overflow, buffer size {self.buffer_size} is insufficient.") + + def get_next_element_by_instance(self, instance_id: int): + if self._datapipe_iterator is None and self._child_stop[instance_id]: + self._datapipe_iterator = iter(self.main_datapipe) + self._snapshot_state = _SnapshotState.Iterating # This is necessary for the DataPipe to reset properly. + self.main_datapipe_exhausted = False + for i in range(self.num_instances): + self._child_stop[i] = False + + try: + while not self._child_stop[instance_id]: + if self.child_buffers[instance_id]: + self.current_buffer_usage -= 1 + yield self.child_buffers[instance_id].popleft() + else: + try: + yield self._find_next(instance_id) + except StopIteration: + self._child_stop[instance_id] = True + self.main_datapipe_exhausted = True + self._datapipe_iterator = None + finally: + self._child_stop[instance_id] = True + # Cleanup _datapipe_iterator for the case that demux exits earlier + if all(self._child_stop): + self._datapipe_iterator = None + if self.child_buffers[instance_id]: + self._cleanup(instance_id) + + def is_every_instance_exhausted(self) -> bool: + return self.main_datapipe_exhausted and all(self._child_stop) + + def get_length_by_instance(self, instance_id: int) -> int: + raise TypeError + + def reset(self) -> None: + self._datapipe_iterator = None + self.current_buffer_usage = 0 + self.child_buffers = [deque() for _ in range(self.num_instances)] + self._child_stop = [True for _ in range(self.num_instances)] + self.main_datapipe_exhausted = False + + def __getstate__(self): + state = ( + self.main_datapipe, + self.num_instances, + self.buffer_size, + self.classifier_fn, + self.drop_none, + self._valid_iterator_id, + self._number_of_samples_yielded, + ) + if IterDataPipe.getstate_hook is not None: + return IterDataPipe.getstate_hook(state) + return state + + def __setstate__(self, state): + ( + self.main_datapipe, + self.num_instances, + self.buffer_size, + self.classifier_fn, + self.drop_none, + self._valid_iterator_id, + self._number_of_samples_yielded, + ) = state + self._datapipe_iterator = None + self.current_buffer_usage = 0 + self.child_buffers = [deque() for _ in range(self.num_instances)] + self._child_stop = [True for _ in range(self.num_instances)] + self.main_datapipe_exhausted = False + + def _cleanup(self, instance_id: Optional[int] = None): + ids = range(self.num_instances) if instance_id is None else [instance_id, ] + for i in ids: + q = self.child_buffers[i] + while q: + d = q.popleft() + StreamWrapper.close_streams(d) + + + def __del__(self): + self._cleanup() + + +@functional_datapipe('mux') +class MultiplexerIterDataPipe(IterDataPipe): + r""" + Yields one element at a time from each of the input Iterable DataPipes (functional name: ``mux``). + + As in, one element from the 1st input DataPipe, then one element from the 2nd DataPipe in the next iteration, + and so on. It ends when the shortest input DataPipe is exhausted. 
+ + Args: + datapipes: Iterable DataPipes that will take turn to yield their elements, until the shortest DataPipe is exhausted + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1, dp2, dp3 = IterableWrapper(range(3)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) + >>> list(dp1.mux(dp2, dp3)) + [0, 10, 20, 1, 11, 21, 2, 12, 22] + """ + + def __init__(self, *datapipes): + self.datapipes = datapipes + self.buffer: List = [] # Store values to be yielded only when every iterator provides one + + def __iter__(self): + iterators = [iter(x) for x in self.datapipes] + while len(iterators): + for it in iterators: + try: + value = next(it) + self.buffer.append(value) + except StopIteration: + self.buffer.clear() + return + yield from self.buffer + self.buffer.clear() + + def __len__(self): + if all(isinstance(dp, Sized) for dp in self.datapipes): + return min(len(dp) for dp in self.datapipes) * len(self.datapipes) + else: + raise TypeError(f"{type(self).__name__} instance doesn't have valid length") + + def reset(self) -> None: + self.buffer = [] + + def __getstate__(self): + state = ( + self.datapipes, + self._valid_iterator_id, + self._number_of_samples_yielded, + ) + if IterDataPipe.getstate_hook is not None: + return IterDataPipe.getstate_hook(state) + return state + + def __setstate__(self, state): + ( + self.datapipes, + self._valid_iterator_id, + self._number_of_samples_yielded, + ) = state + self.buffer = [] + + def __del__(self): + self.buffer.clear() + + +@functional_datapipe('zip') +class ZipperIterDataPipe(IterDataPipe[Tuple[T_co]]): + r""" + Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``). + + The output is stopped as soon as the shortest input DataPipe is exhausted. 
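+    Elements remaining in longer inputs beyond that point are never yielded,
+    mirroring the behavior of the built-in ``zip``.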
+ + Args: + *datapipes: Iterable DataPipes being aggregated + + Example: + >>> # xdoctest: +REQUIRES(module:torchdata) + >>> from torchdata.datapipes.iter import IterableWrapper + >>> dp1, dp2, dp3 = IterableWrapper(range(5)), IterableWrapper(range(10, 15)), IterableWrapper(range(20, 25)) + >>> list(dp1.zip(dp2, dp3)) + [(0, 10, 20), (1, 11, 21), (2, 12, 22), (3, 13, 23), (4, 14, 24)] + """ + + datapipes: Tuple[IterDataPipe] + + def __init__(self, *datapipes: IterDataPipe): + if not all(isinstance(dp, IterDataPipe) for dp in datapipes): + raise TypeError("All inputs are required to be `IterDataPipe` " + "for `ZipIterDataPipe`.") + super().__init__() + self.datapipes = datapipes # type: ignore[assignment] + + def __iter__(self) -> Iterator[Tuple[T_co]]: + iterators = [iter(datapipe) for datapipe in self.datapipes] + yield from zip(*iterators) + + def __len__(self) -> int: + if all(isinstance(dp, Sized) for dp in self.datapipes): + return min(len(dp) for dp in self.datapipes) + else: + raise TypeError(f"{type(self).__name__} instance doesn't have valid length") diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/filelister.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/filelister.py new file mode 100644 index 0000000000000000000000000000000000000000..bb10fe4c4965c0355662e9177ea1a17b9e9ce3f2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/filelister.py @@ -0,0 +1,66 @@ +from typing import Iterator, List, Sequence, Union + + +from torch.utils.data.datapipes._decorator import functional_datapipe + +from torch.utils.data.datapipes.datapipe import IterDataPipe +from torch.utils.data.datapipes.iter import IterableWrapper +from torch.utils.data.datapipes.utils.common import get_file_pathnames_from_root + +__all__ = ["FileListerIterDataPipe", ] + + +@functional_datapipe("list_files") +class FileListerIterDataPipe(IterDataPipe[str]): + r""" + Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory. + + Multiple root directories can be provided (functional name: ``list_files``). + + Args: + root: Root directory or a sequence of root directories + masks: Unix style filter string or string list for filtering file name(s) + recursive: Whether to return pathname from nested directories or not + abspath: Whether to return relative pathname or absolute pathname + non_deterministic: Whether to return pathname in sorted order or not. 
If ``False``, the results yielded from each root directory will be sorted
+        length: Nominal length of the datapipe
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.iter import FileLister
+        >>> dp = FileLister(root=".", recursive=True)
+        >>> list(dp)
+        ['example.py', './data/data.tar']
+    """
+
+    def __init__(
+        self,
+        root: Union[str, Sequence[str], IterDataPipe] = '.',
+        masks: Union[str, List[str]] = '',
+        *,
+        recursive: bool = False,
+        abspath: bool = False,
+        non_deterministic: bool = False,
+        length: int = -1
+    ) -> None:
+        super().__init__()
+        if isinstance(root, str):
+            root = [root, ]
+        if not isinstance(root, IterDataPipe):
+            root = IterableWrapper(root)
+        self.datapipe: IterDataPipe = root
+        self.masks: Union[str, List[str]] = masks
+        self.recursive: bool = recursive
+        self.abspath: bool = abspath
+        self.non_deterministic: bool = non_deterministic
+        self.length: int = length
+
+    def __iter__(self) -> Iterator[str]:
+        for path in self.datapipe:
+            yield from get_file_pathnames_from_root(path, self.masks, self.recursive, self.abspath, self.non_deterministic)
+
+    def __len__(self):
+        if self.length == -1:
+            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
+        return self.length
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/fileopener.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/fileopener.py
new file mode 100644
index 0000000000000000000000000000000000000000..67e9797fe3356f9d0756de492eee3ca618f43fd3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/fileopener.py
@@ -0,0 +1,71 @@
+from io import IOBase
+from typing import Iterable, Tuple, Optional
+
+from torch.utils.data.datapipes._decorator import functional_datapipe
+from torch.utils.data.datapipes.datapipe import IterDataPipe
+from torch.utils.data.datapipes.utils.common import get_file_binaries_from_pathnames
+
+__all__ = [
+    "FileOpenerIterDataPipe",
+]
+
+
+@functional_datapipe("open_files")
+class FileOpenerIterDataPipe(IterDataPipe[Tuple[str, IOBase]]):
+    r"""
+    Given pathnames, opens files and yields a tuple of pathname and file stream (functional name: ``open_files``).
+
+    Args:
+        datapipe: Iterable datapipe that provides pathnames
+        mode: An optional string that specifies the mode in which
+            the file is opened by ``open()``. It defaults to ``r``; other options are
+            ``b`` for reading in binary mode and ``t`` for text mode.
+        encoding: An optional string that specifies the encoding of the
+            underlying file. It defaults to ``None`` to match the default encoding of ``open``.
+        length: Nominal length of the datapipe
+
+    Note:
+        The opened file handles will be closed by Python's GC periodically. Users can choose
+        to close them explicitly.
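+        For instance, a downstream DataPipe may call ``close()`` on a stream once it
+        has been fully consumed.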
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader
+        >>> dp = FileLister(root=".").filter(lambda fname: fname.endswith('.txt'))
+        >>> dp = FileOpener(dp)
+        >>> dp = StreamReader(dp)
+        >>> list(dp)
+        [('./abc.txt', 'abc')]
+    """
+
+    def __init__(
+            self,
+            datapipe: Iterable[str],
+            mode: str = 'r',
+            encoding: Optional[str] = None,
+            length: int = -1):
+        super().__init__()
+        self.datapipe: Iterable = datapipe
+        self.mode: str = mode
+        self.encoding: Optional[str] = encoding
+
+        if self.mode not in ('b', 't', 'rb', 'rt', 'r'):
+            raise ValueError(f"Invalid mode {mode}")
+        # TODO: enforce typing for each instance based on mode, otherwise
+        #       `argument_validation` with this DataPipe may be potentially broken
+
+        if 'b' in mode and encoding is not None:
+            raise ValueError("binary mode doesn't take an encoding argument")
+
+        self.length: int = length
+
+    # Remove annotation due to 'IOBase' is a general type and true type
+    # is determined at runtime based on mode. Some `DataPipe` requiring
+    # a subtype would cause mypy error.
+    def __iter__(self):
+        yield from get_file_binaries_from_pathnames(self.datapipe, self.mode, self.encoding)
+
+    def __len__(self):
+        if self.length == -1:
+            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
+        return self.length
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/grouping.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/grouping.py
new file mode 100644
index 0000000000000000000000000000000000000000..c11804ea2cc05563173da8f51756138233beb5ac
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/grouping.py
@@ -0,0 +1,300 @@
+import warnings
+from collections import defaultdict
+from typing import Any, Callable, DefaultDict, Iterator, List, Optional, Sized, TypeVar
+
+import torch.utils.data.datapipes.iter.sharding
+
+from torch.utils.data.datapipes._decorator import functional_datapipe
+from torch.utils.data.datapipes.datapipe import DataChunk, IterDataPipe
+from torch.utils.data.datapipes.utils.common import _check_unpickable_fn
+
+__all__ = [
+    "BatcherIterDataPipe",
+    "GrouperIterDataPipe",
+    "UnBatcherIterDataPipe",
+]
+
+T_co = TypeVar("T_co", covariant=True)
+
+def __getattr__(name: str):
+    if name in ["SHARDING_PRIORITIES", "ShardingFilterIterDataPipe"]:
+        warnings.warn(f"`{name}` from `torch.utils.data.datapipes.iter.grouping` is going to be removed in PyTorch 2.1. "
+                      f"Please use `{name}` from `torch.utils.data.datapipes.iter.sharding` instead.",
+                      category=FutureWarning, stacklevel=2)
+
+        return getattr(torch.utils.data.datapipes.iter.sharding, name)
+
+    raise AttributeError(f"module {__name__} has no attribute {name}")
+
+@functional_datapipe('batch')
+class BatcherIterDataPipe(IterDataPipe[DataChunk]):
+    r"""
+    Creates mini-batches of data (functional name: ``batch``).
+
+    An outer dimension of size ``batch_size`` is added; if ``drop_last`` is set to ``False``,
+    the last batch may instead hold ``length % batch_size`` elements.
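+
+    With ``drop_last=False`` the resulting length is ``ceil(len(source) / batch_size)``;
+    with ``drop_last=True`` it is ``len(source) // batch_size``, as implemented in
+    ``__len__`` below.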
+
+    Args:
+        datapipe: Iterable DataPipe being batched
+        batch_size: The size of each batch
+        drop_last: Option to drop the last batch if it's not full
+        wrapper_class: wrapper to apply onto each batch (type ``List``) before yielding,
+            defaults to ``DataChunk``
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.iter import IterableWrapper
+        >>> dp = IterableWrapper(range(10))
+        >>> dp = dp.batch(batch_size=3, drop_last=True)
+        >>> list(dp)
+        [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
+    """
+
+    datapipe: IterDataPipe
+    batch_size: int
+    drop_last: bool
+
+    def __init__(self,
+                 datapipe: IterDataPipe,
+                 batch_size: int,
+                 drop_last: bool = False,
+                 wrapper_class=DataChunk,
+                 ) -> None:
+        assert batch_size > 0, "Batch size is required to be larger than 0!"
+        super().__init__()
+        self.datapipe = datapipe
+        self.batch_size = batch_size
+        self.drop_last = drop_last
+        self.wrapper_class = wrapper_class
+
+    def __iter__(self) -> Iterator[DataChunk]:
+        batch: List = []
+        for x in self.datapipe:
+            batch.append(x)
+            if len(batch) == self.batch_size:
+                yield self.wrapper_class(batch)
+                batch = []
+        if len(batch) > 0:
+            if not self.drop_last:
+                yield self.wrapper_class(batch)
+
+    def __len__(self) -> int:
+        if isinstance(self.datapipe, Sized):
+            if self.drop_last:
+                return len(self.datapipe) // self.batch_size
+            else:
+                return (len(self.datapipe) + self.batch_size - 1) // self.batch_size
+        else:
+            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
+
+
+@functional_datapipe('unbatch')
+class UnBatcherIterDataPipe(IterDataPipe):
+    r"""
+    Undoes batching of data (functional name: ``unbatch``).
+
+    In other words, it flattens the data up to the specified level within a batched DataPipe.
+
+    Args:
+        datapipe: Iterable DataPipe being un-batched
+        unbatch_level: Defaults to ``1`` (only flattening the top level). If set to ``2``,
+            it will flatten the top two levels, and ``-1`` will flatten the entire DataPipe.
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.iter import IterableWrapper
+        >>> source_dp = IterableWrapper([[[0, 1], [2]], [[3, 4], [5]], [[6]]])
+        >>> dp1 = source_dp.unbatch()
+        >>> list(dp1)
+        [[0, 1], [2], [3, 4], [5], [6]]
+        >>> dp2 = source_dp.unbatch(unbatch_level=2)
+        >>> list(dp2)
+        [0, 1, 2, 3, 4, 5, 6]
+    """
+
+    def __init__(self,
+                 datapipe: IterDataPipe,
+                 unbatch_level: int = 1):
+        self.datapipe = datapipe
+        self.unbatch_level = unbatch_level
+
+    def __iter__(self):
+        for element in self.datapipe:
+            yield from self._dive(element, unbatch_level=self.unbatch_level)
+
+    def _dive(self, element, unbatch_level):
+        if unbatch_level < -1:
+            raise ValueError("unbatch_level must be -1 or >= 0")
+        if unbatch_level == -1:
+            if isinstance(element, (list, DataChunk)):
+                for item in element:
+                    yield from self._dive(item, unbatch_level=-1)
+            else:
+                yield element
+        elif unbatch_level == 0:
+            yield element
+        else:
+            if isinstance(element, (list, DataChunk)):
+                for item in element:
+                    yield from self._dive(item, unbatch_level=unbatch_level - 1)
+            else:
+                raise IndexError(f"unbatch_level {self.unbatch_level} exceeds the depth of the DataPipe")
+
+
+@functional_datapipe('groupby')
+class GrouperIterDataPipe(IterDataPipe[DataChunk]):
+    r"""
+    Groups data from IterDataPipe by keys from ``group_key_fn``, yielding a ``DataChunk`` with batch size up to ``group_size``.
+
+    (functional name: ``groupby``).
+ + The samples are read sequentially from the source ``datapipe``, and a batch of samples belonging to the same group + will be yielded as soon as the size of the batch reaches ``group_size``. When the buffer is full, + the DataPipe will yield the largest batch with the same key, provided that its size is larger + than ``guaranteed_group_size``. If its size is smaller, it will be dropped if ``drop_remaining=True``. + + After iterating through the entirety of source ``datapipe``, everything not dropped due to the buffer capacity + will be yielded from the buffer, even if the group sizes are smaller than ``guaranteed_group_size``. + + Args: + datapipe: Iterable datapipe to be grouped + group_key_fn: Function used to generate group key from the data of the source datapipe + keep_key: Option to yield the matching key along with the items in a tuple, + resulting in `(key, [items])` otherwise returning [items] + buffer_size: The size of buffer for ungrouped data + group_size: The max size of each group, a batch is yielded as soon as it reaches this size + guaranteed_group_size: The guaranteed minimum group size to be yielded in case the buffer is full + drop_remaining: Specifies if the group smaller than ``guaranteed_group_size`` will be dropped from buffer + when the buffer is full + + Example: + >>> import os + >>> # xdoctest: +SKIP + >>> from torchdata.datapipes.iter import IterableWrapper + >>> def group_fn(file): + ... return os.path.basename(file).split(".")[0] + >>> source_dp = IterableWrapper(["a.png", "b.png", "a.json", "b.json", "a.jpg", "c.json"]) + >>> dp0 = source_dp.groupby(group_key_fn=group_fn) + >>> list(dp0) + [['a.png', 'a.json', 'a.jpg'], ['b.png', 'b.json'], ['c.json']] + >>> # A group is yielded as soon as its size equals to `group_size` + >>> dp1 = source_dp.groupby(group_key_fn=group_fn, group_size=2) + >>> list(dp1) + [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']] + >>> # Scenario where `buffer` is full, and group 'a' needs to be yielded since its size > `guaranteed_group_size` + >>> dp2 = source_dp.groupby(group_key_fn=group_fn, buffer_size=3, group_size=3, guaranteed_group_size=2) + >>> list(dp2) + [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']] + """ + + def __init__(self, + datapipe: IterDataPipe[T_co], + group_key_fn: Callable[[T_co], Any], + *, + keep_key: bool = False, + buffer_size: int = 10000, + group_size: Optional[int] = None, + guaranteed_group_size: Optional[int] = None, + drop_remaining: bool = False): + _check_unpickable_fn(group_key_fn) + self.datapipe = datapipe + self.group_key_fn = group_key_fn + + self.keep_key = keep_key + self.max_buffer_size = buffer_size + self.buffer_elements: DefaultDict[Any, List] = defaultdict(list) + self.curr_buffer_size = 0 + self.group_size = group_size + self.guaranteed_group_size = None + if group_size is not None and buffer_size is not None: + assert 0 < group_size <= buffer_size + self.guaranteed_group_size = group_size + if guaranteed_group_size is not None: + assert group_size is not None and 0 < guaranteed_group_size <= group_size + self.guaranteed_group_size = guaranteed_group_size + self.drop_remaining = drop_remaining + self.wrapper_class = DataChunk + + def _remove_biggest_key(self): + biggest_key = None + biggest_size = 0 + result_to_yield = None + for findkey in self.buffer_elements.keys(): + if len(self.buffer_elements[findkey]) > biggest_size: + biggest_size = len(self.buffer_elements[findkey]) + biggest_key = findkey + + if self.guaranteed_group_size is not None and 
+            raise RuntimeError('Failed to group items', str(self.buffer_elements[biggest_key]))
+
+        if self.guaranteed_group_size is None or biggest_size >= self.guaranteed_group_size:
+            result_to_yield = self.buffer_elements[biggest_key]
+
+        self.curr_buffer_size -= biggest_size
+        del self.buffer_elements[biggest_key]
+
+        return result_to_yield
+
+    def __iter__(self):
+        for x in self.datapipe:
+            key = self.group_key_fn(x)
+
+            self.buffer_elements[key].append(x)
+            self.curr_buffer_size += 1
+
+            if self.group_size is not None and self.group_size == len(self.buffer_elements[key]):
+                result: DataChunk[Any] = self.wrapper_class(self.buffer_elements[key])
+                yield (key, result) if self.keep_key else result
+                self.curr_buffer_size -= len(self.buffer_elements[key])
+                del self.buffer_elements[key]
+
+            if self.curr_buffer_size == self.max_buffer_size:
+                result_to_yield = self._remove_biggest_key()
+                if result_to_yield is not None:
+                    result = self.wrapper_class(result_to_yield)
+                    yield (key, result) if self.keep_key else result
+
+        for key in tuple(self.buffer_elements.keys()):
+            result = self.wrapper_class(self.buffer_elements.pop(key))
+            self.curr_buffer_size -= len(result)
+            yield (key, result) if self.keep_key else result
+
+    def reset(self) -> None:
+        self.curr_buffer_size = 0
+        self.buffer_elements = defaultdict(list)
+
+    def __getstate__(self):
+        state = (
+            self.datapipe,
+            self.group_key_fn,
+            self.keep_key,
+            self.max_buffer_size,
+            self.group_size,
+            self.guaranteed_group_size,
+            self.drop_remaining,
+            self.wrapper_class,
+            self._valid_iterator_id,
+            self._number_of_samples_yielded,
+        )
+        if IterDataPipe.getstate_hook is not None:
+            return IterDataPipe.getstate_hook(state)
+        return state
+
+    def __setstate__(self, state):
+        (
+            self.datapipe,
+            self.group_key_fn,
+            self.keep_key,
+            self.max_buffer_size,
+            self.group_size,
+            self.guaranteed_group_size,
+            self.drop_remaining,
+            self.wrapper_class,
+            self._valid_iterator_id,
+            self._number_of_samples_yielded,
+        ) = state
+        self.curr_buffer_size = 0
+        self.buffer_elements = defaultdict(list)
+
+    def __del__(self):
+        self.buffer_elements.clear()
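# Editor's note: a minimal sketch (not part of the diff above) of the buffer
# semantics the GrouperIterDataPipe docstring describes in prose. It only
# assumes `IterableWrapper` from torch itself, shown later in this diff.
import os
from torch.utils.data.datapipes.iter import IterableWrapper

def group_fn(file):
    return os.path.basename(file).split(".")[0]

files = ["a.png", "b.png", "a.json", "b.json", "a.jpg", "c.json"]
dp = IterableWrapper(files).groupby(
    group_key_fn=group_fn,
    buffer_size=3,            # buffer fills after 3 ungrouped samples
    group_size=3,
    guaranteed_group_size=2,  # when full, only groups of >= 2 may be flushed
)
# When the buffer fills, the largest pending group ('a' with 2 items) is
# flushed early; a smaller largest group would raise unless drop_remaining=True.
print(list(dp))  # [['a.png', 'a.json'], ['b.png', 'b.json'], ['a.jpg'], ['c.json']]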
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/routeddecoder.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/routeddecoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5f1878365538362d8f870e4119798601d0d1173
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/routeddecoder.py
@@ -0,0 +1,66 @@
+from io import BufferedIOBase
+from typing import Any, Callable, Iterable, Iterator, Sized, Tuple
+
+from torch.utils.data.datapipes._decorator import functional_datapipe
+from torch.utils.data.datapipes.datapipe import IterDataPipe
+from torch.utils.data.datapipes.utils.common import _deprecation_warning
+from torch.utils.data.datapipes.utils.decoder import (
+    Decoder,
+    basichandlers as decoder_basichandlers,
+    imagehandler as decoder_imagehandler,
+    extension_extract_fn
+)
+
+__all__ = ["RoutedDecoderIterDataPipe", ]
+
+
+@functional_datapipe('routed_decode')
+class RoutedDecoderIterDataPipe(IterDataPipe[Tuple[str, Any]]):
+    r"""
+    Decodes binary streams from input DataPipe, yields pathname and decoded data in a tuple
+    (functional name: ``routed_decode``).
+
+    Args:
+        datapipe: Iterable datapipe that provides pathname and binary stream in tuples
+        handlers: Optional user defined decoder handlers. If ``None``, basic and image decoder
+            handlers will be set as default. If multiple handlers are provided, the priority
+            order follows the order of handlers (the first handler has the top priority)
+        key_fn: Function for decoder to extract key from pathname to dispatch handlers.
+            Default is set to extract file extension from pathname
+
+    Note:
+        When ``key_fn`` is specified to return anything other than an extension, the default
+        handlers will not work and users need to specify a custom handler. A custom handler
+        could use a regex to determine whether it should handle the given data.
+    """
+
+    def __init__(self,
+                 datapipe: Iterable[Tuple[str, BufferedIOBase]],
+                 *handlers: Callable,
+                 key_fn: Callable = extension_extract_fn) -> None:
+        super().__init__()
+        self.datapipe: Iterable[Tuple[str, BufferedIOBase]] = datapipe
+        if not handlers:
+            handlers = (decoder_basichandlers, decoder_imagehandler('torch'))
+        self.decoder = Decoder(*handlers, key_fn=key_fn)
+        _deprecation_warning(
+            type(self).__name__,
+            deprecation_version="1.12",
+            removal_version="1.13",
+            old_functional_name="routed_decode",
+        )
+
+    def add_handler(self, *handler: Callable) -> None:
+        self.decoder.add_handler(*handler)
+
+    def __iter__(self) -> Iterator[Tuple[str, Any]]:
+        for data in self.datapipe:
+            pathname = data[0]
+            result = self.decoder(data)
+            yield (pathname, result[pathname])
+
+    def __len__(self) -> int:
+        if isinstance(self.datapipe, Sized):
+            return len(self.datapipe)
+        raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/selecting.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/selecting.py
new file mode 100644
index 0000000000000000000000000000000000000000..fee74582e61bd613a60bf5eac7c7f5c3f60ca91f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/selecting.py
@@ -0,0 +1,96 @@
+from typing import Callable, Iterator, Tuple, TypeVar
+
+from torch.utils.data.datapipes._decorator import functional_datapipe
+from torch.utils.data.datapipes.datapipe import IterDataPipe
+from torch.utils.data.datapipes.dataframe import dataframe_wrapper as df_wrapper
+from torch.utils.data.datapipes.utils.common import (
+    _check_unpickable_fn,
+    StreamWrapper,
+    validate_input_col
+)
+
+
+__all__ = ["FilterIterDataPipe", ]
+
+T = TypeVar('T')
+T_co = TypeVar('T_co', covariant=True)
+
+
+@functional_datapipe('filter')
+class FilterIterDataPipe(IterDataPipe[T_co]):
+    r"""
+    Filters out elements from the source datapipe according to input ``filter_fn`` (functional name: ``filter``).
+
+    Args:
+        datapipe: Iterable DataPipe being filtered
+        filter_fn: Customized function mapping an element to a boolean.
+        input_col: Index or indices of data to which ``filter_fn`` is applied, such as:
+
+            - ``None`` as default to apply ``filter_fn`` to the data directly.
+            - Integer(s) are used for list/tuple.
+            - Key(s) are used for dict.
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.iter import IterableWrapper
+        >>> def is_even(n):
+        ...     return n % 2 == 0
+        >>> dp = IterableWrapper(range(5))
+        >>> filter_dp = dp.filter(filter_fn=is_even)
+        >>> list(filter_dp)
+        [0, 2, 4]
+    """
+
+    datapipe: IterDataPipe[T_co]
+    filter_fn: Callable
+
+    def __init__(
+        self,
+        datapipe: IterDataPipe[T_co],
+        filter_fn: Callable,
+        input_col=None,
+    ) -> None:
+        super().__init__()
+        self.datapipe = datapipe
+
+        _check_unpickable_fn(filter_fn)
+        self.filter_fn = filter_fn  # type: ignore[assignment]
+
+        self.input_col = input_col
+        validate_input_col(filter_fn, input_col)
+
+    def _apply_filter_fn(self, data) -> bool:
+        if self.input_col is None:
+            return self.filter_fn(data)
+        elif isinstance(self.input_col, (list, tuple)):
+            args = tuple(data[col] for col in self.input_col)
+            return self.filter_fn(*args)
+        else:
+            return self.filter_fn(data[self.input_col])
+
+    def __iter__(self) -> Iterator[T_co]:
+        for data in self.datapipe:
+            condition, filtered = self._returnIfTrue(data)
+            if condition:
+                yield filtered
+            else:
+                StreamWrapper.close_streams(data)
+
+    def _returnIfTrue(self, data: T) -> Tuple[bool, T]:
+        condition = self._apply_filter_fn(data)
+
+        if df_wrapper.is_column(condition):
+            # We are operating on DataFrames filter here
+            result = []
+            for idx, mask in enumerate(df_wrapper.iterate(condition)):
+                if mask:
+                    result.append(df_wrapper.get_item(data, idx))
+            if len(result):
+                return True, df_wrapper.concat(result)
+            else:
+                return False, None  # type: ignore[return-value]
+
+        if not isinstance(condition, bool):
+            raise ValueError("Boolean output is required for `filter_fn` of FilterIterDataPipe, got", type(condition))
+
+        return condition, data
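# Editor's note: a small sketch (not part of the diff) of the `input_col`
# routing described above; only `IterableWrapper` from torch is assumed.
from torch.utils.data.datapipes.iter import IterableWrapper

def price_under(name, price):
    return price < 3.0

rows = [("apple", 2.5), ("pear", 4.0), ("plum", 1.5)]
# input_col=(0, 1) unpacks each tuple into the two positional arguments,
# while the full tuple is still what gets yielded downstream.
cheap = IterableWrapper(rows).filter(filter_fn=price_under, input_col=(0, 1))
print(list(cheap))  # [('apple', 2.5), ('plum', 1.5)]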
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/sharding.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/sharding.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b25d6baf796aa4645b009451af1bbe7ab759c42
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/sharding.py
@@ -0,0 +1,84 @@
+from typing import (
+    Dict,
+    Sized,
+    Tuple,
+)
+
+from torch.utils.data.datapipes._decorator import functional_datapipe
+from torch.utils.data.datapipes.datapipe import IterDataPipe
+from enum import IntEnum
+
+__all__ = [
+    "SHARDING_PRIORITIES",
+    "ShardingFilterIterDataPipe",
+]
+
+
+class SHARDING_PRIORITIES(IntEnum):
+    DEFAULT = 1
+    DISTRIBUTED = 2
+    MULTIPROCESSING = 3
+
+
+class _ShardingIterDataPipe(IterDataPipe):
+    def apply_sharding(self, num_of_instances: int, instance_id: int, sharding_group: SHARDING_PRIORITIES):
+        raise NotImplementedError
+
+
+@functional_datapipe('sharding_filter')
+class ShardingFilterIterDataPipe(_ShardingIterDataPipe):
+    r"""
+    Wrapper that allows DataPipe to be sharded (functional name: ``sharding_filter``).
+
+    After ``apply_sharding`` is called, each instance of the DataPipe (on different workers) will have every `n`-th element of the
+    original DataPipe, where `n` equals the number of instances.
+
+    Args:
+        source_datapipe: Iterable DataPipe that will be sharded
+    """
+
+    def __init__(self, source_datapipe: IterDataPipe, sharding_group_filter=None):
+        self.source_datapipe = source_datapipe
+        self.sharding_group_filter = sharding_group_filter
+        self.groups: Dict[int, Tuple[int, int]] = {}
+        self.num_of_instances = 1
+        self.instance_id = 0
+        self._update_num_of_instances()
+
+    def apply_sharding(self, num_of_instances, instance_id, sharding_group=SHARDING_PRIORITIES.DEFAULT):
+        if instance_id >= num_of_instances:
+            raise ValueError(f"instance_id({instance_id}) should be smaller than num_of_instances({num_of_instances})")
+        if sharding_group == SHARDING_PRIORITIES.DEFAULT:
+            if len(self.groups) and SHARDING_PRIORITIES.DEFAULT not in self.groups:
+                raise Exception('ShardingFilter cannot mix DEFAULT and non DEFAULT groups')
+        else:
+            if SHARDING_PRIORITIES.DEFAULT in self.groups:
+                raise Exception('ShardingFilter cannot mix DEFAULT and non DEFAULT groups')
+        self.groups[sharding_group] = (num_of_instances, instance_id)
+        self._update_num_of_instances()
+
+    def _update_num_of_instances(self):
+        sorted_sharding_groups = []
+        for key in sorted(self.groups.keys()):
+            if self.sharding_group_filter is None or key == self.sharding_group_filter:
+                sorted_sharding_groups.append(self.groups[key])
+
+        sorted_sharding_groups.reverse()
+
+        self.num_of_instances = 1
+        self.instance_id = 0
+
+        for group_num_of_instances, group_instance_id in sorted_sharding_groups:
+            self.instance_id += self.num_of_instances * group_instance_id
+            self.num_of_instances *= group_num_of_instances
+
+    def __iter__(self):
+        for i, item in enumerate(self.source_datapipe):
+            if i % self.num_of_instances == self.instance_id:
+                yield item
+
+    def __len__(self):
+        if isinstance(self.source_datapipe, Sized):
+            return len(self.source_datapipe) // self.num_of_instances +\
+                (1 if (self.instance_id < len(self.source_datapipe) % self.num_of_instances) else 0)
+        raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
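# Editor's note: a hedged sketch (not part of the diff) of how sharding groups
# compose in `_update_num_of_instances`: the MULTIPROCESSING group nests inside
# the DISTRIBUTED group, so 2 ranks x 3 workers yields 6 shards overall.
from torch.utils.data.datapipes.iter import IterableWrapper
from torch.utils.data.datapipes.iter.sharding import SHARDING_PRIORITIES

dp = IterableWrapper(range(12)).sharding_filter()
dp.apply_sharding(2, 1, sharding_group=SHARDING_PRIORITIES.DISTRIBUTED)
dp.apply_sharding(3, 0, sharding_group=SHARDING_PRIORITIES.MULTIPROCESSING)
print(dp.num_of_instances, dp.instance_id)  # 6 3
print(list(dp))  # [3, 9] - every 6th element, starting from the combined id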
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/streamreader.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/streamreader.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fd80e94e509a7713fc513cb65b40b1c780a1847
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/streamreader.py
@@ -0,0 +1,40 @@
+from typing import Tuple
+from torch.utils.data.datapipes._decorator import functional_datapipe
+from torch.utils.data.datapipes.datapipe import IterDataPipe
+
+__all__ = ["StreamReaderIterDataPipe", ]
+
+
+@functional_datapipe('read_from_stream')
+class StreamReaderIterDataPipe(IterDataPipe[Tuple[str, bytes]]):
+    r"""
+    Given IO streams and their label names, yields bytes with label name in a tuple
+    (functional name: ``read_from_stream``).
+
+    Args:
+        datapipe: Iterable DataPipe that provides label/URL and byte stream
+        chunk: Number of bytes to be read from stream per iteration.
+            If ``None``, all bytes will be read until the EOF.
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.iter import IterableWrapper, StreamReader
+        >>> from io import StringIO
+        >>> dp = IterableWrapper([("alphabet", StringIO("abcde"))])
+        >>> list(StreamReader(dp, chunk=1))
+        [('alphabet', 'a'), ('alphabet', 'b'), ('alphabet', 'c'), ('alphabet', 'd'), ('alphabet', 'e')]
+    """
+
+    def __init__(self, datapipe, chunk=None):
+        self.datapipe = datapipe
+        self.chunk = chunk
+
+    def __iter__(self):
+        for furl, stream in self.datapipe:
+            while True:
+                d = stream.read(self.chunk)
+                if not d:
+                    stream.close()
+                    break
+                yield (furl, d)
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..3794f7f0e77834bf5da0d21be8a2d00285eb07ed
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/iter/utils.py
@@ -0,0 +1,51 @@
+import copy
+import warnings
+from torch.utils.data.datapipes.datapipe import IterDataPipe
+
+__all__ = ["IterableWrapperIterDataPipe", ]
+
+
+class IterableWrapperIterDataPipe(IterDataPipe):
+    r"""
+    Wraps an iterable object to create an IterDataPipe.
+
+    Args:
+        iterable: Iterable object to be wrapped into an IterDataPipe
+        deepcopy: Option to deepcopy input iterable object for each
+            iterator. The copy is made when the first element is read in ``iter()``.
+
+    .. note::
+        If ``deepcopy`` is explicitly set to ``False``, users should ensure
+        that the data pipeline doesn't contain any in-place operations over
+        the iterable instance to prevent data inconsistency across iterations.
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.iter import IterableWrapper
+        >>> dp = IterableWrapper(range(10))
+        >>> list(dp)
+        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+    """
+
+    def __init__(self, iterable, deepcopy=True):
+        self.iterable = iterable
+        self.deepcopy = deepcopy
+
+    def __iter__(self):
+        source_data = self.iterable
+        if self.deepcopy:
+            try:
+                source_data = copy.deepcopy(self.iterable)
+            # For the case that data cannot be deep-copied,
+            # all in-place operations will affect iterable variable.
+            # When this DataPipe is iterated second time, it will
+            # yield modified items.
+            except TypeError:
+                warnings.warn(
+                    "The input iterable can not be deepcopied, "
+                    "please be aware that in-place modification would affect source data."
+                )
+        yield from source_data
+
+    def __len__(self):
+        return len(self.iterable)
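# Editor's note: a short sketch (not part of the diff) of what the per-iterator
# deepcopy protects against: in-place edits to *yielded* items mutate the
# source only when deepcopy=False.
from torch.utils.data.datapipes.iter import IterableWrapper

data = [[0], [1]]
for item in IterableWrapper(data):        # deepcopy=True (default)
    item.append(99)                       # edits a private copy
print(data)                               # [[0], [1]] - source intact

for item in IterableWrapper(data, deepcopy=False):
    item.append(99)                       # edits the source's own lists
print(data)                               # [[0, 99], [1, 99]] - source mutated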
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..dee04d15cc7b4f501a0b263a4be598954f3016f2
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__init__.py
@@ -0,0 +1,17 @@
+# Functional DataPipe
+from torch.utils.data.datapipes.map.callable import MapperMapDataPipe as Mapper
+from torch.utils.data.datapipes.map.combinatorics import ShufflerIterDataPipe as Shuffler
+from torch.utils.data.datapipes.map.combining import (
+    ConcaterMapDataPipe as Concater,
+    ZipperMapDataPipe as Zipper
+)
+from torch.utils.data.datapipes.map.grouping import (
+    BatcherMapDataPipe as Batcher
+)
+from torch.utils.data.datapipes.map.utils import SequenceWrapperMapDataPipe as SequenceWrapper
+
+
+__all__ = ['Batcher', 'Concater', 'Mapper', 'SequenceWrapper', 'Shuffler', 'Zipper']
+
+# Please keep this list sorted
+assert __all__ == sorted(__all__)
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3c223693bf69d8af5cbe7156d14aa9a7d63e4f78
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/callable.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/callable.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..079efb638e8f761f84c1d06bad4c6c9f3a3069ae
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/callable.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/combinatorics.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/combinatorics.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..426beaecac879a5c7f9e1ce1a4559822073caa5c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/combinatorics.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/combining.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/combining.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..228eb72443ac6a44568af1e9d725f3c11cc726fb
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/combining.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/grouping.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/grouping.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..acfc0a976e58b12650b3aa83c1d696067f4a3045
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/grouping.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aeceecc02ecc72c6bc9481ea068daaaf8d7aca2c
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/__pycache__/utils.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/callable.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/callable.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9202bb1eefbb36373a2c805036687926bc97dec
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/callable.py
@@ -0,0 +1,61 @@
+from torch.utils.data.datapipes.utils.common import _check_unpickable_fn
+from typing import Callable, TypeVar
+from torch.utils.data.datapipes._decorator import functional_datapipe
+from torch.utils.data.datapipes.datapipe import MapDataPipe
+
+__all__ = ["MapperMapDataPipe", "default_fn"]
+
+T_co = TypeVar('T_co', covariant=True)
+
+
+# Default function to return each item directly
+# In order to keep the datapipe picklable, this eliminates the usage
+# of a python lambda function
+def default_fn(data):
+    return data
+
+
+@functional_datapipe('map')
+class MapperMapDataPipe(MapDataPipe[T_co]):
+    r"""
+    Apply the input function over each item from the source DataPipe (functional name: ``map``).
+
+    The function can be any regular Python function or partial object. Lambda
+    functions are not recommended, as they are not supported by pickle.
+
+    Args:
+        datapipe: Source MapDataPipe
+        fn: Function being applied to each item
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.map import SequenceWrapper, Mapper
+        >>> def add_one(x):
+        ...     return x + 1
+        >>> dp = SequenceWrapper(range(10))
+        >>> map_dp_1 = dp.map(add_one)
+        >>> list(map_dp_1)
+        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+        >>> map_dp_2 = Mapper(dp, lambda x: x + 1)
+        >>> list(map_dp_2)
+        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+    """
+
+    datapipe: MapDataPipe
+    fn: Callable
+
+    def __init__(
+        self,
+        datapipe: MapDataPipe,
+        fn: Callable = default_fn,
+    ) -> None:
+        super().__init__()
+        self.datapipe = datapipe
+        _check_unpickable_fn(fn)
+        self.fn = fn  # type: ignore[assignment]
+
+    def __len__(self) -> int:
+        return len(self.datapipe)
+
+    def __getitem__(self, index) -> T_co:
+        return self.fn(self.datapipe[index])
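# Editor's note: a brief sketch (not part of the diff) of the picklability
# advice above - functools.partial keeps the pipeline serializable where a
# lambda would not.
from functools import partial
from torch.utils.data.datapipes.map import SequenceWrapper

def scale(x, factor):
    return x * factor

dp = SequenceWrapper(range(5)).map(partial(scale, factor=10))
print(list(dp))  # [0, 10, 20, 30, 40]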
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combinatorics.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combinatorics.py
new file mode 100644
index 0000000000000000000000000000000000000000..c21d532d4925d59296d2f111c55a6755b4ae9101
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combinatorics.py
@@ -0,0 +1,126 @@
+import random
+
+import torch
+from torch.utils.data.datapipes.datapipe import IterDataPipe, MapDataPipe
+from typing import Iterator, List, Optional, TypeVar
+
+__all__ = ["ShufflerIterDataPipe", ]
+
+
+T_co = TypeVar('T_co', covariant=True)
+
+
+# @functional_datapipe('shuffle')
+class ShufflerIterDataPipe(IterDataPipe[T_co]):
+    r"""
+    Shuffle the input MapDataPipe via its indices (functional name: ``shuffle``).
+
+    When it is used with :class:`~torch.utils.data.DataLoader`, the methods to
+    set up random seed are different based on :attr:`num_workers`.
+
+    For single-process mode (:attr:`num_workers == 0`), the random seed is set before
+    the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process
+    mode (:attr:`num_workers > 0`), ``worker_init_fn`` is used to set up a random seed
+    for each worker process.
+
+    Args:
+        datapipe: MapDataPipe being shuffled
+        indices: a list of indices of the MapDataPipe. If not provided, we assume it uses 0-based indexing
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.map import SequenceWrapper
+        >>> dp = SequenceWrapper(range(10))
+        >>> shuffle_dp = dp.shuffle().set_seed(0)
+        >>> list(shuffle_dp)
+        [7, 8, 1, 5, 3, 4, 2, 0, 9, 6]
+        >>> list(shuffle_dp)
+        [6, 1, 9, 5, 2, 4, 7, 3, 8, 0]
+        >>> # Reset seed for Shuffler
+        >>> shuffle_dp = shuffle_dp.set_seed(0)
+        >>> list(shuffle_dp)
+        [7, 8, 1, 5, 3, 4, 2, 0, 9, 6]
+
+    Note:
+        Even though this ``shuffle`` operation takes a ``MapDataPipe`` as the input, it returns an
+        ``IterDataPipe`` rather than a ``MapDataPipe``, because ``MapDataPipe`` should be insensitive to
+        the order of data for the sake of random reads, while ``IterDataPipe`` depends on the order
+        of data during data-processing.
+    """
+
+    datapipe: MapDataPipe[T_co]
+    _enabled: bool
+    _seed: Optional[int]
+    _rng: random.Random
+
+    def __init__(self,
+                 datapipe: MapDataPipe[T_co],
+                 *,
+                 indices: Optional[List] = None,
+                 ) -> None:
+        super().__init__()
+        self.datapipe = datapipe
+        self.indices = list(range(len(datapipe))) if indices is None else indices
+        self._enabled = True
+        self._seed = None
+        self._rng = random.Random()
+        self._shuffled_indices: List = self.indices
+
+    def set_shuffle(self, shuffle=True):
+        self._enabled = shuffle
+        return self
+
+    def set_seed(self, seed: int):
+        self._seed = seed
+        return self
+
+    def __iter__(self) -> Iterator[T_co]:
+        if not self._enabled:
+            for idx in self.indices:
+                yield self.datapipe[idx]
+        else:
+            while self._shuffled_indices:
+                idx = self._shuffled_indices.pop()
+                yield self.datapipe[idx]
+
+    def reset(self) -> None:
+        if self._enabled and self._seed is None:
+            self._seed = int(torch.empty((), dtype=torch.int64).random_().item())
+        self._rng.seed(self._seed)
+        self._seed = None
+        self._shuffled_indices = self._rng.sample(self.indices, len(self.indices))
+
+    def __len__(self) -> int:
+        return len(self.datapipe)
+
+    def __getstate__(self):
+        state = (
+            self.datapipe,
+            self.indices,
+            self._enabled,
+            self._seed,
+            self._rng.getstate(),
+            self._shuffled_indices,
+            self._valid_iterator_id,
+            self._number_of_samples_yielded,
+        )
+        if IterDataPipe.getstate_hook is not None:
+            return IterDataPipe.getstate_hook(state)
+        return state
+
+    def __setstate__(self, state):
+        (
+            self.datapipe,
+            self.indices,
+            self._enabled,
+            self._seed,
+            rng_state,
+            self._shuffled_indices,
+            self._valid_iterator_id,
+            self._number_of_samples_yielded,
+        ) = state
+        self._rng = random.Random()
+        self._rng.setstate(rng_state)
+
+
+MapDataPipe.register_datapipe_as_function("shuffle", ShufflerIterDataPipe)
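# Editor's note: a hedged sketch (not part of the diff) of the __getstate__ /
# __setstate__ pair above - the RNG state travels with the pipe, so a pickled
# and restored shuffler should produce the same first shuffle order.
import pickle
from torch.utils.data.datapipes.map import SequenceWrapper

dp = SequenceWrapper(range(10)).shuffle().set_seed(0)
clone = pickle.loads(pickle.dumps(dp))
assert list(clone) == list(dp)  # identical order: seed/RNG state was serialized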
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combining.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combining.py
new file mode 100644
index 0000000000000000000000000000000000000000..809b44dc96cd8f0a8e7d3bf8795f76b512cb244f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/combining.py
@@ -0,0 +1,99 @@
+from torch.utils.data.datapipes._decorator import functional_datapipe
+from torch.utils.data.datapipes.datapipe import MapDataPipe
+from typing import Sized, Tuple, TypeVar
+
+__all__ = ["ConcaterMapDataPipe", "ZipperMapDataPipe"]
+
+T_co = TypeVar('T_co', covariant=True)
+
+
+@functional_datapipe('concat')
+class ConcaterMapDataPipe(MapDataPipe):
+    r"""
+    Concatenate multiple Map DataPipes (functional name: ``concat``).
+
+    The new index is the cumulative sum of the lengths of the source DataPipes.
+    For example, if there are 2 source DataPipes both with length 5,
+    index 0 to 4 of the resulting `ConcaterMapDataPipe` would refer to
+    elements of the first DataPipe, and 5 to 9 would refer to elements
+    of the second DataPipe.
+
+    Args:
+        datapipes: Map DataPipes being concatenated
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.map import SequenceWrapper
+        >>> dp1 = SequenceWrapper(range(3))
+        >>> dp2 = SequenceWrapper(range(3))
+        >>> concat_dp = dp1.concat(dp2)
+        >>> list(concat_dp)
+        [0, 1, 2, 0, 1, 2]
+    """
+
+    datapipes: Tuple[MapDataPipe]
+
+    def __init__(self, *datapipes: MapDataPipe):
+        if len(datapipes) == 0:
+            raise ValueError("Expected at least one DataPipe, but got nothing")
+        if not all(isinstance(dp, MapDataPipe) for dp in datapipes):
+            raise TypeError("Expected all inputs to be `MapDataPipe`")
+        if not all(isinstance(dp, Sized) for dp in datapipes):
+            raise TypeError("Expected all inputs to be `Sized`")
+        self.datapipes = datapipes  # type: ignore[assignment]
+
+    def __getitem__(self, index) -> T_co:  # type: ignore[type-var]
+        offset = 0
+        for dp in self.datapipes:
+            if index - offset < len(dp):
+                return dp[index - offset]
+            else:
+                offset += len(dp)
+        raise IndexError(f"Index {index} is out of range.")
+
+    def __len__(self) -> int:
+        return sum(len(dp) for dp in self.datapipes)
+
+
+@functional_datapipe('zip')
+class ZipperMapDataPipe(MapDataPipe[Tuple[T_co, ...]]):
+    r"""
+    Aggregates elements into a tuple from each of the input DataPipes (functional name: ``zip``).
+
+    This MapDataPipe is out of bound as soon as the shortest input DataPipe is exhausted.
+
+    Args:
+        *datapipes: Map DataPipes being aggregated
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.map import SequenceWrapper
+        >>> dp1 = SequenceWrapper(range(3))
+        >>> dp2 = SequenceWrapper(range(10, 13))
+        >>> zip_dp = dp1.zip(dp2)
+        >>> list(zip_dp)
+        [(0, 10), (1, 11), (2, 12)]
+    """
+
+    datapipes: Tuple[MapDataPipe[T_co], ...]
+
+    def __init__(self, *datapipes: MapDataPipe[T_co]) -> None:
+        if len(datapipes) == 0:
+            raise ValueError("Expected at least one DataPipe, but got nothing")
+        if not all(isinstance(dp, MapDataPipe) for dp in datapipes):
+            raise TypeError("Expected all inputs to be `MapDataPipe`")
+        if not all(isinstance(dp, Sized) for dp in datapipes):
+            raise TypeError("Expected all inputs to be `Sized`")
+        self.datapipes = datapipes
+
+    def __getitem__(self, index) -> Tuple[T_co, ...]:
+        res = []
+        for dp in self.datapipes:
+            try:
+                res.append(dp[index])
+            except IndexError as e:
+                raise IndexError(f"Index {index} is out of range for one of the input MapDataPipes {dp}.") from e
+        return tuple(res)
+
+    def __len__(self) -> int:
+        return min(len(dp) for dp in self.datapipes)
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/grouping.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/grouping.py
new file mode 100644
index 0000000000000000000000000000000000000000..a94cc7b5679e9107818f4ec73ae11497b002a7af
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/grouping.py
@@ -0,0 +1,69 @@
+from torch.utils.data.datapipes._decorator import functional_datapipe
+from torch.utils.data.datapipes.datapipe import MapDataPipe, DataChunk
+from typing import List, Sized, TypeVar
+
+__all__ = ["BatcherMapDataPipe", ]
+
+T = TypeVar('T')
+
+
+@functional_datapipe('batch')
+class BatcherMapDataPipe(MapDataPipe[DataChunk]):
+    r"""
+    Create mini-batches of data (functional name: ``batch``).
+
+    An outer dimension will be added as ``batch_size`` if ``drop_last`` is set to ``True``,
+    or ``length % batch_size`` for the last batch if ``drop_last`` is set to ``False``.
+
+    Args:
+        datapipe: MapDataPipe being batched
+        batch_size: The size of each batch
+        drop_last: Option to drop the last batch if it's not full
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.map import SequenceWrapper
+        >>> dp = SequenceWrapper(range(10))
+        >>> batch_dp = dp.batch(batch_size=2)
+        >>> list(batch_dp)
+        [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
+    """
+
+    datapipe: MapDataPipe
+    batch_size: int
+    drop_last: bool
+
+    def __init__(self,
+                 datapipe: MapDataPipe[T],
+                 batch_size: int,
+                 drop_last: bool = False,
+                 wrapper_class=DataChunk,
+                 ) -> None:
+        assert batch_size > 0, "Batch size is required to be larger than 0!"
+        super().__init__()
+        self.datapipe = datapipe
+        self.batch_size = batch_size
+        self.drop_last = drop_last
+        self.wrapper_class = wrapper_class
+
+    def __getitem__(self, index) -> DataChunk:
+        batch: List = []
+        indices = range(index * self.batch_size, (index + 1) * self.batch_size)
+        try:
+            for i in indices:
+                batch.append(self.datapipe[i])
+            return self.wrapper_class(batch)
+        except IndexError as e:
+            if not self.drop_last and len(batch) > 0:
+                return self.wrapper_class(batch)
+            else:
+                raise IndexError(f"Index {index} is out of bound.") from e
+
+    def __len__(self) -> int:
+        if isinstance(self.datapipe, Sized):
+            if self.drop_last:
+                return len(self.datapipe) // self.batch_size
+            else:
+                return (len(self.datapipe) + self.batch_size - 1) // self.batch_size
+        else:
+            raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
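# Editor's note: a quick sketch (not part of the diff) of the index arithmetic
# above: the final partial batch is kept unless drop_last=True.
from torch.utils.data.datapipes.map import SequenceWrapper

dp = SequenceWrapper(range(7)).batch(batch_size=3)
print(len(dp), list(dp))  # 3 [[0, 1, 2], [3, 4, 5], [6]]
dp = SequenceWrapper(range(7)).batch(batch_size=3, drop_last=True)
print(len(dp), list(dp))  # 2 [[0, 1, 2], [3, 4, 5]]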
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/utils.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..18d4fd18a1936fc89ab881b1cb7a7c826be0f2d4
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/map/utils.py
@@ -0,0 +1,50 @@
+import copy
+import warnings
+from torch.utils.data.datapipes.datapipe import MapDataPipe
+
+__all__ = ["SequenceWrapperMapDataPipe", ]
+
+
+class SequenceWrapperMapDataPipe(MapDataPipe):
+    r"""
+    Wraps a sequence object into a MapDataPipe.
+
+    Args:
+        sequence: Sequence object to be wrapped into a MapDataPipe
+        deepcopy: Option to deepcopy input sequence object
+
+    .. note::
+        If ``deepcopy`` is explicitly set to ``False``, users should ensure
+        that the data pipeline doesn't contain any in-place operations over
+        the iterable instance, in order to prevent data inconsistency
+        across iterations.
+
+    Example:
+        >>> # xdoctest: +SKIP
+        >>> from torchdata.datapipes.map import SequenceWrapper
+        >>> dp = SequenceWrapper(range(10))
+        >>> list(dp)
+        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
+        >>> dp = SequenceWrapper({'a': 100, 'b': 200, 'c': 300, 'd': 400})
+        >>> dp['a']
+        100
+    """
+
+    def __init__(self, sequence, deepcopy=True):
+        if deepcopy:
+            try:
+                self.sequence = copy.deepcopy(sequence)
+            except TypeError:
+                warnings.warn(
+                    "The input sequence can not be deepcopied, "
+                    "please be aware that in-place modification would affect source data"
+                )
+                self.sequence = sequence
+        else:
+            self.sequence = sequence
+
+    def __getitem__(self, index):
+        return self.sequence[index]
+
+    def __len__(self):
+        return len(self.sequence)
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..af8fba1783608c75454b800cb8001978c548e8d9
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/common.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d5c032f6da29ea13fbff397d3bf684d9c59a462
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/common.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/decoder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/decoder.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a89cf426b93cd1a0dbf40ec5a9f08c8c15efccee
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/decoder.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/snapshot.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/snapshot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70631d400fb95a58f13e68aa720ed68bf2f576d0
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/__pycache__/snapshot.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/common.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..faf45c078890c494dcf26e5bb340bcff54123f05
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/common.py
@@ -0,0 +1,379 @@
+import fnmatch
+import functools
+import inspect
+import os
+import warnings
+
+from io import IOBase
+
+from functools import partial
+from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
+from torch.utils._import_utils import dill_available
+
+__all__ = [
+    "validate_input_col",
+    "StreamWrapper",
+    "get_file_binaries_from_pathnames",
+    "get_file_pathnames_from_root",
+    "match_masks",
+    "validate_pathname_binary_tuple",
+]
+
+
+def validate_input_col(fn: Callable, input_col: Optional[Union[int, tuple, list]]):
+    """
+    Check that function used in a callable datapipe works with the input column.
+
+    This simply ensures that the number of positional arguments matches the size
+    of the input column. The function must not contain any non-default
+    keyword-only arguments.
+
+    Examples:
+        >>> # xdoctest: +SKIP("Failing on some CI machines")
+        >>> def f(a, b, *, c=1):
+        ...     return a + b + c
+        >>> def f_def(a, b=1, *, c=1):
+        ...     return a + b + c
+        >>> validate_input_col(f, [1, 2])
+        >>> validate_input_col(f_def, 1)
+        >>> validate_input_col(f_def, [1, 2])
+
+    Notes:
+        If the function contains variable positional (`inspect.VAR_POSITIONAL`) arguments,
+        for example, f(a, *args), the validator will accept any size of input column
+        greater than or equal to the number of positional arguments.
+        (in this case, 1).
+
+    Args:
+        fn: The function to check.
+        input_col: The input column to check.
+
+    Raises:
+        ValueError: If the function is not compatible with the input column.
+    """
+    try:
+        sig = inspect.signature(fn)
+    except ValueError:  # Signature cannot be inspected, likely it is a built-in fn or written in C
+        return
+    if isinstance(input_col, (list, tuple)):
+        input_col_size = len(input_col)
+    else:
+        input_col_size = 1
+
+    pos = []
+    var_positional = False
+    non_default_kw_only = []
+
+    for p in sig.parameters.values():
+        if p.kind in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.POSITIONAL_OR_KEYWORD):
+            pos.append(p)
+        elif p.kind is inspect.Parameter.VAR_POSITIONAL:
+            var_positional = True
+        elif p.kind is inspect.Parameter.KEYWORD_ONLY:
+            if p.default is p.empty:
+                non_default_kw_only.append(p)
+        else:
+            continue
+
+    if isinstance(fn, functools.partial):
+        fn_name = getattr(fn.func, "__name__", repr(fn.func))
+    else:
+        fn_name = getattr(fn, "__name__", repr(fn))
+
+    if len(non_default_kw_only) > 0:
+        raise ValueError(
+            f"The function {fn_name} takes {len(non_default_kw_only)} "
+            f"non-default keyword-only parameters, which is not allowed."
+        )
+
+    if len(sig.parameters) < input_col_size:
+        if not var_positional:
+            raise ValueError(
+                f"The function {fn_name} takes {len(sig.parameters)} "
+                f"parameters, but {input_col_size} are required."
+            )
+    else:
+        if len(pos) > input_col_size:
+            if any(p.default is p.empty for p in pos[input_col_size:]):
+                raise ValueError(
+                    f"The function {fn_name} takes {len(pos)} "
+                    f"positional parameters, but {input_col_size} are required."
+                )
+        elif len(pos) < input_col_size:
+            if not var_positional:
+                raise ValueError(
+                    f"The function {fn_name} takes {len(pos)} "
+                    f"positional parameters, but {input_col_size} are required."
+                )
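# Editor's note: a compact sketch (not part of the diff) of the validation
# rules above; validate_input_col returns None on success and raises on a
# mismatch, so failures are easiest to show with try/except.
def f(a, b, *, c=1):
    return a + b + c

validate_input_col(f, [1, 2])         # OK: two positional params, two columns
try:
    validate_input_col(f, [1, 2, 3])  # too many columns for f's signature
except ValueError as e:
    print(e)  # "The function f takes 2 positional parameters, but 3 are required."

def g(a, *args):
    return a

validate_input_col(g, [1, 2, 3])      # OK: *args absorbs the extra columns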
+
+
+def _is_local_fn(fn):
+    # Functions or Methods
+    if hasattr(fn, "__code__"):
+        return fn.__code__.co_flags & inspect.CO_NESTED
+    # Callable Objects
+    else:
+        if hasattr(fn, "__qualname__"):
+            return "<locals>" in fn.__qualname__
+        fn_type = type(fn)
+        if hasattr(fn_type, "__qualname__"):
+            return "<locals>" in fn_type.__qualname__
+    return False
+
+
+def _check_unpickable_fn(fn: Callable):
+    """
+    Check whether the function is picklable or not.
+
+    If it is a lambda or local function, a UserWarning will be raised.
+    If it's not a callable function, a TypeError will be raised.
+    """
+    if not callable(fn):
+        raise TypeError(f"A callable function is expected, but {type(fn)} is provided.")
+
+    # Extract function from partial object
+    # Nested partial function is automatically expanded as a single partial object
+    if isinstance(fn, partial):
+        fn = fn.func
+
+    # Local function
+    if _is_local_fn(fn) and not dill_available():
+        warnings.warn(
+            "Local function is not supported by pickle, please use "
+            "regular python function or functools.partial instead."
+        )
+        return
+
+    # Lambda function
+    if hasattr(fn, "__name__") and fn.__name__ == "<lambda>" and not dill_available():
+        warnings.warn(
+            "Lambda function is not supported by pickle, please use "
+            "regular python function or functools.partial instead."
+        )
+        return
+
+
+def match_masks(name : str, masks : Union[str, List[str]]) -> bool:
+    # empty mask matches any input name
+    if not masks:
+        return True
+
+    if isinstance(masks, str):
+        return fnmatch.fnmatch(name, masks)
+
+    for mask in masks:
+        if fnmatch.fnmatch(name, mask):
+            return True
+    return False
+
+
+def get_file_pathnames_from_root(
+        root: str,
+        masks: Union[str, List[str]],
+        recursive: bool = False,
+        abspath: bool = False,
+        non_deterministic: bool = False) -> Iterable[str]:
+
+    # Print out an error message and re-raise the error
+    def onerror(err : OSError):
+        warnings.warn(err.filename + " : " + err.strerror)
+        raise err
+
+    if os.path.isfile(root):
+        path = root
+        if abspath:
+            path = os.path.abspath(path)
+        fname = os.path.basename(path)
+        if match_masks(fname, masks):
+            yield path
+    else:
+        for path, dirs, files in os.walk(root, onerror=onerror):
+            if abspath:
+                path = os.path.abspath(path)
+            if not non_deterministic:
+                files.sort()
+            for f in files:
+                if match_masks(f, masks):
+                    yield os.path.join(path, f)
+            if not recursive:
+                break
+            if not non_deterministic:
+                # Note that this is in-place modifying the internal list from `os.walk`
+                # This only works because `os.walk` doesn't shallow copy the list before returning
+                # https://github.com/python/cpython/blob/f4c03484da59049eb62a9bf7777b963e2267d187/Lib/os.py#L407
+                dirs.sort()
+
+
+def get_file_binaries_from_pathnames(pathnames: Iterable, mode: str, encoding: Optional[str] = None):
+    if not isinstance(pathnames, Iterable):
+        pathnames = [pathnames, ]
+
+    if mode in ('b', 't'):
+        mode = 'r' + mode
+
+    for pathname in pathnames:
+        if not isinstance(pathname, str):
+            raise TypeError(f"Expected string type for pathname, but got {type(pathname)}")
+        yield pathname, StreamWrapper(open(pathname, mode, encoding=encoding))
+
+
+def validate_pathname_binary_tuple(data: Tuple[str, IOBase]):
+    if not isinstance(data, tuple):
+        raise TypeError(f"pathname binary data should be tuple type, but it is type {type(data)}")
+    if len(data) != 2:
+        raise TypeError(f"pathname binary stream tuple length should be 2, but got {len(data)}")
+    if not isinstance(data[0], str):
+        raise TypeError(f"pathname within the tuple should have string type pathname, but it is type {type(data[0])}")
+    if not isinstance(data[1], IOBase) and not isinstance(data[1], StreamWrapper):
+        raise TypeError(
+            f"binary stream within the tuple should have IOBase or "
+            f"its subclasses as type, but it is type {type(data[1])}"
+        )
+
+
+# Deprecated function names and its corresponding DataPipe type and kwargs for the `_deprecation_warning` function
+_iter_deprecated_functional_names: Dict[str, Dict] = {}
+_map_deprecated_functional_names: Dict[str, Dict] = {}
+
+
+def _deprecation_warning(
+    old_class_name: str,
+    *,
+    deprecation_version: str,
+    removal_version: str,
+    old_functional_name: str = "",
+    old_argument_name: str = "",
+    new_class_name: str = "",
+    new_functional_name: str = "",
+    new_argument_name: str = "",
+    deprecate_functional_name_only: bool = False,
+) -> None:
+    if new_functional_name and not old_functional_name:
+        raise ValueError("Old functional API needs to be specified for the deprecation warning.")
+    if new_argument_name and not old_argument_name:
+        raise ValueError("Old argument name needs to be specified for the deprecation warning.")
+
+    if old_functional_name and old_argument_name:
+        raise ValueError("Deprecating warning for functional API and argument should be separated.")
+
+    msg = f"`{old_class_name}()`"
+    if deprecate_functional_name_only and old_functional_name:
+        msg = f"{msg}'s functional API `.{old_functional_name}()` is"
+    elif old_functional_name:
+        msg = f"{msg} and its functional API `.{old_functional_name}()` are"
+    elif old_argument_name:
+        msg = f"The argument `{old_argument_name}` of {msg} is"
+    else:
+        msg = f"{msg} is"
+    msg = (
+        f"{msg} deprecated since {deprecation_version} and will be removed in {removal_version}."
+        f"\nSee https://github.com/pytorch/data/issues/163 for details."
+    )
+
+    if new_class_name or new_functional_name:
+        msg = f"{msg}\nPlease use"
+        if new_class_name:
+            msg = f"{msg} `{new_class_name}()`"
+        if new_class_name and new_functional_name:
+            msg = f"{msg} or"
+        if new_functional_name:
+            msg = f"{msg} `.{new_functional_name}()`"
+        msg = f"{msg} instead."
+
+    if new_argument_name:
+        msg = f"{msg}\nPlease use `{old_class_name}({new_argument_name}=)` instead."
+
+    warnings.warn(msg, FutureWarning)
+
+
+class StreamWrapper:
+    """
+    StreamWrapper is introduced to wrap the file handlers generated by DataPipe operations like `FileOpener`.
+
+    StreamWrapper guarantees that the wrapped file handler is closed when it goes out of scope.
+    """
+
+    session_streams: Dict[Any, int] = {}
+    debug_unclosed_streams: bool = False
+
+    def __init__(self, file_obj, parent_stream=None, name=None):
+        self.file_obj = file_obj
+        self.child_counter = 0
+        self.parent_stream = parent_stream
+        self.close_on_last_child = False
+        self.name = name
+        self.closed = False
+        if parent_stream is not None:
+            if not isinstance(parent_stream, StreamWrapper):
+                raise RuntimeError(f'Parent stream should be StreamWrapper, {type(parent_stream)} was given')
+            parent_stream.child_counter += 1
+            self.parent_stream = parent_stream
+        if StreamWrapper.debug_unclosed_streams:
+            StreamWrapper.session_streams[self] = 1
+
+    @classmethod
+    def close_streams(cls, v, depth=0):
+        """Traverses the structure and attempts to close all found StreamWrappers on a best-effort basis."""
+        if depth > 10:
+            return
+        if isinstance(v, StreamWrapper):
+            v.close()
+        else:
+            # Traverse only simple structures
+            if isinstance(v, dict):
+                for vv in v.values():
+                    cls.close_streams(vv, depth=depth + 1)
+            elif isinstance(v, (list, tuple)):
+                for vv in v:
+                    cls.close_streams(vv, depth=depth + 1)
+
+    def __getattr__(self, name):
+        file_obj = self.__dict__['file_obj']
+        return getattr(file_obj, name)
+
+    def close(self, *args, **kwargs):
+        if self.closed:
+            return
+        if StreamWrapper.debug_unclosed_streams:
+            del StreamWrapper.session_streams[self]
+        if hasattr(self, "parent_stream") and self.parent_stream is not None:
+            self.parent_stream.child_counter -= 1
+            if not self.parent_stream.child_counter and self.parent_stream.close_on_last_child:
+                self.parent_stream.close()
+        try:
+            self.file_obj.close(*args, **kwargs)
+        except AttributeError:
+            pass
+        self.closed = True
+
+    def autoclose(self):
+        """Automatically close stream when all child streams are closed or if there are none."""
+        self.close_on_last_child = True
+        if self.child_counter == 0:
+            self.close()
+
+    def __dir__(self):
+        attrs = list(self.__dict__.keys()) + list(StreamWrapper.__dict__.keys())
+        attrs += dir(self.file_obj)
+        return list(set(attrs))
+
+    def __del__(self):
+        if not self.closed:
+            self.close()
+
+    def __iter__(self):
+        yield from self.file_obj
+
+    def __next__(self):
+        return next(self.file_obj)
+
+    def __repr__(self):
+        if self.name is None:
+            return f"StreamWrapper<{self.file_obj!r}>"
+        else:
+            return f"StreamWrapper<{self.name},{self.file_obj!r}>"
+
+    def __getstate__(self):
+        return self.file_obj
+
+    def __setstate__(self, obj):
+        self.file_obj = obj
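# Editor's note: a minimal sketch (not part of the diff) of the parent/child
# accounting above: with autoclose(), a parent stream closes itself once its
# last child is closed.
import io

parent = StreamWrapper(io.BytesIO(b"payload"))
child = StreamWrapper(io.BytesIO(b"slice"), parent_stream=parent)
parent.autoclose()    # defer closing until the children are done
print(parent.closed)  # False - one child still open
child.close()         # decrements the counter, which closes the parent
print(parent.closed)  # True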
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/decoder.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/decoder.py
new file mode 100644
index 0000000000000000000000000000000000000000..0211a8fe4ba462a768d41e95f2a00c4084aec7df
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/decoder.py
@@ -0,0 +1,330 @@
+# This file takes part of its implementation from NVIDIA's webdataset, available here:
+# https://github.com/tmbdev/webdataset/blob/master/webdataset/autodecode.py
+
+import io
+import json
+import os.path
+import pickle
+import tempfile
+
+import torch
+from torch.utils.data.datapipes.utils.common import StreamWrapper
+
+
+__all__ = [
+    "Decoder",
+    "ImageHandler",
+    "MatHandler",
+    "audiohandler",
+    "basichandlers",
+    "extension_extract_fn",
+    "handle_extension",
+    "imagehandler",
+    "mathandler",
+    "videohandler",
+]
+
+
+################################################################
+# handle basic datatypes
+################################################################
+def basichandlers(extension, data):
+
+    if extension in "txt text transcript".split():
+        return data.decode("utf-8")
+
+    if extension in "cls cls2 class count index inx id".split():
+        try:
+            return int(data)
+        except ValueError:
+            return None
+
+    if extension in "json jsn".split():
+        return json.loads(data)
+
+    if extension in "pyd pickle".split():
+        return pickle.loads(data)
+
+    if extension in "pt".split():
+        stream = io.BytesIO(data)
+        return torch.load(stream)
+
+    # if extension in "ten tb".split():
+    #     from . import tenbin
+    #     return tenbin.decode_buffer(data)
+
+    # if extension in "mp msgpack msg".split():
+    #     import msgpack
+    #     return msgpack.unpackb(data)
+
+    return None
+
+
+################################################################
+# handle images
+################################################################
+imagespecs = {
+    "l8": ("numpy", "uint8", "l"),
+    "rgb8": ("numpy", "uint8", "rgb"),
+    "rgba8": ("numpy", "uint8", "rgba"),
+    "l": ("numpy", "float", "l"),
+    "rgb": ("numpy", "float", "rgb"),
+    "rgba": ("numpy", "float", "rgba"),
+    "torchl8": ("torch", "uint8", "l"),
+    "torchrgb8": ("torch", "uint8", "rgb"),
+    "torchrgba8": ("torch", "uint8", "rgba"),
+    "torchl": ("torch", "float", "l"),
+    "torchrgb": ("torch", "float", "rgb"),
+    "torch": ("torch", "float", "rgb"),
+    "torchrgba": ("torch", "float", "rgba"),
+    "pill": ("pil", None, "l"),
+    "pil": ("pil", None, "rgb"),
+    "pilrgb": ("pil", None, "rgb"),
+    "pilrgba": ("pil", None, "rgba"),
+}
+
+def handle_extension(extensions, f):
+    """
+    Return a decoder handler function for the list of extensions.
+
+    Extensions can be a space separated list of extensions.
+    Extensions can contain dots, in which case the corresponding number
+    of extension components must be present in the key given to f.
+    Comparisons are case insensitive.
+    Examples:
+    handle_extension("jpg jpeg", my_decode_jpg)  # invoked for any file.jpg
+    handle_extension("seg.jpg", special_case_jpg)  # invoked only for file.seg.jpg
+    """
+    extensions = extensions.lower().split()
+
+    def g(key, data):
+        extension = key.lower().split(".")
+
+        for target in extensions:
+            target = target.split(".")
+            if len(target) > len(extension):
+                continue
+
+            if extension[-len(target):] == target:
+                return f(data)
+        return None
+    return g
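# Editor's note: a tiny sketch (not part of the diff) of the dotted-extension
# matching described in the docstring above; the decoder functions here are
# illustrative stand-ins.
def decode_jpg(data):
    return ("jpg", data)

def decode_seg_jpg(data):
    return ("seg.jpg", data)

any_jpg = handle_extension("jpg jpeg", decode_jpg)
seg_only = handle_extension("seg.jpg", decode_seg_jpg)

print(seg_only("sample.seg.jpg", b"...")[0])  # 'seg.jpg' - both components match
print(seg_only("sample.jpg", b"..."))         # None - the 'seg' component is missing
print(any_jpg("sample.jpg", b"...")[0])       # 'jpg'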
+
+
+class ImageHandler:
+    """
+    Decode image data using the given `imagespec`.
+
+    The `imagespec` specifies whether the image is decoded
+    to numpy/torch/pil, decoded to uint8/float, and decoded
+    to l/rgb/rgba:
+
+    - l8: numpy uint8 l
+    - rgb8: numpy uint8 rgb
+    - rgba8: numpy uint8 rgba
+    - l: numpy float l
+    - rgb: numpy float rgb
+    - rgba: numpy float rgba
+    - torchl8: torch uint8 l
+    - torchrgb8: torch uint8 rgb
+    - torchrgba8: torch uint8 rgba
+    - torchl: torch float l
+    - torchrgb: torch float rgb
+    - torch: torch float rgb
+    - torchrgba: torch float rgba
+    - pill: pil None l
+    - pil: pil None rgb
+    - pilrgb: pil None rgb
+    - pilrgba: pil None rgba
+    """
+
+    def __init__(self, imagespec):
+        assert imagespec in list(imagespecs.keys()), f"unknown image specification: {imagespec}"
+        self.imagespec = imagespec.lower()
+
+    def __call__(self, extension, data):
+        if extension.lower() not in "jpg jpeg png ppm pgm pbm pnm".split():
+            return None
+
+        try:
+            import numpy as np
+        except ImportError as e:
+            raise ModuleNotFoundError("Package `numpy` is required to be installed for default image decoder. "
+                                      "Please use `pip install numpy` to install the package") from e
+
+        try:
+            import PIL.Image
+        except ImportError as e:
+            raise ModuleNotFoundError("Package `PIL` is required to be installed for default image decoder. "
+                                      "Please use `pip install Pillow` to install the package") from e
+
+        imagespec = self.imagespec
+        atype, etype, mode = imagespecs[imagespec]
+
+        with io.BytesIO(data) as stream:
+            img = PIL.Image.open(stream)
+            img.load()
+            img = img.convert(mode.upper())
+            if atype == "pil":
+                return img
+            elif atype == "numpy":
+                result = np.asarray(img)
+                assert result.dtype == np.uint8, f"numpy image array should be type uint8, but got {result.dtype}"
+                if etype == "uint8":
+                    return result
+                else:
+                    return result.astype("f") / 255.0
+            elif atype == "torch":
+                result = np.asarray(img)
+                assert result.dtype == np.uint8, f"numpy image array should be type uint8, but got {result.dtype}"
+
+                if etype == "uint8":
+                    result = np.array(result.transpose(2, 0, 1))
+                    return torch.tensor(result)
+                else:
+                    result = np.array(result.transpose(2, 0, 1))
+                    return torch.tensor(result) / 255.0
+            return None
+
+def imagehandler(imagespec):
+    return ImageHandler(imagespec)
+
+
+################################################################
+# torch video
+################################################################
+def videohandler(extension, data):
+    if extension not in "mp4 ogv mjpeg avi mov h264 mpg webm wmv".split():
+        return None
+
+    try:
+        import torchvision.io
+    except ImportError as e:
+        raise ModuleNotFoundError("Package `torchvision` is required to be installed for default video file loader. "
+                                  "Please use `pip install torchvision` or `conda install torchvision -c pytorch` "
+                                  "to install the package") from e
+
+    with tempfile.TemporaryDirectory() as dirname:
+        fname = os.path.join(dirname, f"file.{extension}")
+        with open(fname, "wb") as stream:
+            stream.write(data)
+            return torchvision.io.read_video(fname)
+
+
+################################################################
+# torchaudio
+################################################################
+def audiohandler(extension, data):
+    if extension not in ["flac", "mp3", "sox", "wav", "m4a", "ogg", "wma"]:
+        return None
+
+    try:
+        import torchaudio  # type: ignore[import]
+    except ImportError as e:
+        raise ModuleNotFoundError("Package `torchaudio` is required to be installed for default audio file loader. "
+                                  "Please use `pip install torchaudio` or `conda install torchaudio -c pytorch` "
+                                  "to install the package") from e
+
+    with tempfile.TemporaryDirectory() as dirname:
+        fname = os.path.join(dirname, f"file.{extension}")
+        with open(fname, "wb") as stream:
+            stream.write(data)
+            return torchaudio.load(fname)
+
+
+################################################################
+# mat
+################################################################
+class MatHandler:
+    def __init__(self, **loadmat_kwargs) -> None:
+        try:
+            import scipy.io as sio
+        except ImportError as e:
+            raise ModuleNotFoundError("Package `scipy` is required to be installed for mat file. "
+                                      "Please use `pip install scipy` or `conda install scipy` "
+                                      "to install the package") from e
+        self.sio = sio
+        self.loadmat_kwargs = loadmat_kwargs
+
+    def __call__(self, extension, data):
+        if extension != 'mat':
+            return None
+        with io.BytesIO(data) as stream:
+            return self.sio.loadmat(stream, **self.loadmat_kwargs)
+
+def mathandler(**loadmat_kwargs):
+    return MatHandler(**loadmat_kwargs)
+
+
+################################################################
+# a sample decoder
+################################################################
+# Extract extension from pathname
+def extension_extract_fn(pathname):
+    ext = os.path.splitext(pathname)[1]
+    # Remove dot
+    if ext:
+        ext = ext[1:]
+    return ext
+
+
+class Decoder:
+    """
+    Decode key/data sets using a list of handlers.
+
+    For each key/data item, this iterates through the list of
+    handlers until some handler returns something other than None.
+    """
+
+    def __init__(self, *handler, key_fn=extension_extract_fn):
+        self.handlers = list(handler) if handler else []
+        self.key_fn = key_fn
+
+    # Insert new handlers at the beginning of the handlers list, so that the
+    # newest handler has the highest priority
+    def add_handler(self, *handler):
+        if not handler:
+            return
+        self.handlers = list(handler) + self.handlers
+
+    @staticmethod
+    def _is_stream_handle(data):
+        obj_to_check = data.file_obj if isinstance(data, StreamWrapper) else data
+        return isinstance(obj_to_check, (io.BufferedIOBase, io.RawIOBase))
+
+    def decode1(self, key, data):
+        if not data:
+            return data
+
+        # if data is a stream handle, we need to read all the content before decoding
+        if Decoder._is_stream_handle(data):
+            ds = data
+            # The behavior of .read can differ between streams (e.g. HTTPResponse), hence this is used instead
+            data = b"".join(data)
+            ds.close()
+
+        for f in self.handlers:
+            result = f(key, data)
+            if result is not None:
+                return result
+        return data
+
+    def decode(self, data):
+        result = {}
+        # single data tuple(pathname, data stream)
+        if isinstance(data, tuple):
+            data = [data]
+
+        if data is not None:
+            for k, v in data:
+                # TODO: xinyu, figure out why NVIDIA does this
+                if k[0] == "_":
+                    if isinstance(v, bytes):
+                        v = v.decode("utf-8")
+                    result[k] = v
+                    continue
+                result[k] = self.decode1(self.key_fn(k), v)
+        return result
+
+    def __call__(self, data):
+        return self.decode(data)
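# Editor's note: a short sketch (not part of the diff) of handler priority:
# handlers are tried in order, and add_handler() prepends, so later additions
# win. The handler names here are illustrative only.
def plain_text(key, data):
    return data.decode("utf-8") if key == "txt" else None

def shouting_text(key, data):
    return data.decode("utf-8").upper() if key == "txt" else None

decoder = Decoder(plain_text)
print(decoder.decode1("txt", b"hello"))  # 'hello'
decoder.add_handler(shouting_text)       # prepended: now tried first
print(decoder.decode1("txt", b"hello"))  # 'HELLO'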
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/snapshot.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/snapshot.py
new file mode 100644
index 0000000000000000000000000000000000000000..02487d0da5737363a59bdcd18a4fe16ead2fdcbb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/data/datapipes/utils/snapshot.py
@@ -0,0 +1,58 @@
+from torch.utils.data.datapipes._hook_iterator import _SnapshotState
+from torch.utils.data.datapipes.datapipe import IterDataPipe
+from torch.utils.data.graph_settings import apply_random_seed
+
+
+# TODO: Caveats
+# 1. Caller (either the ReadingService or DataLoader) must pass in the initial RNG
+# 2. `in_batch_shuffle` and `bucketbatch` are not compatible with this because they currently
+#    lack the option to `set_seed`.
+def _simple_graph_snapshot_restoration(datapipe: IterDataPipe, n_iterations: int, rng=None) -> None:
+    r"""
+    Fast-forward the given DataPipe and its parents by ``n_iterations``, re-doing computations to restore a snapshot.
+
+    For instance, applying this function to the final DataPipe of a graph will restore the snapshot
+    (via fast-forward) of every DataPipe within the graph.
+
+    After you deserialize a DataPipe, you can use its `_number_of_samples_yielded` attribute as the input
+    to this function to forward the DataPipe.
+
+    A DataPipe cannot be restored twice in a row unless an iteration is started between the restoration
+    attempts.
+
+    Note:
+        This is the simplest but least efficient way to fast-forward a DataPipe. Usage of other fast-forwarding
+        methods (custom ones if necessary) is recommended.
+
+    Args:
+        datapipe: IterDataPipe to be fast-forwarded
+        n_iterations: number of iterations to fast-forward
+        rng: ``Optional[torch.Generator]``. If not ``None``, this RNG will be used for shuffling. The generator
+            should be in its `initial` state as it was first passed into ``DataLoader`` or ``ReadingService``.
+    """
+    if datapipe._snapshot_state == _SnapshotState.Restored:
+        raise RuntimeError(
+            "Snapshot restoration cannot be applied. You can only restore a simple snapshot to the graph "
+            "if the graph has not already been restored.")
+
+    # For this snapshot restoration function, we want the DataPipe to be at its initial state prior to
+    # simple fast-forwarding. Therefore, we need to call `reset` twice, because if `SnapshotState` is `Restored`,
+    # the first reset will not actually reset.
+    datapipe.reset()  # This ensures `SnapshotState` is `Iterating` by this point, even if it was `Restored`.
+    apply_random_seed(datapipe, rng)
+
+    remainder = n_iterations
+    it = iter(datapipe)  # This always resets the DataPipe if it hasn't been reset already.
+    while remainder > 0:
+        try:
+            next(it)
+            remainder -= 1
+        except StopIteration as e:
+            raise RuntimeError(f"Fast-forwarding {datapipe} by {n_iterations} iterations "
+                               "exceeds the number of samples available.") from e
+    datapipe._fast_forward_iterator = it
+    # While the DataPipe has a `_fast_forward_iterator`, `next()` will draw results from it instead of
+    # running the usual iteration logic.
+
+    # Marking the state as `Restored` prevents the DataPipe from resetting on the next `iter()` call,
+    # so a DataPipe consuming it won't have to start over again.
+    datapipe._snapshot_state = _SnapshotState.Restored
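A minimal usage sketch of the function above (hypothetical checkpoint value; the exact snapshot-state bookkeeping may differ across torch versions):

    import torch
    from torch.utils.data.datapipes.iter import IterableWrapper
    from torch.utils.data.datapipes.utils.snapshot import _simple_graph_snapshot_restoration

    dp = IterableWrapper(range(10)).shuffle()
    rng = torch.Generator()
    rng.manual_seed(0)  # the same initial RNG the original run was seeded with

    # Suppose a checkpoint recorded that 3 samples had already been yielded.
    _simple_graph_snapshot_restoration(dp, n_iterations=3, rng=rng)
    remaining = list(dp)  # resumes from the 4th sample of the seeded run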
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/jit/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/jit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/jit/__init__.py
@@ -0,0 +1 @@
+
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/jit/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/jit/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d54950d55f070f0d5c44879859d5a902598c7c81
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/jit/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/jit/__pycache__/log_extract.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/utils/jit/__pycache__/log_extract.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a162f908f233b8886d8673615b9a8d90e0e51078
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/utils/jit/__pycache__/log_extract.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/utils/jit/log_extract.py b/llmeval-env/lib/python3.10/site-packages/torch/utils/jit/log_extract.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e89a769eff0c8dc82d8f003ac62b18e9e78624e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/utils/jit/log_extract.py
@@ -0,0 +1,113 @@
+from contextlib import contextmanager
+from typing import Any, List, Tuple, cast
+import random
+import torch
+import time
+from torch.utils.benchmark import Timer
+
+def extract_ir(filename: str) -> List[str]:
+    BEGIN = "<GRAPH_EXPORT>"
+    END = "</GRAPH_EXPORT>"
+    pfx = None
+    graphs = []
+    with open(filename) as f:
+        split_strs = f.read().split(BEGIN)
+        for i, split_str in enumerate(split_strs):
+            if i == 0:
+                continue
+            end_loc = split_str.find(END)
+            if end_loc == -1:
+                continue
+            s = split_str[:end_loc]
+            # The log-line prefix preceding the BEGIN marker is stripped from every graph line
+            pfx = split_strs[i - 1].splitlines()[-1]
+            lines = [x[len(pfx):] for x in s.splitlines(keepends=True)]
+            graphs.append(''.join(lines))
+
+    return graphs
+
+
+def make_tensor_from_type(inp_type: torch._C.TensorType):
+    size = inp_type.sizes()
+    stride = inp_type.strides()
+    device = inp_type.device()
+    dtype = inp_type.dtype()
+    assert size is not None
+    assert stride is not None
+    assert device is not None
+    assert dtype is not None
+    return torch.empty_strided(size=size, stride=stride, device=device, dtype=dtype)
+
+def load_graph_and_inputs(ir: str) -> Tuple[Any, List[Any]]:
+    graph = torch._C.parse_ir(ir, parse_tensor_constants=True)
+    graph.makeMultiOutputIntoTuple()
+    inputs = []
+    for inp in graph.inputs():
+        if isinstance(inp.type(), torch._C.FloatType):
+            inputs.append(random.uniform(.1, 100))
+        elif isinstance(inp.type(), torch._C.IntType):
+            inputs.append(random.randint(1, 100))
+        elif isinstance(inp.type(), torch._C.TensorType):
+            tensorType = cast(torch._C.TensorType, inp.type())
+            inputs.append(make_tensor_from_type(tensorType))
+        elif isinstance(inp.type(), torch._C.BoolType):
+            inputs.append(random.randint(0, 1) == 1)
+        else:
+            raise NotImplementedError(f"A default value is not implemented for type {inp.type()}")
+
+    func = torch._C._create_function_from_graph("forward", graph)
+    torch._C._jit_pass_erase_shape_information(func.graph)
+    return (func, inputs)
+
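To make the expected log format concrete, a hypothetical sketch (the dump-tag prefix and file name are illustrative; `extract_ir` relies only on the GRAPH_EXPORT markers and a consistent per-line prefix):

    # log.txt, captured with graph dumping enabled, might look like:
    #
    #   [DUMP kernel.cpp:123] <GRAPH_EXPORT>
    #   [DUMP kernel.cpp:123] graph(%x : Float(2, 2, strides=[2, 1], device=cpu)):
    #   [DUMP kernel.cpp:123]   %y : Float(2, 2, strides=[2, 1], device=cpu) = aten::relu(%x)
    #   [DUMP kernel.cpp:123]   return (%y)
    #   [DUMP kernel.cpp:123] </GRAPH_EXPORT>
    from torch.utils.jit.log_extract import extract_ir, load_graph_and_inputs

    graphs = extract_ir("log.txt")                   # one IR string per GRAPH_EXPORT block
    func, inputs = load_graph_and_inputs(graphs[0])  # parsed graph plus synthesized inputs
    print(func.graph)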
+def time_cuda(fn, inputs, test_runs):
+    # test_runs is unused here: blocked_autorange chooses its own number of runs
+    t = Timer(stmt="fn(*inputs)", globals={"fn": fn, "inputs": inputs})
+    times = t.blocked_autorange()
+    return times.median * 1000  # time in ms
+
+def time_cpu(fn, inputs, test_runs):
+    s = time.perf_counter()
+    for _ in range(test_runs):
+        fn(*inputs)
+    e = time.perf_counter()
+    return (e - s) / test_runs * 1000  # time in ms
+
+def run_test(ir, inputs, *, warmup_runs=10, test_runs=20) -> float:
+    graph, _ = load_graph_and_inputs(ir)
+    for _ in range(warmup_runs):
+        graph(*inputs)
+
+    is_cpu = None
+    for inp in inputs:
+        if isinstance(inp, torch.Tensor):
+            is_cpu = inp.device.type == "cpu"
+            break
+    assert is_cpu is not None
+
+    out = time_cpu(graph, inputs, test_runs) if is_cpu else time_cuda(graph, inputs, test_runs)
+    return out
+
+@contextmanager
+def no_fuser(*args, **kwargs):
+    # _get_graph_executor_optimize returns the previous setting while disabling optimization
+    old_optimize = torch._C._get_graph_executor_optimize(False)
+    try:
+        yield
+    finally:
+        torch._C._get_graph_executor_optimize(old_optimize)
+
+def run_baseline_no_fusion(ir, inputs) -> float:
+    with no_fuser():
+        return run_test(ir, inputs)
+
+
+def run_nnc(ir, inputs, dynamic) -> float:
+    strat = [("DYNAMIC", 10)] if dynamic else [("STATIC", 10)]
+    old_strat = torch.jit.set_fusion_strategy(strat)
+    try:
+        with torch.jit.fuser("fuser1"):
+            return run_test(ir, inputs)
+    finally:
+        # Restore the previous fusion strategy even if the run fails
+        torch.jit.set_fusion_strategy(old_strat)
+
+def run_nvfuser(ir, inputs) -> float:
+    with torch.jit.fuser("fuser2"):
+        return run_test(ir, inputs)
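Putting it together, a hypothetical driver loop over an extracted log (the file name is illustrative; assumes the synthesized inputs fit in memory):

    from torch.utils.jit.log_extract import (
        extract_ir, load_graph_and_inputs, run_baseline_no_fusion, run_nnc)

    for ir in extract_ir("log.txt"):
        _, inputs = load_graph_and_inputs(ir)
        base_ms = run_baseline_no_fusion(ir, inputs)
        nnc_ms = run_nnc(ir, inputs, dynamic=False)
        print(f"no fusion: {base_ms:.3f} ms   nnc static: {nnc_ms:.3f} ms")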