# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import logging import math import operator import os import queue import time from threading import Thread import numpy as np import torch from fairseq.data import data_utils logger = logging.getLogger(__name__) # Object used by _background_consumer to signal the source is exhausted # to the main thread. _sentinel = object() class CountingIterator(object): """Wrapper around an iterable that maintains the iteration count. Args: iterable (iterable): iterable to wrap start (int): starting iteration count. Note that this doesn't actually advance the iterator. total (int): override the iterator length returned by ``__len``. This can be used to truncate *iterator*. Attributes: n (int): number of elements consumed from this iterator """ def __init__(self, iterable, start=None, total=None): self._itr = iter(iterable) self.n = start or getattr(iterable, "n", 0) self.total = total or self.n + len(iterable) def __len__(self): return self.total def __iter__(self): return self def __next__(self): if not self.has_next(): raise StopIteration try: x = next(self._itr) except StopIteration: raise IndexError(f"Iterator expected to have length {self.total}, " "but exhausted at position {self.n}.") self.n += 1 return x def has_next(self): """Whether the iterator has been exhausted.""" return self.n < self.total def skip(self, n): """Fast-forward the iterator by skipping n elements.""" for _ in range(n): next(self) return self def take(self, n): """Truncate the iterator to n elements at most.""" self.total = min(self.total, n) # Propagate this change to the underlying iterator if hasattr(self._itr, "take"): self._itr.take(max(n - self.n, 0)) return self class EpochBatchIterating(object): def __len__(self) -> int: raise NotImplementedError @property def next_epoch_idx(self): raise NotImplementedError def next_epoch_itr( self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True ): """Return a new iterator over the dataset. Args: shuffle (bool, optional): shuffle batches before returning the iterator (default: True). fix_batches_to_gpus (bool, optional): ensure that batches are always allocated to the same shards across epochs. Requires that :attr:`dataset` supports prefetching (default: False). set_dataset_epoch (bool, optional): update the wrapped Dataset with the new epoch number (default: True). """ raise NotImplementedError def end_of_epoch(self) -> bool: """Returns whether the most recent epoch iterator has been exhausted""" raise NotImplementedError @property def iterations_in_epoch(self) -> int: """The number of consumed batches in the current epoch.""" raise NotImplementedError def state_dict(self): """Returns a dictionary containing a whole state of the iterator.""" raise NotImplementedError def load_state_dict(self, state_dict): """Copies the state of the iterator from the given *state_dict*.""" raise NotImplementedError @property def first_batch(self): return "DUMMY" class StreamingEpochBatchIterator(EpochBatchIterating): """A steaming-style iterator over a :class:`torch.utils.data.IterableDataset`. Args: dataset (~torch.utils.data.Dataset): dataset from which to load the data max_sentences: batch size collate_fn (callable): merges a list of samples to form a mini-batch num_workers (int, optional): how many subprocesses to use for data loading. 
0 means the data will be loaded in the main process (default: 0). epoch (int, optional): the epoch to start the iterator from (default: 1). buffer_size (int, optional): the number of batches to keep ready in the queue. Helps speeding up dataloading. When buffer_size is zero, the default torch.utils.data.DataLoader preloading is used. timeout (int, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative (default: ``0``). """ def __init__( self, dataset, max_sentences=1, collate_fn=None, epoch=1, num_workers=0, buffer_size=0, timeout=0, ): assert isinstance(dataset, torch.utils.data.IterableDataset) self.dataset = dataset self.max_sentences = max_sentences self.collate_fn = collate_fn self.epoch = max(epoch, 1) # we use 1-based indexing for epochs self.num_workers = num_workers # This upper limit here is to prevent people from abusing this feature # in a shared computing environment. self.buffer_size = min(buffer_size, 20) self.timeout = timeout self._current_epoch_iterator = None @property def next_epoch_idx(self): """Return the epoch index after *next_epoch_itr* is called.""" if self._current_epoch_iterator is not None and self.end_of_epoch(): return self.epoch + 1 else: return self.epoch def next_epoch_itr( self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True ): self.epoch = self.next_epoch_idx if set_dataset_epoch and hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(self.epoch) self._current_epoch_iterator = self._get_iterator_for_epoch(self.epoch, shuffle) return self._current_epoch_iterator def end_of_epoch(self) -> bool: return not self._current_epoch_iterator.has_next() @property def iterations_in_epoch(self) -> int: if self._current_epoch_iterator is not None: return self._current_epoch_iterator.n return 0 def state_dict(self): return { "epoch": self.epoch, } def load_state_dict(self, state_dict): self.epoch = state_dict["epoch"] def _get_iterator_for_epoch(self, epoch, shuffle, offset=0): if self.num_workers > 0: os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning" # Create data loader worker_init_fn = getattr(self.dataset, "worker_init_fn", None) itr = torch.utils.data.DataLoader( self.dataset, batch_size=self.max_sentences, collate_fn=self.collate_fn, num_workers=self.num_workers, timeout=self.timeout, worker_init_fn=worker_init_fn, pin_memory=True, ) # Wrap with a BufferedIterator if needed if self.buffer_size > 0: itr = BufferedIterator(self.buffer_size, itr) # Wrap with CountingIterator itr = CountingIterator(itr, start=offset) return itr class EpochBatchIterator(EpochBatchIterating): """A multi-epoch iterator over a :class:`torch.utils.data.Dataset`. Compared to :class:`torch.utils.data.DataLoader`, this iterator: - can be reused across multiple epochs with the :func:`next_epoch_itr` method (optionally shuffled between epochs) - can be serialized/deserialized with the :func:`state_dict` and :func:`load_state_dict` methods - supports sharding with the *num_shards* and *shard_id* arguments Args: dataset (~torch.utils.data.Dataset): dataset from which to load the data collate_fn (callable): merges a list of samples to form a mini-batch batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of indices, or a callable to create such an iterator (~torch.utils.data.Sampler). A callable batch_sampler will be called for each epoch to enable per epoch dynamic batch iterators defined by this callable batch_sampler. 
seed (int, optional): seed for random number generator for reproducibility (default: 1). num_shards (int, optional): shard the data iterator into N shards (default: 1). shard_id (int, optional): which shard of the data iterator to return (default: 0). num_workers (int, optional): how many subprocesses to use for data loading. 0 means the data will be loaded in the main process (default: 0). epoch (int, optional): the epoch to start the iterator from (default: 1). buffer_size (int, optional): the number of batches to keep ready in the queue. Helps speeding up dataloading. When buffer_size is zero, the default torch.utils.data.DataLoader preloading is used. timeout (int, optional): if positive, the timeout value for collecting a batch from workers. Should always be non-negative (default: ``0``). disable_shuffling (bool, optional): force disable shuffling (default: ``False``). """ def __init__( self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, buffer_size=0, timeout=0, disable_shuffling=False, ): assert isinstance(dataset, torch.utils.data.Dataset) self.dataset = dataset self.collate_fn = collate_fn self.batch_sampler = batch_sampler self._frozen_batches = ( tuple(batch_sampler) if not callable(batch_sampler) else None ) self.seed = seed self.num_shards = num_shards self.shard_id = shard_id self.num_workers = num_workers # This upper limit here is to prevent people from abusing this feature # in a shared computing environment. self.buffer_size = min(buffer_size, 20) self.timeout = timeout self.disable_shuffling = disable_shuffling self.epoch = max(epoch, 1) # we use 1-based indexing for epochs self.shuffle = not disable_shuffling self._cur_epoch_itr = None self._next_epoch_itr = None self._supports_prefetch = getattr(dataset, "supports_prefetch", False) @property def frozen_batches(self): if self._frozen_batches is None: self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch)) return self._frozen_batches @property def first_batch(self): if len(self.frozen_batches) == 0: raise Exception( "The dataset is empty. This could indicate " "that all elements in the dataset have been skipped. " "Try increasing the max number of allowed tokens or using " "a larger dataset." ) if getattr(self.dataset, "supports_fetch_outside_dataloader", True): return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0]]) else: return "DUMMY" def __len__(self): return int(math.ceil(len(self.frozen_batches) / float(self.num_shards))) @property def n(self): return self.iterations_in_epoch @property def next_epoch_idx(self): """Return the epoch index after *next_epoch_itr* is called.""" if self._next_epoch_itr is not None: return self.epoch elif self._cur_epoch_itr is not None and self.end_of_epoch(): return self.epoch + 1 else: return self.epoch def next_epoch_itr( self, shuffle=True, fix_batches_to_gpus=False, set_dataset_epoch=True ): """Return a new iterator over the dataset. Args: shuffle (bool, optional): shuffle batches before returning the iterator (default: True). fix_batches_to_gpus (bool, optional): ensure that batches are always allocated to the same shards across epochs. Requires that :attr:`dataset` supports prefetching (default: False). set_dataset_epoch (bool, optional): update the wrapped Dataset with the new epoch number (default: True). 
""" if self.disable_shuffling: shuffle = False prev_epoch = self.epoch self.epoch = self.next_epoch_idx if set_dataset_epoch and hasattr(self.dataset, "set_epoch"): self.dataset.set_epoch(self.epoch) if self._next_epoch_itr is not None: self._cur_epoch_itr = self._next_epoch_itr self._next_epoch_itr = None else: if callable(self.batch_sampler) and prev_epoch != self.epoch: # reset _frozen_batches to refresh the next epoch self._frozen_batches = None self._cur_epoch_itr = self._get_iterator_for_epoch( self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus, ) self.shuffle = shuffle return self._cur_epoch_itr def end_of_epoch(self) -> bool: """Returns whether the most recent epoch iterator has been exhausted""" return not self._cur_epoch_itr.has_next() @property def iterations_in_epoch(self): """The number of consumed batches in the current epoch.""" if self._cur_epoch_itr is not None: return self._cur_epoch_itr.n elif self._next_epoch_itr is not None: return self._next_epoch_itr.n return 0 def state_dict(self): """Returns a dictionary containing a whole state of the iterator.""" if self.end_of_epoch(): epoch = self.epoch + 1 iter_in_epoch = 0 else: epoch = self.epoch iter_in_epoch = self.iterations_in_epoch return { "version": 2, "epoch": epoch, "iterations_in_epoch": iter_in_epoch, "shuffle": self.shuffle, } def load_state_dict(self, state_dict): """Copies the state of the iterator from the given *state_dict*.""" self.epoch = state_dict["epoch"] itr_pos = state_dict.get("iterations_in_epoch", 0) version = state_dict.get("version", 1) if itr_pos > 0: # fast-forward epoch iterator self._next_epoch_itr = self._get_iterator_for_epoch( self.epoch, shuffle=state_dict.get("shuffle", True), offset=itr_pos, ) if self._next_epoch_itr is None: if version == 1: # legacy behavior: we finished the epoch, increment epoch counter self.epoch += 1 else: raise RuntimeError( "Cannot resume training due to dataloader mismatch, please " "report this to the fairseq developers. You can relaunch " "training with `--reset-dataloader` and it should work." 
) else: self._next_epoch_itr = None def _get_iterator_for_epoch( self, epoch, shuffle, fix_batches_to_gpus=False, offset=0 ): def shuffle_batches(batches, seed): with data_utils.numpy_seed(seed): np.random.shuffle(batches) return batches if self._supports_prefetch: batches = self.frozen_batches if shuffle and not fix_batches_to_gpus: batches = shuffle_batches(list(batches), self.seed + epoch) batches = list( ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]) ) self.dataset.prefetch([i for s in batches for i in s]) if shuffle and fix_batches_to_gpus: batches = shuffle_batches(batches, self.seed + epoch + self.shard_id) else: if shuffle: batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch) else: batches = self.frozen_batches batches = list( ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]) ) if offset > 0 and offset >= len(batches): return None if self.num_workers > 0: os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning" # Create data loader itr = torch.utils.data.DataLoader( self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers, timeout=self.timeout, pin_memory=True, ) # Wrap with a BufferedIterator if needed if self.buffer_size > 0: itr = BufferedIterator(self.buffer_size, itr) # Wrap with CountingIterator itr = CountingIterator(itr, start=offset) return itr class GroupedIterator(CountingIterator): """Wrapper around an iterable that returns groups (chunks) of items. Args: iterable (iterable): iterable to wrap chunk_size (int): size of each chunk Attributes: n (int): number of elements consumed from this iterator """ def __init__(self, iterable, chunk_size): itr = _chunk_iterator(iterable, chunk_size) super().__init__( itr, start=int(math.ceil(getattr(iterable, "n", 0) / float(chunk_size))), total=int(math.ceil(len(iterable) / float(chunk_size))), ) self.chunk_size = chunk_size def _chunk_iterator(itr, chunk_size): chunk = [] for x in itr: chunk.append(x) if len(chunk) == chunk_size: yield chunk chunk = [] if len(chunk) > 0: yield chunk class ShardedIterator(CountingIterator): """A sharded wrapper around an iterable, padded to length. Args: iterable (iterable): iterable to wrap num_shards (int): number of shards to split the iterable into shard_id (int): which shard to iterator over fill_value (Any, optional): padding value when the iterable doesn't evenly divide *num_shards* (default: None). 
Attributes: n (int): number of elements consumed from this iterator """ def __init__(self, iterable, num_shards, shard_id, fill_value=None): if shard_id < 0 or shard_id >= num_shards: raise ValueError("shard_id must be between 0 and num_shards") sharded_len = int(math.ceil(len(iterable) / float(num_shards))) itr = map( operator.itemgetter(1), itertools.zip_longest( range(sharded_len), itertools.islice(iterable, shard_id, len(iterable), num_shards), fillvalue=fill_value, ), ) super().__init__( itr, start=int(math.ceil(getattr(iterable, "n", 0) / float(num_shards))), total=sharded_len, ) class BackgroundConsumer(Thread): def __init__(self, queue, source, max_len, cuda_device): Thread.__init__(self) self._queue = queue self._source = source self._max_len = max_len self.count = 0 self.cuda_device = cuda_device def run(self): # set_device to avoid creation of GPU0 context when using pin_memory if self.cuda_device is not None: torch.cuda.set_device(self.cuda_device) try: for item in self._source: self._queue.put(item) # Stop if we reached the maximum length self.count += 1 if self._max_len is not None and self.count >= self._max_len: break # Signal the consumer we are done. self._queue.put(_sentinel) except Exception as e: self._queue.put(e) class BufferedIterator(object): def __init__(self, size, iterable): self._queue = queue.Queue(size) self._iterable = iterable self._consumer = None self.start_time = time.time() self.warning_time = None self.total = len(iterable) def _create_consumer(self): self._consumer = BackgroundConsumer( self._queue, self._iterable, self.total, torch.cuda.current_device() if torch.cuda.is_available() else None ) self._consumer.daemon = True self._consumer.start() def __iter__(self): return self def __len__(self): return self.total def take(self, n): self.total = min(self.total, n) # Propagate this change to the underlying iterator if hasattr(self._iterable, "take"): self._iterable.take(n) return self def __next__(self): # Create consumer if not created yet if self._consumer is None: self._create_consumer() # Notify the user if there is a data loading bottleneck if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)): if time.time() - self.start_time > 5 * 60: if ( self.warning_time is None or time.time() - self.warning_time > 15 * 60 ): logger.debug( "Data loading buffer is empty or nearly empty. This may " "indicate a data loading bottleneck, and increasing the " "number of workers (--num-workers) may help." ) self.warning_time = time.time() # Get next example item = self._queue.get(True) if isinstance(item, Exception): raise item if item is _sentinel: raise StopIteration() return item class GroupedEpochBatchIterator(EpochBatchIterator): """Grouped version of EpochBatchIterator It takes several samplers from different datasets. Each epoch shuffle the dataset wise sampler individually with different random seed. The those sub samplers are combined with into one big samplers with deterministic permutation to mix batches from different datasets. 
It will act like EpochBatchIterator but make sure 1) data from one data set each time 2) for different workers, they use the same order to fetch the data so they will use data from the same dataset everytime mult_rate is used for update_freq > 1 case where we want to make sure update_freq mini-batches come from same source """ def __init__( self, dataset, collate_fn, batch_samplers, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0, mult_rate=1, buffer_size=0, ): super().__init__( dataset, collate_fn, batch_samplers, seed, num_shards, shard_id, num_workers, epoch, buffer_size, ) # level 0: sub-samplers 1: batch_idx 2: batches self._frozen_batches = tuple([tuple(sub_batch) for sub_batch in batch_samplers]) self.step_size = mult_rate * num_shards self.lengths = [ (len(x) // self.step_size) * self.step_size for x in self.frozen_batches ] def __len__(self): return sum(self.lengths) @property def first_batch(self): if len(self.frozen_batches) == 0: raise Exception( "The dataset is empty. This could indicate " "that all elements in the dataset have been skipped. " "Try increasing the max number of allowed tokens or using " "a larger dataset." ) if self.dataset.supports_fetch_outside_dataloader: return self.collate_fn([self.dataset[i] for i in self.frozen_batches[0][0]]) else: return "DUMMY" def _get_iterator_for_epoch( self, epoch, shuffle, fix_batches_to_gpus=False, offset=0 ): def shuffle_batches(batches, seed): with data_utils.numpy_seed(seed): np.random.shuffle(batches) return batches def return_full_batches(batch_sets, seed, shuffle): if shuffle: batch_sets = [shuffle_batches(list(x), seed) for x in batch_sets] batch_sets = [ batch_sets[i][: self.lengths[i]] for i in range(len(batch_sets)) ] batches = list(itertools.chain.from_iterable(batch_sets)) if shuffle: with data_utils.numpy_seed(seed): idx = np.random.permutation(len(batches) // self.step_size) if len(idx) * self.step_size != len(batches): raise ValueError( "ERROR: %d %d %d %d" % (len(idx), self.step_size, len(batches), self.shard_id), ":".join(["%d" % x for x in self.lengths]), ) mini_shards = [ batches[i * self.step_size : (i + 1) * self.step_size] for i in idx ] batches = list(itertools.chain.from_iterable(mini_shards)) return batches if self._supports_prefetch: raise NotImplementedError("To be implemented") else: batches = return_full_batches( self.frozen_batches, self.seed + epoch, shuffle ) batches = list( ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]) ) if offset > 0 and offset >= len(batches): return None if self.num_workers > 0: os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning" itr = torch.utils.data.DataLoader( self.dataset, collate_fn=self.collate_fn, batch_sampler=batches[offset:], num_workers=self.num_workers, ) if self.buffer_size > 0: itr = BufferedIterator(self.buffer_size, itr) return CountingIterator(itr, start=offset)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/iterators.py
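The iterator wrappers in iterators.py above compose in a predictable way. The following short sketch is illustrative only (not part of the original file) and assumes fairseq is importable; it shows CountingIterator tracking consumption, GroupedIterator chunking batches, and ShardedIterator splitting batches across shards.

from fairseq.data.iterators import CountingIterator, GroupedIterator, ShardedIterator

batches = [[0, 1], [2, 3], [4, 5], [6, 7]]  # toy "batches" of dataset indices

itr = CountingIterator(batches)
first = next(itr)            # first == [0, 1]; itr.n == 1, len(itr) == 4

grouped = GroupedIterator(batches, 2)
chunks = list(grouped)       # [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]

shard0 = list(ShardedIterator(batches, num_shards=2, shard_id=0, fill_value=[]))
# shard0 == [[0, 1], [4, 5]]: every second batch, starting at this shard's offset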
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import subprocess import json import tempfile import hashlib from typing import Hashable try: import pyarrow.plasma as plasma PYARROW_AVAILABLE = True except ImportError: plasma = None PYARROW_AVAILABLE = False class PlasmaArray: """ Wrapper around numpy arrays that automatically moves the data to shared memory upon serialization. This is particularly helpful when passing numpy arrays through multiprocessing, so that data is not unnecessarily duplicated or pickled. """ def __init__(self, array): super().__init__() self.array = array self.disable = array.nbytes < 134217728 # disable for arrays <128MB self.object_id = None self.path = None # variables with underscores shouldn't be pickled self._client = None self._server = None self._server_tmp = None self._plasma = None @property def plasma(self): if self._plasma is None and not self.disable: self._plasma = plasma return self._plasma def start_server(self): if self.plasma is None or self._server is not None: return assert self.object_id is None assert self.path is None self._server_tmp = tempfile.NamedTemporaryFile() self.path = self._server_tmp.name self._server = subprocess.Popen( ["plasma_store", "-m", str(int(1.05 * self.array.nbytes)), "-s", self.path] ) @property def client(self): if self._client is None: assert self.path is not None self._client = self.plasma.connect(self.path, num_retries=200) return self._client def __getstate__(self): """Called on pickle load""" if self.plasma is None: return self.__dict__ if self.object_id is None: self.start_server() self.object_id = self.client.put(self.array) state = self.__dict__.copy() del state["array"] state["_client"] = None state["_server"] = None state["_server_tmp"] = None state["_plasma"] = None return state def __setstate__(self, state): """Called on pickle save""" self.__dict__.update(state) if self.plasma is None: return self.array = self.client.get(self.object_id) def __del__(self): if self._server is not None: self._server.kill() self._server = None self._server_tmp.close() self._server_tmp = None DEFAULT_PLASMA_PATH = "/tmp/plasma" class PlasmaView: """Interface to write and read from shared memory. Whereas PlasmaArray writes to plasma on serialization, PlasmaView writes to shared memory on instantiation.""" def __init__(self, array, split_path: str, hash_data: Hashable, plasma_path=None): """ Args: array: numpy array to store. This can be read with ``PlasmaView().array`` split_path: the path whence the data was read, used for hashing hash_data: other metadata about the array that can be used to create a unique key. as of writing, the 3 callers in ``TokenBlockDataset`` use:: hash_data = ((block_size, document_sep_len, str(break_mode), len(dataset)), 0|1|2) """ assert PYARROW_AVAILABLE assert split_path is not None if plasma_path is None: plasma_path = DEFAULT_PLASMA_PATH self.path = plasma_path self.split_path = split_path self._client = None # Initialize lazily for pickle. plasma clients should not be deep copied or serialized. 
self._n = None self.object_id = self.get_object_id(self.split_path, hash_data) try: self.client.put(array, object_id=self.object_id) except plasma.PlasmaObjectExists: pass @property def client(self): if self._client is None: self._client = plasma.connect(self.path, num_retries=200) return self._client @property def array(self): """Fetch a read only view of an np.array, stored in plasma.""" ret = self.client.get(self.object_id) return ret @staticmethod def get_object_id(split_path: str, hash_data: Hashable): """Returns plasma.ObjectID from hashing split_path and object_num.""" hash = hashlib.blake2b(bytes(split_path, "utf-8"), digest_size=20) harg = json.dumps(hash_data).encode("utf-8") hash.update(harg) return plasma.ObjectID(hash.digest()) def __getstate__(self): """Called on pickle save""" self.disconnect() state = self.__dict__.copy() assert state["_client"] is None assert "object_id" in state return state def __setstate__(self, state): """Called on pickle load""" self.__dict__.update(state) def __del__(self): self.disconnect() def disconnect(self): if self._client is not None: self._client.disconnect() self._client = None def __len__(self): """Save reads by caching len""" if self._n is None: self._n = len(self.array) return self._n GB100 = (1024 ** 3) * 100 class PlasmaStore: def __init__(self, path=DEFAULT_PLASMA_PATH, nbytes: int = GB100): self.server = self.start(path, nbytes) def __del__(self): self.server.kill() @staticmethod def start(path=DEFAULT_PLASMA_PATH, nbytes: int = GB100) -> subprocess.Popen: if not PYARROW_AVAILABLE: raise ImportError("please run pip install pyarrow to use --use_plasma_view") # best practice is to allocate more space than we need. The limitation seems to be the size of /dev/shm _server = subprocess.Popen(["plasma_store", "-m", str(nbytes), "-s", path]) plasma.connect(path, num_retries=200) # If we can't connect we fail immediately return _server
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/plasma_utils.py
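As a rough illustration of the behaviour described in the PlasmaArray docstring above (a sketch, not from the repository): arrays smaller than 128 MB skip plasma entirely, so pickling round-trips the data inline without starting a plasma_store server.

import pickle

import numpy as np
from fairseq.data.plasma_utils import PlasmaArray

small = PlasmaArray(np.arange(10, dtype=np.int64))    # well under 128 MB, plasma disabled
restored = pickle.loads(pickle.dumps(small))          # no shared-memory server involved
assert np.array_equal(restored.array, np.arange(10))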
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging

import numpy as np

from fairseq.data import BaseWrapperDataset, plasma_utils


logger = logging.getLogger(__name__)


class ResamplingDataset(BaseWrapperDataset):
    """Randomly samples from a given dataset at each epoch.

    Sampling is done with or without replacement, depending on the "replace"
    parameter.

    Optionally, the epoch size can be rescaled. This is potentially desirable
    to increase per-epoch coverage of the base dataset (since sampling with
    replacement means that many items in the dataset will be left out). In the
    case of sampling without replacement, size_ratio should be strictly less
    than 1.

    Args:
        dataset (~torch.utils.data.Dataset): dataset on which to sample.
        weights (List[float]): list of probability weights
            (default: None, which corresponds to uniform sampling).
        replace (bool): sampling mode; True for "with replacement", or False
            for "without replacement" (default: True)
        size_ratio (float): the ratio to subsample to; must be positive
            (default: 1.0).
        batch_by_size (bool): whether or not to batch by sequence length
            (default: True).
        seed (int): RNG seed to use (default: 0).
        epoch (int): starting epoch number (default: 1).
    """

    def __init__(
        self,
        dataset,
        weights=None,
        replace=True,
        size_ratio=1.0,
        batch_by_size=True,
        seed=0,
        epoch=1,
    ):
        super().__init__(dataset)

        if weights is None:
            self.weights = None
        else:
            assert len(weights) == len(dataset)
            weights_arr = np.array(weights, dtype=np.float64)
            weights_arr /= weights_arr.sum()
            self.weights = plasma_utils.PlasmaArray(weights_arr)

        self.replace = replace

        assert size_ratio > 0.0
        if not self.replace:
            assert size_ratio < 1.0
        self.size_ratio = float(size_ratio)
        self.actual_size = np.ceil(len(dataset) * self.size_ratio).astype(int)

        self.batch_by_size = batch_by_size
        self.seed = seed

        self._cur_epoch = None
        self._cur_indices = None

        self.set_epoch(epoch)

    def __getitem__(self, index):
        return self.dataset[self._cur_indices.array[index]]

    def __len__(self):
        return self.actual_size

    @property
    def sizes(self):
        if isinstance(self.dataset.sizes, list):
            return [s[self._cur_indices.array] for s in self.dataset.sizes]
        return self.dataset.sizes[self._cur_indices.array]

    def num_tokens(self, index):
        return self.dataset.num_tokens(self._cur_indices.array[index])

    def size(self, index):
        return self.dataset.size(self._cur_indices.array[index])

    def ordered_indices(self):
        if self.batch_by_size:
            order = [
                np.arange(len(self)),
                self.sizes,
            ]  # No need to handle `self.shuffle == True`
            return np.lexsort(order)
        else:
            return np.arange(len(self))

    def prefetch(self, indices):
        self.dataset.prefetch(self._cur_indices.array[indices])

    @property
    def can_reuse_epoch_itr_across_epochs(self):
        return False

    def set_epoch(self, epoch):
        logger.debug("ResamplingDataset.set_epoch: {}".format(epoch))
        super().set_epoch(epoch)

        if epoch == self._cur_epoch:
            return

        self._cur_epoch = epoch

        # Generate a weighted sample of indices as a function of the
        # random seed and the current epoch.

        rng = np.random.RandomState(
            [
                42,  # magic number
                self.seed % (2 ** 32),  # global seed
                self._cur_epoch,  # epoch index
            ]
        )
        self._cur_indices = plasma_utils.PlasmaArray(
            rng.choice(
                len(self.dataset),
                self.actual_size,
                replace=self.replace,
                p=(None if self.weights is None else self.weights.array),
            )
        )
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/resampling_dataset.py
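The index-drawing logic inside set_epoch above can be summarised standalone. The sketch below (the helper name is hypothetical; no fairseq dependency) shows how the resampled indices are a deterministic function of the seed and the epoch.

import numpy as np

def resample_indices(dataset_len, seed, epoch, size_ratio=1.0, replace=True, weights=None):
    # Mirrors ResamplingDataset.set_epoch: seed the RNG with (magic, seed, epoch)
    actual_size = int(np.ceil(dataset_len * size_ratio))
    rng = np.random.RandomState([42, seed % (2 ** 32), epoch])
    return rng.choice(dataset_len, actual_size, replace=replace, p=weights)

epoch1 = resample_indices(100, seed=0, epoch=1)
epoch2 = resample_indices(100, seed=0, epoch=2)
assert not np.array_equal(epoch1, epoch2)                               # new sample each epoch
assert np.array_equal(epoch1, resample_indices(100, seed=0, epoch=1))   # but reproducible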
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch from fairseq.data import FairseqDataset, data_utils logger = logging.getLogger(__name__) def collate( samples, pad_idx, eos_idx, left_pad_source=True, left_pad_target=False, input_feeding=True, pad_to_length=None, pad_to_multiple=1, ): if len(samples) == 0: return {} def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None): return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning, pad_to_length=pad_to_length, pad_to_multiple=pad_to_multiple, ) def check_alignment(alignment, src_len, tgt_len): if alignment is None or len(alignment) == 0: return False if ( alignment[:, 0].max().item() >= src_len - 1 or alignment[:, 1].max().item() >= tgt_len - 1 ): logger.warning("alignment size mismatch found, skipping alignment!") return False return True def compute_alignment_weights(alignments): """ Given a tensor of shape [:, 2] containing the source-target indices corresponding to the alignments, a weight vector containing the inverse frequency of each target index is computed. For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then a tensor containing [1., 0.5, 0.5, 1] should be returned (since target index 3 is repeated twice) """ align_tgt = alignments[:, 1] _, align_tgt_i, align_tgt_c = torch.unique( align_tgt, return_inverse=True, return_counts=True ) align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]] return 1.0 / align_weights.float() id = torch.LongTensor([s["id"] for s in samples]) src_tokens = merge( "source", left_pad=left_pad_source, pad_to_length=pad_to_length["source"] if pad_to_length is not None else None, ) # sort by descending source length src_lengths = torch.LongTensor( [s["source"].ne(pad_idx).long().sum() for s in samples] ) src_lengths, sort_order = src_lengths.sort(descending=True) id = id.index_select(0, sort_order) src_tokens = src_tokens.index_select(0, sort_order) prev_output_tokens = None target = None if samples[0].get("target", None) is not None: target = merge( "target", left_pad=left_pad_target, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) target = target.index_select(0, sort_order) tgt_lengths = torch.LongTensor( [s["target"].ne(pad_idx).long().sum() for s in samples] ).index_select(0, sort_order) ntokens = tgt_lengths.sum().item() if samples[0].get("prev_output_tokens", None) is not None: prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target) elif input_feeding: # we create a shifted version of targets for feeding the # previous output token(s) into the next decoder step prev_output_tokens = merge( "target", left_pad=left_pad_target, move_eos_to_beginning=True, pad_to_length=pad_to_length["target"] if pad_to_length is not None else None, ) else: ntokens = src_lengths.sum().item() batch = { "id": id, "nsentences": len(samples), "ntokens": ntokens, "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths,}, "target": target, } if prev_output_tokens is not None: batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select( 0, sort_order ) if samples[0].get("alignment", None) is not None: bsz, tgt_sz = batch["target"].shape src_sz = batch["net_input"]["src_tokens"].shape[1] offsets = torch.zeros((len(sort_order), 2), dtype=torch.long) offsets[:, 1] += torch.arange(len(sort_order), 
dtype=torch.long) * tgt_sz if left_pad_source: offsets[:, 0] += src_sz - src_lengths if left_pad_target: offsets[:, 1] += tgt_sz - tgt_lengths alignments = [ alignment + offset for align_idx, offset, src_len, tgt_len in zip( sort_order, offsets, src_lengths, tgt_lengths ) for alignment in [samples[align_idx]["alignment"].view(-1, 2)] if check_alignment(alignment, src_len, tgt_len) ] if len(alignments) > 0: alignments = torch.cat(alignments, dim=0) align_weights = compute_alignment_weights(alignments) batch["alignments"] = alignments batch["align_weights"] = align_weights if samples[0].get("constraints", None) is not None: # Collate the packed constraints across the samples, padding to # the length of the longest sample. lens = [sample.get("constraints").size(0) for sample in samples] max_len = max(lens) constraints = torch.zeros((len(samples), max(lens))).long() for i, sample in enumerate(samples): constraints[i, 0 : lens[i]] = samples[i].get("constraints") batch["constraints"] = constraints.index_select(0, sort_order) return batch class LanguagePairDataset(FairseqDataset): """ A pair of torch.utils.data.Datasets. Args: src (torch.utils.data.Dataset): source dataset to wrap src_sizes (List[int]): source sentence lengths src_dict (~fairseq.data.Dictionary): source vocabulary tgt (torch.utils.data.Dataset, optional): target dataset to wrap tgt_sizes (List[int], optional): target sentence lengths tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary left_pad_source (bool, optional): pad source tensors on the left side (default: True). left_pad_target (bool, optional): pad target tensors on the left side (default: False). shuffle (bool, optional): shuffle dataset elements before batching (default: True). input_feeding (bool, optional): create a shifted version of the targets to be passed into the model for teacher forcing (default: True). remove_eos_from_source (bool, optional): if set, removes eos from end of source if it's present (default: False). append_eos_to_target (bool, optional): if set, appends eos to end of target if it's absent (default: False). align_dataset (torch.utils.data.Dataset, optional): dataset containing alignments. constraints (Tensor, optional): 2d tensor with a concatenated, zero- delimited list of constraints for each sentence. append_bos (bool, optional): if set, appends bos to the beginning of source/target sentence. num_buckets (int, optional): if set to a value greater than 0, then batches will be bucketed into the given number of batch shapes. src_lang_id (int, optional): source language ID, if set, the collated batch will contain a field 'src_lang_id' in 'net_input' which indicates the source language of the samples. tgt_lang_id (int, optional): target language ID, if set, the collated batch will contain a field 'tgt_lang_id' which indicates the target language of the samples. 
""" def __init__( self, src, src_sizes, src_dict, tgt=None, tgt_sizes=None, tgt_dict=None, left_pad_source=True, left_pad_target=False, shuffle=True, input_feeding=True, remove_eos_from_source=False, append_eos_to_target=False, align_dataset=None, constraints=None, append_bos=False, eos=None, num_buckets=0, src_lang_id=None, tgt_lang_id=None, pad_to_multiple=1, ): if tgt_dict is not None: assert src_dict.pad() == tgt_dict.pad() assert src_dict.eos() == tgt_dict.eos() assert src_dict.unk() == tgt_dict.unk() if tgt is not None: assert len(src) == len( tgt ), "Source and target must contain the same number of examples" self.src = src self.tgt = tgt self.src_sizes = np.array(src_sizes) self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None self.sizes = ( np.vstack((self.src_sizes, self.tgt_sizes)).T if self.tgt_sizes is not None else self.src_sizes ) self.src_dict = src_dict self.tgt_dict = tgt_dict self.left_pad_source = left_pad_source self.left_pad_target = left_pad_target self.shuffle = shuffle self.input_feeding = input_feeding self.remove_eos_from_source = remove_eos_from_source self.append_eos_to_target = append_eos_to_target self.align_dataset = align_dataset if self.align_dataset is not None: assert ( self.tgt_sizes is not None ), "Both source and target needed when alignments are provided" self.constraints = constraints self.append_bos = append_bos self.eos = eos if eos is not None else src_dict.eos() self.src_lang_id = src_lang_id self.tgt_lang_id = tgt_lang_id if num_buckets > 0: from fairseq.data import BucketPadLengthDataset self.src = BucketPadLengthDataset( self.src, sizes=self.src_sizes, num_buckets=num_buckets, pad_idx=self.src_dict.pad(), left_pad=self.left_pad_source, ) self.src_sizes = self.src.sizes logger.info("bucketing source lengths: {}".format(list(self.src.buckets))) if self.tgt is not None: self.tgt = BucketPadLengthDataset( self.tgt, sizes=self.tgt_sizes, num_buckets=num_buckets, pad_idx=self.tgt_dict.pad(), left_pad=self.left_pad_target, ) self.tgt_sizes = self.tgt.sizes logger.info( "bucketing target lengths: {}".format(list(self.tgt.buckets)) ) # determine bucket sizes using self.num_tokens, which will return # the padded lengths (thanks to BucketPadLengthDataset) num_tokens = np.vectorize(self.num_tokens, otypes=[np.compat.long]) self.bucketed_num_tokens = num_tokens(np.arange(len(self.src))) self.buckets = [ (None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens) ] else: self.buckets = None self.pad_to_multiple = pad_to_multiple def get_batch_shapes(self): return self.buckets def __getitem__(self, index): tgt_item = self.tgt[index] if self.tgt is not None else None src_item = self.src[index] # Append EOS to end of tgt sentence if it does not have an EOS and remove # EOS from end of src sentence if it exists. 
This is useful when we use # use existing datasets for opposite directions i.e., when we want to # use tgt_dataset as src_dataset and vice versa if self.append_eos_to_target: eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos() if self.tgt and self.tgt[index][-1] != eos: tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])]) if self.append_bos: bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos() if self.tgt and self.tgt[index][0] != bos: tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]]) bos = self.src_dict.bos() if self.src[index][0] != bos: src_item = torch.cat([torch.LongTensor([bos]), self.src[index]]) if self.remove_eos_from_source: eos = self.src_dict.eos() if self.src[index][-1] == eos: src_item = self.src[index][:-1] example = { "id": index, "source": src_item, "target": tgt_item, } if self.align_dataset is not None: example["alignment"] = self.align_dataset[index] if self.constraints is not None: example["constraints"] = self.constraints[index] return example def __len__(self): return len(self.src) def collater(self, samples, pad_to_length=None): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate pad_to_length (dict, optional): a dictionary of {'source': source_pad_to_length, 'target': target_pad_to_length} to indicate the max length to pad to in source and target respectively. Returns: dict: a mini-batch with the following keys: - `id` (LongTensor): example IDs in the original input order - `ntokens` (int): total number of tokens in the batch - `net_input` (dict): the input to the Model, containing keys: - `src_tokens` (LongTensor): a padded 2D Tensor of tokens in the source sentence of shape `(bsz, src_len)`. Padding will appear on the left if *left_pad_source* is ``True``. - `src_lengths` (LongTensor): 1D Tensor of the unpadded lengths of each source sentence of shape `(bsz)` - `prev_output_tokens` (LongTensor): a padded 2D Tensor of tokens in the target sentence, shifted right by one position for teacher forcing, of shape `(bsz, tgt_len)`. This key will not be present if *input_feeding* is ``False``. Padding will appear on the left if *left_pad_target* is ``True``. - `src_lang_id` (LongTensor): a long Tensor which contains source language IDs of each sample in the batch - `target` (LongTensor): a padded 2D Tensor of tokens in the target sentence of shape `(bsz, tgt_len)`. Padding will appear on the left if *left_pad_target* is ``True``. - `tgt_lang_id` (LongTensor): a long Tensor which contains target language IDs of each sample in the batch """ res = collate( samples, pad_idx=self.src_dict.pad(), eos_idx=self.eos, left_pad_source=self.left_pad_source, left_pad_target=self.left_pad_target, input_feeding=self.input_feeding, pad_to_length=pad_to_length, pad_to_multiple=self.pad_to_multiple, ) if self.src_lang_id is not None or self.tgt_lang_id is not None: src_tokens = res["net_input"]["src_tokens"] bsz = src_tokens.size(0) if self.src_lang_id is not None: res["net_input"]["src_lang_id"] = ( torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens) ) if self.tgt_lang_id is not None: res["tgt_lang_id"] = ( torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens) ) return res def num_tokens(self, index): """Return the number of tokens in a sample. 
This value is used to enforce ``--max-tokens`` during batching.""" return max( self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0, ) def num_tokens_vec(self, indices): """Return the number of tokens for a set of positions defined by indices. This value is used to enforce ``--max-tokens`` during batching.""" sizes = self.src_sizes[indices] if self.tgt_sizes is not None: sizes = np.maximum(sizes, self.tgt_sizes[indices]) return sizes def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" return ( self.src_sizes[index], self.tgt_sizes[index] if self.tgt_sizes is not None else 0, ) def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" if self.shuffle: indices = np.random.permutation(len(self)).astype(np.int64) else: indices = np.arange(len(self), dtype=np.int64) if self.buckets is None: # sort by target length, then source length if self.tgt_sizes is not None: indices = indices[np.argsort(self.tgt_sizes[indices], kind="mergesort")] return indices[np.argsort(self.src_sizes[indices], kind="mergesort")] else: # sort by bucketed_num_tokens, which is: # max(padded_src_len, padded_tgt_len) return indices[ np.argsort(self.bucketed_num_tokens[indices], kind="mergesort") ] @property def supports_prefetch(self): return getattr(self.src, "supports_prefetch", False) and ( getattr(self.tgt, "supports_prefetch", False) or self.tgt is None ) def prefetch(self, indices): self.src.prefetch(indices) if self.tgt is not None: self.tgt.prefetch(indices) if self.align_dataset is not None: self.align_dataset.prefetch(indices) def filter_indices_by_size(self, indices, max_sizes): """Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ return data_utils.filter_paired_dataset_indices_by_size( self.src_sizes, self.tgt_sizes, indices, max_sizes, )
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/language_pair_dataset.py
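The nested compute_alignment_weights helper in collate above is easiest to see on the concrete case from its own docstring. The snippet below is a standalone copy for illustration (the nested function is not importable on its own): target index 3 appears twice, so those two alignments each get weight 0.5.

import numpy as np
import torch

def compute_alignment_weights(alignments):
    align_tgt = alignments[:, 1]
    _, align_tgt_i, align_tgt_c = torch.unique(
        align_tgt, return_inverse=True, return_counts=True
    )
    align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]]
    return 1.0 / align_weights.float()

alignments = torch.LongTensor([[5, 7], [2, 3], [1, 3], [4, 2]])
print(compute_alignment_weights(alignments))  # tensor([1.0000, 0.5000, 0.5000, 1.0000])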
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import torch

from . import BaseWrapperDataset


class AppendTokenDataset(BaseWrapperDataset):
    def __init__(self, dataset, token=None):
        super().__init__(dataset)
        self.token = token
        if token is not None:
            self._sizes = np.array(dataset.sizes) + 1
        else:
            self._sizes = dataset.sizes

    def __getitem__(self, idx):
        item = self.dataset[idx]
        if self.token is not None:
            item = torch.cat([item, item.new([self.token])])
        return item

    @property
    def sizes(self):
        return self._sizes

    def num_tokens(self, index):
        n = self.dataset.num_tokens(index)
        if self.token is not None:
            n += 1
        return n

    def size(self, index):
        n = self.dataset.size(index)
        if self.token is not None:
            n += 1
        return n
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/append_token_dataset.py
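A quick usage sketch for AppendTokenDataset above (the TinyDataset stand-in is hypothetical and not part of fairseq; token id 2 is just an example EOS value): appending a token to every item also grows each reported size by one.

import numpy as np
import torch
from fairseq.data.append_token_dataset import AppendTokenDataset

class TinyDataset(torch.utils.data.Dataset):  # hypothetical stand-in base dataset
    def __init__(self, items):
        self.items = items
        self.sizes = np.array([len(t) for t in items])
    def __getitem__(self, idx):
        return self.items[idx]
    def __len__(self):
        return len(self.items)

base = TinyDataset([torch.LongTensor([4, 5]), torch.LongTensor([6])])
with_eos = AppendTokenDataset(base, token=2)
assert with_eos[0].tolist() == [4, 5, 2]
assert list(with_eos.sizes) == [3, 2]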
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.data import data_utils

from . import BaseWrapperDataset


class PadDataset(BaseWrapperDataset):
    def __init__(self, dataset, pad_idx, left_pad):
        super().__init__(dataset)
        self.pad_idx = pad_idx
        self.left_pad = left_pad

    def collater(self, samples):
        return data_utils.collate_tokens(samples, self.pad_idx, left_pad=self.left_pad)


class LeftPadDataset(PadDataset):
    def __init__(self, dataset, pad_idx):
        super().__init__(dataset, pad_idx, left_pad=True)


class RightPadDataset(PadDataset):
    def __init__(self, dataset, pad_idx):
        super().__init__(dataset, pad_idx, left_pad=False)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/pad_dataset.py
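A usage sketch for the padding wrappers above (ListDataset here is a hypothetical stand-in and pad index 1 is just an example value): RightPadDataset's collater pads a list of 1-D tensors on the right to a common length.

import torch
from fairseq.data.pad_dataset import RightPadDataset

class ListDataset(torch.utils.data.Dataset):  # hypothetical stand-in base dataset
    def __init__(self, items):
        self.items = items
    def __getitem__(self, idx):
        return self.items[idx]
    def __len__(self):
        return len(self.items)

ds = RightPadDataset(
    ListDataset([torch.LongTensor([4, 5, 6]), torch.LongTensor([7])]), pad_idx=1
)
batch = ds.collater([ds[0], ds[1]])
# batch == tensor([[4, 5, 6],
#                  [7, 1, 1]])   <- shorter item padded on the right with pad_idx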
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import shutil import struct from functools import lru_cache import numpy as np import torch from fairseq.dataclass.constants import DATASET_IMPL_CHOICES from fairseq.data.fasta_dataset import FastaDataset from fairseq.file_io import PathManager from fairseq.data.huffman import HuffmanMMapIndexedDataset, HuffmanMMapIndex from . import FairseqDataset from typing import Union def best_fitting_int_dtype( max_int_to_represent, ) -> Union[np.uint16, np.uint32, np.int64]: if max_int_to_represent is None: return np.uint32 # Safe guess elif max_int_to_represent < 65500: return np.uint16 elif max_int_to_represent < 4294967295: return np.uint32 else: return np.int64 # we avoid np.uint64 because it doesn't save space and its type promotion behaves unexpectedly # https://github.com/numpy/numpy/issues/5745 def get_available_dataset_impl(): return list(map(str, DATASET_IMPL_CHOICES)) def infer_dataset_impl(path): if IndexedRawTextDataset.exists(path): return "raw" elif IndexedDataset.exists(path): with open(index_file_path(path), "rb") as f: magic = f.read(8) if magic == IndexedDataset._HDR_MAGIC: return "cached" elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]: return "mmap" elif magic == HuffmanMMapIndex._HDR_MAGIC[:8]: return "huffman" else: return None elif FastaDataset.exists(path): return "fasta" else: return None def make_builder(out_file, impl, vocab_size=None): if impl == "mmap": return MMapIndexedDatasetBuilder( out_file, dtype=best_fitting_int_dtype(vocab_size) ) elif impl == "fasta": raise NotImplementedError elif impl == "huffman": raise ValueError("Use HuffmanCodeBuilder directly as it has a different interface.") else: return IndexedDatasetBuilder(out_file) def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None): if impl == "raw" and IndexedRawTextDataset.exists(path): assert dictionary is not None return IndexedRawTextDataset(path, dictionary) elif impl == "lazy" and IndexedDataset.exists(path): return IndexedDataset(path, fix_lua_indexing=fix_lua_indexing) elif impl == "cached" and IndexedDataset.exists(path): return IndexedCachedDataset(path, fix_lua_indexing=fix_lua_indexing) elif impl == "mmap" and MMapIndexedDataset.exists(path): return MMapIndexedDataset(path) elif impl == "fasta" and FastaDataset.exists(path): from fairseq.data.fasta_dataset import EncodedFastaDataset return EncodedFastaDataset(path, dictionary) elif impl == "huffman" and HuffmanMMapIndexedDataset.exists(path): return HuffmanMMapIndexedDataset(path) return None def dataset_exists(path, impl): if impl == "raw": return IndexedRawTextDataset.exists(path) elif impl == "mmap": return MMapIndexedDataset.exists(path) elif impl == "huffman": return HuffmanMMapIndexedDataset.exists(path) else: return IndexedDataset.exists(path) def read_longs(f, n): a = np.empty(n, dtype=np.int64) f.readinto(a) return a def write_longs(f, a): f.write(np.array(a, dtype=np.int64)) _code_to_dtype = { 1: np.uint8, 2: np.int8, 3: np.int16, 4: np.int32, 5: np.int64, 6: np.float, 7: np.double, 8: np.uint16, 9: np.uint32, 10: np.uint64, } def _dtype_header_code(dtype) -> int: for k in _code_to_dtype.keys(): if _code_to_dtype[k] == dtype: return k raise ValueError(dtype) def index_file_path(prefix_path): return prefix_path + ".idx" def data_file_path(prefix_path): return prefix_path + ".bin" class IndexedDataset(FairseqDataset): """Loader for TorchNet 
IndexedDataset""" _HDR_MAGIC = b"TNTIDX\x00\x00" def __init__(self, path, fix_lua_indexing=False): super().__init__() self.path = path self.fix_lua_indexing = fix_lua_indexing self.data_file = None self.read_index(path) def read_index(self, path): with open(index_file_path(path), "rb") as f: magic = f.read(8) assert magic == self._HDR_MAGIC, ( "Index file doesn't match expected format. " "Make sure that --dataset-impl is configured properly." ) version = f.read(8) assert struct.unpack("<Q", version) == (1,) code, self.element_size = struct.unpack("<QQ", f.read(16)) self.dtype = _code_to_dtype[code] self._len, self.s = struct.unpack("<QQ", f.read(16)) self.dim_offsets = read_longs(f, self._len + 1) self.data_offsets = read_longs(f, self._len + 1) self.sizes = read_longs(f, self.s) def read_data(self, path): self.data_file = open(data_file_path(path), "rb", buffering=0) def check_index(self, i): if i < 0 or i >= self._len: raise IndexError("index out of range") def __del__(self): if self.data_file: self.data_file.close() @lru_cache(maxsize=8) def __getitem__(self, i) -> torch.Tensor: if not self.data_file: self.read_data(self.path) self.check_index(i) tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]] a = np.empty(tensor_size, dtype=self.dtype) self.data_file.seek(self.data_offsets[i] * self.element_size) self.data_file.readinto(a) item = torch.from_numpy(a).long() if self.fix_lua_indexing: item -= 1 # subtract 1 for 0-based indexing return item def __len__(self): return self._len def num_tokens(self, index): return self.sizes[index] def size(self, index): return self.sizes[index] @staticmethod def exists(path): return PathManager.exists(index_file_path(path)) and PathManager.exists( data_file_path(path) ) @property def supports_prefetch(self): return False # avoid prefetching to save memory class IndexedCachedDataset(IndexedDataset): def __init__(self, path, fix_lua_indexing=False): super().__init__(path, fix_lua_indexing=fix_lua_indexing) self.cache = None self.cache_index = {} @property def supports_prefetch(self): return True def prefetch(self, indices): if all(i in self.cache_index for i in indices): return if not self.data_file: self.read_data(self.path) indices = sorted(set(indices)) total_size = 0 for i in indices: total_size += self.data_offsets[i + 1] - self.data_offsets[i] self.cache = np.empty(total_size, dtype=self.dtype) ptx = 0 self.cache_index.clear() for i in indices: self.cache_index[i] = ptx size = self.data_offsets[i + 1] - self.data_offsets[i] a = self.cache[ptx : ptx + size] self.data_file.seek(self.data_offsets[i] * self.element_size) self.data_file.readinto(a) ptx += size if self.data_file: # close and delete data file after prefetch so we can pickle self.data_file.close() self.data_file = None @lru_cache(maxsize=8) def __getitem__(self, i): self.check_index(i) tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]] a = np.empty(tensor_size, dtype=self.dtype) ptx = self.cache_index[i] np.copyto(a, self.cache[ptx : ptx + a.size]) item = torch.from_numpy(a).long() if self.fix_lua_indexing: item -= 1 # subtract 1 for 0-based indexing return item class IndexedRawTextDataset(FairseqDataset): """Takes a text file as input and binarizes it in memory at instantiation. 
Original lines are also kept in memory""" def __init__(self, path, dictionary, append_eos=True, reverse_order=False): self.tokens_list = [] self.lines = [] self.sizes = [] self.append_eos = append_eos self.reverse_order = reverse_order self.read_data(path, dictionary) self.size = len(self.tokens_list) def read_data(self, path, dictionary): with open(path, "r", encoding="utf-8") as f: for line in f: self.lines.append(line.strip("\n")) tokens = dictionary.encode_line( line, add_if_not_exist=False, append_eos=self.append_eos, reverse_order=self.reverse_order, ).long() self.tokens_list.append(tokens) self.sizes.append(len(tokens)) self.sizes = np.array(self.sizes) def check_index(self, i): if i < 0 or i >= self.size: raise IndexError("index out of range") @lru_cache(maxsize=8) def __getitem__(self, i): self.check_index(i) return self.tokens_list[i] def get_original_text(self, i): self.check_index(i) return self.lines[i] def __del__(self): pass def __len__(self): return self.size def num_tokens(self, index): return self.sizes[index] def size(self, index): return self.sizes[index] @staticmethod def exists(path): return PathManager.exists(path) class IndexedDatasetBuilder: element_sizes = { np.uint8: 1, np.int8: 1, np.int16: 2, np.int32: 4, np.int64: 8, np.float: 4, np.double: 8, } def __init__(self, out_file, dtype=np.int32): self.out_file = open(out_file, "wb") self.dtype = dtype self.data_offsets = [0] self.dim_offsets = [0] self.sizes = [] self.element_size = self.element_sizes[self.dtype] def add_item(self, tensor): # +1 for Lua compatibility bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype)) self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size) for s in tensor.size(): self.sizes.append(s) self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size())) def merge_file_(self, another_file): index = IndexedDataset(another_file) assert index.dtype == self.dtype begin = self.data_offsets[-1] for offset in index.data_offsets[1:]: self.data_offsets.append(begin + offset) self.sizes.extend(index.sizes) begin = self.dim_offsets[-1] for dim_offset in index.dim_offsets[1:]: self.dim_offsets.append(begin + dim_offset) with open(data_file_path(another_file), "rb") as f: while True: data = f.read(1024) if data: self.out_file.write(data) else: break def finalize(self, index_file): self.out_file.close() index = open(index_file, "wb") index.write(b"TNTIDX\x00\x00") index.write(struct.pack("<Q", 1)) index.write( struct.pack("<QQ", _dtype_header_code(self.dtype), self.element_size) ) index.write(struct.pack("<QQ", len(self.data_offsets) - 1, len(self.sizes))) write_longs(index, self.dim_offsets) write_longs(index, self.data_offsets) write_longs(index, self.sizes) index.close() def _warmup_mmap_file(path): with open(path, "rb") as stream: while stream.read(100 * 1024 * 1024): pass class MMapIndexedDataset(torch.utils.data.Dataset): class Index: _HDR_MAGIC = b"MMIDIDX\x00\x00" @classmethod def writer(cls, path, dtype): class _Writer: def __enter__(self): self._file = open(path, "wb") self._file.write(cls._HDR_MAGIC) self._file.write(struct.pack("<Q", 1)) self._file.write(struct.pack("<B", _dtype_header_code(dtype))) return self @staticmethod def _get_pointers(sizes): dtype_size = dtype().itemsize address = 0 pointers = [] for size in sizes: pointers.append(address) address += size * dtype_size return pointers def write(self, sizes): pointers = self._get_pointers(sizes) self._file.write(struct.pack("<Q", len(sizes))) sizes = np.array(sizes, dtype=np.int32) 
self._file.write(sizes.tobytes(order="C")) del sizes pointers = np.array(pointers, dtype=np.int64) self._file.write(pointers.tobytes(order="C")) del pointers def __exit__(self, exc_type, exc_val, exc_tb): self._file.close() return _Writer() def __init__(self, path): with open(path, "rb") as stream: magic_test = stream.read(9) assert self._HDR_MAGIC == magic_test, ( "Index file doesn't match expected format. " "Make sure that --dataset-impl is configured properly." ) version = struct.unpack("<Q", stream.read(8)) assert (1,) == version (dtype_code,) = struct.unpack("<B", stream.read(1)) self._dtype = _code_to_dtype[dtype_code] self._dtype_size = self._dtype().itemsize self._len = struct.unpack("<Q", stream.read(8))[0] offset = stream.tell() _warmup_mmap_file(path) self._bin_buffer_mmap = np.memmap(path, mode="r", order="C") self._bin_buffer = memoryview(self._bin_buffer_mmap) self._sizes = np.frombuffer( self._bin_buffer, dtype=np.int32, count=self._len, offset=offset ) self._pointers = np.frombuffer( self._bin_buffer, dtype=np.int64, count=self._len, offset=offset + self._sizes.nbytes, ) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap @property def dtype(self): return self._dtype @property def sizes(self): return self._sizes @lru_cache(maxsize=8) def __getitem__(self, i): return self._pointers[i], self._sizes[i] def __len__(self): return self._len def __init__(self, path): super().__init__() self._path = None self._index = None self._bin_buffer = None self._do_init(path) def __getstate__(self): return self._path def __setstate__(self, state): self._do_init(state) def _do_init(self, path): self._path = path self._index = self.Index(index_file_path(self._path)) _warmup_mmap_file(data_file_path(self._path)) self._bin_buffer_mmap = np.memmap( data_file_path(self._path), mode="r", order="C" ) self._bin_buffer = memoryview(self._bin_buffer_mmap) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap del self._index def __len__(self): return len(self._index) @lru_cache(maxsize=8) def __getitem__(self, i): ptr, size = self._index[i] np_array = np.frombuffer( self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr ) if self._index.dtype != np.int64: np_array = np_array.astype(np.int64) return torch.from_numpy(np_array) @property def sizes(self): return self._index.sizes @property def supports_prefetch(self): return False @staticmethod def exists(path): return PathManager.exists(index_file_path(path)) and PathManager.exists( data_file_path(path) ) def get_indexed_dataset_to_local(path) -> str: local_index_path = PathManager.get_local_path(index_file_path(path)) local_data_path = PathManager.get_local_path(data_file_path(path)) assert local_index_path.endswith(".idx") and local_data_path.endswith(".bin"), ( "PathManager.get_local_path does not return files with expected patterns: " f"{local_index_path} and {local_data_path}" ) local_path = local_data_path[:-4] # stripping surfix ".bin" assert local_path == local_index_path[:-4] # stripping surfix ".idx" return local_path class MMapIndexedDatasetBuilder: def __init__(self, out_file, dtype=np.int64): self._data_file = open(out_file, "wb") self._dtype = dtype self._sizes = [] def add_item(self, tensor): np_array = np.array(tensor.numpy(), dtype=self._dtype) self._data_file.write(np_array.tobytes(order="C")) self._sizes.append(np_array.size) def merge_file_(self, another_file): # Concatenate index index = MMapIndexedDataset.Index(index_file_path(another_file)) assert index.dtype == 
self._dtype for size in index.sizes: self._sizes.append(size) # Concatenate data with open(data_file_path(another_file), "rb") as f: shutil.copyfileobj(f, self._data_file) def finalize(self, index_file): self._data_file.close() with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index: index.write(self._sizes)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/indexed_dataset.py
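# --- usage sketch (added for illustration; not part of the original fairseq file) ---
# A minimal round trip through the mmap-backed format defined above. It assumes
# fairseq is importable and that the builder/dataset pair uses the usual
# "<prefix>.bin" / "<prefix>.idx" naming from index_file_path/data_file_path;
# the /tmp paths are hypothetical.
import numpy as np
import torch
from fairseq.data.indexed_dataset import MMapIndexedDataset, MMapIndexedDatasetBuilder

builder = MMapIndexedDatasetBuilder("/tmp/demo.bin", dtype=np.int64)
for sentence in [[5, 6, 7, 2], [8, 9, 2]]:
    # each item is written to the .bin file and its length recorded for the index
    builder.add_item(torch.LongTensor(sentence))
builder.finalize("/tmp/demo.idx")  # writes the MMIDIDX header, sizes and pointers

dataset = MMapIndexedDataset("/tmp/demo")  # memory-maps both files
assert len(dataset) == 2
print(dataset[0])  # tensor([5, 6, 7, 2])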
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from . import BaseWrapperDataset


class RollDataset(BaseWrapperDataset):
    def __init__(self, dataset, shifts):
        super().__init__(dataset)
        self.shifts = shifts

    def __getitem__(self, index):
        item = self.dataset[index]
        return torch.roll(item, self.shifts)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/roll_dataset.py
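# --- usage sketch (added for illustration; not part of the original fairseq file) ---
# RollDataset just applies torch.roll to each item, so the behaviour is easiest to
# see on a plain tensor: a positive shift moves the tail to the front.
import torch

item = torch.tensor([10, 11, 12, 13])
print(torch.roll(item, 1))   # tensor([13, 10, 11, 12])
print(torch.roll(item, -1))  # tensor([11, 12, 13, 10])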
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from collections import Counter from multiprocessing import Pool import torch from fairseq import utils from fairseq.data import data_utils from fairseq.file_chunker_utils import Chunker, find_offsets from fairseq.file_io import PathManager from fairseq.tokenizer import tokenize_line class Dictionary: """A mapping from symbols to consecutive integers""" def __init__( self, *, # begin keyword-only arguments bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", extra_special_symbols=None, ): self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos self.symbols = [] self.count = [] self.indices = {} self.bos_index = self.add_symbol(bos) self.pad_index = self.add_symbol(pad) self.eos_index = self.add_symbol(eos) self.unk_index = self.add_symbol(unk) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(s) self.nspecial = len(self.symbols) def __eq__(self, other): return self.indices == other.indices def __getitem__(self, idx): if idx < len(self.symbols): return self.symbols[idx] return self.unk_word def get_count(self, idx): return self.count[idx] def __len__(self): """Returns the number of symbols in the dictionary""" return len(self.symbols) def __contains__(self, sym): return sym in self.indices def index(self, sym): """Returns the index of the specified symbol""" assert isinstance(sym, str) if sym in self.indices: return self.indices[sym] return self.unk_index def string( self, tensor, bpe_symbol=None, escape_unk=False, extra_symbols_to_ignore=None, unk_string=None, include_eos=False, separator=" ", ): """Helper for converting a tensor of token indices to a string. Can optionally remove BPE symbols or escape <unk> words. """ if torch.is_tensor(tensor) and tensor.dim() == 2: return "\n".join( self.string( t, bpe_symbol, escape_unk, extra_symbols_to_ignore, include_eos=include_eos, ) for t in tensor ) extra_symbols_to_ignore = set(extra_symbols_to_ignore or []) if not include_eos: extra_symbols_to_ignore.add(self.eos()) def token_string(i): if i == self.unk(): if unk_string is not None: return unk_string else: return self.unk_string(escape_unk) else: return self[i] if hasattr(self, "bos_index"): extra_symbols_to_ignore.add(self.bos()) sent = separator.join( token_string(i) for i in tensor if utils.item(i) not in extra_symbols_to_ignore ) return data_utils.post_process(sent, bpe_symbol) def unk_string(self, escape=False): """Return unknown string, optionally escaped as: <<unk>>""" if escape: return "<{}>".format(self.unk_word) else: return self.unk_word def add_symbol(self, word, n=1, overwrite=False): """Adds a word to the dictionary""" if word in self.indices and not overwrite: idx = self.indices[word] self.count[idx] = self.count[idx] + n return idx else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(n) return idx def update(self, new_dict): """Updates counts from new dictionary.""" for word in new_dict.symbols: idx2 = new_dict.indices[word] if word in self.indices: idx = self.indices[word] self.count[idx] = self.count[idx] + new_dict.count[idx2] else: idx = len(self.symbols) self.indices[word] = idx self.symbols.append(word) self.count.append(new_dict.count[idx2]) def finalize(self, threshold=-1, nwords=-1, padding_factor=8): """Sort symbols by frequency in descending order, ignoring special ones. 
Args: - threshold defines the minimum word count - nwords defines the total number of words in the final dictionary, including special symbols - padding_factor can be used to pad the dictionary size to be a multiple of 8, which is important on some hardware (e.g., Nvidia Tensor Cores). """ if nwords <= 0: nwords = len(self) new_indices = dict(zip(self.symbols[: self.nspecial], range(self.nspecial))) new_symbols = self.symbols[: self.nspecial] new_count = self.count[: self.nspecial] c = Counter( dict( sorted(zip(self.symbols[self.nspecial :], self.count[self.nspecial :])) ) ) for symbol, count in c.most_common(nwords - self.nspecial): if count >= threshold: new_indices[symbol] = len(new_symbols) new_symbols.append(symbol) new_count.append(count) else: break assert len(new_symbols) == len(new_indices) self.count = list(new_count) self.symbols = list(new_symbols) self.indices = new_indices self.pad_to_multiple_(padding_factor) def pad_to_multiple_(self, padding_factor): """Pad Dictionary size to be a multiple of *padding_factor*.""" if padding_factor > 1: i = 0 while len(self) % padding_factor != 0: symbol = "madeupword{:04d}".format(i) self.add_symbol(symbol, n=0) i += 1 def bos(self): """Helper to get index of beginning-of-sentence symbol""" return self.bos_index def pad(self): """Helper to get index of pad symbol""" return self.pad_index def eos(self): """Helper to get index of end-of-sentence symbol""" return self.eos_index def unk(self): """Helper to get index of unk symbol""" return self.unk_index @classmethod def load(cls, f): """Loads the dictionary from a text file with the format: ``` <symbol0> <count0> <symbol1> <count1> ... ``` """ d = cls() d.add_from_file(f) return d def add_from_file(self, f): """ Loads a pre-existing dictionary from a text file and adds its symbols to this instance. """ if isinstance(f, str): try: with open(PathManager.get_local_path(f), "r", encoding="utf-8") as fd: self.add_from_file(fd) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception( "Incorrect encoding detected in {}, please " "rebuild the dataset".format(f) ) return lines = f.readlines() indices_start_line = self._load_meta(lines) for line in lines[indices_start_line:]: try: line, field = line.rstrip().rsplit(" ", 1) if field == "#fairseq:overwrite": overwrite = True line, field = line.rsplit(" ", 1) else: overwrite = False count = int(field) word = line if word in self and not overwrite: raise RuntimeError( "Duplicate word found when loading Dictionary: '{}'. " "Duplicate words can overwrite earlier ones by adding the " "#fairseq:overwrite flag at the end of the corresponding row " "in the dictionary file. 
If using the Camembert model, please " "download an updated copy of the model file.".format(word) ) self.add_symbol(word, n=count, overwrite=overwrite) except ValueError: raise ValueError( f"Incorrect dictionary format, expected '<token> <cnt> [flags]': \"{line}\"" ) def _save(self, f, kv_iterator): if isinstance(f, str): PathManager.mkdirs(os.path.dirname(f)) with PathManager.open(f, "w", encoding="utf-8") as fd: return self.save(fd) for k, v in kv_iterator: print("{} {}".format(k, v), file=f) def _get_meta(self): return [], [] def _load_meta(self, lines): return 0 def save(self, f): """Stores dictionary into a text file""" ex_keys, ex_vals = self._get_meta() self._save( f, zip( ex_keys + self.symbols[self.nspecial :], ex_vals + self.count[self.nspecial :], ), ) def dummy_sentence(self, length): t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long() t[-1] = self.eos() return t def encode_line( self, line, line_tokenizer=tokenize_line, add_if_not_exist=True, consumer=None, append_eos=True, reverse_order=False, ) -> torch.IntTensor: words = line_tokenizer(line) if reverse_order: words = list(reversed(words)) nwords = len(words) ids = torch.IntTensor(nwords + 1 if append_eos else nwords) for i, word in enumerate(words): if add_if_not_exist: idx = self.add_symbol(word) else: idx = self.index(word) if consumer is not None: consumer(word, idx) ids[i] = idx if append_eos: ids[nwords] = self.eos_index return ids @staticmethod def _add_file_to_dictionary_single_worker( filename, tokenize, eos_word, start_offset, end_offset, ): counter = Counter() with Chunker(filename, start_offset, end_offset) as line_iterator: for line in line_iterator: for word in tokenize(line): counter.update([word]) counter.update([eos_word]) return counter @staticmethod def add_file_to_dictionary(filename, dict, tokenize, num_workers): def merge_result(counter): for w, c in sorted(counter.items()): dict.add_symbol(w, c) local_file = PathManager.get_local_path(filename) offsets = find_offsets(local_file, num_workers) if num_workers > 1: chunks = zip(offsets, offsets[1:]) pool = Pool(processes=num_workers) results = [] for (start_offset, end_offset) in chunks: results.append( pool.apply_async( Dictionary._add_file_to_dictionary_single_worker, ( local_file, tokenize, dict.eos_word, start_offset, end_offset, ), ) ) pool.close() pool.join() for r in results: merge_result(r.get()) else: merge_result( Dictionary._add_file_to_dictionary_single_worker( local_file, tokenize, dict.eos_word, offsets[0], offsets[1] ) ) class TruncatedDictionary(object): def __init__(self, wrapped_dict, length): self.__class__ = type( wrapped_dict.__class__.__name__, (self.__class__, wrapped_dict.__class__), {}, ) self.__dict__ = wrapped_dict.__dict__ self.wrapped_dict = wrapped_dict self.length = min(len(self.wrapped_dict), length) def __len__(self): return self.length def __getitem__(self, i): if i < self.length: return self.wrapped_dict[i] return self.wrapped_dict.unk()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/dictionary.py
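# --- usage sketch (added for illustration; not part of the original fairseq file) ---
# Building a small Dictionary in memory, encoding a line and decoding it back,
# assuming fairseq is importable. Indices 0-3 are the specials added in __init__
# (<s>, <pad>, </s>, <unk>), so the first regular word gets index 4.
from fairseq.data import Dictionary

d = Dictionary()
for word in "the cat sat on the mat".split():
    d.add_symbol(word)

ids = d.encode_line("the cat sat", add_if_not_exist=False)
print(ids.tolist())          # [4, 5, 6, 2] -- eos (</s> == 2) is appended by default
print(d.string(ids.long()))  # "the cat sat" -- specials are stripped again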
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import numpy as np import torch.utils.data from fairseq.data import data_utils logger = logging.getLogger(__name__) class EpochListening: """Mixin for receiving updates whenever the epoch increments.""" @property def can_reuse_epoch_itr_across_epochs(self): """ Whether we can reuse the :class:`fairseq.data.EpochBatchIterator` for this dataset across epochs. This needs to return ``False`` if the sample sizes can change across epochs, in which case we may need to regenerate batches at each epoch. If your dataset relies in ``set_epoch`` then you should consider setting this to ``False``. """ return True def set_epoch(self, epoch): """Will receive the updated epoch number at the beginning of the epoch.""" pass class FairseqDataset(torch.utils.data.Dataset, EpochListening): """A dataset that provides helpers for batching.""" def __getitem__(self, index): raise NotImplementedError def __len__(self): raise NotImplementedError def collater(self, samples): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch suitable for forwarding with a Model """ raise NotImplementedError def num_tokens(self, index): """Return the number of tokens in a sample. This value is used to enforce ``--max-tokens`` during batching.""" raise NotImplementedError def num_tokens_vec(self, indices): """Return the number of tokens for a set of positions defined by indices. This value is used to enforce ``--max-tokens`` during batching.""" raise NotImplementedError def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" raise NotImplementedError def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" return np.arange(len(self), dtype=np.int64) @property def supports_prefetch(self): """Whether this dataset supports prefetching.""" return False def attr(self, attr: str, index: int): return getattr(self, attr, None) def prefetch(self, indices): """Prefetch the data required for this epoch.""" raise NotImplementedError def get_batch_shapes(self): """ Return a list of valid batch shapes, for example:: [(8, 512), (16, 256), (32, 128)] The first dimension of each tuple is the batch size and can be ``None`` to automatically infer the max batch size based on ``--max-tokens``. The second dimension of each tuple is the max supported length as given by :func:`fairseq.data.FairseqDataset.num_tokens`. This will be used by :func:`fairseq.data.FairseqDataset.batch_by_size` to restrict batch shapes. This is useful on TPUs to avoid too many dynamic shapes (and recompilations). """ return None def batch_by_size( self, indices, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, ): """ Given an ordered set of indices, return batches according to *max_tokens*, *max_sentences* and *required_batch_size_multiple*. 
""" from fairseq.data import data_utils fixed_shapes = self.get_batch_shapes() if fixed_shapes is not None: def adjust_bsz(bsz, num_tokens): if bsz is None: assert max_tokens is not None, "Must specify --max-tokens" bsz = max_tokens // num_tokens if max_sentences is not None: bsz = min(bsz, max_sentences) elif ( bsz >= required_batch_size_multiple and bsz % required_batch_size_multiple != 0 ): bsz -= bsz % required_batch_size_multiple return bsz fixed_shapes = np.array( [ [adjust_bsz(bsz, num_tokens), num_tokens] for (bsz, num_tokens) in fixed_shapes ] ) try: num_tokens_vec = self.num_tokens_vec(indices).astype('int64') except NotImplementedError: num_tokens_vec = None return data_utils.batch_by_size( indices, num_tokens_fn=self.num_tokens, num_tokens_vec=num_tokens_vec, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple, fixed_shapes=fixed_shapes, ) def filter_indices_by_size(self, indices, max_sizes): """ Filter a list of sample indices. Remove those that are longer than specified in *max_sizes*. WARNING: don't update, override method in child classes Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ if isinstance(max_sizes, float) or isinstance(max_sizes, int): if hasattr(self, "sizes") and isinstance(self.sizes, np.ndarray): ignored = indices[self.sizes[indices] > max_sizes].tolist() indices = indices[self.sizes[indices] <= max_sizes] elif ( hasattr(self, "sizes") and isinstance(self.sizes, list) and len(self.sizes) == 1 ): ignored = indices[self.sizes[0][indices] > max_sizes].tolist() indices = indices[self.sizes[0][indices] <= max_sizes] else: indices, ignored = data_utils._filter_by_size_dynamic( indices, self.size, max_sizes ) else: indices, ignored = data_utils._filter_by_size_dynamic( indices, self.size, max_sizes ) return indices, ignored @property def supports_fetch_outside_dataloader(self): """Whether this dataset supports fetching outside the workers of the dataloader.""" return True class FairseqIterableDataset(torch.utils.data.IterableDataset, EpochListening): """ For datasets that need to be read sequentially, usually because the data is being streamed or otherwise can't be manipulated on a single machine. """ def __iter__(self): raise NotImplementedError
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/fairseq_dataset.py
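# --- usage sketch (added for illustration; not part of the original fairseq file) ---
# A minimal FairseqDataset subclass (hypothetical ToyDataset), implementing just
# enough of the interface above -- __getitem__, sizes, num_tokens, size, collater --
# for padding collation and size filtering to work.
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils

class ToyDataset(FairseqDataset):
    def __init__(self, items, pad_idx=1):
        self.items = [torch.LongTensor(x) for x in items]
        self.sizes = np.array([len(x) for x in items])
        self.pad_idx = pad_idx

    def __getitem__(self, index):
        return self.items[index]

    def __len__(self):
        return len(self.items)

    def num_tokens(self, index):
        return self.sizes[index]

    def size(self, index):
        return self.sizes[index]

    def collater(self, samples):
        # right-pad every sample to the longest one in the batch
        return data_utils.collate_tokens(samples, pad_idx=self.pad_idx)

ds = ToyDataset([[4, 5, 6, 2], [7, 2]])
print(ds.collater([ds[0], ds[1]]))                         # 2 x 4 batch, second row padded
print(ds.filter_indices_by_size(ds.ordered_indices(), 3))  # (array([1]), [0])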
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from typing import Dict from fairseq.data.monolingual_dataset import MonolingualDataset from . import FairseqDataset class LMContextWindowDataset(FairseqDataset): """ Wraps a MonolingualDataset and provides more context for evaluation. Each item in the new dataset will have a maximum size of ``tokens_per_sample + context_window``. Args: dataset: dataset to wrap tokens_per_sample (int): the max number of tokens in each dataset item context_window (int): the number of accumulated tokens to add to each dataset item pad_idx (int): padding symbol """ def __init__( self, dataset: MonolingualDataset, tokens_per_sample: int, context_window: int, pad_idx: int, ): assert context_window > 0 self.dataset = dataset self.tokens_per_sample = tokens_per_sample self.context_window = context_window self.pad_idx = pad_idx self.prev_tokens = np.empty([0]) def __getitem__(self, index): return self.dataset[index] def __len__(self): return len(self.dataset) def collater(self, samples) -> Dict: sample = self.dataset.collater(samples) pad = self.pad_idx max_sample_len = self.tokens_per_sample + self.context_window bsz, tsz = sample["net_input"]["src_tokens"].shape start_idxs = [0] * bsz toks = sample["net_input"]["src_tokens"] lengths = sample["net_input"]["src_lengths"] tgt = sample["target"] new_toks = np.empty([bsz, tsz + self.context_window], dtype=np.int64) new_tgt = np.full([bsz, tsz + self.context_window], pad, dtype=np.int64) sample_lens = toks.ne(pad).long().sum(dim=1).cpu() for i in range(bsz): sample_len = sample_lens[i] extra = len(self.prev_tokens) + sample_len - max_sample_len if extra > 0: self.prev_tokens = self.prev_tokens[extra:] pads = np.full(self.context_window - len(self.prev_tokens), pad) new_toks[i] = np.concatenate([self.prev_tokens, toks[i].numpy(), pads]) new_tgt[ i, len(self.prev_tokens) : len(self.prev_tokens) + len(tgt[i]) ] = tgt[i] start_idxs[i] = len(self.prev_tokens) lengths[i] += len(self.prev_tokens) self.prev_tokens = new_toks[i][new_toks[i] != pad][-self.context_window :] sample["net_input"]["src_tokens"] = torch.from_numpy(new_toks) sample["target"] = torch.from_numpy(new_tgt) sample["start_indices"] = start_idxs return sample def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): return self.dataset.size(index) def ordered_indices(self): # NOTE we don't shuffle the data to retain access to the previous dataset elements return np.arange(len(self.dataset)) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): return self.dataset.prefetch(indices)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/lm_context_window_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch.nn.functional as F from fairseq.data import BaseWrapperDataset from fairseq.data.data_utils import get_buckets, get_bucketed_sizes class BucketPadLengthDataset(BaseWrapperDataset): """ Bucket and pad item lengths to the nearest bucket size. This can be used to reduce the number of unique batch shapes, which is important on TPUs since each new batch shape requires a recompilation. Args: dataset (FairseqDatset): dataset to bucket sizes (List[int]): all item sizes num_buckets (int): number of buckets to create pad_idx (int): padding symbol left_pad (bool): if True, pad on the left; otherwise right pad """ def __init__( self, dataset, sizes, num_buckets, pad_idx, left_pad, tensor_key=None, ): super().__init__(dataset) self.pad_idx = pad_idx self.left_pad = left_pad assert num_buckets > 0 self.buckets = get_buckets(sizes, num_buckets) self._bucketed_sizes = get_bucketed_sizes(sizes, self.buckets) self._tensor_key = tensor_key def _set_tensor(self, item, val): if self._tensor_key is None: return val item[self._tensor_key] = val return item def _get_tensor(self, item): if self._tensor_key is None: return item return item[self._tensor_key] def _pad(self, tensor, bucket_size, dim=-1): num_pad = bucket_size - tensor.size(dim) return F.pad( tensor, (num_pad if self.left_pad else 0, 0 if self.left_pad else num_pad), value=self.pad_idx, ) def __getitem__(self, index): item = self.dataset[index] bucket_size = self._bucketed_sizes[index] tensor = self._get_tensor(item) padded = self._pad(tensor, bucket_size) return self._set_tensor(item, padded) @property def sizes(self): return self._bucketed_sizes def num_tokens(self, index): return self._bucketed_sizes[index] def size(self, index): return self._bucketed_sizes[index]
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/bucket_pad_length_dataset.py
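# --- usage sketch (added for illustration; not part of the original fairseq file) ---
# What the _pad helper above does for one item: pad up to the bucket size with the
# pad index (on the right here, on the left when left_pad=True), so every item that
# falls into the same bucket ends up with an identical shape.
import torch
import torch.nn.functional as F

pad_idx, bucket_size = 1, 5
item = torch.tensor([4, 5, 6])
num_pad = bucket_size - item.size(-1)
print(F.pad(item, (0, num_pad), value=pad_idx))  # tensor([4, 5, 6, 1, 1])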
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import torch from fairseq.data import FairseqDataset, plasma_utils from fairseq.data.indexed_dataset import best_fitting_int_dtype from typing import Tuple class TokenBlockDataset(FairseqDataset): """Break a Dataset of tokens into blocks. Args: dataset (~torch.utils.data.Dataset): dataset to break into blocks sizes (List[int]): sentence lengths (required for 'complete' and 'eos') block_size (int): maximum block size (ignored in 'eos' break mode) break_mode (str, optional): Mode used for breaking tokens. Values can be one of: - 'none': break tokens into equally sized blocks (up to block_size) - 'complete': break tokens into blocks (up to block_size) such that blocks contains complete sentences, although block_size may be exceeded if some sentences exceed block_size - 'complete_doc': similar to 'complete' mode, but do not cross document boundaries - 'eos': each block contains one sentence (block_size is ignored) include_targets (bool, optional): return next tokens as targets (default: False). document_sep_len (int, optional): document separator size (required for 'complete_doc' break mode). Typically 1 if the sentences have eos and 0 otherwise. """ def __init__( self, dataset, sizes, block_size, pad, eos, break_mode=None, include_targets=False, document_sep_len=1, use_plasma_view=False, split_path=None, plasma_path=None, ): super().__init__() self.dataset = dataset self.pad = pad self.eos = eos self.include_targets = include_targets assert len(dataset) > 0 assert len(dataset) == len(sizes) _sizes, block_to_dataset_index, slice_indices = self._build_slice_indices( sizes, break_mode, document_sep_len, block_size ) if use_plasma_view: plasma_id = (block_size, document_sep_len, str(break_mode), len(dataset)) self._slice_indices = plasma_utils.PlasmaView( slice_indices, split_path, (plasma_id, 0), plasma_path=plasma_path ) self._sizes = plasma_utils.PlasmaView( _sizes, split_path, (plasma_id, 1), plasma_path=plasma_path ) self._block_to_dataset_index = plasma_utils.PlasmaView( block_to_dataset_index, split_path, (plasma_id, 2), plasma_path=plasma_path, ) else: self._slice_indices = plasma_utils.PlasmaArray(slice_indices) self._sizes = plasma_utils.PlasmaArray(_sizes) self._block_to_dataset_index = plasma_utils.PlasmaArray( block_to_dataset_index ) @staticmethod def _build_slice_indices( sizes, break_mode, document_sep_len, block_size ) -> Tuple[np.ndarray]: """Use token_block_utils_fast to build arrays for indexing into self.dataset""" try: from fairseq.data.token_block_utils_fast import ( _get_slice_indices_fast, _get_block_to_dataset_index_fast, ) except ImportError: raise ImportError( "Please build Cython components with: `pip install --editable .` " "or `python setup.py build_ext --inplace`" ) if isinstance(sizes, list): sizes = np.array(sizes, dtype=np.int64) else: if torch.is_tensor(sizes): sizes = sizes.numpy() sizes = sizes.astype(np.int64) break_mode = break_mode if break_mode is not None else "none" # For "eos" break-mode, block_size is not required parameters. 
if break_mode == "eos" and block_size is None: block_size = 0 slice_indices = _get_slice_indices_fast( sizes, str(break_mode), block_size, document_sep_len ) _sizes = slice_indices[:, 1] - slice_indices[:, 0] # build index mapping block indices to the underlying dataset indices if break_mode == "eos": # much faster version for eos break mode block_to_dataset_index = np.stack( [ np.arange(len(sizes)), # starting index in dataset np.zeros( len(sizes), dtype=np.compat.long ), # starting offset within starting index np.arange(len(sizes)), # ending index in dataset ], 1, ) else: block_to_dataset_index = _get_block_to_dataset_index_fast( sizes, slice_indices, ) size_dtype = np.uint16 if block_size < 65535 else np.uint32 num_tokens = slice_indices[-1].max() slice_indices_dtype = best_fitting_int_dtype(num_tokens) slice_indices = slice_indices.astype(slice_indices_dtype) _sizes = _sizes.astype(size_dtype) block_to_dataset_index = block_to_dataset_index.astype(slice_indices_dtype) return _sizes, block_to_dataset_index, slice_indices @property def slice_indices(self): return self._slice_indices.array @property def sizes(self): return self._sizes.array @property def block_to_dataset_index(self): return self._block_to_dataset_index.array def attr(self, attr: str, index: int): start_ds_idx, _, _ = self.block_to_dataset_index[index] return self.dataset.attr(attr, start_ds_idx) def __getitem__(self, index): start_ds_idx, start_offset, end_ds_idx = self.block_to_dataset_index[index] buffer = torch.cat( [self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)] ) slice_s, slice_e = self.slice_indices[index] length = slice_e - slice_s s, e = start_offset, start_offset + length item = buffer[s:e] if self.include_targets: # *target* is the original sentence (=item) # *source* is shifted right by 1 (maybe left-padded with eos) # *past_target* is shifted right by 2 (left-padded as needed) if s == 0: source = torch.cat([item.new([self.eos]), buffer[0 : e - 1]]) past_target = torch.cat( [item.new([self.pad, self.eos]), buffer[0 : e - 2]] ) else: source = buffer[s - 1 : e - 1] if s == 1: past_target = torch.cat([item.new([self.eos]), buffer[0 : e - 2]]) else: past_target = buffer[s - 2 : e - 2] return source, item, past_target return item def __len__(self): return len(self.slice_indices) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): self.dataset.prefetch( { ds_idx for index in indices for start_ds_idx, _, end_ds_idx in [self.block_to_dataset_index[index]] for ds_idx in range(start_ds_idx, end_ds_idx + 1) } )
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/token_block_dataset.py
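# --- usage sketch (added for illustration; not part of the original fairseq file) ---
# The include_targets logic from __getitem__ above, reproduced on a plain tensor:
# *target* is the block itself and *source* is the same span shifted right by one,
# with eos prepended when the block starts at the beginning of the buffer (s == 0).
import torch

eos = 2
buffer = torch.tensor([4, 5, 6, 7, 8])
s, e = 0, 4
item = buffer[s:e]                                      # target: tensor([4, 5, 6, 7])
source = torch.cat([item.new([eos]), buffer[0:e - 1]])  # shifted-right input
print(source)                                           # tensor([2, 4, 5, 6])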
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Optional import torch from . import FairseqDataset class TransformEosLangPairDataset(FairseqDataset): """A :class:`~fairseq.data.FairseqDataset` wrapper that transform bos on collated samples of language pair dataset. Note that the transformation is applied in :func:`collater`. Args: dataset (~fairseq.data.FairseqDataset): dataset that collates sample into LanguagePairDataset schema src_eos (int): original source end-of-sentence symbol index to be replaced new_src_eos (int, optional): new end-of-sentence symbol index to replace source eos symbol tgt_bos (int, optional): original target beginning-of-sentence symbol index to be replaced new_tgt_bos (int, optional): new beginning-of-sentence symbol index to replace at the beginning of 'prev_output_tokens' """ def __init__( self, dataset: FairseqDataset, src_eos: int, new_src_eos: Optional[int] = None, tgt_bos: Optional[int] = None, new_tgt_bos: Optional[int] = None, ): self.dataset = dataset self.src_eos = src_eos self.new_src_eos = new_src_eos self.tgt_bos = tgt_bos self.new_tgt_bos = new_tgt_bos def __getitem__(self, index): return self.dataset[index] def __len__(self): return len(self.dataset) def collater(self, samples, **extra_args): samples = self.dataset.collater(samples, **extra_args) if len(samples) == 0: return samples if 'net_input' not in samples: return samples if self.new_src_eos is not None: if self.dataset.left_pad_source: assert ( samples["net_input"]["src_tokens"][:, -1] != self.src_eos ).sum() == 0 samples["net_input"]["src_tokens"][:, -1] = self.new_src_eos else: eos_idx = samples["net_input"]["src_lengths"] - 1 assert ( samples["net_input"]["src_tokens"][ torch.arange(eos_idx.size(0)), eos_idx ] != self.src_eos ).sum() == 0 eos_idx = eos_idx.resize_(len(samples["net_input"]["src_lengths"]), 1) samples["net_input"]["src_tokens"].scatter_( 1, eos_idx, self.new_src_eos ) if ( self.new_tgt_bos is not None and "prev_output_tokens" in samples["net_input"] ): if self.dataset.left_pad_target: # TODO: support different padding direction on target side raise NotImplementedError( "TransformEosLangPairDataset does not implement --left-pad-target True option" ) else: assert ( samples["net_input"]["prev_output_tokens"][:, 0] != self.tgt_bos ).sum() == 0 samples["net_input"]["prev_output_tokens"][:, 0] = self.new_tgt_bos return samples def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): return self.dataset.size(index) @property def sizes(self): # dataset.sizes can be a dynamically computed sizes: return self.dataset.sizes def ordered_indices(self): return self.dataset.ordered_indices() @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): return self.dataset.prefetch(indices)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/transform_eos_lang_pair_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from . import BaseWrapperDataset, data_utils from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel class AddTargetDataset(BaseWrapperDataset): def __init__( self, dataset, labels, pad, eos, batch_targets, process_label=None, label_len_fn=None, add_to_input=False, text_compression_level=TextCompressionLevel.none ): super().__init__(dataset) self.labels = labels self.batch_targets = batch_targets self.pad = pad self.eos = eos self.process_label = process_label self.label_len_fn = label_len_fn self.add_to_input = add_to_input self.text_compressor = TextCompressor(level=text_compression_level) def get_label(self, index, process_fn=None): lbl = self.labels[index] lbl = self.text_compressor.decompress(lbl) return lbl if process_fn is None else process_fn(lbl) def __getitem__(self, index): item = self.dataset[index] item["label"] = self.get_label(index, process_fn=self.process_label) return item def size(self, index): sz = self.dataset.size(index) own_sz = self.label_len_fn(self.get_label(index)) return sz, own_sz def collater(self, samples): collated = self.dataset.collater(samples) if len(collated) == 0: return collated indices = set(collated["id"].tolist()) target = [s["label"] for s in samples if s["id"] in indices] if self.batch_targets: collated["target_lengths"] = torch.LongTensor([len(t) for t in target]) target = data_utils.collate_tokens(target, pad_idx=self.pad, left_pad=False) collated["ntokens"] = collated["target_lengths"].sum().item() else: collated["ntokens"] = sum([len(t) for t in target]) collated["target"] = target if self.add_to_input: eos = target.new_full((target.size(0), 1), self.eos) collated["target"] = torch.cat([target, eos], dim=-1).long() collated["net_input"]["prev_output_tokens"] = torch.cat( [eos, target], dim=-1 ).long() collated["ntokens"] += target.size(0) return collated def filter_indices_by_size(self, indices, max_sizes): indices, ignored = data_utils._filter_by_size_dynamic( indices, self.size, max_sizes ) return indices, ignored
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/add_target_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.data import Dictionary


class MaskedLMDictionary(Dictionary):
    """
    Dictionary for Masked Language Modelling tasks. This extends Dictionary by
    adding the mask symbol.
    """

    def __init__(
        self,
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        mask="<mask>",
    ):
        super().__init__(pad=pad, eos=eos, unk=unk)
        self.mask_word = mask
        self.mask_index = self.add_symbol(mask)
        self.nspecial = len(self.symbols)

    def mask(self):
        """Helper to get index of mask symbol"""
        return self.mask_index


class BertDictionary(MaskedLMDictionary):
    """
    Dictionary for BERT task. This extends MaskedLMDictionary by adding support
    for cls and sep symbols.
    """

    def __init__(
        self,
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        mask="<mask>",
        cls="<cls>",
        sep="<sep>",
    ):
        super().__init__(pad=pad, eos=eos, unk=unk, mask=mask)
        self.cls_word = cls
        self.sep_word = sep
        self.cls_index = self.add_symbol(cls)
        self.sep_index = self.add_symbol(sep)
        self.nspecial = len(self.symbols)

    def cls(self):
        """Helper to get index of cls symbol"""
        return self.cls_index

    def sep(self):
        """Helper to get index of sep symbol"""
        return self.sep_index
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/legacy/masked_lm_dictionary.py
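# --- usage sketch (added for illustration; not part of the original fairseq file) ---
# MaskedLMDictionary appends <mask> right after the standard specials, and
# BertDictionary adds <cls>/<sep> on top of that; nspecial is re-computed so the
# new symbols count as specials too. Assumes fairseq is importable.
from fairseq.data.legacy import BertDictionary, MaskedLMDictionary

d = MaskedLMDictionary()
print(d.mask(), d.nspecial)             # mask index and number of specials

bd = BertDictionary()
print(bd.cls(), bd.sep(), bd.nspecial)  # cls/sep indices and updated special count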
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import numpy as np import torch from fairseq.data import FairseqDataset class BlockPairDataset(FairseqDataset): """Break a Dataset of tokens into sentence pair blocks for next sentence prediction as well as masked language model. High-level logics are: 1. break input tensor to tensor blocks 2. pair the blocks with 50% next sentence and 50% random sentence 3. return paired blocks as well as related segment labels Args: dataset (~torch.utils.data.Dataset): dataset to break into blocks sizes: array of sentence lengths dictionary: dictionary for the task block_size: maximum block size break_mode: mode for breaking copurs into block pairs. currently we support 2 modes doc: respect document boundaries and each part of the pair should belong to on document none: don't respect any boundary and cut tokens evenly short_seq_prob: probability for generating shorter block pairs doc_break_size: Size for empty line separating documents. Typically 1 if the sentences have eos, 0 otherwise. """ def __init__( self, dataset, dictionary, sizes, block_size, break_mode="doc", short_seq_prob=0.1, doc_break_size=1, ): super().__init__() self.dataset = dataset self.pad = dictionary.pad() self.eos = dictionary.eos() self.cls = dictionary.cls() self.mask = dictionary.mask() self.sep = dictionary.sep() self.break_mode = break_mode self.dictionary = dictionary self.short_seq_prob = short_seq_prob self.block_indices = [] assert len(dataset) == len(sizes) if break_mode == "doc": cur_doc = [] for sent_id, sz in enumerate(sizes): assert doc_break_size == 0 or sz != 0, ( "when doc_break_size is non-zero, we expect documents to be" "separated by a blank line with a single eos." ) # empty line as document separator if sz == doc_break_size: if len(cur_doc) == 0: continue self.block_indices.append(cur_doc) cur_doc = [] else: cur_doc.append(sent_id) max_num_tokens = block_size - 3 # Account for [CLS], [SEP], [SEP] self.sent_pairs = [] self.sizes = [] for doc_id, doc in enumerate(self.block_indices): self._generate_sentence_pair(doc, doc_id, max_num_tokens, sizes) elif break_mode is None or break_mode == "none": # each block should have half of the block size since we are constructing block pair sent_length = (block_size - 3) // 2 total_len = sum(dataset.sizes) length = math.ceil(total_len / sent_length) def block_at(i): start = i * sent_length end = min(start + sent_length, total_len) return (start, end) sent_indices = np.array([block_at(i) for i in range(length)]) sent_sizes = np.array([e - s for s, e in sent_indices]) dataset_index = self._sent_to_dataset_index(sent_sizes) # pair sentences self._pair_sentences(dataset_index) else: raise ValueError("Invalid break_mode: " + break_mode) def _pair_sentences(self, dataset_index): """ Give a list of evenly cut blocks/sentences, pair these sentences with 50% consecutive sentences and 50% random sentences. 
This is used for none break mode """ # pair sentences for sent_id, sent in enumerate(dataset_index): next_sent_label = ( 1 if np.random.rand() > 0.5 and sent_id != len(dataset_index) - 1 else 0 ) if next_sent_label: next_sent = dataset_index[sent_id + 1] else: next_sent = dataset_index[ self._skip_sampling(len(dataset_index), [sent_id, sent_id + 1]) ] self.sent_pairs.append((sent, next_sent, next_sent_label)) # The current blocks don't include the special tokens but the # sizes already account for this self.sizes.append(3 + sent[3] + next_sent[3]) def _sent_to_dataset_index(self, sent_sizes): """ Build index mapping block indices to the underlying dataset indices """ dataset_index = [] ds_idx, ds_remaining = -1, 0 for to_consume in sent_sizes: sent_size = to_consume if ds_remaining == 0: ds_idx += 1 ds_remaining = sent_sizes[ds_idx] start_ds_idx = ds_idx start_offset = sent_sizes[ds_idx] - ds_remaining while to_consume > ds_remaining: to_consume -= ds_remaining ds_idx += 1 ds_remaining = sent_sizes[ds_idx] ds_remaining -= to_consume dataset_index.append( ( start_ds_idx, # starting index in dataset start_offset, # starting offset within starting index ds_idx, # ending index in dataset sent_size, # sentence length ) ) assert ds_remaining == 0 assert ds_idx == len(self.dataset) - 1 return dataset_index def _generate_sentence_pair(self, doc, doc_id, max_num_tokens, sizes): """ Go through a single document and genrate sentence paris from it """ current_chunk = [] current_length = 0 curr = 0 # To provide more randomness, we decrease target seq length for parts of # samples (10% by default). Note that max_num_tokens is the hard threshold # for batching and will never be changed. target_seq_length = max_num_tokens if np.random.random() < self.short_seq_prob: target_seq_length = np.random.randint(2, max_num_tokens) # loop through all sentences in document while curr < len(doc): sent_id = doc[curr] current_chunk.append(sent_id) current_length = sum(sizes[current_chunk]) # split chunk and generate pair when exceed target_seq_length or # finish the loop if curr == len(doc) - 1 or current_length >= target_seq_length: # split the chunk into 2 parts a_end = 1 if len(current_chunk) > 2: a_end = np.random.randint(1, len(current_chunk) - 1) sent_a = current_chunk[:a_end] len_a = sum(sizes[sent_a]) # generate next sentence label, note that if there is only 1 sentence # in current chunk, label is always 0 next_sent_label = ( 1 if np.random.rand() > 0.5 and len(current_chunk) != 1 else 0 ) if not next_sent_label: # if next sentence label is 0, sample sent_b from a random doc target_b_length = target_seq_length - len_a rand_doc_id = self._skip_sampling(len(self.block_indices), [doc_id]) random_doc = self.block_indices[rand_doc_id] random_start = np.random.randint(0, len(random_doc)) sent_b = [] len_b = 0 for j in range(random_start, len(random_doc)): sent_b.append(random_doc[j]) len_b = sum(sizes[sent_b]) if len_b >= target_b_length: break # return the second part of the chunk since it's not used num_unused_segments = len(current_chunk) - a_end curr -= num_unused_segments else: # if next sentence label is 1, use the second part of chunk as sent_B sent_b = current_chunk[a_end:] len_b = sum(sizes[sent_b]) # currently sent_a and sent_B may be longer than max_num_tokens, # truncate them and return block idx and offsets for them sent_a, sent_b = self._truncate_sentences( sent_a, sent_b, max_num_tokens ) self.sent_pairs.append((sent_a, sent_b, next_sent_label)) self.sizes.append(3 + sent_a[3] + sent_b[3]) 
current_chunk = [] curr += 1 def _skip_sampling(self, total, skip_ids): """ Generate a random integer which is not in skip_ids. Sample range is [0, total) TODO: ids in skip_ids should be consecutive, we can extend it to more generic version later """ rand_id = np.random.randint(total - len(skip_ids)) return rand_id if rand_id < min(skip_ids) else rand_id + len(skip_ids) def _truncate_sentences(self, sent_a, sent_b, max_num_tokens): """ Trancate a pair of sentence to limit total length under max_num_tokens Logics: 1. Truncate longer sentence 2. Tokens to be truncated could be at the beginning or the end of the sentnce Returns: Truncated sentences represented by dataset idx """ len_a, len_b = sum(self.dataset.sizes[sent_a]), sum(self.dataset.sizes[sent_b]) front_cut_a = front_cut_b = end_cut_a = end_cut_b = 0 while True: total_length = ( len_a + len_b - front_cut_a - front_cut_b - end_cut_a - end_cut_b ) if total_length <= max_num_tokens: break if len_a - front_cut_a - end_cut_a > len_b - front_cut_b - end_cut_b: if np.random.rand() < 0.5: front_cut_a += 1 else: end_cut_a += 1 else: if np.random.rand() < 0.5: front_cut_b += 1 else: end_cut_b += 1 # calculate ds indices as well as offsets and return truncated_sent_a = self._cut_sentence(sent_a, front_cut_a, end_cut_a) truncated_sent_b = self._cut_sentence(sent_b, front_cut_b, end_cut_b) return truncated_sent_a, truncated_sent_b def _cut_sentence(self, sent, front_cut, end_cut): """ Cut a sentence based on the numbers of tokens to be cut from beginning and end Represent the sentence as dataset idx and return """ start_ds_idx, end_ds_idx, offset = sent[0], sent[-1], 0 target_len = sum(self.dataset.sizes[sent]) - front_cut - end_cut while front_cut > 0: if self.dataset.sizes[start_ds_idx] > front_cut: offset += front_cut break else: front_cut -= self.dataset.sizes[start_ds_idx] start_ds_idx += 1 while end_cut > 0: if self.dataset.sizes[end_ds_idx] > end_cut: break else: end_cut -= self.dataset.sizes[end_ds_idx] end_ds_idx -= 1 return start_ds_idx, offset, end_ds_idx, target_len def _fetch_block(self, start_ds_idx, offset, end_ds_idx, length): """ Fetch a block of tokens based on its dataset idx """ buffer = torch.cat( [self.dataset[idx] for idx in range(start_ds_idx, end_ds_idx + 1)] ) s, e = offset, offset + length return buffer[s:e] def __getitem__(self, index): block1, block2, next_sent_label = self.sent_pairs[index] block1 = self._fetch_block(*block1) block2 = self._fetch_block(*block2) return block1, block2, next_sent_label def __len__(self): return len(self.sizes) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): prefetch_idx = set() for index in indices: for block1, block2, _ in [self.sent_pairs[index]]: for ds_idx in range(block1[0], block1[2] + 1): prefetch_idx.add(ds_idx) for ds_idx in range(block2[0], block2[2] + 1): prefetch_idx.add(ds_idx) self.dataset.prefetch(prefetch_idx)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/legacy/block_pair_dataset.py
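# --- usage sketch (added for illustration; not part of the original fairseq file) ---
# The _skip_sampling trick above in isolation: draw from a range shrunk by
# len(skip_ids) and shift any draw that lands at or past the skipped block.
# It relies on skip_ids being consecutive, as the docstring notes.
import numpy as np

def skip_sampling(total, skip_ids):
    rand_id = np.random.randint(total - len(skip_ids))
    return rand_id if rand_id < min(skip_ids) else rand_id + len(skip_ids)

np.random.seed(0)
samples = {skip_sampling(10, [3, 4]) for _ in range(200)}
assert 3 not in samples and 4 not in samples
print(sorted(samples))  # every value in 0..9 except 3 and 4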
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .block_pair_dataset import BlockPairDataset from .masked_lm_dataset import MaskedLMDataset from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary __all__ = [ "BertDictionary", "BlockPairDataset", "MaskedLMDataset", "MaskedLMDictionary", ]
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/legacy/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import Dict, List, Tuple import numpy as np import torch from fairseq.data import Dictionary, FairseqDataset, data_utils from fairseq.data.concat_dataset import ConcatDataset from fairseq.data.legacy.block_pair_dataset import BlockPairDataset from fairseq.data.token_block_dataset import TokenBlockDataset class MaskedLMDataset(FairseqDataset): """ A wrapper Dataset for masked language modelling. The dataset wraps around TokenBlockDataset or BlockedPairDataset and creates a batch where the input blocks are masked according to the specified masking probability. Additionally the batch can also contain sentence level targets if this is specified. Args: dataset: Dataset which generates blocks of data. Only BlockPairDataset and TokenBlockDataset are supported. sizes: Sentence lengths vocab: Dictionary with the vocabulary and special tokens. pad_idx: Id of padding token in dictionary mask_idx: Id of mask token in dictionary classif_token_idx: Id of classification token in dictionary. This is the token associated with the sentence embedding (Eg: CLS for BERT) sep_token_idx: Id of separator token in dictionary (Eg: SEP in BERT) seed: Seed for random number generator for reproducibility. shuffle: Shuffle the elements before batching. has_pairs: Specifies whether the underlying dataset generates a pair of blocks along with a sentence_target or not. Setting it to True assumes that the underlying dataset generates a label for the pair of sentences which is surfaced as sentence_target. The default value assumes a single block with no sentence target. segment_id: An optional segment id for filling in the segment labels when we are in the single block setting (Eg: XLM). Default is 0. masking_ratio: specifies what percentage of the blocks should be masked. masking_prob: specifies the probability of a given token being replaced with the "MASK" token. random_token_prob: specifies the probability of a given token being replaced by a random token from the vocabulary. 
""" def __init__( self, dataset: FairseqDataset, sizes: np.ndarray, vocab: Dictionary, pad_idx: int, mask_idx: int, classif_token_idx: int, sep_token_idx: int, seed: int = 1, shuffle: bool = True, has_pairs: bool = True, segment_id: int = 0, masking_ratio: float = 0.15, masking_prob: float = 0.8, random_token_prob: float = 0.1, ): # Make sure the input datasets are the ones supported assert ( isinstance(dataset, TokenBlockDataset) or isinstance(dataset, BlockPairDataset) or isinstance(dataset, ConcatDataset) ), ( "MaskedLMDataset only wraps TokenBlockDataset or BlockPairDataset or " "ConcatDataset" ) self.dataset = dataset self.sizes = np.array(sizes) self.vocab = vocab self.pad_idx = pad_idx self.mask_idx = mask_idx self.classif_token_idx = classif_token_idx self.sep_token_idx = sep_token_idx self.shuffle = shuffle self.seed = seed self.has_pairs = has_pairs self.segment_id = segment_id self.masking_ratio = masking_ratio self.masking_prob = masking_prob self.random_token_prob = random_token_prob # If we have only one block then sizes needs to be updated to include # the classification token if not has_pairs: self.sizes = self.sizes + 1 def __getitem__(self, index: int): # if has_pairs, then expect 2 blocks and a sentence target if self.has_pairs: (block_one, block_two, sentence_target) = self.dataset[index] else: block_one = self.dataset[index] return { "id": index, "block_one": block_one, "block_two": block_two if self.has_pairs else None, "sentence_target": sentence_target if self.has_pairs else None, } def __len__(self): return len(self.dataset) def _mask_block( self, sentence: np.ndarray, mask_idx: int, pad_idx: int, dictionary_token_range: Tuple, ): """ Mask tokens for Masked Language Model training Samples mask_ratio tokens that will be predicted by LM. Note:This function may not be efficient enough since we had multiple conversions between np and torch, we can replace them with torch operators later. Args: sentence: 1d tensor to be masked mask_idx: index to use for masking the sentence pad_idx: index to use for masking the target for tokens we aren't predicting dictionary_token_range: range of indices in dictionary which can be used for random word replacement (e.g. without special characters) Return: masked_sent: masked sentence target: target with words which we are not predicting replaced by pad_idx """ masked_sent = np.copy(sentence) sent_length = len(sentence) mask_num = math.ceil(sent_length * self.masking_ratio) mask = np.random.choice(sent_length, mask_num, replace=False) target = np.copy(sentence) for i in range(sent_length): if i in mask: rand = np.random.random() # replace with mask if probability is less than masking_prob # (Eg: 0.8) if rand < self.masking_prob: masked_sent[i] = mask_idx # replace with random token if probability is less than # masking_prob + random_token_prob (Eg: 0.9) elif rand < (self.masking_prob + self.random_token_prob): # sample random token from dictionary masked_sent[i] = np.random.randint( dictionary_token_range[0], dictionary_token_range[1] ) else: target[i] = pad_idx return masked_sent, target def _collate(self, samples: List[Dict], pad_idx: int, eos_idx: int): """ Does the heavy lifting for creating a batch from the input list of examples. The logic is as follows: 1. Mask the input blocks. In case has_pair is True then we have 2 blocks to mask. 2. Prepend the first masked block tensor with the special token used as sentence embedding. Eg: CLS in BERT. This happens irrespective of the value of has_pair. 3. 
If has_pair is True, then append the first masked block with the special separator token (eg: SEP for BERT) and compute segment label accordingly. In this case, also append the second masked block with this special separator token and compute its segment label. 4. For the targets tensor, prepend and append with padding index accordingly. 5. Concatenate all tensors. """ if len(samples) == 0: return {} # To ensure determinism, we reset the state of the PRNG after every # batch based on the seed and the first id of the batch. This ensures # that across epochs we get the same mask for the same example. This # is needed for reproducibility and is how BERT does masking # TODO: Can we add deteminism without this constraint? with data_utils.numpy_seed(self.seed + samples[0]["id"]): for s in samples: # token range is needed for replacing with random token during # masking token_range = (self.vocab.nspecial, len(self.vocab)) # mask according to specified probabilities. masked_blk_one, masked_tgt_one = self._mask_block( s["block_one"], self.mask_idx, self.pad_idx, token_range, ) tokens = np.concatenate([[self.classif_token_idx], masked_blk_one]) targets = np.concatenate([[self.pad_idx], masked_tgt_one]) segments = np.ones(len(tokens)) * self.segment_id # if has_pairs is True then we need to add the SEP token to both # the blocks after masking and re-compute segments based on the new # lengths. if self.has_pairs: tokens_one = np.concatenate([tokens, [self.sep_token_idx]]) targets_one = np.concatenate([targets, [self.pad_idx]]) masked_blk_two, masked_tgt_two = self._mask_block( s["block_two"], self.mask_idx, self.pad_idx, token_range ) tokens_two = np.concatenate([masked_blk_two, [self.sep_token_idx]]) targets_two = np.concatenate([masked_tgt_two, [self.pad_idx]]) # block + 1 sep + 1 special (CLS) segments_one = np.zeros(len(tokens_one)) # block + 1 sep segments_two = np.ones(len(tokens_two)) tokens = np.concatenate([tokens_one, tokens_two]) targets = np.concatenate([targets_one, targets_two]) segments = np.concatenate([segments_one, segments_two]) s["source"] = torch.LongTensor(tokens) s["segment_labels"] = torch.LongTensor(segments) s["lm_target"] = torch.LongTensor(targets) def merge(key): return data_utils.collate_tokens( [s[key] for s in samples], pad_idx, eos_idx, left_pad=False ) return { "id": torch.LongTensor([s["id"] for s in samples]), "ntokens": sum(len(s["source"]) for s in samples), "net_input": { "src_tokens": merge("source"), "segment_labels": merge("segment_labels"), }, "lm_target": merge("lm_target"), "sentence_target": torch.LongTensor([s["sentence_target"] for s in samples]) if self.has_pairs else None, "nsentences": len(samples), } def collater(self, samples: List[Dict]): """Merge a list of samples to form a mini-batch. Args: samples (List[dict]): samples to collate Returns: dict: a mini-batch of data """ return self._collate(samples, self.vocab.pad(), self.vocab.eos()) def num_tokens(self, index: int): """ Return the number of tokens in a sample. This value is used to enforce max-tokens during batching. """ return self.sizes[index] def size(self, index: int): """ Return an example's size as a float or tuple. This value is used when filtering a dataset with max-positions. """ return self.sizes[index] def ordered_indices(self): """ Return an ordered list of indices. Batches will be constructed based on this order. 
""" if self.shuffle: return np.random.permutation(len(self)) else: order = [np.arange(len(self))] order.append(self.sizes) return np.lexsort(order) @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): self.dataset.prefetch(indices)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/legacy/masked_lm_dataset.py
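# --- usage sketch (added for illustration; not part of the original fairseq file) ---
# The masking recipe from _mask_block above, in isolation: select ~masking_ratio of
# the positions, replace 80% of those with the mask index, 10% with a random
# vocabulary token, leave 10% unchanged, and set the target to pad_idx everywhere
# the model is not asked to predict.
import math
import numpy as np

np.random.seed(0)
sentence = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
mask_idx, pad_idx, token_range = 4, 1, (5, 100)
masked, target = np.copy(sentence), np.copy(sentence)
chosen = np.random.choice(len(sentence), math.ceil(0.15 * len(sentence)), replace=False)
for i in range(len(sentence)):
    if i in chosen:
        r = np.random.random()
        if r < 0.8:
            masked[i] = mask_idx                         # predict the original token
        elif r < 0.9:
            masked[i] = np.random.randint(*token_range)  # random replacement
    else:
        target[i] = pad_idx                              # ignored by the loss
print(masked)
print(target)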
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import hashlib import logging import math import numpy as np from fairseq.data import SampledMultiDataset from .sampled_multi_dataset import CollateFormat, default_virtual_size_func logger = logging.getLogger(__name__) class SampledMultiEpochDataset(SampledMultiDataset): """Samples from multiple sub-datasets according to sampling ratios using virtual epoch sizes to speed up dataloading. Args: datasets ( List[~torch.utils.data.Dataset] or OrderedDict[str, ~torch.utils.data.Dataset] ): datasets sampling_ratios (List[float]): list of probability of each dataset to be sampled (default: None, which corresponds to concating all dataset together). seed (int): RNG seed to use (default: 2). epoch (int): starting epoch number (default: 1). eval_key (str, optional): a key used at evaluation time that causes this instance to pass-through batches from *datasets[eval_key]*. collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures the collater to output batches of data mixed from all sub-datasets, and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys of sub-datasets. Note that not all sub-datasets will present in a single batch in both formats. virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func). split (str): the split of the data, e.g. 'train', 'valid' or 'test'. virtual_epoch_size (int): virtual epoch size, the dataset will go through the data by this virtual epoch size one by one to speed up data loading, e.g. indicing and filtering can be performed whenever a virtual epoch is loaded without waiting for the whole dataset to be loaded. shared_collater (bool): whether or not to all sub-datasets have the same collater. shard_epoch (int): the real epoch number for shard selection. shuffle (bool): whether or not to shuffle data (default: True). 
""" def __init__( self, datasets, sampling_ratios=None, seed=2, epoch=1, eval_key=None, collate_format=CollateFormat.single, virtual_size=default_virtual_size_func, split="", virtual_epoch_size=None, shared_collater=False, shard_epoch=1, shuffle=True, ): self.virtual_epoch_size = virtual_epoch_size self._current_epoch_start_index = None self._random_global_indices = None self.shard_epoch = shard_epoch if shard_epoch is not None else 1 self.load_next_shard = None self._epoch_sizes = None super().__init__( datasets=datasets, sampling_ratios=sampling_ratios, seed=seed, epoch=epoch, eval_key=eval_key, collate_format=collate_format, virtual_size=virtual_size, split=split, shared_collater=shared_collater, shuffle=shuffle, ) def _setup(self, epoch): self.virtual_epoch_size = ( self.virtual_epoch_size if self.virtual_epoch_size is not None else self.virtual_size ) if self.virtual_epoch_size > self.virtual_size: logger.warning( f"virtual epoch size {self.virtual_epoch_size} " f"is greater than virtual dataset size {self.virtual_size}" ) self.virtual_epoch_size = self.virtual_size self.num_virtual_epochs = math.ceil(self.virtual_size / self.virtual_epoch_size) self._current_epoch_start_index = self._get_epoch_start_index(epoch) logger.info( f"virtual epoch size {self.virtual_epoch_size}; virtual dataset size {self.virtual_size}" ) def _map_epoch_index_to_global(self, index): index = self._current_epoch_start_index + index # add randomness return self._random_global_indices[index] @property def sizes(self): if self._epoch_sizes is not None: return self._epoch_sizes _sizes = super().sizes indices = self._random_global_indices[ self._current_epoch_start_index : self._current_epoch_start_index + len(self) ] self._epoch_sizes = _sizes[indices] # del super()._sizes to save memory del self._sizes self._sizes = None return self._epoch_sizes def _get_dataset_and_index(self, index): i = self._map_epoch_index_to_global(index) return super()._get_dataset_and_index(i) def __len__(self): return ( self.virtual_epoch_size if self._current_epoch_start_index + self.virtual_epoch_size < self.virtual_size else self.virtual_size - self._current_epoch_start_index ) def set_epoch(self, epoch): if self._current_epoch_start_index is None: # initializing epoch idnices of a virtual dataset self._setup(epoch) self._next_virtual_epoch(epoch) else: # working on already intialized epoch indices if epoch == self._cur_epoch: # re-enter so return return self._next_virtual_epoch(epoch) def _get_epoch_start_index(self, epoch): assert epoch >= 1 # fairseq is using 1-based epoch everywhere return ((epoch - 1) % self.num_virtual_epochs) * self.virtual_epoch_size def _next_global_indices(self, epoch): rng = np.random.RandomState( [ int( hashlib.sha1( str(self.__class__.__name__).encode("utf-8") ).hexdigest(), 16, ) % (2 ** 32), self.seed % (2 ** 32), # global seed epoch, # epoch index, ] ) del self._random_global_indices self._random_global_indices = rng.choice( self.virtual_size, self.virtual_size, replace=False ) if self.load_next_shard is None: self.load_next_shard = False else: # increase shard epoch for next loading self.shard_epoch += 1 self.load_next_shard = True logger.info( "to load next epoch/shard in next load_dataset: " f"epoch={epoch}/shard_epoch={self.shard_epoch}" ) def _next_virtual_epoch(self, epoch): index = self._get_epoch_start_index(epoch) if index == 0 or self._random_global_indices is None: # need to start from the beginning, # so call super().set_epoch(epoch) to establish the global virtual indices logger.info( 
"establishing a new set of global virtual indices for " f"epoch={epoch}/shard_epoch={self.shard_epoch}" ) super().set_epoch(epoch) self._next_global_indices(epoch) else: self._cur_epoch = epoch # reset cache sizes and ordered_indices for the epoch after moving to a new epoch self._clean_if_not_none( [ self._epoch_sizes, ] ) self._epoch_sizes = None self._current_epoch_start_index = index
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/multilingual/sampled_multi_epoch_dataset.py
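The virtual-epoch machinery above reduces to two pieces of index arithmetic: a start cursor ((epoch - 1) % num_virtual_epochs) * virtual_epoch_size, and a lookup into one shuffled permutation of the whole virtual dataset. The following is a minimal, self-contained sketch of that bookkeeping, not the fairseq class itself; the function name and the per-pass reseeding are simplifications for illustration.

import math

import numpy as np


def virtual_epoch_indices(epoch, virtual_size, virtual_epoch_size, seed=2):
    """Return the global (shuffled) indices covered by 1-based virtual epoch `epoch`."""
    num_virtual_epochs = math.ceil(virtual_size / virtual_epoch_size)
    # the permutation is redrawn once per full pass over the virtual dataset
    pass_idx = (epoch - 1) // num_virtual_epochs
    rng = np.random.RandomState([seed % (2 ** 32), pass_idx])
    global_indices = rng.choice(virtual_size, virtual_size, replace=False)
    start = ((epoch - 1) % num_virtual_epochs) * virtual_epoch_size
    stop = min(start + virtual_epoch_size, virtual_size)
    # local item i of this virtual epoch maps to global_indices[start + i]
    return global_indices[start:stop]


if __name__ == "__main__":
    # consecutive virtual epochs walk through disjoint slices of one permutation
    for epoch in range(1, 7):
        print(epoch, virtual_epoch_indices(epoch, virtual_size=10, virtual_epoch_size=4))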
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/multilingual/__init__.py
from enum import Enum
from typing import Dict, List, Optional, Sequence

import torch
from fairseq.data import Dictionary


class EncoderLangtok(Enum):
    """
    Prepend to the beginning of source sentence either the
    source or target language token. (src/tgt).
    """

    src = "src"
    tgt = "tgt"


class LangTokSpec(Enum):
    main = "main"
    mono_dae = "mono_dae"


class LangTokStyle(Enum):
    multilingual = "multilingual"
    mbart = "mbart"


@torch.jit.export
def get_lang_tok(
    lang: str, lang_tok_style: str, spec: str = LangTokSpec.main.value
) -> str:
    # TOKEN_STYLES can't be defined outside this fn since it needs to be
    # TorchScriptable.
    TOKEN_STYLES: Dict[str, str] = {
        LangTokStyle.mbart.value: "[{}]",
        LangTokStyle.multilingual.value: "__{}__",
    }

    if spec.endswith("dae"):
        lang = f"{lang}_dae"
    elif spec.endswith("mined"):
        lang = f"{lang}_mined"
    style = TOKEN_STYLES[lang_tok_style]
    return style.format(lang)


def augment_dictionary(
    dictionary: Dictionary,
    language_list: List[str],
    lang_tok_style: str,
    langtoks_specs: Sequence[str] = (LangTokSpec.main.value,),
    extra_data: Optional[Dict[str, str]] = None,
) -> None:
    for spec in langtoks_specs:
        for language in language_list:
            dictionary.add_symbol(
                get_lang_tok(lang=language, lang_tok_style=lang_tok_style, spec=spec)
            )

    if lang_tok_style == LangTokStyle.mbart.value or (
        extra_data is not None and LangTokSpec.mono_dae.value in extra_data
    ):
        dictionary.add_symbol("<mask>")
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/multilingual/multilingual_utils.py
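Assuming a working fairseq checkout so the import path above resolves, the two helpers can be exercised directly; the language codes used here are only examples.

from fairseq.data import Dictionary
from fairseq.data.multilingual.multilingual_utils import (
    LangTokStyle,
    augment_dictionary,
    get_lang_tok,
)

# "multilingual" style wraps the code as __en__, the mbart style as [en]
assert get_lang_tok("en", LangTokStyle.multilingual.value) == "__en__"
assert get_lang_tok("en", LangTokStyle.mbart.value) == "[en]"
# a "dae" spec gets its own dedicated token, e.g. __en_dae__
assert get_lang_tok("en", LangTokStyle.multilingual.value, spec="mono_dae") == "__en_dae__"

# add one token per (language, spec) pair to an existing dictionary
d = Dictionary()
augment_dictionary(
    d, language_list=["en", "de", "fr"], lang_tok_style=LangTokStyle.multilingual.value
)
print([d[i] for i in range(len(d))][-3:])  # ['__en__', '__de__', '__fr__']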
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import logging
from typing import List


logger = logging.getLogger(__name__)


def uniform(dataset_sizes: List[int]):
    return [1.0] * len(dataset_sizes)


def temperature_sampling(dataset_sizes, temp):
    total_size = sum(dataset_sizes)
    return [(size / total_size) ** (1.0 / temp) for size in dataset_sizes]


def make_temperature_sampling(temp=1.0):
    def sampling_func(dataset_sizes):
        return temperature_sampling(dataset_sizes, temp)

    return sampling_func


def make_ratio_sampling(ratios):
    def sampling_func(dataset_sizes):
        return ratios

    return sampling_func


class SamplingMethod:
    @staticmethod
    def add_arguments(parser):
        parser.add_argument(
            "--sampling-method",
            choices=[
                "uniform",
                "temperature",
                "concat",
                "RoundRobin",
            ],
            type=str,
            default="concat",
            help="the method used to sample data per language pair",
        )
        parser.add_argument(
            "--sampling-temperature",
            default=1.5,
            type=float,
            help="only works with --sampling-method temperature",
        )

    @staticmethod
    def build_sampler(args, task):
        return SamplingMethod(args, task)

    def __init__(self, args, task):
        self.args = args
        self.task = task

    def is_adaptive(self):
        return False

    def sampling_method_selector(self):
        args = self.args
        logger.info(f"selected sampler: {args.sampling_method}")
        if args.sampling_method == "uniform":
            return uniform
        elif args.sampling_method == "temperature" or self.is_adaptive():
            return make_temperature_sampling(float(args.sampling_temperature))
        else:
            # default to concatenating all datasets together
            return None
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/multilingual/sampling_method.py
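The temperature formula above is the usual (size / total) ** (1 / T) up-sampling rule: T = 1 keeps the natural data proportions, while larger T flattens the distribution toward uniform. A small standalone illustration with made-up dataset sizes:

def temperature_sampling(dataset_sizes, temp):
    total = sum(dataset_sizes)
    return [(size / total) ** (1.0 / temp) for size in dataset_sizes]


sizes = [1_000_000, 100_000, 10_000]  # e.g. high-, mid-, and low-resource pairs
for temp in (1.0, 1.5, 5.0):
    weights = temperature_sampling(sizes, temp)
    probs = [w / sum(weights) for w in weights]
    print(temp, [round(p, 3) for p in probs])
# temp=1.0 keeps the natural proportions (~0.901 / 0.090 / 0.009);
# higher temperatures give low-resource datasets a progressively larger share.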
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import datetime import hashlib import logging import time from bisect import bisect_right from collections import OrderedDict, defaultdict from enum import Enum from typing import List import numpy as np import torch from fairseq.data import FairseqDataset, data_utils from fairseq.distributed import utils as distributed_utils def get_time_gap(s, e): return ( datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s) ).__str__() logger = logging.getLogger(__name__) def default_virtual_size_func(datasets, ratios, max_scale_up=1.5): sizes = [len(d) for d in datasets] if ratios is None: return sum(sizes) largest_idx = np.argmax(sizes) largest_r = ratios[largest_idx] largest_s = sizes[largest_idx] # set virtual sizes relative to the largest dataset virtual_sizes = [(r / largest_r) * largest_s for r in ratios] vsize = sum(virtual_sizes) max_size = sum(sizes) * max_scale_up return int(vsize if vsize < max_size else max_size) class CollateFormat(Enum): single = 1 ordered_dict = 2 class SampledMultiDataset(FairseqDataset): """Samples from multiple sub-datasets according to given sampling ratios. Args: datasets ( List[~torch.utils.data.Dataset] or OrderedDict[str, ~torch.utils.data.Dataset] ): datasets sampling_ratios (List[float]): list of probability of each dataset to be sampled (default: None, which corresponds to concatenating all dataset together). seed (int): RNG seed to use (default: 2). epoch (int): starting epoch number (default: 1). eval_key (str, optional): a key used at evaluation time that causes this instance to pass-through batches from *datasets[eval_key]*. collate_format (CollateFormat): collater output format, either CollateFormat.ordered_dict or CollateFormat.single (default: CollateFormat.single) where CollateFormat.single configures the collater to output batches of data mixed from all sub-datasets, and CollateFormat.ordered_dict configures the collater to output a dictionary of batches indexed by keys of sub-datasets. Note that not all sub-datasets will present in a single batch in both formats. virtual_size (int, or callable): the expected virtual size of the dataset (default: default_virtual_size_func). split (str): the split of the data, e.g. 'train', 'valid' or 'test'. shared_collater (bool): whether or not to all sub-datasets have the same collater. shuffle (bool): whether or not to shuffle data (default: True). 
""" def __init__( self, datasets, sampling_ratios=None, seed=2, epoch=1, eval_key=None, collate_format=CollateFormat.single, virtual_size=default_virtual_size_func, split="", shared_collater=False, shuffle=True, ): super().__init__() self.shared_collater = shared_collater self.shuffle = shuffle if isinstance(datasets, OrderedDict): self.keys = list(datasets.keys()) datasets = list(datasets.values()) elif isinstance(datasets, List): self.keys = list(range(len(datasets))) else: raise AssertionError() self.datasets = datasets self.split = split self.eval_key = eval_key if self.eval_key is not None: self.collate_format = CollateFormat.single else: self.collate_format = collate_format self.seed = seed self._cur_epoch = None self.cumulated_sizes = None # self.datasets[k][self._cur_indices[i]] is the data item i in this sampled dataset # namely, data item i is sampled from the kth sub-dataset self.datasets[k] # where self.cumulated_sizes[k-1] <= i < self.cumulated_sizes[k] self._cur_indices = None self._sizes = None self.virtual_size_per_dataset = None # caching properties self._reset_cached_properties() self.setup_sampling(sampling_ratios, virtual_size) self.set_epoch(epoch) def _clean_if_not_none(self, var_list): for v in var_list: if v is not None: del v def _reset_cached_properties(self): self._clean_if_not_none([self._sizes, self._cur_indices]) self._sizes = None self._cur_indices = None def setup_sampling(self, sample_ratios, virtual_size): sizes = [len(d) for d in self.datasets] if sample_ratios is None: # default back to concating datasets self.sample_ratios = None self.virtual_size = sum(sizes) else: if not isinstance(sample_ratios, np.ndarray): sample_ratios = np.array(sample_ratios) self.sample_ratios = sample_ratios virtual_size = ( default_virtual_size_func if virtual_size is None else virtual_size ) self.virtual_size = ( virtual_size(self.datasets, self.sample_ratios) if callable(virtual_size) else virtual_size ) def adjust_sampling(self, epoch, sampling_ratios, virtual_size): if sampling_ratios is not None: sampling_ratios = self._sync_sample_ratios(sampling_ratios) self.setup_sampling(sampling_ratios, virtual_size) def _sync_sample_ratios(self, ratios): # in case the ratios are not precisely the same across processes # also to ensure every procresses update the ratios in the same pace ratios = torch.DoubleTensor(ratios) if torch.distributed.is_initialized(): if torch.cuda.is_available(): distributed_utils.all_reduce( ratios.cuda(), group=distributed_utils.get_data_parallel_group() ) else: distributed_utils.all_reduce( ratios, group=distributed_utils.get_data_parallel_group() ) ret = ratios.cpu() ret = ret.numpy() return ret def random_choice_in_dataset(self, rng, dataset, choice_size): if hasattr(dataset, "random_choice_in_dataset"): return dataset.random_choice_in_dataset(rng, choice_size) dataset_size = len(dataset) return rng.choice( dataset_size, choice_size, replace=(choice_size > dataset_size) ) def get_virtual_indices(self, rng, datasets, sample_ratios, virtual_size): def get_counts(sample_ratios): counts = np.array([virtual_size * r for r in sample_ratios], dtype=np.int64) diff = virtual_size - counts.sum() assert diff >= 0 # due to round-offs, the size might not match the desired sizes if diff > 0: dataset_indices = rng.choice( len(sample_ratios), size=diff, p=sample_ratios ) for i in dataset_indices: counts[i] += 1 return counts def get_in_dataset_indices(datasets, sizes, sample_ratios): counts = get_counts(sample_ratios) # uniformally sample desired counts for each 
dataset # if the desired counts are large, sample with replacement: indices = [ self.random_choice_in_dataset(rng, d, c) for c, d in zip(counts, datasets) ] return indices sizes = [len(d) for d in datasets] if sample_ratios is None: # default back to concating datasets in_dataset_indices = [list(range(s)) for s in sizes] virtual_sizes_per_dataset = sizes else: ratios = sample_ratios / sample_ratios.sum() in_dataset_indices = get_in_dataset_indices(datasets, sizes, ratios) virtual_sizes_per_dataset = [len(d) for d in in_dataset_indices] virtual_sizes_per_dataset = np.array(virtual_sizes_per_dataset, np.int64) cumulative_sizes = np.cumsum(virtual_sizes_per_dataset) assert sum(virtual_sizes_per_dataset) == virtual_size assert cumulative_sizes[-1] == virtual_size if virtual_size < sum(sizes): logger.warning( f"virtual data size ({virtual_size}) is less than real data size ({sum(sizes)})." " If virtual size << real data size, there could be data coverage issue." ) in_dataset_indices = np.hstack(in_dataset_indices) return in_dataset_indices, cumulative_sizes, virtual_sizes_per_dataset def _get_dataset_and_index(self, index): i = bisect_right(self.cumulated_sizes, index) return i, self._cur_indices[index] def __getitem__(self, index): # self.__getitem__(index) returns self.datasets[k][self._cur_indices[index]] # where k satisfies self.cumulated_sizes[k - 1] <= k < self.cumulated_sizes[k] ds_idx, ds_sample_idx = self._get_dataset_and_index(index) ret = (ds_idx, self.datasets[ds_idx][ds_sample_idx]) return ret def num_tokens(self, index): return self.sizes[index].max() def num_tokens_vec(self, indices): sizes_vec = self.sizes[np.array(indices)] # max across all dimensions but first one return np.amax(sizes_vec, axis=tuple(range(1, len(sizes_vec.shape)))) def size(self, index): return self.sizes[index] def __len__(self): return self.virtual_size def collater(self, samples, **extra_args): """Merge a list of samples to form a mini-batch.""" if len(samples) == 0: return None if self.collate_format == "ordered_dict": collect_samples = [[] for _ in range(len(self.datasets))] for (i, sample) in samples: collect_samples[i].append(sample) batch = OrderedDict( [ (self.keys[i], dataset.collater(collect_samples[i])) for i, (key, dataset) in enumerate(zip(self.keys, self.datasets)) if len(collect_samples[i]) > 0 ] ) elif self.shared_collater: batch = self.datasets[0].collater([s for _, s in samples]) else: samples_dict = defaultdict(list) pad_to_length = ( defaultdict(int) if "pad_to_length" not in extra_args else extra_args["pad_to_length"] ) for ds_idx, s in samples: pad_to_length["source"] = max( pad_to_length["source"], s["source"].size(0) ) if s["target"] is not None: pad_to_length["target"] = max( pad_to_length["target"], s["target"].size(0) ) samples_dict[ds_idx].append(s) batches = [ self.datasets[i].collater(samples_dict[i], pad_to_length=pad_to_length) for i in range(len(self.datasets)) if len(samples_dict[i]) > 0 ] def straight_data(tensors): batch = torch.cat(tensors, dim=0) return batch src_lengths = straight_data( [b["net_input"]["src_lengths"] for b in batches] ) src_lengths, sort_order = src_lengths.sort(descending=True) def straight_order(tensors): batch = straight_data(tensors) return batch.index_select(0, sort_order) batch = { "id": straight_order([b["id"] for b in batches]), "nsentences": sum(b["nsentences"] for b in batches), "ntokens": sum(b["ntokens"] for b in batches), "net_input": { "src_tokens": straight_order( [b["net_input"]["src_tokens"] for b in batches] ), "src_lengths": 
src_lengths, }, "target": straight_order([b["target"] for b in batches]) if batches[0]["target"] is not None else None, } if "prev_output_tokens" in batches[0]["net_input"]: batch["net_input"]["prev_output_tokens"] = straight_order( [b["net_input"]["prev_output_tokens"] for b in batches] ) if "src_lang_id" in batches[0]["net_input"]: batch["net_input"]["src_lang_id"] = straight_order( [b["net_input"]["src_lang_id"] for b in batches] ) if "tgt_lang_id" in batches[0]: batch["tgt_lang_id"] = straight_order( [b["tgt_lang_id"] for b in batches] ) return batch @property def sizes(self): if self._sizes is not None: return self._sizes start_time = time.time() in_sub_dataset_indices = [ self._cur_indices[ 0 if i == 0 else self.cumulated_sizes[i - 1] : self.cumulated_sizes[i] ] for i in range(len(self.datasets)) ] sub_dataset_sizes = [ d.sizes[indices] for d, indices in zip(self.datasets, in_sub_dataset_indices) ] self._sizes = np.vstack(sub_dataset_sizes) logger.info(f"sizes() calling time: {get_time_gap(start_time, time.time())}") return self._sizes def ordered_indices(self): if self.shuffle: indices = np.random.permutation(len(self)) else: indices = np.arange(len(self)) sizes = self.sizes tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None src_sizes = ( sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes ) # sort by target length, then source length if tgt_sizes is not None: indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")] sort_indices = indices[np.argsort(src_sizes[indices], kind="mergesort")] return sort_indices def prefetch(self, indices): prefetch_indices = [[] for _ in range(len(self.datasets))] for i in indices: ds_idx, ds_sample_idx = self._get_dataset_and_index(i) prefetch_indices[ds_idx].append(ds_sample_idx) for i in range(len(prefetch_indices)): self.datasets[i].prefetch(prefetch_indices[i]) @property def can_reuse_epoch_itr_across_epochs(self): return False def set_epoch(self, epoch): super().set_epoch(epoch) if epoch == self._cur_epoch: # re-enter so return return for d in self.datasets: if hasattr(d, "set_epoch"): d.set_epoch(epoch) self._cur_epoch = epoch self._establish_virtual_datasets() def _establish_virtual_datasets(self): if self.sample_ratios is None and self._cur_indices is not None: # not a samping dataset, no need to resample if indices are already established return self._reset_cached_properties() start_time = time.time() # Generate a weighted sample of indices as a function of the # random seed and the current epoch. 
rng = np.random.RandomState( [ int( hashlib.sha1( str(self.__class__.__name__).encode("utf-8") ).hexdigest(), 16, ) % (2 ** 32), self.seed % (2 ** 32), # global seed self._cur_epoch, # epoch index, ] ) self._clean_if_not_none( [self.cumulated_sizes, self.virtual_size_per_dataset, self._sizes] ) self._sizes = None indices, cumulated_sizes, virtual_size_per_dataset = self.get_virtual_indices( rng, self.datasets, self.sample_ratios, self.virtual_size ) self._cur_indices = indices self.cumulated_sizes = cumulated_sizes self.virtual_size_per_dataset = virtual_size_per_dataset raw_sizes = [len(d) for d in self.datasets] sampled_sizes = self.virtual_size_per_dataset logger.info( f"[{self.split}] Raw sizes: {str(dict(zip(self.keys, raw_sizes)))}; " f"raw total size: {sum(raw_sizes)}" ) logger.info( f"[{self.split}] Resampled sizes: {str(dict(zip(self.keys, sampled_sizes)))}; " f"resampled total size: {sum(sampled_sizes)}" ) if self.sample_ratios is not None: logger.info( f"[{self.split}] Upsampling ratios: {str(dict(zip(self.keys, self.sample_ratios)))}" ) else: logger.info(f"[{self.split}] A concat dataset") logger.info( f"[{self.split}] virtual dataset established time: {get_time_gap(start_time, time.time())}" ) def filter_indices_by_size(self, indices, max_sizes): """Filter a list of sample indices. Remove those that are longer than specified in max_sizes. Args: indices (np.array): original array of sample indices max_sizes (int or list[int] or tuple[int]): max sample size, can be defined separately for src and tgt (then list or tuple) Returns: np.array: filtered sample array list: list of removed indices """ sizes = self.sizes tgt_sizes = sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None src_sizes = ( sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes ) return data_utils.filter_paired_dataset_indices_by_size( src_sizes, tgt_sizes, indices, max_sizes )
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/multilingual/sampled_multi_dataset.py
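get_virtual_indices above splits the virtual size into per-dataset counts (distributing the rounding remainder at random) and then samples indices inside each sub-dataset, with replacement whenever a dataset is up-sampled beyond its real size. Below is a condensed, hypothetical re-implementation of just that indexing step, not the fairseq code; the helper names are invented for illustration.

import numpy as np


def virtual_counts(rng, ratios, virtual_size):
    """How many virtual examples each sub-dataset contributes."""
    ratios = np.asarray(ratios, dtype=float)
    ratios = ratios / ratios.sum()
    counts = (virtual_size * ratios).astype(np.int64)
    # distribute the rounding remainder at random, proportionally to the ratios
    for i in rng.choice(len(ratios), size=int(virtual_size - counts.sum()), p=ratios):
        counts[i] += 1
    return counts


def virtual_indices(rng, dataset_sizes, ratios, virtual_size):
    counts = virtual_counts(rng, ratios, virtual_size)
    # sample inside each sub-dataset; an up-sampled dataset samples with replacement
    per_dataset = [
        rng.choice(size, count, replace=bool(count > size))
        for size, count in zip(dataset_sizes, counts)
    ]
    return np.hstack(per_dataset), np.cumsum([len(x) for x in per_dataset])


rng = np.random.RandomState(0)
flat, cumulative = virtual_indices(
    rng, dataset_sizes=[100, 10], ratios=[1.0, 1.0], virtual_size=40
)
print(cumulative)  # [20 40]: the virtual dataset holds 20 items from each sub-dataset
# a global virtual index i belongs to the first sub-dataset whose cumulative bound exceeds i,
# which is exactly the bisect_right lookup used in _get_dataset_and_index above.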
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import json import logging import math import os from collections import OrderedDict, defaultdict from argparse import ArgumentError from fairseq import utils from fairseq.data import ( AppendTokenDataset, ConcatDataset, Dictionary, LanguagePairDataset, PrependTokenDataset, SampledMultiDataset, SampledMultiEpochDataset, StripTokenDataset, TransformEosLangPairDataset, TruncateDataset, data_utils, indexed_dataset, ) from fairseq.data.multilingual.multilingual_utils import ( EncoderLangtok, LangTokSpec, LangTokStyle, augment_dictionary, get_lang_tok, ) from fairseq.data.multilingual.sampled_multi_dataset import CollateFormat from fairseq.file_io import PathManager from fairseq.utils import FileContentsAction, csv_str_list, eval_str_dict logger = logging.getLogger(__name__) SRC_DICT_NAME = 'src' TGT_DICT_NAME = 'tgt' def _lang_id(dic: Dictionary, lang: str): """Return language ID index.""" idx = dic.index(lang) assert idx != dic.unk_index, "cannot find language ID for lang {}".format(lang) return idx def load_sampling_weights(from_file): with open(from_file) as f: weights = json.load(f) return weights class MultilingualDatasetManager(object): def __init__(self, args, lang_pairs, langs, dicts, sampling_method): super().__init__() self.args = args self.seed = args.seed self.lang_pairs = lang_pairs self.extra_lang_pairs = ( list( {p for _, v in args.extra_lang_pairs.items() for p in v.split(",")} ) if args.extra_lang_pairs else [] ) self.src_langs = {p.split("-")[0] for p in args.lang_pairs + self.extra_lang_pairs} self.tgt_langs = {p.split("-")[1] for p in args.lang_pairs + self.extra_lang_pairs} self.langs = langs self.dicts = dicts self.lang_dict = self.create_lang_dictionary(self.langs) self.sampling_method = sampling_method self.sampling_scheduler = None self._has_sharded_data = False self._num_shards_dict = {} self._training_data_sizes = defaultdict(lambda: {}) @classmethod def setup_data_manager(cls, args, lang_pairs, langs, dicts, sampling_method): return MultilingualDatasetManager( args, lang_pairs, langs, dicts, sampling_method ) @staticmethod def add_args(parser): parser.add_argument( "data", help="colon separated path to data directories list, \ will be iterated upon during epochs in round-robin manner", action=FileContentsAction, ) parser.add_argument( "--langs", default=None, type=csv_str_list, help="a list of languages comma sperated languages which can appear in lang-pairs; " "note that the ordering determines language token IDs", ) parser.add_argument( "--lang-dict", default=None, type=str, help="an external file which contains a list of " "languages which can appear in lang-pairs; " "note that the ordering determines language token IDs; " "--langs and --lang-dict are two exclusive options", ) parser.add_argument('--source-dict', default=None, type=str, help='path to source dictionary; if specified it will override per language dictionary loading') parser.add_argument('--target-dict', default=None, type=str, help='path to target dictionary; if specified it will override per language dictionary loading') parser.add_argument( "--lang-tok-style", default=LangTokStyle.multilingual.value, type=str, choices=[LangTokStyle.multilingual.value, LangTokStyle.mbart.value], help="language token styles", ) parser.add_argument( "--load-alignments", action="store_true", help="load the binarized 
alignments", ) parser.add_argument( "--left-pad-source", default="True", type=str, metavar="BOOL", help="pad the source on the left", ) parser.add_argument( "--left-pad-target", default="False", type=str, metavar="BOOL", help="pad the target on the left", ) try: parser.add_argument( "--max-source-positions", default=1024, type=int, metavar="N", help="max number of tokens in the source sequence", ) parser.add_argument( "--max-target-positions", default=1024, type=int, metavar="N", help="max number of tokens in the target sequence", ) except ArgumentError: # this might have already been defined. Once we transition this to hydra it should be fine to add it here. pass parser.add_argument( "--upsample-primary", default=1, type=int, help="amount to upsample primary dataset", ) parser.add_argument( "--truncate-source", action="store_true", default=False, help="truncate source to max-source-positions", ) parser.add_argument( "--encoder-langtok", default=None, type=str, choices=[EncoderLangtok.src.value, EncoderLangtok.tgt.value], metavar="SRCTGT", help="prepend to the beginning of source sentence the source or target " "language token. (src/tgt)", ) parser.add_argument( "--decoder-langtok", action="store_true", help="prepend to the beginning of target sentence the target language token", ) parser.add_argument( "--lang-tok-replacing-bos-eos", action="store_true", default=False ) parser.add_argument( "--enable-lang-ids", default=False, action="store_true", help="whether to include language IDs in samples", ) parser.add_argument( "--enable-reservsed-directions-shared-datasets", default=False, action="store_true", help="whether to allow datasets be used in reversed directions", ) parser.add_argument( "--extra-data", help='a dictionary of data name to this path, \ e.g. {"mined", path_to_mined_data, "denoised": path_to_denoised_data}', type=lambda uf: eval_str_dict(uf, type=str), default=None, ) parser.add_argument( "--extra-lang-pairs", help='a dictionary of data name to the language pairs they serve, \ e.g. {"mined": comma-separated-lang-pairs, "denoised": comma-separated-lang-pairs}', type=lambda uf: eval_str_dict(uf, type=str), default=None, ) parser.add_argument( "--fixed-dictionary", help="Fixed dictionary to use with model path", default=None, type=str, ) parser.add_argument( "--langtoks-specs", help='a list of comma separated data types that a set of language tokens to be specialized for, \ e.g. "main,dae,mined". There will be a set of language tokens added to the vocab to \ distinguish languages in different training data types. If not specified, default language \ tokens per languages will be added', default=LangTokSpec.main.value, type=csv_str_list, ) parser.add_argument( "--langtoks", help='a dictionary of how to add language tokens, \ e.g. {"mined": (None, "tgt"), "mono_dae": ("src.dae", "tgt"), "main": \ ("src", "tgt")}, or {"mined": ("src.mined", "tgt")}', default=None, type=lambda uf: eval_str_dict(uf, type=str), ) parser.add_argument( "--sampling-weights-from-file", help='a file contain a python dictionary of how to sample data sets, \ e.g. { "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, \ "mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }', default=None, type=str, ) parser.add_argument( "--sampling-weights", help='a dictionary of how to sample data sets, \ e.g. 
{ "main:en_XX-es_XX": 0.2, "mined:en_XX-pt_XX": 0.5, \ "mono_dae:es_XX-es_XX: 0.3, "main:en_xx-fr_XX": 0.8 }', default=None, type=lambda uf: eval_str_dict(uf, type=str), ) parser.add_argument( "--virtual-epoch-size", default=None, type=int, help="virtual epoch size to speed up data loading", ) parser.add_argument( "--virtual-data-size", default=None, type=int, help="virtual data size of the whole joint dataset to speed" "up data loading and have specific dynamic sampling strategy interval", ) @classmethod def load_langs(cls, args, **kwargs): if args.lang_dict and args.langs: raise ValueError("--langs and --lang-dict can not both be specified") if args.lang_dict is None and args.langs is None: logger.warning( "External language dictionary is not provided; " "use lang-pairs to infer the set of supported languages. " "The language ordering is not stable which might cause " "misalignment in pretraining and finetuning." ) # infer from lang_pairs as it is langs = list( {x for lang_pair in args.lang_pairs for x in lang_pair.split("-")} ) langs = sorted(langs) logger.info(f"inferred language list: {langs}") elif args.lang_dict: with open( PathManager.get_local_path(args.lang_dict), "r", encoding="utf-8" ) as f: langs = [lang.strip() for lang in f.readlines() if lang.strip()] logger.info( f"loaded language list from {args.lang_dict} as they are ordered in file" ) elif args.langs: langs = args.langs logger.info( f"parsed the language list as they are ordered in the option: {langs}" ) return langs def has_sharded_data(self, split): return self._has_sharded_data and split == getattr( self.args, "train_subset", None ) def _shared_collater(self): return not (self.args.extra_data and "mono_dae" in self.args.extra_data) and ( not self.args.lang_tok_replacing_bos_eos ) def estimate_global_pass_epoch(self, epoch): if self.args.virtual_epoch_size is None or self.args.virtual_data_size is None: return None # one epoch more for remaining data in each shard virtual_epochs_per_shard = math.ceil( self.args.virtual_data_size / self.args.virtual_epoch_size ) # note that fairseq epoch / shard_epoch starts from 1 shard_epoch = (epoch - 1) // virtual_epochs_per_shard + 1 return shard_epoch @classmethod def prepare(cls, load_dictionary, args, **kargs): args.left_pad_source = utils.eval_bool(args.left_pad_source) args.left_pad_target = utils.eval_bool(args.left_pad_target) if not hasattr(args, "shuffle_instance"): args.shuffle_instance = False if args.langtoks is None: args.langtoks = {} if "main" not in args.langtoks: src_langtok_spec = args.encoder_langtok if args.encoder_langtok else None tgt_langtok_spec = "tgt" if args.decoder_langtok else None args.langtoks["main"] = (src_langtok_spec, tgt_langtok_spec) def check_langs(langs, pairs): messages = [] for src, tgt in pairs: if src not in langs or tgt not in langs: messages.append( f"language pair {src}-{tgt} contains languages " "that are not in the language dictionary" ) if len(messages) > 0: raise ValueError(" ".join(messages) + f"; langs: {langs}") if args.lang_pairs is None: raise ValueError( "--lang-pairs is required. List all the language pairs in the training objective." 
) if isinstance(args.lang_pairs, str): args.lang_pairs = args.lang_pairs.split(",") if args.source_lang is not None or args.target_lang is not None: training = False else: training = True language_list = cls.load_langs(args, **kargs) check_langs( language_list, ( [p.split("-") for p in args.lang_pairs] if training else [(args.source_lang, args.target_lang)] ), ) def load_dictionary_and_postproc(path): d = load_dictionary(path) augment_dictionary( dictionary=d, language_list=language_list, lang_tok_style=args.lang_tok_style, langtoks_specs=args.langtoks_specs, extra_data=args.extra_data, ) return d dicts = cls.load_all_dictionaries(args, language_list, load_dictionary_and_postproc, training) return language_list, dicts, training @classmethod def load_all_dictionaries(cls, args, language_list, load_dictionary, training): dicts = OrderedDict() if args.source_dict is not None: dicts[SRC_DICT_NAME] = load_dictionary(args.source_dict) if args.target_dict is not None: dicts[TGT_DICT_NAME] = load_dictionary(args.target_dict) if training: extra_lang_pairs = ( list( {p for _, v in args.extra_lang_pairs.items() for p in v.split(",")} ) if args.extra_lang_pairs else [] ) src_langs_to_load_dicts = sorted( {p.split("-")[0] for p in (args.lang_pairs + extra_lang_pairs)} ) tgt_langs_to_load_dicts = sorted( {p.split("-")[1] for p in (args.lang_pairs + extra_lang_pairs)} ) else: src_langs_to_load_dicts = [args.source_lang] tgt_langs_to_load_dicts = [args.target_lang] paths = utils.split_paths(args.data) assert len(paths) > 0 def load_dicts(langs_to_load_dicts): for lang in langs_to_load_dicts: dicts[lang] = load_dictionary( os.path.join(paths[0], "dict.{}.txt".format(lang)) ) if len(dicts) > 0: dict0 = next(iter(dicts.values())) assert dicts[lang].pad() == dict0.pad() assert dicts[lang].eos() == dict0.eos() assert dicts[lang].unk() == dict0.unk() logger.info("[{}] dictionary: {} types".format(lang, len(dicts[lang]))) if args.fixed_dictionary is not None: fixed_dict = load_dictionary(args.fixed_dictionary) dicts = {lang: fixed_dict for lang in src_langs_to_load_dicts + tgt_langs_to_load_dicts} else: if args.source_dict is None: load_dicts(src_langs_to_load_dicts) if args.target_dict is None: load_dicts(tgt_langs_to_load_dicts) return dicts def get_source_dictionary(self, lang): if self.args.source_dict is not None: return self.dicts[SRC_DICT_NAME] else: return self.dicts[lang] def get_target_dictionary(self, lang): if self.args.target_dict is not None: return self.dicts[TGT_DICT_NAME] else: return self.dicts[lang] @classmethod def create_lang_dictionary(cls, langs): unk = "<unk>" # hack to remove symbols other than unk as they are not needed by lang dict lang_dict = Dictionary(pad=unk, eos=unk, unk=unk, bos=unk) for lang in langs: lang_dict.add_symbol(lang) return lang_dict @classmethod def get_langtok_index(cls, lang_tok, dic): idx = dic.index(lang_tok) assert ( idx != dic.unk_index ), "cannot find language token {} in the dictionary".format(lang_tok) return idx def get_encoder_langtok(self, src_lang, tgt_lang, spec=None): if spec is None: return None if spec and spec.startswith("src"): if src_lang is None: return None langtok = get_lang_tok( lang=src_lang, lang_tok_style=self.args.lang_tok_style, spec=spec ) else: if tgt_lang is None: return None langtok = get_lang_tok( lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec ) return self.get_langtok_index( langtok, self.get_source_dictionary(src_lang) if src_lang else self.get_target_dictionary(tgt_lang) ) def get_decoder_langtok(self, tgt_lang, 
spec=None): if spec is None: return None langtok = get_lang_tok( lang=tgt_lang, lang_tok_style=self.args.lang_tok_style, spec=spec ) return self.get_langtok_index(langtok, self.get_target_dictionary(tgt_lang)) @classmethod def load_data(cls, path, vdict, impl): dataset = data_utils.load_indexed_dataset(path, vdict, impl) return dataset @classmethod def split_exists(cls, split, src, tgt, lang, data_path, dataset_impl): filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang)) return indexed_dataset.dataset_exists(filename, impl=dataset_impl) def load_lang_dataset( self, data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, max_source_positions, prepend_bos=False, load_alignments=False, truncate_source=False, ): src_datasets = [] tgt_datasets = [] for k in itertools.count(): split_k = split + (str(k) if k > 0 else "") # infer langcode if self.split_exists(split_k, src, tgt, src, data_path, dataset_impl): prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt)) elif self.split_exists(split_k, tgt, src, src, data_path, dataset_impl): prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src)) else: if k > 0: break else: logger.error( f"Dataset not found: {data_path}, {split_k}, {src}, {tgt}" ) raise FileNotFoundError( "Dataset not found: {} ({})".format(split, data_path) ) src_dataset = self.load_data(prefix + src, src_dict, dataset_impl) if truncate_source: src_dataset = AppendTokenDataset( TruncateDataset( StripTokenDataset(src_dataset, src_dict.eos()), max_source_positions - 1, ), src_dict.eos(), ) src_datasets.append(src_dataset) tgt_datasets.append(self.load_data(prefix + tgt, tgt_dict, dataset_impl)) logger.info( "{} {} {}-{} {} examples".format( data_path, split_k, src, tgt, len(src_datasets[-1]) ) ) if not combine: break assert len(src_datasets) == len(tgt_datasets) if len(src_datasets) == 1: src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0] else: sample_ratios = [1] * len(src_datasets) sample_ratios[0] = upsample_primary src_dataset = ConcatDataset(src_datasets, sample_ratios) tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios) if prepend_bos: assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index") src_dataset = PrependTokenDataset(src_dataset, src_dict.bos()) tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos()) align_dataset = None if load_alignments: align_path = os.path.join( data_path, "{}.align.{}-{}".format(split, src, tgt) ) if indexed_dataset.dataset_exists(align_path, impl=dataset_impl): align_dataset = data_utils.load_indexed_dataset( align_path, None, dataset_impl ) return src_dataset, tgt_dataset, align_dataset def load_langpair_dataset( self, data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos=False, load_alignments=False, truncate_source=False, src_dataset_transform_func=lambda dataset: dataset, tgt_dataset_transform_func=lambda dataset: dataset, src_lang_id=None, tgt_lang_id=None, langpairs_sharing_datasets=None, ): norm_direction = "-".join(sorted([src, tgt])) if langpairs_sharing_datasets is not None: src_dataset = langpairs_sharing_datasets.get( (data_path, split, norm_direction, src), "NotInCache" ) tgt_dataset = langpairs_sharing_datasets.get( (data_path, split, norm_direction, tgt), "NotInCache" ) align_dataset = langpairs_sharing_datasets.get( (data_path, split, norm_direction, src, tgt), "NotInCache" ) # a hack: 
any one is not in cache, we need to reload them if ( langpairs_sharing_datasets is None or src_dataset == "NotInCache" or tgt_dataset == "NotInCache" or align_dataset == "NotInCache" or split != getattr(self.args, "train_subset", None) ): # source and target datasets can be reused in reversed directions to save memory # reversed directions of valid and test data will not share source and target datasets src_dataset, tgt_dataset, align_dataset = self.load_lang_dataset( data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, max_source_positions=max_source_positions, prepend_bos=prepend_bos, load_alignments=load_alignments, truncate_source=truncate_source, ) src_dataset = src_dataset_transform_func(src_dataset) tgt_dataset = tgt_dataset_transform_func(tgt_dataset) if langpairs_sharing_datasets is not None: langpairs_sharing_datasets[ (data_path, split, norm_direction, src) ] = src_dataset langpairs_sharing_datasets[ (data_path, split, norm_direction, tgt) ] = tgt_dataset langpairs_sharing_datasets[ (data_path, split, norm_direction, src, tgt) ] = align_dataset if align_dataset is None: # no align data so flag the reverse direction as well in sharing langpairs_sharing_datasets[ (data_path, split, norm_direction, tgt, src) ] = align_dataset else: logger.info( f"Reusing source and target datasets of [{split}] {tgt}-{src} for reversed direction: " f"[{split}] {src}-{tgt}: src length={len(src_dataset)}; tgt length={len(tgt_dataset)}" ) return LanguagePairDataset( src_dataset, src_dataset.sizes, src_dict, tgt_dataset, tgt_dataset.sizes if tgt_dataset is not None else None, tgt_dict, left_pad_source=left_pad_source, left_pad_target=left_pad_target, align_dataset=align_dataset, src_lang_id=src_lang_id, tgt_lang_id=tgt_lang_id, ) def src_dataset_tranform_func(self, src_lang, tgt_lang, dataset, spec=None): if self.args.lang_tok_replacing_bos_eos: # it is handled by self.alter_dataset_langtok # TODO: Unifiy with alter_dataset_langtok return dataset if spec is None: return dataset tok = self.get_encoder_langtok(src_lang, tgt_lang, spec) if tok: return PrependTokenDataset(dataset, tok) return dataset def tgt_dataset_tranform_func(self, source_lang, target_lang, dataset, spec=None): if dataset is None: # note that target dataset can be None during inference time return None if self.args.lang_tok_replacing_bos_eos: # TODO: Unifiy with alter_dataset_langtok # It is handled by self.alter_dataset_langtok. # The complication in self.alter_dataset_langtok # makes a unified framework difficult. 
return dataset # if not self.args.decoder_langtok: if not spec: return dataset tok = self.get_decoder_langtok(target_lang, spec) if tok: return PrependTokenDataset(dataset, tok) return dataset def alter_dataset_langtok( self, lang_pair_dataset, src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None, src_langtok_spec=None, tgt_langtok_spec=None, ): if src_langtok_spec is None and tgt_langtok_spec is None: return lang_pair_dataset new_src_eos = None if ( src_langtok_spec is not None and src_eos is not None and (src_lang is not None or tgt_lang is not None) ): new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang, src_langtok_spec) else: src_eos = None new_tgt_bos = None if tgt_langtok_spec and tgt_eos is not None and tgt_lang is not None: new_tgt_bos = self.get_decoder_langtok(tgt_lang, tgt_langtok_spec) else: tgt_eos = None return TransformEosLangPairDataset( lang_pair_dataset, src_eos=src_eos, new_src_eos=new_src_eos, tgt_bos=tgt_eos, new_tgt_bos=new_tgt_bos, ) def load_a_dataset( self, split, data_path, src, src_dict, tgt, tgt_dict, combine, prepend_bos=False, langpairs_sharing_datasets=None, data_category=None, **extra_kwargs, ): dataset_impl = self.args.dataset_impl upsample_primary = self.args.upsample_primary left_pad_source = self.args.left_pad_source left_pad_target = self.args.left_pad_target max_source_positions = self.args.max_source_positions max_target_positions = self.args.max_target_positions load_alignments = self.args.load_alignments truncate_source = self.args.truncate_source src_dataset_transform_func = self.src_dataset_tranform_func tgt_dataset_transform_func = self.tgt_dataset_tranform_func enable_lang_ids = self.args.enable_lang_ids lang_dictionary = self.lang_dict src_langtok_spec, tgt_langtok_spec = extra_kwargs["langtok_spec"] src_langtok = self.get_encoder_langtok(src, tgt, src_langtok_spec) tgt_langtok = self.get_decoder_langtok(tgt, tgt_langtok_spec) logger.info( f"{data_category}:{src}-{tgt} src_langtok: {src_langtok}; tgt_langtok: {tgt_langtok}" ) langpair_ds = self.load_langpair_dataset( data_path, split, src, src_dict, tgt, tgt_dict, combine, dataset_impl, upsample_primary, left_pad_source, left_pad_target, max_source_positions, max_target_positions, prepend_bos, load_alignments, truncate_source, src_dataset_transform_func=lambda dataset: src_dataset_transform_func( src, tgt, dataset, src_langtok_spec ), tgt_dataset_transform_func=lambda dataset: tgt_dataset_transform_func( src, tgt, dataset, tgt_langtok_spec ), src_lang_id=_lang_id(lang_dictionary, src) if enable_lang_ids and lang_dictionary is not None else None, tgt_lang_id=_lang_id(lang_dictionary, tgt) if enable_lang_ids and lang_dictionary is not None else None, langpairs_sharing_datasets=langpairs_sharing_datasets, ) # TODO: handle modified lang toks for mined data and dae data if self.args.lang_tok_replacing_bos_eos: ds = self.alter_dataset_langtok( langpair_ds, src_eos=self.get_source_dictionary(src).eos() if src else self.get_target_dictionary(tgt).eos(), src_lang=src, tgt_eos=self.get_target_dictionary(tgt).eos(), tgt_lang=tgt, src_langtok_spec=src_langtok_spec, tgt_langtok_spec=tgt_langtok_spec, ) else: ds = langpair_ds return ds def load_split_langpair_datasets(self, split, data_param_list): datasets = [] langpairs_sharing_datasets = ( {} if self.args.enable_reservsed_directions_shared_datasets else None ) for param in data_param_list: ds = self.load_a_dataset( split=split, langpairs_sharing_datasets=langpairs_sharing_datasets, **param, ) datasets.append(ds) return datasets def 
get_data_paths_and_lang_pairs(self, split): datapaths = {"main": self.args.data} lang_pairs = {"main": self.lang_pairs} if split == getattr(self.args, "train_subset", None): # only training data can have extra data and extra language pairs if self.args.extra_data: extra_datapaths = self.args.extra_data datapaths.update(extra_datapaths) if self.args.extra_lang_pairs: extra_lang_pairs = { k: v.split(",") for k, v in self.args.extra_lang_pairs.items() } lang_pairs.update(extra_lang_pairs) return datapaths, lang_pairs @classmethod def get_dataset_key(cls, data_category, src, tgt): return f"{data_category}:{src}-{tgt}" @classmethod def _get_shard_num_dict(cls, split, paths): shards = defaultdict(int) for path in paths: files = PathManager.ls(path) directions = set() for f in files: if f.startswith(split) and f.endswith(".idx"): # idx files of the form "{split}.{src}-{tgt}.{lang}.idx" direction = f.split(".")[-3] directions.add(direction) for direction in directions: shards[direction] += 1 return shards def get_split_num_data_shards(self, split): if split in self._num_shards_dict: return self._num_shards_dict[split] num_shards_dict = {} data_paths, lang_pairs = self.get_data_paths_and_lang_pairs(split) for data_category, paths in data_paths.items(): if data_category not in lang_pairs: continue paths = utils.split_paths(paths) shards_dict = self._get_shard_num_dict(split, paths) lang_dirs = [ lang_pair.split("-") for lang_pair in lang_pairs[data_category] ] lang_dirs = [x if len(x) > 1 else (x[0], x[0]) for x in lang_dirs] for src, tgt in lang_dirs: key = self.get_dataset_key(data_category, src, tgt) if "mono_" in data_category: # monolingual data requires tgt only assert src is None or src == tgt, ( f"error: src={src}, " "tgt={tgt} for data_category={data_category}" ) num_shards_dict[key] = shards_dict[tgt] else: if f"{src}-{tgt}" in shards_dict: num_shards_dict[key] = shards_dict[f"{src}-{tgt}"] elif f"{tgt}-{src}" in shards_dict: # follow the fairseq tradition to use reversed direction data if it is not available num_shards_dict[key] = shards_dict[f"{tgt}-{src}"] self._num_shards_dict[split] = num_shards_dict logger.info(f"[{split}] num of shards: {num_shards_dict}") return num_shards_dict @classmethod def get_shard_id(cls, num_shards, epoch, shard_epoch=None): shard = epoch if shard_epoch is None else shard_epoch shard = (shard - 1) % num_shards return shard def get_split_data_path(self, paths, epoch, shard_epoch, num_shards): path = paths[self.get_shard_id(num_shards, epoch, shard_epoch)] return path def get_split_data_param_list(self, split, epoch, shard_epoch=None): # TODO: to extend with extra datasets and keys and loop over different shard data paths param_list = [] data_paths, lang_pairs = self.get_data_paths_and_lang_pairs(split) logger.info(f"langtoks settings: {self.args.langtoks}") split_num_shards_dict = self.get_split_num_data_shards(split) for data_category, paths in data_paths.items(): if data_category not in lang_pairs: continue paths = utils.split_paths(paths) assert len(paths) > 0 if len(paths) > 1: self._has_sharded_data = True if split != getattr(self.args, "train_subset", None): # if not training data set, use the first shard for valid and test paths = paths[:1] if data_category in self.args.langtoks: lang_tok_spec = self.args.langtoks[data_category] else: # default to None lang_tok_spec = (None, None) # infer langcode lang_dirs = [ lang_pair.split("-") for lang_pair in lang_pairs[data_category] ] lang_dirs = [x if len(x) > 1 else (x[0], x[0]) for x in lang_dirs] for src, 
tgt in lang_dirs: assert src is not None or data_category == "mono_dae", ( f"error: src={src}, " "tgt={tgt} for data_category={data_category}" ) # logger.info(f"preparing param for {data_category}: {src} - {tgt}") key = self.get_dataset_key(data_category, src, tgt) data_path = self.get_split_data_path( paths, epoch, shard_epoch, split_num_shards_dict[key] ) param_list.append( { "key": key, "data_path": data_path, "split": split, "src": src, "src_dict": self.get_source_dictionary(src) if src and data_category != "mono_dae" else None, "tgt": tgt, "tgt_dict": self.get_target_dictionary(tgt), "data_category": data_category, "langtok_spec": lang_tok_spec, } ) return param_list def get_train_dataset_sizes( self, data_param_list, datasets, epoch, shard_epoch=None ): num_shards = [ self.get_split_num_data_shards(param["split"])[param["key"]] for param in data_param_list ] data_sizes = [] for (key, d), num_shard in zip(datasets, num_shards): my_data_sizes = self._training_data_sizes[key] shard_ind = self.get_shard_id(num_shard, epoch, shard_epoch) if shard_ind not in my_data_sizes: my_data_sizes[shard_ind] = len(d) known_size = max(my_data_sizes.values()) data_sizes.append( # If we don't know the data size of the shard yet, # use the the max known data size to approximate. # Note that we preprocess shards by a designated shard size # and put any remaining data at the end into the last shard so # the max shard size approximation is almost correct before loading # the last shard; after loading the last shard, it will have the # exact data sizes of the whole data size. (key, sum(my_data_sizes.get(i, known_size) for i in range(num_shard))) ) logger.info( f"estimated total data sizes of all shards used in sampling ratios: {data_sizes}. " "Note that if the data a shard has not been loaded yet, use the max known data size to approximate" ) return [s for _, s in data_sizes] def get_train_sampling_ratios( self, data_param_list, datasets, epoch=1, shard_epoch=None ): data_sizes = self.get_train_dataset_sizes( data_param_list, datasets, epoch, shard_epoch ) sampling_func = self.sampling_method.sampling_method_selector() sample_ratios = sampling_func(data_sizes) if sampling_func is not None else None return sample_ratios def get_sampling_ratios(self, data_param_list, datasets, epoch, shard_epoch=None): if self.args.sampling_weights_from_file: weights = load_sampling_weights(self.args.sampling_weights_from_file) sample_ratios = [weights[k] for k, _ in datasets] logger.info( "| ignoring --sampling-weights when loadding sampling weights " f"from file {self.args.sampling_weights_from_file}" ) elif self.args.sampling_weights: sample_ratios = [self.args.sampling_weights[k] for k, _ in datasets] else: sample_ratios = self.get_train_sampling_ratios( data_param_list, datasets, epoch, shard_epoch ) if sample_ratios is not None: logger.info( "| Upsample ratios: {}".format( list(zip(map(lambda x: x["key"], data_param_list), sample_ratios)) ) ) assert len(sample_ratios) == len(datasets) return sample_ratios def load_split_datasets( self, split, training, epoch=1, combine=False, shard_epoch=None, **kwargs ): data_param_list = self.get_split_data_param_list( split, epoch, shard_epoch=shard_epoch ) langpairs_sharing_datasets = ( {} if self.args.enable_reservsed_directions_shared_datasets else None ) datasets = [ ( param["key"], self.load_a_dataset( combine=combine, langpairs_sharing_datasets=langpairs_sharing_datasets, **param, ), ) for param in data_param_list ] return datasets, data_param_list def 
load_into_concat_dataset(self, split, datasets, data_param_list): if self.args.lang_tok_replacing_bos_eos: # TODO: to investigate why TransformEosLangPairDataset doesn't work with ConcatDataset return SampledMultiDataset( OrderedDict(datasets), sampling_ratios=None, eval_key=None, collate_format=CollateFormat.single, virtual_size=None, split=split, ) return ConcatDataset([d for _, d in datasets]) def load_sampled_multi_epoch_dataset( self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs ): datasets, data_param_list = self.load_split_datasets( split, training, epoch, combine, shard_epoch=shard_epoch, **kwargs ) if training and split == getattr(self.args, "train_subset", None): sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch) return SampledMultiEpochDataset( OrderedDict(datasets), epoch=epoch, shard_epoch=shard_epoch, # valid and test datasets will be degenerate to concating datasets: sampling_ratios=sample_ratios, eval_key=None, collate_format=CollateFormat.single, virtual_size=self.args.virtual_data_size, split=split, virtual_epoch_size=self.args.virtual_epoch_size, # if not using lang_tok altering, simplified to use the same collater shared_collater=self._shared_collater(), ) else: return self.load_into_concat_dataset(split, datasets, data_param_list) def load_sampled_multi_dataset( self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs ): datasets, data_param_list = self.load_split_datasets( split, training, epoch, combine, shard_epoch=shard_epoch, **kwargs ) if training and split == getattr(self.args, "train_subset", None): sample_ratios = self.get_sampling_ratios(data_param_list, datasets, epoch) return SampledMultiDataset( OrderedDict(datasets), epoch=epoch, # valid and test datasets will be degerate to concating datasets: sampling_ratios=sample_ratios, eval_key=None, collate_format=CollateFormat.single, virtual_size=self.args.virtual_data_size, split=split, # if not using lang_tok altering, simplified to use the same collater shared_collater=self._shared_collater(), ) else: return self.load_into_concat_dataset(split, datasets, data_param_list) def load_dataset( self, split, training, epoch=0, combine=False, shard_epoch=None, **kwargs ): if self.args.virtual_epoch_size is None: return self.load_sampled_multi_dataset( split, training, epoch, combine, shard_epoch, **kwargs ) else: return self.load_sampled_multi_epoch_dataset( split, training, epoch, combine, shard_epoch, **kwargs )
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/multilingual/multilingual_data_manager.py
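Shard rotation in the manager above is plain modular arithmetic: get_shard_id maps a 1-based epoch (or shard_epoch) onto one of the sharded data paths, and estimate_global_pass_epoch converts a virtual epoch number into that shard_epoch. A standalone copy of that bookkeeping, with illustrative sizes:

import math


def get_shard_id(num_shards, epoch, shard_epoch=None):
    """Which data shard a 1-based epoch reads, cycling round-robin over the shards."""
    shard = epoch if shard_epoch is None else shard_epoch
    return (shard - 1) % num_shards


def estimate_shard_epoch(epoch, virtual_data_size, virtual_epoch_size):
    """Mirror of estimate_global_pass_epoch: virtual epochs per shard pass."""
    virtual_epochs_per_shard = math.ceil(virtual_data_size / virtual_epoch_size)
    return (epoch - 1) // virtual_epochs_per_shard + 1


# with 3 shards and 4 virtual epochs per pass, epochs 1-4 stay on shard 0,
# epochs 5-8 move to shard 1, epochs 9-12 to shard 2, and so on
for epoch in range(1, 10):
    shard_epoch = estimate_shard_epoch(epoch, virtual_data_size=400, virtual_epoch_size=100)
    print(epoch, shard_epoch, get_shard_id(3, epoch, shard_epoch))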
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .huffman_coder import HuffmanCodeBuilder, HuffmanCoder
from .huffman_mmap_indexed_dataset import (
    HuffmanMMapIndex,
    HuffmanMMapIndexedDataset,
    HuffmanMMapIndexedDatasetBuilder,
    vocab_file_path,
)

__all__ = [
    "HuffmanCoder",
    "HuffmanCodeBuilder",
    "HuffmanMMapIndexedDatasetBuilder",
    "HuffmanMMapIndexedDataset",
    "HuffmanMMapIndex",
    "vocab_file_path",
]
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/huffman/__init__.py
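A minimal end-to-end use of the coder exported here, assuming a fairseq checkout with the bitarray dependency installed. The builder calls (increment, build_code) are taken from the usage visible in huffman_coder.py later in this dump; the tokens and counts below are made up.

from fairseq.data.huffman import HuffmanCodeBuilder

# count symbol occurrences, then build a prefix code where frequent symbols get short bit strings
builder = HuffmanCodeBuilder()
for token, count in [("the", 500), ("cat", 20), ("sat", 5), ("quantization", 1)]:
    builder.increment(token, count)
coder = builder.build_code()

encoded = coder.encode(["the", "cat", "sat"])  # bit-packed bytes, bit-padded to a whole byte
decoded = [node.symbol for node in coder.decode(encoded)]
assert decoded == ["the", "cat", "sat"]
print(len(encoded), "bytes for", decoded)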
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import mmap import os import shutil import struct import typing as tp from functools import lru_cache import numpy as np import torch from fairseq.data import indexed_dataset from fairseq.data.huffman import HuffmanCoder from fairseq.file_io import PathManager class HuffmanMMapIndex: """ keep an index of the offsets in the huffman binary file. First a header, then the list of sizes (num tokens) for each instance and finally the addresses of each instance. """ _HDR_MAGIC = b"HUFFIDX\x00\x00" _VERSION = 1 @classmethod def writer(cls, path: str, data_len: int): class _Writer: def __enter__(self): self._file = open(path, "wb") # write header (magic + version) self._file.write(cls._HDR_MAGIC) self._file.write(struct.pack("<Q", cls._VERSION)) self._file.write(struct.pack("<Q", data_len)) return self def write(self, sizes, pointers): # add number of items in the index to the header self._file.write(struct.pack("<Q", len(sizes))) # write sizes sizes = np.array(sizes, dtype=np.int32) self._file.write(sizes.tobytes(order="C")) del sizes # write address pointers pointers = np.array(pointers, dtype=np.int64) self._file.write(pointers.tobytes(order="C")) del pointers def __exit__(self, exc_type, exc_val, exc_tb): self._file.close() return _Writer() def __init__(self, path): with open(path, "rb") as stream: # read headers magic_test = stream.read(9) assert self._HDR_MAGIC == magic_test, ( "Index file doesn't match expected format. " "Make sure that --dataset-impl is configured properly." ) (version,) = struct.unpack("<Q", stream.read(8)) assert ( self._VERSION == version ), "Unexpected file version f{version} != code version f{self._VERSION}" # read length of data file (self._data_len,) = struct.unpack("<Q", stream.read(8)) # read number of items in data file/index (self._len,) = struct.unpack("<Q", stream.read(8)) offset = stream.tell() indexed_dataset._warmup_mmap_file(path) self._bin_buffer_mmap = np.memmap(path, mode="r", order="C") self._bin_buffer = memoryview(self._bin_buffer_mmap) self._sizes = np.frombuffer( self._bin_buffer, dtype=np.int32, count=self._len, offset=offset ) self._pointers = np.frombuffer( self._bin_buffer, dtype=np.int64, count=self._len, offset=offset + self._sizes.nbytes, ) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap def __iter__(self): for i in range(self._len): yield self[i] @property def data_len(self): return self._data_len @property def sizes(self): return self._sizes @lru_cache(maxsize=8) def __getitem__(self, i): return self._pointers[i], self._sizes[i] def __len__(self): return self._len def vocab_file_path(prefix_path): return prefix_path + ".vocab" class HuffmanMMapIndexedDataset(torch.utils.data.Dataset): """ an indexed dataset that use mmap and memoryview to access data from disk that was compressed with a HuffmanCoder. 
""" def __init__(self, prefix_path): super().__init__() self._prefix_path = None self._index = None self._bin_buffer = None self._coder = None self._file = None self._bin_buffer_mmap = None self._do_init(prefix_path) def __getstate__(self): return self._prefix_path def __setstate__(self, state): self._do_init(state) def _do_init(self, prefix_path): self._prefix_path = prefix_path self._index = HuffmanMMapIndex( indexed_dataset.index_file_path(self._prefix_path) ) self._coder = HuffmanCoder.from_file(vocab_file_path(self._prefix_path)) indexed_dataset._warmup_mmap_file( indexed_dataset.data_file_path(self._prefix_path) ) self._file = os.open( indexed_dataset.data_file_path(self._prefix_path), os.O_RDONLY ) self._bin_buffer_mmap = mmap.mmap( self._file, self._index.data_len, access=mmap.ACCESS_READ, ) self._bin_buffer = memoryview(self._bin_buffer_mmap) def __del__(self): del self._bin_buffer if self._file: os.close(self._file) del self._index def __len__(self): return len(self._index) def _decode(self, i): ptr, _ = self._index[i] if i == 0: raw_bytes = self._bin_buffer[:ptr] else: (prev_ptr, _) = self._index[i - 1] raw_bytes = self._bin_buffer[prev_ptr:ptr] return self._coder.decode(raw_bytes.tobytes()) @lru_cache(maxsize=8) def __getitem__(self, i): nodes = self._decode(i) return torch.tensor([n.id for n in nodes], dtype=torch.int64) def __iter__(self): for idx in range(len(self)): yield self[idx] def get_symbols(self, i): nodes = self._decode(i) for n in nodes: yield n.symbol @property def sizes(self): return self._index.sizes @property def supports_prefetch(self): return False @property def coder(self): return self._coder @staticmethod def exists(prefix_path): return ( PathManager.exists(indexed_dataset.index_file_path(prefix_path)) and PathManager.exists(indexed_dataset.data_file_path(prefix_path)) and PathManager.exists(vocab_file_path(prefix_path)) ) class HuffmanMMapIndexedDatasetBuilder: """ Helper to build a memory mapped datasets with a huffman encoder. You can either open/close this manually or use it as a ContextManager. Provide your own coder, it will then be stored alongside the dataset. The builder will first write the vocab file, then open the binary file so you can stream into it, finally the index will be written when the builder is closed (your index should fit in memory). """ def __init__(self, path_prefix: str, coder: HuffmanCoder) -> None: self._path_prefix = path_prefix self._coder = coder self._sizes = [] self._ptrs = [] self._data_len = 0 def open(self): self._coder.to_file(vocab_file_path(self._path_prefix)) self._data_file = open(indexed_dataset.data_file_path(self._path_prefix), "wb") def __enter__(self) -> "HuffmanMMapIndexedDatasetBuilder": self.open() return self def add_item(self, tokens: tp.List[str]) -> None: """ add a list of tokens to the dataset, they will compressed with the provided coder before being written to file. """ encoded = self._coder.encode(tokens) code_len = len(encoded) last_ptr = 0 if len(self._ptrs) > 0: last_ptr = self._ptrs[-1] self._sizes.append(len(tokens)) self._ptrs.append(last_ptr + code_len) self._data_len += code_len self._data_file.write(encoded) def append(self, other_dataset_path_prefix: str) -> None: """ append an existing dataset. Beware, if it wasn't built with the same coder, you are in trouble. 
""" other_index = HuffmanMMapIndex( indexed_dataset.index_file_path(other_dataset_path_prefix) ) for (ptr, size) in other_index: self._ptrs.append(ptr + self._data_len) self._sizes.append(size) # Concatenate data with open(indexed_dataset.data_file_path(other_dataset_path_prefix), "rb") as f: shutil.copyfileobj(f, self._data_file) self._data_len += other_index.data_len def close(self): self._data_file.close() with HuffmanMMapIndex.writer( indexed_dataset.index_file_path(self._path_prefix), self._data_len ) as index: index.write(self._sizes, self._ptrs) def __exit__(self, exc_type, exc_val, exc_tb) -> None: self.close()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/huffman/huffman_mmap_indexed_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import re import typing as tp from collections import Counter, deque from dataclasses import dataclass from bitarray import bitarray, util from fairseq.data import Dictionary # basically we have to write to addressable bytes for the memory mapped # dataset loader. Sentences that get encoded to a length that is not a # multiple of BLOCKSIZE (a byte) will be padded to fit. (see _pad in the coder) BLOCKSIZE = 8 class HuffmanCoder: def __init__( self, root: "HuffmanNode", bos="<s>", pad="<pad>", eos="</s>", unk="<unk>" ): self.root = root self.table = root.code_table() self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos def _pad(self, a: bitarray) -> bitarray: """ bitpadding, 1 then 0. If the array is already a multiple of blocksize, we add a full block. """ pad_len = BLOCKSIZE - (len(a) % BLOCKSIZE) - 1 padding = bitarray("1" + "0" * pad_len) return a + padding def _unpad(self, a: bitarray) -> bitarray: """ remove the bitpadding. There will be a set of 0s preceded by a 1 at the end of the bitarray, we remove that """ # count the 0 padding at the end until we find the first 1 # we want to remove the one too remove_cnt = util.rindex(a, 1) return a[:remove_cnt] def encode(self, iter: tp.List[str]) -> bytes: """ encode a list of tokens a return bytes. We use bitpadding to make sure the encoded bits fit in bytes. """ a = bitarray() for token in iter: code = self.get_code(token) if code is None: if self.unk_word is None: raise Exception(f"unknown token {token} cannot be encoded.") else: token = self.unk_word a = a + self.get_code(token) return self._pad(a).tobytes() def decode(self, bits: bytes) -> tp.Iterator["HuffmanNode"]: """ take bitpadded bytes and decode it to a set of leaves. 
You can then use each node to find the symbol/id """ a = bitarray() a.frombytes(bits) return self.root.decode(self._unpad(a)) def get_code(self, symbol: str) -> tp.Optional[bitarray]: node = self.get_node(symbol) return None if node is None else node.code def get_node(self, symbol: str) -> "HuffmanNode": return self.table.get(symbol) @classmethod def from_file( cls, filename: str, bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", ) -> "HuffmanCoder": builder = HuffmanCodeBuilder.from_file(filename) return builder.build_code(bos=bos, pad=pad, eos=eos, unk=unk) def to_file(self, filename, sep="\t"): nodes = list(self.table.values()) nodes.sort(key=lambda n: n.id) with open(filename, "w", encoding="utf-8") as output: for n in nodes: output.write(f"{n.symbol}{sep}{n.count}\n") def __iter__(self): for n in self.table.values(): yield n def merge(self, other_coder: "HuffmanCoder") -> "HuffmanCoder": builder = HuffmanCodeBuilder() for n in self: builder.increment(n.symbol, n.count) for n in other_coder: builder.increment(n.symbol, n.count) return builder.build_code() def __eq__(self, other: "HuffmanCoder") -> bool: return self.table == other.table def __len__(self) -> int: return len(self.table) def __contains__(self, sym: str) -> bool: return sym in self.table def to_dictionary(self) -> Dictionary: dictionary = Dictionary(bos=self.bos, unk=self.unk, pad=self.pad, eos=self.eos) for n in self: dictionary.add_symbol(n.symbol, n=n.count) dictionary.finalize() return dictionary @dataclass class HuffmanNode: """ a node in a Huffman tree """ id: int count: int symbol: tp.Optional[str] = None left: tp.Optional["HuffmanNode"] = None right: tp.Optional["HuffmanNode"] = None code: tp.Optional[bitarray] = None def is_leaf(self) -> bool: return self.left is None and self.right is None def code_table(self, prefix: tp.Optional[bitarray] = None) -> tp.Dict[str, "HuffmanNode"]: defaulted_prefix = prefix if prefix is not None else bitarray() if self.is_leaf(): self.code = ( defaulted_prefix if len(defaulted_prefix) > 0 else bitarray("0") ) # leaf could be the root if there is only one symbol return {self.symbol: self} codes_right = self.right.code_table(defaulted_prefix + bitarray([0])) codes_left = self.left.code_table(defaulted_prefix + bitarray([1])) return {**codes_left, **codes_right} def decode(self, bits: bitarray) -> tp.Iterator["HuffmanNode"]: current_node = self for bit in bits: if bit == 0: # go right current_node = current_node.right else: # go left current_node = current_node.left if current_node is None: # we shouldn't be on a leaf here raise Exception("fell off a leaf") if current_node.is_leaf(): yield current_node current_node = self if current_node != self: raise Exception("couldn't decode all the bits") class HuffmanCodeBuilder: """ build a dictionary with occurence count and then build the Huffman code for it. 
""" def __init__(self): self.symbols = Counter() def add_symbols(self, *syms) -> None: self.symbols.update(syms) def increment(self, symbol: str, cnt: int) -> None: self.symbols[symbol] += cnt @classmethod def from_file(cls, filename): c = cls() with open(filename, "r", encoding="utf-8") as input: for line in input: split = re.split(r"[\s]+", line) c.increment(split[0], int(split[1])) return c def to_file(self, filename, sep="\t"): with open(filename, "w", encoding="utf-8") as output: for (tok, cnt) in self.symbols.most_common(): output.write(f"{tok}{sep}{cnt}\n") def _smallest(self, q1: deque, q2: deque) -> HuffmanNode: if len(q1) == 0: return q2.pop() if len(q2) == 0: return q1.pop() if q1[-1].count < q2[-1].count: return q1.pop() return q2.pop() def __add__(self, c: "HuffmanCodeBuilder") -> "HuffmanCodeBuilder": new_c = self.symbols + c.symbols new_b = HuffmanCodeBuilder() new_b.symbols = new_c return new_b def build_code( self, bos="<s>", pad="<pad>", eos="</s>", unk="<unk>", ) -> HuffmanCoder: assert len(self.symbols) > 0, "cannot build code from empty list of symbols" if self.symbols[bos] == 0: self.add_symbols(bos) if self.symbols[pad] == 0: self.add_symbols(pad) if self.symbols[eos] == 0: self.add_symbols(eos) if self.symbols[unk] == 0: self.add_symbols(unk) node_id = 0 leaves_queue = deque( [ HuffmanNode(symbol=symbol, count=count, id=idx) for idx, (symbol, count) in enumerate(self.symbols.most_common()) ] ) # left are the most common, right are the least common if len(leaves_queue) == 1: root = leaves_queue.pop() root.id = 0 return HuffmanCoder(root) nodes_queue = deque() while len(leaves_queue) > 0 or len(nodes_queue) != 1: # get the lowest two nodes at the head of each queue node1 = self._smallest(leaves_queue, nodes_queue) node2 = self._smallest(leaves_queue, nodes_queue) # add new node nodes_queue.appendleft( HuffmanNode( count=node1.count + node2.count, left=node1, right=node2, id=node_id ) ) node_id += 1 # we are left with the root return HuffmanCoder(nodes_queue.pop(), bos=bos, pad=pad, eos=eos, unk=unk)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/huffman/huffman_coder.py
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory.abs import csv import logging import os.path as op from typing import List, Optional import numpy as np import torch from fairseq.data import Dictionary from fairseq.data.audio.speech_to_text_dataset import ( S2TDataConfig ) from fairseq.data.audio.text_to_speech_dataset import ( TextToSpeechDataset, TextToSpeechDatasetCreator ) logger = logging.getLogger(__name__) class FrmTextToSpeechDataset(TextToSpeechDataset): def __init__( self, split: str, is_train_split: bool, data_cfg: S2TDataConfig, audio_paths: List[str], n_frames: List[int], src_texts: Optional[List[str]] = None, tgt_texts: Optional[List[str]] = None, speakers: Optional[List[str]] = None, src_langs: Optional[List[str]] = None, tgt_langs: Optional[List[str]] = None, ids: Optional[List[str]] = None, tgt_dict: Optional[Dictionary] = None, pre_tokenizer=None, bpe_tokenizer=None, n_frames_per_step=1, speaker_to_id=None, do_chunk=False, chunk_bound=-1, chunk_init=50, chunk_incr=5, add_eos=True, dedup=True, ref_fpu=-1 ): # It assumes texts are encoded at a fixed frame-rate super().__init__( split=split, is_train_split=is_train_split, data_cfg=data_cfg, audio_paths=audio_paths, n_frames=n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, n_frames_per_step=n_frames_per_step, speaker_to_id=speaker_to_id ) self.do_chunk = do_chunk self.chunk_bound = chunk_bound self.chunk_init = chunk_init self.chunk_incr = chunk_incr self.add_eos = add_eos self.dedup = dedup self.ref_fpu = ref_fpu self.chunk_size = -1 if do_chunk: assert self.chunk_incr >= 0 assert self.pre_tokenizer is None def __getitem__(self, index): index, source, target, speaker_id, _, _, _ = super().__getitem__(index) if target[-1].item() == self.tgt_dict.eos_index: target = target[:-1] fpu = source.size(0) / target.size(0) # frame-per-unit fps = self.n_frames_per_step assert ( self.ref_fpu == -1 or abs((fpu * fps - self.ref_fpu) / self.ref_fpu) < 0.1 ), f"{fpu*fps} != {self.ref_fpu}" # only chunk training split if self.is_train_split and self.do_chunk and self.chunk_size > 0: lang = target[:int(self.data_cfg.prepend_tgt_lang_tag)] text = target[int(self.data_cfg.prepend_tgt_lang_tag):] size = len(text) chunk_size = min(self.chunk_size, size) chunk_start = np.random.randint(size - chunk_size + 1) text = text[chunk_start:chunk_start+chunk_size] target = torch.cat((lang, text), 0) f_size = int(np.floor(chunk_size * fpu)) f_start = int(np.floor(chunk_start * fpu)) assert(f_size > 0) source = source[f_start:f_start+f_size, :] if self.dedup: target = torch.unique_consecutive(target) if self.add_eos: eos_idx = self.tgt_dict.eos_index target = torch.cat((target, torch.LongTensor([eos_idx])), 0) return index, source, target, speaker_id def set_epoch(self, epoch): if self.is_train_split and self.do_chunk: old = self.chunk_size self.chunk_size = self.chunk_init + epoch * self.chunk_incr if self.chunk_bound > 0: self.chunk_size = min(self.chunk_size, self.chunk_bound) logger.info(( f"{self.split}: setting chunk size " f"from {old} to {self.chunk_size}" )) class FrmTextToSpeechDatasetCreator(TextToSpeechDatasetCreator): # inherit for key names @classmethod def from_tsv( 
cls, root: str, data_cfg: S2TDataConfig, split: str, tgt_dict, pre_tokenizer, bpe_tokenizer, is_train_split: bool, n_frames_per_step: int, speaker_to_id, do_chunk: bool = False, chunk_bound: int = -1, chunk_init: int = 50, chunk_incr: int = 5, add_eos: bool = True, dedup: bool = True, ref_fpu: float = -1 ) -> FrmTextToSpeechDataset: tsv_path = op.join(root, f"{split}.tsv") if not op.isfile(tsv_path): raise FileNotFoundError(f"Dataset not found: {tsv_path}") with open(tsv_path) as f: reader = csv.DictReader( f, delimiter="\t", quotechar=None, doublequote=False, lineterminator="\n", quoting=csv.QUOTE_NONE, ) s = [dict(e) for e in reader] assert len(s) > 0 ids = [ss[cls.KEY_ID] for ss in s] audio_paths = [ op.join(data_cfg.audio_root, ss[cls.KEY_AUDIO]) for ss in s ] n_frames = [int(ss[cls.KEY_N_FRAMES]) for ss in s] tgt_texts = [ss[cls.KEY_TGT_TEXT] for ss in s] src_texts = [ss.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for ss in s] speakers = [ss.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for ss in s] src_langs = [ss.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for ss in s] tgt_langs = [ss.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for ss in s] return FrmTextToSpeechDataset( split=split, is_train_split=is_train_split, data_cfg=data_cfg, audio_paths=audio_paths, n_frames=n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, n_frames_per_step=n_frames_per_step, speaker_to_id=speaker_to_id, do_chunk=do_chunk, chunk_bound=chunk_bound, chunk_init=chunk_init, chunk_incr=chunk_incr, add_eos=add_eos, dedup=dedup, ref_fpu=ref_fpu )
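# --- illustrative sketch; not part of the original fairseq file -------------
# set_epoch() above grows the training chunk size linearly with the epoch and
# caps it at chunk_bound. A small standalone helper that reproduces that
# schedule, with arbitrary example numbers.
def _chunk_size_schedule(chunk_init=50, chunk_incr=5, chunk_bound=80, n_epochs=10):
    schedule = []
    for epoch in range(1, n_epochs + 1):
        size = chunk_init + epoch * chunk_incr
        if chunk_bound > 0:
            size = min(size, chunk_bound)
        schedule.append(size)
    return schedule

# _chunk_size_schedule() == [55, 60, 65, 70, 75, 80, 80, 80, 80, 80]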
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/frm_text_to_speech_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from pathlib import Path from typing import Dict, List, Optional, NamedTuple import torch from fairseq.data import ( ConcatDataset, Dictionary, ResamplingDataset, data_utils as fairseq_data_utils, ) from fairseq.data.audio.speech_to_text_dataset import ( SpeechToTextDataset, S2TDataConfig, SpeechToTextDatasetCreator, ) logger = logging.getLogger(__name__) class S2TJointDataConfig(S2TDataConfig): """Wrapper class for data config YAML""" @property def src_vocab_filename(self): """fairseq vocabulary file under data root""" return self.config.get("src_vocab_filename", "src_dict.txt") @property def src_pre_tokenizer(self) -> Dict: """Pre-tokenizer to apply before subword tokenization. Returning a dictionary with `tokenizer` providing the tokenizer name and the other items providing the tokenizer-specific arguments. Tokenizers are defined in `fairseq.data.encoders.*`""" return self.config.get("src_pre_tokenizer", {"tokenizer": None}) @property def src_bpe_tokenizer(self) -> Dict: """Subword tokenizer to apply on source text after pre-tokenization. Returning a dictionary with `bpe` providing the tokenizer name and the other items providing the tokenizer-specific arguments. Tokenizers are defined in `fairseq.data.encoders.*`""" return self.config.get("src_bpe_tokenizer", {"bpe": None}) @property def prepend_tgt_lang_tag_no_change(self) -> bool: """Prepend target lang ID token as the prev_output_tokens BOS (e.g. for to-many multilingual setting). No change needed during inference. """ return self.config.get("prepend_tgt_lang_tag_no_change", False) class SpeechToTextJointDatasetItem(NamedTuple): index: int source: torch.Tensor target: Optional[torch.Tensor] = None src_txt_tokens: Optional[torch.Tensor] = None tgt_lang_tag: Optional[int] = None class SpeechToTextJointDataset(SpeechToTextDataset): def __init__( self, split: str, is_train_split: bool, cfg: S2TJointDataConfig, audio_paths: List[str], n_frames: List[int], src_texts: Optional[List[str]] = None, tgt_texts: Optional[List[str]] = None, speakers: Optional[List[str]] = None, src_langs: Optional[List[str]] = None, tgt_langs: Optional[List[str]] = None, ids: Optional[List[str]] = None, tgt_dict: Optional[Dictionary] = None, src_dict: Optional[Dictionary] = None, pre_tokenizer=None, bpe_tokenizer=None, src_pre_tokenizer=None, src_bpe_tokenizer=None, ): super().__init__( split, is_train_split, cfg, audio_paths, n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, ) self.src_dict = src_dict self.src_pre_tokenizer = src_pre_tokenizer self.src_bpe_tokenizer = src_bpe_tokenizer def get_tokenized_src_text(self, index: int): text = self.tokenize(self.src_pre_tokenizer, self.src_texts[index]) text = self.tokenize(self.src_bpe_tokenizer, text) return text def __getitem__(self, index: int) -> SpeechToTextJointDatasetItem: s2t_dataset_item = super().__getitem__(index) src_tokens = None if self.src_texts is not None and self.src_dict is not None: src_tokens = self.get_tokenized_src_text(index) src_tokens = self.src_dict.encode_line( src_tokens, add_if_not_exist=False, append_eos=True ).long() tgt_lang_tag = None if self.cfg.prepend_tgt_lang_tag_no_change: # prepend_tgt_lang_tag_no_change: modify prev_output_tokens instead 
tgt_lang_tag = self.get_lang_tag_idx(self.tgt_langs[index], self.tgt_dict) return SpeechToTextJointDatasetItem( index=index, source=s2t_dataset_item.source, target=s2t_dataset_item.target, src_txt_tokens=src_tokens, tgt_lang_tag=tgt_lang_tag, ) def __len__(self): return self.n_samples def collater(self, samples: List[SpeechToTextJointDatasetItem]) -> Dict: s2t_out = super().collater(samples, return_order=True) if s2t_out == {}: return s2t_out net_input, order = s2t_out["net_input"], s2t_out["order"] if self.src_texts is not None and self.src_dict is not None: src_txt_tokens = fairseq_data_utils.collate_tokens( [x.src_txt_tokens for x in samples], self.src_dict.pad(), self.src_dict.eos(), left_pad=False, move_eos_to_beginning=False, ) src_txt_tokens = src_txt_tokens.index_select(0, order) src_txt_lengths = torch.tensor( [x.src_txt_tokens.size()[0] for x in samples], dtype=torch.long ).index_select(0, order) net_input["src_txt_tokens"] = src_txt_tokens net_input["src_txt_lengths"] = src_txt_lengths if self.tgt_texts is not None and samples[0].tgt_lang_tag is not None: for i in range(len(samples)): net_input["prev_output_tokens"][i][0] = samples[order[i]].tgt_lang_tag out = { "id": s2t_out["id"], "net_input": net_input, "target": s2t_out["target"], "target_lengths": s2t_out["target_lengths"], "ntokens": s2t_out["ntokens"], "nsentences": len(samples), } return out class SpeechToTextJointDatasetCreator(SpeechToTextDatasetCreator): @classmethod def _from_list( cls, split_name: str, is_train_split, samples: List[Dict], cfg: S2TJointDataConfig, tgt_dict, src_dict, pre_tokenizer, bpe_tokenizer, src_pre_tokenizer, src_bpe_tokenizer, ) -> SpeechToTextJointDataset: audio_root = Path(cfg.audio_root) ids = [s[cls.KEY_ID] for s in samples] audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples] n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples] tgt_texts = [s[cls.KEY_TGT_TEXT] for s in samples] src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples] speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples] src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples] tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples] return SpeechToTextJointDataset( split_name, is_train_split, cfg, audio_paths, n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, src_dict=src_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, src_pre_tokenizer=src_pre_tokenizer, src_bpe_tokenizer=src_bpe_tokenizer, ) @classmethod def _from_tsv( cls, root: str, cfg: S2TJointDataConfig, split: str, tgt_dict, src_dict, is_train_split: bool, pre_tokenizer, bpe_tokenizer, src_pre_tokenizer, src_bpe_tokenizer, ) -> SpeechToTextJointDataset: samples = cls._load_samples_from_tsv(root, split) return cls._from_list( split, is_train_split, samples, cfg, tgt_dict, src_dict, pre_tokenizer, bpe_tokenizer, src_pre_tokenizer, src_bpe_tokenizer, ) @classmethod def from_tsv( cls, root: str, cfg: S2TJointDataConfig, splits: str, tgt_dict, src_dict, pre_tokenizer, bpe_tokenizer, src_pre_tokenizer, src_bpe_tokenizer, is_train_split: bool, epoch: int, seed: int, ) -> SpeechToTextJointDataset: datasets = [ cls._from_tsv( root, cfg, split, tgt_dict, src_dict, is_train_split, pre_tokenizer, bpe_tokenizer, src_pre_tokenizer, src_bpe_tokenizer, ) for split in splits.split(",") ] if is_train_split and len(datasets) > 1 and cfg.sampling_alpha != 1.0: # temperature-based 
sampling size_ratios = cls.get_size_ratios(datasets, alpha=cfg.sampling_alpha) datasets = [ ResamplingDataset( d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0) ) for r, d in zip(size_ratios, datasets) ] return ConcatDataset(datasets) if len(datasets) > 1 else datasets[0]
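# --- illustrative sketch; not part of the original fairseq file -------------
# S2TJointDataConfig adds a handful of source-side keys on top of S2TDataConfig.
# A minimal config with hypothetical filenames, written to a temporary YAML file
# and read back through the properties defined above.
def _demo_joint_data_config():
    import tempfile

    yaml_text = (
        "vocab_filename: dict.txt\n"
        "src_vocab_filename: src_dict.txt\n"
        "src_pre_tokenizer:\n"
        "  tokenizer: null\n"
        "src_bpe_tokenizer:\n"
        "  bpe: sentencepiece\n"
        "  sentencepiece_model: spm.model\n"
        "prepend_tgt_lang_tag_no_change: true\n"
    )
    cfg_path = Path(tempfile.mkdtemp()) / "config.yaml"
    cfg_path.write_text(yaml_text)

    cfg = S2TJointDataConfig(cfg_path)
    assert cfg.src_vocab_filename == "src_dict.txt"
    assert cfg.src_bpe_tokenizer["bpe"] == "sentencepiece"
    assert cfg.prepend_tgt_lang_tag_no_change is True
    return cfg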
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/speech_to_text_joint_dataset.py
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from pathlib import Path from typing import Dict, Optional class S2TDataConfig(object): """Wrapper class for data config YAML""" def __init__(self, yaml_path: Path): try: import yaml except ImportError: print("Please install PyYAML: pip install PyYAML") self.config = {} if yaml_path.is_file(): try: with open(yaml_path) as f: self.config = yaml.load(f, Loader=yaml.FullLoader) except Exception as e: raise Exception( f"Failed to load config from {yaml_path.as_posix()}: {e}" ) else: raise FileNotFoundError(f"{yaml_path.as_posix()} not found") self.root = yaml_path.parent def _auto_convert_to_abs_path(self, x): if isinstance(x, str): if not Path(x).exists() and (self.root / x).exists(): return (self.root / x).as_posix() elif isinstance(x, dict): return {k: self._auto_convert_to_abs_path(v) for k, v in x.items()} return x @property def vocab_filename(self): """fairseq vocabulary file under data root""" return self.config.get("vocab_filename", "dict.txt") @property def speaker_set_filename(self): """fairseq vocabulary file under data root""" return self.config.get("speaker_set_filename", None) @property def shuffle(self) -> bool: """Shuffle dataset samples before batching""" return self.config.get("shuffle", False) @property def pre_tokenizer(self) -> Dict: """Pre-tokenizer to apply before subword tokenization. Returning a dictionary with `tokenizer` providing the tokenizer name and the other items providing the tokenizer-specific arguments. Tokenizers are defined in `fairseq.data.encoders.*`""" tokenizer = self.config.get("pre_tokenizer", {"tokenizer": None}) return self._auto_convert_to_abs_path(tokenizer) @property def bpe_tokenizer(self) -> Dict: """Subword tokenizer to apply after pre-tokenization. Returning a dictionary with `bpe` providing the tokenizer name and the other items providing the tokenizer-specific arguments. Tokenizers are defined in `fairseq.data.encoders.*`""" tokenizer = self.config.get("bpe_tokenizer", {"bpe": None}) return self._auto_convert_to_abs_path(tokenizer) @property def prepend_tgt_lang_tag(self) -> bool: """Prepend target lang ID token as the target BOS (e.g. for to-many multilingual setting). During inference, this requires `--prefix-size 1` to force BOS to be lang ID token.""" return self.config.get("prepend_tgt_lang_tag", False) @property def input_feat_per_channel(self): """The dimension of input features (per audio channel)""" return self.config.get("input_feat_per_channel", 80) @property def input_channels(self): """The number of channels in the input audio""" return self.config.get("input_channels", 1) @property def sample_rate(self): return self.config.get("sample_rate", 16_000) @property def sampling_alpha(self): """Hyper-parameter alpha = 1/T for temperature-based resampling. (alpha = 1 for no resampling)""" return self.config.get("sampling_alpha", 1.0) @property def use_audio_input(self): """Needed by the dataset loader to see if the model requires raw audio as inputs.""" return self.config.get("use_audio_input", False) @property def use_sample_rate(self): """Needed by the dataset loader to see if the model requires raw audio with specific sample rate as inputs.""" return self.config.get("use_sample_rate", 16000) @property def audio_root(self): """Audio paths in the manifest TSV can be relative and this provides the root path. 
Set this to empty string when using absolute paths.""" return self.config.get("audio_root", "") def get_feature_transforms(self, split, is_train): """Split-specific feature transforms. Allowing train set wildcard `_train`, evaluation set wildcard `_eval` and general wildcard `*` for matching.""" from copy import deepcopy cfg = deepcopy(self.config) _cur = cfg.get("transforms", {}) cur = _cur.get(split) cur = _cur.get("_train") if cur is None and is_train else cur cur = _cur.get("_eval") if cur is None and not is_train else cur cur = _cur.get("*") if cur is None else cur cfg["transforms"] = cur return cfg @property def global_cmvn_stats_npz(self) -> Optional[str]: path = self.config.get("global_cmvn", {}).get("stats_npz_path", None) return self._auto_convert_to_abs_path(path) @property def vocoder(self) -> Optional[Dict[str, str]]: return self.config.get("vocoder", None)
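# --- illustrative sketch; not part of the original fairseq file -------------
# get_feature_transforms() resolves per-split transforms through the "_train",
# "_eval" and "*" wildcards. A small example with example transform names,
# loaded from a temporary YAML file.
def _demo_feature_transform_wildcards():
    import tempfile

    yaml_text = (
        "transforms:\n"
        "  _train:\n"
        "    - utterance_cmvn\n"
        "    - specaugment\n"
        "  '*':\n"
        "    - utterance_cmvn\n"
    )
    cfg_path = Path(tempfile.mkdtemp()) / "config.yaml"
    cfg_path.write_text(yaml_text)
    cfg = S2TDataConfig(cfg_path)

    train_cfg = cfg.get_feature_transforms("train", is_train=True)
    eval_cfg = cfg.get_feature_transforms("dev", is_train=False)
    assert train_cfg["transforms"] == ["utterance_cmvn", "specaugment"]
    assert eval_cfg["transforms"] == ["utterance_cmvn"]
    return train_cfg, eval_cfg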
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/data_cfg.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import csv import io import logging import re from collections import defaultdict from pathlib import Path from typing import Dict, List, Optional from dataclasses import dataclass import numpy as np import torch from fairseq.data import ( ConcatDataset, Dictionary, FairseqDataset, ResamplingDataset, data_utils as fairseq_data_utils, ) from fairseq.data.audio.audio_utils import ( get_fbank, get_waveform, read_from_stored_zip, is_npy_data, is_sf_audio_data, parse_path, FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS, ) from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform from fairseq.data.audio.data_cfg import S2TDataConfig logger = logging.getLogger(__name__) def get_features_from_npy_or_audio(path): ext = Path(path).suffix if ext not in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS: raise ValueError(f'Unsupported file format for "{path}"') return np.load(path) if ext == ".npy" else get_fbank(path) def get_features_or_waveform_from_stored_zip( path, byte_offset, byte_size, need_waveform=False, use_sample_rate=None, ): assert path.endswith(".zip") data = read_from_stored_zip(path, byte_offset, byte_size) f = io.BytesIO(data) if is_npy_data(data): features_or_waveform = np.load(f) elif is_sf_audio_data(data): features_or_waveform = \ get_waveform( f, always_2d=False, output_sample_rate=use_sample_rate )[0] if need_waveform else get_fbank(f) else: raise ValueError(f'Unknown file format for "{path}"') return features_or_waveform def get_features_or_waveform( path: str, need_waveform=False, use_sample_rate=None ): """Get speech features from .npy file or waveform from .wav/.flac file. The file may be inside an uncompressed ZIP file and is accessed via byte offset and length. Args: path (str): File path in the format of "<.npy/.wav/.flac path>" or "<zip path>:<byte offset>:<byte length>". need_waveform (bool): return waveform instead of features. use_sample_rate (int): change sample rate for the input wave file Returns: features_or_waveform (numpy.ndarray): speech features or waveform. """ _path, slice_ptr = parse_path(path) if len(slice_ptr) == 0: if need_waveform: return get_waveform( _path, always_2d=False, output_sample_rate=use_sample_rate )[0] return get_features_from_npy_or_audio(_path) elif len(slice_ptr) == 2: features_or_waveform = get_features_or_waveform_from_stored_zip( _path, slice_ptr[0], slice_ptr[1], need_waveform=need_waveform, use_sample_rate=use_sample_rate ) else: raise ValueError(f"Invalid path: {path}") return features_or_waveform def _collate_frames( frames: List[torch.Tensor], is_audio_input: bool = False ) -> torch.Tensor: """ Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim. 
Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i] """ max_len = max(frame.size(0) for frame in frames) if is_audio_input: out = frames[0].new_zeros((len(frames), max_len)) else: out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1))) for i, v in enumerate(frames): out[i, : v.size(0)] = v return out @dataclass class SpeechToTextDatasetItem(object): index: int source: torch.Tensor target: Optional[torch.Tensor] = None speaker_id: Optional[int] = None class SpeechToTextDataset(FairseqDataset): LANG_TAG_TEMPLATE = "<lang:{}>" def __init__( self, split: str, is_train_split: bool, cfg: S2TDataConfig, audio_paths: List[str], n_frames: List[int], src_texts: Optional[List[str]] = None, tgt_texts: Optional[List[str]] = None, speakers: Optional[List[str]] = None, src_langs: Optional[List[str]] = None, tgt_langs: Optional[List[str]] = None, ids: Optional[List[str]] = None, tgt_dict: Optional[Dictionary] = None, pre_tokenizer=None, bpe_tokenizer=None, n_frames_per_step=1, speaker_to_id=None ): self.split, self.is_train_split = split, is_train_split self.cfg = cfg self.audio_paths, self.n_frames = audio_paths, n_frames self.n_samples = len(audio_paths) assert len(n_frames) == self.n_samples > 0 assert src_texts is None or len(src_texts) == self.n_samples assert tgt_texts is None or len(tgt_texts) == self.n_samples assert speakers is None or len(speakers) == self.n_samples assert src_langs is None or len(src_langs) == self.n_samples assert tgt_langs is None or len(tgt_langs) == self.n_samples assert ids is None or len(ids) == self.n_samples assert (tgt_dict is None and tgt_texts is None) or ( tgt_dict is not None and tgt_texts is not None ) self.src_texts, self.tgt_texts = src_texts, tgt_texts self.src_langs, self.tgt_langs = src_langs, tgt_langs self.speakers = speakers self.tgt_dict = tgt_dict self.check_tgt_lang_tag() self.ids = ids self.shuffle = cfg.shuffle if is_train_split else False self.feature_transforms = CompositeAudioFeatureTransform.from_config_dict( self.cfg.get_feature_transforms(split, is_train_split) ) self.pre_tokenizer = pre_tokenizer self.bpe_tokenizer = bpe_tokenizer self.n_frames_per_step = n_frames_per_step self.speaker_to_id = speaker_to_id self.tgt_lens = self.get_tgt_lens_and_check_oov() logger.info(self.__repr__()) def get_tgt_lens_and_check_oov(self): if self.tgt_texts is None: return [0 for _ in range(self.n_samples)] tgt_lens = [] n_tokens, n_oov_tokens = 0, 0 for i in range(self.n_samples): tokenized = self.get_tokenized_tgt_text(i).split(" ") oov_tokens = [ t for t in tokenized if self.tgt_dict.index(t) == self.tgt_dict.unk_index ] n_tokens += len(tokenized) n_oov_tokens += len(oov_tokens) tgt_lens.append(len(tokenized)) logger.info(f"'{self.split}' has {n_oov_tokens / n_tokens * 100:.2f}% OOV") return tgt_lens def __repr__(self): return ( self.__class__.__name__ + f'(split="{self.split}", n_samples={self.n_samples:_}, ' f"prepend_tgt_lang_tag={self.cfg.prepend_tgt_lang_tag}, " f"shuffle={self.shuffle}, transforms={self.feature_transforms}, " f"n_frames_per_step={self.n_frames_per_step}" ) @classmethod def is_lang_tag(cls, token): pattern = cls.LANG_TAG_TEMPLATE.replace("{}", "(.*)") return re.match(pattern, token) def check_tgt_lang_tag(self): if self.cfg.prepend_tgt_lang_tag: assert self.tgt_langs is not None and self.tgt_dict is not None tgt_lang_tags = [ self.LANG_TAG_TEMPLATE.format(t) for t in set(self.tgt_langs) ] assert all(t in self.tgt_dict 
for t in tgt_lang_tags) @classmethod def tokenize(cls, tokenizer, text: str): return text if tokenizer is None else tokenizer.encode(text) def get_tokenized_tgt_text(self, index: int): text = self.tokenize(self.pre_tokenizer, self.tgt_texts[index]) text = self.tokenize(self.bpe_tokenizer, text) return text def pack_frames(self, feature: torch.Tensor): if self.n_frames_per_step == 1: return feature n_packed_frames = feature.shape[0] // self.n_frames_per_step feature = feature[:self.n_frames_per_step * n_packed_frames] return feature.reshape(n_packed_frames, -1) @classmethod def get_lang_tag_idx(cls, lang: str, dictionary: Dictionary): lang_tag_idx = dictionary.index(cls.LANG_TAG_TEMPLATE.format(lang)) assert lang_tag_idx != dictionary.unk() return lang_tag_idx def __getitem__(self, index: int) -> SpeechToTextDatasetItem: source = get_features_or_waveform( self.audio_paths[index], need_waveform=self.cfg.use_audio_input, use_sample_rate=self.cfg.use_sample_rate, ) if self.feature_transforms is not None: assert not self.cfg.use_audio_input source = self.feature_transforms(source) source = torch.from_numpy(source).float() source = self.pack_frames(source) target = None if self.tgt_texts is not None: tokenized = self.get_tokenized_tgt_text(index) target = self.tgt_dict.encode_line( tokenized, add_if_not_exist=False, append_eos=True ).long() if self.cfg.prepend_tgt_lang_tag: lang_tag_idx = self.get_lang_tag_idx( self.tgt_langs[index], self.tgt_dict ) target = torch.cat((torch.LongTensor([lang_tag_idx]), target), 0) speaker_id = None if self.speaker_to_id is not None: speaker_id = self.speaker_to_id[self.speakers[index]] return SpeechToTextDatasetItem( index=index, source=source, target=target, speaker_id=speaker_id ) def __len__(self): return self.n_samples def collater( self, samples: List[SpeechToTextDatasetItem], return_order: bool = False ) -> Dict: if len(samples) == 0: return {} indices = torch.tensor([x.index for x in samples], dtype=torch.long) frames = _collate_frames([x.source for x in samples], self.cfg.use_audio_input) # sort samples by descending number of frames n_frames = torch.tensor([x.source.size(0) for x in samples], dtype=torch.long) n_frames, order = n_frames.sort(descending=True) indices = indices.index_select(0, order) frames = frames.index_select(0, order) target, target_lengths = None, None prev_output_tokens = None ntokens = None if self.tgt_texts is not None: target = fairseq_data_utils.collate_tokens( [x.target for x in samples], self.tgt_dict.pad(), self.tgt_dict.eos(), left_pad=False, move_eos_to_beginning=False, ) target = target.index_select(0, order) target_lengths = torch.tensor( [x.target.size(0) for x in samples], dtype=torch.long ).index_select(0, order) prev_output_tokens = fairseq_data_utils.collate_tokens( [x.target for x in samples], self.tgt_dict.pad(), self.tgt_dict.eos(), left_pad=False, move_eos_to_beginning=True, ) prev_output_tokens = prev_output_tokens.index_select(0, order) ntokens = sum(x.target.size(0) for x in samples) speaker = None if self.speaker_to_id is not None: speaker = torch.tensor( [s.speaker_id for s in samples], dtype=torch.long ).index_select(0, order).view(-1, 1) net_input = { "src_tokens": frames, "src_lengths": n_frames, "prev_output_tokens": prev_output_tokens, } out = { "id": indices, "net_input": net_input, "speaker": speaker, "target": target, "target_lengths": target_lengths, "ntokens": ntokens, "nsentences": len(samples), } if return_order: out["order"] = order return out def num_tokens(self, index): return 
self.n_frames[index] def size(self, index): return self.n_frames[index], self.tgt_lens[index] @property def sizes(self): return np.array(self.n_frames) @property def can_reuse_epoch_itr_across_epochs(self): return True def ordered_indices(self): if self.shuffle: order = [np.random.permutation(len(self))] else: order = [np.arange(len(self))] # first by descending order of # of frames then by original/random order order.append([-n for n in self.n_frames]) return np.lexsort(order) def prefetch(self, indices): raise False class SpeechToTextDatasetCreator(object): # mandatory columns KEY_ID, KEY_AUDIO, KEY_N_FRAMES = "id", "audio", "n_frames" KEY_TGT_TEXT = "tgt_text" # optional columns KEY_SPEAKER, KEY_SRC_TEXT = "speaker", "src_text" KEY_SRC_LANG, KEY_TGT_LANG = "src_lang", "tgt_lang" # default values DEFAULT_SPEAKER = DEFAULT_SRC_TEXT = DEFAULT_LANG = "" @classmethod def _from_list( cls, split_name: str, is_train_split, samples: List[Dict], cfg: S2TDataConfig, tgt_dict, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id ) -> SpeechToTextDataset: audio_root = Path(cfg.audio_root) ids = [s[cls.KEY_ID] for s in samples] audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples] n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples] tgt_texts = [s[cls.KEY_TGT_TEXT] for s in samples] src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples] speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples] src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples] tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples] return SpeechToTextDataset( split_name, is_train_split, cfg, audio_paths, n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, n_frames_per_step=n_frames_per_step, speaker_to_id=speaker_to_id ) @classmethod def get_size_ratios( cls, datasets: List[SpeechToTextDataset], alpha: float = 1.0 ) -> List[float]: """Size ratios for temperature-based sampling (https://arxiv.org/abs/1907.05019)""" id_to_lp, lp_to_sz = {}, defaultdict(int) for ds in datasets: lang_pairs = {f"{s}->{t}" for s, t in zip(ds.src_langs, ds.tgt_langs)} assert len(lang_pairs) == 1 lang_pair = list(lang_pairs)[0] id_to_lp[ds.split] = lang_pair lp_to_sz[lang_pair] += sum(ds.n_frames) sz_sum = sum(v for v in lp_to_sz.values()) lp_to_prob = {k: v / sz_sum for k, v in lp_to_sz.items()} lp_to_tgt_prob = {k: v ** alpha for k, v in lp_to_prob.items()} prob_sum = sum(v for v in lp_to_tgt_prob.values()) lp_to_tgt_prob = {k: v / prob_sum for k, v in lp_to_tgt_prob.items()} lp_to_sz_ratio = { k: (lp_to_tgt_prob[k] * sz_sum) / v for k, v in lp_to_sz.items() } size_ratio = [lp_to_sz_ratio[id_to_lp[ds.split]] for ds in datasets] p_formatted = { k: f"{lp_to_prob[k]:.3f}->{lp_to_tgt_prob[k]:.3f}" for k in lp_to_sz } logger.info(f"sampling probability balancing: {p_formatted}") sr_formatted = {ds.split: f"{r:.3f}" for ds, r in zip(datasets, size_ratio)} logger.info(f"balanced sampling size ratio: {sr_formatted}") return size_ratio @classmethod def _load_samples_from_tsv(cls, root: str, split: str): tsv_path = Path(root) / f"{split}.tsv" if not tsv_path.is_file(): raise FileNotFoundError(f"Dataset not found: {tsv_path}") with open(tsv_path) as f: reader = csv.DictReader( f, delimiter="\t", quotechar=None, doublequote=False, lineterminator="\n", quoting=csv.QUOTE_NONE, ) samples = [dict(e) for e in reader] if 
len(samples) == 0: raise ValueError(f"Empty manifest: {tsv_path}") return samples @classmethod def _from_tsv( cls, root: str, cfg: S2TDataConfig, split: str, tgt_dict, is_train_split: bool, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id ) -> SpeechToTextDataset: samples = cls._load_samples_from_tsv(root, split) return cls._from_list( split, is_train_split, samples, cfg, tgt_dict, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id ) @classmethod def from_tsv( cls, root: str, cfg: S2TDataConfig, splits: str, tgt_dict, pre_tokenizer, bpe_tokenizer, is_train_split: bool, epoch: int, seed: int, n_frames_per_step: int = 1, speaker_to_id=None ) -> SpeechToTextDataset: datasets = [ cls._from_tsv( root, cfg, split, tgt_dict, is_train_split, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id ) for split in splits.split(",") ] if is_train_split and len(datasets) > 1 and cfg.sampling_alpha != 1.0: # temperature-based sampling size_ratios = cls.get_size_ratios(datasets, alpha=cfg.sampling_alpha) datasets = [ ResamplingDataset( d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0) ) for r, d in zip(size_ratios, datasets) ] return ConcatDataset(datasets) if len(datasets) > 1 else datasets[0]
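# --- illustrative sketch; not part of the original fairseq file -------------
# The arithmetic inside get_size_ratios() on plain numbers: per-language-pair
# sizes become probabilities, are re-weighted with the temperature exponent
# alpha, and turned back into per-dataset resampling size ratios
# (https://arxiv.org/abs/1907.05019). Sizes below are made up.
def _size_ratio_example(alpha=0.5):
    lp_to_sz = {"en->de": 900_000, "en->fr": 100_000}
    sz_sum = sum(lp_to_sz.values())
    lp_to_prob = {k: v / sz_sum for k, v in lp_to_sz.items()}
    lp_to_tgt_prob = {k: v ** alpha for k, v in lp_to_prob.items()}
    prob_sum = sum(lp_to_tgt_prob.values())
    lp_to_tgt_prob = {k: v / prob_sum for k, v in lp_to_tgt_prob.items()}
    lp_to_sz_ratio = {k: lp_to_tgt_prob[k] * sz_sum / v for k, v in lp_to_sz.items()}
    return lp_to_prob, lp_to_tgt_prob, lp_to_sz_ratio

# With the defaults: probs {0.9, 0.1} -> target probs ~{0.75, 0.25}
# -> size ratios ~{en->de: 0.83 (downsample), en->fr: 2.5 (upsample)}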
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/speech_to_text_dataset.py
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory.abs from pathlib import Path from typing import List, Dict, Optional, Any from dataclasses import dataclass import numpy as np import torch from fairseq.data.audio.speech_to_text_dataset import ( SpeechToTextDataset, SpeechToTextDatasetCreator, S2TDataConfig, _collate_frames, get_features_or_waveform ) from fairseq.data import Dictionary, data_utils as fairseq_data_utils @dataclass class TextToSpeechDatasetItem(object): index: int source: torch.Tensor target: Optional[torch.Tensor] = None speaker_id: Optional[int] = None duration: Optional[torch.Tensor] = None pitch: Optional[torch.Tensor] = None energy: Optional[torch.Tensor] = None class TextToSpeechDataset(SpeechToTextDataset): def __init__( self, split: str, is_train_split: bool, cfg: S2TDataConfig, audio_paths: List[str], n_frames: List[int], src_texts: Optional[List[str]] = None, tgt_texts: Optional[List[str]] = None, speakers: Optional[List[str]] = None, src_langs: Optional[List[str]] = None, tgt_langs: Optional[List[str]] = None, ids: Optional[List[str]] = None, tgt_dict: Optional[Dictionary] = None, pre_tokenizer=None, bpe_tokenizer=None, n_frames_per_step=1, speaker_to_id=None, durations: Optional[List[List[int]]] = None, pitches: Optional[List[str]] = None, energies: Optional[List[str]] = None ): super(TextToSpeechDataset, self).__init__( split, is_train_split, cfg, audio_paths, n_frames, src_texts=src_texts, tgt_texts=tgt_texts, speakers=speakers, src_langs=src_langs, tgt_langs=tgt_langs, ids=ids, tgt_dict=tgt_dict, pre_tokenizer=pre_tokenizer, bpe_tokenizer=bpe_tokenizer, n_frames_per_step=n_frames_per_step, speaker_to_id=speaker_to_id ) self.durations = durations self.pitches = pitches self.energies = energies def __getitem__(self, index: int) -> TextToSpeechDatasetItem: s2t_item = super().__getitem__(index) duration, pitch, energy = None, None, None if self.durations is not None: duration = torch.tensor( self.durations[index] + [0], dtype=torch.long # pad 0 for EOS ) if self.pitches is not None: pitch = get_features_or_waveform(self.pitches[index]) pitch = torch.from_numpy( np.concatenate((pitch, [0])) # pad 0 for EOS ).float() if self.energies is not None: energy = get_features_or_waveform(self.energies[index]) energy = torch.from_numpy( np.concatenate((energy, [0])) # pad 0 for EOS ).float() return TextToSpeechDatasetItem( index=index, source=s2t_item.source, target=s2t_item.target, speaker_id=s2t_item.speaker_id, duration=duration, pitch=pitch, energy=energy ) def collater(self, samples: List[TextToSpeechDatasetItem]) -> Dict[str, Any]: if len(samples) == 0: return {} src_lengths, order = torch.tensor( [s.target.shape[0] for s in samples], dtype=torch.long ).sort(descending=True) id_ = torch.tensor([s.index for s in samples], dtype=torch.long).index_select(0, order) feat = _collate_frames( [s.source for s in samples], self.cfg.use_audio_input ).index_select(0, order) target_lengths = torch.tensor( [s.source.shape[0] for s in samples], dtype=torch.long ).index_select(0, order) src_tokens = fairseq_data_utils.collate_tokens( [s.target for s in samples], self.tgt_dict.pad(), self.tgt_dict.eos(), left_pad=False, move_eos_to_beginning=False, ).index_select(0, order) speaker = None if self.speaker_to_id is not None: speaker = torch.tensor( 
[s.speaker_id for s in samples], dtype=torch.long ).index_select(0, order).view(-1, 1) bsz, _, d = feat.size() prev_output_tokens = torch.cat( (feat.new_zeros((bsz, 1, d)), feat[:, :-1, :]), dim=1 ) durations, pitches, energies = None, None, None if self.durations is not None: durations = fairseq_data_utils.collate_tokens( [s.duration for s in samples], 0 ).index_select(0, order) assert src_tokens.shape[1] == durations.shape[1] if self.pitches is not None: pitches = _collate_frames([s.pitch for s in samples], True) pitches = pitches.index_select(0, order) assert src_tokens.shape[1] == pitches.shape[1] if self.energies is not None: energies = _collate_frames([s.energy for s in samples], True) energies = energies.index_select(0, order) assert src_tokens.shape[1] == energies.shape[1] src_texts = [self.tgt_dict.string(samples[i].target) for i in order] return { "id": id_, "net_input": { "src_tokens": src_tokens, "src_lengths": src_lengths, "prev_output_tokens": prev_output_tokens, }, "speaker": speaker, "target": feat, "durations": durations, "pitches": pitches, "energies": energies, "target_lengths": target_lengths, "ntokens": sum(target_lengths).item(), "nsentences": len(samples), "src_texts": src_texts, } class TextToSpeechDatasetCreator(SpeechToTextDatasetCreator): KEY_DURATION = "duration" KEY_PITCH = "pitch" KEY_ENERGY = "energy" @classmethod def _from_list( cls, split_name: str, is_train_split, samples: List[Dict], cfg: S2TDataConfig, tgt_dict, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id ) -> TextToSpeechDataset: audio_root = Path(cfg.audio_root) ids = [s[cls.KEY_ID] for s in samples] audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples] n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples] tgt_texts = [s[cls.KEY_TGT_TEXT] for s in samples] src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples] speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples] src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples] tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples] durations = [s.get(cls.KEY_DURATION, None) for s in samples] durations = [ None if dd is None else [int(d) for d in dd.split(" ")] for dd in durations ] durations = None if any(dd is None for dd in durations) else durations pitches = [s.get(cls.KEY_PITCH, None) for s in samples] pitches = [ None if pp is None else (audio_root / pp).as_posix() for pp in pitches ] pitches = None if any(pp is None for pp in pitches) else pitches energies = [s.get(cls.KEY_ENERGY, None) for s in samples] energies = [ None if ee is None else (audio_root / ee).as_posix() for ee in energies] energies = None if any(ee is None for ee in energies) else energies return TextToSpeechDataset( split_name, is_train_split, cfg, audio_paths, n_frames, src_texts, tgt_texts, speakers, src_langs, tgt_langs, ids, tgt_dict, pre_tokenizer, bpe_tokenizer, n_frames_per_step, speaker_to_id, durations, pitches, energies )
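# --- illustrative sketch; not part of the original fairseq file -------------
# The collater above builds decoder inputs by shifting the target spectrogram
# one frame to the right and prepending an all-zero frame (teacher forcing).
# A tiny standalone tensor example of that shift.
def _demo_prev_output_tokens(bsz=2, n_frames=4, d=3):
    feat = torch.arange(bsz * n_frames * d, dtype=torch.float).view(bsz, n_frames, d)
    prev_output_tokens = torch.cat(
        (feat.new_zeros((bsz, 1, d)), feat[:, :-1, :]), dim=1
    )
    # frame t of the decoder input is frame t-1 of the target; frame 0 is zeros
    assert torch.equal(prev_output_tokens[:, 1:, :], feat[:, :-1, :])
    assert prev_output_tokens[:, 0, :].abs().sum().item() == 0.0
    return prev_output_tokens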
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/text_to_speech_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from pathlib import Path from typing import BinaryIO, Optional, Tuple, Union, List import numpy as np import torch import torch.nn.functional as F SF_AUDIO_FILE_EXTENSIONS = {".wav", ".flac", ".ogg"} FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS = {".npy", ".wav", ".flac", ".ogg"} def convert_waveform( waveform: Union[np.ndarray, torch.Tensor], sample_rate: int, normalize_volume: bool = False, to_mono: bool = False, to_sample_rate: Optional[int] = None ) -> Tuple[Union[np.ndarray, torch.Tensor], int]: """convert a waveform: - to a target sample rate - from multi-channel to mono channel - volume normalization Args: waveform (numpy.ndarray or torch.Tensor): 2D original waveform (channels x length) sample_rate (int): original sample rate normalize_volume (bool): perform volume normalization to_mono (bool): convert to mono channel if having multiple channels to_sample_rate (Optional[int]): target sample rate Returns: waveform (numpy.ndarray): converted 2D waveform (channels x length) sample_rate (float): target sample rate """ try: import torchaudio.sox_effects as ta_sox except ImportError: raise ImportError("Please install torchaudio: pip install torchaudio") effects = [] if normalize_volume: effects.append(["gain", "-n"]) if to_sample_rate is not None and to_sample_rate != sample_rate: effects.append(["rate", f"{to_sample_rate}"]) if to_mono and waveform.shape[0] > 1: effects.append(["channels", "1"]) if len(effects) > 0: is_np_input = isinstance(waveform, np.ndarray) _waveform = torch.from_numpy(waveform) if is_np_input else waveform converted, converted_sample_rate = ta_sox.apply_effects_tensor( _waveform, sample_rate, effects ) if is_np_input: converted = converted.numpy() return converted, converted_sample_rate return waveform, sample_rate def get_waveform( path_or_fp: Union[str, BinaryIO], normalization: bool = True, mono: bool = True, frames: int = -1, start: int = 0, always_2d: bool = True, output_sample_rate: Optional[int] = None, normalize_volume: bool = False ) -> Tuple[np.ndarray, int]: """Get the waveform and sample rate of a 16-bit WAV/FLAC/OGG Vorbis audio. Args: path_or_fp (str or BinaryIO): the path or file-like object normalization (bool): normalize values to [-1, 1] (Default: True) mono (bool): convert multi-channel audio to mono-channel one frames (int): the number of frames to read. (-1 for reading all) start (int): Where to start reading. A negative value counts from the end. 
always_2d (bool): always return 2D array even for mono-channel audios output_sample_rate (Optional[int]): output sample rate normalize_volume (bool): normalize volume Returns: waveform (numpy.ndarray): 1D or 2D waveform (channels x length) sample_rate (float): sample rate """ if isinstance(path_or_fp, str): ext = Path(path_or_fp).suffix if ext not in SF_AUDIO_FILE_EXTENSIONS: raise ValueError(f"Unsupported audio format: {ext}") try: import soundfile as sf except ImportError: raise ImportError("Please install soundfile: pip install soundfile") waveform, sample_rate = sf.read( path_or_fp, dtype="float32", always_2d=True, frames=frames, start=start ) waveform = waveform.T # T x C -> C x T waveform, sample_rate = convert_waveform( waveform, sample_rate, normalize_volume=normalize_volume, to_mono=mono, to_sample_rate=output_sample_rate ) if not normalization: waveform *= 2 ** 15 # denormalized to 16-bit signed integers if not always_2d: waveform = waveform.squeeze(axis=0) return waveform, sample_rate def _get_kaldi_fbank( waveform: np.ndarray, sample_rate: int, n_bins=80 ) -> Optional[np.ndarray]: """Get mel-filter bank features via PyKaldi.""" try: from kaldi.feat.fbank import FbankOptions, Fbank from kaldi.feat.mel import MelBanksOptions from kaldi.feat.window import FrameExtractionOptions from kaldi.matrix import Vector mel_opts = MelBanksOptions() mel_opts.num_bins = n_bins frame_opts = FrameExtractionOptions() frame_opts.samp_freq = sample_rate opts = FbankOptions() opts.mel_opts = mel_opts opts.frame_opts = frame_opts fbank = Fbank(opts=opts) features = fbank.compute(Vector(waveform.squeeze()), 1.0).numpy() return features except ImportError: return None def _get_torchaudio_fbank( waveform: np.ndarray, sample_rate, n_bins=80 ) -> Optional[np.ndarray]: """Get mel-filter bank features via TorchAudio.""" try: import torchaudio.compliance.kaldi as ta_kaldi waveform = torch.from_numpy(waveform) features = ta_kaldi.fbank( waveform, num_mel_bins=n_bins, sample_frequency=sample_rate ) return features.numpy() except ImportError: return None def get_fbank(path_or_fp: Union[str, BinaryIO], n_bins=80) -> np.ndarray: """Get mel-filter bank features via PyKaldi or TorchAudio. Prefer PyKaldi (faster CPP implementation) to TorchAudio (Python implementation). Note that Kaldi/TorchAudio requires 16-bit signed integers as inputs and hence the waveform should not be normalized.""" waveform, sample_rate = get_waveform(path_or_fp, normalization=False) features = _get_kaldi_fbank(waveform, sample_rate, n_bins) if features is None: features = _get_torchaudio_fbank(waveform, sample_rate, n_bins) if features is None: raise ImportError( "Please install pyKaldi or torchaudio to enable " "online filterbank feature extraction" ) return features def is_npy_data(data: bytes) -> bool: return data[0] == 147 and data[1] == 78 def is_sf_audio_data(data: bytes) -> bool: is_wav = data[0] == 82 and data[1] == 73 and data[2] == 70 is_flac = data[0] == 102 and data[1] == 76 and data[2] == 97 is_ogg = data[0] == 79 and data[1] == 103 and data[2] == 103 return is_wav or is_flac or is_ogg def read_from_stored_zip(zip_path: str, offset: int, file_size: int) -> bytes: with open(zip_path, "rb") as f: f.seek(offset) data = f.read(file_size) return data def parse_path(path: str) -> Tuple[str, List[int]]: """Parse data path which is either a path to 1. a .npy/.wav/.flac/.ogg file 2. 
a stored ZIP file with slicing info: "[zip_path]:[offset]:[length]" Args: path (str): the data path to parse Returns: file_path (str): the file path slice_ptr (list of int): empty in case 1; byte offset and length for the slice in case 2 """ if Path(path).suffix in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS: _path, slice_ptr = path, [] else: _path, *slice_ptr = path.split(":") if not Path(_path).is_file(): raise FileNotFoundError(f"File not found: {_path}") assert len(slice_ptr) in {0, 2}, f"Invalid path: {path}" slice_ptr = [int(i) for i in slice_ptr] return _path, slice_ptr def get_window( window_fn: callable, n_fft: int, win_length: int ) -> torch.Tensor: padding = n_fft - win_length assert padding >= 0 return F.pad(window_fn(win_length), (padding // 2, padding - padding // 2)) def get_fourier_basis(n_fft: int) -> torch.Tensor: basis = np.fft.fft(np.eye(n_fft)) basis = np.vstack( [np.real(basis[:n_fft // 2 + 1, :]), np.imag(basis[:n_fft // 2 + 1, :])] ) return torch.from_numpy(basis).float() def get_mel_filters( sample_rate: int, n_fft: int, n_mels: int, f_min: float, f_max: float ) -> torch.Tensor: try: import librosa except ImportError: raise ImportError("Please install librosa: pip install librosa") basis = librosa.filters.mel(sample_rate, n_fft, n_mels, f_min, f_max) return torch.from_numpy(basis).float() class TTSSpectrogram(torch.nn.Module): def __init__( self, n_fft: int, win_length: int, hop_length: int, window_fn: callable = torch.hann_window, return_phase: bool = False ) -> None: super(TTSSpectrogram, self).__init__() self.n_fft = n_fft self.hop_length = hop_length self.return_phase = return_phase basis = get_fourier_basis(n_fft).unsqueeze(1) basis *= get_window(window_fn, n_fft, win_length) self.register_buffer('basis', basis) def forward( self, waveform: torch.Tensor ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: padding = (self.n_fft // 2, self.n_fft // 2) x = F.pad(waveform.unsqueeze(1), padding, mode='reflect') x = F.conv1d(x, self.basis, stride=self.hop_length) real_part = x[:, :self.n_fft // 2 + 1, :] imag_part = x[:, self.n_fft // 2 + 1:, :] magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2) if self.return_phase: phase = torch.atan2(imag_part, real_part) return magnitude, phase return magnitude class TTSMelScale(torch.nn.Module): def __init__( self, n_mels: int, sample_rate: int, f_min: float, f_max: float, n_stft: int ) -> None: super(TTSMelScale, self).__init__() basis = get_mel_filters(sample_rate, (n_stft - 1) * 2, n_mels, f_min, f_max) self.register_buffer('basis', basis) def forward(self, specgram: torch.Tensor) -> torch.Tensor: return torch.matmul(self.basis, specgram)
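# --- illustrative sketch; not part of the original fairseq file -------------
# parse_path() accepts either a plain feature/audio file or a
# "zip_path:byte_offset:byte_length" slice into a stored (uncompressed) zip.
# A small example using temporary files so the is_file() checks pass; the
# contents are stand-ins, not real audio.
def _demo_parse_path():
    import tempfile

    tmp_dir = Path(tempfile.mkdtemp())

    # case 1: a plain .npy feature file -> empty slice pointer
    npy_path = tmp_dir / "feat.npy"
    np.save(npy_path, np.zeros((10, 80), dtype=np.float32))
    _path, slice_ptr = parse_path(npy_path.as_posix())
    assert _path == npy_path.as_posix() and slice_ptr == []

    # case 2: a byte slice into a stored zip file
    zip_path = tmp_dir / "data.zip"
    zip_path.write_bytes(b"\x00" * 128)  # stand-in for a real stored zip
    _path, slice_ptr = parse_path(f"{zip_path.as_posix()}:16:64")
    assert slice_ptr == [16, 64]
    data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
    assert len(data) == 64
    return data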
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/audio_utils.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys import io import numpy as np import torch import torch.nn.functional as F from .. import FairseqDataset from ..data_utils import compute_mask_indices, get_buckets, get_bucketed_sizes from fairseq.data.audio.audio_utils import ( parse_path, read_from_stored_zip, is_sf_audio_data, ) from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel logger = logging.getLogger(__name__) class RawAudioDataset(FairseqDataset): def __init__( self, sample_rate, max_sample_size=None, min_sample_size=0, shuffle=True, pad=False, normalize=False, compute_mask_indices=False, **mask_compute_kwargs, ): super().__init__() self.sample_rate = sample_rate self.sizes = [] self.max_sample_size = ( max_sample_size if max_sample_size is not None else sys.maxsize ) self.min_sample_size = min_sample_size self.pad = pad self.shuffle = shuffle self.normalize = normalize self.compute_mask_indices = compute_mask_indices if self.compute_mask_indices: self.mask_compute_kwargs = mask_compute_kwargs self._features_size_map = {} self._C = mask_compute_kwargs["encoder_embed_dim"] self._conv_feature_layers = eval(mask_compute_kwargs["conv_feature_layers"]) def __getitem__(self, index): raise NotImplementedError() def __len__(self): return len(self.sizes) def postprocess(self, feats, curr_sample_rate): if feats.dim() == 2: feats = feats.mean(-1) if curr_sample_rate != self.sample_rate: raise Exception(f"sample rate: {curr_sample_rate}, need {self.sample_rate}") assert feats.dim() == 1, feats.dim() if self.normalize: with torch.no_grad(): feats = F.layer_norm(feats, feats.shape) return feats def crop_to_max_size(self, wav, target_size): size = len(wav) diff = size - target_size if diff <= 0: return wav start = np.random.randint(0, diff + 1) end = size - diff + start return wav[start:end] def _compute_mask_indices(self, dims, padding_mask): B, T, C = dims mask_indices, mask_channel_indices = None, None if self.mask_compute_kwargs["mask_prob"] > 0: mask_indices = compute_mask_indices( (B, T), padding_mask, self.mask_compute_kwargs["mask_prob"], self.mask_compute_kwargs["mask_length"], self.mask_compute_kwargs["mask_selection"], self.mask_compute_kwargs["mask_other"], min_masks=2, no_overlap=self.mask_compute_kwargs["no_mask_overlap"], min_space=self.mask_compute_kwargs["mask_min_space"], ) mask_indices = torch.from_numpy(mask_indices) if self.mask_compute_kwargs["mask_channel_prob"] > 0: mask_channel_indices = compute_mask_indices( (B, C), None, self.mask_compute_kwargs["mask_channel_prob"], self.mask_compute_kwargs["mask_channel_length"], self.mask_compute_kwargs["mask_channel_selection"], self.mask_compute_kwargs["mask_channel_other"], no_overlap=self.mask_compute_kwargs["no_mask_channel_overlap"], min_space=self.mask_compute_kwargs["mask_channel_min_space"], ) mask_channel_indices = ( torch.from_numpy(mask_channel_indices).unsqueeze(1).expand(-1, T, -1) ) return mask_indices, mask_channel_indices @staticmethod def _bucket_tensor(tensor, num_pad, value): return F.pad(tensor, (0, num_pad), value=value) def collater(self, samples): samples = [s for s in samples if s["source"] is not None] if len(samples) == 0: return {} sources = [s["source"] for s in samples] sizes = [len(s) for s in sources] if self.pad: target_size = min(max(sizes), self.max_sample_size) else: target_size = min(min(sizes), 
self.max_sample_size) collated_sources = sources[0].new_zeros(len(sources), target_size) padding_mask = ( torch.BoolTensor(collated_sources.shape).fill_(False) if self.pad else None ) for i, (source, size) in enumerate(zip(sources, sizes)): diff = size - target_size if diff == 0: collated_sources[i] = source elif diff < 0: assert self.pad collated_sources[i] = torch.cat( [source, source.new_full((-diff,), 0.0)] ) padding_mask[i, diff:] = True else: collated_sources[i] = self.crop_to_max_size(source, target_size) input = {"source": collated_sources} out = {"id": torch.LongTensor([s["id"] for s in samples])} if self.pad: input["padding_mask"] = padding_mask if hasattr(self, "num_buckets") and self.num_buckets > 0: assert self.pad, "Cannot bucket without padding first." bucket = max(self._bucketed_sizes[s["id"]] for s in samples) num_pad = bucket - collated_sources.size(-1) if num_pad: input["source"] = self._bucket_tensor(collated_sources, num_pad, 0) input["padding_mask"] = self._bucket_tensor(padding_mask, num_pad, True) if self.compute_mask_indices: B = input["source"].size(0) T = self._get_mask_indices_dims(input["source"].size(-1)) padding_mask_reshaped = input["padding_mask"].clone() extra = padding_mask_reshaped.size(1) % T if extra > 0: padding_mask_reshaped = padding_mask_reshaped[:, :-extra] padding_mask_reshaped = padding_mask_reshaped.view( padding_mask_reshaped.size(0), T, -1 ) padding_mask_reshaped = padding_mask_reshaped.all(-1) input["padding_count"] = padding_mask_reshaped.sum(-1).max().item() mask_indices, mask_channel_indices = self._compute_mask_indices( (B, T, self._C), padding_mask_reshaped, ) input["mask_indices"] = mask_indices input["mask_channel_indices"] = mask_channel_indices out["sample_size"] = mask_indices.sum().item() out["net_input"] = input return out def _get_mask_indices_dims(self, size, padding=0, dilation=1): if size not in self._features_size_map: L_in = size for (_, kernel_size, stride) in self._conv_feature_layers: L_out = L_in + 2 * padding - dilation * (kernel_size - 1) - 1 L_out = 1 + L_out // stride L_in = L_out self._features_size_map[size] = L_out return self._features_size_map[size] def num_tokens(self, index): return self.size(index) def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" if self.pad: return self.sizes[index] return min(self.sizes[index], self.max_sample_size) def ordered_indices(self): """Return an ordered list of indices. 
Batches will be constructed based on this order.""" if self.shuffle: order = [np.random.permutation(len(self))] order.append( np.minimum( np.array(self.sizes), self.max_sample_size, ) ) return np.lexsort(order)[::-1] else: return np.arange(len(self)) def set_bucket_info(self, num_buckets): self.num_buckets = num_buckets if self.num_buckets > 0: self._collated_sizes = np.minimum( np.array(self.sizes), self.max_sample_size, ) self.buckets = get_buckets( self._collated_sizes, self.num_buckets, ) self._bucketed_sizes = get_bucketed_sizes( self._collated_sizes, self.buckets ) logger.info( f"{len(self.buckets)} bucket(s) for the audio dataset: " f"{self.buckets}" ) class FileAudioDataset(RawAudioDataset): def __init__( self, manifest_path, sample_rate, max_sample_size=None, min_sample_size=0, shuffle=True, pad=False, normalize=False, num_buckets=0, compute_mask_indices=False, text_compression_level=TextCompressionLevel.none, **mask_compute_kwargs, ): super().__init__( sample_rate=sample_rate, max_sample_size=max_sample_size, min_sample_size=min_sample_size, shuffle=shuffle, pad=pad, normalize=normalize, compute_mask_indices=compute_mask_indices, **mask_compute_kwargs, ) self.text_compressor = TextCompressor(level=text_compression_level) skipped = 0 self.fnames = [] sizes = [] self.skipped_indices = set() with open(manifest_path, "r") as f: self.root_dir = f.readline().strip() for i, line in enumerate(f): items = line.strip().split("\t") assert len(items) == 2, line sz = int(items[1]) if min_sample_size is not None and sz < min_sample_size: skipped += 1 self.skipped_indices.add(i) continue self.fnames.append(self.text_compressor.compress(items[0])) sizes.append(sz) logger.info(f"loaded {len(self.fnames)}, skipped {skipped} samples") self.sizes = np.array(sizes, dtype=np.int64) try: import pyarrow self.fnames = pyarrow.array(self.fnames) except: logger.debug( "Could not create a pyarrow array. 
Please install pyarrow for better performance" ) pass self.set_bucket_info(num_buckets) def __getitem__(self, index): import soundfile as sf fn = self.fnames[index] fn = fn if isinstance(self.fnames, list) else fn.as_py() fn = self.text_compressor.decompress(fn) path_or_fp = os.path.join(self.root_dir, fn) _path, slice_ptr = parse_path(path_or_fp) if len(slice_ptr) == 2: byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1]) assert is_sf_audio_data(byte_data) path_or_fp = io.BytesIO(byte_data) wav, curr_sample_rate = sf.read(path_or_fp, dtype="float32") feats = torch.from_numpy(wav).float() feats = self.postprocess(feats, curr_sample_rate) return {"id": index, "source": feats} class BinarizedAudioDataset(RawAudioDataset): def __init__( self, data_dir, split, sample_rate, max_sample_size=None, min_sample_size=0, shuffle=True, pad=False, normalize=False, num_buckets=0, compute_mask_indices=False, **mask_compute_kwargs, ): super().__init__( sample_rate=sample_rate, max_sample_size=max_sample_size, min_sample_size=min_sample_size, shuffle=shuffle, pad=pad, normalize=normalize, compute_mask_indices=compute_mask_indices, **mask_compute_kwargs, ) from fairseq.data import data_utils, Dictionary self.fnames_dict = Dictionary.load(os.path.join(data_dir, "dict.txt")) root_path = os.path.join(data_dir, f"{split}.root") if os.path.exists(root_path): with open(root_path, "r") as f: self.root_dir = next(f).strip() else: self.root_dir = None fnames_path = os.path.join(data_dir, split) self.fnames = data_utils.load_indexed_dataset(fnames_path, self.fnames_dict) lengths_path = os.path.join(data_dir, f"{split}.lengths") with open(lengths_path, "r") as f: for line in f: sz = int(line.rstrip()) assert ( sz >= min_sample_size ), f"Min sample size is not supported for binarized dataset, but found a sample with size {sz}" self.sizes.append(sz) self.sizes = np.array(self.sizes, dtype=np.int64) self.set_bucket_info(num_buckets) logger.info(f"loaded {len(self.fnames)} samples") def __getitem__(self, index): import soundfile as sf fname = self.fnames_dict.string(self.fnames[index], separator="") if self.root_dir: fname = os.path.join(self.root_dir, fname) wav, curr_sample_rate = sf.read(fname) feats = torch.from_numpy(wav).float() feats = self.postprocess(feats, curr_sample_rate) return {"id": index, "source": feats}
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/raw_audio_dataset.py
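RawAudioDataset._get_mask_indices_dims above maps a raw-waveform length to the number of convolutional feature frames by folding the standard 1-D convolution output-length formula over the (dim, kernel_size, stride) tuples in conv_feature_layers. A minimal self-contained sketch of that arithmetic follows; the layer string is an assumed wav2vec 2.0-style default, not a value read from this repository.

# Sketch of the feature-frame count computation used by _get_mask_indices_dims.
# The conv_feature_layers string below is an assumed example configuration.
conv_feature_layers = eval("[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2")

def num_feature_frames(num_samples: int) -> int:
    length = num_samples
    for _dim, kernel_size, stride in conv_feature_layers:
        # standard conv1d output length with padding=0, dilation=1
        length = 1 + (length - (kernel_size - 1) - 1) // stride
    return length

print(num_feature_frames(16000))  # one second of 16 kHz audio -> 49 frames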
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import logging import os import sys from typing import Any, List, Optional, Union import numpy as np import torch import torch.nn.functional as F from fairseq.data import data_utils from fairseq.data.fairseq_dataset import FairseqDataset logger = logging.getLogger(__name__) def load_audio(manifest_path, max_keep, min_keep): n_long, n_short = 0, 0 names, inds, sizes = [], [], [] with open(manifest_path) as f: root = f.readline().strip() for ind, line in enumerate(f): items = line.strip().split("\t") assert len(items) == 2, line sz = int(items[1]) if min_keep is not None and sz < min_keep: n_short += 1 elif max_keep is not None and sz > max_keep: n_long += 1 else: names.append(items[0]) inds.append(ind) sizes.append(sz) tot = ind + 1 logger.info( ( f"max_keep={max_keep}, min_keep={min_keep}, " f"loaded {len(names)}, skipped {n_short} short and {n_long} long, " f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}" ) ) return root, names, inds, tot, sizes def load_label(label_path, inds, tot): with open(label_path) as f: labels = [line.rstrip() for line in f] assert ( len(labels) == tot ), f"number of labels does not match ({len(labels)} != {tot})" labels = [labels[i] for i in inds] return labels def load_label_offset(label_path, inds, tot): with open(label_path) as f: code_lengths = [len(line.encode("utf-8")) for line in f] assert ( len(code_lengths) == tot ), f"number of labels does not match ({len(code_lengths)} != {tot})" offsets = list(itertools.accumulate([0] + code_lengths)) offsets = [(offsets[i], offsets[i + 1]) for i in inds] return offsets def verify_label_lengths( audio_sizes, audio_rate, label_path, label_rate, inds, tot, tol=0.1, # tolerance in seconds ): if label_rate < 0: logger.info(f"{label_path} is sequence label. skipped") return with open(label_path) as f: lengths = [len(line.rstrip().split()) for line in f] assert len(lengths) == tot lengths = [lengths[i] for i in inds] num_invalid = 0 for i, ind in enumerate(inds): dur_from_audio = audio_sizes[i] / audio_rate dur_from_label = lengths[i] / label_rate if abs(dur_from_audio - dur_from_label) > tol: logger.warning( ( f"audio and label duration differ too much " f"(|{dur_from_audio} - {dur_from_label}| > {tol}) " f"in line {ind+1} of {label_path}. Check if `label_rate` " f"is correctly set (currently {label_rate}). " f"num. 
of samples = {audio_sizes[i]}; " f"label length = {lengths[i]}" ) ) num_invalid += 1 if num_invalid > 0: logger.warning( f"total {num_invalid} (audio, label) pairs with mismatched lengths" ) class HubertDataset(FairseqDataset): def __init__( self, manifest_path: str, sample_rate: float, label_paths: List[str], label_rates: Union[List[float], float], # -1 for sequence labels pad_list: List[str], eos_list: List[str], label_processors: Optional[List[Any]] = None, max_keep_sample_size: Optional[int] = None, min_keep_sample_size: Optional[int] = None, max_sample_size: Optional[int] = None, shuffle: bool = True, pad_audio: bool = False, normalize: bool = False, store_labels: bool = True, random_crop: bool = False, single_target: bool = False, ): self.audio_root, self.audio_names, inds, tot, self.sizes = load_audio( manifest_path, max_keep_sample_size, min_keep_sample_size ) self.sample_rate = sample_rate self.shuffle = shuffle self.random_crop = random_crop self.num_labels = len(label_paths) self.pad_list = pad_list self.eos_list = eos_list self.label_processors = label_processors self.single_target = single_target self.label_rates = ( [label_rates for _ in range(len(label_paths))] if isinstance(label_rates, int) else label_rates ) self.store_labels = store_labels if store_labels: self.label_list = [load_label(p, inds, tot) for p in label_paths] else: self.label_paths = label_paths self.label_offsets_list = [ load_label_offset(p, inds, tot) for p in label_paths ] assert ( label_processors is None or len(label_processors) == self.num_labels ) for label_path, label_rate in zip(label_paths, self.label_rates): verify_label_lengths( self.sizes, sample_rate, label_path, label_rate, inds, tot ) self.max_sample_size = ( max_sample_size if max_sample_size is not None else sys.maxsize ) self.pad_audio = pad_audio self.normalize = normalize logger.info( f"pad_audio={pad_audio}, random_crop={random_crop}, " f"normalize={normalize}, max_sample_size={self.max_sample_size}" ) def get_audio(self, index): import soundfile as sf wav_path = os.path.join(self.audio_root, self.audio_names[index]) wav, cur_sample_rate = sf.read(wav_path) wav = torch.from_numpy(wav).float() wav = self.postprocess(wav, cur_sample_rate) return wav def get_label(self, index, label_idx): if self.store_labels: label = self.label_list[label_idx][index] else: with open(self.label_paths[label_idx]) as f: offset_s, offset_e = self.label_offsets_list[label_idx][index] f.seek(offset_s) label = f.read(offset_e - offset_s) if self.label_processors is not None: label = self.label_processors[label_idx](label) return label def get_labels(self, index): return [self.get_label(index, i) for i in range(self.num_labels)] def __getitem__(self, index): wav = self.get_audio(index) labels = self.get_labels(index) return {"id": index, "source": wav, "label_list": labels} def __len__(self): return len(self.sizes) def crop_to_max_size(self, wav, target_size): size = len(wav) diff = size - target_size if diff <= 0: return wav, 0 start, end = 0, target_size if self.random_crop: start = np.random.randint(0, diff + 1) end = size - diff + start return wav[start:end], start def collater(self, samples): # target = max(sizes) -> random_crop not used # target = max_sample_size -> random_crop used for long samples = [s for s in samples if s["source"] is not None] if len(samples) == 0: return {} audios = [s["source"] for s in samples] audio_sizes = [len(s) for s in audios] if self.pad_audio: audio_size = min(max(audio_sizes), self.max_sample_size) else: audio_size = 
min(min(audio_sizes), self.max_sample_size) collated_audios, padding_mask, audio_starts = self.collater_audio( audios, audio_size ) targets_by_label = [ [s["label_list"][i] for s in samples] for i in range(self.num_labels) ] targets_list, lengths_list, ntokens_list = self.collater_label( targets_by_label, audio_size, audio_starts ) net_input = {"source": collated_audios, "padding_mask": padding_mask} batch = { "id": torch.LongTensor([s["id"] for s in samples]), "net_input": net_input, } if self.single_target: batch["target_lengths"] = lengths_list[0] batch["ntokens"] = ntokens_list[0] batch["target"] = targets_list[0] else: batch["target_lengths_list"] = lengths_list batch["ntokens_list"] = ntokens_list batch["target_list"] = targets_list return batch def collater_audio(self, audios, audio_size): collated_audios = audios[0].new_zeros(len(audios), audio_size) padding_mask = ( torch.BoolTensor(collated_audios.shape).fill_(False) # if self.pad_audio else None ) audio_starts = [0 for _ in audios] for i, audio in enumerate(audios): diff = len(audio) - audio_size if diff == 0: collated_audios[i] = audio elif diff < 0: assert self.pad_audio collated_audios[i] = torch.cat( [audio, audio.new_full((-diff,), 0.0)] ) padding_mask[i, diff:] = True else: collated_audios[i], audio_starts[i] = self.crop_to_max_size( audio, audio_size ) return collated_audios, padding_mask, audio_starts def collater_frm_label( self, targets, audio_size, audio_starts, label_rate, pad ): assert label_rate > 0 s2f = label_rate / self.sample_rate frm_starts = [int(round(s * s2f)) for s in audio_starts] frm_size = int(round(audio_size * s2f)) if not self.pad_audio: rem_size = [len(t) - s for t, s in zip(targets, frm_starts)] frm_size = min(frm_size, *rem_size) targets = [t[s: s + frm_size] for t, s in zip(targets, frm_starts)] logger.debug(f"audio_starts={audio_starts}") logger.debug(f"frame_starts={frm_starts}") logger.debug(f"frame_size={frm_size}") lengths = torch.LongTensor([len(t) for t in targets]) ntokens = lengths.sum().item() targets = data_utils.collate_tokens( targets, pad_idx=pad, left_pad=False ) return targets, lengths, ntokens def collater_seq_label(self, targets, pad): lengths = torch.LongTensor([len(t) for t in targets]) ntokens = lengths.sum().item() targets = data_utils.collate_tokens( targets, pad_idx=pad, left_pad=False ) return targets, lengths, ntokens def collater_label(self, targets_by_label, audio_size, audio_starts): targets_list, lengths_list, ntokens_list = [], [], [] itr = zip(targets_by_label, self.label_rates, self.pad_list) for targets, label_rate, pad in itr: if label_rate == -1: targets, lengths, ntokens = self.collater_seq_label( targets, pad ) else: targets, lengths, ntokens = self.collater_frm_label( targets, audio_size, audio_starts, label_rate, pad ) targets_list.append(targets) lengths_list.append(lengths) ntokens_list.append(ntokens) return targets_list, lengths_list, ntokens_list def num_tokens(self, index): return self.size(index) def size(self, index): if self.pad_audio: return self.sizes[index] return min(self.sizes[index], self.max_sample_size) def ordered_indices(self): if self.shuffle: order = [np.random.permutation(len(self))] else: order = [np.arange(len(self))] order.append(self.sizes) return np.lexsort(order)[::-1] def postprocess(self, wav, cur_sample_rate): if wav.dim() == 2: wav = wav.mean(-1) assert wav.dim() == 1, wav.dim() if cur_sample_rate != self.sample_rate: raise Exception(f"sr {cur_sample_rate} != {self.sample_rate}") if self.normalize: with torch.no_grad(): wav = 
F.layer_norm(wav, wav.shape) return wav
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/hubert_dataset.py
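HubertDataset.collater_frm_label above aligns label frames with the cropped audio by scaling sample offsets with s2f = label_rate / self.sample_rate. A small numeric sketch follows, assuming an illustrative 16 kHz sample rate and 50 Hz label rate (these values are assumptions, not read from any config in this repository).

# Sketch of the audio-sample -> label-frame alignment in collater_frm_label.
sample_rate = 16000   # assumed illustrative value
label_rate = 50       # assumed illustrative value
s2f = label_rate / sample_rate  # label frames per audio sample

audio_start = 8000    # crop starts 0.5 s into the utterance
audio_size = 40000    # 2.5 s of cropped audio
frame_start = int(round(audio_start * s2f))  # -> 25
frame_size = int(round(audio_size * s2f))    # -> 125
print(frame_start, frame_size)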
# Copyright (c) 2021-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import logging import math from typing import List, Optional, NamedTuple import numpy as np import torch from fairseq.data import ( ConcatDataset, LanguagePairDataset, FileAudioDataset, data_utils, ) from fairseq.data import FairseqDataset logger = logging.getLogger(__name__) class ModalityDatasetItem(NamedTuple): datasetname: str dataset: any max_positions: List[int] max_tokens: Optional[int] = None max_sentences: Optional[int] = None # MultiModalityDataset: it concate multiple datasets with different modalities. # Compared with ConcatDataset it can 1) sample data given the ratios for different datasets # 2) it adds mode to indicate what type of the data samples come from. # It will be used with GroupedEpochBatchIterator together to generate mini-batch with samples # from the same type of dataset # If only one dataset is used, it will perform like the original dataset with mode added class MultiModalityDataset(ConcatDataset): def __init__(self, datasets: List[ModalityDatasetItem]): id_to_mode = [] dsets = [] max_tokens = [] max_sentences = [] max_positions = [] for dset in datasets: id_to_mode.append(dset.datasetname) dsets.append(dset.dataset) max_tokens.append(dset.max_tokens) max_positions.append(dset.max_positions) max_sentences.append(dset.max_sentences) weights = [1.0 for s in dsets] super().__init__(dsets, weights) self.max_tokens = max_tokens self.max_positions = max_positions self.max_sentences = max_sentences self.id_to_mode = id_to_mode self.raw_sub_batch_samplers = [] self._cur_epoch = 0 def set_epoch(self, epoch): super().set_epoch(epoch) self._cur_epoch = epoch def __getitem__(self, idx): dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx) sample = self.datasets[dataset_idx][sample_idx] return (dataset_idx, sample) def collater(self, samples): if len(samples) == 0: return {} dataset_idx = samples[0][0] # make sure all samples in samples are from same dataset assert sum([0 if dataset_idx == s[0] else 1 for s in samples]) == 0 samples = self.datasets[dataset_idx].collater([x[1] for x in samples]) # add mode samples["net_input"]["mode"] = self.id_to_mode[dataset_idx] return samples def size(self, index: int): if len(self.datasets) == 1: return self.datasets[0].size(index) return super().size(index) @property def sizes(self): if len(self.datasets) == 1: return self.datasets[0].sizes super().sizes def ordered_indices(self): """ Returns indices sorted by length. So less padding is needed. """ if len(self.datasets) == 1: return self.datasets[0].ordered_indices() indices_group = [] for d_idx, ds in enumerate(self.datasets): sample_num = self.cumulative_sizes[d_idx] if d_idx > 0: sample_num = sample_num - self.cumulative_sizes[d_idx - 1] assert sample_num == len(ds) indices_group.append(ds.ordered_indices()) return indices_group def get_raw_batch_samplers(self, required_batch_size_multiple, seed): if len(self.raw_sub_batch_samplers) > 0: logger.info(" raw_sub_batch_samplers exists. 
No action is taken") return with data_utils.numpy_seed(seed): indices = self.ordered_indices() for i, ds in enumerate(self.datasets): indices[i] = ds.filter_indices_by_size( indices[i], self.max_positions[i], )[0] sub_batch_sampler = ds.batch_by_size( indices[i], max_tokens=self.max_tokens[i], max_sentences=self.max_sentences[i], required_batch_size_multiple=required_batch_size_multiple, ) self.raw_sub_batch_samplers.append(sub_batch_sampler) def get_batch_samplers(self, mult_ratios, required_batch_size_multiple, seed): self.get_raw_batch_samplers(required_batch_size_multiple, seed) batch_samplers = [] for i, _ in enumerate(self.datasets): if i > 0: sub_batch_sampler = [ [y + self.cumulative_sizes[i - 1] for y in x] for x in self.raw_sub_batch_samplers[i] ] else: sub_batch_sampler = list(self.raw_sub_batch_samplers[i]) smp_r = mult_ratios[i] if smp_r != 1: is_increase = "increased" if smp_r > 1 else "decreased" logger.info( "number of batch for the dataset {} is {} from {} to {}".format( self.id_to_mode[i], is_increase, len(sub_batch_sampler), int(len(sub_batch_sampler) * smp_r), ) ) mul_samplers = [] for _ in range(math.floor(smp_r)): mul_samplers = mul_samplers + sub_batch_sampler if math.floor(smp_r) != smp_r: with data_utils.numpy_seed(seed + self._cur_epoch): np.random.shuffle(sub_batch_sampler) smp_num = int( (smp_r - math.floor(smp_r)) * len(sub_batch_sampler) ) mul_samplers = mul_samplers + sub_batch_sampler[:smp_num] sub_batch_sampler = mul_samplers else: logger.info( "dataset {} batch number is {} ".format( self.id_to_mode[i], len(sub_batch_sampler) ) ) batch_samplers.append(sub_batch_sampler) return batch_samplers class LangPairMaskDataset(FairseqDataset): def __init__( self, dataset: LanguagePairDataset, src_eos: int, src_bos: Optional[int] = None, noise_id: Optional[int] = -1, mask_ratio: Optional[float] = 0, mask_type: Optional[str] = "random", ): self.dataset = dataset self.src_eos = src_eos self.src_bos = src_bos self.noise_id = noise_id self.mask_ratio = mask_ratio self.mask_type = mask_type assert mask_type in ("random", "tail") @property def src_sizes(self): return self.dataset.src_sizes @property def tgt_sizes(self): return self.dataset.tgt_sizes @property def sizes(self): # dataset.sizes can be a dynamically computed sizes: return self.dataset.sizes def get_batch_shapes(self): return self.dataset.buckets def num_tokens_vec(self, indices): return self.dataset.num_tokens_vec(indices) def __len__(self): return len(self.dataset) def num_tokens(self, index): return self.dataset.num_tokens(index) def size(self, index): return self.dataset.size(index) def ordered_indices(self): return self.dataset.ordered_indices() @property def supports_prefetch(self): return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): return self.dataset.prefetch(indices) def mask_src_tokens(self, sample): src_item = sample["source"] mask = None if self.mask_type == "random": mask = torch.rand(len(src_item)).le(self.mask_ratio) else: mask = torch.ones(len(src_item)) mask[: int(len(src_item) * (1 - self.mask_ratio))] = 0 mask = mask.eq(1) if src_item[0] == self.src_bos: mask[0] = False if src_item[-1] == self.src_eos: mask[-1] = False mask_src_item = src_item.masked_fill(mask, self.noise_id) smp = {"id": sample["id"], "source": mask_src_item, "target": sample["target"]} return smp def __getitem__(self, index): sample = self.dataset[index] if self.mask_ratio > 0: sample = self.mask_src_tokens(sample) return sample def collater(self, samples, pad_to_length=None): return 
self.dataset.collater(samples, pad_to_length) class FileAudioDatasetWrapper(FileAudioDataset): def collater(self, samples): samples = super().collater(samples) if len(samples) == 0: return {} samples["net_input"]["src_tokens"] = samples["net_input"]["source"] samples["net_input"]["prev_output_tokens"] = None del samples["net_input"]["source"] samples["net_input"]["src_lengths"] = None samples["net_input"]["alignment"] = None return samples
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/multi_modality_dataset.py
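MultiModalityDataset.get_batch_samplers above up- or down-samples each sub-dataset's batch list by a ratio: the integer part repeats the whole list, and the fractional part appends a random subset after shuffling. A fairseq-independent sketch of that resampling logic with illustrative inputs:

# Stand-in for the ratio-based batch resampling in get_batch_samplers.
# `batches` and `ratio` are illustrative, not values from this repository.
import math
import numpy as np

def resample_batches(batches, ratio, seed=0):
    out = []
    for _ in range(math.floor(ratio)):      # whole repeats
        out += batches
    frac = ratio - math.floor(ratio)
    if frac > 0:
        rng = np.random.default_rng(seed)
        shuffled = list(batches)
        rng.shuffle(shuffled)
        out += shuffled[: int(frac * len(batches))]  # partial repeat
    return out

print(len(resample_batches(list(range(10)), 2.5)))  # -> 25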
import numpy as np

from fairseq.data.audio.feature_transforms import (
    AudioFeatureTransform,
    register_audio_feature_transform,
)


@register_audio_feature_transform("global_cmvn")
class GlobalCMVN(AudioFeatureTransform):
    """Global CMVN (cepstral mean and variance normalization).

    The global mean and variance need to be pre-computed and stored in NumPy
    format (.npz)."""

    @classmethod
    def from_config_dict(cls, config=None):
        _config = {} if config is None else config
        return GlobalCMVN(_config.get("stats_npz_path"))

    def __init__(self, stats_npz_path):
        self.stats_npz_path = stats_npz_path
        stats = np.load(stats_npz_path)
        self.mean, self.std = stats["mean"], stats["std"]

    def __repr__(self):
        return self.__class__.__name__ + f'(stats_npz_path="{self.stats_npz_path}")'

    def __call__(self, x):
        x = np.subtract(x, self.mean)
        x = np.divide(x, self.std)
        return x
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/feature_transforms/global_cmvn.py
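GlobalCMVN above expects a .npz file containing "mean" and "std" arrays. A hedged usage sketch that fabricates such a stats file with NumPy and applies the transform; the file name and the random features are illustrative, and fairseq must be importable.

# Sketch: build a stats file and apply GlobalCMVN.  "stats.npz" and the random
# features are illustrative stand-ins.
import numpy as np
from fairseq.data.audio.feature_transforms.global_cmvn import GlobalCMVN

feats = np.random.rand(100, 80).astype(np.float32)   # (frames, mel bins)
np.savez("stats.npz", mean=feats.mean(axis=0), std=feats.std(axis=0))

cmvn = GlobalCMVN("stats.npz")
normalized = cmvn(feats)
print(normalized.mean(), normalized.std())  # close to 0 and 1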
import importlib import os from abc import ABC, abstractmethod from typing import Dict, Optional class AudioFeatureTransform(ABC): @classmethod @abstractmethod def from_config_dict(cls, config: Optional[Dict] = None): pass AUDIO_FEATURE_TRANSFORM_REGISTRY = {} AUDIO_FEATURE_TRANSFORM_CLASS_NAMES = set() def register_audio_feature_transform(name): def register_audio_feature_transform_cls(cls): if name in AUDIO_FEATURE_TRANSFORM_REGISTRY: raise ValueError(f"Cannot register duplicate transform ({name})") if not issubclass(cls, AudioFeatureTransform): raise ValueError( f"Transform ({name}: {cls.__name__}) must extend " "AudioFeatureTransform" ) if cls.__name__ in AUDIO_FEATURE_TRANSFORM_CLASS_NAMES: raise ValueError( f"Cannot register audio feature transform with duplicate " f"class name ({cls.__name__})" ) AUDIO_FEATURE_TRANSFORM_REGISTRY[name] = cls AUDIO_FEATURE_TRANSFORM_CLASS_NAMES.add(cls.__name__) return cls return register_audio_feature_transform_cls def get_audio_feature_transform(name): return AUDIO_FEATURE_TRANSFORM_REGISTRY[name] transforms_dir = os.path.dirname(__file__) for file in os.listdir(transforms_dir): path = os.path.join(transforms_dir, file) if ( not file.startswith("_") and not file.startswith(".") and (file.endswith(".py") or os.path.isdir(path)) ): name = file[: file.find(".py")] if file.endswith(".py") else file importlib.import_module("fairseq.data.audio.feature_transforms." + name) class CompositeAudioFeatureTransform(AudioFeatureTransform): @classmethod def from_config_dict(cls, config=None): _config = {} if config is None else config _transforms = _config.get("transforms") if _transforms is None: return None transforms = [ get_audio_feature_transform(_t).from_config_dict(_config.get(_t)) for _t in _transforms ] return CompositeAudioFeatureTransform(transforms) def __init__(self, transforms): self.transforms = [t for t in transforms if t is not None] def __call__(self, x): for t in self.transforms: x = t(x) return x def __repr__(self): format_string = ( [self.__class__.__name__ + "("] + [f" {t.__repr__()}" for t in self.transforms] + [")"] ) return "\n".join(format_string)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/feature_transforms/__init__.py
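CompositeAudioFeatureTransform.from_config_dict above reads a "transforms" list and then looks up each named transform's own sub-config under the same dict. A usage sketch with illustrative config values (fairseq must be importable):

# Sketch of building a transform pipeline from a config dict; all values are
# illustrative assumptions.
import numpy as np
from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform

config = {
    "transforms": ["utterance_cmvn", "specaugment"],
    "utterance_cmvn": {"norm_means": True, "norm_vars": True},
    "specaugment": {"freq_mask_N": 2, "freq_mask_F": 10, "time_mask_N": 2,
                    "time_mask_T": 20, "time_mask_p": 0.5},
}
pipeline = CompositeAudioFeatureTransform.from_config_dict(config)
spec = np.random.rand(200, 80).astype(np.float32)
print(pipeline(spec).shape)  # shape is preserved: (200, 80)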
import math import numbers from typing import Optional import numpy as np from fairseq.data.audio.feature_transforms import ( AudioFeatureTransform, register_audio_feature_transform, ) @register_audio_feature_transform("specaugment") class SpecAugmentTransform(AudioFeatureTransform): """SpecAugment (https://arxiv.org/abs/1904.08779)""" @classmethod def from_config_dict(cls, config=None): _config = {} if config is None else config return SpecAugmentTransform( _config.get("time_warp_W", 0), _config.get("freq_mask_N", 0), _config.get("freq_mask_F", 0), _config.get("time_mask_N", 0), _config.get("time_mask_T", 0), _config.get("time_mask_p", 0.0), _config.get("mask_value", None), ) def __init__( self, time_warp_w: int = 0, freq_mask_n: int = 0, freq_mask_f: int = 0, time_mask_n: int = 0, time_mask_t: int = 0, time_mask_p: float = 0.0, mask_value: Optional[float] = 0.0, ): # Sanity checks assert mask_value is None or isinstance( mask_value, numbers.Number ), f"mask_value (type: {type(mask_value)}) must be None or a number" if freq_mask_n > 0: assert freq_mask_f > 0, ( f"freq_mask_F ({freq_mask_f}) " f"must be larger than 0 when doing freq masking." ) if time_mask_n > 0: assert time_mask_t > 0, ( f"time_mask_T ({time_mask_t}) must be larger than 0 when " f"doing time masking." ) self.time_warp_w = time_warp_w self.freq_mask_n = freq_mask_n self.freq_mask_f = freq_mask_f self.time_mask_n = time_mask_n self.time_mask_t = time_mask_t self.time_mask_p = time_mask_p self.mask_value = mask_value def __repr__(self): return ( self.__class__.__name__ + "(" + ", ".join( [ f"time_warp_w={self.time_warp_w}", f"freq_mask_n={self.freq_mask_n}", f"freq_mask_f={self.freq_mask_f}", f"time_mask_n={self.time_mask_n}", f"time_mask_t={self.time_mask_t}", f"time_mask_p={self.time_mask_p}", ] ) + ")" ) def __call__(self, spectrogram): assert len(spectrogram.shape) == 2, "spectrogram must be a 2-D tensor." distorted = spectrogram.copy() # make a copy of input spectrogram. num_frames = spectrogram.shape[0] # or 'tau' in the paper. num_freqs = spectrogram.shape[1] # or 'miu' in the paper. mask_value = self.mask_value if mask_value is None: # if no value was specified, use local mean. mask_value = spectrogram.mean() if num_frames == 0: return spectrogram if num_freqs < self.freq_mask_f: return spectrogram if self.time_warp_w > 0: if 2 * self.time_warp_w < num_frames: import cv2 w0 = np.random.randint(self.time_warp_w, num_frames - self.time_warp_w) w = np.random.randint(-self.time_warp_w + 1, self.time_warp_w) upper, lower = distorted[:w0, :], distorted[w0:, :] upper = cv2.resize( upper, dsize=(num_freqs, w0 + w), interpolation=cv2.INTER_LINEAR ) lower = cv2.resize( lower, dsize=(num_freqs, num_frames - w0 - w), interpolation=cv2.INTER_LINEAR, ) distorted = np.concatenate((upper, lower), axis=0) for _i in range(self.freq_mask_n): f = np.random.randint(0, self.freq_mask_f) f0 = np.random.randint(0, num_freqs - f) if f != 0: distorted[:, f0 : f0 + f] = mask_value max_time_mask_t = min( self.time_mask_t, math.floor(num_frames * self.time_mask_p) ) if max_time_mask_t < 1: return distorted for _i in range(self.time_mask_n): t = np.random.randint(0, max_time_mask_t) t0 = np.random.randint(0, num_frames - t) if t != 0: distorted[t0 : t0 + t, :] = mask_value return distorted
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/feature_transforms/specaugment.py
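A hedged usage sketch for SpecAugmentTransform above, with illustrative parameters; time_warp_W is left at its default of 0 so OpenCV is not required, and fairseq must be importable.

# Sketch: apply SpecAugment with assumed illustrative parameters.
import numpy as np
from fairseq.data.audio.feature_transforms.specaugment import SpecAugmentTransform

transform = SpecAugmentTransform.from_config_dict(
    {"freq_mask_N": 1, "freq_mask_F": 27, "time_mask_N": 1,
     "time_mask_T": 100, "time_mask_p": 1.0, "mask_value": 0.0}
)
spec = np.random.rand(300, 80).astype(np.float32) + 1.0  # strictly positive input
masked = transform(spec)
# masked cells were overwritten with mask_value=0.0, so they are easy to count
print("masked cells:", int((masked == 0.0).sum()))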
import numpy as np

from fairseq.data.audio.feature_transforms import (
    AudioFeatureTransform,
    register_audio_feature_transform,
)


@register_audio_feature_transform("utterance_cmvn")
class UtteranceCMVN(AudioFeatureTransform):
    """Utterance-level CMVN (cepstral mean and variance normalization)"""

    @classmethod
    def from_config_dict(cls, config=None):
        _config = {} if config is None else config
        return UtteranceCMVN(
            _config.get("norm_means", True),
            _config.get("norm_vars", True),
        )

    def __init__(self, norm_means=True, norm_vars=True):
        self.norm_means, self.norm_vars = norm_means, norm_vars

    def __repr__(self):
        return (
            self.__class__.__name__
            + f"(norm_means={self.norm_means}, norm_vars={self.norm_vars})"
        )

    def __call__(self, x):
        mean = x.mean(axis=0)
        square_sums = (x ** 2).sum(axis=0)

        if self.norm_means:
            x = np.subtract(x, mean)
        if self.norm_vars:
            var = square_sums / x.shape[0] - mean ** 2
            std = np.sqrt(np.maximum(var, 1e-10))
            x = np.divide(x, std)

        return x
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/audio/feature_transforms/utterance_cmvn.py
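UtteranceCMVN above normalizes each utterance to roughly zero mean and unit variance per feature dimension. A quick check on random features (shapes are illustrative; fairseq must be importable):

# Sketch: per-utterance CMVN leaves each feature dimension with ~zero mean and
# ~unit variance.
import numpy as np
from fairseq.data.audio.feature_transforms.utterance_cmvn import UtteranceCMVN

x = np.random.rand(120, 40).astype(np.float32) * 5.0
y = UtteranceCMVN(norm_means=True, norm_vars=True)(x)
print(np.allclose(y.mean(axis=0), 0.0, atol=1e-3),
      np.allclose(y.std(axis=0), 1.0, atol=1e-3))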
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq import file_utils from fairseq.data.encoders import register_bpe from fairseq.dataclass import FairseqDataclass @dataclass class SentencepieceConfig(FairseqDataclass): sentencepiece_model: str = field( default="???", metadata={"help": "path to sentencepiece model"} ) @register_bpe("sentencepiece", dataclass=SentencepieceConfig) class SentencepieceBPE(object): def __init__(self, cfg): sentencepiece_model = file_utils.cached_path(cfg.sentencepiece_model) try: import sentencepiece as spm self.sp = spm.SentencePieceProcessor() self.sp.Load(sentencepiece_model) except ImportError: raise ImportError( "Please install sentencepiece with: pip install sentencepiece" ) def encode(self, x: str) -> str: return " ".join(self.sp.EncodeAsPieces(x)) def decode(self, x: str) -> str: return x.replace(" ", "").replace("\u2581", " ").strip() def is_beginning_of_word(self, x: str) -> bool: if x in ["<unk>", "<s>", "</s>", "<pad>"]: # special elements are always considered beginnings # HACK: this logic is already present in fairseq/tasks/masked_lm.py # but these special tokens are also contained in the sentencepiece # vocabulary which causes duplicate special tokens. This hack makes # sure that they are all taken into account. return True return x.startswith("\u2581")
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/sentencepiece_bpe.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq import file_utils from fairseq.data.encoders import register_bpe from fairseq.dataclass import FairseqDataclass @dataclass class fastBPEConfig(FairseqDataclass): bpe_codes: str = field(default="???", metadata={"help": "path to fastBPE BPE"}) @register_bpe("fastbpe", dataclass=fastBPEConfig) class fastBPE(object): def __init__(self, cfg): if cfg.bpe_codes is None: raise ValueError("--bpe-codes is required for --bpe=fastbpe") codes = file_utils.cached_path(cfg.bpe_codes) try: import fastBPE self.bpe = fastBPE.fastBPE(codes) self.bpe_symbol = "@@ " except ImportError: raise ImportError("Please install fastBPE with: pip install fastBPE") def encode(self, x: str) -> str: return self.bpe.apply([x])[0] def decode(self, x: str) -> str: return (x + " ").replace(self.bpe_symbol, "").rstrip()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/fastbpe.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.data.encoders import register_tokenizer
from fairseq.dataclass import FairseqDataclass


@register_tokenizer("nltk", dataclass=FairseqDataclass)
class NLTKTokenizer(object):
    def __init__(self, *unused):
        try:
            from nltk.tokenize import word_tokenize

            self.word_tokenize = word_tokenize
        except ImportError:
            raise ImportError("Please install nltk with: pip install nltk")

    def encode(self, x: str) -> str:
        return " ".join(self.word_tokenize(x))

    def decode(self, x: str) -> str:
        return x
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/nltk_tokenizer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq import file_utils from fairseq.data.encoders import register_bpe from fairseq.dataclass import FairseqDataclass from .gpt2_bpe_utils import get_encoder DEFAULT_ENCODER_JSON = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json" DEFAULT_VOCAB_BPE = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe" @dataclass class GPT2BPEConfig(FairseqDataclass): gpt2_encoder_json: str = field( default=DEFAULT_ENCODER_JSON, metadata={"help": "path to encoder.json"} ) gpt2_vocab_bpe: str = field( default=DEFAULT_VOCAB_BPE, metadata={"help": "path to vocab.bpe"} ) @register_bpe("gpt2", dataclass=GPT2BPEConfig) class GPT2BPE(object): def __init__(self, cfg): encoder_json = file_utils.cached_path(cfg.gpt2_encoder_json) vocab_bpe = file_utils.cached_path(cfg.gpt2_vocab_bpe) self.bpe = get_encoder(encoder_json, vocab_bpe) def encode(self, x: str) -> str: return " ".join(map(str, self.bpe.encode(x))) def decode(self, x: str) -> str: return self.bpe.decode( [int(tok) if tok not in {"<unk>", "<mask>"} else tok for tok in x.split()] ) def is_beginning_of_word(self, x: str) -> bool: return self.decode(x).startswith(" ")
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/gpt2_bpe.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq import file_utils from fairseq.data.encoders import register_bpe from fairseq.dataclass import FairseqDataclass @dataclass class SubwordNMTBPEConfig(FairseqDataclass): bpe_codes: str = field(default="???", metadata={"help": "path to subword NMT BPE"}) bpe_separator: str = field(default="@@", metadata={"help": "BPE separator"}) @register_bpe("subword_nmt", dataclass=SubwordNMTBPEConfig) class SubwordNMTBPE(object): def __init__(self, cfg): if cfg.bpe_codes is None: raise ValueError("--bpe-codes is required for --bpe=subword_nmt") codes = file_utils.cached_path(cfg.bpe_codes) try: from subword_nmt import apply_bpe bpe_parser = apply_bpe.create_parser() bpe_args = bpe_parser.parse_args( [ "--codes", codes, "--separator", cfg.bpe_separator, ] ) self.bpe = apply_bpe.BPE( bpe_args.codes, bpe_args.merges, bpe_args.separator, None, bpe_args.glossaries, ) self.bpe_symbol = bpe_args.separator + " " except ImportError: raise ImportError( "Please install subword_nmt with: pip install subword-nmt" ) def encode(self, x: str) -> str: return self.bpe.process_line(x) def decode(self, x: str) -> str: return (x + " ").replace(self.bpe_symbol, "").rstrip()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/subword_nmt_bpe.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq.data.encoders import register_bpe from fairseq.dataclass import FairseqDataclass from fairseq import file_utils @dataclass class HuggingFaceByteLevelBPEConfig(FairseqDataclass): bpe_merges: str = field(default="???", metadata={"help": "path to merges.txt"}) bpe_vocab: str = field(default="???", metadata={"help": "path to vocab.json"}) bpe_add_prefix_space: bool = field( default=False, metadata={"help": "add prefix space before encoding"} ) @register_bpe("hf_byte_bpe", dataclass=HuggingFaceByteLevelBPEConfig) class HuggingFaceByteLevelBPE(object): def __init__(self, cfg): try: from tokenizers import ByteLevelBPETokenizer except ImportError: raise ImportError( "Please install huggingface/tokenizers with: " "pip install tokenizers" ) bpe_vocab = file_utils.cached_path(cfg.bpe_vocab) bpe_merges = file_utils.cached_path(cfg.bpe_merges) self.bpe = ByteLevelBPETokenizer( bpe_vocab, bpe_merges, add_prefix_space=cfg.bpe_add_prefix_space, ) def encode(self, x: str) -> str: return " ".join(map(str, self.bpe.encode(x).ids)) def decode(self, x: str) -> str: return self.bpe.decode( [int(tok) if tok not in {"<unk>", "<mask>"} else tok for tok in x.split()] ) def is_beginning_of_word(self, x: str) -> bool: return self.decode(x).startswith(" ")
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/hf_byte_bpe.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

from fairseq import registry

build_tokenizer, register_tokenizer, TOKENIZER_REGISTRY, _ = registry.setup_registry(
    "--tokenizer",
    default=None,
)

build_bpe, register_bpe, BPE_REGISTRY, _ = registry.setup_registry(
    "--bpe",
    default=None,
)


# automatically import any Python files in the encoders/ directory
for file in sorted(os.listdir(os.path.dirname(__file__))):
    if file.endswith(".py") and not file.startswith("_"):
        module = file[: file.find(".py")]
        importlib.import_module("fairseq.data.encoders." + module)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from typing import Optional from fairseq.data.encoders import register_bpe from fairseq.dataclass import FairseqDataclass @dataclass class BertBPEConfig(FairseqDataclass): bpe_cased: bool = field(default=False, metadata={"help": "set for cased BPE"}) bpe_vocab_file: Optional[str] = field( default=None, metadata={"help": "bpe vocab file"} ) @register_bpe("bert", dataclass=BertBPEConfig) class BertBPE(object): def __init__(self, cfg): try: from transformers import BertTokenizer except ImportError: raise ImportError( "Please install transformers with: pip install transformers" ) if cfg.bpe_vocab_file: self.bert_tokenizer = BertTokenizer( cfg.bpe_vocab_file, do_lower_case=not cfg.bpe_cased ) else: vocab_file_name = ( "bert-base-cased" if cfg.bpe_cased else "bert-base-uncased" ) self.bert_tokenizer = BertTokenizer.from_pretrained(vocab_file_name) def encode(self, x: str) -> str: return " ".join(self.bert_tokenizer.tokenize(x)) def decode(self, x: str) -> str: return self.bert_tokenizer.clean_up_tokenization( self.bert_tokenizer.convert_tokens_to_string(x.split(" ")) ) def is_beginning_of_word(self, x: str) -> bool: return not x.startswith("##")
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/hf_bert_bpe.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq import file_utils from fairseq.data.encoders import register_bpe from fairseq.data.encoders.byte_utils import ( SPACE, SPACE_ESCAPE, byte_encode, smart_byte_decode, ) from fairseq.dataclass import FairseqDataclass @dataclass class ByteBpeConfig(FairseqDataclass): sentencepiece_model_path: str = field( default="???", metadata={"help": "path to sentencepiece model"} ) @register_bpe("byte_bpe", dataclass=ByteBpeConfig) class ByteBPE(object): def __init__(self, cfg): vocab = file_utils.cached_path(cfg.sentencepiece_model_path) try: import sentencepiece as spm self.sp = spm.SentencePieceProcessor() self.sp.Load(vocab) except ImportError: raise ImportError( "Please install sentencepiece with: pip install sentencepiece" ) def encode(self, x: str) -> str: byte_encoded = byte_encode(x) return SPACE.join(self.sp.EncodeAsPieces(byte_encoded)) @staticmethod def decode(x: str) -> str: unescaped = x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE) return smart_byte_decode(unescaped)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/byte_bpe.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from fairseq.data import encoders


def get_whole_word_mask(args, dictionary):
    bpe = encoders.build_bpe(args)
    if bpe is not None:

        def is_beginning_of_word(i):
            if i < dictionary.nspecial:
                # special elements are always considered beginnings
                return True
            tok = dictionary[i]
            if tok.startswith("madeupword"):
                return True
            try:
                return bpe.is_beginning_of_word(tok)
            except ValueError:
                return True

        mask_whole_words = torch.ByteTensor(
            list(map(is_beginning_of_word, range(len(dictionary))))
        )
        return mask_whole_words
    return None
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/utils.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import re

from fairseq.data.encoders import register_tokenizer
from fairseq.dataclass import FairseqDataclass


@register_tokenizer("space", dataclass=FairseqDataclass)
class SpaceTokenizer(object):
    def __init__(self, *unused):
        self.space_tok = re.compile(r"\s+")

    def encode(self, x: str) -> str:
        return self.space_tok.sub(" ", x)

    def decode(self, x: str) -> str:
        return x
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/space_tokenizer.py
""" Byte pair encoding utilities from GPT-2. Original source: https://github.com/openai/gpt-2/blob/master/src/encoder.py Original license: MIT """ import json from functools import lru_cache @lru_cache() def bytes_to_unicode(): """ Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2 ** 8): if b not in bs: bs.append(b) cs.append(2 ** 8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): """Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class Encoder: def __init__(self, encoder, bpe_merges, errors="replace"): self.encoder = encoder self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} try: import regex as re self.re = re except ImportError: raise ImportError("Please install regex with: pip install regex") # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = self.re.compile( r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except: new_word.extend(word[i:]) break if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word def encode(self, text): bpe_tokens = [] for token in self.re.findall(self.pat, text): token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) bpe_tokens.extend( self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ") ) return bpe_tokens def decode(self, tokens): text = "".join([self.decoder.get(token, token) for token in tokens]) text = bytearray([self.byte_decoder[c] for c in text]).decode( "utf-8", errors=self.errors ) return text def get_encoder(encoder_json_path, vocab_bpe_path): with open(encoder_json_path, "r") as f: encoder = json.load(f) with open(vocab_bpe_path, "r", encoding="utf-8") as f: bpe_data = f.read() bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split("\n")[1:-1]] return Encoder( encoder=encoder, 
bpe_merges=bpe_merges, )
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/gpt2_bpe_utils.py
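bytes_to_unicode above builds a reversible 256-entry mapping from byte values to printable unicode characters, which is what lets the GPT-2 BPE operate on arbitrary UTF-8 bytes without whitespace or control characters. A small round-trip check (the sample string is illustrative; fairseq must be importable):

# Sketch: the byte<->unicode table from bytes_to_unicode is a bijection over all
# 256 byte values, so any UTF-8 byte sequence round-trips.
from fairseq.data.encoders.gpt2_bpe_utils import bytes_to_unicode

byte_encoder = bytes_to_unicode()
byte_decoder = {v: k for k, v in byte_encoder.items()}
assert len(byte_encoder) == 256 and len(byte_decoder) == 256

text = "naïve café"
mapped = "".join(byte_encoder[b] for b in text.encode("utf-8"))
recovered = bytearray(byte_decoder[c] for c in mapped).decode("utf-8")
assert recovered == text
print(mapped)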
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field from fairseq.data.encoders import register_tokenizer from fairseq.dataclass import FairseqDataclass @dataclass class MosesTokenizerConfig(FairseqDataclass): source_lang: str = field(default="en", metadata={"help": "source language"}) target_lang: str = field(default="en", metadata={"help": "target language"}) moses_no_dash_splits: bool = field( default=False, metadata={"help": "don't apply dash split rules"} ) moses_no_escape: bool = field( default=False, metadata={"help": "don't perform HTML escaping on apostrophe, quotes, etc."}, ) @register_tokenizer("moses", dataclass=MosesTokenizerConfig) class MosesTokenizer(object): def __init__(self, cfg: MosesTokenizerConfig): self.cfg = cfg try: from sacremoses import MosesTokenizer, MosesDetokenizer self.tok = MosesTokenizer(cfg.source_lang) self.detok = MosesDetokenizer(cfg.target_lang) except ImportError: raise ImportError( "Please install Moses tokenizer with: pip install sacremoses" ) def encode(self, x: str) -> str: return self.tok.tokenize( x, aggressive_dash_splits=(not self.cfg.moses_no_dash_splits), return_str=True, escape=(not self.cfg.moses_no_escape), ) def decode(self, x: str) -> str: return self.detok.detokenize(x.split())
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/moses_tokenizer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.data.encoders import register_bpe

SPACE = chr(32)
SPACE_ESCAPE = chr(9601)


@register_bpe("characters")
class Characters(object):
    def __init__(self, *unused):
        pass

    @staticmethod
    def add_args(parser):
        pass

    @staticmethod
    def encode(x: str) -> str:
        escaped = x.replace(SPACE, SPACE_ESCAPE)
        return SPACE.join(list(escaped))

    @staticmethod
    def decode(x: str) -> str:
        return x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/characters.py
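A trivial round-trip sketch for the character-level encoder above (the sample string is illustrative; fairseq must be importable):

# Sketch: character-level encode/decode round trip.
from fairseq.data.encoders.characters import Characters

encoded = Characters.encode("hi there")
print(encoded)                       # h i ▁ t h e r e
assert Characters.decode(encoded) == "hi there"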
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.data.encoders import register_bpe
from fairseq.data.encoders.byte_utils import (
    SPACE,
    SPACE_ESCAPE,
    byte_encode,
    smart_byte_decode,
)


@register_bpe("bytes")
class Bytes(object):
    def __init__(self, *unused):
        pass

    @staticmethod
    def add_args(parser):
        pass

    @staticmethod
    def encode(x: str) -> str:
        encoded = byte_encode(x)
        escaped = encoded.replace(SPACE, SPACE_ESCAPE)
        return SPACE.join(list(escaped))

    @staticmethod
    def decode(x: str) -> str:
        unescaped = x.replace(SPACE, "").replace(SPACE_ESCAPE, SPACE)
        return smart_byte_decode(unescaped)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/bytes.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import re

WHITESPACE_NORMALIZER = re.compile(r"\s+")
SPACE = chr(32)
SPACE_ESCAPE = chr(9601)
# excluding non-breaking space (160) here
PRINTABLE_LATIN = set(
    list(range(32, 126 + 1)) + list(range(161, 172 + 1)) + list(range(174, 255 + 1))
)
BYTE_TO_BCHAR = {
    b: chr(b) if b in PRINTABLE_LATIN else chr(256 + b) for b in range(256)
}
BCHAR_TO_BYTE = {bc: b for b, bc in BYTE_TO_BCHAR.items()}


def byte_encode(x: str) -> str:
    normalized = WHITESPACE_NORMALIZER.sub(SPACE, x)
    return "".join([BYTE_TO_BCHAR[b] for b in normalized.encode("utf-8")])


def byte_decode(x: str) -> str:
    try:
        return bytes([BCHAR_TO_BYTE[bc] for bc in x]).decode("utf-8")
    except ValueError:
        return ""


def smart_byte_decode(x: str) -> str:
    output = byte_decode(x)
    if output == "":
        # DP the best recovery (max valid chars) if it's broken
        n_bytes = len(x)
        f = [0 for _ in range(n_bytes + 1)]
        pt = [0 for _ in range(n_bytes + 1)]
        for i in range(1, n_bytes + 1):
            f[i], pt[i] = f[i - 1], i - 1
            for j in range(1, min(4, i) + 1):
                if f[i - j] + 1 > f[i] and len(byte_decode(x[i - j : i])) > 0:
                    f[i], pt[i] = f[i - j] + 1, i - j
        cur_pt = n_bytes
        while cur_pt > 0:
            if f[cur_pt] == f[pt[cur_pt]] + 1:
                output = byte_decode(x[pt[cur_pt] : cur_pt]) + output
            cur_pt = pt[cur_pt]
    return output
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/encoders/byte_utils.py
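byte_encode above maps UTF-8 bytes to printable characters, and smart_byte_decode can recover the valid prefix even when the encoded string is truncated inside a multi-byte character, thanks to its dynamic-programming fallback. A hedged sketch (the sample text is illustrative; fairseq must be importable):

# Sketch: byte_encode/smart_byte_decode round trip, plus recovery from a byte
# string truncated inside a multi-byte character.
from fairseq.data.encoders.byte_utils import byte_encode, smart_byte_decode

text = "café ☕"
encoded = byte_encode(text)
assert smart_byte_decode(encoded) == text

# Drop the last byte-character: the trailing multi-byte symbol is now partial,
# so plain decoding fails but the DP recovery keeps the valid prefix.
broken = encoded[:-1]
print(smart_byte_decode(broken))  # "café " (the truncated ☕ is dropped)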
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os, collections import pickle import logging import numpy as np import six def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class SquadExample(object): """A single training/test example for simple sequence classification. For examples without an answer, the start and end position are -1. """ def __init__(self, qas_id, question_text, doc_tokens, orig_answer_text=None, start_position=None, end_position=None, is_impossible=False, answers=[]): self.qas_id = qas_id self.question_text = question_text self.doc_tokens = doc_tokens self.orig_answer_text = orig_answer_text self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible self.answers = answers def __str__(self): return self.__repr__() def __repr__(self): s = "" s += "qas_id: %s" % (str(self.qas_id)) s += ", question_text: %s" % ( str(self.question_text)) s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens)) if self.start_position: s += ", start_position: %d" % (self.start_position) if self.start_position: s += ", end_position: %d" % (self.end_position) if self.start_position: s += ", is_impossible: %r" % (self.is_impossible) if self.orig_answer_text: s += ", ori_answer_text: %s" % (self.orig_answer_text) s += ", answer_text: %s" % (' '.join(self.doc_tokens[self.start_position: self.end_position + 1])) return s class SquadFeature(object): """A single set of features of data.""" def __init__(self, unique_id, example_index, doc_span_index, tokens, token_to_orig_map, token_is_max_context, input_ids, p_mask, doc_offset, start_position=None, end_position=None, is_impossible=None): self.unique_id = unique_id self.example_index = example_index self.doc_span_index = doc_span_index self.tokens = tokens self.p_mask = p_mask self.doc_offset = doc_offset self.token_to_orig_map = token_to_orig_map self.token_is_max_context = token_is_max_context self.input_ids = input_ids self.start_position = start_position self.end_position = end_position self.is_impossible = is_impossible def read_squad_examples(input_file, is_training, version_2_with_negative): """Read a SQuAD json file into a list of SquadExample.""" with open(input_file, "r") as reader: input_data = json.load(reader)["data"] def is_whitespace(c): if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: return True return False examples = [] for entry in input_data: for paragraph in entry["paragraphs"]: paragraph_text = paragraph["context"] doc_tokens = [] char_to_word_offset = [] prev_is_whitespace = True for c in paragraph_text: if is_whitespace(c): prev_is_whitespace = True else: if prev_is_whitespace: doc_tokens.append(c) else: doc_tokens[-1] += c prev_is_whitespace = False char_to_word_offset.append(len(doc_tokens) - 1) for qa in paragraph["qas"]: qas_id = qa["id"] question_text = 
qa["question"] start_position = None end_position = None orig_answer_text = None is_impossible = qa.get("is_impossible", False) answers = [] if is_training: if (len(qa["answers"]) != 1) and (not is_impossible): raise ValueError( "For training, each question should have exactly 1 answer.") if not is_impossible: answer = qa["answers"][0] orig_answer_text = answer["text"] answer_offset = answer["answer_start"] answer_length = len(orig_answer_text) start_position = char_to_word_offset[answer_offset] end_position = char_to_word_offset[answer_offset + answer_length - 1] # Only add answers where the text can be exactly recovered from the # document. If this CAN'T happen it's likely due to weird Unicode # stuff so we will just skip the example. # # Note that this means for training mode, every example is NOT # guaranteed to be preserved. actual_text = " ".join( doc_tokens[start_position:(end_position + 1)]) cleaned_answer_text = " ".join( whitespace_tokenize(orig_answer_text)) if actual_text.find(cleaned_answer_text) == -1: print("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text) continue else: start_position = -1 end_position = -1 orig_answer_text = "" elif not is_impossible: answers = qa["answers"] example = SquadExample( qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible, answers=answers) examples.append(example) return examples def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): """Returns tokenized answer spans that better match the annotated answer.""" # The SQuAD annotations are character based. We first project them to # whitespace-tokenized words. But then after WordPiece tokenization, we can # often find a "better match". For example: # # Question: What year was John Smith born? # Context: The leader was John Smith (1895-1943). # Answer: 1895 # # The original whitespace-tokenized answer will be "(1895-1943).". However # after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match # the exact answer, 1895. # # However, this is not always possible. Consider the following: # # Question: What country is the top exporter of electornics? # Context: The Japanese electronics industry is the lagest in the world. # Answer: Japan # # In this case, the annotator chose "Japan" as a character sub-span of # the word "Japanese". Since our WordPiece tokenizer does not split # "Japanese", we just use "Japanese" as the annotation. This is fairly rare # in SQuAD, but does happen. tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) for new_start in range(input_start, input_end + 1): for new_end in range(input_end, new_start - 1, -1): text_span = " ".join(doc_tokens[new_start:(new_end + 1)]) if text_span == tok_answer_text: return (new_start, new_end) return (input_start, input_end) def _check_is_max_context(doc_spans, cur_span_index, position): """Check if this is the 'max context' doc span for the token.""" # Because of the sliding window approach taken to scoring documents, a single # token can appear in multiple documents. E.g. # Doc: the man went to the store and bought a gallon of milk # Span A: the man went to the # Span B: to the store and bought # Span C: and bought a gallon of # ... # # Now the word 'bought' will have two scores from spans B and C. 
We only # want to consider the score with "maximum context", which we define as # the *minimum* of its left and right context (the *sum* of left and # right context will always be the same, of course). # # In the example the maximum context for 'bought' would be span C since # it has 1 left context and 3 right context, while span B has 4 left context # and 0 right context. best_score = None best_span_index = None for (span_index, doc_span) in enumerate(doc_spans): end = doc_span.start + doc_span.length - 1 if position < doc_span.start: continue if position > end: continue num_left_context = position - doc_span.start num_right_context = end - position score = min(num_left_context, num_right_context) + 0.01 * doc_span.length if best_score is None or score > best_score: best_score = score best_span_index = span_index return cur_span_index == best_span_index def squad_convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, cls_token='[CLS]', sep_token='[SEP]', additional_seq=True): features = [] unique_id = 1000000000 for (example_index, example) in enumerate(examples): query_tokens = tokenizer.tokenize(example.question_text) if len(query_tokens) > max_query_length: query_tokens = query_tokens[0:max_query_length] tok_to_orig_index = [] orig_to_tok_index = [] all_doc_tokens = [] for (i, token) in enumerate(example.doc_tokens): orig_to_tok_index.append(len(all_doc_tokens)) sub_tokens = tokenizer.tokenize(token) for sub_token in sub_tokens: tok_to_orig_index.append(i) all_doc_tokens.append(sub_token) tok_start_position = None tok_end_position = None if is_training and example.is_impossible: tok_start_position = -1 tok_end_position = -1 if is_training and not example.is_impossible: tok_start_position = orig_to_tok_index[example.start_position] if example.end_position < len(example.doc_tokens) - 1: tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 else: tok_end_position = len(all_doc_tokens) - 1 (tok_start_position, tok_end_position) = _improve_answer_span( all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text) max_tokens_for_doc = max_seq_length - len(query_tokens) - 3 # We can have documents that are longer than the maximum sequence length. # To deal with this we do a sliding window approach, where we take chunks # of the up to our max length with a stride of `doc_stride`. 
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name "DocSpan", ["start", "length"]) doc_spans = [] start_offset = 0 while start_offset < len(all_doc_tokens): length = len(all_doc_tokens) - start_offset if length > max_tokens_for_doc: length = max_tokens_for_doc doc_spans.append(_DocSpan(start=start_offset, length=length)) if start_offset + length == len(all_doc_tokens): break start_offset += min(length, doc_stride) for (doc_span_index, doc_span) in enumerate(doc_spans): tokens = [] p_mask = [] token_to_orig_map = {} token_is_max_context = {} tokens.append(cls_token) p_mask.append(0) for token in query_tokens: tokens.append(token) p_mask.append(1) tokens.append(sep_token) p_mask.append(1) if additional_seq: tokens.append(sep_token) p_mask.append(1) doc_offset = len(tokens) for i in range(doc_span.length): split_token_index = doc_span.start + i token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index] is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index) token_is_max_context[len(tokens)] = is_max_context tokens.append(all_doc_tokens[split_token_index]) p_mask.append(0) tokens.append(sep_token) p_mask.append(1) input_ids = tokenizer.convert_tokens_to_ids(tokens) assert len(p_mask) == len(input_ids) start_position = None end_position = None if is_training and not example.is_impossible: # For training, if our document chunk does not contain an annotation # we throw it out, since there is nothing to predict. doc_start = doc_span.start doc_end = doc_span.start + doc_span.length - 1 out_of_span = False if not (tok_start_position >= doc_start and tok_end_position <= doc_end): out_of_span = True if out_of_span: start_position = 0 end_position = 0 else: start_position = tok_start_position - doc_start + doc_offset end_position = tok_end_position - doc_start + doc_offset if is_training and example.is_impossible: start_position = 0 end_position = 0 if example_index < 10: print("*** Example ***") print("unique_id: %s" % (unique_id)) print("example_index: %s" % (example_index)) print("doc_span_index: %s" % (doc_span_index)) print("tokens: %s" % " ".join( [x for x in tokens])) print("token_to_orig_map: %s" % " ".join( ["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)])) print("token_is_max_context: %s" % " ".join([ "%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context) ])) print("input_ids: %s" % " ".join([str(x) for x in input_ids])) if is_training and example.is_impossible: print("impossible example") if is_training and not example.is_impossible: answer_text = " ".join(tokens[start_position:(end_position + 1)]) print("start_position: %d" % (start_position)) print("end_position: %d" % (end_position)) print("answer: %s" % (answer_text)) feature = SquadFeature( unique_id=unique_id, example_index=example_index, doc_span_index=doc_span_index, tokens=tokens, token_to_orig_map=token_to_orig_map, token_is_max_context=token_is_max_context, input_ids=input_ids, p_mask=p_mask, doc_offset=doc_offset, start_position=start_position, end_position=end_position, is_impossible=example.is_impossible) features.append(feature) unique_id += 1 return features
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/squad/squad_extractor.py
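# --- Illustrative sketch (not part of the repository) ---
# Shows how read_squad_examples / squad_convert_examples_to_features above might
# be driven end to end, assuming the module is importable at the path shown.
# The whitespace ToyTokenizer and the tiny SQuAD-style record are hypothetical
# stand-ins for a real subword tokenizer and the official data file.
import json
import tempfile

from fairseq.data.squad.squad_extractor import (
    read_squad_examples,
    squad_convert_examples_to_features,
)


class ToyTokenizer:
    """Whitespace 'tokenizer' with a fake vocabulary, for illustration only."""

    def tokenize(self, text):
        return text.lower().split()

    def convert_tokens_to_ids(self, tokens):
        return [hash(token) % 30000 for token in tokens]


toy_data = {"data": [{"paragraphs": [{
    "context": "John Smith was born in 1895 .",
    "qas": [{"id": "q1",
             "question": "When was John Smith born ?",
             "answers": [{"text": "1895", "answer_start": 23}],
             "is_impossible": False}],
}]}]}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(toy_data, f)
    toy_path = f.name

examples = read_squad_examples(toy_path, is_training=True, version_2_with_negative=False)
features = squad_convert_examples_to_features(
    examples, ToyTokenizer(), max_seq_length=64, doc_stride=32,
    max_query_length=16, is_training=True)
print(len(examples), len(features))  # 1 example, 1 sliding-window feature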
from .squad_extractor import SquadExample, SquadFeature, read_squad_examples, squad_convert_examples_to_features
from .basic_tokenizer import BasicTokenizer
from .squad_metrics import SquadResult, compute_predictions_logits, squad_evaluate
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/squad/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0 In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an additional na_prob.json file is provided. This file is expected to map question ID's to the model's predicted probability that a question is unanswerable. """ import collections import json import math import re import string import logging logger = logging.getLogger(__name__) from . import BasicTokenizer #todo, check xl-net implementation https://github.com/google-research/albert/blob/master/squad_utils.py class SquadResult: """ Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset. Args: unique_id: The unique identifier corresponding to that example. start_logits: The logits corresponding to the start of the answer end_logits: The logits corresponding to the end of the answer """ def __init__(self, unique_id, start_logits, end_logits): self.start_logits = start_logits self.end_logits = end_logits self.unique_id = unique_id def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): regex = re.compile(r"\b(a|an|the)\b", re.UNICODE) return re.sub(regex, " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def get_tokens(s): if not s: return [] return normalize_answer(s).split() def compute_exact(a_gold, a_pred): return int(normalize_answer(a_gold) == normalize_answer(a_pred)) def compute_f1(a_gold, a_pred): gold_toks = get_tokens(a_gold) pred_toks = get_tokens(a_pred) common = collections.Counter(gold_toks) & collections.Counter(pred_toks) num_same = sum(common.values()) if len(gold_toks) == 0 or len(pred_toks) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks) if num_same == 0: return 0 precision = 1.0 * num_same / len(pred_toks) recall = 1.0 * num_same / len(gold_toks) f1 = (2 * precision * recall) / (precision + recall) return f1 def get_raw_scores(examples, preds): """ Computes the exact and f1 scores from the examples and the model predictions """ exact_scores = {} f1_scores = {} for example in examples: qas_id = example.qas_id gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])] if not gold_answers: # For unanswerable questions, only correct answer is empty string gold_answers = [""] if qas_id not in preds: print("Missing prediction for %s" % qas_id) continue prediction = preds[qas_id] exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers) f1_scores[qas_id] = 
max(compute_f1(a, prediction) for a in gold_answers) return exact_scores, f1_scores def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): new_scores = {} for qid, s in scores.items(): pred_na = na_probs[qid] > na_prob_thresh if pred_na: new_scores[qid] = float(not qid_to_has_ans[qid]) else: new_scores[qid] = s return new_scores def make_eval_dict(exact_scores, f1_scores, qid_list=None): if not qid_list: total = len(exact_scores) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores.values()) / total), ("f1", 100.0 * sum(f1_scores.values()) / total), ("total", total), ] ) else: total = len(qid_list) return collections.OrderedDict( [ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total), ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total), ("total", total), ] ) def merge_eval(main_eval, new_eval, prefix): for k in new_eval: main_eval["%s_%s" % (prefix, k)] = new_eval[k] def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score = cur_score best_thresh = 0.0 qid_list = sorted(na_probs, key=lambda k: na_probs[k]) for i, qid in enumerate(qid_list): if qid not in scores: continue if qid_to_has_ans[qid]: diff = scores[qid] else: if preds[qid]: diff = -1 else: diff = 0 cur_score += diff if cur_score > best_score: best_score = cur_score best_thresh = na_probs[qid] has_ans_score, has_ans_cnt = 0, 0 for qid in qid_list: if not qid_to_has_ans[qid]: continue has_ans_cnt += 1 if qid not in scores: continue has_ans_score += scores[qid] return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans) main_eval["best_exact"] = best_exact main_eval["best_exact_thresh"] = exact_thresh main_eval["best_f1"] = best_f1 main_eval["best_f1_thresh"] = f1_thresh main_eval["has_ans_exact"] = has_ans_exact main_eval["has_ans_f1"] = has_ans_f1 def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) cur_score = num_no_ans best_score = cur_score best_thresh = 0.0 qid_list = sorted(na_probs, key=lambda k: na_probs[k]) for _, qid in enumerate(qid_list): if qid not in scores: continue if qid_to_has_ans[qid]: diff = scores[qid] else: if preds[qid]: diff = -1 else: diff = 0 cur_score += diff if cur_score > best_score: best_score = cur_score best_thresh = na_probs[qid] return 100.0 * best_score / len(scores), best_thresh def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) main_eval["best_exact"] = best_exact main_eval["best_exact_thresh"] = exact_thresh main_eval["best_f1"] = best_f1 main_eval["best_f1_thresh"] = f1_thresh def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0): qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples} has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer] no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if 
not has_answer] if not no_answer_probs: no_answer_probs = {k: 0.0 for k in preds} exact, f1 = get_raw_scores(examples, preds) exact_threshold = apply_no_ans_threshold( exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold ) f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold) evaluation = make_eval_dict(exact_threshold, f1_threshold) if has_answer_qids: has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids) merge_eval(evaluation, has_ans_eval, "HasAns") if no_answer_qids: no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids) merge_eval(evaluation, no_ans_eval, "NoAns") if no_answer_probs: find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer) return evaluation def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): """Project the tokenized prediction back to the original text.""" # When we created the data, we kept track of the alignment between original # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So # now `orig_text` contains the span of our original text corresponding to the # span that we predicted. # # However, `orig_text` may contain extra characters that we don't want in # our prediction. # # For example, let's say: # pred_text = steve smith # orig_text = Steve Smith's # # We don't want to return `orig_text` because it contains the extra "'s". # # We don't want to return `pred_text` because it's already been normalized # (the SQuAD eval script also does punctuation stripping/lower casing but # our tokenizer does additional normalization like stripping accent # characters). # # What we really want to return is "Steve Smith". # # Therefore, we have to apply a semi-complicated alignment heuristic between # `pred_text` and `orig_text` to get a character-to-character alignment. This # can fail in certain cases in which case we just return `orig_text`. def _strip_spaces(text): ns_chars = [] ns_to_s_map = collections.OrderedDict() for (i, c) in enumerate(text): if c == " ": continue ns_to_s_map[len(ns_chars)] = i ns_chars.append(c) ns_text = "".join(ns_chars) return (ns_text, ns_to_s_map) # We first tokenize `orig_text`, strip whitespace from the result # and `pred_text`, and check if they are the same length. If they are # NOT the same length, the heuristic has failed. If they are the same # length, we assume the characters are one-to-one aligned. tokenizer = BasicTokenizer(do_lower_case=do_lower_case) tok_text = " ".join(tokenizer.tokenize(orig_text)) start_position = tok_text.find(pred_text) if start_position == -1: if verbose_logging: logger.info("Unable to find text: '%s' in '%s'" % (pred_text, orig_text)) return orig_text end_position = start_position + len(pred_text) - 1 (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) if len(orig_ns_text) != len(tok_ns_text): if verbose_logging: logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text) return orig_text # We then project the characters in `pred_text` back to `orig_text` using # the character-to-character alignment. 
tok_s_to_ns_map = {} for (i, tok_index) in tok_ns_to_s_map.items(): tok_s_to_ns_map[tok_index] = i orig_start_position = None if start_position in tok_s_to_ns_map: ns_start_position = tok_s_to_ns_map[start_position] if ns_start_position in orig_ns_to_s_map: orig_start_position = orig_ns_to_s_map[ns_start_position] if orig_start_position is None: if verbose_logging: logger.info("Couldn't map start position") return orig_text orig_end_position = None if end_position in tok_s_to_ns_map: ns_end_position = tok_s_to_ns_map[end_position] if ns_end_position in orig_ns_to_s_map: orig_end_position = orig_ns_to_s_map[ns_end_position] if orig_end_position is None: if verbose_logging: logger.info("Couldn't map end position") return orig_text output_text = orig_text[orig_start_position : (orig_end_position + 1)] return output_text def _get_best_indexes(logits, n_best_size): """Get the n-best logits from a list.""" index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) best_indexes = [] for i in range(len(index_and_score)): if i >= n_best_size: break best_indexes.append(index_and_score[i][0]) return best_indexes def _compute_softmax(scores): """Compute softmax probability over raw logits.""" if not scores: return [] max_score = None for score in scores: if max_score is None or score > max_score: max_score = score exp_scores = [] total_sum = 0.0 for score in scores: x = math.exp(score - max_score) exp_scores.append(x) total_sum += x probs = [] for score in exp_scores: probs.append(score / total_sum) return probs def compute_predictions_logits( all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold, tokenizer, ): """Write final predictions to the json file and log-odds of null if needed.""" if output_prediction_file: logger.info(f"Writing predictions to: {output_prediction_file}") if output_nbest_file: logger.info(f"Writing nbest to: {output_nbest_file}") if output_null_log_odds_file and version_2_with_negative: logger.info(f"Writing null_log_odds to: {output_null_log_odds_file}") example_index_to_features = collections.defaultdict(list) for feature in all_features: example_index_to_features[feature.example_index].append(feature) unique_id_to_result = {} for result in all_results: unique_id_to_result[result.unique_id] = result _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"] ) all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() scores_diff_json = collections.OrderedDict() all_null_scores = collections.OrderedDict() for (example_index, example) in enumerate(all_examples): features = example_index_to_features[example_index] prelim_predictions = [] # keep track of the minimum score of null start+end of position 0 score_null = 1000000 # large and positive min_null_feature_index = 0 # the paragraph slice with min null score null_start_logit = 0 # the start logit at the slice with min null score null_end_logit = 0 # the end logit at the slice with min null score for (feature_index, feature) in enumerate(features): result = unique_id_to_result[feature.unique_id] start_indexes = _get_best_indexes(result.start_logits, n_best_size) end_indexes = _get_best_indexes(result.end_logits, n_best_size) # if we could have irrelevant answers, get the min score of irrelevant if 
version_2_with_negative: feature_null_score = result.start_logits[0] + result.end_logits[0] if feature_null_score < score_null: score_null = feature_null_score min_null_feature_index = feature_index null_start_logit = result.start_logits[0] null_end_logit = result.end_logits[0] doc_offset = feature.doc_offset for start_index in start_indexes: for end_index in end_indexes: # We could hypothetically create invalid predictions, e.g., predict # that the start of the span is in the question. We throw out all # invalid predictions. if start_index >= len(feature.tokens) or start_index < doc_offset: continue if end_index >= len(feature.tokens) or end_index < doc_offset: continue if start_index not in feature.token_to_orig_map: continue if end_index not in feature.token_to_orig_map: continue if not feature.token_is_max_context.get(start_index, False): continue if end_index < start_index: continue length = end_index - start_index + 1 if length > max_answer_length: continue prelim_predictions.append( _PrelimPrediction( feature_index=feature_index, start_index=start_index, end_index=end_index, start_logit=result.start_logits[start_index], end_logit=result.end_logits[end_index], ) ) if version_2_with_negative: prelim_predictions.append( _PrelimPrediction( feature_index=min_null_feature_index, start_index=0, end_index=0, start_logit=null_start_logit, end_logit=null_end_logit, ) ) prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name "NbestPrediction", ["text", "start_logit", "end_logit"] ) seen_predictions = {} nbest = [] for pred in prelim_predictions: if len(nbest) >= n_best_size: break feature = features[pred.feature_index] if pred.start_index > 0: # this is a non-null prediction tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)] orig_doc_start = feature.token_to_orig_map[pred.start_index] orig_doc_end = feature.token_to_orig_map[pred.end_index] orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)] tok_text = tokenizer.convert_tokens_to_string(tok_tokens) # tok_text = " ".join(tok_tokens) # # # De-tokenize WordPieces that have been split off. # tok_text = tok_text.replace(" ##", "") # tok_text = tok_text.replace("##", "") # Clean whitespace tok_text = tok_text.strip() tok_text = " ".join(tok_text.split()) orig_text = " ".join(orig_tokens) final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) if final_text in seen_predictions: continue seen_predictions[final_text] = True else: final_text = "" seen_predictions[final_text] = True nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) # if we didn't include the empty option in the n-best, include it if version_2_with_negative: if "" not in seen_predictions: nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit)) # In very rare edge cases we could only have single null prediction. # So we just create a nonce prediction in this case to avoid failure. if len(nbest) == 1: nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) # In very rare edge cases we could have no valid predictions. So we # just create a nonce prediction in this case to avoid failure. 
if not nbest: nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) assert len(nbest) >= 1, "No valid predictions" total_scores = [] best_non_null_entry = None for entry in nbest: total_scores.append(entry.start_logit + entry.end_logit) if not best_non_null_entry: if entry.text: best_non_null_entry = entry probs = _compute_softmax(total_scores) nbest_json = [] for (i, entry) in enumerate(nbest): output = collections.OrderedDict() output["text"] = entry.text output["probability"] = probs[i] output["start_logit"] = entry.start_logit output["end_logit"] = entry.end_logit nbest_json.append(output) assert len(nbest_json) >= 1, "No valid predictions" if not version_2_with_negative: all_predictions[example.qas_id] = nbest_json[0]["text"] else: # predict "" iff the null score - the score of best non-null > threshold score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit) scores_diff_json[example.qas_id] = score_diff all_predictions[example.qas_id] = best_non_null_entry.text all_null_scores[example.qas_id] = score_diff all_nbest_json[example.qas_id] = nbest_json if output_prediction_file: with open(output_prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") if output_nbest_file: with open(output_nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if output_null_log_odds_file and version_2_with_negative: with open(output_null_log_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions, all_null_scores
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/squad/squad_metrics.py
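# --- Illustrative sketch (not part of the repository) ---
# Demonstrates the answer-level metrics defined above, assuming the module is
# importable at the path shown. The _Ex stand-in only provides the two
# attributes squad_evaluate actually reads (qas_id and answers).
from fairseq.data.squad.squad_metrics import compute_exact, compute_f1, squad_evaluate

print(compute_exact("The Cat", "the cat!"))          # 1: case, punctuation and articles are normalized away
print(round(compute_f1("the cat sat", "a cat"), 3))  # 0.667: token-overlap F1


class _Ex:
    def __init__(self, qas_id, answers):
        self.qas_id = qas_id
        self.answers = answers


examples = [_Ex("q1", [{"text": "1895"}]), _Ex("q2", [])]  # q2 is unanswerable
preds = {"q1": "1895", "q2": ""}
print(dict(squad_evaluate(examples, preds)))  # exact/f1 of 100.0 plus HasAns/NoAns breakdowns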
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import re import unicodedata import six def convert_to_unicode(text): """Converts `text` to Unicode (if it's not already), assuming utf-8 input.""" if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text.decode("utf-8", "ignore") elif isinstance(text, unicode): return text else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def printable_text(text): """Returns text encoded in a way suitable for print or `tf.logging`.""" # These functions want `str` for both Python2 and Python3, but in one case # it's a Unicode string and in the other it's a byte string. if six.PY3: if isinstance(text, str): return text elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: raise ValueError("Unsupported string type: %s" % (type(text))) elif six.PY2: if isinstance(text, str): return text elif isinstance(text, unicode): return text.encode("utf-8") else: raise ValueError("Unsupported string type: %s" % (type(text))) else: raise ValueError("Not running on Python2 or Python 3?") def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a piece of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BasicTokenizer(object): """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True): """Constructs a BasicTokenizer. Args: do_lower_case: Whether to lower case the input. 
""" self.do_lower_case = do_lower_case def tokenize(self, text): """Tokenizes a piece of text.""" text = convert_to_unicode(text) text = self._clean_text(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat in ("Cc", "Cf"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/data/squad/basic_tokenizer.py
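# --- Illustrative sketch (not part of the repository) ---
# BasicTokenizer above lower-cases, strips accents and splits on punctuation;
# assumes the module is importable at the path shown.
from fairseq.data.squad.basic_tokenizer import BasicTokenizer

tokenizer = BasicTokenizer(do_lower_case=True)
print(tokenizer.tokenize("Héllo, World! (1895-1943)"))
# ['hello', ',', 'world', '!', '(', '1895', '-', '1943', ')']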
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ A standalone module for aggregating metrics. Metrics can be logged from anywhere using the `log_*` functions defined in this module. The logged values will be aggregated dynamically based on the aggregation context in which the logging occurs. See the :func:`aggregate` context manager for more details. """ import contextlib import uuid from collections import defaultdict from typing import Callable, List, Optional from .meters import * # Aggregation contexts are considered "active" when inside the scope # created by the :func:`aggregate` context manager. _aggregators = OrderedDict() _active_aggregators = OrderedDict() _active_aggregators_cnt = defaultdict(lambda: 0) def reset() -> None: """Reset all metrics aggregators.""" _aggregators.clear() _active_aggregators.clear() _active_aggregators_cnt.clear() # The "default" aggregator observes all logged values. _aggregators["default"] = MetersDict() _active_aggregators["default"] = _aggregators["default"] _active_aggregators_cnt["default"] = 1 reset() @contextlib.contextmanager def aggregate(name: Optional[str] = None, new_root: bool = False): """Context manager to aggregate metrics under a given name. Aggregations can be nested. If *new_root* is ``False``, then logged metrics will be recorded along the entire stack of nested aggregators, including a global "default" aggregator. If *new_root* is ``True``, then this aggregator will be the root of a new aggregation stack, thus bypassing any parent aggregators. Note that aggregation contexts are uniquely identified by their *name* (e.g., train, valid). Creating a context with an existing name will reuse the corresponding :class:`MetersDict` instance. If no name is given, then a temporary aggregator will be created. Usage:: with metrics.aggregate("train"): for step, batch in enumerate(epoch): with metrics.aggregate("train_inner") as agg: metrics.log_scalar("loss", get_loss(batch)) if step % log_interval == 0: print(agg.get_smoothed_value("loss")) agg.reset() print(metrics.get_smoothed_values("train")["loss"]) Args: name (str): name of the aggregation. Defaults to a random/temporary name if not given explicitly. new_root (bool): make this aggregation the root of a new aggregation stack. """ if name is None: # generate a temporary name name = str(uuid.uuid4()) assert name not in _aggregators agg = MetersDict() else: assert name != "default" agg = _aggregators.setdefault(name, MetersDict()) if new_root: backup_aggregators = _active_aggregators.copy() _active_aggregators.clear() backup_aggregators_cnt = _active_aggregators_cnt.copy() _active_aggregators_cnt.clear() _active_aggregators[name] = agg _active_aggregators_cnt[name] += 1 yield agg _active_aggregators_cnt[name] -= 1 if _active_aggregators_cnt[name] == 0 and name in _active_aggregators: del _active_aggregators[name] if new_root: _active_aggregators.clear() _active_aggregators.update(backup_aggregators) _active_aggregators_cnt.clear() _active_aggregators_cnt.update(backup_aggregators_cnt) def get_active_aggregators() -> List[MetersDict]: return list(_active_aggregators.values()) def log_scalar( key: str, value: float, weight: float = 1, priority: int = 10, round: Optional[int] = None, ): """Log a scalar value. Args: key (str): name of the field to log value (float): value to log weight (float): weight that this value contributes to the average. 
A weight of 0 will always log the latest value. priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, AverageMeter(round=round), priority) agg[key].update(value, weight) def log_scalar_sum( key: str, value: float, priority: int = 10, round: Optional[int] = None, ): """Log a scalar value that is summed for reporting. Args: key (str): name of the field to log value (float): value to log priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, SumMeter(round=round), priority) agg[key].update(value) def log_derived(key: str, fn: Callable[[MetersDict], float], priority: int = 20): """Log a scalar value derived from other meters. Args: key (str): name of the field to log fn (Callable[[MetersDict], float]): function that takes a single argument *meters* and returns the derived value priority (int): smaller values are logged earlier in the output """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, MetersDict._DerivedMeter(fn), priority) def log_speed( key: str, value: float, priority: int = 30, round: Optional[int] = None, ): """Log the rate of some quantity per second. Args: key (str): name of the field to log value (float): value to log priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, TimeMeter(round=round), priority) agg[key].reset() # reset meter on the first call else: agg[key].update(value) def log_start_time(key: str, priority: int = 40, round: Optional[int] = None): """Log the duration of some event in seconds. The duration will be computed once :func:`log_stop_time` is called. Args: key (str): name of the field to log priority (int): smaller values are logged earlier in the output round (Optional[int]): number of digits to round to when displaying """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, StopwatchMeter(round=round), priority) agg[key].start() def log_stop_time(key: str, weight: float = 0.0, prehook=None): """Log the duration of some event in seconds. The duration will be computed since :func:`log_start_time` was called. Set weight > 0 to report the average time instead of the sum. Args: key (str): name of the field to log weight (float): weight that this time contributes to the average prehook (function, no arguments): will be called before the timer is stopped. For example, use prehook=torch.cuda.synchronize to make sure all gpu operations are done before timer is stopped. """ for agg in get_active_aggregators(): if key in agg: agg[key].stop(weight, prehook) def log_custom( new_meter_fn: Callable[[], Meter], key: str, *args, priority: int = 50, **kwargs, ): """Log using a custom Meter. Any extra *args* or *kwargs* will be passed through to the Meter's *update* method. 
Args: new_meter_fn (Callable[[], Meter]): function that returns a new Meter instance key (str): name of the field to log priority (int): smaller values are logged earlier in the output """ for agg in get_active_aggregators(): if key not in agg: agg.add_meter(key, new_meter_fn(), priority) agg[key].update(*args, **kwargs) def reset_meter(name: str, key: str) -> None: """Reset Meter instance aggregated under a given *name* and *key*.""" meter = get_meter(name, key) if meter is not None: meter.reset() def reset_meters(name: str) -> None: """Reset Meter instances aggregated under a given *name*.""" meters = get_meters(name) if meters is not None: meters.reset() def get_meter(name: str, key: str) -> Meter: """Get a single Meter instance aggregated under *name* and *key*. Returns: Meter or None if no metrics have been logged under *name* and *key*. """ if name not in _aggregators: return None return _aggregators[name].get(key, None) def get_meters(name: str) -> MetersDict: """Get Meter instances aggregated under a given *name*. Returns: MetersDict or None if no metrics have been logged under *name*. """ return _aggregators.get(name, None) def get_smoothed_value(name: str, key: str) -> float: """Get a single smoothed value. Raises: KeyError: if no metrics have been logged under *name* and *key*. """ return _aggregators[name].get_smoothed_value(key) def get_smoothed_values(name: str) -> Dict[str, float]: """Get smoothed values aggregated under a given *name*. Raises: KeyError: if no metrics have been logged under *name*. """ return _aggregators[name].get_smoothed_values() def state_dict(): return OrderedDict([(name, agg.state_dict()) for name, agg in _aggregators.items()]) def load_state_dict(state_dict): for name, agg_state in state_dict.items(): _aggregators[name] = MetersDict() _aggregators[name].load_state_dict(agg_state) def xla_metrics_report(): try: import torch_xla.debug.metrics as met print(met.metrics_report()) except ImportError: return
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/logging/metrics.py
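# --- Illustrative sketch (not part of the repository) ---
# Mirrors the usage shown in the aggregate() docstring above; assumes fairseq
# is installed so that fairseq.logging.metrics is importable.
from fairseq.logging import metrics

metrics.reset()
with metrics.aggregate("train"):
    for step in range(3):
        with metrics.aggregate("train_inner"):
            metrics.log_scalar("loss", 0.5 * (step + 1))  # recorded by default, train and train_inner

print(metrics.get_smoothed_values("train")["loss"])       # 1.0, running average over the epoch
print(metrics.get_smoothed_value("train_inner", "loss"))  # 1.0, same values seen by the inner context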
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import bisect import time from collections import OrderedDict from typing import Dict, Optional try: import torch def type_as(a, b): if torch.is_tensor(a) and torch.is_tensor(b): return a.to(b) else: return a except ImportError: torch = None def type_as(a, b): return a try: import numpy as np except ImportError: np = None class Meter(object): """Base class for Meters.""" def __init__(self): pass def state_dict(self): return {} def load_state_dict(self, state_dict): pass def reset(self): raise NotImplementedError @property def smoothed_value(self) -> float: """Smoothed value used for logging.""" raise NotImplementedError def safe_round(number, ndigits): if hasattr(number, "__round__"): return round(number, ndigits) elif torch is not None and torch.is_tensor(number) and number.numel() == 1: return safe_round(number.item(), ndigits) elif np is not None and np.ndim(number) == 0 and hasattr(number, "item"): return safe_round(number.item(), ndigits) else: return number class AverageMeter(Meter): """Computes and stores the average and current value""" def __init__(self, round: Optional[int] = None): self.round = round self.reset() def reset(self): self.val = None # most recent update self.sum = 0 # sum from all updates self.count = 0 # total n from all updates def update(self, val, n=1): if val is not None: self.val = val if n > 0: self.sum = type_as(self.sum, val) + (val * n) self.count = type_as(self.count, n) + n def state_dict(self): return { "val": self.val, "sum": self.sum, "count": self.count, "round": self.round, } def load_state_dict(self, state_dict): self.val = state_dict["val"] self.sum = state_dict["sum"] self.count = state_dict["count"] self.round = state_dict.get("round", None) @property def avg(self): return self.sum / self.count if self.count > 0 else self.val @property def smoothed_value(self) -> float: val = self.avg if self.round is not None and val is not None: val = safe_round(val, self.round) return val class SumMeter(Meter): """Computes and stores the sum""" def __init__(self, round: Optional[int] = None): self.round = round self.reset() def reset(self): self.sum = 0 # sum from all updates def update(self, val): if val is not None: self.sum = type_as(self.sum, val) + val def state_dict(self): return { "sum": self.sum, "round": self.round, } def load_state_dict(self, state_dict): self.sum = state_dict["sum"] self.round = state_dict.get("round", None) @property def smoothed_value(self) -> float: val = self.sum if self.round is not None and val is not None: val = safe_round(val, self.round) return val class TimeMeter(Meter): """Computes the average occurrence of some event per second""" def __init__( self, init: int = 0, n: int = 0, round: Optional[int] = None, ): self.round = round self.reset(init, n) def reset(self, init=0, n=0): self.init = init self.start = time.perf_counter() self.n = n self.i = 0 def update(self, val=1): self.n = type_as(self.n, val) + val self.i += 1 def state_dict(self): return { "init": self.elapsed_time, "n": self.n, "round": self.round, } def load_state_dict(self, state_dict): if "start" in state_dict: # backwards compatibility for old state_dicts self.reset(init=state_dict["init"]) else: self.reset(init=state_dict["init"], n=state_dict["n"]) self.round = state_dict.get("round", None) @property def avg(self): return self.n / self.elapsed_time @property def 
elapsed_time(self): return self.init + (time.perf_counter() - self.start) @property def smoothed_value(self) -> float: val = self.avg if self.round is not None and val is not None: val = safe_round(val, self.round) return val class StopwatchMeter(Meter): """Computes the sum/avg duration of some event in seconds""" def __init__(self, round: Optional[int] = None): self.round = round self.sum = 0 self.n = 0 self.start_time = None def start(self): self.start_time = time.perf_counter() def stop(self, n=1, prehook=None): if self.start_time is not None: if prehook is not None: prehook() delta = time.perf_counter() - self.start_time self.sum = self.sum + delta self.n = type_as(self.n, n) + n def reset(self): self.sum = 0 # cumulative time during which stopwatch was active self.n = 0 # total n across all start/stop self.start() def state_dict(self): return { "sum": self.sum, "n": self.n, "round": self.round, } def load_state_dict(self, state_dict): self.sum = state_dict["sum"] self.n = state_dict["n"] self.start_time = None self.round = state_dict.get("round", None) @property def avg(self): return self.sum / self.n if self.n > 0 else self.sum @property def elapsed_time(self): if self.start_time is None: return 0.0 return time.perf_counter() - self.start_time @property def smoothed_value(self) -> float: val = self.avg if self.sum > 0 else self.elapsed_time if self.round is not None and val is not None: val = safe_round(val, self.round) return val class MetersDict(OrderedDict): """A sorted dictionary of :class:`Meters`. Meters are sorted according to a priority that is given when the meter is first added to the dictionary. """ def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.priorities = [] def __setitem__(self, key, value): assert key not in self, "MetersDict doesn't support reassignment" priority, value = value bisect.insort(self.priorities, (priority, len(self.priorities), key)) super().__setitem__(key, value) for _, _, key in self.priorities: # reorder dict to match priorities self.move_to_end(key) def add_meter(self, key, meter, priority): self.__setitem__(key, (priority, meter)) def state_dict(self): return [ (pri, key, self[key].__class__.__name__, self[key].state_dict()) for pri, _, key in self.priorities # can't serialize DerivedMeter instances if not isinstance(self[key], MetersDict._DerivedMeter) ] def load_state_dict(self, state_dict): self.clear() self.priorities.clear() for pri, key, meter_cls, meter_state in state_dict: meter = globals()[meter_cls]() meter.load_state_dict(meter_state) self.add_meter(key, meter, pri) def get_smoothed_value(self, key: str) -> float: """Get a single smoothed value.""" meter = self[key] if isinstance(meter, MetersDict._DerivedMeter): return meter.fn(self) else: return meter.smoothed_value def get_smoothed_values(self) -> Dict[str, float]: """Get all smoothed values.""" return OrderedDict( [ (key, self.get_smoothed_value(key)) for key in self.keys() if not key.startswith("_") ] ) def reset(self): """Reset Meter instances.""" for meter in self.values(): if isinstance(meter, MetersDict._DerivedMeter): continue meter.reset() class _DerivedMeter(Meter): """A Meter whose values are derived from other Meters.""" def __init__(self, fn): self.fn = fn def reset(self): pass
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/logging/meters.py
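# --- Illustrative sketch (not part of the repository) ---
# Exercises the Meter classes defined above directly; _DerivedMeter is used here
# the same way metrics.log_derived uses it.
from fairseq.logging.meters import AverageMeter, MetersDict, StopwatchMeter

avg = AverageMeter(round=3)
avg.update(2.0)
avg.update(4.0, n=3)             # weighted update: (2 + 4 * 3) / 4
print(avg.smoothed_value)        # 3.5

sw = StopwatchMeter()
sw.start()
sw.stop()
print(sw.avg >= 0.0)             # True: accumulated wall-clock seconds

md = MetersDict()
md.add_meter("loss", avg, priority=10)
md.add_meter("ppl", MetersDict._DerivedMeter(lambda m: 2 ** m["loss"].avg), priority=20)
print(md.get_smoothed_values())  # OrderedDict([('loss', 3.5), ('ppl', 11.31...)])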
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/logging/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Wrapper around various loggers and progress bars (e.g., tqdm). """ import atexit import json import logging import os import sys from collections import OrderedDict from contextlib import contextmanager from numbers import Number from typing import Optional import torch from .meters import AverageMeter, StopwatchMeter, TimeMeter logger = logging.getLogger(__name__) def progress_bar( iterator, log_format: Optional[str] = None, log_interval: int = 100, log_file: Optional[str] = None, epoch: Optional[int] = None, prefix: Optional[str] = None, tensorboard_logdir: Optional[str] = None, default_log_format: str = "tqdm", wandb_project: Optional[str] = None, wandb_run_name: Optional[str] = None, azureml_logging: Optional[bool] = False, ): if log_format is None: log_format = default_log_format if log_file is not None: handler = logging.FileHandler(filename=log_file) logger.addHandler(handler) if log_format == "tqdm" and not sys.stderr.isatty(): log_format = "simple" if log_format == "json": bar = JsonProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "none": bar = NoopProgressBar(iterator, epoch, prefix) elif log_format == "simple": bar = SimpleProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "tqdm": bar = TqdmProgressBar(iterator, epoch, prefix) else: raise ValueError("Unknown log format: {}".format(log_format)) if tensorboard_logdir: try: # [FB only] custom wrapper for TensorBoard import palaas # noqa from .fb_tbmf_wrapper import FbTbmfWrapper bar = FbTbmfWrapper(bar, log_interval) except ImportError: bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir) if wandb_project: bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name) if azureml_logging: bar = AzureMLProgressBarWrapper(bar) return bar def build_progress_bar( args, iterator, epoch: Optional[int] = None, prefix: Optional[str] = None, default: str = "tqdm", no_progress_bar: str = "none", ): """Legacy wrapper that takes an argparse.Namespace.""" if getattr(args, "no_progress_bar", False): default = no_progress_bar if getattr(args, "distributed_rank", 0) == 0: tensorboard_logdir = getattr(args, "tensorboard_logdir", None) else: tensorboard_logdir = None return progress_bar( iterator, log_format=args.log_format, log_interval=args.log_interval, epoch=epoch, prefix=prefix, tensorboard_logdir=tensorboard_logdir, default_log_format=default, ) def format_stat(stat): if isinstance(stat, Number): stat = "{:g}".format(stat) elif isinstance(stat, AverageMeter): stat = "{:.3f}".format(stat.avg) elif isinstance(stat, TimeMeter): stat = "{:g}".format(round(stat.avg)) elif isinstance(stat, StopwatchMeter): stat = "{:g}".format(round(stat.sum)) elif torch.is_tensor(stat): stat = stat.tolist() return stat class BaseProgressBar(object): """Abstract class for progress bars.""" def __init__(self, iterable, epoch=None, prefix=None): self.iterable = iterable self.n = getattr(iterable, "n", 0) self.epoch = epoch self.prefix = "" if epoch is not None: self.prefix += "epoch {:03d}".format(epoch) if prefix is not None: self.prefix += (" | " if self.prefix != "" else "") + prefix def __len__(self): return len(self.iterable) def __enter__(self): return self def __exit__(self, *exc): return False def __iter__(self): raise NotImplementedError def log(self, stats, tag=None, step=None): """Log intermediate stats according to 
log_interval.""" raise NotImplementedError def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" raise NotImplementedError def update_config(self, config): """Log latest configuration.""" pass def _str_commas(self, stats): return ", ".join(key + "=" + stats[key].strip() for key in stats.keys()) def _str_pipes(self, stats): return " | ".join(key + " " + stats[key].strip() for key in stats.keys()) def _format_stats(self, stats): postfix = OrderedDict(stats) # Preprocess stats according to datatype for key in postfix.keys(): postfix[key] = str(format_stat(postfix[key])) return postfix @contextmanager def rename_logger(logger, new_name): old_name = logger.name if new_name is not None: logger.name = new_name yield logger logger.name = old_name class JsonProgressBar(BaseProgressBar): """Log output in JSON format.""" def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000): super().__init__(iterable, epoch, prefix) self.log_interval = log_interval self.i = None self.size = None def __iter__(self): self.size = len(self.iterable) for i, obj in enumerate(self.iterable, start=self.n): self.i = i yield obj def log(self, stats, tag=None, step=None): """Log intermediate stats according to log_interval.""" step = step or self.i or 0 if step > 0 and self.log_interval is not None and step % self.log_interval == 0: update = ( self.epoch - 1 + (self.i + 1) / float(self.size) if self.epoch is not None else None ) stats = self._format_stats(stats, epoch=self.epoch, update=update) with rename_logger(logger, tag): logger.info(json.dumps(stats)) def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" self.stats = stats if tag is not None: self.stats = OrderedDict( [(tag + "_" + k, v) for k, v in self.stats.items()] ) stats = self._format_stats(self.stats, epoch=self.epoch) with rename_logger(logger, tag): logger.info(json.dumps(stats)) def _format_stats(self, stats, epoch=None, update=None): postfix = OrderedDict() if epoch is not None: postfix["epoch"] = epoch if update is not None: postfix["update"] = round(update, 3) # Preprocess stats according to datatype for key in stats.keys(): postfix[key] = format_stat(stats[key]) return postfix class NoopProgressBar(BaseProgressBar): """No logging.""" def __init__(self, iterable, epoch=None, prefix=None): super().__init__(iterable, epoch, prefix) def __iter__(self): for obj in self.iterable: yield obj def log(self, stats, tag=None, step=None): """Log intermediate stats according to log_interval.""" pass def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" pass class SimpleProgressBar(BaseProgressBar): """A minimal logger for non-TTY environments.""" def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000): super().__init__(iterable, epoch, prefix) self.log_interval = log_interval self.i = None self.size = None def __iter__(self): self.size = len(self.iterable) for i, obj in enumerate(self.iterable, start=self.n): self.i = i yield obj def log(self, stats, tag=None, step=None): """Log intermediate stats according to log_interval.""" step = step or self.i or 0 if step > 0 and self.log_interval is not None and step % self.log_interval == 0: stats = self._format_stats(stats) postfix = self._str_commas(stats) with rename_logger(logger, tag): logger.info( "{}: {:5d} / {:d} {}".format( self.prefix, self.i + 1, self.size, postfix ) ) def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" postfix = self._str_pipes(self._format_stats(stats)) with 
rename_logger(logger, tag): logger.info("{} | {}".format(self.prefix, postfix)) class TqdmProgressBar(BaseProgressBar): """Log to tqdm.""" def __init__(self, iterable, epoch=None, prefix=None): super().__init__(iterable, epoch, prefix) from tqdm import tqdm self.tqdm = tqdm( iterable, self.prefix, leave=False, disable=(logger.getEffectiveLevel() > logging.INFO), ) def __iter__(self): return iter(self.tqdm) def log(self, stats, tag=None, step=None): """Log intermediate stats according to log_interval.""" self.tqdm.set_postfix(self._format_stats(stats), refresh=False) def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" postfix = self._str_pipes(self._format_stats(stats)) with rename_logger(logger, tag): logger.info("{} | {}".format(self.prefix, postfix)) try: _tensorboard_writers = {} from torch.utils.tensorboard import SummaryWriter except ImportError: try: from tensorboardX import SummaryWriter except ImportError: SummaryWriter = None def _close_writers(): for w in _tensorboard_writers.values(): w.close() atexit.register(_close_writers) class TensorboardProgressBarWrapper(BaseProgressBar): """Log to tensorboard.""" def __init__(self, wrapped_bar, tensorboard_logdir): self.wrapped_bar = wrapped_bar self.tensorboard_logdir = tensorboard_logdir if SummaryWriter is None: logger.warning( "tensorboard not found, please install with: pip install tensorboard" ) def _writer(self, key): if SummaryWriter is None: return None _writers = _tensorboard_writers if key not in _writers: _writers[key] = SummaryWriter(os.path.join(self.tensorboard_logdir, key)) _writers[key].add_text("sys.argv", " ".join(sys.argv)) return _writers[key] def __iter__(self): return iter(self.wrapped_bar) def log(self, stats, tag=None, step=None): """Log intermediate stats to tensorboard.""" self._log_to_tensorboard(stats, tag, step) self.wrapped_bar.log(stats, tag=tag, step=step) def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" self._log_to_tensorboard(stats, tag, step) self.wrapped_bar.print(stats, tag=tag, step=step) def update_config(self, config): """Log latest configuration.""" # TODO add hparams to Tensorboard self.wrapped_bar.update_config(config) def _log_to_tensorboard(self, stats, tag=None, step=None): writer = self._writer(tag or "") if writer is None: return if step is None: step = stats["num_updates"] for key in stats.keys() - {"num_updates"}: if isinstance(stats[key], AverageMeter): writer.add_scalar(key, stats[key].val, step) elif isinstance(stats[key], Number): writer.add_scalar(key, stats[key], step) elif torch.is_tensor(stats[key]) and stats[key].numel() == 1: writer.add_scalar(key, stats[key].item(), step) writer.flush() try: import wandb except ImportError: wandb = None class WandBProgressBarWrapper(BaseProgressBar): """Log to Weights & Biases.""" def __init__(self, wrapped_bar, wandb_project, run_name=None): self.wrapped_bar = wrapped_bar if wandb is None: logger.warning("wandb not found, pip install wandb") return # reinit=False to ensure if wandb.init() is called multiple times # within one process it still references the same run wandb.init(project=wandb_project, reinit=False, name=run_name) def __iter__(self): return iter(self.wrapped_bar) def log(self, stats, tag=None, step=None): """Log intermediate stats to tensorboard.""" self._log_to_wandb(stats, tag, step) self.wrapped_bar.log(stats, tag=tag, step=step) def print(self, stats, tag=None, step=None): """Print end-of-epoch stats.""" self._log_to_wandb(stats, tag, step) self.wrapped_bar.print(stats, 
tag=tag, step=step) def update_config(self, config): """Log latest configuration.""" if wandb is not None: wandb.config.update(config) self.wrapped_bar.update_config(config) def _log_to_wandb(self, stats, tag=None, step=None): if wandb is None: return if step is None: step = stats["num_updates"] prefix = "" if tag is None else tag + "/" for key in stats.keys() - {"num_updates"}: if isinstance(stats[key], AverageMeter): wandb.log({prefix + key: stats[key].val}, step=step) elif isinstance(stats[key], Number): wandb.log({prefix + key: stats[key]}, step=step) try: from azureml.core import Run except ImportError: Run = None class AzureMLProgressBarWrapper(BaseProgressBar): """Log to Azure ML""" def __init__(self, wrapped_bar): self.wrapped_bar = wrapped_bar if Run is None: logger.warning("azureml.core not found, pip install azureml-core") return self.run = Run.get_context() def __exit__(self, *exc): if Run is not None: self.run.complete() return False def __iter__(self): return iter(self.wrapped_bar) def log(self, stats, tag=None, step=None): """Log intermediate stats to AzureML""" self._log_to_azureml(stats, tag, step) self.wrapped_bar.log(stats, tag=tag, step=step) def print(self, stats, tag=None, step=None): """Print end-of-epoch stats""" self._log_to_azureml(stats, tag, step) self.wrapped_bar.print(stats, tag=tag, step=step) def update_config(self, config): """Log latest configuration.""" self.wrapped_bar.update_config(config) def _log_to_azureml(self, stats, tag=None, step=None): if Run is None: return if step is None: step = stats["num_updates"] prefix = "" if tag is None else tag + "/" for key in stats.keys() - {"num_updates"}: name = prefix + key if isinstance(stats[key], AverageMeter): self.run.log_row(name=name, **{"step": step, key: stats[key].val}) elif isinstance(stats[key], Number): self.run.log_row(name=name, **{"step": step, key: stats[key]})
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/logging/progress_bar.py
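The progress-bar module above builds its logging backends as thin wrappers: each wrapper forwards log/print to the bar it wraps and additionally writes to its own sink (tqdm, TensorBoard, W&B, Azure ML). A minimal, self-contained sketch of that wrapper pattern follows; the class names and the JSON sink are illustrative, not fairseq's API.

# Minimal sketch of the wrapper pattern used by the progress-bar backends.
# Names (SimpleBar, JsonSinkWrapper) are illustrative, not fairseq's API.
import json


class SimpleBar:
    """Base bar: iterates the data and prints stats to stdout."""

    def __init__(self, iterable, prefix=None):
        self.iterable = iterable
        self.prefix = prefix or ""

    def __iter__(self):
        return iter(self.iterable)

    def log(self, stats, tag=None, step=None):
        print(f"{self.prefix} | step {step} | {stats}")

    def print(self, stats, tag=None, step=None):
        print(f"{self.prefix} | end of epoch | {stats}")


class JsonSinkWrapper:
    """Wraps another bar and mirrors every call into a JSON-lines file."""

    def __init__(self, wrapped_bar, path):
        self.wrapped_bar = wrapped_bar
        self.path = path

    def __iter__(self):
        return iter(self.wrapped_bar)

    def _write(self, stats, tag, step):
        with open(self.path, "a") as f:
            f.write(json.dumps({"tag": tag, "step": step, **stats}) + "\n")

    def log(self, stats, tag=None, step=None):
        self._write(stats, tag, step)                    # extra sink
        self.wrapped_bar.log(stats, tag=tag, step=step)  # then delegate

    def print(self, stats, tag=None, step=None):
        self._write(stats, tag, step)
        self.wrapped_bar.print(stats, tag=tag, step=step)


if __name__ == "__main__":
    bar = JsonSinkWrapper(SimpleBar(range(3), prefix="epoch 1"), "stats.jsonl")
    for i, _ in enumerate(bar):
        bar.log({"loss": 1.0 / (i + 1)}, tag="train", step=i)
    bar.print({"loss": 0.33}, tag="train", step=2)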
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import inspect from typing import Any, Dict, List from fairseq import metrics, utils from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import gen_parser_from_dataclass from torch.nn.modules.loss import _Loss class FairseqCriterion(_Loss): def __init__(self, task): super().__init__() self.task = task if hasattr(task, "target_dictionary"): tgt_dict = task.target_dictionary self.padding_idx = tgt_dict.pad() if tgt_dict is not None else -100 @classmethod def add_args(cls, parser): """Add criterion-specific arguments to the parser.""" dc = getattr(cls, "__dataclass", None) if dc is not None: gen_parser_from_dataclass(parser, dc()) @classmethod def build_criterion(cls, cfg: FairseqDataclass, task): """Construct a criterion from command-line args.""" # arguments in the __init__. init_args = {} for p in inspect.signature(cls).parameters.values(): if ( p.kind == p.POSITIONAL_ONLY or p.kind == p.VAR_POSITIONAL or p.kind == p.VAR_KEYWORD ): # we haven't implemented inference for these argument types, # but PRs welcome :) raise NotImplementedError("{} not supported".format(p.kind)) assert p.kind in {p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY} if p.name == "task": init_args["task"] = task elif p.name == "cfg": init_args["cfg"] = cfg elif hasattr(cfg, p.name): init_args[p.name] = getattr(cfg, p.name) elif p.default != p.empty: pass # we'll use the default value else: raise NotImplementedError( "Unable to infer Criterion arguments, please implement " "{}.build_criterion".format(cls.__name__) ) return cls(**init_args) def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ raise NotImplementedError @staticmethod def aggregate_logging_outputs( logging_outputs: List[Dict[str, Any]] ) -> Dict[str, Any]: """Aggregate logging outputs from data parallel training.""" utils.deprecation_warning( "The aggregate_logging_outputs API is deprecated. " "Please use the reduce_metrics API instead." ) raise NotImplementedError @classmethod def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None: """Aggregate logging outputs from data parallel training.""" utils.deprecation_warning( "Criterions should implement the reduce_metrics API. " "Falling back to deprecated aggregate_logging_outputs API." ) agg_logging_outputs = cls.aggregate_logging_outputs(logging_outputs) for k, v in agg_logging_outputs.items(): if k in {"nsentences", "ntokens", "sample_size"}: continue metrics.log_scalar(k, v) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return False class LegacyFairseqCriterion(FairseqCriterion): def __init__(self, args, task): super().__init__(task=task) self.args = args utils.deprecation_warning( "Criterions should take explicit arguments instead of an " "argparse.Namespace object, please update your criterion by " "extending FairseqCriterion instead of LegacyFairseqCriterion." ) @classmethod def build_criterion(cls, args, task): """Construct a criterion from command-line args.""" return cls(args, task)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/fairseq_criterion.py
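FairseqCriterion.build_criterion above inspects the criterion's __init__ signature and fills each parameter from the config (with "task" special-cased). A rough standalone sketch of that inspection trick, using a plain dataclass and invented class names in place of a real FairseqDataclass config:

# Sketch of filling constructor arguments from a config by signature
# inspection, loosely mirroring FairseqCriterion.build_criterion.
# Config / MyCriterion are illustrative names.
import inspect
from dataclasses import dataclass


@dataclass
class Config:
    sentence_avg: bool = True
    label_smoothing: float = 0.1


class MyCriterion:
    def __init__(self, task, sentence_avg, label_smoothing=0.0):
        self.task = task
        self.sentence_avg = sentence_avg
        self.label_smoothing = label_smoothing


def build_from_cfg(cls, cfg, task):
    init_args = {}
    for p in inspect.signature(cls).parameters.values():
        if p.name == "task":
            init_args["task"] = task          # special-cased, like fairseq
        elif hasattr(cfg, p.name):
            init_args[p.name] = getattr(cfg, p.name)
        elif p.default is not p.empty:
            pass                              # fall back to the default value
        else:
            raise TypeError(f"cannot infer value for argument {p.name!r}")
    return cls(**init_args)


crit = build_from_cfg(MyCriterion, Config(), task="dummy_task")
print(crit.sentence_avg, crit.label_smoothing)  # True 0.1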
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass from omegaconf import II @dataclass class CrossEntropyCriterionConfig(FairseqDataclass): sentence_avg: bool = II("optimization.sentence_avg") @register_criterion("cross_entropy", dataclass=CrossEntropyCriterionConfig) class CrossEntropyCriterion(FairseqCriterion): def __init__(self, task, sentence_avg): super().__init__(task) self.sentence_avg = sentence_avg def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ net_output = model(**sample["net_input"]) loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce) sample_size = ( sample["target"].size(0) if self.sentence_avg else sample["ntokens"] ) logging_output = { "loss": loss.data, "ntokens": sample["ntokens"], "nsentences": sample["target"].size(0), "sample_size": sample_size, } return loss, sample_size, logging_output def compute_loss(self, model, net_output, sample, reduce=True): lprobs = model.get_normalized_probs(net_output, log_probs=True) lprobs = lprobs.view(-1, lprobs.size(-1)) target = model.get_targets(sample, net_output).view(-1) loss = F.nll_loss( lprobs, target, ignore_index=self.padding_idx, reduction="sum" if reduce else "none", ) return loss, loss @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) # we divide by log(2) to convert the loss from base e to base 2 metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) if sample_size != ntokens: metrics.log_scalar( "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) ) else: metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg) ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/cross_entropy.py
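CrossEntropyCriterion flattens (batch, time, vocab) log-probabilities to (batch*time, vocab) and sums the NLL over non-pad tokens; reduce_metrics then divides by the token count and log(2), so the logged loss is in bits and perplexity is 2**loss. A small sketch of the same arithmetic, assuming PyTorch is available and with made-up shapes and padding index:

# Sketch of the summed token-level NLL and the bits/perplexity conversion
# used in the cross-entropy criterion. Shapes and pad_idx are made up.
import math

import torch
import torch.nn.functional as F

pad_idx = 1
bsz, tgt_len, vocab = 2, 5, 11

logits = torch.randn(bsz, tgt_len, vocab)
target = torch.randint(0, vocab, (bsz, tgt_len))
target[0, -2:] = pad_idx  # pretend the first sentence is shorter

lprobs = F.log_softmax(logits, dim=-1).view(-1, vocab)
loss = F.nll_loss(
    lprobs,
    target.view(-1),
    ignore_index=pad_idx,   # pad tokens contribute nothing
    reduction="sum",        # summed, as in the criterion
)

ntokens = target.ne(pad_idx).sum().item()
loss_base2 = loss.item() / ntokens / math.log(2)  # per-token loss in bits
ppl = 2 ** loss_base2
print(f"loss/token = {loss_base2:.3f} bits, ppl = {ppl:.2f}")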
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. from typing import List, Dict, Any from dataclasses import dataclass, field import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass from fairseq.data.data_utils import lengths_to_mask from fairseq.models.fairseq_model import FairseqEncoderModel @dataclass class FastSpeech2CriterionConfig(FairseqDataclass): ctc_weight: float = field( default=0.0, metadata={"help": "weight for CTC loss"} ) @register_criterion("fastspeech2", dataclass=FastSpeech2CriterionConfig) class FastSpeech2Loss(FairseqCriterion): def __init__(self, task, ctc_weight): super().__init__(task) self.ctc_weight = ctc_weight def forward(self, model: FairseqEncoderModel, sample, reduction="mean"): src_tokens = sample["net_input"]["src_tokens"] src_lens = sample["net_input"]["src_lengths"] tgt_lens = sample["target_lengths"] _feat_out, _, log_dur_out, pitch_out, energy_out = model( src_tokens=src_tokens, src_lengths=src_lens, prev_output_tokens=sample["net_input"]["prev_output_tokens"], incremental_state=None, target_lengths=tgt_lens, speaker=sample["speaker"], durations=sample["durations"], pitches=sample["pitches"], energies=sample["energies"] ) src_mask = lengths_to_mask(sample["net_input"]["src_lengths"]) tgt_mask = lengths_to_mask(sample["target_lengths"]) pitches, energies = sample["pitches"], sample["energies"] pitch_out, pitches = pitch_out[src_mask], pitches[src_mask] energy_out, energies = energy_out[src_mask], energies[src_mask] feat_out, feat = _feat_out[tgt_mask], sample["target"][tgt_mask] l1_loss = F.l1_loss(feat_out, feat, reduction=reduction) pitch_loss = F.mse_loss(pitch_out, pitches, reduction=reduction) energy_loss = F.mse_loss(energy_out, energies, reduction=reduction) log_dur_out = log_dur_out[src_mask] dur = sample["durations"].float() dur = dur.half() if log_dur_out.type().endswith(".HalfTensor") else dur log_dur = torch.log(dur + 1)[src_mask] dur_loss = F.mse_loss(log_dur_out, log_dur, reduction=reduction) ctc_loss = torch.tensor(0.).type_as(l1_loss) if self.ctc_weight > 0.: lprobs = model.get_normalized_probs((_feat_out,), log_probs=True) lprobs = lprobs.transpose(0, 1) # T x B x C src_mask = lengths_to_mask(src_lens) src_tokens_flat = src_tokens.masked_select(src_mask) ctc_loss = F.ctc_loss( lprobs, src_tokens_flat, tgt_lens, src_lens, reduction=reduction, zero_infinity=True ) * self.ctc_weight loss = l1_loss + dur_loss + pitch_loss + energy_loss + ctc_loss sample_size = sample["nsentences"] logging_output = { "loss": utils.item(loss.data), "ntokens": sample["ntokens"], "nsentences": sample["nsentences"], "sample_size": sample_size, "l1_loss": utils.item(l1_loss.data), "dur_loss": utils.item(dur_loss.data), "pitch_loss": utils.item(pitch_loss.data), "energy_loss": utils.item(energy_loss.data), "ctc_loss": utils.item(ctc_loss.data), } return loss, sample_size, logging_output @classmethod def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None: ns = [log.get("sample_size", 0) for log in logging_outputs] ntot = sum(ns) ws = [n / (ntot + 1e-8) for n in ns] for key in [ "loss", "l1_loss", "dur_loss", "pitch_loss", "energy_loss", "ctc_loss" ]: vals = [log.get(key, 0) for log in 
logging_outputs] val = sum(val * w for val, w in zip(vals, ws)) metrics.log_scalar(key, val, ntot, round=3) metrics.log_scalar("sample_size", ntot, len(logging_outputs)) # inference metrics if "targ_frames" not in logging_outputs[0]: return n = sum(log.get("targ_frames", 0) for log in logging_outputs) for key, new_key in [ ("mcd_loss", "mcd_loss"), ("pred_frames", "pred_ratio"), ("nins", "ins_rate"), ("ndel", "del_rate"), ]: val = sum(log.get(key, 0) for log in logging_outputs) metrics.log_scalar(new_key, val / n, n, round=3) @staticmethod def logging_outputs_can_be_summed() -> bool: return False
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/fastspeech2_loss.py
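FastSpeech2Loss masks out padded positions before every distance term: boolean masks built from source/target lengths index into predictions and targets, so only valid frames or phonemes reach the L1/MSE losses. A standalone sketch of that masked selection, with a local stand-in for lengths_to_mask rather than fairseq's helper:

# Sketch of length-mask-based selection for variable-length sequences,
# as used before the L1/MSE terms in the FastSpeech 2 loss. The helper
# below is a local stand-in for fairseq.data.data_utils.lengths_to_mask.
import torch
import torch.nn.functional as F


def lengths_to_mask(lens: torch.Tensor) -> torch.Tensor:
    """(B,) lengths -> (B, max_len) boolean mask, True on valid steps."""
    max_len = int(lens.max())
    return torch.arange(max_len).unsqueeze(0) < lens.unsqueeze(1)


bsz, max_len, feat_dim = 3, 6, 4
tgt_lens = torch.tensor([6, 4, 2])

pred = torch.randn(bsz, max_len, feat_dim)
target = torch.randn(bsz, max_len, feat_dim)

mask = lengths_to_mask(tgt_lens)          # (B, T) bool
pred_valid = pred[mask]                   # (sum(lens), feat_dim)
target_valid = target[mask]

l1 = F.l1_loss(pred_valid, target_valid, reduction="mean")
mse = F.mse_loss(pred_valid, target_valid, reduction="mean")
print(mask.sum().item(), "valid frames;", float(l1), float(mse))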
# All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math from argparse import Namespace from dataclasses import dataclass, field from omegaconf import II from typing import Optional import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass from fairseq.data.data_utils import post_process from fairseq.tasks import FairseqTask from fairseq.logging.meters import safe_round @dataclass class CtcCriterionConfig(FairseqDataclass): zero_infinity: bool = field( default=False, metadata={"help": "zero inf loss when source length <= target length"}, ) sentence_avg: bool = II("optimization.sentence_avg") post_process: str = field( default="letter", metadata={ "help": "how to post process predictions into words. can be letter, " "wordpiece, BPE symbols, etc. " "See fairseq.data.data_utils.post_process() for full list of options" }, ) wer_kenlm_model: Optional[str] = field( default=None, metadata={ "help": "if this is provided, use kenlm to compute wer (along with other wer_* args)" }, ) wer_lexicon: Optional[str] = field( default=None, metadata={"help": "lexicon to use with wer_kenlm_model"}, ) wer_lm_weight: float = field( default=2.0, metadata={"help": "lm weight to use with wer_kenlm_model"}, ) wer_word_score: float = field( default=-1.0, metadata={"help": "lm word score to use with wer_kenlm_model"}, ) wer_args: Optional[str] = field( default=None, metadata={ "help": "DEPRECATED: tuple of (wer_kenlm_model, wer_lexicon, wer_lm_weight, wer_word_score)" }, ) @register_criterion("ctc", dataclass=CtcCriterionConfig) class CtcCriterion(FairseqCriterion): def __init__(self, cfg: CtcCriterionConfig, task: FairseqTask): super().__init__(task) self.blank_idx = ( task.target_dictionary.index(task.blank_symbol) if hasattr(task, "blank_symbol") else 0 ) self.pad_idx = task.target_dictionary.pad() self.eos_idx = task.target_dictionary.eos() self.post_process = cfg.post_process if cfg.wer_args is not None: ( cfg.wer_kenlm_model, cfg.wer_lexicon, cfg.wer_lm_weight, cfg.wer_word_score, ) = eval(cfg.wer_args) if cfg.wer_kenlm_model is not None: from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder dec_args = Namespace() dec_args.nbest = 1 dec_args.criterion = "ctc" dec_args.kenlm_model = cfg.wer_kenlm_model dec_args.lexicon = cfg.wer_lexicon dec_args.beam = 50 dec_args.beam_size_token = min(50, len(task.target_dictionary)) dec_args.beam_threshold = min(50, len(task.target_dictionary)) dec_args.lm_weight = cfg.wer_lm_weight dec_args.word_score = cfg.wer_word_score dec_args.unk_weight = -math.inf dec_args.sil_weight = 0 self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary) else: self.w2l_decoder = None self.zero_infinity = cfg.zero_infinity self.sentence_avg = cfg.sentence_avg def forward(self, model, sample, reduce=True): net_output = model(**sample["net_input"]) lprobs = model.get_normalized_probs( net_output, log_probs=True ).contiguous() # (T, B, C) from the encoder if "src_lengths" in sample["net_input"]: input_lengths = sample["net_input"]["src_lengths"] else: if net_output["padding_mask"] is not None: non_padding_mask = ~net_output["padding_mask"] input_lengths = non_padding_mask.long().sum(-1) else: input_lengths = lprobs.new_full( (lprobs.size(1),), 
lprobs.size(0), dtype=torch.long ) pad_mask = (sample["target"] != self.pad_idx) & ( sample["target"] != self.eos_idx ) targets_flat = sample["target"].masked_select(pad_mask) if "target_lengths" in sample: target_lengths = sample["target_lengths"] else: target_lengths = pad_mask.sum(-1) with torch.backends.cudnn.flags(enabled=False): loss = F.ctc_loss( lprobs, targets_flat, input_lengths, target_lengths, blank=self.blank_idx, reduction="sum", zero_infinity=self.zero_infinity, ) ntokens = ( sample["ntokens"] if "ntokens" in sample else target_lengths.sum().item() ) sample_size = sample["target"].size(0) if self.sentence_avg else ntokens logging_output = { "loss": utils.item(loss.data), # * sample['ntokens'], "ntokens": ntokens, "nsentences": sample["id"].numel(), "sample_size": sample_size, } if not model.training: import editdistance with torch.no_grad(): lprobs_t = lprobs.transpose(0, 1).float().contiguous().cpu() c_err = 0 c_len = 0 w_errs = 0 w_len = 0 wv_errs = 0 for lp, t, inp_l in zip( lprobs_t, sample["target_label"] if "target_label" in sample else sample["target"], input_lengths, ): lp = lp[:inp_l].unsqueeze(0) decoded = None if self.w2l_decoder is not None: decoded = self.w2l_decoder.decode(lp) if len(decoded) < 1: decoded = None else: decoded = decoded[0] if len(decoded) < 1: decoded = None else: decoded = decoded[0] p = (t != self.task.target_dictionary.pad()) & ( t != self.task.target_dictionary.eos() ) targ = t[p] targ_units = self.task.target_dictionary.string(targ) targ_units_arr = targ.tolist() toks = lp.argmax(dim=-1).unique_consecutive() pred_units_arr = toks[toks != self.blank_idx].tolist() c_err += editdistance.eval(pred_units_arr, targ_units_arr) c_len += len(targ_units_arr) targ_words = post_process(targ_units, self.post_process).split() pred_units = self.task.target_dictionary.string(pred_units_arr) pred_words_raw = post_process(pred_units, self.post_process).split() if decoded is not None and "words" in decoded: pred_words = decoded["words"] w_errs += editdistance.eval(pred_words, targ_words) wv_errs += editdistance.eval(pred_words_raw, targ_words) else: dist = editdistance.eval(pred_words_raw, targ_words) w_errs += dist wv_errs += dist w_len += len(targ_words) logging_output["wv_errors"] = wv_errs logging_output["w_errors"] = w_errs logging_output["w_total"] = w_len logging_output["c_errors"] = c_err logging_output["c_total"] = c_len return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) nsentences = utils.item( sum(log.get("nsentences", 0) for log in logging_outputs) ) sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) metrics.log_scalar("ntokens", ntokens) metrics.log_scalar("nsentences", nsentences) if sample_size != ntokens: metrics.log_scalar( "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 ) c_errors = sum(log.get("c_errors", 0) for log in logging_outputs) metrics.log_scalar("_c_errors", c_errors) c_total = sum(log.get("c_total", 0) for log in logging_outputs) metrics.log_scalar("_c_total", c_total) w_errors = sum(log.get("w_errors", 0) for log in logging_outputs) metrics.log_scalar("_w_errors", w_errors) wv_errors = sum(log.get("wv_errors", 0) for log in 
logging_outputs) metrics.log_scalar("_wv_errors", wv_errors) w_total = sum(log.get("w_total", 0) for log in logging_outputs) metrics.log_scalar("_w_total", w_total) if c_total > 0: metrics.log_derived( "uer", lambda meters: safe_round( meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3 ) if meters["_c_total"].sum > 0 else float("nan"), ) if w_total > 0: metrics.log_derived( "wer", lambda meters: safe_round( meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3 ) if meters["_w_total"].sum > 0 else float("nan"), ) metrics.log_derived( "raw_wer", lambda meters: safe_round( meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3 ) if meters["_w_total"].sum > 0 else float("nan"), ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/ctc.py
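CtcCriterion feeds F.ctc_loss a (T, B, C) tensor of log-probabilities, targets flattened with masked_select over non-pad/non-eos positions, and per-example input and target lengths; greedy decoding for the error rates then collapses repeats with unique_consecutive and drops the blank index. A compact sketch of both steps, with made-up shapes and a blank index of 0:

# Sketch of the CTC loss call and the greedy collapse used for error rates.
# Shapes, vocabulary size, and blank/pad indices are illustrative.
import torch
import torch.nn.functional as F

T, B, C = 12, 2, 8          # time, batch, classes
blank_idx, pad_idx = 0, 1

lprobs = F.log_softmax(torch.randn(T, B, C), dim=-1)   # (T, B, C)
input_lengths = torch.tensor([12, 10])

targets = torch.tensor([[3, 4, 5, 1], [6, 7, 1, 1]])   # padded with pad_idx
pad_mask = targets != pad_idx
targets_flat = targets.masked_select(pad_mask)          # 1-D, no padding
target_lengths = pad_mask.sum(-1)

loss = F.ctc_loss(
    lprobs, targets_flat, input_lengths, target_lengths,
    blank=blank_idx, reduction="sum", zero_infinity=True,
)
print("ctc loss:", float(loss))

# Greedy decoding for the first example: argmax, collapse repeats, drop blanks.
toks = lprobs[: input_lengths[0], 0].argmax(dim=-1)
collapsed = toks.unique_consecutive()
hyp = collapsed[collapsed != blank_idx].tolist()
print("greedy hypothesis:", hyp)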
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.constants import DDP_BACKEND_CHOICES from omegaconf import II @dataclass class AdaptiveLossConfig(FairseqDataclass): sentence_avg: bool = II("optimization.sentence_avg") ddp_backend: DDP_BACKEND_CHOICES = II("distributed_training.ddp_backend") @register_criterion("adaptive_loss", dataclass=AdaptiveLossConfig) class AdaptiveLoss(FairseqCriterion): """This is an implementation of the loss function accompanying the adaptive softmax approximation for graphical processing units (GPU), described in the paper "Efficient softmax approximation for GPUs" (http://arxiv.org/abs/1609.04309).""" def __init__(self, task, sentence_avg): super().__init__(task) self.sentence_avg = sentence_avg @classmethod def build_criterion(cls, cfg: AdaptiveLossConfig, task): if cfg.ddp_backend in {"c10d", "pytorch_ddp"}: raise Exception( "AdaptiveLoss is not compatible with the PyTorch " "version of DistributedDataParallel. Please use " "`--ddp-backend=legacy_ddp` instead." ) return cls(task, cfg.sentence_avg) def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ assert ( hasattr(model.decoder, "adaptive_softmax") and model.decoder.adaptive_softmax is not None ) adaptive_softmax = model.decoder.adaptive_softmax net_output = model(**sample["net_input"]) orig_target = model.get_targets(sample, net_output) nsentences = orig_target.size(0) orig_target = orig_target.view(-1) bsz = orig_target.size(0) logits, target = adaptive_softmax(net_output[0], orig_target) assert len(target) == len(logits) loss = net_output[0].new(1 if reduce else bsz).zero_().float() # convert to fp32 for i in range(len(target)): if target[i] is not None: assert target[i].min() >= 0 and target[i].max() <= logits[i].size(1) loss += F.cross_entropy( logits[i], target[i], ignore_index=self.padding_idx, reduction="sum" if reduce else "none", ) orig = utils.strip_pad(orig_target, self.padding_idx) ntokens = orig.numel() sample_size = sample["target"].size(0) if self.sentence_avg else ntokens logging_output = { "loss": loss.data, "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) if sample_size != ntokens: metrics.log_scalar( "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) ) else: metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg) ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether 
the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improve distributed training speed. """ return True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/adaptive_loss.py
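AdaptiveLoss pairs with fairseq's adaptive-softmax decoder head, summing a cross-entropy term per cluster of the factorized output. For a feel of the underlying technique outside fairseq, PyTorch ships the same "Efficient softmax approximation for GPUs" idea as nn.AdaptiveLogSoftmaxWithLoss; a minimal usage sketch (not the fairseq code path), with illustrative sizes and cutoffs:

# Sketch of adaptive softmax via torch.nn.AdaptiveLogSoftmaxWithLoss, the
# same approximation AdaptiveLoss is built around (not fairseq's own
# implementation). Sizes and cutoffs are illustrative.
import torch
import torch.nn as nn

hidden, vocab = 64, 10_000
adaptive = nn.AdaptiveLogSoftmaxWithLoss(
    in_features=hidden,
    n_classes=vocab,
    cutoffs=[100, 1_000],   # head covers the 100 most frequent words
    div_value=4.0,
)

hidden_states = torch.randn(32, hidden)            # (tokens, hidden)
targets = torch.randint(0, vocab, (32,))

out = adaptive(hidden_states, targets)
print("mean NLL:", float(out.loss))                # averaged over tokens

# Full log-probabilities (rarely needed at training time):
log_probs = adaptive.log_prob(hidden_states)       # (tokens, vocab)
print(log_probs.shape)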
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from dataclasses import dataclass, field from typing import Dict, List from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass logger = logging.getLogger(__name__) @dataclass class ModelCriterionConfig(FairseqDataclass): loss_weights: Dict[str, float] = field( default_factory=dict, metadata={"help": "weights for the loss terms"}, ) log_keys: List[str] = field( default_factory=list, metadata={"help": "additional output keys to log"}, ) @register_criterion("model", dataclass=ModelCriterionConfig) class ModelCriterion(FairseqCriterion): """ This criterion relies on the model to supply losses. The losses should be a dictionary of name -> scalar returned by the model either by including it in the net_output dict or by implementing a get_losses(net_output, sample) method. The final loss is a scaled sum of all losses according to weights in loss_weights. If no weights are provided, then all losses are scaled by 1.0. The losses will be automatically logged. Additional keys from net_output dict can be logged via the log_keys parameter. """ def __init__(self, task, loss_weights=None, log_keys=None): super().__init__(task) self.loss_weights = loss_weights self.log_keys = log_keys def forward(self, model, sample, reduce=True): net_output = model(**sample["net_input"]) sample_size = net_output["sample_size"] scaled_losses = {} if hasattr(model, "get_losses"): losses = model.get_losses(net_output, sample) elif isinstance(net_output, dict) and "losses" in net_output: losses = net_output["losses"] else: raise Exception("Could not retrieve losses") for lk, p in losses.items(): try: coef = 1.0 if len(self.loss_weights) == 0 else self.loss_weights[lk] except KeyError: logger.error( f"weight for loss {lk} is not in loss_weights ({self.loss_weights})" ) raise if coef != 0 and p is not None: scaled_losses[lk] = coef * p.float() loss = sum(scaled_losses.values()) if reduce and loss.numel() > 1: loss = loss.sum() logging_output = { "loss": loss.data, "ntokens": sample_size, "nsentences": sample["id"].numel(), "sample_size": sample_size, "_world_size": 1, } for lk in self.log_keys: if lk in net_output and net_output[lk] is not None: logging_output[lk] = float(net_output[lk]) if len(scaled_losses) > 1: for lk, l in scaled_losses.items(): logging_output[f"loss_{lk}"] = l.item() return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) nsentences = utils.item( sum(log.get("nsentences", 0) for log in logging_outputs) ) sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) metrics.log_scalar("loss", loss_sum / sample_size, sample_size, round=3) metrics.log_scalar("ntokens", ntokens) metrics.log_scalar("nsentences", nsentences) builtin_keys = { "loss", "ntokens", "nsentences", "sample_size", "_world_size", } world_size = utils.item( sum(log.get("_world_size", 0) for log in logging_outputs) ) for k in logging_outputs[0]: if k not in builtin_keys: val = sum(log.get(k, 0) for log in logging_outputs) if k.startswith("loss_"): metrics.log_scalar(k, val / 
sample_size, sample_size, round=3) else: metrics.log_scalar(k, val / world_size, round=3) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improve distributed training speed. """ return True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/model_criterion.py
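ModelCriterion simply takes a dict of named losses from the model, scales each by a weight from loss_weights, and sums them, logging every scaled term under loss_<name>. A tiny standalone sketch of that weighted aggregation, with invented loss names; note that for simplicity it defaults unlisted terms to 1.0, whereas the class above raises if a weight is missing from a non-empty loss_weights:

# Sketch of the weighted sum over named loss terms, as in ModelCriterion.
# Loss names and weights are invented; unlisted terms default to 1.0 here.
import torch

losses = {                      # what a model might return in net_output["losses"]
    "contrastive": torch.tensor(2.5),
    "diversity": torch.tensor(0.8),
    "features_pen": torch.tensor(0.1),
}
loss_weights = {"contrastive": 1.0, "diversity": 0.1}

scaled = {}
for name, value in losses.items():
    coef = loss_weights.get(name, 1.0) if loss_weights else 1.0
    if coef != 0 and value is not None:
        scaled[name] = coef * value.float()

total = sum(scaled.values())
logging_output = {"loss": float(total)}
logging_output.update({f"loss_{k}": float(v) for k, v in scaled.items()})
print(logging_output)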
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field from typing import List, Optional import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass from fairseq.logging.meters import safe_round from fairseq.utils import is_xla_tensor @dataclass class Wav2VecCriterionConfig(FairseqDataclass): infonce: bool = field( default=False, metadata={ "help": "if set, uses cross entropy instead of binary cross entropy (i.e. InfoNCE loss)" }, ) loss_weights: Optional[List[float]] = field( default=None, metadata={"help": "weights for additional loss terms (not first one)"}, ) log_keys: List[str] = field( default_factory=lambda: [], metadata={"help": "output keys to log"}, ) @register_criterion("wav2vec", dataclass=Wav2VecCriterionConfig) class Wav2vecCriterion(FairseqCriterion): def __init__(self, task, infonce=False, loss_weights=None, log_keys=None): super().__init__(task) self.infonce = infonce self.loss_weights = loss_weights self.log_keys = [] if log_keys is None else log_keys def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ net_output = model(**sample["net_input"]) logits = model.get_logits(net_output).float() target = model.get_targets(sample, net_output) self.xla = is_xla_tensor(logits) # XXX: handle weights on xla. weights = None if hasattr(model, "get_target_weights") and not self.infonce: weights = model.get_target_weights(target, net_output) if torch.is_tensor(weights): weights = weights.float() losses = [] reduction = "none" if ((not reduce) or self.xla) else "sum" if self.infonce: loss = F.cross_entropy(logits, target, reduction=reduction) else: loss = F.binary_cross_entropy_with_logits( logits, target.float(), weights, reduction=reduction ) if self.xla: # tpu-comment: since dynamic shapes lead to recompilations on xla, # we don't shrink tensors using mask_indices. # Instead, we use mask indices to adjust loss. 
mi = ( sample['net_input']['mask_indices'] .transpose(0, 1) # logits are transposed in `model.get_logits` .reshape(logits.size(0)) ) loss = (loss * mi).sum() if reduce else (loss * mi) if 'sample_size' in sample: sample_size = sample['sample_size'] elif 'mask_indices' in sample['net_input']: sample_size = sample['net_input']['mask_indices'].sum() else: sample_size = target.numel() if self.infonce else target.long().sum().item() losses.append(loss.detach().clone()) if self.loss_weights is not None: assert hasattr(model, "get_extra_losses") extra_losses = model.get_extra_losses(net_output) if torch.is_tensor(extra_losses): extra_losses = [extra_losses] if len(self.loss_weights) == 1 and len(extra_losses) != 1: self.loss_weights = [self.loss_weights[0]] * len(extra_losses) assert len(extra_losses) == len( self.loss_weights ), f"{len(extra_losses)}, {len(self.loss_weights)}" for p, coef in zip(extra_losses, self.loss_weights): if coef != 0 and p is not None: p = coef * p.float() * sample_size loss += p losses.append(p) logging_output = { "loss": loss.item() if (reduce and not self.xla) else loss.detach(), "ntokens": sample_size, "nsentences": sample["id"].numel(), "sample_size": sample_size, } for lk in self.log_keys: # Only store "logits" and "target" for computing MAP and MAUC # during validation if lk == "logits": if not self.training: logging_output["logits"] = logits.cpu().numpy() elif lk == "target": if not self.training: # If the targets have been mixed with the predictions of # teacher models, find the original targets if hasattr(model, "get_original_targets"): original_target = model.get_original_targets(sample, net_output) else: original_target = target logging_output["target"] = original_target.cpu().numpy() elif lk in net_output: value = net_output[lk] if not is_xla_tensor(value): value = float(value) logging_output[lk] = value if len(losses) > 1: for i, l in enumerate(losses): logging_output[f"loss_{i}"] = l.item() if not self.xla else l.detach() if self.infonce: with torch.no_grad(): if logits.numel() == 0: corr = 0 count = 0 else: assert logits.dim() > 1, logits.shape max = logits.argmax(-1) == 0 min = logits.argmin(-1) == 0 if is_xla_tensor(logits): max, min = max * mi, min * mi both = max & min corr = max.long().sum() - both.long().sum() count = mi.sum() else: both = max & min corr = max.long().sum().item() - both.long().sum().item() count = float(max.numel()) logging_output["correct"] = corr logging_output["count"] = count return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) nsentences = utils.item( sum(log.get("nsentences", 0) for log in logging_outputs) ) sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) metrics.log_scalar( "loss", loss_sum / (sample_size or 1) / math.log(2), sample_size, round=3 ) metrics.log_scalar("ntokens", ntokens) metrics.log_scalar("nsentences", nsentences) correct = sum(log.get("correct", 0) for log in logging_outputs) metrics.log_scalar("_correct", correct) total = sum(log.get("count", 0) for log in logging_outputs) metrics.log_scalar("_total", total) if total > 0: metrics.log_derived( "accuracy", lambda meters: safe_round( meters["_correct"].sum / meters["_total"].sum, 5 ) if meters["_total"].sum > 0 else float("nan"), ) builtin_keys = { "loss", "ntokens", 
"nsentences", "sample_size", "correct", "count", } for k in logging_outputs[0]: if k not in builtin_keys: val = sum(log.get(k, 0) for log in logging_outputs) if k.startswith("loss"): metrics.log_scalar( k, val / (sample_size or 1) / math.log(2), sample_size, round=3 ) else: metrics.log_scalar(k, val / len(logging_outputs), round=3) # FIXME: revert when gather based xla reduction is implemented #@staticmethod #def logging_outputs_can_be_summed() -> bool: def logging_outputs_can_be_summed(self) -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ # XXX: Gather based reduction not implemented for xla yet. # So we fall to sum based reduction for xla. return self.xla
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/wav2vec_criterion.py
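With --infonce set, the wav2vec criterion treats each masked position as a (1 + K)-way classification: the logits stack the positive similarity at index 0 followed by K distractors, so cross-entropy against an all-zeros target is the InfoNCE loss, and accuracy reduces to how often index 0 wins the argmax. A toy sketch of that formulation, with random similarities standing in for the model's outputs:

# Toy sketch of the InfoNCE-as-cross-entropy formulation used with --infonce.
# The "similarities" here are random placeholders for model outputs.
import torch
import torch.nn.functional as F

num_masked, num_negatives = 16, 10

# Column 0 holds the similarity to the true (positive) latent,
# columns 1..K hold similarities to sampled distractors.
logits = torch.randn(num_masked, 1 + num_negatives)
target = torch.zeros(num_masked, dtype=torch.long)   # positive is always index 0

loss = F.cross_entropy(logits, target, reduction="sum")

with torch.no_grad():
    correct = (logits.argmax(dim=-1) == 0).long().sum().item()
print(f"InfoNCE loss: {float(loss):.3f}, accuracy: {correct}/{num_masked}")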
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion def compute_cross_entropy_loss(logits, targets, ignore_index=-100): """ Function to compute the cross entropy loss. The default value of ignore_index is the same as the default value for F.cross_entropy in pytorch. """ assert logits.size(0) == targets.size( -1 ), "Logits and Targets tensor shapes don't match up" loss = F.nll_loss( F.log_softmax(logits, -1, dtype=torch.float32), targets, reduction="sum", ignore_index=ignore_index, ) return loss @register_criterion("legacy_masked_lm_loss") class LegacyMaskedLmLoss(FairseqCriterion): """ Implementation for the loss used in masked language model (MLM) training. This optionally also computes the next sentence prediction (NSP) loss and adds it to the overall loss based on the specified args. There are three cases to consider: 1) Generic MLM training without NSP loss. In this case sentence_targets and sentence_logits are both None. 2) BERT training without NSP loss. In this case sentence_targets is not None but sentence_logits is None and we should not be computing a sentence level loss. 3) BERT training with NSP loss. In this case both sentence_targets and sentence_logits are not None and we should be computing a sentence level loss. The weight of the sentence level loss is specified as an argument. """ def __init__(self, task, masked_lm_only, nsp_loss_weight): super().__init__(task) self.masked_lm_only = masked_lm_only self.nsp_loss_weight = nsp_loss_weight @staticmethod def add_args(parser): """Args for MaskedLM Loss""" # Default for masked_lm_only is False so as to not break BERT training parser.add_argument( "--masked-lm-only", default=False, action="store_true", help="compute MLM loss only", ) parser.add_argument( "--nsp-loss-weight", default=1.0, type=float, help="weight for next sentence prediction" " loss (default 1)", ) def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ lm_logits, output_metadata = model(**sample["net_input"]) # reshape lm_logits from (N,T,C) to (N*T,C) lm_logits = lm_logits.view(-1, lm_logits.size(-1)) lm_targets = sample["lm_target"].view(-1) lm_loss = compute_cross_entropy_loss(lm_logits, lm_targets, self.padding_idx) # compute the number of tokens for which loss is computed. This is used # to normalize the loss ntokens = utils.strip_pad(lm_targets, self.padding_idx).numel() loss = lm_loss / ntokens nsentences = sample["nsentences"] # nsentences = 0 # Compute sentence loss if masked_lm_only is False sentence_loss = None if not self.masked_lm_only: sentence_logits = output_metadata["sentence_logits"] sentence_targets = sample["sentence_target"].view(-1) # This needs to be recomputed due to some differences between # TokenBlock and BlockPair dataset. This can be resolved with a # refactor of BERTModel which we will do in the future. # TODO: Remove this after refactor of BERTModel nsentences = sentence_targets.size(0) # Check for logits being none which can happen when remove_heads # is set to true in the BERT model. 
Ideally we should set # masked_lm_only to true in this case, but that requires some # refactor in the BERT model. if sentence_logits is not None: sentence_loss = compute_cross_entropy_loss( sentence_logits, sentence_targets ) loss += self.nsp_loss_weight * (sentence_loss / nsentences) # NOTE: as we are summing up per token mlm loss and per sentence nsp loss # we don't need to use sample_size as denominator for the gradient # here sample_size is just used for logging sample_size = 1 logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "lm_loss": utils.item(lm_loss.data) if reduce else lm_loss.data, # sentence loss is not always computed "sentence_loss": ( (utils.item(sentence_loss.data) if reduce else sentence_loss.data) if sentence_loss is not None else 0.0 ), "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" lm_loss_sum = sum(log.get("lm_loss", 0) for log in logging_outputs) sentence_loss_sum = sum(log.get("sentence_loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) agg_loss = sum(log.get("loss", 0) for log in logging_outputs) metrics.log_scalar( "loss", agg_loss / sample_size / math.log(2) if sample_size > 0 else 0.0, sample_size, round=3, ) metrics.log_scalar( "lm_loss", lm_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0.0, ntokens, round=3, ) metrics.log_scalar( "sentence_loss", sentence_loss_sum / nsentences / math.log(2) if nsentences > 0 else 0.0, nsentences, round=3, ) metrics.log_scalar( "nll_loss", lm_loss_sum / ntokens / math.log(2) if ntokens > 0 else 0.0, ntokens, round=3, ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/legacy_masked_lm.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass from torch import Tensor from dataclasses import dataclass, field @dataclass class LabelSmoothedDualImitationCriterionConfig(FairseqDataclass): label_smoothing: float = field( default=0.0, metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"}, ) @register_criterion("nat_loss", dataclass=LabelSmoothedDualImitationCriterionConfig) class LabelSmoothedDualImitationCriterion(FairseqCriterion): def __init__(self, task, label_smoothing): super().__init__(task) self.label_smoothing = label_smoothing def _compute_loss( self, outputs, targets, masks=None, label_smoothing=0.0, name="loss", factor=1.0 ): """ outputs: batch x len x d_model targets: batch x len masks: batch x len policy_logprob: if there is some policy depends on the likelihood score as rewards. """ def mean_ds(x: Tensor, dim=None) -> Tensor: return ( x.float().mean().type_as(x) if dim is None else x.float().mean(dim).type_as(x) ) if masks is not None: outputs, targets = outputs[masks], targets[masks] if masks is not None and not masks.any(): nll_loss = torch.tensor(0) loss = nll_loss else: logits = F.log_softmax(outputs, dim=-1) if targets.dim() == 1: losses = F.nll_loss(logits, targets.to(logits.device), reduction="none") else: # soft-labels losses = F.kl_div(logits, targets.to(logits.device), reduction="none") losses = losses.sum(-1) nll_loss = mean_ds(losses) if label_smoothing > 0: loss = ( nll_loss * (1 - label_smoothing) - mean_ds(logits) * label_smoothing ) else: loss = nll_loss loss = loss * factor return {"name": name, "loss": loss, "nll_loss": nll_loss, "factor": factor} def _custom_loss(self, loss, name="loss", factor=1.0): return {"name": name, "loss": loss, "factor": factor} def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. 
Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ nsentences, ntokens = sample["nsentences"], sample["ntokens"] # B x T src_tokens, src_lengths = ( sample["net_input"]["src_tokens"], sample["net_input"]["src_lengths"], ) tgt_tokens, prev_output_tokens = sample["target"], sample["prev_target"] outputs = model(src_tokens, src_lengths, prev_output_tokens, tgt_tokens) losses, nll_loss = [], [] for obj in outputs: if outputs[obj].get("loss", None) is None: _losses = self._compute_loss( outputs[obj].get("out"), outputs[obj].get("tgt"), outputs[obj].get("mask", None), outputs[obj].get("ls", 0.0), name=obj + "-loss", factor=outputs[obj].get("factor", 1.0), ) else: _losses = self._custom_loss( outputs[obj].get("loss"), name=obj + "-loss", factor=outputs[obj].get("factor", 1.0), ) losses += [_losses] if outputs[obj].get("nll_loss", False): nll_loss += [_losses.get("nll_loss", 0.0)] loss = sum(l["loss"] for l in losses) nll_loss = sum(l for l in nll_loss) if len(nll_loss) > 0 else loss.new_tensor(0) # NOTE: # we don't need to use sample_size as denominator for the gradient # here sample_size is just used for logging sample_size = 1 logging_output = { "loss": loss.data, "nll_loss": nll_loss.data, "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } for l in losses: logging_output[l["name"]] = ( utils.item(l["loss"].data / l["factor"]) if reduce else l[["loss"]].data / l["factor"] ) return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) loss = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) nll_loss = utils.item(sum(log.get("nll_loss", 0) for log in logging_outputs)) metrics.log_scalar( "loss", loss / sample_size / math.log(2), sample_size, round=3 ) metrics.log_scalar( "nll_loss", nll_loss / sample_size / math.log(2), sample_size, round=3 ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg) ) for key in logging_outputs[0]: if key[-5:] == "-loss": val = sum(log.get(key, 0) for log in logging_outputs) metrics.log_scalar( key[:-5], val / sample_size / math.log(2) if sample_size > 0 else 0.0, sample_size, round=3, ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/nat_loss.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import importlib import os from fairseq import registry from fairseq.criterions.fairseq_criterion import ( # noqa FairseqCriterion, LegacyFairseqCriterion, ) from omegaconf import DictConfig ( build_criterion_, register_criterion, CRITERION_REGISTRY, CRITERION_DATACLASS_REGISTRY, ) = registry.setup_registry( "--criterion", base_class=FairseqCriterion, default="cross_entropy" ) def build_criterion(cfg: DictConfig, task): return build_criterion_(cfg, task) # automatically import any Python files in the criterions/ directory for file in sorted(os.listdir(os.path.dirname(__file__))): if file.endswith(".py") and not file.startswith("_"): file_name = file[: file.find(".py")] importlib.import_module("fairseq.criterions." + file_name)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/__init__.py
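The __init__ module wires criteria through a registry plus an auto-import loop over fairseq/criterions/, so a new loss only needs to live in that directory and call @register_criterion to become selectable via --criterion. A hedged sketch of what such a file might look like, assuming fairseq is installed; the criterion name "my_weighted_cross_entropy" and its scale option are invented for illustration:

# Hypothetical fairseq/criterions/my_weighted_cross_entropy.py, sketching
# how the registry in __init__.py is meant to be used. Assumes fairseq is
# installed; the criterion name and the `scale` option are invented.
from dataclasses import dataclass, field

import torch.nn.functional as F

from fairseq import metrics
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass


@dataclass
class MyWeightedCrossEntropyConfig(FairseqDataclass):
    scale: float = field(default=1.0, metadata={"help": "scale applied to the loss"})


@register_criterion("my_weighted_cross_entropy", dataclass=MyWeightedCrossEntropyConfig)
class MyWeightedCrossEntropy(FairseqCriterion):
    def __init__(self, task, scale):
        super().__init__(task)
        self.scale = scale

    def forward(self, model, sample, reduce=True):
        net_output = model(**sample["net_input"])
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1)
        loss = self.scale * F.nll_loss(
            lprobs, target, ignore_index=self.padding_idx,
            reduction="sum" if reduce else "none",
        )
        sample_size = sample["ntokens"]
        logging_output = {
            "loss": loss.data,
            "ntokens": sample["ntokens"],
            "nsentences": sample["target"].size(0),
            "sample_size": sample_size,
        }
        return loss, sample_size, logging_output

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
        sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
        metrics.log_scalar("loss", loss_sum / sample_size, sample_size, round=3)

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        return True

Dropping such a file into fairseq/criterions/ is enough for the auto-import loop above to pick it up, after which --criterion my_weighted_cross_entropy would select it.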
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass, field import torch from fairseq import metrics, utils from fairseq.criterions import register_criterion from fairseq.criterions.label_smoothed_cross_entropy import ( LabelSmoothedCrossEntropyCriterion, LabelSmoothedCrossEntropyCriterionConfig ) try: from simuleval.metrics.latency import ( AverageLagging, AverageProportion, DifferentiableAverageLagging ) LATENCY_METRICS = { "average_lagging": AverageLagging, "average_proportion": AverageProportion, "differentiable_average_lagging": DifferentiableAverageLagging, } except ImportError: LATENCY_METRICS = None @dataclass class LabelSmoothedCrossEntropyCriterionLatencyAugmentConfig( LabelSmoothedCrossEntropyCriterionConfig ): latency_avg_weight: float = field( default=0.0, metadata={"help": "weight fot average latency loss."}, ) latency_var_weight: float = field( default=0.0, metadata={"help": "weight fot variance latency loss."}, ) latency_avg_type: str = field( default="differentiable_average_lagging", metadata={"help": "latency type for average loss"}, ) latency_var_type: str = field( default="variance_delay", metadata={"help": "latency typ for variance loss"}, ) latency_gather_method: str = field( default="weighted_average", metadata={"help": "method to gather latency loss for all heads"}, ) latency_update_after: int = field( default=0, metadata={"help": "Add latency loss after certain steps"}, ) @register_criterion( "latency_augmented_label_smoothed_cross_entropy", dataclass=LabelSmoothedCrossEntropyCriterionLatencyAugmentConfig ) class LatencyAugmentedLabelSmoothedCrossEntropyCriterion( LabelSmoothedCrossEntropyCriterion ): def __init__( self, task, sentence_avg, label_smoothing, ignore_prefix_size, report_accuracy, latency_avg_weight, latency_var_weight, latency_avg_type, latency_var_type, latency_gather_method, latency_update_after, ): super().__init__( task, sentence_avg, label_smoothing, ignore_prefix_size, report_accuracy ) assert LATENCY_METRICS is not None, "Please make sure SimulEval is installed." self.latency_avg_weight = latency_avg_weight self.latency_var_weight = latency_var_weight self.latency_avg_type = latency_avg_type self.latency_var_type = latency_var_type self.latency_gather_method = latency_gather_method self.latency_update_after = latency_update_after def forward(self, model, sample, reduce=True): net_output = model(**sample["net_input"]) # 1. Compute cross entropy loss loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce) # 2. 
Compute cross latency loss latency_loss, expected_latency, expected_delays_var = self.compute_latency_loss( model, sample, net_output ) if self.latency_update_after > 0: num_updates = getattr(model.decoder, "num_updates", None) assert num_updates is not None, ( "model.decoder doesn't have attribute 'num_updates'" ) if num_updates <= self.latency_update_after: latency_loss = 0 loss += latency_loss sample_size = ( sample["target"].size(0) if self.sentence_avg else sample["ntokens"] ) logging_output = { "loss": loss.data, "nll_loss": nll_loss.data, "ntokens": sample["ntokens"], "nsentences": sample["target"].size(0), "sample_size": sample_size, "latency": expected_latency, "delays_var": expected_delays_var, "latency_loss": latency_loss, } if self.report_accuracy: n_correct, total = self.compute_accuracy(model, net_output, sample) logging_output["n_correct"] = utils.item(n_correct.data) logging_output["total"] = utils.item(total.data) return loss, sample_size, logging_output def compute_latency_loss(self, model, sample, net_output): assert ( net_output[-1].encoder_padding_mask is None or not net_output[-1].encoder_padding_mask[:, 0].any() ), ( "Only right padding on source is supported." ) # 1. Obtain the expected alignment alpha_list = [item["alpha"] for item in net_output[1].attn_list] num_layers = len(alpha_list) bsz, num_heads, tgt_len, src_len = alpha_list[0].size() # bsz * num_layers * num_heads, tgt_len, src_len alpha_all = torch.cat(alpha_list, dim=1).view(-1, tgt_len, src_len) # 2 compute expected delays # bsz * num_heads * num_layers, tgt_len, src_len for MMA steps = ( torch.arange(1, 1 + src_len) .unsqueeze(0) .unsqueeze(1) .expand_as(alpha_all) .type_as(alpha_all) ) expected_delays = torch.sum(steps * alpha_all, dim=-1) target_padding_mask = ( model.get_targets(sample, net_output) .eq(self.padding_idx) .unsqueeze(1) .expand(bsz, num_layers * num_heads, tgt_len) .contiguous() .view(-1, tgt_len) ) src_lengths = ( sample["net_input"]["src_lengths"] .unsqueeze(1) .expand(bsz, num_layers * num_heads) .contiguous() .view(-1) ) expected_latency = LATENCY_METRICS[self.latency_avg_type]( expected_delays, src_lengths, None, target_padding_mask=target_padding_mask ) # 2.1 average expected latency of heads # bsz, num_layers * num_heads expected_latency = expected_latency.view(bsz, -1) if self.latency_gather_method == "average": # bsz * tgt_len expected_latency = expected_delays.mean(dim=1) elif self.latency_gather_method == "weighted_average": weights = torch.nn.functional.softmax(expected_latency, dim=1) expected_latency = torch.sum(expected_latency * weights, dim=1) elif self.latency_gather_method == "max": expected_latency = expected_latency.max(dim=1)[0] else: raise NotImplementedError expected_latency = expected_latency.sum() avg_loss = self.latency_avg_weight * expected_latency # 2.2 variance of expected delays expected_delays_var = ( expected_delays.view(bsz, -1, tgt_len).var(dim=1).mean(dim=1) ) expected_delays_var = expected_delays_var.sum() var_loss = self.latency_avg_weight * expected_delays_var # 3. 
Final loss latency_loss = avg_loss + var_loss return latency_loss, expected_latency, expected_delays_var @classmethod def reduce_metrics(cls, logging_outputs) -> None: super().reduce_metrics(logging_outputs) latency = sum( log.get("latency", 0) for log in logging_outputs ) delays_var = sum( log.get("delays_var", 0) for log in logging_outputs ) latency_loss = sum( log.get("latency_loss", 0) for log in logging_outputs ) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) metrics.log_scalar( "latency", latency.float() / nsentences, nsentences, round=3 ) metrics.log_scalar( "delays_var", delays_var / nsentences, nsentences, round=3 ) metrics.log_scalar( "latency_loss", latency_loss / nsentences, nsentences, round=3 )
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/label_smoothed_cross_entropy_latency_augmented.py
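The latency term in the criterion above turns monotonic attention weights into a differentiable expected delay: for each target step the delay is the attention-weighted average source position, sum_j j * alpha_ij, which latency metrics such as differentiable average lagging then consume. A small sketch of just that expectation, with a random attention matrix standing in for the model's alignment:

# Sketch of the expected-delay computation behind the latency loss: for each
# target step, the delay is the attention-weighted source index. The
# attention weights here are random placeholders.
import torch

bsz, tgt_len, src_len = 2, 4, 6

alpha = torch.rand(bsz, tgt_len, src_len)
alpha = alpha / alpha.sum(dim=-1, keepdim=True)     # rows sum to 1

# Source positions 1..src_len, broadcast over batch and target steps.
steps = torch.arange(1, src_len + 1).float().view(1, 1, src_len).expand_as(alpha)

expected_delays = (steps * alpha).sum(dim=-1)       # (bsz, tgt_len)
print(expected_delays)

# A naive average-latency scalar per sentence; the real criterion instead
# feeds expected_delays into SimulEval metrics such as
# DifferentiableAverageLagging.
avg_latency = expected_delays.mean(dim=-1)
print(avg_latency)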
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field import torch from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass from omegaconf import II @dataclass class LabelSmoothedCrossEntropyCriterionConfig(FairseqDataclass): label_smoothing: float = field( default=0.0, metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"}, ) report_accuracy: bool = field( default=False, metadata={"help": "report accuracy metric"}, ) ignore_prefix_size: int = field( default=0, metadata={"help": "Ignore first N tokens"}, ) sentence_avg: bool = II("optimization.sentence_avg") def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True): if target.dim() == lprobs.dim() - 1: target = target.unsqueeze(-1) nll_loss = -lprobs.gather(dim=-1, index=target) smooth_loss = -lprobs.sum(dim=-1, keepdim=True) if ignore_index is not None: pad_mask = target.eq(ignore_index) nll_loss.masked_fill_(pad_mask, 0.0) smooth_loss.masked_fill_(pad_mask, 0.0) else: nll_loss = nll_loss.squeeze(-1) smooth_loss = smooth_loss.squeeze(-1) if reduce: nll_loss = nll_loss.sum() smooth_loss = smooth_loss.sum() eps_i = epsilon / (lprobs.size(-1) - 1) loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss return loss, nll_loss @register_criterion( "label_smoothed_cross_entropy", dataclass=LabelSmoothedCrossEntropyCriterionConfig ) class LabelSmoothedCrossEntropyCriterion(FairseqCriterion): def __init__( self, task, sentence_avg, label_smoothing, ignore_prefix_size=0, report_accuracy=False, ): super().__init__(task) self.sentence_avg = sentence_avg self.eps = label_smoothing self.ignore_prefix_size = ignore_prefix_size self.report_accuracy = report_accuracy def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. 
Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ net_output = model(**sample["net_input"]) loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce) sample_size = ( sample["target"].size(0) if self.sentence_avg else sample["ntokens"] ) logging_output = { "loss": loss.data, "nll_loss": nll_loss.data, "ntokens": sample["ntokens"], "nsentences": sample["target"].size(0), "sample_size": sample_size, } if self.report_accuracy: n_correct, total = self.compute_accuracy(model, net_output, sample) logging_output["n_correct"] = utils.item(n_correct.data) logging_output["total"] = utils.item(total.data) return loss, sample_size, logging_output def get_lprobs_and_target(self, model, net_output, sample): lprobs = model.get_normalized_probs(net_output, log_probs=True) target = model.get_targets(sample, net_output) if self.ignore_prefix_size > 0: if getattr(lprobs, "batch_first", False): lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous() target = target[:, self.ignore_prefix_size :].contiguous() else: lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous() target = target[self.ignore_prefix_size :, :].contiguous() return lprobs.view(-1, lprobs.size(-1)), target.view(-1) def compute_loss(self, model, net_output, sample, reduce=True): lprobs, target = self.get_lprobs_and_target(model, net_output, sample) loss, nll_loss = label_smoothed_nll_loss( lprobs, target, self.eps, ignore_index=self.padding_idx, reduce=reduce, ) return loss, nll_loss def compute_accuracy(self, model, net_output, sample): lprobs, target = self.get_lprobs_and_target(model, net_output, sample) mask = target.ne(self.padding_idx) n_correct = torch.sum( lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask)) ) total = torch.sum(mask) return n_correct, total @classmethod def reduce_metrics(cls, logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) metrics.log_scalar( "nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3 ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) ) total = utils.item(sum(log.get("total", 0) for log in logging_outputs)) if total > 0: metrics.log_scalar("total", total) n_correct = utils.item( sum(log.get("n_correct", 0) for log in logging_outputs) ) metrics.log_scalar("n_correct", n_correct) metrics.log_derived( "accuracy", lambda meters: round( meters["n_correct"].sum * 100.0 / meters["total"].sum, 3 ) if meters["total"].sum > 0 else float("nan"), ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/label_smoothed_cross_entropy.py
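A minimal standalone sketch of the label-smoothing arithmetic in label_smoothed_nll_loss above, run on a toy batch; the shapes, values, and the choice of 0 as the padding index are made up for illustration.

# Mirrors the arithmetic of label_smoothed_nll_loss on made-up tensors.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
vocab, ntok, eps, pad = 6, 4, 0.1, 0
logits = torch.randn(ntok, vocab)
target = torch.tensor([3, 1, 0, 5])          # index 0 plays the role of padding here
lprobs = F.log_softmax(logits, dim=-1)

nll = -lprobs.gather(-1, target.unsqueeze(-1)).squeeze(-1)   # true-class NLL per token
smooth = -lprobs.sum(-1)                                     # sum over all classes per token
mask = target.eq(pad)
nll, smooth = nll.masked_fill(mask, 0.0), smooth.masked_fill(mask, 0.0)

eps_i = eps / (vocab - 1)
loss = (1.0 - eps - eps_i) * nll.sum() + eps_i * smooth.sum()
print(loss.item(), nll.sum().item())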
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import logging from typing import Any, Dict, List from functools import lru_cache from dataclasses import dataclass, field import torch from omegaconf import II from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass from fairseq.data.data_utils import lengths_to_mask import torch.nn.functional as F logger = logging.getLogger(__name__) @dataclass class Tacotron2CriterionConfig(FairseqDataclass): bce_pos_weight: float = field( default=1.0, metadata={"help": "weight of positive examples for BCE loss"}, ) n_frames_per_step: int = field( default=0, metadata={"help": "Number of frames per decoding step"}, ) use_guided_attention_loss: bool = field( default=False, metadata={"help": "use guided attention loss"}, ) guided_attention_loss_sigma: float = field( default=0.4, metadata={"help": "weight of positive examples for BCE loss"}, ) ctc_weight: float = field( default=0.0, metadata={"help": "weight for CTC loss"} ) sentence_avg: bool = II("optimization.sentence_avg") class GuidedAttentionLoss(torch.nn.Module): """ Efficiently Trainable Text-to-Speech System Based on Deep Convolutional Networks with Guided Attention (https://arxiv.org/abs/1710.08969) """ def __init__(self, sigma): super().__init__() self.sigma = sigma @staticmethod @lru_cache(maxsize=8) def _get_weight(s_len, t_len, sigma): grid_x, grid_y = torch.meshgrid(torch.arange(t_len), torch.arange(s_len)) grid_x = grid_x.to(s_len.device) grid_y = grid_y.to(s_len.device) w = (grid_y.float() / s_len - grid_x.float() / t_len) ** 2 return 1.0 - torch.exp(-w / (2 * (sigma ** 2))) def _get_weights(self, src_lens, tgt_lens): bsz, max_s_len, max_t_len = len(src_lens), max(src_lens), max(tgt_lens) weights = torch.zeros((bsz, max_t_len, max_s_len)) for i, (s_len, t_len) in enumerate(zip(src_lens, tgt_lens)): weights[i, :t_len, :s_len] = self._get_weight(s_len, t_len, self.sigma) return weights @staticmethod def _get_masks(src_lens, tgt_lens): in_masks = lengths_to_mask(src_lens) out_masks = lengths_to_mask(tgt_lens) return out_masks.unsqueeze(2) & in_masks.unsqueeze(1) def forward(self, attn, src_lens, tgt_lens, reduction="mean"): weights = self._get_weights(src_lens, tgt_lens).to(attn.device) masks = self._get_masks(src_lens, tgt_lens).to(attn.device) loss = (weights * attn.transpose(1, 2)).masked_select(masks) loss = torch.sum(loss) if reduction == "sum" else torch.mean(loss) return loss @register_criterion("tacotron2", dataclass=Tacotron2CriterionConfig) class Tacotron2Criterion(FairseqCriterion): def __init__(self, task, sentence_avg, n_frames_per_step, use_guided_attention_loss, guided_attention_loss_sigma, bce_pos_weight, ctc_weight): super().__init__(task) self.sentence_avg = sentence_avg self.n_frames_per_step = n_frames_per_step self.bce_pos_weight = bce_pos_weight self.guided_attn = None if use_guided_attention_loss: self.guided_attn = GuidedAttentionLoss(guided_attention_loss_sigma) self.ctc_weight = ctc_weight def forward(self, model, sample, reduction="mean"): bsz, max_len, _ = sample["target"].size() feat_tgt = sample["target"] feat_len = sample["target_lengths"].view(bsz, 1).expand(-1, max_len) eos_tgt = torch.arange(max_len).to(sample["target"].device) eos_tgt = 
eos_tgt.view(1, max_len).expand(bsz, -1) eos_tgt = (eos_tgt == (feat_len - 1)).float() src_tokens = sample["net_input"]["src_tokens"] src_lens = sample["net_input"]["src_lengths"] tgt_lens = sample["target_lengths"] feat_out, eos_out, extra = model( src_tokens=src_tokens, src_lengths=src_lens, prev_output_tokens=sample["net_input"]["prev_output_tokens"], incremental_state=None, target_lengths=tgt_lens, speaker=sample["speaker"] ) l1_loss, mse_loss, eos_loss = self.compute_loss( extra["feature_out"], feat_out, eos_out, feat_tgt, eos_tgt, tgt_lens, reduction, ) attn_loss = torch.tensor(0.).type_as(l1_loss) if self.guided_attn is not None: attn_loss = self.guided_attn(extra['attn'], src_lens, tgt_lens, reduction) ctc_loss = torch.tensor(0.).type_as(l1_loss) if self.ctc_weight > 0.: net_output = (feat_out, eos_out, extra) lprobs = model.get_normalized_probs(net_output, log_probs=True) lprobs = lprobs.transpose(0, 1) # T x B x C src_mask = lengths_to_mask(src_lens) src_tokens_flat = src_tokens.masked_select(src_mask) ctc_loss = F.ctc_loss( lprobs, src_tokens_flat, tgt_lens, src_lens, reduction=reduction, zero_infinity=True ) * self.ctc_weight loss = l1_loss + mse_loss + eos_loss + attn_loss + ctc_loss sample_size = sample["nsentences"] if self.sentence_avg \ else sample["ntokens"] logging_output = { "loss": utils.item(loss.data), "ntokens": sample["ntokens"], "nsentences": sample["nsentences"], "sample_size": sample_size, "l1_loss": utils.item(l1_loss.data), "mse_loss": utils.item(mse_loss.data), "eos_loss": utils.item(eos_loss.data), "attn_loss": utils.item(attn_loss.data), "ctc_loss": utils.item(ctc_loss.data), } return loss, sample_size, logging_output def compute_loss(self, feat_out, feat_out_post, eos_out, feat_tgt, eos_tgt, tgt_lens, reduction="mean"): mask = lengths_to_mask(tgt_lens) _eos_out = eos_out[mask].squeeze() _eos_tgt = eos_tgt[mask] _feat_tgt = feat_tgt[mask] _feat_out = feat_out[mask] _feat_out_post = feat_out_post[mask] l1_loss = ( F.l1_loss(_feat_out, _feat_tgt, reduction=reduction) + F.l1_loss(_feat_out_post, _feat_tgt, reduction=reduction) ) mse_loss = ( F.mse_loss(_feat_out, _feat_tgt, reduction=reduction) + F.mse_loss(_feat_out_post, _feat_tgt, reduction=reduction) ) eos_loss = F.binary_cross_entropy_with_logits( _eos_out, _eos_tgt, pos_weight=torch.tensor(self.bce_pos_weight), reduction=reduction ) return l1_loss, mse_loss, eos_loss @classmethod def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None: ns = [log.get("sample_size", 0) for log in logging_outputs] ntot = sum(ns) ws = [n / (ntot + 1e-8) for n in ns] for key in ["loss", "l1_loss", "mse_loss", "eos_loss", "attn_loss", "ctc_loss"]: vals = [log.get(key, 0) for log in logging_outputs] val = sum(val * w for val, w in zip(vals, ws)) metrics.log_scalar(key, val, ntot, round=3) metrics.log_scalar("sample_size", ntot, len(logging_outputs)) # inference metrics if "targ_frames" not in logging_outputs[0]: return n = sum(log.get("targ_frames", 0) for log in logging_outputs) for key, new_key in [ ("mcd_loss", "mcd_loss"), ("pred_frames", "pred_ratio"), ("nins", "ins_rate"), ("ndel", "del_rate"), ]: val = sum(log.get(key, 0) for log in logging_outputs) metrics.log_scalar(new_key, val / n, n, round=3) @staticmethod def logging_outputs_can_be_summed() -> bool: return False
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/tacotron2_loss.py
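A hedged sketch of the guided-attention penalty matrix computed by GuidedAttentionLoss._get_weight above, using plain integer lengths for clarity (the criterion itself caches per-length tensors); the lengths and sigma are illustrative.

# Penalty is ~0 near the source/target diagonal and grows toward 1 far away.
import torch

def guided_attention_weight(s_len: int, t_len: int, sigma: float = 0.4) -> torch.Tensor:
    grid_t, grid_s = torch.meshgrid(torch.arange(t_len), torch.arange(s_len))
    w = (grid_s.float() / s_len - grid_t.float() / t_len) ** 2
    return 1.0 - torch.exp(-w / (2 * sigma ** 2))

W = guided_attention_weight(s_len=5, t_len=7)
print(W.shape)           # torch.Size([7, 5]), i.e. (target_len, source_len)
print(W.diagonal()[:3])  # small values: roughly diagonal attention is not penalized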
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import utils from fairseq.criterions import LegacyFairseqCriterion, register_criterion from torch import nn @register_criterion("composite_loss") class CompositeLoss(LegacyFairseqCriterion): """This is a composite loss that, given a list of model outputs and a list of targets, computes an average of losses for each output-target pair""" def __init__(self, args, task): super().__init__(args, task) self.underlying_criterion = args.underlying_criterion @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" # fmt: off parser.add_argument('--underlying-criterion', type=str, metavar='VAL', required=True, help='underlying criterion to use for the composite loss') # fmt: on @staticmethod def build_underlying_criterion(args, task): saved_criterion = args.criterion args.criterion = args.underlying_criterion assert saved_criterion != args.underlying_criterion underlying_criterion = task.build_criterion(args) args.criterion = saved_criterion return underlying_criterion @classmethod def build_criterion(cls, args, task): underlying_criterion = CompositeLoss.build_underlying_criterion(args, task) class FakeModel(nn.Module): def __init__(self, model, net_out, target): super().__init__() self.model = model self.net_out = net_out self.target = target def forward(self, **unused): return self.net_out def get_normalized_probs(self, net_output, log_probs, sample=None): return self.model.get_normalized_probs( net_output, log_probs, sample=sample ) def get_targets(self, *unused): return self.target @property def decoder(self): return self.model.decoder class _CompositeLoss(LegacyFairseqCriterion): def __init__(self, args, task, underlying_criterion): super().__init__(args, task) self.underlying_criterion = underlying_criterion def forward(self, model, sample, reduce=True): net_outputs = model(**sample["net_input"]) targets = sample["target"] bsz = targets[0].size(0) loss = net_outputs[0][0].new(1 if reduce else bsz).float().zero_() sample_size = 0 logging_output = {} for o, t in zip(net_outputs[0], targets): m = FakeModel(model, (o, net_outputs[1]), t) sample["target"] = t l, ss, logging_output = self.underlying_criterion(m, sample, reduce) loss += l sample_size += ss loss.div_(len(targets)) sample_size /= len(targets) logging_output["loss"] = utils.item(loss.data) if reduce else loss.data return loss, sample_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): return underlying_criterion.__class__.aggregate_logging_outputs( logging_outputs ) @staticmethod def reduce_metrics(logging_outputs) -> None: underlying_criterion.__class__.reduce_metrics(logging_outputs) return _CompositeLoss(args, task, underlying_criterion)
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/composite_loss.py
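A toy sketch of the averaging pattern CompositeLoss implements: one underlying loss applied to each (output, target) pair, then averaged. The callables and shapes here are made up; the real criterion wraps a full fairseq criterion through FakeModel.

import torch
import torch.nn.functional as F

def composite_loss(outputs, targets, underlying=F.cross_entropy):
    # one underlying loss per output/target pair, averaged over pairs
    assert len(outputs) == len(targets)
    total = sum(underlying(o, t) for o, t in zip(outputs, targets))
    return total / len(outputs)

outs = [torch.randn(8, 5), torch.randn(8, 5)]          # two heads, 5 classes each
tgts = [torch.randint(0, 5, (8,)) for _ in outs]
print(composite_loss(outs, tgts).item())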
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import re from dataclasses import dataclass, field from typing import List, Optional import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass @dataclass class HubertCriterionConfig(FairseqDataclass): pred_masked_weight: float = field( default=1.0, metadata={"help": "weight for predictive loss for masked frames"}, ) pred_nomask_weight: float = field( default=0.0, metadata={"help": "weight for predictive loss for unmasked frames"}, ) loss_weights: Optional[List[float]] = field( default=None, metadata={"help": "weights for additional loss terms (not first one)"}, ) log_keys: List[str] = field( default_factory=lambda: [], metadata={"help": "output keys to log"}, ) @register_criterion("hubert", dataclass=HubertCriterionConfig) class HubertCriterion(FairseqCriterion): def __init__(self, task, pred_masked_weight, pred_nomask_weight, loss_weights=None, log_keys=None): super().__init__(task) self.pred_masked_weight = pred_masked_weight self.pred_nomask_weight = pred_nomask_weight self.loss_weights = loss_weights self.log_keys = [] if log_keys is None else log_keys def forward(self, model, sample, reduce=True, log_pred=False): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ net_output = model(target_list=sample["target_list"], **sample["net_input"]) loss = 0. sample_size = 0 logging_output = {} reduction = "sum" if reduce else "none" loss_m_list = [] logp_m_list = model.get_logits(net_output, True) targ_m_list = model.get_targets(net_output, True) assert self.pred_masked_weight == 0 or len(logp_m_list) > 0 for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)): loss_m = F.cross_entropy(logp_m, targ_m, reduction=reduction) loss_m_list.append(loss_m) logging_output[f"loss_m_{i}"] = loss_m.detach().item() if self.pred_masked_weight > 0: loss += self.pred_masked_weight * sum(loss_m_list) sample_size += targ_m_list[0].numel() loss_u_list = [] logp_u_list = model.get_logits(net_output, False) targ_u_list = model.get_targets(net_output, False) assert self.pred_nomask_weight == 0 or len(logp_u_list) > 0 for i, (logp_u, targ_u) in enumerate(zip(logp_u_list, targ_u_list)): loss_u = F.cross_entropy(logp_u, targ_u, reduction=reduction) loss_u_list.append(loss_u) logging_output[f"loss_u_{i}"] = loss_u.detach().item() if self.pred_nomask_weight > 0: loss += self.pred_nomask_weight * sum(loss_u_list) sample_size += targ_u_list[0].numel() if self.loss_weights is not None: assert hasattr(model, "get_extra_losses") extra_losses, names = model.get_extra_losses(net_output) if torch.is_tensor(extra_losses): extra_losses = [extra_losses] names = [names] if len(self.loss_weights) == 1 and len(extra_losses) != 1: self.loss_weights = [self.loss_weights[0]] * len(extra_losses) assert len(extra_losses) == len(self.loss_weights), f"{len(extra_losses)}, {len(self.loss_weights)}" for p, n, coef in zip(extra_losses, names, self.loss_weights): if coef != 0 and p is not None: p = coef * p.float() * sample_size loss += p logging_output[f"loss_{n}"] = p.item() logging_output = { "loss": loss.item() if reduce else loss, "ntokens": 
sample_size, "nsentences": sample["id"].numel(), "sample_size": sample_size, **logging_output, } for lk in self.log_keys: if lk in net_output: logging_output[lk] = float((net_output[lk])) def compute_correct(logits): if logits.numel() == 0: return 0, 0 else: assert logits.dim() > 1, logits.shape max = logits.argmax(-1) == 0 min = logits.argmin(-1) == 0 both = max & min corr = max.long().sum().item() - both.long().sum().item() count = max.numel() return corr, count with torch.no_grad(): for i, logp_m in enumerate(logp_m_list): corr_m, count_m = compute_correct(logp_m) logging_output[f"correct_m_{i}"] = corr_m logging_output[f"count_m_{i}"] = count_m for i, logp_u in enumerate(logp_u_list): corr_u, count_u = compute_correct(logp_u) logging_output[f"correct_u_{i}"] = corr_u logging_output[f"count_u_{i}"] = count_u return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training (copied from normal cross entropy).""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) metrics.log_scalar("loss", loss_sum / sample_size / math.log(2), sample_size, round=3) if sample_size != ntokens: metrics.log_scalar("nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3) metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)) else: metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)) counts = {} for lk in logging_outputs[0].keys(): if lk.startswith("count_"): val = sum(log[lk] for log in logging_outputs) metrics.log_scalar(lk, val) counts[lk] = val for lk in logging_outputs[0].keys(): if lk.startswith("loss_"): val = sum(log[lk] for log in logging_outputs) metrics.log_scalar(lk, val / sample_size / math.log(2), round=3) elif lk.startswith("correct_"): val = sum(log[lk] for log in logging_outputs) metrics.log_scalar(lk, val / counts[re.sub("correct", "count", lk)]) @staticmethod def aggregate_logging_outputs(logging_outputs): """Aggregate logging outputs from data parallel training.""" raise NotImplementedError() @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return False
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/hubert_criterion.py
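A small sketch of the correctness bookkeeping in HubertCriterion: index 0 of each logit row is treated as the positive class, and rows where argmax and argmin coincide (all-equal logits) are excluded from the correct count; the example logits are made up.

import torch

def compute_correct(logits: torch.Tensor):
    if logits.numel() == 0:
        return 0, 0
    is_max = logits.argmax(-1) == 0
    is_min = logits.argmin(-1) == 0
    both = is_max & is_min                       # degenerate rows (all logits equal)
    corr = is_max.long().sum().item() - both.long().sum().item()
    return corr, is_max.numel()

logits = torch.tensor([[2.0, 0.5, -1.0],         # correct: index 0 is the max
                       [0.1, 0.9, 0.3],          # incorrect
                       [0.0, 0.0, 0.0]])         # tie: excluded from the correct count
print(compute_correct(logits))                   # (1, 3)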
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass @dataclass class SentencePredictionConfig(FairseqDataclass): classification_head_name: str = field( default="sentence_classification_head", metadata={"help": "name of the classification head to use"}, ) regression_target: bool = field( default=False, ) @register_criterion("sentence_prediction", dataclass=SentencePredictionConfig) class SentencePredictionCriterion(FairseqCriterion): def __init__(self, cfg: SentencePredictionConfig, task): super().__init__(task) self.classification_head_name = cfg.classification_head_name self.regression_target = cfg.regression_target def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ assert ( hasattr(model, "classification_heads") and self.classification_head_name in model.classification_heads ), "model must provide sentence classification head for --criterion=sentence_prediction" logits, _ = model( **sample["net_input"], features_only=True, classification_head_name=self.classification_head_name, ) targets = model.get_targets(sample, [logits]).view(-1) sample_size = targets.numel() if not self.regression_target: lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32) loss = F.nll_loss(lprobs, targets, reduction="sum") else: logits = logits.view(-1).float() targets = targets.float() loss = F.mse_loss(logits, targets, reduction="sum") logging_output = { "loss": loss.data, "ntokens": sample["ntokens"], "nsentences": sample_size, "sample_size": sample_size, } if not self.regression_target: preds = logits.argmax(dim=1) logging_output["ncorrect"] = (preds == targets).sum() return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) if sample_size != ntokens: metrics.log_scalar( "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 ) if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]: ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs) metrics.log_scalar( "accuracy", 100.0 * ncorrect / nsentences, nsentences, round=1 ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/sentence_prediction.py
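A minimal sketch of the two loss branches in SentencePredictionCriterion: summed NLL over a log-softmax for classification, summed MSE for regression; the shapes and targets below are illustrative placeholders for real model outputs.

import torch
import torch.nn.functional as F

# classification branch
logits = torch.randn(4, 3)                        # 4 sentences, 3 classes
targets = torch.tensor([0, 2, 1, 2])
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
cls_loss = F.nll_loss(lprobs, targets, reduction="sum")

# regression branch
reg_logits = torch.randn(4)                       # one score per sentence
reg_targets = torch.tensor([0.5, 1.0, 0.0, 0.8])
reg_loss = F.mse_loss(reg_logits, reg_targets, reduction="sum")
print(cls_loss.item(), reg_loss.item())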
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from fairseq import metrics, utils from fairseq.criterions import register_criterion from .label_smoothed_cross_entropy import ( LabelSmoothedCrossEntropyCriterion, LabelSmoothedCrossEntropyCriterionConfig, ) from dataclasses import dataclass, field @dataclass class LabelSmoothedCrossEntropyCriterionWithAlignmentConfig( LabelSmoothedCrossEntropyCriterionConfig ): alignment_lambda: float = field( default=0.05, metadata={"help": "weight for the alignment loss"} ) @register_criterion( "label_smoothed_cross_entropy_with_alignment", dataclass=LabelSmoothedCrossEntropyCriterionWithAlignmentConfig, ) class LabelSmoothedCrossEntropyCriterionWithAlignment( LabelSmoothedCrossEntropyCriterion ): def __init__(self, task, sentence_avg, label_smoothing, alignment_lambda): super().__init__(task, sentence_avg, label_smoothing) self.alignment_lambda = alignment_lambda def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ net_output = model(**sample["net_input"]) loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce) sample_size = ( sample["target"].size(0) if self.sentence_avg else sample["ntokens"] ) logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "nll_loss": utils.item(nll_loss.data) if reduce else nll_loss.data, "ntokens": sample["ntokens"], "nsentences": sample["target"].size(0), "sample_size": sample_size, } alignment_loss = None # Compute alignment loss only for training set and non dummy batches. if "alignments" in sample and sample["alignments"] is not None: alignment_loss = self.compute_alignment_loss(sample, net_output) if alignment_loss is not None: logging_output["alignment_loss"] = utils.item(alignment_loss.data) loss += self.alignment_lambda * alignment_loss return loss, sample_size, logging_output def compute_alignment_loss(self, sample, net_output): attn_prob = net_output[1]["attn"][0] bsz, tgt_sz, src_sz = attn_prob.shape attn = attn_prob.view(bsz * tgt_sz, src_sz) align = sample["alignments"] align_weights = sample["align_weights"].float() if len(align) > 0: # Alignment loss computation. align (shape [:, 2]) contains the src-tgt index pairs corresponding to # the alignments. align_weights (shape [:]) contains the 1 / frequency of a tgt index for normalizing. 
loss = -( (attn[align[:, 1][:, None], align[:, 0][:, None]]).log() * align_weights[:, None] ).sum() else: return None return loss @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) nll_loss_sum = utils.item( sum(log.get("nll_loss", 0) for log in logging_outputs) ) alignment_loss_sum = utils.item( sum(log.get("alignment_loss", 0) for log in logging_outputs) ) ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) metrics.log_scalar( "nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, round=3 ) metrics.log_scalar( "alignment_loss", alignment_loss_sum / sample_size / math.log(2), sample_size, round=3, ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py
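A simplified, single-sentence sketch of the alignment loss above: take the attention probability at each aligned (target, source) cell and sum the weighted negative log probabilities. The alignment pairs and weights are made up, and the real criterion indexes a flattened (batch x target, source) attention matrix.

import torch

tgt_len, src_len = 4, 5
attn = torch.softmax(torch.randn(tgt_len, src_len), dim=-1)  # each target row sums to 1 over source
align = torch.tensor([[0, 0], [1, 1], [3, 2]])               # (src_idx, tgt_idx) pairs
weights = torch.tensor([1.0, 1.0, 0.5])                      # 1 / frequency of each target index

probs = attn[align[:, 1], align[:, 0]]                       # attention mass at aligned cells
loss = -(probs.log() * weights).sum()
print(loss.item())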
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass import math from omegaconf import II import torch from fairseq import metrics, modules, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import FairseqDataclass @dataclass class MaskedLmConfig(FairseqDataclass): tpu: bool = II("common.tpu") @register_criterion("masked_lm", dataclass=MaskedLmConfig) class MaskedLmLoss(FairseqCriterion): """ Implementation for the loss used in masked language model (MLM) training. """ def __init__(self, cfg: MaskedLmConfig, task): super().__init__(task) self.tpu = cfg.tpu def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ masked_tokens = sample["target"].ne(self.padding_idx) sample_size = masked_tokens.int().sum() # Rare: when all tokens are masked, project all tokens. # We use torch.where to avoid device-to-host transfers, # except on CPU where torch.where is not well supported # (see github.com/pytorch/pytorch/issues/26247). if self.tpu: masked_tokens = None # always project all tokens on TPU elif masked_tokens.device == torch.device("cpu"): if not masked_tokens.any(): masked_tokens = None else: masked_tokens = torch.where( masked_tokens.any(), masked_tokens, masked_tokens.new([True]), ) logits = model(**sample["net_input"], masked_tokens=masked_tokens)[0] targets = model.get_targets(sample, [logits]) if masked_tokens is not None: targets = targets[masked_tokens] loss = modules.cross_entropy( logits.view(-1, logits.size(-1)), targets.view(-1), reduction="sum", ignore_index=self.padding_idx, ) logging_output = { "loss": loss if self.tpu else loss.data, "ntokens": sample["ntokens"], "nsentences": sample["nsentences"], "sample_size": sample_size, } return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg) ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/masked_lm.py
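A sketch of the masked-token selection MaskedLmLoss relies on: positions whose target equals the padding index do not contribute, and the boolean mask both selects logits/targets and gives the sample size. Shapes and indices are illustrative; the real criterion passes the mask into the model so only masked positions are projected.

import torch
import torch.nn.functional as F

pad_idx = 1
targets = torch.tensor([[5, 1, 7],
                        [1, 1, 3]])              # padding everywhere except masked positions
logits = torch.randn(2, 3, 10)                   # (batch, seq, vocab)

masked = targets.ne(pad_idx)                     # True at masked (predicted) positions
sample_size = masked.int().sum()                 # denominator for the gradient
loss = F.cross_entropy(
    logits[masked],                              # (n_masked, vocab)
    targets[masked],                             # (n_masked,)
    reduction="sum",
)
print(sample_size.item(), loss.item())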
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion("sentence_ranking") class SentenceRankingCriterion(FairseqCriterion): def __init__(self, task, ranking_head_name, save_predictions, num_classes): super().__init__(task) self.ranking_head_name = ranking_head_name if save_predictions is not None: self.prediction_h = open(save_predictions, "w") else: self.prediction_h = None self.num_classes = num_classes def __del__(self): if self.prediction_h is not None: self.prediction_h.close() @staticmethod def add_args(parser): # fmt: off parser.add_argument('--save-predictions', metavar='FILE', help='file to save predictions to') parser.add_argument('--ranking-head-name', default='sentence_classification_head', help='name of the ranking head to use') # fmt: on def forward(self, model, sample, reduce=True): """Compute ranking loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ assert ( hasattr(model, "classification_heads") and self.ranking_head_name in model.classification_heads ), "model must provide sentence ranking head for --criterion=sentence_ranking" scores = [] for idx in range(self.num_classes): score, _ = model( **sample["net_input{idx}".format(idx=idx + 1)], classification_head_name=self.ranking_head_name, ) scores.append(score) logits = torch.cat(scores, dim=1) sample_size = logits.size(0) if "target" in sample: targets = model.get_targets(sample, [logits]).view(-1) lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32) loss = F.nll_loss(lprobs, targets, reduction="sum") else: targets = None loss = torch.tensor(0.0, requires_grad=True) if self.prediction_h is not None: preds = logits.argmax(dim=1) for i, (id, pred) in enumerate(zip(sample["id"].tolist(), preds.tolist())): if targets is not None: label = targets[i].item() print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h) else: print("{}\t{}".format(id, pred), file=self.prediction_h) logging_output = { "loss": loss.data, "ntokens": sample["ntokens"], "nsentences": sample_size, "sample_size": sample_size, } if targets is not None: logging_output["ncorrect"] = (logits.argmax(dim=1) == targets).sum() return loss, sample_size, logging_output @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) if sample_size != ntokens: metrics.log_scalar( "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 ) if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]: ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs) metrics.log_scalar( "accuracy", 100.0 * ncorrect / nsentences, nsentences, round=1 ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to 
calling `reduce_metrics`. Setting this to True will improve distributed training speed. """ return True
EXA-1-master
exa/models/unilm-master/edgelm/fairseq/criterions/sentence_ranking.py
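A sketch of ranking-as-classification as used by SentenceRankingCriterion: each candidate input produces one score, the scores are concatenated into logits, and the target is the index of the gold candidate. The scores here are random placeholders for real model outputs.

import torch
import torch.nn.functional as F

num_classes, bsz = 3, 2
scores = [torch.randn(bsz, 1) for _ in range(num_classes)]   # one score per candidate
logits = torch.cat(scores, dim=1)                            # (bsz, num_classes)
targets = torch.tensor([2, 0])                               # index of the gold candidate

lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
loss = F.nll_loss(lprobs, targets, reduction="sum")
ncorrect = (logits.argmax(dim=1) == targets).sum()
print(loss.item(), ncorrect.item())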
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # fairseq documentation build configuration file, created by # sphinx-quickstart on Fri Aug 17 21:45:30 2018. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. import os import sys from fairseq import __version__ # source code directory, relative to this file, for sphinx-autobuild sys.path.insert(0, os.path.abspath("..")) source_suffix = [".rst"] # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.viewcode", "sphinx.ext.napoleon", "sphinxarg.ext", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The master toctree document. master_doc = "index" # General information about the project. project = "fairseq" copyright = "Facebook AI Research (FAIR)" author = "Facebook AI Research (FAIR)" github_doc_root = "https://github.com/pytorch/fairseq/tree/main/docs/" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = __version__ # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" highlight_language = "python" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ["_static"] html_context = { "css_files": [ "_static/theme_overrides.css", # override wide tables in RTD theme ], } # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars # html_sidebars = { # '**': [ # 'about.html', # 'navigation.html', # 'relations.html', # needs 'show_related': True theme option to display # 'searchbox.html', # 'donate.html', # ] # } # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { "numpy": ("http://docs.scipy.org/doc/numpy/", None), "python": ("https://docs.python.org/", None), "torch": ("https://pytorch.org/docs/master/", None), }
EXA-1-master
exa/models/unilm-master/edgelm/docs/conf.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os from fairseq.dataclass.initialize import add_defaults, hydra_init from fairseq_cli.train import main as pre_main from fairseq import distributed_utils, metrics from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import omegaconf_no_object_check from fairseq.utils import reset_logging import hydra from hydra.core.hydra_config import HydraConfig import torch from omegaconf import OmegaConf, open_dict logger = logging.getLogger("fairseq_cli.hydra_train") @hydra.main(config_path=os.path.join("..", "fairseq", "config"), config_name="config") def hydra_main(cfg: FairseqConfig) -> float: _hydra_main(cfg) def _hydra_main(cfg: FairseqConfig, **kwargs) -> float: add_defaults(cfg) if cfg.common.reset_logging: reset_logging() # Hydra hijacks logging, fix that else: # check if directly called or called through hydra_main if HydraConfig.initialized(): with open_dict(cfg): # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126) cfg.job_logging_cfg = OmegaConf.to_container(HydraConfig.get().job_logging, resolve=True) with omegaconf_no_object_check(): cfg = OmegaConf.create(OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)) OmegaConf.set_struct(cfg, True) try: if cfg.common.profile: with torch.cuda.profiler.profile(): with torch.autograd.profiler.emit_nvtx(): distributed_utils.call_main(cfg, pre_main, **kwargs) else: distributed_utils.call_main(cfg, pre_main, **kwargs) except BaseException as e: if not cfg.common.suppress_crashes: raise else: logger.error("Crashed! " + str(e)) # get best val and return - useful for sweepers try: best_val = metrics.get_smoothed_value( "valid", cfg.checkpoint.best_checkpoint_metric ) except: best_val = None if best_val is None: best_val = float("inf") return best_val def cli_main(): try: from hydra._internal.utils import get_args cfg_name = get_args().config_name or "config" except: logger.warning("Failed to get config name from hydra args") cfg_name = "config" hydra_init(cfg_name) hydra_main() if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq_cli/hydra_train.py
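A generic Hydra entry-point sketch, not fairseq's actual config tree: it only shows the @hydra.main pattern that hydra_train.py builds on, and it assumes a hypothetical conf/config.yaml containing an lr field.

import hydra
from omegaconf import DictConfig, OmegaConf

@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig) -> None:
    # cfg is the composed config; any field can be overridden from the CLI,
    # e.g. `python train.py lr=0.01` (lr is an illustrative field, not fairseq's)
    print(OmegaConf.to_yaml(cfg))
    print("lr =", cfg.lr)

if __name__ == "__main__":
    main()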
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Data pre-processing: build vocabularies and binarize training data. """ import logging import os import shutil import sys from collections import Counter from itertools import zip_longest from multiprocessing import Pool from fairseq import options, tasks, utils from fairseq.binarizer import Binarizer from fairseq.data import indexed_dataset from fairseq.file_chunker_utils import find_offsets logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.preprocess") def main(args): utils.import_user_module(args) os.makedirs(args.destdir, exist_ok=True) logger.addHandler( logging.FileHandler( filename=os.path.join(args.destdir, "preprocess.log"), ) ) logger.info(args) assert args.dataset_impl != "huffman", "preprocessing.py doesn't support Huffman yet, use HuffmanCodeBuilder directly." task = tasks.get_task(args.task) def train_path(lang): return "{}{}".format(args.trainpref, ("." + lang) if lang else "") def file_name(prefix, lang): fname = prefix if lang is not None: fname += ".{lang}".format(lang=lang) return fname def dest_path(prefix, lang): return os.path.join(args.destdir, file_name(prefix, lang)) def dict_path(lang): return dest_path("dict", lang) + ".txt" def build_dictionary(filenames, src=False, tgt=False): assert src ^ tgt return task.build_dictionary( filenames, workers=args.workers, threshold=args.thresholdsrc if src else args.thresholdtgt, nwords=args.nwordssrc if src else args.nwordstgt, padding_factor=args.padding_factor, ) target = not args.only_source if not args.srcdict and os.path.exists(dict_path(args.source_lang)): raise FileExistsError(dict_path(args.source_lang)) if target and not args.tgtdict and os.path.exists(dict_path(args.target_lang)): raise FileExistsError(dict_path(args.target_lang)) if args.joined_dictionary: assert ( not args.srcdict or not args.tgtdict ), "cannot use both --srcdict and --tgtdict with --joined-dictionary" if args.srcdict: src_dict = task.load_dictionary(args.srcdict) elif args.tgtdict: src_dict = task.load_dictionary(args.tgtdict) else: assert ( args.trainpref ), "--trainpref must be set if --srcdict is not specified" src_dict = build_dictionary( {train_path(lang) for lang in [args.source_lang, args.target_lang]}, src=True, ) tgt_dict = src_dict else: if args.srcdict: src_dict = task.load_dictionary(args.srcdict) else: assert ( args.trainpref ), "--trainpref must be set if --srcdict is not specified" src_dict = build_dictionary([train_path(args.source_lang)], src=True) if target: if args.tgtdict: tgt_dict = task.load_dictionary(args.tgtdict) else: assert ( args.trainpref ), "--trainpref must be set if --tgtdict is not specified" tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True) else: tgt_dict = None src_dict.save(dict_path(args.source_lang)) if target and tgt_dict is not None: tgt_dict.save(dict_path(args.target_lang)) if args.dict_only: return def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers): logger.info("[{}] Dictionary: {} types".format(lang, len(vocab))) n_seq_tok = [0, 0] replaced = Counter() def merge_result(worker_result): replaced.update(worker_result["replaced"]) n_seq_tok[0] += worker_result["nseq"] n_seq_tok[1] += 
worker_result["ntok"] input_file = "{}{}".format( input_prefix, ("." + lang) if lang is not None else "" ) offsets = find_offsets(input_file, num_workers) (first_chunk, *more_chunks) = zip(offsets, offsets[1:]) pool = None if num_workers > 1: pool = Pool(processes=num_workers - 1) for worker_id, (start_offset, end_offset) in enumerate( more_chunks, start=1 ): prefix = "{}{}".format(output_prefix, worker_id) pool.apply_async( binarize, ( args, input_file, vocab, prefix, lang, start_offset, end_offset, ), callback=merge_result, ) pool.close() ds = indexed_dataset.make_builder( dataset_dest_file(args, output_prefix, lang, "bin"), impl=args.dataset_impl, vocab_size=len(vocab), ) merge_result( Binarizer.binarize( input_file, vocab, lambda t: ds.add_item(t), offset=first_chunk[0], end=first_chunk[1], ) ) if num_workers > 1: pool.join() for worker_id in range(1, num_workers): prefix = "{}{}".format(output_prefix, worker_id) temp_file_path = dataset_dest_prefix(args, prefix, lang) ds.merge_file_(temp_file_path) os.remove(indexed_dataset.data_file_path(temp_file_path)) os.remove(indexed_dataset.index_file_path(temp_file_path)) ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx")) logger.info( "[{}] {}: {} sents, {} tokens, {:.3}% replaced by {}".format( lang, input_file, n_seq_tok[0], n_seq_tok[1], 100 * sum(replaced.values()) / n_seq_tok[1], vocab.unk_word, ) ) def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers): nseq = [0] def merge_result(worker_result): nseq[0] += worker_result["nseq"] input_file = input_prefix offsets = find_offsets(input_file, num_workers) (first_chunk, *more_chunks) = zip(offsets, offsets[1:]) pool = None if num_workers > 1: pool = Pool(processes=num_workers - 1) for worker_id, (start_offset, end_offset) in enumerate( more_chunks, start=1 ): prefix = "{}{}".format(output_prefix, worker_id) pool.apply_async( binarize_alignments, ( args, input_file, utils.parse_alignment, prefix, start_offset, end_offset, ), callback=merge_result, ) pool.close() ds = indexed_dataset.make_builder( dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl ) merge_result( Binarizer.binarize_alignments( input_file, utils.parse_alignment, lambda t: ds.add_item(t), offset=first_chunk[0], end=first_chunk[1], ) ) if num_workers > 1: pool.join() for worker_id in range(1, num_workers): prefix = "{}{}".format(output_prefix, worker_id) temp_file_path = dataset_dest_prefix(args, prefix, None) ds.merge_file_(temp_file_path) os.remove(indexed_dataset.data_file_path(temp_file_path)) os.remove(indexed_dataset.index_file_path(temp_file_path)) ds.finalize(dataset_dest_file(args, output_prefix, None, "idx")) logger.info("[alignments] {}: parsed {} alignments".format(input_file, nseq[0])) def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1): if args.dataset_impl == "raw": # Copy original text file to destination folder output_text_file = dest_path( output_prefix + ".{}-{}".format(args.source_lang, args.target_lang), lang, ) shutil.copyfile(file_name(input_prefix, lang), output_text_file) else: make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers) def make_all(lang, vocab): if args.trainpref: make_dataset(vocab, args.trainpref, "train", lang, num_workers=args.workers) if args.validpref: for k, validpref in enumerate(args.validpref.split(",")): outprefix = "valid{}".format(k) if k > 0 else "valid" make_dataset( vocab, validpref, outprefix, lang, num_workers=args.workers ) if args.testpref: for k, testpref in 
enumerate(args.testpref.split(",")): outprefix = "test{}".format(k) if k > 0 else "test" make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers) def make_all_alignments(): if args.trainpref and os.path.exists(args.trainpref + "." + args.align_suffix): make_binary_alignment_dataset( args.trainpref + "." + args.align_suffix, "train.align", num_workers=args.workers, ) if args.validpref and os.path.exists(args.validpref + "." + args.align_suffix): make_binary_alignment_dataset( args.validpref + "." + args.align_suffix, "valid.align", num_workers=args.workers, ) if args.testpref and os.path.exists(args.testpref + "." + args.align_suffix): make_binary_alignment_dataset( args.testpref + "." + args.align_suffix, "test.align", num_workers=args.workers, ) make_all(args.source_lang, src_dict) if target: make_all(args.target_lang, tgt_dict) if args.align_suffix: make_all_alignments() logger.info("Wrote preprocessed data to {}".format(args.destdir)) if args.alignfile: assert args.trainpref, "--trainpref must be set if --alignfile is specified" src_file_name = train_path(args.source_lang) tgt_file_name = train_path(args.target_lang) freq_map = {} with open(args.alignfile, "r", encoding="utf-8") as align_file: with open(src_file_name, "r", encoding="utf-8") as src_file: with open(tgt_file_name, "r", encoding="utf-8") as tgt_file: for a, s, t in zip_longest(align_file, src_file, tgt_file): si = src_dict.encode_line(s, add_if_not_exist=False) ti = tgt_dict.encode_line(t, add_if_not_exist=False) ai = list(map(lambda x: tuple(x.split("-")), a.split())) for sai, tai in ai: srcidx = si[int(sai)] tgtidx = ti[int(tai)] if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk(): assert srcidx != src_dict.pad() assert srcidx != src_dict.eos() assert tgtidx != tgt_dict.pad() assert tgtidx != tgt_dict.eos() if srcidx not in freq_map: freq_map[srcidx] = {} if tgtidx not in freq_map[srcidx]: freq_map[srcidx][tgtidx] = 1 else: freq_map[srcidx][tgtidx] += 1 align_dict = {} for srcidx in freq_map.keys(): align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get) with open( os.path.join( args.destdir, "alignment.{}-{}.txt".format(args.source_lang, args.target_lang), ), "w", encoding="utf-8", ) as f: for k, v in align_dict.items(): print("{} {}".format(src_dict[k], tgt_dict[v]), file=f) def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True): ds = indexed_dataset.make_builder( dataset_dest_file(args, output_prefix, lang, "bin"), impl=args.dataset_impl, vocab_size=len(vocab), ) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize( filename, vocab, consumer, append_eos=append_eos, offset=offset, end=end ) ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx")) return res def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end): ds = indexed_dataset.make_builder( dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl, vocab_size=None, ) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize_alignments( filename, parse_alignment, consumer, offset=offset, end=end ) ds.finalize(dataset_dest_file(args, output_prefix, None, "idx")) return res def dataset_dest_prefix(args, output_prefix, lang): base = "{}/{}".format(args.destdir, output_prefix) if lang is not None: lang_part = ".{}-{}.{}".format(args.source_lang, args.target_lang, lang) elif args.only_source: lang_part = "" else: lang_part = ".{}-{}".format(args.source_lang, args.target_lang) return "{}{}".format(base, lang_part) def 
dataset_dest_file(args, output_prefix, lang, extension): base = dataset_dest_prefix(args, output_prefix, lang) return "{}.{}".format(base, extension) def cli_main(): parser = options.get_preprocessing_parser() args = parser.parse_args() main(args) if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq_cli/preprocess.py
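A simplified sketch of the chunked-worker pattern preprocess.py uses for binarization: snap byte offsets to line boundaries, process each chunk in a worker pool, and merge per-worker statistics through a callback. Here the per-chunk work is just token counting, and the input file name is illustrative.

import os
from multiprocessing import Pool

def find_offsets(path, num_chunks):
    # snap each boundary to the start of the next full line
    size = os.path.getsize(path)
    offsets = [0]
    with open(path, "rb") as f:
        for i in range(1, num_chunks):
            f.seek(i * size // num_chunks)
            f.readline()                 # move to the next line boundary
            offsets.append(f.tell())
    offsets.append(size)
    return offsets

def count_chunk(path, start, end):
    nseq = ntok = 0
    with open(path, "rb") as f:
        f.seek(start)
        while f.tell() < end:
            line = f.readline()
            if not line:
                break
            nseq += 1
            ntok += len(line.split())
    return {"nseq": nseq, "ntok": ntok}

if __name__ == "__main__":
    path, workers = "train.txt", 4       # illustrative input file
    totals = {"nseq": 0, "ntok": 0}

    def merge(result):                   # callback runs in the parent, like merge_result()
        totals["nseq"] += result["nseq"]
        totals["ntok"] += result["ntok"]

    offsets = find_offsets(path, workers)
    with Pool(workers) as pool:
        for start, end in zip(offsets, offsets[1:]):
            pool.apply_async(count_chunk, (path, start, end), callback=merge)
        pool.close()
        pool.join()
    print(totals)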
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Translate pre-processed data with a trained model. """ import ast import logging import math import os import sys from argparse import Namespace from itertools import chain import numpy as np import torch from fairseq import checkpoint_utils, options, scoring, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter, TimeMeter from omegaconf import DictConfig def main(cfg: DictConfig): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) assert cfg.common_eval.path is not None, "--path required for generation!" assert ( not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam ), "--sampling requires --nbest to be equal to --beam" assert ( cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw" ), "--replace-unk requires a raw text dataset (--dataset-impl=raw)" if cfg.common_eval.results_path is not None: os.makedirs(cfg.common_eval.results_path, exist_ok=True) output_path = os.path.join( cfg.common_eval.results_path, "generate-{}.txt".format(cfg.dataset.gen_subset), ) with open(output_path, "w", buffering=1, encoding="utf-8") as h: return _main(cfg, h) else: return _main(cfg, sys.stdout) def get_symbols_to_strip_from_output(generator): if hasattr(generator, "symbols_to_strip_from_output"): return generator.symbols_to_strip_from_output else: return {generator.eos} def _main(cfg: DictConfig, output_file): logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=output_file, ) logger = logging.getLogger("fairseq_cli.generate") utils.import_user_module(cfg.common) if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: cfg.dataset.max_tokens = 12000 logger.info(cfg) # Fix seed for stochastic decoding if cfg.common.seed is not None and not cfg.generation.no_seed_provided: np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) use_cuda = torch.cuda.is_available() and not cfg.common.cpu # Load dataset splits task = tasks.setup_task(cfg.task) # Set dictionaries try: src_dict = getattr(task, "source_dictionary", None) except NotImplementedError: src_dict = None tgt_dict = task.target_dictionary overrides = ast.literal_eval(cfg.common_eval.model_overrides) # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, saved_cfg = checkpoint_utils.load_model_ensemble( utils.split_paths(cfg.common_eval.path), arg_overrides=overrides, task=task, suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, ) # loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task) if cfg.generation.lm_path is not None: overrides["data"] = cfg.task.data try: lms, _ = checkpoint_utils.load_model_ensemble( [cfg.generation.lm_path], arg_overrides=overrides, task=None ) except: logger.warning( f"Failed to load language model! 
Please make sure that the language model dict is the same " f"as target dict and is located in the data dir ({cfg.task.data})" ) raise assert len(lms) == 1 else: lms = [None] # Optimize ensemble for generation for model in chain(models, lms): if model is None: continue if cfg.common.fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(cfg.generation.replace_unk) # Load dataset (possibly sharded) itr = task.get_batch_iterator( dataset=task.dataset(cfg.dataset.gen_subset), max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( task.max_positions(), *[m.max_positions() for m in models] ), ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=cfg.dataset.required_batch_size_multiple, seed=cfg.common.seed, num_shards=cfg.distributed_training.distributed_world_size, shard_id=cfg.distributed_training.distributed_rank, num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, ).next_epoch_itr(shuffle=False) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) # Initialize generator gen_timer = StopwatchMeter() extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight} generator = task.build_generator( models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs ) # Handle tokenization and BPE tokenizer = task.build_tokenizer(cfg.tokenizer) bpe = task.build_bpe(cfg.bpe) def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x scorer = scoring.build_scorer(cfg.scoring, tgt_dict) num_sentences = 0 has_target = True wps_meter = TimeMeter() for sample in progress: sample = utils.move_to_cuda(sample) if use_cuda else sample if "net_input" not in sample: continue prefix_tokens = None if cfg.generation.prefix_size > 0: prefix_tokens = sample["target"][:, : cfg.generation.prefix_size] constraints = None if "constraints" in sample: constraints = sample["constraints"] gen_timer.start() hypos = task.inference_step( generator, models, sample, prefix_tokens=prefix_tokens, constraints=constraints, ) num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos) gen_timer.stop(num_generated_tokens) for i, sample_id in enumerate(sample["id"].tolist()): has_target = sample["target"] is not None # Remove padding if "src_tokens" in sample["net_input"]: src_tokens = utils.strip_pad( sample["net_input"]["src_tokens"][i, :], tgt_dict.pad() ) else: src_tokens = None target_tokens = None if has_target: target_tokens = ( utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu() ) # Either retrieve the original sentences or regenerate them from tokens. 
if align_dict is not None: src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text( sample_id ) target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text( sample_id ) else: if src_dict is not None: src_str = src_dict.string(src_tokens, cfg.common_eval.post_process) else: src_str = "" if has_target: target_str = tgt_dict.string( target_tokens, cfg.common_eval.post_process, escape_unk=True, extra_symbols_to_ignore=get_symbols_to_strip_from_output( generator ), ) src_str = decode_fn(src_str) if has_target: target_str = decode_fn(target_str) if not cfg.common_eval.quiet: if src_dict is not None: print("S-{}\t{}".format(sample_id, src_str), file=output_file) if has_target: print("T-{}\t{}".format(sample_id, target_str), file=output_file) # Process top predictions for j, hypo in enumerate(hypos[i][: cfg.generation.nbest]): hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo["tokens"].int().cpu(), src_str=src_str, alignment=hypo["alignment"], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=cfg.common_eval.post_process, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) detok_hypo_str = decode_fn(hypo_str) if not cfg.common_eval.quiet: score = hypo["score"] / math.log(2) # convert to base 2 # original hypothesis (after tokenization and BPE) print( "H-{}\t{}\t{}".format(sample_id, score, hypo_str), file=output_file, ) # detokenized hypothesis print( "D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str), file=output_file, ) print( "P-{}\t{}".format( sample_id, " ".join( map( lambda x: "{:.4f}".format(x), # convert from base e to base 2 hypo["positional_scores"] .div_(math.log(2)) .tolist(), ) ), ), file=output_file, ) if cfg.generation.print_alignment == "hard": print( "A-{}\t{}".format( sample_id, " ".join( [ "{}-{}".format(src_idx, tgt_idx) for src_idx, tgt_idx in alignment ] ), ), file=output_file, ) if cfg.generation.print_alignment == "soft": print( "A-{}\t{}".format( sample_id, " ".join( [ ",".join(src_probs) for src_probs in alignment ] ), ), file=output_file, ) if cfg.generation.print_step: print( "I-{}\t{}".format(sample_id, hypo["steps"]), file=output_file, ) if cfg.generation.retain_iter_history: for step, h in enumerate(hypo["history"]): _, h_str, _ = utils.post_process_prediction( hypo_tokens=h["tokens"].int().cpu(), src_str=src_str, alignment=None, align_dict=None, tgt_dict=tgt_dict, remove_bpe=None, ) print( "E-{}_{}\t{}".format(sample_id, step, h_str), file=output_file, ) # Score only the top hypothesis if has_target and j == 0: if align_dict is not None or cfg.common_eval.post_process is not None: # Convert back to tokens for evaluation with unk replacement and/or without BPE target_tokens = tgt_dict.encode_line( target_str, add_if_not_exist=True ) hypo_tokens = tgt_dict.encode_line( detok_hypo_str, add_if_not_exist=True ) if hasattr(scorer, "add_string"): scorer.add_string(target_str, detok_hypo_str) else: scorer.add(target_tokens, hypo_tokens) wps_meter.update(num_generated_tokens) progress.log({"wps": round(wps_meter.avg)}) num_sentences += ( sample["nsentences"] if "nsentences" in sample else sample["id"].numel() ) logger.info("NOTE: hypothesis and token scores are output in base 2") logger.info( "Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format( num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1.0 / gen_timer.avg, ) ) if has_target: if cfg.bpe and not cfg.generation.sacrebleu: if cfg.common_eval.post_process: 
logger.warning( "BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization" ) else: logger.warning( "If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization" ) # use print to be consistent with other main outputs: S-, H-, T-, D- and so on print( "Generate {} with beam={}: {}".format( cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string() ), file=output_file, ) return scorer def cli_main(): parser = options.get_generation_parser() # TODO: replace this workaround with refactoring of `AudioPretraining` parser.add_argument( '--arch', '-a', metavar='ARCH', default="wav2vec2", help='Model architecture. For constructing tasks that rely on ' 'model args (e.g. `AudioPretraining`)' ) args = options.parse_args_and_arch(parser) main(args) if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq_cli/generate.py
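A minimal usage sketch for the generation script above, calling its main() entry point programmatically instead of through the shell. The data directory and checkpoint path are hypothetical placeholders, and the example assumes fairseq's parse_args_and_arch accepts an input_args list.

# Hypothetical, minimal invocation of fairseq_cli/generate.py from Python.
from fairseq import options
from fairseq_cli import generate


def run_generate_example():
    parser = options.get_generation_parser()
    args = options.parse_args_and_arch(
        parser,
        input_args=[
            "data-bin/example",                        # assumed binarized data dir
            "--path", "checkpoints/checkpoint_best.pt",  # assumed checkpoint
            "--gen-subset", "test",
            "--beam", "5",
            "--batch-size", "32",
            "--remove-bpe",
        ],
    )
    return generate.main(args)


if __name__ == "__main__":
    run_generate_example()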
EXA-1-master
exa/models/unilm-master/edgelm/fairseq_cli/__init__.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import os import sys from argparse import Namespace from itertools import chain import torch from fairseq import checkpoint_utils, distributed_utils, options, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import metrics, progress_bar from fairseq.utils import reset_logging from omegaconf import DictConfig logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.validate") def main(cfg: DictConfig, override_args=None): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) reset_logging() assert ( cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None ), "Must specify batch size either with --max-tokens or --batch-size" use_fp16 = cfg.common.fp16 use_cuda = torch.cuda.is_available() and not cfg.common.cpu if use_cuda: torch.cuda.set_device(cfg.distributed_training.device_id) if cfg.distributed_training.distributed_world_size > 1: data_parallel_world_size = distributed_utils.get_data_parallel_world_size() data_parallel_rank = distributed_utils.get_data_parallel_rank() else: data_parallel_world_size = 1 data_parallel_rank = 0 if override_args is not None: overrides = vars(override_args) overrides.update(eval(getattr(override_args, "model_overrides", "{}"))) else: overrides = None # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( [cfg.common_eval.path], arg_overrides=overrides, suffix=cfg.checkpoint.checkpoint_suffix, ) model = models[0] # Move models to GPU for model in models: model.eval() if use_fp16: model.half() if use_cuda: model.cuda() # Print args logger.info(saved_cfg) # Build criterion criterion = task.build_criterion(saved_cfg.criterion) criterion.eval() for subset in cfg.dataset.valid_subset.split(","): try: task.load_dataset(subset, combine=False, epoch=1, task_cfg=saved_cfg.task) dataset = task.dataset(subset) except KeyError: raise Exception("Cannot find dataset: " + subset) # Initialize data iterator itr = task.get_batch_iterator( dataset=dataset, max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( task.max_positions(), *[m.max_positions() for m in models], ), ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=cfg.dataset.required_batch_size_multiple, seed=cfg.common.seed, num_shards=data_parallel_world_size, shard_id=data_parallel_rank, num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, ).next_epoch_itr(shuffle=False) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, prefix=f"valid on '{subset}' subset", default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) log_outputs = [] for i, sample in enumerate(progress): sample = utils.move_to_cuda(sample) if use_cuda else sample _loss, _sample_size, log_output = task.valid_step(sample, model, criterion) progress.log(log_output, step=i) log_outputs.append(log_output) if data_parallel_world_size > 1: 
log_outputs = distributed_utils.all_gather_list( log_outputs, max_size=cfg.common.all_gather_list_size, group=distributed_utils.get_data_parallel_group(), ) log_outputs = list(chain.from_iterable(log_outputs)) with metrics.aggregate() as agg: task.reduce_metrics(log_outputs, criterion) log_output = agg.get_smoothed_values() progress.print(log_output, tag=subset, step=i) def cli_main(): parser = options.get_validation_parser() args = options.parse_args_and_arch(parser) # only override args that are explicitly given on the command line override_parser = options.get_validation_parser() override_args = options.parse_args_and_arch( override_parser, suppress_defaults=True ) distributed_utils.call_main( convert_namespace_to_omegaconf(args), main, override_args=override_args ) if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq_cli/validate.py
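A minimal sketch of running the validation entry point above on a saved checkpoint; it omits the override-args parser used by cli_main(), and all paths are placeholders.

# Hypothetical invocation of fairseq_cli/validate.py: compute validation loss.
from fairseq import distributed_utils, options
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq_cli import validate


def run_validate_example():
    parser = options.get_validation_parser()
    args = options.parse_args_and_arch(
        parser,
        input_args=[
            "data-bin/example",                        # assumed data directory
            "--path", "checkpoints/checkpoint_best.pt",
            "--valid-subset", "valid",
            "--max-tokens", "4096",
        ],
    )
    # call_main handles the (single-process) distributed setup, as in cli_main().
    distributed_utils.call_main(convert_namespace_to_omegaconf(args), validate.main)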
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Translate raw text with a trained model. Batches data on-the-fly. """ import ast import fileinput import logging import math import os import sys import time from argparse import Namespace from collections import namedtuple import numpy as np import torch from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.token_generation_constraints import pack_constraints, unpack_constraints from fairseq_cli.generate import get_symbols_to_strip_from_output logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.interactive") Batch = namedtuple("Batch", "ids src_tokens src_lengths constraints") Translation = namedtuple("Translation", "src_str hypos pos_scores alignments") def buffered_read(input, buffer_size): buffer = [] with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h: for src_str in h: buffer.append(src_str.strip()) if len(buffer) >= buffer_size: yield buffer buffer = [] if len(buffer) > 0: yield buffer def make_batches(lines, cfg, task, max_positions, encode_fn): def encode_fn_target(x): return encode_fn(x) if cfg.generation.constraints: # Strip (tab-delimited) contraints, if present, from input lines, # store them in batch_constraints batch_constraints = [list() for _ in lines] for i, line in enumerate(lines): if "\t" in line: lines[i], *batch_constraints[i] = line.split("\t") # Convert each List[str] to List[Tensor] for i, constraint_list in enumerate(batch_constraints): batch_constraints[i] = [ task.target_dictionary.encode_line( encode_fn_target(constraint), append_eos=False, add_if_not_exist=False, ) for constraint in constraint_list ] if cfg.generation.constraints: constraints_tensor = pack_constraints(batch_constraints) else: constraints_tensor = None tokens, lengths = task.get_interactive_tokens_and_lengths(lines, encode_fn, truncate_length=cfg.generation.truncate_length) itr = task.get_batch_iterator( dataset=task.build_dataset_for_inference( tokens, lengths, constraints=constraints_tensor ), max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=max_positions, ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, ).next_epoch_itr(shuffle=False) for batch in itr: ids = batch["id"] src_tokens = batch["net_input"]["src_tokens"] src_lengths = batch["net_input"]["src_lengths"] constraints = batch.get("constraints", None) yield Batch( ids=ids, src_tokens=src_tokens, src_lengths=src_lengths, constraints=constraints, ) def main(cfg: FairseqConfig): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) start_time = time.time() total_translate_time = 0 utils.import_user_module(cfg.common) if cfg.interactive.buffer_size < 1: cfg.interactive.buffer_size = 1 if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: cfg.dataset.batch_size = 1 assert ( not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam ), "--sampling requires --nbest to be equal to --beam" assert ( not cfg.dataset.batch_size or cfg.dataset.batch_size <= 
cfg.interactive.buffer_size ), "--batch-size cannot be larger than --buffer-size" logger.info(cfg) # Fix seed for stochastic decoding if cfg.common.seed is not None and not cfg.generation.no_seed_provided: np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) use_cuda = torch.cuda.is_available() and not cfg.common.cpu # Setup task, e.g., translation task = tasks.setup_task(cfg.task) # Load ensemble overrides = ast.literal_eval(cfg.common_eval.model_overrides) logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, _model_args = checkpoint_utils.load_model_ensemble( utils.split_paths(cfg.common_eval.path), arg_overrides=overrides, task=task, suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, ) # Set dictionaries src_dict = task.source_dictionary tgt_dict = task.target_dictionary # Optimize ensemble for generation for model in models: if model is None: continue if cfg.common.fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) # Initialize generator generator = task.build_generator(models, cfg.generation) # Handle tokenization and BPE tokenizer = task.build_tokenizer(cfg.tokenizer) bpe = task.build_bpe(cfg.bpe) def encode_fn(x): if tokenizer is not None: x = tokenizer.encode(x) if bpe is not None: x = bpe.encode(x) return x def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(cfg.generation.replace_unk) max_positions = utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ) if cfg.generation.constraints: logger.warning( "NOTE: Constrained decoding currently assumes a shared subword vocabulary." 
) if cfg.interactive.buffer_size > 1: logger.info("Sentence buffer size: %s", cfg.interactive.buffer_size) logger.info("NOTE: hypothesis and token scores are output in base 2") logger.info("Type the input sentence and press return:") start_id = 0 for inputs in buffered_read(cfg.interactive.input, cfg.interactive.buffer_size): results = [] for batch in make_batches(inputs, cfg, task, max_positions, encode_fn): bsz = batch.src_tokens.size(0) src_tokens = batch.src_tokens src_lengths = batch.src_lengths constraints = batch.constraints if use_cuda: src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() if constraints is not None: constraints = constraints.cuda() sample = { "net_input": { "src_tokens": src_tokens, "src_lengths": src_lengths, }, } translate_start_time = time.time() translations = task.inference_step( generator, models, sample, constraints=constraints ) translate_time = time.time() - translate_start_time total_translate_time += translate_time list_constraints = [[] for _ in range(bsz)] if cfg.generation.constraints: list_constraints = [unpack_constraints(c) for c in constraints] for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)): src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad()) constraints = list_constraints[i] results.append( ( start_id + id, src_tokens_i, hypos, { "constraints": constraints, "time": translate_time / len(translations), }, ) ) # sort output to match input order for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]): src_str = '' if src_dict is not None: src_str = src_dict.string(src_tokens, cfg.common_eval.post_process) print("S-{}\t{}".format(id_, src_str)) print("W-{}\t{:.3f}\tseconds".format(id_, info["time"])) for constraint in info["constraints"]: print( "C-{}\t{}".format( id_, tgt_dict.string(constraint, cfg.common_eval.post_process) ) ) # Process top predictions for hypo in hypos[: min(len(hypos), cfg.generation.nbest)]: hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo["tokens"].int().cpu(), src_str=src_str, alignment=hypo["alignment"], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=cfg.common_eval.post_process, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) detok_hypo_str = decode_fn(hypo_str) score = hypo["score"] / math.log(2) # convert to base 2 # original hypothesis (after tokenization and BPE) print("H-{}\t{}\t{}".format(id_, score, hypo_str)) # detokenized hypothesis print("D-{}\t{}\t{}".format(id_, score, detok_hypo_str)) print( "P-{}\t{}".format( id_, " ".join( map( lambda x: "{:.4f}".format(x), # convert from base e to base 2 hypo["positional_scores"].div_(math.log(2)).tolist(), ) ), ) ) if cfg.generation.print_alignment: alignment_str = " ".join( ["{}-{}".format(src, tgt) for src, tgt in alignment] ) print("A-{}\t{}".format(id_, alignment_str)) # update running id_ counter start_id += len(inputs) logger.info( "Total time: {:.3f} seconds; translation time: {:.3f}".format( time.time() - start_time, total_translate_time ) ) def cli_main(): parser = options.get_interactive_generation_parser() args = options.parse_args_and_arch(parser) distributed_utils.call_main(convert_namespace_to_omegaconf(args), main) if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq_cli/interactive.py
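A minimal sketch of the interactive translation script above, reading raw text from a file rather than stdin; the data directory, checkpoint, and input file names are placeholders.

# Hypothetical invocation of fairseq_cli/interactive.py on a raw text file.
from fairseq import distributed_utils, options
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq_cli import interactive


def run_interactive_example():
    parser = options.get_interactive_generation_parser()
    args = options.parse_args_and_arch(
        parser,
        input_args=[
            "data-bin/example",                        # assumed data directory
            "--path", "checkpoints/checkpoint_best.pt",
            "--input", "source.txt",                   # assumed raw source file
            "--buffer-size", "64",
            "--batch-size", "16",
            "--beam", "5",
            "--remove-bpe",
        ],
    )
    distributed_utils.call_main(convert_namespace_to_omegaconf(args), interactive.main)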
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Train a new model on one or across multiple GPUs. """ import argparse import logging import math import os import sys from typing import Dict, Optional, Any, List, Tuple, Callable # We need to setup root logger before importing any fairseq libraries. logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.train") import numpy as np import torch from fairseq import ( checkpoint_utils, options, quantization_utils, tasks, utils, ) from fairseq.data import iterators, data_utils from fairseq.data.plasma_utils import PlasmaStore from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics, progress_bar from fairseq.model_parallel.megatron_trainer import MegatronTrainer from fairseq.trainer import Trainer from omegaconf import DictConfig, OmegaConf def main(cfg: FairseqConfig) -> None: if isinstance(cfg, argparse.Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) if distributed_utils.is_master(cfg.distributed_training) and "job_logging_cfg" in cfg: # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126) logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg)) assert ( cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None ), "Must specify batch size either with --max-tokens or --batch-size" metrics.reset() if cfg.common.log_file is not None: handler = logging.FileHandler(filename=cfg.common.log_file) logger.addHandler(handler) np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) if distributed_utils.is_master(cfg.distributed_training): checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir) # Print args logger.info(cfg) if cfg.checkpoint.write_checkpoints_asynchronously: try: import iopath # noqa: F401 except ImportError: logging.exception( "Asynchronous checkpoint writing is specified but iopath is " "not installed: `pip install iopath`" ) return # Setup task, e.g., translation, language modeling, etc. task = tasks.setup_task(cfg.task) assert cfg.criterion, "Please specify criterion to train a model" # Build model and criterion if cfg.distributed_training.ddp_backend == "fully_sharded": with fsdp_enable_wrap(cfg.distributed_training): model = fsdp_wrap(task.build_model(cfg.model)) else: model = task.build_model(cfg.model) criterion = task.build_criterion(cfg.criterion) logger.info(model) logger.info("task: {}".format(task.__class__.__name__)) logger.info("model: {}".format(model.__class__.__name__)) logger.info("criterion: {}".format(criterion.__class__.__name__)) logger.info( "num. shared model params: {:,} (num. trained: {:,})".format( sum(p.numel() for p in model.parameters() if not getattr(p, "expert", False)), sum(p.numel() for p in model.parameters() if not getattr(p, "expert", False) and p.requires_grad) ) ) logger.info( "num. expert model params: {} (num. 
trained: {})".format( sum(p.numel() for p in model.parameters() if getattr(p, "expert", False)), sum(p.numel() for p in model.parameters() if getattr(p, "expert", False) and p.requires_grad), ) ) # Load valid dataset (we load training data below, based on the latest checkpoint) # We load the valid dataset AFTER building the model data_utils.raise_if_valid_subsets_unintentionally_ignored(cfg) if cfg.dataset.combine_valid_subsets: task.load_dataset("valid", combine=True, epoch=1) else: for valid_sub_split in cfg.dataset.valid_subset.split(","): task.load_dataset(valid_sub_split, combine=False, epoch=1) # (optionally) Configure quantization if cfg.common.quantization_config_path is not None: quantizer = quantization_utils.Quantizer( config_path=cfg.common.quantization_config_path, max_epoch=cfg.optimization.max_epoch, max_update=cfg.optimization.max_update, ) else: quantizer = None # Build trainer if cfg.common.model_parallel_size == 1: trainer = Trainer(cfg, task, model, criterion, quantizer) else: trainer = MegatronTrainer(cfg, task, model, criterion) logger.info( "training on {} devices (GPUs/TPUs)".format( cfg.distributed_training.distributed_world_size ) ) logger.info( "max tokens per device = {} and max sentences per device = {}".format( cfg.dataset.max_tokens, cfg.dataset.batch_size, ) ) # Load the latest checkpoint if one is available and restore the # corresponding train iterator extra_state, epoch_itr = checkpoint_utils.load_checkpoint( cfg.checkpoint, trainer, # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) if cfg.common.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("load_checkpoint") # wait for all workers max_epoch = cfg.optimization.max_epoch or math.inf lr = trainer.get_lr() train_meter = meters.StopwatchMeter() train_meter.start() while epoch_itr.next_epoch_idx <= max_epoch: if lr <= cfg.optimization.stop_min_lr: logger.info( f"stopping training because current learning rate ({lr}) is smaller " "than or equal to minimum learning rate " f"(--stop-min-lr={cfg.optimization.stop_min_lr})" ) break # train for one epoch valid_losses, should_stop = train(cfg, trainer, task, epoch_itr) if should_stop: break # only use first validation loss to update the learning rate lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0]) epoch_itr = trainer.get_train_iterator( epoch_itr.next_epoch_idx, # sharded data: get train iterator for next epoch load_dataset=task.has_sharded_data("train"), # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) train_meter.stop() logger.info("done training in {:.1f} seconds".format(train_meter.sum)) if getattr(epoch_itr, "should_close_after_finished", False): epoch_itr.close() # ioPath implementation to wait for all asynchronous file writes to complete. if cfg.checkpoint.write_checkpoints_asynchronously: logger.info( "ioPath PathManager waiting for all asynchronous checkpoint " "writes to finish." 
) PathManager.async_close() logger.info("ioPath PathManager finished waiting.") def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool: # skip check if no validation was done in the current epoch if valid_loss is None: return False if cfg.checkpoint.patience <= 0: return False def is_better(a, b): return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b prev_best = getattr(should_stop_early, "best", None) if prev_best is None or is_better(valid_loss, prev_best): should_stop_early.best = valid_loss should_stop_early.num_runs = 0 return False else: should_stop_early.num_runs += 1 if should_stop_early.num_runs >= cfg.checkpoint.patience: logger.info( "early stop since valid performance hasn't improved for last {} runs".format( cfg.checkpoint.patience ) ) return True else: return False @metrics.aggregate("train") def train( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr ) -> Tuple[List[Optional[float]], bool]: """Train the model for one epoch and return validation losses.""" # Initialize data iterator itr = epoch_itr.next_epoch_itr( fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus, shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum), ) update_freq = ( cfg.optimization.update_freq[epoch_itr.epoch - 1] if epoch_itr.epoch <= len(cfg.optimization.update_freq) else cfg.optimization.update_freq[-1] ) itr = iterators.GroupedIterator(itr, update_freq) if cfg.common.tpu: itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_file=cfg.common.log_file, log_interval=cfg.common.log_interval, epoch=epoch_itr.epoch, tensorboard_logdir=( cfg.common.tensorboard_logdir if distributed_utils.is_master(cfg.distributed_training) else None ), default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), wandb_project=( cfg.common.wandb_project if distributed_utils.is_master(cfg.distributed_training) else None ), wandb_run_name=os.environ.get( "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir) ), azureml_logging=( cfg.common.azureml_logging if distributed_utils.is_master(cfg.distributed_training) else False ), ) progress.update_config(_flatten_config(cfg)) trainer.begin_epoch(epoch_itr.epoch) valid_subsets = cfg.dataset.valid_subset.split(",") should_stop = False num_updates = trainer.get_num_updates() logger.info("Start iterating over samples") for i, samples in enumerate(progress): with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function( "train_step-%d" % i ): log_output = trainer.train_step(samples) if log_output is not None: # not OOM, overflow, ... 
# log mid-epoch stats num_updates = trainer.get_num_updates() if num_updates % cfg.common.log_interval == 0: stats = get_training_stats(metrics.get_smoothed_values("train_inner")) progress.log(stats, tag="train_inner", step=num_updates) # reset mid-epoch stats after each log interval # the end-of-epoch stats will still be preserved metrics.reset_meters("train_inner") end_of_epoch = not itr.has_next() valid_losses, should_stop = validate_and_save( cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch ) if should_stop: break # log end-of-epoch stats logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch)) stats = get_training_stats(metrics.get_smoothed_values("train")) progress.print(stats, tag="train", step=num_updates) # reset epoch-level meters metrics.reset_meters("train") return valid_losses, should_stop def _flatten_config(cfg: DictConfig): config = OmegaConf.to_container(cfg) # remove any legacy Namespaces and replace with a single "args" namespace = None for k, v in list(config.items()): if isinstance(v, argparse.Namespace): namespace = v del config[k] if namespace is not None: config["args"] = vars(namespace) return config def validate_and_save( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, valid_subsets: List[str], end_of_epoch: bool, ) -> Tuple[List[Optional[float]], bool]: num_updates = trainer.get_num_updates() max_update = cfg.optimization.max_update or math.inf # Stopping conditions (and an additional one based on validation loss later # on) should_stop = False if num_updates >= max_update: should_stop = True logger.info( f"Stopping training due to " f"num_updates: {num_updates} >= max_update: {max_update}" ) training_time_hours = trainer.cumulative_training_time() / (60 * 60) if ( cfg.optimization.stop_time_hours > 0 and training_time_hours > cfg.optimization.stop_time_hours ): should_stop = True logger.info( f"Stopping training due to " f"cumulative_training_time: {training_time_hours} > " f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)" ) do_save = ( (end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0) or should_stop or ( cfg.checkpoint.save_interval_updates > 0 and num_updates > 0 and num_updates % cfg.checkpoint.save_interval_updates == 0 and num_updates >= cfg.dataset.validate_after_updates ) ) do_validate = ( (not end_of_epoch and do_save) # validate during mid-epoch saves or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0) or should_stop or ( cfg.dataset.validate_interval_updates > 0 and num_updates > 0 and num_updates % cfg.dataset.validate_interval_updates == 0 ) ) and not cfg.dataset.disable_validation and num_updates >= cfg.dataset.validate_after_updates # Validate valid_losses = [None] if do_validate: valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets) should_stop |= should_stop_early(cfg, valid_losses[0]) # Save checkpoint if do_save or should_stop: checkpoint_utils.save_checkpoint( cfg.checkpoint, trainer, epoch_itr, valid_losses[0] ) return valid_losses, should_stop def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]: stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0) return stats def validate( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, subsets: List[str], ) -> List[Optional[float]]: """Evaluate the model on the validation set(s) and return the losses.""" if cfg.dataset.fixed_validation_seed is not None: # set fixed seed for every validation 
utils.set_torch_seed(cfg.dataset.fixed_validation_seed) trainer.begin_valid_epoch(epoch_itr.epoch) valid_losses = [] for subset in subsets: logger.info('begin validation on "{}" subset'.format(subset)) # Initialize data iterator itr = trainer.get_valid_iterator(subset).next_epoch_itr( shuffle=False, set_dataset_epoch=False # use a fixed valid set ) if cfg.common.tpu: itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, epoch=epoch_itr.epoch, prefix=f"valid on '{subset}' subset", tensorboard_logdir=( cfg.common.tensorboard_logdir if distributed_utils.is_master(cfg.distributed_training) else None ), default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), wandb_project=( cfg.common.wandb_project if distributed_utils.is_master(cfg.distributed_training) else None ), wandb_run_name=os.environ.get( "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir) ), ) # create a new root metrics aggregator so validation metrics # don't pollute other aggregators (e.g., train meters) with metrics.aggregate(new_root=True) as agg: logging_outputs = [] for i, sample in enumerate(progress): if cfg.dataset.max_valid_steps is not None and i > cfg.dataset.max_valid_steps: break inner_logging_outputs = trainer.valid_step(sample) logging_outputs.extend(inner_logging_outputs) task.reduce_metrics(logging_outputs, trainer.get_criterion()) # log validation stats stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values()) if hasattr(task, "post_validate"): task.post_validate(trainer.get_model(), stats, agg) progress.print(stats, tag=subset, step=trainer.get_num_updates()) valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric]) return valid_losses def get_valid_stats( cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any] ) -> Dict[str, Any]: stats["num_updates"] = trainer.get_num_updates() if hasattr(checkpoint_utils.save_checkpoint, "best"): key = "best_{0}".format(cfg.checkpoint.best_checkpoint_metric) best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min stats[key] = best_function( checkpoint_utils.save_checkpoint.best, stats[cfg.checkpoint.best_checkpoint_metric], ) return stats def cli_main( modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None ) -> None: parser = options.get_training_parser() args = options.parse_args_and_arch(parser, modify_parser=modify_parser) cfg = convert_namespace_to_omegaconf(args) if cfg.common.use_plasma_view: server = PlasmaStore(path=cfg.common.plasma_path) logger.info(f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}") if args.profile: with torch.cuda.profiler.profile(): with torch.autograd.profiler.emit_nvtx(): distributed_utils.call_main(cfg, main) else: distributed_utils.call_main(cfg, main) # if cfg.common.use_plasma_view: # server.server.kill() if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq_cli/train.py
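A minimal training sketch built on the script above. Since cli_main() parses sys.argv, the arguments are injected there; the data directory, architecture, and hyperparameters are placeholders for a small translation run, not a recommended configuration.

# Hypothetical training launch through fairseq_cli/train.py.
import sys

from fairseq_cli import train


def run_train_example():
    # cli_main() reads sys.argv, so a sketch can simply overwrite it.
    sys.argv = [
        "fairseq-train",
        "data-bin/example",                 # assumed binarized data directory
        "--task", "translation",
        "--arch", "transformer",
        "--optimizer", "adam",
        "--lr", "5e-4",
        "--lr-scheduler", "inverse_sqrt",
        "--warmup-updates", "4000",
        "--criterion", "label_smoothed_cross_entropy",
        "--label-smoothing", "0.1",
        "--max-tokens", "4096",
        "--max-epoch", "1",
        "--save-dir", "checkpoints/example",
    ]
    train.cli_main()


if __name__ == "__main__":
    run_train_example()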
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Evaluate the perplexity of a trained language model. """ import logging import math import os import sys from argparse import Namespace from typing import Iterable, List, Optional import torch import fairseq from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter from fairseq.sequence_scorer import SequenceScorer from omegaconf import DictConfig logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) logger = logging.getLogger("fairseq_cli.eval_lm") def eval_lm( models: List[fairseq.models.FairseqModel], source_dictionary: fairseq.data.Dictionary, batch_iterator: Iterable, post_process: Optional[str] = None, output_word_probs: bool = False, output_word_stats: bool = False, target_dictionary: Optional[fairseq.data.Dictionary] = None, softmax_batch: int = 0, remove_bos_token: bool = False, device: Optional[torch.device] = None, ): """ Args: models (List[~fairseq.models.FairseqModel]): list of models to evaluate. Models are essentially `nn.Module` instances, but must be compatible with fairseq's `SequenceScorer`. source_dictionary (~fairseq.data.Dictionary): dictionary for applying any relevant post processing or outputing word probs/stats. batch_iterator (Iterable): yield batches of data post_process (Optional[str]): post-process text by removing BPE, letter segmentation, etc. Valid options can be found in fairseq.data.utils.post_process, although not all options are implemented here. 
output_word_probs (Optional[bool]): output words and their predicted log probabilities output_word_stats (Optional[bool]): output word statistics such as word count and average probability target_dictionary (Optional[~fairseq.data.Dictionary]): output dictionary (defaults to *source_dictionary*) softmax_batch (Optional[bool]): if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory remove_bos_token (Optional[bool]): if True, confirm that the first token is the beginning-of-sentence symbol (according to the relevant dictionary) and remove it from the output device (Optional[torch.device]): device to use for evaluation (defaults to device of first model parameter) """ if target_dictionary is None: target_dictionary = source_dictionary if device is None: device = next(models[0].parameters()).device gen_timer = StopwatchMeter() scorer = SequenceScorer(target_dictionary, softmax_batch) score_sum = 0.0 count = 0 if post_process is not None: if post_process in {"subword_nmt", "@@ "}: bpe_cont = post_process.rstrip() bpe_toks = { i for i in range(len(source_dictionary)) if source_dictionary[i].endswith(bpe_cont) } else: raise NotImplementedError( "--post-process={post_process} is not implemented" ) bpe_len = len(bpe_cont) else: bpe_toks = None bpe_len = 0 word_stats = dict() for sample in batch_iterator: if "net_input" not in sample: continue sample = utils.move_to_cuda(sample, device=device) gen_timer.start() hypos = scorer.generate(models, sample) gen_timer.stop(sample["ntokens"]) for i, hypos_i in enumerate(hypos): hypo = hypos_i[0] sample_id = sample["id"][i] tokens = hypo["tokens"] tgt_len = tokens.numel() pos_scores = hypo["positional_scores"].float() if remove_bos_token: assert hypo["tokens"][0].item() == target_dictionary.bos() tokens = tokens[1:] pos_scores = pos_scores[1:] skipped_toks = 0 if bpe_toks is not None: for i in range(tgt_len - 1): if tokens[i].item() in bpe_toks: skipped_toks += 1 pos_scores[i + 1] += pos_scores[i] pos_scores[i] = 0 inf_scores = pos_scores.eq(float("inf")) | pos_scores.eq(float("-inf")) if inf_scores.any(): logger.info( "skipping tokens with inf scores:", target_dictionary.string(tokens[inf_scores.nonzero()]), ) pos_scores = pos_scores[(~inf_scores).nonzero()] score_sum += pos_scores.sum().cpu() count += pos_scores.numel() - skipped_toks if output_word_probs or output_word_stats: w = "" word_prob = [] is_bpe = False for i in range(len(tokens)): w_ind = tokens[i].item() w += source_dictionary[w_ind] if bpe_toks is not None and w_ind in bpe_toks: w = w[:-bpe_len] is_bpe = True else: word_prob.append((w, pos_scores[i].item())) next_prob = None ind = i + 1 while ind < len(tokens): if pos_scores[ind].item() != 0: next_prob = pos_scores[ind] break ind += 1 word_stats.setdefault(w, WordStat(w, is_bpe)).add( pos_scores[i].item(), next_prob ) is_bpe = False w = "" if output_word_probs: logger.info( str(int(sample_id)) + " " + ( "\t".join( "{} [{:2f}]".format(x[0], x[1]) for x in word_prob ) ) ) avg_nll_loss = ( -score_sum / count / math.log(2) if count > 0 else 0 ) # convert to base 2 logger.info( "Evaluated {:,} tokens in {:.1f}s ({:.2f} tokens/s)".format( gen_timer.n, gen_timer.sum, 1.0 / gen_timer.avg if gen_timer.avg > 0 else 0 ) ) if output_word_stats: for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True): logger.info(ws) return { "loss": avg_nll_loss, "perplexity": 2 ** avg_nll_loss, } class WordStat(object): def __init__(self, word, is_bpe): self.word = word self.is_bpe = is_bpe 
self.log_prob = 0 self.next_word_prob = 0 self.count = 0 self.missing_next_words = 0 def add(self, log_prob, next_word_prob): """increments counters for the sum of log probs of current word and next word (given context ending at current word). Since the next word might be at the end of the example, or it might be not counted because it is not an ending subword unit, also keeps track of how many of those we have seen""" if next_word_prob is not None: self.next_word_prob += next_word_prob else: self.missing_next_words += 1 self.log_prob += log_prob self.count += 1 def __str__(self): return "{}\t{}\t{}\t{}\t{}\t{}".format( self.word, self.count, self.log_prob, self.is_bpe, self.next_word_prob, self.count - self.missing_next_words, ) def main(cfg: DictConfig, **unused_kwargs): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) logger.info(cfg) if cfg.eval_lm.context_window > 0: # reduce tokens per sample by the required context window size cfg.task.tokens_per_sample -= cfg.eval_lm.context_window # Initialize the task using the current *cfg* task = tasks.setup_task(cfg.task) # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, model_args, task = checkpoint_utils.load_model_ensemble_and_task( [cfg.common_eval.path], arg_overrides=eval(cfg.common_eval.model_overrides), suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, task=task, ) use_fp16 = cfg.common.fp16 use_cuda = torch.cuda.is_available() and not cfg.common.cpu if use_cuda: torch.cuda.set_device(cfg.distributed_training.device_id) # Optimize ensemble for generation and set the source and dest dicts on the model # (required by scorer) for model in models: if use_fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) assert len(models) > 0 logger.info( "num. 
model params: {:,}".format(sum(p.numel() for p in models[0].parameters())) ) # Load dataset splits task.load_dataset(cfg.dataset.gen_subset) dataset = task.dataset(cfg.dataset.gen_subset) logger.info( "{} {} {:,} examples".format( cfg.task.data, cfg.dataset.gen_subset, len(dataset) ) ) itr = task.eval_lm_dataloader( dataset=dataset, max_tokens=cfg.dataset.max_tokens or 36000, batch_size=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( *[model.max_positions() for model in models] ), num_shards=max( cfg.dataset.num_shards, cfg.distributed_training.distributed_world_size, ), shard_id=max( cfg.dataset.shard_id, cfg.distributed_training.distributed_rank, ), num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, context_window=cfg.eval_lm.context_window, ) itr = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) results = eval_lm( models=models, source_dictionary=task.source_dictionary, batch_iterator=itr, post_process=cfg.common_eval.post_process, output_word_probs=cfg.eval_lm.output_word_probs, output_word_stats=cfg.eval_lm.output_word_stats, target_dictionary=task.target_dictionary, softmax_batch=cfg.eval_lm.softmax_batch, remove_bos_token=getattr(cfg.task, "add_bos_token", False), ) logger.info( "Loss (base 2): {:.4f}, Perplexity: {:.2f}".format( results["loss"], results["perplexity"] ) ) return results def cli_main(): parser = options.get_eval_lm_parser() args = options.parse_args_and_arch(parser) distributed_utils.call_main(convert_namespace_to_omegaconf(args), main) if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq_cli/eval_lm.py
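A minimal sketch of scoring a held-out set with the language-model evaluation script above; the data directory and checkpoint are placeholders, and the context-window value is only illustrative.

# Hypothetical invocation of fairseq_cli/eval_lm.py.
from fairseq import distributed_utils, options
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq_cli import eval_lm


def run_eval_lm_example():
    parser = options.get_eval_lm_parser()
    args = options.parse_args_and_arch(
        parser,
        input_args=[
            "data-bin/lm-example",                     # assumed LM data directory
            "--path", "checkpoints/lm_best.pt",
            "--gen-subset", "test",
            "--batch-size", "2",
            "--tokens-per-sample", "512",
            "--context-window", "400",
        ],
    )
    distributed_utils.call_main(convert_namespace_to_omegaconf(args), eval_lm.main)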
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ BLEU scoring of generated translations against reference translations. """ import argparse import os import sys from fairseq.data import dictionary from fairseq.scoring import bleu def get_parser(): parser = argparse.ArgumentParser( description="Command-line script for BLEU scoring." ) # fmt: off parser.add_argument('-s', '--sys', default='-', help='system output') parser.add_argument('-r', '--ref', required=True, help='references') parser.add_argument('-o', '--order', default=4, metavar='N', type=int, help='consider ngrams up to this order') parser.add_argument('--ignore-case', action='store_true', help='case-insensitive scoring') parser.add_argument('--sacrebleu', action='store_true', help='score with sacrebleu') parser.add_argument('--sentence-bleu', action='store_true', help='report sentence-level BLEUs (i.e., with +1 smoothing)') # fmt: on return parser def cli_main(): parser = get_parser() args = parser.parse_args() print(args) assert args.sys == "-" or os.path.exists( args.sys ), "System output file {} does not exist".format(args.sys) assert os.path.exists(args.ref), "Reference file {} does not exist".format(args.ref) dict = dictionary.Dictionary() def readlines(fd): for line in fd.readlines(): if args.ignore_case: yield line.lower() else: yield line if args.sacrebleu: import sacrebleu def score(fdsys): with open(args.ref) as fdref: print(sacrebleu.corpus_bleu(fdsys, [fdref]).format()) elif args.sentence_bleu: def score(fdsys): with open(args.ref) as fdref: scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk()) for i, (sys_tok, ref_tok) in enumerate( zip(readlines(fdsys), readlines(fdref)) ): scorer.reset(one_init=True) sys_tok = dict.encode_line(sys_tok) ref_tok = dict.encode_line(ref_tok) scorer.add(ref_tok, sys_tok) print(i, scorer.result_string(args.order)) else: def score(fdsys): with open(args.ref) as fdref: scorer = bleu.Scorer( bleu.BleuConfig( pad=dict.pad(), eos=dict.eos(), unk=dict.unk(), ) ) for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)): sys_tok = dict.encode_line(sys_tok) ref_tok = dict.encode_line(ref_tok) scorer.add(ref_tok, sys_tok) print(scorer.result_string(args.order)) if args.sys == "-": score(sys.stdin) else: with open(args.sys, "r") as f: score(f) if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/edgelm/fairseq_cli/score.py
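A minimal sketch of the BLEU scoring script above. The file names are placeholders; the inputs are expected to be plain tokenized text, one hypothesis or reference per line (e.g. extracted from the H-/T- lines that generate.py prints).

# Hypothetical invocation of fairseq_cli/score.py.
import sys

from fairseq_cli import score


def run_score_example():
    # cli_main() uses argparse over sys.argv, so the sketch injects arguments there.
    sys.argv = [
        "fairseq-score",
        "--sys", "hypotheses.txt",     # assumed hypothesis file
        "--ref", "references.txt",     # assumed reference file
        "--order", "4",
    ]
    score.cli_main()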
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. try: from fairseq.version import __version__ # noqa except ImportError: pass
EXA-1-master
exa/models/unilm-master/edgelm/examples/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from multiprocessing import Pool import numpy as np from fairseq import options from fairseq.data import dictionary from fairseq.scoring import bleu from examples.noisychannel import ( rerank_generate, rerank_options, rerank_score_bw, rerank_score_lm, rerank_utils, ) def score_target_hypo( args, a, b, c, lenpen, target_outfile, hypo_outfile, write_hypos, normalize ): print("lenpen", lenpen, "weight1", a, "weight2", b, "weight3", c) gen_output_lst, bitext1_lst, bitext2_lst, lm_res_lst = load_score_files(args) dict = dictionary.Dictionary() scorer = scorer = bleu.Scorer( bleu.BleuConfig( pad=dict.pad(), eos=dict.eos(), unk=dict.unk(), ) ) ordered_hypos = {} ordered_targets = {} for shard_id in range(len(bitext1_lst)): bitext1 = bitext1_lst[shard_id] bitext2 = bitext2_lst[shard_id] gen_output = gen_output_lst[shard_id] lm_res = lm_res_lst[shard_id] total = len(bitext1.rescore_source.keys()) source_lst = [] hypo_lst = [] score_lst = [] reference_lst = [] j = 1 best_score = -math.inf for i in range(total): # length is measured in terms of words, not bpe tokens, since models may not share the same bpe target_len = len(bitext1.rescore_hypo[i].split()) if lm_res is not None: lm_score = lm_res.score[i] else: lm_score = 0 if bitext2 is not None: bitext2_score = bitext2.rescore_score[i] bitext2_backwards = bitext2.backwards else: bitext2_score = None bitext2_backwards = None score = rerank_utils.get_score( a, b, c, target_len, bitext1.rescore_score[i], bitext2_score, lm_score=lm_score, lenpen=lenpen, src_len=bitext1.source_lengths[i], tgt_len=bitext1.target_lengths[i], bitext1_backwards=bitext1.backwards, bitext2_backwards=bitext2_backwards, normalize=normalize, ) if score > best_score: best_score = score best_hypo = bitext1.rescore_hypo[i] if j == gen_output.num_hypos[i] or j == args.num_rescore: j = 1 hypo_lst.append(best_hypo) score_lst.append(best_score) source_lst.append(bitext1.rescore_source[i]) reference_lst.append(bitext1.rescore_target[i]) best_score = -math.inf best_hypo = "" else: j += 1 gen_keys = list(sorted(gen_output.no_bpe_target.keys())) for key in range(len(gen_keys)): if args.prefix_len is None: assert hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]], ( "pred and rescore hypo mismatch: i: " + str(key) + ", " + str(hypo_lst[key]) + str(gen_keys[key]) + str(gen_output.no_bpe_hypo[key]) ) sys_tok = dict.encode_line(hypo_lst[key]) ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]]) scorer.add(ref_tok, sys_tok) else: full_hypo = rerank_utils.get_full_from_prefix( hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]] ) sys_tok = dict.encode_line(full_hypo) ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]]) scorer.add(ref_tok, sys_tok) # if only one set of hyper parameters is provided, write the predictions to a file if write_hypos: # recover the orinal ids from n best list generation for key in range(len(gen_output.no_bpe_target)): if args.prefix_len is None: assert hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]], ( "pred and rescore hypo mismatch:" + "i:" + str(key) + str(hypo_lst[key]) + str(gen_output.no_bpe_hypo[key]) ) ordered_hypos[gen_keys[key]] = hypo_lst[key] ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[ gen_keys[key] ] else: full_hypo = rerank_utils.get_full_from_prefix( hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]] ) 
ordered_hypos[gen_keys[key]] = full_hypo ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[ gen_keys[key] ] # write the hypos in the original order from nbest list generation if args.num_shards == (len(bitext1_lst)): with open(target_outfile, "w") as t: with open(hypo_outfile, "w") as h: for key in range(len(ordered_hypos)): t.write(ordered_targets[key]) h.write(ordered_hypos[key]) res = scorer.result_string(4) if write_hypos: print(res) score = rerank_utils.parse_bleu_scoring(res) return score def match_target_hypo(args, target_outfile, hypo_outfile): """combine scores from the LM and bitext models, and write the top scoring hypothesis to a file""" if len(args.weight1) == 1: res = score_target_hypo( args, args.weight1[0], args.weight2[0], args.weight3[0], args.lenpen[0], target_outfile, hypo_outfile, True, args.normalize, ) rerank_scores = [res] else: print("launching pool") with Pool(32) as p: rerank_scores = p.starmap( score_target_hypo, [ ( args, args.weight1[i], args.weight2[i], args.weight3[i], args.lenpen[i], target_outfile, hypo_outfile, False, args.normalize, ) for i in range(len(args.weight1)) ], ) if len(rerank_scores) > 1: best_index = np.argmax(rerank_scores) best_score = rerank_scores[best_index] print("best score", best_score) print("best lenpen", args.lenpen[best_index]) print("best weight1", args.weight1[best_index]) print("best weight2", args.weight2[best_index]) print("best weight3", args.weight3[best_index]) return ( args.lenpen[best_index], args.weight1[best_index], args.weight2[best_index], args.weight3[best_index], best_score, ) else: return ( args.lenpen[0], args.weight1[0], args.weight2[0], args.weight3[0], rerank_scores[0], ) def load_score_files(args): if args.all_shards: shard_ids = list(range(args.num_shards)) else: shard_ids = [args.shard_id] gen_output_lst = [] bitext1_lst = [] bitext2_lst = [] lm_res1_lst = [] for shard_id in shard_ids: using_nbest = args.nbest_list is not None ( pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, backwards_preprocessed_dir, lm_preprocessed_dir, ) = rerank_utils.get_directories( args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac, ) rerank1_is_gen = ( args.gen_model == args.score_model1 and args.source_prefix_frac is None ) rerank2_is_gen = ( args.gen_model == args.score_model2 and args.source_prefix_frac is None ) score1_file = rerank_utils.rescore_file_name( pre_gen, args.prefix_len, args.model1_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards1, ) if args.score_model2 is not None: score2_file = rerank_utils.rescore_file_name( pre_gen, args.prefix_len, args.model2_name, target_prefix_frac=args.target_prefix_frac, source_prefix_frac=args.source_prefix_frac, backwards=args.backwards2, ) if args.language_model is not None: lm_score_file = rerank_utils.rescore_file_name( pre_gen, args.prefix_len, args.lm_name, lm_file=True ) # get gen output predictions_bpe_file = pre_gen + "/generate_output_bpe.txt" if using_nbest: print("Using predefined n-best list from interactive.py") predictions_bpe_file = args.nbest_list gen_output = rerank_utils.BitextOutputFromGen( predictions_bpe_file, bpe_symbol=args.post_process, nbest=using_nbest, prefix_len=args.prefix_len, target_prefix_frac=args.target_prefix_frac, ) if rerank1_is_gen: bitext1 = gen_output else: bitext1 = rerank_utils.BitextOutput( score1_file, 
args.backwards1, args.right_to_left1, args.post_process, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac, ) if args.score_model2 is not None or args.nbest_list is not None: if rerank2_is_gen: bitext2 = gen_output else: bitext2 = rerank_utils.BitextOutput( score2_file, args.backwards2, args.right_to_left2, args.post_process, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac, ) assert ( bitext2.source_lengths == bitext1.source_lengths ), "source lengths for rescoring models do not match" assert ( bitext2.target_lengths == bitext1.target_lengths ), "target lengths for rescoring models do not match" else: if args.diff_bpe: assert args.score_model2 is None bitext2 = gen_output else: bitext2 = None if args.language_model is not None: lm_res1 = rerank_utils.LMOutput( lm_score_file, args.lm_dict, args.prefix_len, args.post_process, args.target_prefix_frac, ) else: lm_res1 = None gen_output_lst.append(gen_output) bitext1_lst.append(bitext1) bitext2_lst.append(bitext2) lm_res1_lst.append(lm_res1) return gen_output_lst, bitext1_lst, bitext2_lst, lm_res1_lst def rerank(args): if type(args.lenpen) is not list: args.lenpen = [args.lenpen] if type(args.weight1) is not list: args.weight1 = [args.weight1] if type(args.weight2) is not list: args.weight2 = [args.weight2] if type(args.weight3) is not list: args.weight3 = [args.weight3] if args.all_shards: shard_ids = list(range(args.num_shards)) else: shard_ids = [args.shard_id] for shard_id in shard_ids: ( pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, backwards_preprocessed_dir, lm_preprocessed_dir, ) = rerank_utils.get_directories( args.data_dir_name, args.num_rescore, args.gen_subset, args.gen_model_name, shard_id, args.num_shards, args.sampling, args.prefix_len, args.target_prefix_frac, args.source_prefix_frac, ) rerank_generate.gen_and_reprocess_nbest(args) rerank_score_bw.score_bw(args) rerank_score_lm.score_lm(args) if args.write_hypos is None: write_targets = pre_gen + "/matched_targets" write_hypos = pre_gen + "/matched_hypos" else: write_targets = args.write_hypos + "_targets" + args.gen_subset write_hypos = args.write_hypos + "_hypos" + args.gen_subset if args.all_shards: write_targets += "_all_shards" write_hypos += "_all_shards" ( best_lenpen, best_weight1, best_weight2, best_weight3, best_score, ) = match_target_hypo(args, write_targets, write_hypos) return best_lenpen, best_weight1, best_weight2, best_weight3, best_score def cli_main(): parser = rerank_options.get_reranking_parser() args = options.parse_args_and_arch(parser) rerank(args) if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/edgelm/examples/noisychannel/rerank.py
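A heavily hedged sketch of driving the noisy-channel reranking module above. All checkpoints, names, and the data directory are placeholders, and a real run needs the generation, backward-scoring, and (optional) LM-scoring steps that rerank() triggers internally, plus matching dictionaries and BPE codes.

# Hypothetical noisy-channel reranking call (see rerank_options for the flags).
from fairseq import options

from examples.noisychannel import rerank, rerank_options


def run_rerank_example():
    parser = rerank_options.get_reranking_parser()
    args = options.parse_args_and_arch(
        parser,
        input_args=[
            "data-bin/example",                           # assumed data directory
            "--gen-model", "checkpoints/forward.pt",      # model that makes the n-best list
            "--gen-model-name", "forward",
            "--score-model1", "checkpoints/backward.pt",  # reverse model for rescoring
            "--model1-name", "backward",
            "--backwards1",
            "--data-dir-name", "example",
            "--num-rescore", "10",
            "--gen-subset", "valid",
        ],
    )
    return rerank.rerank(args)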
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from fairseq import options def get_reranking_parser(default_task="translation"): parser = options.get_parser("Generation and reranking", default_task) add_reranking_args(parser) return parser def get_tuning_parser(default_task="translation"): parser = options.get_parser("Reranking tuning", default_task) add_reranking_args(parser) add_tuning_args(parser) return parser def add_reranking_args(parser): group = parser.add_argument_group("Reranking") # fmt: off group.add_argument('--score-model1', '-s1', type=str, metavar='FILE', required=True, help='path to first model or ensemble of models for rescoring') group.add_argument('--score-model2', '-s2', type=str, metavar='FILE', required=False, help='path to second model or ensemble of models for rescoring') group.add_argument('--num-rescore', '-n', type=int, metavar='N', default=10, help='the number of candidate hypothesis to rescore') group.add_argument('-bz', '--batch-size', type=int, metavar='N', default=128, help='batch size for generating the nbest list') group.add_argument('--gen-subset', default='test', metavar='SET', choices=['test', 'train', 'valid'], help='data subset to generate (train, valid, test)') group.add_argument('--gen-model', default=None, metavar='FILE', help='the model to generate translations') group.add_argument('-b1', '--backwards1', action='store_true', help='whether or not the first model group is backwards') group.add_argument('-b2', '--backwards2', action='store_true', help='whether or not the second model group is backwards') group.add_argument('-a', '--weight1', default=1, nargs='+', type=float, help='the weight(s) of the first model') group.add_argument('-b', '--weight2', default=1, nargs='+', type=float, help='the weight(s) of the second model, or the gen model if using nbest from interactive.py') group.add_argument('-c', '--weight3', default=1, nargs='+', type=float, help='the weight(s) of the third model') # lm arguments group.add_argument('-lm', '--language-model', default=None, metavar='FILE', help='language model for target language to rescore translations') group.add_argument('--lm-dict', default=None, metavar='FILE', help='the dict of the language model for the target language') group.add_argument('--lm-name', default=None, help='the name of the language model for the target language') group.add_argument('--lm-bpe-code', default=None, metavar='FILE', help='the bpe code for the language model for the target language') group.add_argument('--data-dir-name', default=None, help='name of data directory') group.add_argument('--lenpen', default=1, nargs='+', type=float, help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences') group.add_argument('--score-dict-dir', default=None, help='the directory with dictionaries for the scoring models') group.add_argument('--right-to-left1', action='store_true', help='whether the first model group is a right to left model') group.add_argument('--right-to-left2', action='store_true', help='whether the second model group is a right to left model') group.add_argument('--post-process', '--remove-bpe', default='@@ ', help='the bpe symbol, used for the bitext and LM') group.add_argument('--prefix-len', default=None, type=int, help='the length of the target prefix to use in rescoring (in terms of words wo bpe)') group.add_argument('--sampling', action='store_true', help='use sampling instead of 
beam search for generating n best list') group.add_argument('--diff-bpe', action='store_true', help='bpe for rescoring and nbest list not the same') group.add_argument('--rescore-bpe-code', default=None, help='bpe code for rescoring models') group.add_argument('--nbest-list', default=None, help='use predefined nbest list in interactive.py format') group.add_argument('--write-hypos', default=None, help='filename prefix to write hypos to') group.add_argument('--ref-translation', default=None, help='reference translation to use with nbest list from interactive.py') group.add_argument('--backwards-score-dict-dir', default=None, help='the directory with dictionaries for the backwards model,' 'if None then it is assumed the fw and backwards models share dictionaries') # extra scaling args group.add_argument('--gen-model-name', default=None, help='the name of the models that generated the nbest list') group.add_argument('--model1-name', default=None, help='the name of the set for model1 group ') group.add_argument('--model2-name', default=None, help='the name of the set for model2 group') group.add_argument('--shard-id', default=0, type=int, help='the id of the shard to generate') group.add_argument('--num-shards', default=1, type=int, help='the number of shards to generate across') group.add_argument('--all-shards', action='store_true', help='use all shards') group.add_argument('--target-prefix-frac', default=None, type=float, help='the fraction of the target prefix to use in rescoring (in terms of words wo bpe)') group.add_argument('--source-prefix-frac', default=None, type=float, help='the fraction of the source prefix to use in rescoring (in terms of words wo bpe)') group.add_argument('--normalize', action='store_true', help='whether to normalize by src and target len') # fmt: on return group def add_tuning_args(parser): group = parser.add_argument_group("Tuning") group.add_argument( "--lower-bound", default=[-0.7], nargs="+", type=float, help="lower bound of search space", ) group.add_argument( "--upper-bound", default=[3], nargs="+", type=float, help="upper bound of search space", ) group.add_argument( "--tune-param", default=["lenpen"], nargs="+", choices=["lenpen", "weight1", "weight2", "weight3"], help="the parameter(s) to tune", ) group.add_argument( "--tune-subset", default="valid", choices=["valid", "test", "train"], help="the subset to tune on ", ) group.add_argument( "--num-trials", default=1000, type=int, help="number of trials to do for random search", ) group.add_argument( "--share-weights", action="store_true", help="share weight2 and weight 3" ) return group
EXA-1-master
exa/models/unilm-master/edgelm/examples/noisychannel/rerank_options.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import random import numpy as np from fairseq import options from examples.noisychannel import rerank, rerank_options def random_search(args): param_values = [] tuneable_parameters = ["lenpen", "weight1", "weight2", "weight3"] initial_params = [args.lenpen, args.weight1, args.weight2, args.weight3] for i, elem in enumerate(initial_params): if type(elem) is not list: initial_params[i] = [elem] else: initial_params[i] = elem tune_parameters = args.tune_param.copy() for i in range(len(args.tune_param)): assert args.upper_bound[i] >= args.lower_bound[i] index = tuneable_parameters.index(args.tune_param[i]) del tuneable_parameters[index] del initial_params[index] tune_parameters += tuneable_parameters param_values += initial_params random.seed(args.seed) random_params = np.array( [ [ random.uniform(args.lower_bound[i], args.upper_bound[i]) for i in range(len(args.tune_param)) ] for k in range(args.num_trials) ] ) set_params = np.array( [ [initial_params[i][0] for i in range(len(tuneable_parameters))] for k in range(args.num_trials) ] ) random_params = np.concatenate((random_params, set_params), 1) rerank_args = vars(args).copy() if args.nbest_list: rerank_args["gen_subset"] = "test" else: rerank_args["gen_subset"] = args.tune_subset for k in range(len(tune_parameters)): rerank_args[tune_parameters[k]] = list(random_params[:, k]) if args.share_weights: k = tune_parameters.index("weight2") rerank_args["weight3"] = list(random_params[:, k]) rerank_args = argparse.Namespace(**rerank_args) best_lenpen, best_weight1, best_weight2, best_weight3, best_score = rerank.rerank( rerank_args ) rerank_args = vars(args).copy() rerank_args["lenpen"] = [best_lenpen] rerank_args["weight1"] = [best_weight1] rerank_args["weight2"] = [best_weight2] rerank_args["weight3"] = [best_weight3] # write the hypothesis from the valid set from the best trial if args.gen_subset != "valid": rerank_args["gen_subset"] = "valid" rerank_args = argparse.Namespace(**rerank_args) rerank.rerank(rerank_args) # test with the best hyperparameters on gen subset rerank_args = vars(args).copy() rerank_args["gen_subset"] = args.gen_subset rerank_args["lenpen"] = [best_lenpen] rerank_args["weight1"] = [best_weight1] rerank_args["weight2"] = [best_weight2] rerank_args["weight3"] = [best_weight3] rerank_args = argparse.Namespace(**rerank_args) rerank.rerank(rerank_args) def cli_main(): parser = rerank_options.get_tuning_parser() args = options.parse_args_and_arch(parser) random_search(args) if __name__ == "__main__": cli_main()
EXA-1-master
exa/models/unilm-master/edgelm/examples/noisychannel/rerank_tune.py
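A similarly hedged sketch of the random-search tuning script above, tuning only the length penalty on the valid subset; bounds, trial count, and all paths are placeholders and assume the same reranking setup as the previous sketch.

# Hypothetical hyperparameter search with examples/noisychannel/rerank_tune.py.
from fairseq import options

from examples.noisychannel import rerank_options, rerank_tune


def run_tune_example():
    parser = rerank_options.get_tuning_parser()
    args = options.parse_args_and_arch(
        parser,
        input_args=[
            "data-bin/example",                           # assumed data directory
            "--gen-model", "checkpoints/forward.pt",
            "--gen-model-name", "forward",
            "--score-model1", "checkpoints/backward.pt",
            "--model1-name", "backward",
            "--backwards1",
            "--data-dir-name", "example",
            "--tune-param", "lenpen",
            "--lower-bound", "0.0",
            "--upper-bound", "2.0",
            "--num-trials", "50",
            "--tune-subset", "valid",
        ],
    )
    rerank_tune.random_search(args)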
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .rerank_options import * # noqa
EXA-1-master
exa/models/unilm-master/edgelm/examples/noisychannel/__init__.py