Dataset columns:
  python_code: string (length 0 to 992k)
  repo_name: string (length 8 to 46)
  file_path: string (length 5 to 162)
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0

from typing import Union

from transformers import (AutoTokenizer, PreTrainedTokenizer,
                          PreTrainedTokenizerFast)

Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]

# For consistency with the T5 tokenizer, which this adaptation aims to mimic,
# we hardcode 100 sentinel tokens.
NUM_SENTINEL_TOKENS: int = 100


def adapt_tokenizer_for_denoising(tokenizer: Tokenizer):
    """Adds sentinel tokens and padding token (if missing).

    Expands the tokenizer vocabulary to include sentinel tokens used in
    mixture-of-denoiser tasks as well as a padding token. All added tokens
    are added as special tokens. No tokens are added if sentinel tokens and
    padding token already exist.
    """
    # Add sentinel tokens (e.g., <extra_id_0>, <extra_id_1>, and so on).
    # Has no effect if these are already in the vocab.
    sentinels_to_add = [f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)]
    tokenizer.add_tokens(sentinels_to_add, special_tokens=True)

    # If the padding token has not been set, add <pad> and use it
    if tokenizer.pad_token is None:
        tokenizer.add_tokens('<pad>', special_tokens=True)
        tokenizer.pad_token = '<pad>'
        assert tokenizer.pad_token_id is not None

    # Register a property that gets us the ids of the sentinel tokens
    sentinels = ''.join([f'<extra_id_{i}>' for i in range(NUM_SENTINEL_TOKENS)])
    _sentinel_token_ids = tokenizer(sentinels,
                                    add_special_tokens=False).input_ids
    tokenizer.sentinel_token_ids = _sentinel_token_ids


class AutoTokenizerForMOD(AutoTokenizer):
    """AutoTokenizer + Adaptation for MOD.

    A simple wrapper around AutoTokenizer to make instantiating an
    MOD-adapted tokenizer a bit easier.

    MOD-adapted tokenizers have sentinel tokens (e.g., <extra_id_0>), a
    padding token, and a property to get the token ids of the sentinel
    tokens.
    """

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """See `AutoTokenizer.from_pretrained` docstring."""
        tokenizer = super().from_pretrained(*args, **kwargs)
        adapt_tokenizer_for_denoising(tokenizer)
        return tokenizer
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/models/utils/adapt_tokenizer.py
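For context, a minimal usage sketch of the adapter above. The base tokenizer name (`gpt2`) is an arbitrary illustration, not something the module prescribes:

```python
# Illustrative sketch: adapt an off-the-shelf tokenizer for denoising tasks.
from transformers import AutoTokenizer

from llmfoundry.models.utils.adapt_tokenizer import (
    NUM_SENTINEL_TOKENS, adapt_tokenizer_for_denoising)

tokenizer = AutoTokenizer.from_pretrained('gpt2')  # arbitrary example base tokenizer
adapt_tokenizer_for_denoising(tokenizer)

# The vocab now contains <extra_id_0> ... <extra_id_99> plus a <pad> token,
# and the sentinel token ids are cached on the tokenizer object.
assert len(tokenizer.sentinel_token_ids) == NUM_SENTINEL_TOKENS
print(tokenizer.pad_token, tokenizer.sentinel_token_ids[:3])
```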
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import math import warnings from collections.abc import Sequence from functools import partial from typing import Optional, Tuple, Union import torch from torch import nn from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY def torch_default_param_init_fn_( module: nn.Module, verbose: int = 0, **kwargs, ): del kwargs # unused, just to capture any extra args from the config if verbose > 1: warnings.warn( f"Initializing network using module's reset_parameters attribute") if hasattr(module, 'reset_parameters'): module.reset_parameters() # type: ignore def fused_init_helper_(module: nn.Module, init_fn_): # parameter initialization is often based on the parameters shape. # If a layer is fused, initialization should be based on the shapes # of the original tensor instead of the shape of the fused tensor. # Layers which are fused should have the _fused attibute defined. # The first element of _fused is the dimension along which the tensor is fused. # This is followed by an iterable of split indices." _fused = getattr(module, '_fused', None) if _fused is None: raise RuntimeError(f'Internal logic error') dim, splits = _fused splits = (0, *splits, module.weight.size(dim)) # type: ignore for s, e in zip(splits[:-1], splits[1:]): slice_indices = [slice(None)] * module.weight.ndim # type: ignore slice_indices[dim] = slice(s, e) init_fn_(module.weight[slice_indices]) # type: ignore def generic_param_init_fn_( module: nn.Module, init_fn_, n_layers: int, d_model: Optional[int] = None, init_div_is_residual: Union[int, float, str, bool] = True, emb_init_std: Optional[float] = None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None, verbose: int = 0, **kwargs, ): del kwargs # unused, just to capture any extra args from the config if verbose > 1: warnings.warn( f'If model has bias parameters they are initialized to 0.') # enable user to divide _is_residual weights by # a value which defaults to math.sqrt(2 * cfg.n_layers) init_div_is_residual = init_div_is_residual if init_div_is_residual is False: # not used, for pyright div_is_residual = 1.0 elif init_div_is_residual is True: div_is_residual = math.sqrt(2 * n_layers) elif isinstance(init_div_is_residual, float) or isinstance( init_div_is_residual, int): div_is_residual = init_div_is_residual elif isinstance(init_div_is_residual, str) and init_div_is_residual.isnumeric(): # do not trust YAML parsing to always convert numbers to numbers div_is_residual = float(init_div_is_residual) else: # not used, for pyright div_is_residual = 1.0 raise ValueError( f'Expected init_div_is_residual to be boolean or numeric, got {init_div_is_residual}' ) if init_div_is_residual is not False: if verbose > 1: warnings.warn( f'Initializing _is_residual layers then dividing them by {div_is_residual:.3f}. ' +\ f'Set `init_div_is_residual: false` in init config to disable this.' 
) if isinstance(module, nn.Linear): # Linear if hasattr(module, '_fused'): fused_init_helper_(module, init_fn_) else: init_fn_(module.weight) if module.bias is not None: torch.nn.init.zeros_(module.bias) if init_div_is_residual is not False and getattr( module, '_is_residual', False): with torch.no_grad(): module.weight.div_(div_is_residual) elif isinstance(module, nn.Embedding): # Embedding if emb_init_std is not None: std = emb_init_std if std == 0: warnings.warn(f'Embedding layer initialized to 0.') emb_init_fn_ = partial(torch.nn.init.normal_, mean=0.0, std=std) if verbose > 1: warnings.warn( f'Embedding layer initialized using normal distribution with mean=0 and {std=}.' ) elif emb_init_uniform_lim is not None: lim = emb_init_uniform_lim if isinstance(lim, Sequence): if len(lim) > 2: raise ValueError( f'Uniform init requires a min and a max limit. User input: {lim}.' ) if lim[0] == lim[1]: warnings.warn(f'Embedding layer initialized to {lim[0]}.') else: if lim == 0: warnings.warn(f'Embedding layer initialized to 0.') lim = [-lim, lim] a, b = lim emb_init_fn_ = partial(torch.nn.init.uniform_, a=a, b=b) if verbose > 1: warnings.warn( f'Embedding layer initialized using uniform distribution in range {lim}.' ) else: emb_init_fn_ = init_fn_ emb_init_fn_(module.weight) elif isinstance(module, tuple(set(NORM_CLASS_REGISTRY.values()))): # type: ignore # Norm if verbose > 1: warnings.warn( f'Norm weights are set to 1. If norm layer has a bias it is initialized to 0.' ) if hasattr(module, 'weight') and module.weight is not None: torch.nn.init.ones_(module.weight) # type: ignore if hasattr(module, 'bias') and module.bias is not None: torch.nn.init.zeros_(module.bias) # type: ignore elif isinstance(module, nn.MultiheadAttention): # torch's MultiheadAttention if module._qkv_same_embed_dim: assert module.in_proj_weight is not None assert module.q_proj_weight is None and module.k_proj_weight is None and module.v_proj_weight is None assert d_model is not None # in_proj_weight is actually 3 layers and should be split up for width based init _d = d_model splits = (0, _d, 2 * _d, 3 * _d) for s, e in zip(splits[:-1], splits[1:]): init_fn_(module.in_proj_weight[s:e]) else: assert module.q_proj_weight is not None and module.k_proj_weight is not None and module.v_proj_weight is not None assert module.in_proj_weight is None init_fn_(module.q_proj_weight) init_fn_(module.k_proj_weight) init_fn_(module.v_proj_weight) # bias if module.in_proj_bias is not None: torch.nn.init.zeros_(module.in_proj_bias) if module.bias_k is not None: torch.nn.init.zeros_(module.bias_k) if module.bias_v is not None: torch.nn.init.zeros_(module.bias_v) # out proj init_fn_(module.out_proj.weight) if init_div_is_residual is not False and getattr( module.out_proj, '_is_residual', False): with torch.no_grad(): module.out_proj.weight.div_(div_is_residual) if module.out_proj.bias is not None: torch.nn.init.zeros_(module.out_proj.bias) else: for _ in module.parameters(recurse=False): # raise error if uninitialized module has any parameters raise NotImplementedError( f'{module.__class__.__name__} parameters are not initialized by param_init_fn.' 
) def _normal_init_(std, mean=0.0): return partial(torch.nn.init.normal_, mean=mean, std=std) def _normal_param_init_fn_( module: nn.Module, std: float, n_layers: int, d_model: Optional[int] = None, init_div_is_residual: Union[int, float, str, bool] = True, emb_init_std: Optional[float] = None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None, verbose: int = 0, **kwargs, ): del kwargs # unused, just to capture any extra args from the config init_fn_ = _normal_init_(std=std) if verbose > 1: warnings.warn( f'Using torch.nn.init.normal_ init fn mean=0.0, std={std}') generic_param_init_fn_( module=module, init_fn_=init_fn_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose, ) def baseline_param_init_fn_( module: nn.Module, init_std: float, n_layers: int, d_model: Optional[int] = None, init_div_is_residual: Union[int, float, str, bool] = True, emb_init_std: Optional[float] = None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None, verbose: int = 0, **kwargs, ): del kwargs # unused, just to capture any extra args from the config if init_std is None: raise ValueError( "You must set model.init_config['init_std'] to a float value to use the default initialization scheme." ) _normal_param_init_fn_( module=module, std=init_std, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose, ) def small_param_init_fn_( module: nn.Module, n_layers: int, d_model: int, init_div_is_residual: Union[int, float, str, bool] = True, emb_init_std: Optional[float] = None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None, verbose: int = 0, **kwargs, ): del kwargs # unused, just to capture any extra args from the config # very close to kaiming normal # from Transformers without Tears (2019) - Nguyen & Salazar std = math.sqrt(2 / (5 * d_model)) _normal_param_init_fn_( module=module, std=std, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose, ) def neox_param_init_fn_( module: nn.Module, n_layers: int, d_model: int, emb_init_std: Optional[float] = None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None, verbose: int = 0, **kwargs, ): """From section 2.3.1 of GPT-NeoX-20B: An Open-Source AutoregressiveLanguage Model — Black et. al. 
(2022) see https://github.com/EleutherAI/gpt-neox/blob/9610391ab319403cef079b438edd016a2443af54/megatron/model/init_functions.py#L151 and https://github.com/EleutherAI/gpt-neox/blob/main/megatron/model/transformer.py """ del kwargs # unused, just to capture any extra args from the config residual_div = n_layers / math.sqrt(10) # small std / wang std if verbose > 1: warnings.warn(f'setting init_div_is_residual to {residual_div}') small_param_init_fn_( module=module, d_model=d_model, n_layers=n_layers, init_div_is_residual=residual_div, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose, ) def kaiming_uniform_param_init_fn_( module: nn.Module, n_layers: int, d_model: Optional[int] = None, init_div_is_residual: Union[int, float, str, bool] = True, emb_init_std: Optional[float] = None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None, init_gain: float = 0, fan_mode: str = 'fan_in', init_nonlinearity: str = 'leaky_relu', verbose: int = 0, **kwargs, ): del kwargs # unused, just to capture any extra args from the config if verbose > 1: warnings.warn( f'Using nn.init.kaiming_uniform_ init fn with parameters: ' +\ f'a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}' ) kaiming_uniform_ = partial(nn.init.kaiming_uniform_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity) generic_param_init_fn_( module=module, init_fn_=kaiming_uniform_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose, ) def kaiming_normal_param_init_fn_( module: nn.Module, n_layers: int, d_model: Optional[int] = None, init_div_is_residual: Union[int, float, str, bool] = True, emb_init_std: Optional[float] = None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None, init_gain: float = 0, fan_mode: str = 'fan_in', init_nonlinearity: str = 'leaky_relu', verbose: int = 0, **kwargs, ): del kwargs # unused, just to capture any extra args from the config if verbose > 1: warnings.warn( f'Using nn.init.kaiming_normal_ init fn with parameters: ' +\ f'a={init_gain}, mode={fan_mode}, nonlinearity={init_nonlinearity}' ) kaiming_normal_ = partial(torch.nn.init.kaiming_normal_, a=init_gain, mode=fan_mode, nonlinearity=init_nonlinearity) generic_param_init_fn_( module=module, init_fn_=kaiming_normal_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose, ) def xavier_uniform_param_init_fn_( module: nn.Module, n_layers: int, d_model: Optional[int] = None, init_div_is_residual: Union[int, float, str, bool] = True, emb_init_std: Optional[float] = None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None, init_gain: float = 0, verbose: int = 0, **kwargs, ): del kwargs # unused, just to capture any extra args from the config xavier_uniform_ = partial(torch.nn.init.xavier_uniform_, gain=init_gain) if verbose > 1: warnings.warn( f'Using torch.nn.init.xavier_uniform_ init fn with parameters: ' +\ f'gain={init_gain}' ) generic_param_init_fn_( module=module, init_fn_=xavier_uniform_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose, ) def xavier_normal_param_init_fn_( module: nn.Module, n_layers: int, d_model: Optional[int] = None, init_div_is_residual: Union[int, float, str, bool] = True, 
emb_init_std: Optional[float] = None, emb_init_uniform_lim: Optional[Union[Tuple[float, float], float]] = None, init_gain: float = 0, verbose: int = 0, **kwargs, ): xavier_normal_ = partial(torch.nn.init.xavier_normal_, gain=init_gain) if verbose > 1: warnings.warn( f'Using torch.nn.init.xavier_normal_ init fn with parameters: ' +\ f'gain={init_gain}' ) generic_param_init_fn_( module=module, init_fn_=xavier_normal_, d_model=d_model, n_layers=n_layers, init_div_is_residual=init_div_is_residual, emb_init_std=emb_init_std, emb_init_uniform_lim=emb_init_uniform_lim, verbose=verbose, ) MODEL_INIT_REGISTRY = { 'default_': torch_default_param_init_fn_, 'baseline_': baseline_param_init_fn_, 'kaiming_uniform_': kaiming_uniform_param_init_fn_, 'kaiming_normal_': kaiming_normal_param_init_fn_, 'neox_init_': neox_param_init_fn_, 'small_init_': small_param_init_fn_, 'xavier_uniform_': xavier_uniform_param_init_fn_, 'xavier_normal_': xavier_normal_param_init_fn_, }
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/models/utils/param_init_fns.py
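The initializers registered in `MODEL_INIT_REGISTRY` above are designed to be applied module-by-module, typically via `nn.Module.apply`. A minimal sketch follows; the toy network and the `n_layers`/`d_model` values are arbitrary illustrations, not values mandated by llm-foundry:

```python
# Illustrative sketch of applying a registered param init fn to a module tree.
from functools import partial

import torch.nn as nn

from llmfoundry.models.utils.param_init_fns import MODEL_INIT_REGISTRY

# Toy two-layer network standing in for a transformer block stack.
model = nn.Sequential(nn.Linear(128, 512), nn.GELU(), nn.Linear(512, 128))

# Pick an init scheme from the registry and bind the config-style kwargs.
param_init_fn = partial(MODEL_INIT_REGISTRY['kaiming_normal_'],
                        n_layers=2,
                        d_model=128)

# Each submodule is visited once; modules without their own parameters
# (e.g., nn.GELU, the nn.Sequential container) pass through untouched.
model.apply(param_init_fn)
```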
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0

from llmfoundry.models.utils.adapt_tokenizer import (
    AutoTokenizerForMOD, adapt_tokenizer_for_denoising)
from llmfoundry.models.utils.hf_prefixlm_converter import (
    add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm)
from llmfoundry.models.utils.meta_init_context import init_empty_weights
from llmfoundry.models.utils.param_init_fns import (  # type: ignore
    MODEL_INIT_REGISTRY, generic_param_init_fn_)

__all__ = [
    'AutoTokenizerForMOD',
    'adapt_tokenizer_for_denoising',
    'convert_hf_causal_lm_to_prefix_lm',
    'init_empty_weights',
    'add_bidirectional_mask_if_missing',
    'generic_param_init_fn_',
    'MODEL_INIT_REGISTRY',
]
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/models/utils/__init__.py
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Modified from https://github.com/huggingface/accelerate/blob/main/src/accelerate/big_modeling.py

from contextlib import contextmanager

import torch
import torch.nn as nn


@contextmanager
def init_empty_weights(include_buffers: bool = False):
    """Meta initialization context manager.

    A context manager under which models are initialized with all parameters
    on the meta device, therefore creating an empty model. Useful when just
    initializing the model would blow the available RAM.

    Args:
        include_buffers (`bool`, *optional*, defaults to `False`): Whether or
            not to also put all buffers on the meta device while initializing.

    Example:
    ```python
    import torch.nn as nn

    # Initialize a model with 100 billion parameters in no time and without using any RAM.
    with init_empty_weights():
        tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
    ```

    <Tip warning={true}>

    Any model created under this context manager has no weights. As such you
    can't do something like `model.to(some_device)` with it. To load weights
    inside your empty model, see [`load_checkpoint_and_dispatch`].

    </Tip>
    """
    with init_on_device(torch.device('meta'),
                        include_buffers=include_buffers) as f:
        yield f


@contextmanager
def init_on_device(device: torch.device, include_buffers: bool = False):
    """Device initialization context manager.

    A context manager under which models are initialized with all parameters
    on the specified device.

    Args:
        device (`torch.device`): Device to initialize all parameters on.
        include_buffers (`bool`, *optional*, defaults to `False`): Whether or
            not to also put all buffers on the meta device while initializing.

    Example:
    ```python
    import torch.nn as nn

    with init_on_device(device=torch.device("cuda")):
        tst = nn.Linear(100, 100)  # on `cuda` device
    ```
    """
    old_register_parameter = nn.Module.register_parameter
    if include_buffers:
        old_register_buffer = nn.Module.register_buffer

    def register_empty_parameter(module, name, param):
        old_register_parameter(module, name, param)
        if param is not None:
            param_cls = type(module._parameters[name])
            kwargs = module._parameters[name].__dict__
            module._parameters[name] = param_cls(
                module._parameters[name].to(device), **kwargs)

    def register_empty_buffer(module, name, buffer):
        old_register_buffer(module, name, buffer)
        if buffer is not None:
            module._buffers[name] = module._buffers[name].to(device)

    # Patch tensor creation
    if include_buffers:
        tensor_constructors_to_patch = {
            torch_function_name: getattr(torch, torch_function_name)
            for torch_function_name in ['empty', 'zeros', 'ones', 'full']
        }
    else:
        tensor_constructors_to_patch = {}

    def patch_tensor_constructor(fn):

        def wrapper(*args, **kwargs):
            kwargs['device'] = device
            return fn(*args, **kwargs)

        return wrapper

    try:
        nn.Module.register_parameter = register_empty_parameter  # type: ignore
        if include_buffers:
            nn.Module.register_buffer = register_empty_buffer  # type: ignore
        for torch_function_name in tensor_constructors_to_patch.keys():
            setattr(
                torch, torch_function_name,
                patch_tensor_constructor(getattr(torch, torch_function_name)))
        yield
    finally:
        nn.Module.register_parameter = old_register_parameter
        if include_buffers:
            nn.Module.register_buffer = old_register_buffer
        for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
            setattr(torch, torch_function_name, old_torch_function)
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/models/utils/meta_init_context.py
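A small sketch of how `init_empty_weights` is typically combined with a HuggingFace config; the checkpoint name (`gpt2`) is an arbitrary example:

```python
# Illustrative sketch: build a model skeleton on the meta device.
from transformers import AutoConfig, AutoModelForCausalLM

from llmfoundry.models.utils.meta_init_context import init_empty_weights

config = AutoConfig.from_pretrained('gpt2')  # arbitrary example config

# Parameters are allocated on the meta device, so no weight memory is used
# until they are materialized later (e.g., by FSDP or a checkpoint load).
with init_empty_weights(include_buffers=False):
    model = AutoModelForCausalLM.from_config(config)

print(next(model.parameters()).device)  # meta
```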
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """Converts Huggingface Causal LM to Prefix LM. Conversion does lightweight surgery on a HuggingFace Causal LM to convert it to a Prefix LM. Prefix LMs accepts a `bidirectional_mask` input in `forward` and treat the input prompt as the prefix in `generate`. """ import math import warnings from types import MethodType from typing import Any, Dict, List, Optional, Tuple, Union import torch from transformers.models.bloom.modeling_bloom import ( BaseModelOutputWithPastAndCrossAttentions, BloomForCausalLM, BloomModel, CausalLMOutputWithCrossAttentions, CrossEntropyLoss) from transformers.models.bloom.modeling_bloom import \ _expand_mask as _expand_mask_bloom from transformers.models.bloom.modeling_bloom import \ _make_causal_mask as _make_causal_mask_bloom from transformers.models.bloom.modeling_bloom import logging from transformers.models.gpt2.modeling_gpt2 import GPT2LMHeadModel from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM from transformers.models.gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM from transformers.models.gptj.modeling_gptj import GPTJForCausalLM from transformers.models.opt.modeling_opt import OPTForCausalLM from transformers.models.opt.modeling_opt import \ _expand_mask as _expand_mask_opt from transformers.models.opt.modeling_opt import \ _make_causal_mask as _make_causal_mask_opt logger = logging.get_logger(__name__) _SUPPORTED_GPT_MODELS = ( GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM, ) CAUSAL_GPT_TYPES = Union[GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM,] def _convert_gpt_causal_lm_to_prefix_lm( model: CAUSAL_GPT_TYPES) -> CAUSAL_GPT_TYPES: """Converts a GPT-style Causal LM to a Prefix LM. Supported HuggingFace model classes: - `GPT2LMHeadModel` - `GPTNeoForCausalLM` - `GPTNeoXForCausalLM` - `GPTJForCausalLM` See `convert_hf_causal_lm_to_prefix_lm` for more details. """ if hasattr(model, '_prefix_lm_converted'): return model assert isinstance(model, _SUPPORTED_GPT_MODELS) assert model.config.add_cross_attention == False, 'Only supports GPT-style decoder-only models' def _get_attn_modules(model: CAUSAL_GPT_TYPES) -> List[torch.nn.Module]: """Helper that gets a list of the model's attention modules. Each module has a `bias` buffer used for causal masking. The Prefix LM conversion adds logic to dynamically manipulate these biases to support Prefix LM attention masking. 
""" attn_modules = [] if isinstance(model, GPTNeoXForCausalLM): blocks = model.gpt_neox.layers else: blocks = model.transformer.h for block in blocks: # type: ignore if isinstance(model, GPTNeoForCausalLM): # Ignore "local" layers in this model type if block.attn.attention_type != 'global': continue attn_module = block.attn.attention elif isinstance(model, GPTNeoXForCausalLM): attn_module = block.attention else: attn_module = block.attn attn_modules.append(attn_module) return attn_modules # Rename methods to allow: # - new `forward` to wrap original `forward` # - new `generate` to wrap original `generate` setattr(model, '_original_forward', getattr(model, 'forward')) setattr(model, '_original_generate', getattr(model, 'generate')) def forward( self: CAUSAL_GPT_TYPES, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, bidirectional_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): """Wraps original forward to enable PrefixLM attention.""" def call_og_forward(): if isinstance(self, GPTNeoXForCausalLM): return self._original_forward( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) else: return self._original_forward( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if bidirectional_mask is None: # This wrapper is a no-op if bidirectional masks are not supplied return call_og_forward() # type: ignore assert isinstance(bidirectional_mask, torch.Tensor) attn_modules = _get_attn_modules(model) # Handle bidirectional_mask sizing # Note: all attn_modules.bias have the same size b, s = bidirectional_mask.shape max_length = attn_modules[0].bias.shape[-1] # type: ignore if s > max_length: raise ValueError( f'bidirectional_mask sequence length (={s}) exceeds the ' +\ f'max length allowed by the model ({max_length}).' 
) assert s <= max_length if s < max_length: pad = torch.zeros((int(b), int(max_length - s)), dtype=bidirectional_mask.dtype, device=bidirectional_mask.device) bidirectional_mask = torch.cat([bidirectional_mask, pad], dim=1) bidirectional = bidirectional_mask.unsqueeze(1).unsqueeze(1) # Incorporate the bidirectional mask into the original causal mask for attn_module in attn_modules: attn_module.bias.data = torch.logical_or( attn_module.bias.data, bidirectional) # type: ignore # Collect outputs using the model's original forward method output = call_og_forward() # Reset the masks for attn_module in attn_modules: attn_module.bias.data = torch.tril( attn_module.bias.data[0, 0])[None, None] # type: ignore # Return the outputs return output def generate(self: CAUSAL_GPT_TYPES, *args: tuple, **kwargs: Dict[str, Any]): """Wraps original generate to enable PrefixLM attention.""" attn_modules = _get_attn_modules(model) # A convenient answer to PrefixLM generation is to set the causal mask # to be bidirectional. All the tokens in the input prompt can attend to # one another and, since tokens are generated one-by-one, each new # token gets to see everything behind it. This depends on activations # being cached and not updated, which is how the HF implementation works. for attn_module in attn_modules: attn_module.bias.data[:] = 1 # type: ignore # Collect outputs using the model's original forward method output = self._original_generate(*args, **kwargs) # type: ignore # Reset the masks for attn_module in attn_modules: attn_module.bias.data = torch.tril( attn_module.bias.data[0, 0])[None, None] # type: ignore # Return the outputs return output # Replace `forward` and `generate` with the new wrappers setattr(model, 'forward', MethodType(forward, model)) setattr(model, 'generate', MethodType(generate, model)) # Finally, tag the model so that this conversion cannot happen again. setattr(model, '_prefix_lm_converted', True) return model def _convert_bloom_causal_lm_to_prefix_lm( model: BloomForCausalLM) -> BloomForCausalLM: """Converts a BLOOM Causal LM to a Prefix LM. Supported HuggingFace model classes: - `BloomForCausalLM` See `convert_hf_causal_lm_to_prefix_lm` for more details. 
""" if hasattr(model, '_prefix_lm_converted'): return model assert isinstance(model, BloomForCausalLM) assert model.config.add_cross_attention == False, 'Only supports BLOOM decoder-only models' # Modified from transformers.models.bloom.modeling_bloom.BloomModel._prepare_attn_mask # https://github.com/huggingface/transformers/blob/v4.25.1/src/transformers/models/bloom/modeling_bloom.py#L648 def _prepare_attn_mask( self: BloomModel, attention_mask: torch.Tensor, bidirectional_mask: Optional[torch.Tensor], input_shape: Tuple[int, int], past_key_values_length: int, ) -> torch.BoolTensor: # create causal mask # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length] combined_attention_mask = None device = attention_mask.device _, src_length = input_shape if src_length > 1: combined_attention_mask = _make_causal_mask_bloom( input_shape, device=device, past_key_values_length=past_key_values_length) # Make use of the batch-specific `bidirectional_mask` attribute set # by the parent module in its (new) `forward` method wrapper if bidirectional_mask is not None: # The two masks should have the same size assert attention_mask.shape == bidirectional_mask.shape # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length] expanded_bidirectional_mask = _expand_mask_bloom( bidirectional_mask, tgt_length=src_length) combined_attention_mask = torch.logical_and( combined_attention_mask, expanded_bidirectional_mask) # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length] expanded_attn_mask = _expand_mask_bloom(attention_mask, tgt_length=src_length) combined_attention_mask = (expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask) return combined_attention_mask # Modified from transformers.models.bloom.modeling_bloom._prepare_alibi_transformer # https://github.com/huggingface/transformers/blob/v4.25.1/src/transformers/models/bloom/modeling_bloom.py#L87 def _build_alibi_tensor( self: BloomModel, batch_size: int, query_length: int, key_length: int, dtype: torch.dtype, device: torch.device, ) -> torch.Tensor: num_heads = self.config.n_head closest_power_of_2 = 2**math.floor(math.log2(num_heads)) base = torch.tensor(2**(-(2**-(math.log2(closest_power_of_2) - 3))), device=device, dtype=torch.float32) powers = torch.arange(1, 1 + closest_power_of_2, device=device, dtype=torch.int32) slopes = torch.pow(base, powers) if closest_power_of_2 != num_heads: extra_base = torch.tensor( 2**(-(2**-(math.log2(2 * closest_power_of_2) - 3))), device=device, dtype=torch.float32) num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=device, dtype=torch.int32) slopes = torch.cat( [slopes, torch.pow(extra_base, extra_powers)], dim=0) qa = torch.arange(query_length, device=device, dtype=torch.int32).view(-1, 1) ka = torch.arange(key_length, device=device, dtype=torch.int32).view(1, -1) diffs = qa - ka + key_length - query_length diffs = -diffs.abs() alibi = slopes.view(1, num_heads, 1, 1) * diffs.view( 1, 1, query_length, key_length) alibi = alibi.expand(batch_size, -1, -1, -1).reshape(-1, query_length, key_length) return alibi.to(dtype) # Modified from transformers.models.bloom.modeling_bloom.BloomModel.forward # Note: The modified code is surrounded with #### START/END #### comments # and one new argument (`bidirectional_mask`) is added to the signature. 
KeyValueT = Tuple[torch.Tensor, torch.Tensor] def forward( # type: ignore self: BloomModel, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[KeyValueT, ...]] = None, attention_mask: Optional[torch.Tensor] = None, bidirectional_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **deprecated_arguments) -> Union[Tuple[ torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]: if deprecated_arguments.pop('position_ids', False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so # defaulting pop to `False` allows to detect if users were # passing explicitly `None` warnings.warn( '`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. ' +\ 'You can safely ignore passing `position_ids`.', FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError( f'Got unexpected arguments: {deprecated_arguments}') output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = (output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError( 'You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError( 'You have to specify either input_ids or inputs_embeds') if past_key_values is None: past_key_values = tuple([None] * len(self.h)) # type: ignore # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape batch_size x num_heads x N x N # head_mask has shape n_layer x batch x num_heads x N x N head_mask = self.get_head_mask(head_mask, self.config.n_layer) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) hidden_states = self.word_embeddings_layernorm(inputs_embeds) presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None # Compute alibi tensor: check build_alibi_tensor documentation seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values[0] is not None: # type: ignore tmp = past_key_values[0][0] # type: ignore past_key_values_length = tmp.shape[2] # type: ignore seq_length_with_past = seq_length_with_past + past_key_values_length if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device) else: attention_mask = attention_mask.to(hidden_states.device) ##### ALL NON-SIGNATURE MODIFICATIONS ARE CONTAINED TO THIS BLOCK [STARTS HERE] ##### alibi = self._build_alibi_tensor( batch_size=batch_size, query_length=seq_length, key_length=seq_length_with_past, dtype=hidden_states.dtype, device=hidden_states.device, ) causal_mask = self._prepare_attn_mask( attention_mask, bidirectional_mask, input_shape=(batch_size, seq_length), past_key_values_length=past_key_values_length, ) ##### ALL NON-SIGNATURE MODIFICATIONS ARE CONTAINED TO THIS BLOCK [ENDS HERE] ##### 
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): # type: ignore if output_hidden_states: hst = (hidden_states,) all_hidden_states = all_hidden_states + hst # type: ignore if self.gradient_checkpointing and self.training: if use_cache: logger.warning( '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...' ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, use_cache=use_cache, output_attentions=output_attentions) return custom_forward outputs = torch.utils.checkpoint.checkpoint( # type: ignore create_custom_forward(block), hidden_states, alibi, causal_mask, head_mask[i], # type: ignore ) else: outputs = block( hidden_states, layer_past=layer_past, attention_mask=causal_mask, head_mask=head_mask[i], # type: ignore use_cache=use_cache, output_attentions=output_attentions, alibi=alibi, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) # type: ignore if output_attentions: oa = (outputs[2 if use_cache else 1],) # type: ignore all_self_attentions = all_self_attentions + oa # type: ignore # Add last hidden state hidden_states = self.ln_f(hidden_states) if output_hidden_states: hst = (hidden_states,) all_hidden_states = all_hidden_states + hst # type: ignore if not return_dict: return tuple(v for v in [ hidden_states, presents, all_hidden_states, all_self_attentions ] if v is not None) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, ) # Make it so model.transformer has the new helper methods and new # `forward` method setattr(model.transformer, '_prepare_attn_mask', MethodType(_prepare_attn_mask, model.transformer)) setattr(model.transformer, '_build_alibi_tensor', MethodType(_build_alibi_tensor, model.transformer)) setattr(model.transformer, 'forward', MethodType(forward, model.transformer)) # In order to actually use the new argument we've added to # model.transformer, we need to update the parent module's `forward` to # accept/pass the same new argument. # We add 2 lines to handle that change. # Both lines are tagged with "# WE'RE ADDING A NEW ARGUMENT!" KeyValueT = Tuple[torch.Tensor, torch.Tensor] def forward( self: BloomForCausalLM, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[KeyValueT, ...]] = None, attention_mask: Optional[torch.Tensor] = None, # WE'RE ADDING A NEW ARGUMENT! (Change 1/2) bidirectional_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **deprecated_arguments ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: """Replacement forward method for BloomCausalLM.""" if deprecated_arguments.pop('position_ids', False) is not False: # `position_ids` could have been `torch.Tensor` or `None` so # defaulting pop to `False` allows to detect if users were passing # explicitly `None` warnings.warn( '`position_ids` have no functionality in BLOOM and will be removed ' +\ 'in v5.0.0. 
You can safely ignore passing `position_ids`.', FutureWarning, ) if len(deprecated_arguments) > 0: raise ValueError( f'Got unexpected arguments: {deprecated_arguments}') return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, past_key_values=past_key_values, attention_mask=attention_mask, # WE'RE ADDING A NEW ARGUMENT! (Change 2/2) bidirectional_mask=bidirectional_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = transformer_outputs[0] lm_logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = lm_logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() batch_size, seq_length, vocab_size = shift_logits.shape # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct( shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)) if not return_dict: output = (lm_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=lm_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) # To handle generation, re-write `prepare_inputs_for_generation` to # implement the bidirectional logic. def prepare_inputs_for_generation(self: BloomForCausalLM, input_ids: torch.LongTensor, past: Optional[torch.Tensor] = None, attention_mask: Optional[ torch.Tensor] = None, **kwargs) -> dict: # only last token for input_ids if past is not None if past: input_ids = input_ids[:, -1].unsqueeze(-1) # type: ignore # We can turn off bidirectional masking after the prefix # has been encoded into `past` bidirectional_mask = None # the cache may be in the stardard format (e.g. in contrastive # search), convert to bloom's format if needed if past[0][0].shape[0] == input_ids.shape[0]: past = self._convert_to_bloom_cache(past) else: # If we're here, `input_ids` contains the prefix. Encode it with # bidirectional attention. bidirectional_mask = torch.ones_like(input_ids) return { 'input_ids': input_ids, 'past_key_values': past, # "use_cache": kwargs.get("use_cache"), # Requires this. TODO(Alex): Confirm this supports other decoding strategies. 'use_cache': True, 'attention_mask': attention_mask, 'bidirectional_mask': bidirectional_mask, } # Register the new `forward` and `prepare_inputs_for_generation` methods # with the model setattr(model, 'forward', MethodType(forward, model)) setattr(model, 'prepare_inputs_for_generation', MethodType(prepare_inputs_for_generation, model)) # Finally, tag the model so that this conversion cannot happen again. setattr(model, '_prefix_lm_converted', True) return model def _convert_opt_causal_lm_to_prefix_lm( model: OPTForCausalLM) -> OPTForCausalLM: """Converts an OPT Causal LM to a Prefix LM. Supported HuggingFace model classes: - `OPTForCausalLM` See `convert_hf_causal_lm_to_prefix_lm` for more details. 
""" if hasattr(model, '_prefix_lm_converted'): return model assert isinstance(model, OPTForCausalLM) assert model.config.add_cross_attention == False, 'Only supports OPT decoder-only models' # Rename methods to allow: # - new `forward` to wrap original `forward` # - new `generate` to wrap original `generate` setattr(model, '_original_forward', getattr(model, 'forward')) setattr(model, '_original_generate', getattr(model, 'generate')) model.model.decoder.bidirectional_mask = None # Modified from transformers.models.bloom.modeling_opt.OPTDecoder._prepare_decoder_attn_mask # https://github.com/huggingface/transformers/blob/v4.25.1/src/transformers/models/opt/modeling_opt.py#L532 def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: # 'g' indicates generation mode. Causal mask replaced with 0. if self.bidirectional_mask == 'g': bsz, src_length = input_shape combined_attention_mask = torch.zeros( (bsz, 1, src_length, src_length + past_key_values_length), dtype=inputs_embeds.dtype, device=inputs_embeds.device) else: combined_attention_mask = _make_causal_mask_opt( input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length).to( inputs_embeds.device) # Make use of the batch-specific `bidirectional_mask` attribute # set by the parent module in its (new) `forward` method wrapper if self.bidirectional_mask is not None: # The two masks should have the same size assert attention_mask.shape == self.bidirectional_mask.shape # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length] expanded_bidirectional_mask = _expand_mask_opt( self.bidirectional_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to(inputs_embeds.device) combined_attention_mask = torch.maximum( expanded_bidirectional_mask, combined_attention_mask) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask_opt(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device) combined_attention_mask = (expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask) return combined_attention_mask # Make it so model.model.decoder uses the above `_prepare_decoder_attn_mask` # in place of the original method setattr(model.model.decoder, '_prepare_decoder_attention_mask', MethodType(_prepare_decoder_attention_mask, model.model.decoder)) def forward( self: OPTForCausalLM, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, bidirectional_mask: Optional[torch.ByteTensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): def call_og_forward(): return self._original_forward( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, labels=labels, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if bidirectional_mask is None: # This wrapper is a no-op if bidirectional masks are not supplied return 
call_og_forward() # Temporarily set `bidirectional_mask` in the child module self.model.decoder.bidirectional_mask = bidirectional_mask # Apply the original forward method (the model will use the mask that # was just set) try: outputs = call_og_forward() except: self.model.decoder.bidirectional_mask = None raise # Reset the `bidirectional_mask` attribute to None self.model.decoder.bidirectional_mask = None # Return the outputs return outputs def generate(self: OPTForCausalLM, *args: tuple, **kwargs: Dict[str, Any]): """Wraps original generate to enable PrefixLM-style attention.""" # Flag the child module to use generation-style attention masking self.model.decoder.bidirectional_mask = 'g' # Collect outputs using the model's original forward method try: output = self._original_generate(*args, **kwargs) except: self.model.decoder.bidirectional_mask = None raise # Reset the `bidirectional_mask` attribute to None self.model.decoder.bidirectional_mask = None # Return the output return output # Replace `forward` and `generate` with the new wrappers setattr(model, 'forward', MethodType(forward, model)) setattr(model, 'generate', MethodType(generate, model)) # Finally, tag the model so that this conversion cannot happen again. setattr(model, '_prefix_lm_converted', True) return model _SUPPORTED_HF_MODELS = _SUPPORTED_GPT_MODELS + (BloomForCausalLM, OPTForCausalLM) CAUSAL_LM_TYPES = Union[GPT2LMHeadModel, GPTJForCausalLM, GPTNeoForCausalLM, GPTNeoXForCausalLM, BloomForCausalLM, OPTForCausalLM] def convert_hf_causal_lm_to_prefix_lm( model: CAUSAL_LM_TYPES) -> CAUSAL_LM_TYPES: """Converts a HuggingFace Causal LM to a Prefix LM. Supported HuggingFace model classes: - `GPT2LMHeadModel` - `GPTNeoForCausalLM` - `GPTNeoXForCausalLM` - `GPTJForCausalLM` - `BloomForCausalLM` - `OPTForCausalLM` Conversion to a Prefix LM is done by modifying the `forward` method, and possibly also the `generate` method and/or select underlying methods depending on the model class. These changes preserve the model API, but add a new input to `forward`: "bidirectional_mask". Notes on training: To actually train the converted model as a Prefix LM, training batches will need to indicate the prefix/target structure by including `bidirectional_mask` as part of the batch inputs. **This is not a standard input and requires custom layers either within or after your dataloader.** In addition to adding `bidirectional_mask` to the batch, this custom code should modify `labels` such that `batch['labels'][batch['bidirectional_mask'] == 1] == -100`. That is, the prefix portion of the sequence should not generate any loss. Loss should only be generated by the target portion of the sequence. Notes on `GPTNeoForCausalLM`: To simplify the implementation, "global" and "local" attention layers are handled differently. For "global" layers, we handle conversion as described above. For "local" layers, which use a causal attention mask within a restricted local window, we do not alter the masking. Notes on `forward` method conversion: After conversion, the `forward` method will handle a new input, `bidirectional_mask`, which should be a [batch_size, seq_length] byte tensor, where 1 indicates token positions belonging to the prefix (prefix tokens can attend to one another bidirectionally), and 0 indicates token positions belonging to the target. 
The new `forward` method will incorporate `bidirectional_mask` (if supplied) into the existing causal mask, call the original `forward` method, and (if the causal mask is a buffer) reset the causal masks before returning the result. Notes on `generate` method conversion: After conversion, the `generate` method will have the same signature but will internally convert all causal masks to be purely bidirectional, call the original `generate` method, and (where appropriate) reset the causal masks before returning the result. This works thanks to the logic of the HuggingFace `generate` API, which first encodes the token "prompt" passed to `generate` (which is treated as the prefix) and then sequentially generates each new token. Encodings are cached as generation happens, so all prefix tokens can attend to one another (as expected in a Prefix LM) and generated tokens can only attend to prefix tokens and previously-generated tokens (also as expected in a Prefix LM). To preserve the API, the original methods are renamed to `_original_forward` and `_original_generate`, and replaced with new `forward` and `generate` methods that wrap them, respectively. Although implementation details vary by model class. """ if isinstance(model, _SUPPORTED_GPT_MODELS): return _convert_gpt_causal_lm_to_prefix_lm(model) elif isinstance(model, BloomForCausalLM): return _convert_bloom_causal_lm_to_prefix_lm(model) elif isinstance(model, OPTForCausalLM): return _convert_opt_causal_lm_to_prefix_lm(model) else: raise TypeError( f'Cannot convert model to Prefix LM. ' +\ f'Model does not belong to set of supported HF models:' +\ f'\n{_SUPPORTED_HF_MODELS}' ) def add_bidirectional_mask_if_missing(batch: Dict[str, Any]): """Attempts to add bidirectional_mask to batch if missing. Raises: KeyError if bidirectional_mask is missing and can't be inferred """ if 'bidirectional_mask' not in batch: if batch.get('mode', None) == 'icl_task': batch['bidirectional_mask'] = batch['attention_mask'].clone() for i, continuation_indices in enumerate( batch['continuation_indices']): batch['bidirectional_mask'][i, continuation_indices] = 0 elif ('labels' in batch) and ('attention_mask' in batch): batch['bidirectional_mask'] = torch.logical_and( torch.eq(batch['attention_mask'], 1), torch.eq(batch['labels'], -100), ).type_as(batch['attention_mask']) else: raise KeyError( 'No bidirectional_mask in batch and not sure how to construct one.' )
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/models/utils/hf_prefixlm_converter.py
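A hedged end-to-end sketch of the two public utilities defined above. The checkpoint name, prompt, and prefix split are placeholders chosen for illustration, not values mandated by the converter:

```python
# Illustrative sketch: convert a supported Causal LM to a Prefix LM and
# run one forward pass with a bidirectional prefix mask.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

from llmfoundry.models.utils.hf_prefixlm_converter import (
    add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm)

# Any supported class works; GPT-2 is used purely as an example.
model = convert_hf_causal_lm_to_prefix_lm(
    AutoModelForCausalLM.from_pretrained('gpt2'))
tokenizer = AutoTokenizer.from_pretrained('gpt2')

batch = tokenizer(['Translate to French: cheese'], return_tensors='pt')
labels = batch['input_ids'].clone()
prefix_len = 4  # arbitrary split: treat the first 4 tokens as the prefix
labels[:, :prefix_len] = -100  # no loss on the prefix portion
batch['labels'] = labels

# Derives bidirectional_mask from attention_mask and the -100 label positions.
add_bidirectional_mask_if_missing(batch)

# The converted `forward` accepts the extra `bidirectional_mask` input.
with torch.no_grad():
    out = model(**batch)
print(out.loss)
```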
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 from llmfoundry.models.hf.hf_causal_lm import ComposerHFCausalLM from llmfoundry.models.hf.hf_fsdp import (prepare_hf_causal_lm_model_for_fsdp, prepare_hf_enc_dec_model_for_fsdp, prepare_hf_model_for_fsdp) from llmfoundry.models.hf.hf_prefix_lm import ComposerHFPrefixLM from llmfoundry.models.hf.hf_t5 import ComposerHFT5 __all__ = [ 'ComposerHFCausalLM', 'ComposerHFPrefixLM', 'ComposerHFT5', 'prepare_hf_causal_lm_model_for_fsdp', 'prepare_hf_enc_dec_model_for_fsdp', 'prepare_hf_model_for_fsdp', ]
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/models/hf/__init__.py
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0

"""Implements a Hugging Face Causal LM wrapped inside a :class:`.ComposerModel`."""

from typing import Union

from composer.metrics.nlp import (InContextLearningLMAccuracy,
                                  InContextLearningLMExpectedCalibrationError,
                                  InContextLearningMCExpectedCalibrationError,
                                  InContextLearningMultipleChoiceAccuracy,
                                  InContextLearningQAAccuracy,
                                  LanguageCrossEntropy, LanguagePerplexity)
from omegaconf import DictConfig
from transformers import (AutoConfig, AutoModelForCausalLM,
                          PreTrainedTokenizer, PreTrainedTokenizerFast)

from llmfoundry.models.hf.model_wrapper import HuggingFaceModelWithZLoss
from llmfoundry.models.utils import init_empty_weights

__all__ = ['ComposerHFCausalLM']

Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]


class ComposerHFCausalLM(HuggingFaceModelWithZLoss):
    """Configures a :class:`.HuggingFaceModel` around a Causal LM.

    Args:
        cfg (DictConfig): An omegaconf dictionary used to configure the model:
            cfg.pretrained_model_name_or_path (str): The name of or local path to
                the HF Causal LM (e.g., `gpt2` to instantiate a GPT2LMHeadModel).
            cfg.config_overrides (dict, optional): An optional dictionary of keyword
                arguments that override the default configuration associated with
                cfg.pretrained_model_name_or_path.
            cfg.pretrained (bool): Whether to instantiate the model with pre-trained
                weights coming from cfg.pretrained_model_name_or_path. If ``True``,
                cfg.config_overrides must be compatible with the pre-trained weights.
            cfg.init_device ('cpu' | 'meta'): Which device, 'cpu' or 'meta', to
                initialize the model on. Currently, `meta` is only supported when
                cfg.pretrained is ``False``. Default: ``'cpu'``.
            cfg.add_exact_match (bool, optional): CURRENTLY UNUSED. Whether to add
                ExactMatch metric used in some fine-tuning settings. Default: ``False``.
            cfg.add_rouge (bool, optional): CURRENTLY UNUSED. Whether to add
                RougeWithDetokenizer metric to validation metrics. Default: ``False``.
    """

    def __init__(self, om_model_config: DictConfig, tokenizer: Tokenizer):
        config = AutoConfig.from_pretrained(
            om_model_config.pretrained_model_name_or_path,
            trust_remote_code=om_model_config.get('trust_remote_code', True),
            use_auth_token=om_model_config.get('use_auth_token', False),
            **om_model_config.get('config_overrides', {}))

        train_metrics = [
            LanguageCrossEntropy(len(tokenizer)),
            LanguagePerplexity(len(tokenizer)),
        ]
        eval_metrics = [
            LanguageCrossEntropy(len(tokenizer)),
            LanguagePerplexity(len(tokenizer)),
            InContextLearningLMAccuracy(),
            InContextLearningMultipleChoiceAccuracy(),
            InContextLearningQAAccuracy(),
            InContextLearningLMExpectedCalibrationError(),
            InContextLearningMCExpectedCalibrationError()
        ]

        init_device = om_model_config.get('init_device', 'cpu')
        if init_device == 'cpu':
            if om_model_config.pretrained:
                model = AutoModelForCausalLM.from_pretrained(
                    om_model_config.pretrained_model_name_or_path,
                    trust_remote_code=om_model_config.get(
                        'trust_remote_code', True),
                    use_auth_token=om_model_config.get('use_auth_token',
                                                       False),
                    config=config)
            else:
                model = AutoModelForCausalLM.from_config(config)
        elif init_device == 'meta':
            if om_model_config.pretrained:
                raise ValueError(
                    'Setting cfg.pretrained=True is not supported when init_device="meta".'
                )
            with init_empty_weights(include_buffers=False):
                model = AutoModelForCausalLM.from_config(config)
        else:
            raise ValueError(
                f'init_device="{init_device}" must be either "cpu" or "meta".')

        # if cfg.add_exact_match:
        #     metrics.append(ExactMatch(ignore_index=_HF_IGNORE_INDEX))

        composer_model = super().__init__(model=model,
                                          tokenizer=tokenizer,
                                          metrics=train_metrics,
                                          eval_metrics=eval_metrics,
                                          z_loss=om_model_config.get(
                                              'z_loss', 0.0))

        # if cfg.add_rouge:
        #     rouge_metric = RougeWithDetokenizer(detokenizer=tokenizer)
        #     composer_model.val_metrics[RougeWithDetokenizer.__name__] = rouge_metric

        return composer_model
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/models/hf/hf_causal_lm.py
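A configuration sketch for the wrapper above; the field values are placeholders and the model/tokenizer choice (`gpt2`) is an arbitrary example, following the docstring's `cfg.*` fields:

```python
# Illustrative sketch: instantiate ComposerHFCausalLM from an omegaconf dict.
from omegaconf import OmegaConf
from transformers import AutoTokenizer

from llmfoundry.models.hf.hf_causal_lm import ComposerHFCausalLM

om_model_config = OmegaConf.create({
    'pretrained_model_name_or_path': 'gpt2',  # placeholder model
    'pretrained': True,
    'init_device': 'cpu',
    'z_loss': 0.0,
})

tokenizer = AutoTokenizer.from_pretrained('gpt2')
model = ComposerHFCausalLM(om_model_config, tokenizer)
```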
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0

"""Re-usable :class:`.ComposerModel` for LLM HF Models."""

from __future__ import annotations

import inspect
from collections import UserDict
from typing import List, Optional, Union

import torch
import transformers
from composer.models.huggingface import HuggingFaceModel
from torchmetrics import Metric
from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast

from llmfoundry.models.hf.hf_fsdp import prepare_hf_model_for_fsdp

# HuggingFace hardcodes the ignore index to -100
_HF_IGNORE_INDEX = -100

Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]


class HuggingFaceModelWithZLoss(HuggingFaceModel):
    """Wrapper around HuggingFaceModel.

    This adds z-loss, which is used in some training contexts, and is a
    convenient way to patch features that are generically useful for HF models.

    See use of z_loss in PaLM: https://arxiv.org/abs/2204.02311v3, Section 5.
    Also, from https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666:
        Two uses of z_loss are:
        - To keep the logits from drifting too far from zero, which can cause
          unacceptable roundoff errors in bfloat16.
        - To encourage the logits to be normalized log-probabilities.

    Handles preparation for FSDP wrapping.
    """

    def __init__(self,
                 model: transformers.PreTrainedModel,
                 tokenizer: Optional[Tokenizer] = None,
                 metrics: Optional[List[Metric]] = None,
                 eval_metrics: Optional[List[Metric]] = None,
                 z_loss: float = 0.0):
        super().__init__(model,
                         tokenizer,
                         use_logits=True,
                         metrics=metrics,
                         eval_metrics=eval_metrics)
        self.z_loss = float(z_loss)
        if self.z_loss < 0.0:
            raise ValueError(f'z_loss(={z_loss}) cannot be negative.')

        self.model_forward_args = inspect.getfullargspec(
            self.model.forward).args

        # Note: We need to add the FSDP related attributes to the model AFTER the super init,
        # so that the (possible) embedding resizing doesn't destroy them
        prepare_hf_model_for_fsdp(self.model)

        # This provides support for meta initialization when using FSDP
        self.model.param_init_fn = lambda module: self.model._init_weights(
            module)

    def forward(self, batch):
        if isinstance(batch, dict) or isinstance(batch, UserDict):
            # Further input validation is left to the huggingface forward call
            batch = {
                k: v for k, v in batch.items() if k in self.model_forward_args
            }
            output = self.model(**batch)  # type: ignore (thirdparty)
        else:
            raise ValueError(
                'Unexpected batch type. Expected a dictionary with keys corresponding to the inputs to the forward function of the Huggingface model'
            )
        return output

    def loss(self, outputs, batch):
        if self.config.use_return_dict:
            loss, logits = outputs['loss'], outputs['logits']
        else:
            # loss is at index 0 in the output tuple, logits are at index 1
            loss, logits = outputs[:2]

        if self.z_loss == 0.0:
            return loss

        # Add a z_loss to the standard loss
        logits_flat = logits.view(-1, logits.size(-1))
        labels_flat = batch['labels'].view(-1)
        log_z = torch.logsumexp(logits_flat[labels_flat != _HF_IGNORE_INDEX],
                                dim=1)
        log_z2 = log_z**2
        z_loss = log_z2.mean() * self.z_loss
        if self.config.use_return_dict:
            outputs['loss'] += z_loss
            return outputs['loss']
        else:
            outputs[0] += z_loss
            return outputs[0]

    # def eval_forward(self, batch, outputs: Optional[Any] = None):
    #     if 'generate_output' in batch:
    #         self.labels = batch.pop('labels')
    #         return self.model.generate(
    #             batch['input_ids'],
    #             attention_mask=batch['attention_mask'],
    #             max_new_tokens=512,
    #             do_sample=True,
    #             top_p=0.90,
    #             top_k=0,
    #             no_repeat_ngram_size=3,
    #         )

    #     return super().eval_forward(batch, outputs)
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/models/hf/model_wrapper.py
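The z-loss added by `HuggingFaceModelWithZLoss.loss` is the mean squared log-partition function over non-ignored positions, scaled by the coefficient. The stand-alone sketch below (not from the source) reproduces that term on dummy tensors:

```python
# Self-contained sketch of the z-loss term computed in HuggingFaceModelWithZLoss.loss.
import torch


def z_loss_term(logits: torch.Tensor, labels: torch.Tensor,
                coefficient: float = 1e-4, ignore_index: int = -100) -> torch.Tensor:
    """Penalize the squared log-partition function at non-ignored label positions."""
    logits_flat = logits.view(-1, logits.size(-1))   # (batch * seq, vocab)
    labels_flat = labels.view(-1)                    # (batch * seq,)
    log_z = torch.logsumexp(logits_flat[labels_flat != ignore_index], dim=1)
    return coefficient * (log_z ** 2).mean()


# Example: batch of 2 sequences of length 5 over an 11-token vocabulary.
logits = torch.randn(2, 5, 11)
labels = torch.randint(0, 11, (2, 5))
labels[0, :2] = -100  # pretend the first two tokens are prompt/padding
print(z_loss_term(logits, labels))
```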
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """Implements a Hugging Prefix LM wrapped inside a :class:`.ComposerModel`.""" from __future__ import annotations from composer.metrics.nlp import LanguageCrossEntropy, MaskedAccuracy from omegaconf import DictConfig from omegaconf import OmegaConf as om from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer from llmfoundry.models.hf.model_wrapper import HuggingFaceModelWithZLoss from llmfoundry.models.utils import (AutoTokenizerForMOD, add_bidirectional_mask_if_missing, convert_hf_causal_lm_to_prefix_lm, init_empty_weights) __all__ = ['ComposerHFPrefixLM'] # HuggingFace hardcodes the ignore index to -100 _HF_IGNORE_INDEX = -100 class ComposerHFPrefixLM(HuggingFaceModelWithZLoss): """Configures a :class:`.HuggingFaceModel` around a Prefix LM. Note: HuggingFace does not natively support Prefix LM-style models. This function uses `transformers.AutoModelForCausalLM` to instantiate a Causal LM, then uses a conversion utility to turn the model into a Prefix LM. Currently, that conversion utility only supports the following HuggingFace Causal LM types: - `GPT2LMHeadModel` - `GPTNeoForCausalLM` - `GPTNeoXForCausalLM` - `GPTJForCausalLM` - `BloomForCausalLM` - `OPTForCausalLM` Args: cfg (DictConfig): An omegaconf dictionary used to configure the model: cfg.pretrained_model_name_or_path (str): The name of or local path to the HF model (e.g., `gpt2` to instantiate a GPT2LMHeadModel). The model will be converted to a Prefix LM during initialization. cfg.config_overrides (dict, optional): An optional dictionary of keyword arguments that override the default configuration associated with cfg.pretrained_model_name_or_path. Default: ``{}``. cfg.pretrained (bool): Whether to instantiate the model with pre-trained weights coming from cfg.pretrained_model_name_or_path. If ``True``, cfg.config_overrides must be compatible with the pre-trained weights. cfg.init_device ('cpu' | 'meta'): Which device, 'cpu' or 'meta', to initialize the model on. Currently, `meta` is only supported when cfg.pretrained is ``False``. Default: ``'cpu'``. cfg.z_loss (float, optional): The coefficient of the z-loss. If >0.0, this the z-loss will be multiplied by this value before being added to the standard loss term. Default: ``0.0``. cfg.adapt_vocab_for_denoising (bool, optional): Whether to adapt the vocab of the model/tokenizer to include sentinel tokens that are used in denoising tasks like Span Corruption. If you intend to load from an existing Composer checkpoint that was trained on such a task, set this to ``True`` to ensure that the model vocab size matches your checkpoint's vocab size when loading the weights. Default: ``False``. cfg.add_exact_match (bool, optional): CURRENTLY UNUSED. Whether to add ExactMatch metric used in some fine-tuning settings. Default: ``False``. cfg.add_rouge (bool, optional): CURRENTLY UNUSED. Whether to add RougeWithDetokenizer metric to validation metrics. Default: ``False``. 
""" def __init__(self, om_model_config: DictConfig, om_tokenizer_config: DictConfig): config = AutoConfig.from_pretrained( om_model_config.pretrained_model_name_or_path, **om_model_config.get('config_overrides', {})) # Set up the tokenizer (add tokens for denoising sentinels if needed) if om_model_config.get('adapt_vocab_for_denoising', False): resolved_om_tokenizer_config = om.to_container(om_tokenizer_config, resolve=True) tokenizer_kwargs = resolved_om_tokenizer_config.get( # type: ignore 'kwargs', {}) tokenizer_name = resolved_om_tokenizer_config[ # type: ignore 'name'] tokenizer = AutoTokenizerForMOD.from_pretrained( tokenizer_name, **tokenizer_kwargs) else: resolved_om_tokenizer_config = om.to_container(om_tokenizer_config, resolve=True) tokenizer_kwargs = resolved_om_tokenizer_config.get( # type: ignore 'kwargs', {}) tokenizer_name = resolved_om_tokenizer_config[ # type: ignore 'name'] tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, **tokenizer_kwargs) vocab_size = len(tokenizer) init_device = om_model_config.get('init_device', 'cpu') if init_device == 'cpu': if om_model_config.pretrained: model = AutoModelForCausalLM.from_pretrained( om_model_config.pretrained_model_name_or_path, config=config) else: model = AutoModelForCausalLM.from_config(config) elif init_device == 'meta': if om_model_config.pretrained: raise ValueError( 'Setting cfg.pretrained=True is not supported when init_device="meta".' ) with init_empty_weights(include_buffers=False): model = AutoModelForCausalLM.from_config(config) else: raise ValueError( f'init_device="{init_device}" must be either "cpu" or "meta".') # Convert the Causal LM into a Prefix LM via our custom wrapper model = convert_hf_causal_lm_to_prefix_lm(model) metrics = [ LanguageCrossEntropy(vocab_size=vocab_size, ignore_index=_HF_IGNORE_INDEX), MaskedAccuracy(ignore_index=_HF_IGNORE_INDEX) ] # if cfg.add_exact_match: # metrics.append(ExactMatch(ignore_index=_HF_IGNORE_INDEX)) composer_model = super().__init__(model=model, tokenizer=tokenizer, metrics=metrics, z_loss=om_model_config.get( 'z_loss', 0.0)) # if cfg.add_rouge: # rouge_metric = RougeWithDetokenizer(detokenizer=tokenizer) # composer_model.val_metrics[RougeWithDetokenizer.__name__] = rouge_metric return composer_model def forward(self, batch): # Add bidirectional_mask if it is missing and can be constructed add_bidirectional_mask_if_missing(batch) return super().forward(batch)
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/models/hf/hf_prefix_lm.py
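Unlike the causal-LM wrapper, `ComposerHFPrefixLM` takes two omegaconf configs, one for the model and one for the tokenizer. A hedged usage sketch, with `gpt2` and the shown keys as illustrative assumptions:

```python
# Sketch only: 'gpt2' is one of the causal LM types the prefix-LM conversion supports.
from omegaconf import OmegaConf

from llmfoundry.models.hf.hf_prefix_lm import ComposerHFPrefixLM

model_cfg = OmegaConf.create({
    'pretrained_model_name_or_path': 'gpt2',
    'pretrained': False,                # random init; 'meta' init is also allowed in this case
    'init_device': 'cpu',
    'adapt_vocab_for_denoising': True,  # add <extra_id_*> sentinels via AutoTokenizerForMOD
    'z_loss': 1e-4,
})
tokenizer_cfg = OmegaConf.create({
    'name': 'gpt2',
    'kwargs': {'model_max_length': 1024},
})

# The causal LM is converted by convert_hf_causal_lm_to_prefix_lm, and forward()
# adds a `bidirectional_mask` to any batch that is missing one.
model = ComposerHFPrefixLM(model_cfg, tokenizer_cfg)
```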
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """Implements a Hugging Face T5 wrapped inside a :class:`.ComposerModel`.""" from __future__ import annotations from composer.metrics.nlp import LanguageCrossEntropy, MaskedAccuracy from omegaconf import DictConfig from omegaconf import OmegaConf as om from transformers import AutoConfig, AutoTokenizer, T5ForConditionalGeneration from llmfoundry.models.hf.model_wrapper import HuggingFaceModelWithZLoss from llmfoundry.models.utils import init_empty_weights __all__ = ['ComposerHFT5'] # HuggingFace hardcodes the ignore index to -100 _HF_IGNORE_INDEX = -100 class ComposerHFT5(HuggingFaceModelWithZLoss): """Configures a :class:`.HuggingFaceModel` around a T5. Note: This function uses `transformers.T5ForConditionalGenration`. Future releases will expand support to more general classes of HF Encoder-Decoder models. Args: cfg (DictConfig): An omegaconf dictionary used to configure the model: cfg.pretrained_model_name_or_path (str): The name of or local path to the HF model (e.g., `t5-base` to instantiate a T5 using the base config). cfg.config_overrides (dict, optional): An optional dictionary of keyword arguments that override the default configuration associated with cfg.pretrained_model_name_or_path. Default: ``{}``. cfg.pretrained (bool): Whether to instantiate the model with pre-trained weights coming from cfg.pretrained_model_name_or_path. If ``True``, cfg.config_overrides must be compatible with the pre-trained weights. cfg.init_device ('cpu' | 'meta'): Which device, 'cpu' or 'meta', to initialize the model on. Currently, `meta` is only supported when cfg.pretrained is ``False``. Default: ``'cpu'``. cfg.z_loss (float, optional): The coefficient of the z-loss. If >0.0, this the z-loss will be multiplied by this value before being added to the standard loss term. Default: ``0.0``. cfg.add_exact_match (bool, optional): CURRENTLY UNUSED. Whether to add ExactMatch metric used in some fine-tuning settings. Default: ``False``. cfg.add_rouge (bool, optional): CURRENTLY UNUSED. Whether to add RougeWithDetokenizer metric to validation metrics. Default: ``False``. """ def __init__(self, om_model_config: DictConfig, om_tokenizer_config: DictConfig): config = AutoConfig.from_pretrained( om_model_config.pretrained_model_name_or_path, **om_model_config.get('config_overrides', {})) if not config.is_encoder_decoder: raise ValueError(f'Model type "hf_t5" currently only supports T5 models ' +\ f'using configs where `is_encoder_decoder` is ``True``.') resolved_om_tokenizer_config = om.to_container(om_tokenizer_config, resolve=True) tokenizer_kwargs = resolved_om_tokenizer_config.get( # type: ignore 'kwargs', {}) tokenizer_name = resolved_om_tokenizer_config['name'] # type: ignore tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, **tokenizer_kwargs) vocab_size = len(tokenizer) init_device = om_model_config.get('init_device', 'cpu') if init_device == 'cpu': if om_model_config.pretrained: model = T5ForConditionalGeneration.from_pretrained( om_model_config.pretrained_model_name_or_path, config=config) else: model = T5ForConditionalGeneration(config) elif init_device == 'meta': if om_model_config.pretrained: raise ValueError( 'Setting cfg.pretrained=True is not supported when init_device="meta".' 
) with init_empty_weights(include_buffers=False): model = T5ForConditionalGeneration(config) else: raise ValueError( f'init_device="{init_device}" must be either "cpu" or "meta".') metrics = [ LanguageCrossEntropy(vocab_size=vocab_size, ignore_index=_HF_IGNORE_INDEX), MaskedAccuracy(ignore_index=_HF_IGNORE_INDEX) ] # if cfg.add_exact_match: # metrics.append(ExactMatch(ignore_index=_HF_IGNORE_INDEX)) composer_model = super().__init__(model=model, tokenizer=tokenizer, metrics=metrics, z_loss=om_model_config.get( 'z_loss', 0.0)) # if cfg.add_rouge: # rouge_metric = RougeWithDetokenizer(detokenizer=tokenizer) # composer_model.val_metrics[RougeWithDetokenizer.__name__] = rouge_metric return composer_model
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/models/hf/hf_t5.py
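A corresponding sketch for the T5 wrapper (again not part of the source; `t5-small` and the config keys are assumptions consistent with the docstring):

```python
from omegaconf import OmegaConf

from llmfoundry.models.hf.hf_t5 import ComposerHFT5

model_cfg = OmegaConf.create({
    'pretrained_model_name_or_path': 't5-small',
    'pretrained': True,
    'init_device': 'cpu',
    'z_loss': 0.0,
})
tokenizer_cfg = OmegaConf.create({'name': 't5-small', 'kwargs': {}})

# Raises a ValueError if the resolved config is not an encoder-decoder model.
model = ComposerHFT5(model_cfg, tokenizer_cfg)
```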
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 # helper functions from https://github.com/CarperAI/trlx/blob/main/trlx/utils/modeling.py # which is MIT licensed import functools from typing import Any, Iterable, List from transformers import PreTrainedModel from transformers.models.opt.modeling_opt import OPTDecoder # helper functions def rhasattr(obj: Any, attr: str): """A chain-able attribute version of hasattr. For example, to check if `obj` has the attribute `foo.bar.baz`, you can use: `rhasattr(obj, "foo.bar.baz")` Reference: https://stackoverflow.com/a/67303315 """ _nested_attrs = attr.split('.') _curr_obj = obj for _a in _nested_attrs[:-1]: if hasattr(_curr_obj, _a): _curr_obj = getattr(_curr_obj, _a) else: return False return hasattr(_curr_obj, _nested_attrs[-1]) def rgetattr(obj: Any, attr: str, *args: List[Any]): """A chain-able attribute version of getattr. For example, to get the attribute `foo.bar.baz` from `obj`, you can use: `rgetattr(obj, "foo.bar.baz")` Reference: https://stackoverflow.com/a/31174427 """ def _getattr(obj: Any, attr: str): return getattr(obj, attr, *args) return functools.reduce(_getattr, [obj] + attr.split('.')) def findattr(obj: Any, attrs: Iterable[str]): for attr in attrs: if rhasattr(obj, attr): return rgetattr(obj, attr) return None def hf_get_causal_base_model(model: PreTrainedModel): """Returns the causal decoder backbone of the specified HuggingFace model. Newer HF models have a `self.get_decoder()` method. Older models do not. NOTE: Different model configurations have different causal decoder attribute names. - transformer: (GPT2LMHeadModel, GPTJConfig) - model.decoder: (OPTConfig, BloomConfig) - gpt_neox: (GPTNeoXConfig) """ if hasattr(model, 'get_decoder'): return model.get_decoder() decoder_attrs = ('transformer', 'model.decoder', 'gpt_neox') return findattr(model, decoder_attrs) def hf_get_hidden_layers(model: PreTrainedModel): """Returns the hidden layers of the specified model. NOTE: Different model configurations have different hidden layer attribute names. - transformer.h: (BloomForCausalLM, GPT2LMHeadModel, GPTJForCausalLM) - model.decoder.layers: (OPTForCausalLM) - gpt_neox.layers: (GPTNeoXForCausalLM) - model.layers: (LlaMaForCausalLM) - transformer.blocks: (MPTForCausalLM) """ hidden_layers_attrs = ( 'transformer.h', # BLOOM, GPT2, GPTJ 'model.decoder.layers', # OPT 'gpt_neox.layers', # GPTNeoX 'block', # T5, BART, Pegasus (from encoder) 'layers', # ProphetNet, Marian (from encoder) 'model.layers', # LLaMa 'transformer.blocks', # MPT ) return findattr(model, hidden_layers_attrs) # /end helper functions def prepare_hf_model_for_fsdp(model: PreTrainedModel) -> None: """FSDP wrap a HuggingFace model. Call specific functions """ if model.config.is_encoder_decoder: prepare_hf_enc_dec_model_for_fsdp(model) else: # many common decoder-only model do not set the flag # model.config.is_decoder, so we can't trust it prepare_hf_causal_lm_model_for_fsdp(model) def prepare_hf_causal_lm_model_for_fsdp(model: PreTrainedModel) -> None: """FSDP wrap a HuggingFace decoder. Wrap any model for FSDP which follows one of the 3 existing conventions from HuggingFace for decoder-only LLMs. 
""" causal_base_model = hf_get_causal_base_model(model) # OPT has an extra layer of wrapping, so special case here if isinstance(causal_base_model, OPTDecoder): model.model._fsdp_wrap = False model_block = hf_get_hidden_layers(model) # type: ignore lm_head = model.get_output_embeddings() # some models (OPT) implement .get_input_embeddings for the causal subclass # but all of them implement it for the base model tied_embeddings = causal_base_model.get_input_embeddings() # type: ignore modules = { 'base_model': causal_base_model, 'model_block': model_block, 'lm_head': lm_head, 'tied_embeddings': tied_embeddings } for mod_name, module in modules.items(): if module is None: raise ValueError( f'Unable to FSDP-wrap this model! `{mod_name}` does not ' + 'follow common layer/weight naming conventions.') block_type = type(model_block[0]) # type: ignore # When using the HF LM models, # the weights of the self.lm_head and self.transformer.wte are tied. # This tying occurs inside the `self.post_init()` function. # This is a hurdle for FSDP because they need to be in the same FSDP block # These lines ensures that both modules stay together in the top-most block when # the model has this tying enabled (almost all do; this property defaults to True) if model.config.tie_word_embeddings: causal_base_model._fsdp_wrap = False # type: ignore tied_embeddings._fsdp_wrap = False # type: ignore lm_head._fsdp_wrap = False # type: ignore # FSDP Wrap and Activation Checkpoint every model block model.fsdp_wrap_fn = lambda module: isinstance(module, block_type) model.activation_checkpointing_fn = lambda module: isinstance( module, block_type) def prepare_hf_enc_dec_model_for_fsdp(model: PreTrainedModel) -> None: """Wrap an encoder/decoder HF model. This works for T5, BART, Pegasus, PegasusX, but not all enc/dec (ProphetNet) You have model.shared, model.encoder, model.decoder and model.lm_head, where model.shared are the embeddings which are tied to model.lm_head, and model.shared == model.encoder.embed_tokens and model.shared == model.decoder.embed_tokens """ tied_embeddings = model.get_input_embeddings() encoder = model.get_encoder() decoder = model.get_decoder() lm_head = model.get_output_embeddings() # some encoder/decoders have different layers for encoder vs decoder encoder_block = hf_get_hidden_layers(encoder) decoder_block = hf_get_hidden_layers(decoder) modules = { 'encoder': encoder, 'decoder': decoder, 'encoder_block': encoder_block, 'decoder_block': decoder_block, 'lm_head': lm_head, 'tied_embeddings': tied_embeddings } for mod_name, module in modules.items(): if module is None: raise ValueError( f'Unable to FSDP-wrap this model! 
`{mod_name}` does not ' + 'follow common layer/weight naming conventions.') decoder_block_type = type(decoder_block[0]) # type: ignore encoder_block_type = type(encoder_block[0]) # type: ignore if model.config.tie_word_embeddings: # it is possible to train an enc/dec without tied embeddings, hence the check tied_embeddings._fsdp_wrap = False # type: ignore encoder._fsdp_wrap = False # type: ignore decoder._fsdp_wrap = False # type: ignore lm_head._fsdp_wrap = False # type: ignore # FSDP Wrap and Activation Checkpoint every decoder block model.fsdp_wrap_fn = lambda module: isinstance(module, decoder_block_type) model.activation_checkpointing_fn = lambda module: isinstance( module, decoder_block_type) if encoder_block_type == decoder_block_type: return # need to wrap encoder blocks separately for ProphetNet and Marian model.fsdp_wrap_fn = lambda module: isinstance(module, encoder_block_type) model.activation_checkpointing_fn = lambda module: isinstance( module, encoder_block_type)
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/models/hf/hf_fsdp.py
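The helpers in this module attach `fsdp_wrap_fn` and `activation_checkpointing_fn` hooks that Composer's FSDP integration looks for. A small sketch of how they behave on a decoder-only model (using `gpt2` purely as an assumed example):

```python
# Sketch only; not from the source. 'gpt2' is just an illustrative checkpoint.
from transformers import AutoModelForCausalLM

from llmfoundry.models.hf.hf_fsdp import (hf_get_causal_base_model,
                                          hf_get_hidden_layers,
                                          prepare_hf_model_for_fsdp)

model = AutoModelForCausalLM.from_pretrained('gpt2')

backbone = hf_get_causal_base_model(model)  # e.g. the GPT2Model at model.transformer
blocks = hf_get_hidden_layers(model)        # e.g. model.transformer.h

prepare_hf_model_for_fsdp(model)

# After the call, the attached hooks tell FSDP to wrap and activation-checkpoint
# each transformer block.
assert model.fsdp_wrap_fn(blocks[0])
assert model.activation_checkpointing_fn(blocks[0])
```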
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """Streaming dataloader for (mixture of) denoising task(s).""" import logging import random import sys from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Union import numpy as np import torch from omegaconf import DictConfig from omegaconf import OmegaConf as om from torch.utils.data import DataLoader from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast from llmfoundry.data.packing import BinPackWrapper from llmfoundry.data.text_data import StreamingTextDataset from llmfoundry.models import utils __all__ = ['MixtureOfDenoisersCollator', 'build_text_denoising_dataloader'] log = logging.getLogger(__name__) # HuggingFace hardcodes the ignore index to -100 _HF_IGNORE_INDEX = -100 Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] # Required signature of any `prefix_function` (see below) PREFIX_FUNCTION = Callable[[float, Optional[float], Tokenizer], Sequence[int]] def ul2_prefix_function( mask_ratio: float, mean_length: Optional[float], tokenizer: Tokenizer, ) -> Sequence[int]: """Generates prefixes based on UL2 paper. See: http://arxiv.org/abs/2205.05131 """ if mean_length is None: # This is the case for "sequence to sequence" prefix = '[S2S]' if mask_ratio < 1.0 else '[CLM]' elif mean_length >= 12 or mask_ratio >= 0.3: # UL2 tags this corruption rate "extreme" prefix = '[NLG]' else: # UL2 tags this corruption rate as "regular" prefix = '[NLU]' return tokenizer(prefix, add_special_tokens=False).input_ids class MixtureOfDenoisersCollator: """Data collator for mixture of span-corruption denoisers, as in UL2. This collator supports a variety of tasks used to pre-train an encoder-decoder model or a (prefix LM) decoder-only model. This is meant to be used with a dataset that yields tokenized text sequences. It is not required that the token sequences are already padded or truncate, as this collator will internally truncate and pad as needed. For the denoising mixture recommended in the original UL2 paper, http://arxiv.org/abs/2205.05131, use: .. python: MixtureOfDenoisersCollator( ..., span_mean_lengths_and_ratios=[ [3, .15], [8, .15], [3, .50], [8, .50], [64, .15], [64, .50], ], sequence_mask_ratios=0.25 ) Args: tokenizer (transformers.PreTrainedTokenizer): The tokenizer used to prepare the data from raw text. Any missing sentinel tokens will be added by the collator. max_seq_length (int): The maximum length of sequences produced by this collator. Incoming sequences may be truncated to accommodate this limit. Note that when formatting for decoder-only models, the context tokens and target tokens are concatenated, and max_seq_length applies to their combined length. For encoder-decoder models, both the encoder and decoder will see up to max_seq_length tokens. decoder_only_format (bool, optional): Whether to format the batches for a decoder-only model (i.e. a prefix LM) or, if ``False``, an encoder-decoder model. Default: ``False``. span_mean_lengths_and_rations (optional): A length-2 list of a ``[mean_length, mask_ratio]`` pair, or a list of such pairs. Each pair adds a span corruption denoising task to the task mixture. For example, ``[3, 0.15]`` adds the original span corruption task used for pre-training a T5 model as in http://arxiv.org/abs/1910.10683, which trained with a single span corruption task that used a mean span length of 3 and a mask ratio of 15%. Default: ``None`` does not add any span corruption tasks. 
sequence_mask_ratios (optional): A float or list of floats, one for each sequence corruption denoising task to add to the task mixture. Each sequence mask ratio must be greater than 0.0 and at most 1.0. This type of task is a special instance of span corruption, with exactly one masked span take from the end of the sequence. The length of the span is sampled uniformly such that the average portion of masked tokens equals sequence_mask_ratio. Note: A value of 1.0 essentially yields causal LM examples. Default: ``None` does not add any sequence corruption tasks. allow_pad_trimming (bool, optional): Whether to allow the collator to trim away sequence regions that are entirely padding (i.e. padding for each example in the batch). If ``True``, shorter sequences may improve throughput but at a potentially higher memory cost owing to variable sequence lengths from batch to batch. Default: ``False`` yields batches that are always padded to max_seq_length. prefix_function (callable, optional): A function that maps denoising task parameters (e.g. mean_length=3, mask_ratio=0.15) to a prefix that will be added to sequences when the associated "noiser" is applied. To disable these prefixes, use a value of ``None``. Default: :func:`ul2_prefix_function` applies the prefix scheme suggested in the UL2 paper: http://arxiv.org/abs/2205.05131. context_eos (bool, optional): Whether to attach an EOS token to the end of the context sequence, marking the transition from context to target sequence. Only applicable if decoder_only_format is True. Context EOS tokens are always added for encoder-decoder format. Default: ``False`` does not attach context EOS. """ def __init__( self, tokenizer: Tokenizer, max_seq_length: int, decoder_only_format: bool = False, span_mean_lengths_and_ratios: Optional[List] = None, sequence_mask_ratios: Optional[Union[List[float], float]] = None, allow_pad_trimming: bool = False, prefix_function: Optional[PREFIX_FUNCTION] = ul2_prefix_function, context_eos: Optional[bool] = None, ): # Prepare the tokenizer for denoising tasks utils.adapt_tokenizer_for_denoising(tokenizer) self.tokenizer = tokenizer self.max_seq_length = max_seq_length self.decoder_only_format = decoder_only_format self._sentinel_token_ids = np.array(self.tokenizer.sentinel_token_ids) # Trimming will always be skipped on at least the first __call__ self._allow_pad_trimming = allow_pad_trimming self._seen_first_batch = False self.context_eos = bool(context_eos) if decoder_only_format else True # Process the span_mean_lengths_and_ratios argument if span_mean_lengths_and_ratios is None: # In this case, there are no span corruption tasks self.span_mean_lengths_and_ratios = [] elif isinstance(span_mean_lengths_and_ratios[0], (int, float)): # In this case, there is one span corruption task if not len(span_mean_lengths_and_ratios) == 2: raise ValueError('`span_mean_lengths_and_ratios` must be a ' + \ 'pair of [mean_length, mask_ratio], a list ' + \ f'of such pairs, or None. Got {span_mean_lengths_and_ratios}.') self.span_mean_lengths_and_ratios = [span_mean_lengths_and_ratios] else: # In this case, there are one or more span corruption tasks span_mean_lengths_and_ratios = list(span_mean_lengths_and_ratios) for spec_pair in span_mean_lengths_and_ratios: if len(spec_pair) != 2: raise ValueError('`span_mean_lengths_and_ratios` must be a ' + \ 'pair of [mean_length, mask_ratio], a list ' + \ f'of such pairs, or None. 
Got {span_mean_lengths_and_ratios}.') self.span_mean_lengths_and_ratios = span_mean_lengths_and_ratios # Process the sequence_mask_ratios argument if sequence_mask_ratios is None: # In this case, there are no sequence corruption tasks self.sequence_mask_ratios = [] elif isinstance(sequence_mask_ratios, float): # In this case, there is one sequence corruption task self.sequence_mask_ratios = [sequence_mask_ratios] else: # In this case, there is one or more sequence corruption tasks for ratio in sequence_mask_ratios: if not (0 < ratio <= 1.0): raise ValueError('`sequence_mask_ratios` must be a float (or list '+\ 'of floats) that are each >0.0 and <=1.0, or None. '+\ f'Got {sequence_mask_ratios}.') self.sequence_mask_ratios = sequence_mask_ratios # Populate the noisers so we can learn to denoise them! self._noisers = [] self._smallest_max_raw_length = self.max_seq_length * 100 self._largest_max_raw_length = 0 self._uses_span_corruption = False # Add "noisers" for any span corruption denoising tasks # Each mean_length / mask_ratio combo becomes one of the span # corruption denoising tasks for span_mean_length, span_mask_ratio in self.span_mean_lengths_and_ratios: self._uses_span_corruption = True if span_mean_length < 0: raise ValueError('All span mean lengths must be positive.') if not 0 < span_mask_ratio < 1.0: raise ValueError( 'All span masking ratios must be between 0.0 and 1.0.') if prefix_function is not None: prefix_tokens = prefix_function(span_mask_ratio, span_mean_length, self.tokenizer) else: prefix_tokens = None max_raw_length = _get_max_starting_length( max_length=self.max_seq_length, mask_ratio=span_mask_ratio, mean_span_length=span_mean_length, n_prefix_tokens=len(prefix_tokens or []), decoder_only_format=self.decoder_only_format, context_eos=self.context_eos) if max_raw_length < self._smallest_max_raw_length: self._smallest_max_raw_length = max_raw_length if max_raw_length > self._largest_max_raw_length: self._largest_max_raw_length = max_raw_length kwargs = { 'mean_span_length': span_mean_length, 'mask_ratio': span_mask_ratio, 'prefix_tokens': prefix_tokens, 'max_raw_length': max_raw_length, } self._noisers.append(kwargs) # Add "noisers" for any sequential denoising tasks for sequence_mask_ratio in self.sequence_mask_ratios: if prefix_function is not None: prefix_tokens = prefix_function(sequence_mask_ratio, None, self.tokenizer) else: prefix_tokens = None max_raw_length = self.max_seq_length - len(prefix_tokens or []) - 1 if decoder_only_format and self.context_eos: max_raw_length = max_raw_length - 1 if not self._uses_span_corruption and ( max_raw_length < self._smallest_max_raw_length): # We choose not to count sequence denoising in the smallest # unless there is only sequence denoising. self._smallest_max_raw_length = max_raw_length if max_raw_length > self._largest_max_raw_length: self._largest_max_raw_length = max_raw_length kwargs = { 'mean_span_length': None, 'mask_ratio': sequence_mask_ratio, 'prefix_tokens': prefix_tokens, 'max_raw_length': max_raw_length, } self._noisers.append(kwargs) if not self._noisers: raise ValueError( 'No denoising tasks were included. 
Make sure to set ' + \ '`span_mean_lengths_and_ratios` and/or `sequence_mask_ratios`.') @property def smallest_max_raw_length(self): return int(self._smallest_max_raw_length) @property def largest_max_raw_length(self): return int(self._largest_max_raw_length) def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]: """Batch examples processed by the span corrupter.""" processed_examples = [] for example in examples: # Randomly pick a "noiser" to apply to this example noiser = random.choice(self._noisers) # Apply it processed_examples.append( noise_token_sequence( example, mask_ratio=noiser['mask_ratio'], mean_span_length=noiser['mean_span_length'], prefix_tokens=noiser['prefix_tokens'], max_raw_length=noiser['max_raw_length'], max_seq_length=self.max_seq_length, tokenizer=self.tokenizer, sentinel_token_ids=self._sentinel_token_ids, decoder_only_format=self.decoder_only_format, context_eos=self.context_eos)) batch = self.tokenizer.pad(processed_examples) # This logic prevents trimming on at least the first batch if not (self._allow_pad_trimming and self._seen_first_batch): self._seen_first_batch = True return batch self._seen_first_batch = True # Truncate portions of the inputs that are purely padding # (up to a multiple of 8) multiple_of = 8 n_examples_per_length = batch['attention_mask'].sum(0) keep_tokens = torch.sum(n_examples_per_length > 0) keep_tokens = int(multiple_of * torch.ceil(keep_tokens / multiple_of)) # Note: EncDec formatting will always produce a right-padded batch if self.tokenizer.padding_side == 'left' and self.decoder_only_format: batch['input_ids'] = batch['input_ids'][:, -keep_tokens:] batch['attention_mask'] = batch['attention_mask'][:, -keep_tokens:] else: batch['input_ids'] = batch['input_ids'][:, :keep_tokens] batch['attention_mask'] = batch['attention_mask'][:, :keep_tokens] if self.decoder_only_format: if self.tokenizer.padding_side == 'left': batch['labels'] = batch['labels'][:, -keep_tokens:] batch['bidirectional_mask'] = batch[ 'bidirectional_mask'][:, -keep_tokens:] else: batch['labels'] = batch['labels'][:, :keep_tokens] batch['bidirectional_mask'] = batch[ 'bidirectional_mask'][:, :keep_tokens] else: # Truncate portions of the decoder inputs that are purely padding n_examples_per_length = batch['decoder_attention_mask'].sum(0) keep_tokens = torch.sum(n_examples_per_length > 0) keep_tokens = int(multiple_of * torch.ceil(keep_tokens / multiple_of)) batch['labels'] = batch['labels'][:, :keep_tokens] batch['decoder_attention_mask'] = batch[ 'decoder_attention_mask'][:, :keep_tokens] batch['decoder_input_ids'] = batch[ 'decoder_input_ids'][:, :keep_tokens] # This slicing can produce non-contiguous tensors, so use .contiguous # to prevent related problems batch = {k: v.contiguous() for k, v in batch.items()} return batch def build_text_denoising_dataloader( cfg: DictConfig, tokenizer: Tokenizer, device_batch_size: int, ) -> DataLoader: """Constructor function for a Mixture of Denoisers dataloader. This function constructs a dataloader that can be used to train an encoder-decoder model or a (prefix LM) decoder-only model on a text denoising task mixture (e.g. span corruption, or UL2). The underlying dataset is a :class:`StreamingTextDataset`, allowing you to stream raw text data or pre-tokenized text data. The dataloader uses a :class:`MixtureOfDenoisersCollator` to prepare the tokenized examples into training batches. 
Args: cfg (DictConfig): An omegaconf dictionary used to configure the loader: cfg.name (str): The type of dataloader to build. Must = "text_denoising". --- cfg.dataset.max_seq_len (int): The maximum length of sequences in the batch. See :class:`MixtureOfDenoisersCollator` docstring for details. cfg.dataset.packing_ratio (float, optional): If provided, this invokes a collator wrapper that packs device_batch_size*packing_ratio raw examples into device_batch_size packed examples. This helps minimize padding while preserving sequence integrity. This adds `sequence_id` to the batch, which indicates which unique sequence each token belongs to. Note: Using this feature will not change device_batch_size but it will determine the number of raw examples consumed by the dataloader per batch. Some examples may be discarded if they do not fit when packing. Select packing_ratio **carefully** based on the dataset statistics, max_seq_len, and tolerance for discarding samples! The packing code in `./packing.py` provides a script that can help you choose the best packing_ratio. See :class:`StreamingTextDataset` for info on other standard config options within `cfg.dataset`. --- cfg.mixture_of_denoisers.decoder_only_format (bool): Whether the batches should use the format required for training a decoder-only model (if ``True``) or an encoder-decoder model (if ``False``). cfg.mixture_of_denoisers.span_mean_lengths_and_ratios (optional): The parameters for any span corruption denoising tasks to include in the task mixture. See :class:`MixtureOfDenoisersCollator` docstring for details. cfg.mixture_of_denoisers.sequence_mask_ratios (optional): The parameters for any sequence denoising tasks to include in the task mixture. See :class:`MixtureOfDenoisersCollator` docstring for details. cfg.mixture_of_denoisers.allow_pad_trimming (optional): Whether to allow the collator to trim padding when possible (if ``True``). Defaults to ``False``. cfg.mixture_of_denoisers.prefix_function (optional): Set to ``None`` to disable the UL2-style prefixes that will be automatically added by default. --- See :class:`DataLoader` for standard argument options to the pytorch dataloader, such as `cfg.drop_last`, `cfg.num_workers`, etc. tokenizer (transformers.PreTrainedTokenizer): The tokenizer used to prepare the data from raw text. Any missing sentinel tokens will be added by the collator. device_batch_size (int): The size of the batches (number of examples) that the dataloader will produce. Note: You can run the script inside `./packing.py` to quickly test the padding/waste rates for different `cfg.dataset.packing_ratio` choices, given a starting workload YAML. 
""" assert cfg.name == 'text_denoising', f'Tried to build_denoising text dataloader with cfg.name={cfg.name}' collate_fn = MixtureOfDenoisersCollator( tokenizer=tokenizer, max_seq_length=cfg.dataset.max_seq_len, decoder_only_format=cfg.mixture_of_denoisers.decoder_only_format, span_mean_lengths_and_ratios=cfg.mixture_of_denoisers.get( 'span_mean_lengths_and_ratios'), sequence_mask_ratios=cfg.mixture_of_denoisers.get( 'sequence_mask_ratios'), allow_pad_trimming=cfg.mixture_of_denoisers.get('allow_pad_trimming', False), prefix_function=cfg.mixture_of_denoisers.get('prefix_function', ul2_prefix_function), context_eos=cfg.mixture_of_denoisers.get('context_eos')) truncate_to = cfg.mixture_of_denoisers.get('truncate_raw_tokens_to') if truncate_to is None: # By default, truncate to the largest max raw length of the denoisers truncate_to = collate_fn.largest_max_raw_length elif isinstance(truncate_to, str): if truncate_to.lower() == 'min': # Truncate to the smallest max raw length of the denoisers truncate_to = collate_fn.smallest_max_raw_length elif truncate_to.lower() == 'max': # Truncate to the largest max raw length of the denoisers truncate_to = collate_fn.largest_max_raw_length else: raise ValueError( f'truncate_raw_tokens_to(="{truncate_to.lower()}") must be "min", "max", a positive int, or None.' ) else: if not isinstance(truncate_to, int): ValueError( f'truncate_raw_tokens_to(={truncate_to}) must be "min", "max", a positive int, or None.' ) if truncate_to < 0: ValueError( f'truncate_raw_tokens_to(={truncate_to}) must be "min", "max", a positive int, or None.' ) dataset = StreamingTextDataset( local=cfg.dataset.local, tokenizer=tokenizer, max_seq_len=truncate_to, remote=cfg.dataset.get('remote'), split=cfg.dataset.get('split'), shuffle=cfg.dataset.get('shuffle', False), predownload=cfg.dataset.get('predownload', 100_000), keep_zip=cfg.dataset.get('keep_zip', False), download_retry=cfg.dataset.get('download_retry', 2), download_timeout=cfg.dataset.get('download_timeout', 60), validate_hash=cfg.dataset.get('validate_hash'), shuffle_seed=cfg.dataset.get('shuffle_seed', 9176), num_canonical_nodes=cfg.dataset.get('num_canonical_nodes', 128), batch_size=device_batch_size, ) if dataset.tokenizer.pad_token is None: # type: ignore dataset.tokenizer.pad_token = dataset.tokenizer.eos_token if cfg.dataset.get('packing_ratio'): n_examples_to_pack = int(device_batch_size * cfg.dataset.packing_ratio) if n_examples_to_pack < device_batch_size: raise ValueError('packing_ratio must be >= 1, if supplied') if not cfg.mixture_of_denoisers.decoder_only_format: raise NotImplementedError( 'On-the-fly packing is currently only supported for decoder-only formats.' ) collate_fn = BinPackWrapper( collator=collate_fn, target_batch_size=device_batch_size, max_seq_len=cfg.dataset.max_seq_len, pad_token_id=dataset.tokenizer.pad_token_id, padding_side=dataset.tokenizer.padding_side, max_leftover_bins_to_keep=cfg.dataset.get( 'max_leftover_bins_to_keep'), ) device_batch_size = n_examples_to_pack elif cfg.dataset.get('max_leftover_bins_to_keep') is not None: raise ValueError( 'cfg.dataset.max_leftover_bins_to_keep has been defined, ' +\ 'but cfg.dataset.packing_ratio has not been set. 
Please set ' +\ 'the latter to turn on packing or remove the former from the config.') return DataLoader( dataset, collate_fn=collate_fn, batch_size=device_batch_size, drop_last=cfg.drop_last, num_workers=cfg.num_workers, pin_memory=cfg.get('pin_memory', True), prefetch_factor=cfg.get('prefetch_factor', 2), persistent_workers=cfg.get('persistent_workers', False), timeout=cfg.get('timeout', 0), ) def noise_token_sequence( example: Union[torch.Tensor, Mapping[str, Any]], mask_ratio: float, mean_span_length: Optional[float], prefix_tokens: Optional[Sequence[int]], max_raw_length: int, max_seq_length: int, tokenizer: Tokenizer, sentinel_token_ids: np.ndarray, decoder_only_format: bool, context_eos: bool, ) -> Dict[str, torch.Tensor]: """Span corruption applicable to all UL2 denoising tasks.""" # Extract the raw text tokens (trim if we need to) if isinstance(example, torch.Tensor): # If the example is a tensor, assume is the raw tokens with no padding tokens = example length = len(tokens) else: tokens = example['input_ids'] length = sum(example['attention_mask']) if length > max_raw_length: length = max_raw_length if tokenizer.padding_side == 'left': tokens = tokens[-length:] else: tokens = tokens[:length] prefix_tokens = prefix_tokens or [] if length < 1: raise ValueError('Example cannot be empty but token length <1.') # mean_span_length==None is a special case for "sequential" denoising # (where a single span at the end of the sequence is masked) if mean_span_length is None: # This ensures that exactly 1 span will be produced and that # trimming to max_seq_length won't cut off any <EOS> token. # In the decoder-only case, this won't insert new tokens. if mask_ratio <= 0.5: u = np.random.uniform(low=0.0, high=mask_ratio * 2) else: u = np.random.uniform(low=(mask_ratio * 2) - 1, high=1.0) mean_span_length = float(np.round(1 + u * (length - 1))) mask_ratio = mean_span_length / length # type: ignore use_sentinels = False else: use_sentinels = True # Generate the mask # Note: this function can be used for all the UL2 noising functions mask = _sample_mask_array(length, mask_ratio, mean_span_length) # The sequence should always be unmasked at the beginning assert mask[0] == 0 # Generate the input/label sequences given the raw tokens and the mask tokens_inputs = _apply_mask(tokens, mask, use_sentinels, tokenizer.eos_token_id, sentinel_token_ids, ensure_eos=context_eos) tokens_labels = _apply_mask(tokens, 1 - mask, use_sentinels, tokenizer.eos_token_id, sentinel_token_ids, ensure_eos=True) # Tag the inputs with any prefix if prefix_tokens: tokens_inputs = np.concatenate([prefix_tokens, tokens_inputs]) # Trim if necessary if len(tokens_inputs) > max_seq_length: raise ValueError('This should not exceed the max length') if len(tokens_labels) > max_seq_length: raise ValueError('This should not exceed the max length') tokens_inputs = torch.LongTensor(tokens_inputs) tokens_labels = torch.LongTensor(tokens_labels) if decoder_only_format: return _format_tokens_for_decoder_only(tokens_inputs, tokens_labels, max_seq_length, tokenizer.pad_token_id, tokenizer.padding_side) return _format_tokens_for_encoder_decoder(tokens_inputs, tokens_labels, max_seq_length, tokenizer.pad_token_id) def _get_max_starting_length(max_length: int, mask_ratio: float, mean_span_length: float, n_prefix_tokens: int, decoder_only_format: bool, context_eos: bool): """Get max num raw tokens that will fit max_length.""" def sequence_stats(length: int): length = np.maximum(length, 2) num_noise_tokens = int(np.round(mask_ratio * 
float(length))) num_noise_tokens = np.minimum(np.maximum(num_noise_tokens, 1), length - 1) num_spans = int(np.round(float(num_noise_tokens) / mean_span_length)) num_noise_spans = np.maximum(num_spans, 1) num_nonnoise_tokens = length - num_noise_tokens # Prefix, sentinel, and EOS added to input for Enc-Dec extra_inp_tokens = n_prefix_tokens + num_noise_spans + int(context_eos) # Sentinel and EOS added to target extra_targ_tokens = num_noise_spans + 1 # Sequence totals after corruption total_inp_tokens = num_nonnoise_tokens + extra_inp_tokens total_targ_tokens = num_noise_tokens + extra_targ_tokens return total_inp_tokens, total_targ_tokens def length_fits(length: int) -> bool: total_inp_tokens, total_targ_tokens = sequence_stats(length) if decoder_only_format: return (total_inp_tokens + total_targ_tokens) <= max_length return (total_inp_tokens <= max_length) and (total_targ_tokens <= max_length) # Start with a definitely too-long sequence and reduce until it fits num_raw_tokens = max_length * 2 while num_raw_tokens > 0: if length_fits(num_raw_tokens): return num_raw_tokens num_raw_tokens -= 1 raise ValueError( 'Unable to find a starting sequence length that can fit given the corruption and max_length parameters.' ) def _sample_mask_array(length: int, mask_ratio: float, mean_span_length: float) -> np.ndarray: """Samples a span corruption mask.""" if mask_ratio == 0.0: return np.zeros(length) # This first block computes the number of noise/non-noise spans and the # total tokens in each. Extra steps are taken to handle edge cases that # cause degeneracy. starting_length = length length = np.maximum(length, 2) num_noise_tokens = int(np.round(mask_ratio * float(length))) num_noise_tokens = np.minimum(np.maximum(num_noise_tokens, 1), length - 1) num_spans = int(np.round(float(num_noise_tokens) / mean_span_length)) num_noise_spans = np.maximum(num_spans, 1) num_nonnoise_tokens = length - num_noise_tokens # Sample the noise/non-noise span lengths and interleave them to # generate the mask array. # Note: We always start with a non-noise span. def _sample_span_lengths(total_tokens: int, num_spans: int) -> np.ndarray: """Samples lengths of num_spans segments. Note: the combined length of segments equals total_tokens. 
""" span_markers = np.less(np.arange(total_tokens - 1), num_spans - 1)[np.random.permutation(total_tokens - 1)] span_start_indicator = np.concatenate([[0], span_markers]) span_id = np.cumsum(span_start_indicator).reshape(-1, 1) spans = np.arange(num_spans).reshape(1, -1) span_lengths = np.sum(span_id == spans, axis=0) return span_lengths noise_span_lengths = _sample_span_lengths(num_noise_tokens, num_noise_spans) nonnoise_span_lengths = _sample_span_lengths(num_nonnoise_tokens, num_noise_spans) interleaved_span_lengths = np.reshape( np.stack([nonnoise_span_lengths, noise_span_lengths], axis=1), [num_noise_spans * 2]) span_starts = np.cumsum(interleaved_span_lengths)[:-1] span_start_indicator = np.zeros(length) span_start_indicator[span_starts] = 1 span_id = np.cumsum(span_start_indicator) is_noise = np.equal(np.mod(span_id, 2), 1) mask = is_noise[:starting_length] return mask def _apply_mask(tokens: Union[torch.Tensor, Sequence[int], np.ndarray], mask: np.ndarray, use_sentinels: bool, eos_token_id: int, sentinel_token_ids: np.ndarray, ensure_eos: bool = True) -> np.ndarray: """Remove or replace masked portions from token sequence.""" if not use_sentinels: # The logic is simple if we don't use sentinel tokens noised_tokens = np.array(tokens)[np.logical_not(mask)] # Ensure there's an end-of-sentence token at the end if ensure_eos and (noised_tokens[-1] != eos_token_id): noised_tokens = np.concatenate([noised_tokens, [eos_token_id]]) return noised_tokens # Masking at previous token prev_token_mask = np.concatenate([[0], mask[:-1]]) # Decompose mask into start-of-span mask and non-start-of-span mask start_of_noise_span_token = np.logical_and(mask, np.logical_not(prev_token_mask)) nonstart_noise_span_token = np.logical_and(mask, prev_token_mask) # Replace tokens at the start of each noise span with its corresponding # sentinel token sentinel_idx = np.minimum(len(sentinel_token_ids), np.cumsum(start_of_noise_span_token)) - 1 tokens = np.where(start_of_noise_span_token, sentinel_token_ids[sentinel_idx], tokens) # Remove masked tokens (but preserving the sentinel tokens) noised_tokens = tokens[np.logical_not(nonstart_noise_span_token)] # Ensure there's an end-of-sentence token at the end if ensure_eos and (noised_tokens[-1] != eos_token_id): noised_tokens = np.concatenate([noised_tokens, [eos_token_id]]) return noised_tokens def _format_tokens_for_encoder_decoder( tokens_inputs: torch.LongTensor, tokens_labels: torch.LongTensor, max_seq_length: int, pad_token_id: int, ) -> Dict[str, torch.Tensor]: """Package the input/label sequence for an EncDec model.""" example = {} # Re-populate with an empty, padded example example['input_ids'] = torch.full((max_seq_length,), pad_token_id, dtype=torch.int32) example['labels'] = torch.full((max_seq_length,), _HF_IGNORE_INDEX, dtype=torch.int32) example['attention_mask'] = torch.zeros_like(example['input_ids']) example['decoder_attention_mask'] = torch.zeros_like(example['labels']) # Fill in with processed results (Note: EncDec format is right-padded) example['input_ids'][:len(tokens_inputs)] = tokens_inputs example['labels'][:len(tokens_labels)] = tokens_labels example['attention_mask'][:len(tokens_inputs)] = 1 example['decoder_attention_mask'][:len(tokens_labels)] = 1 # Best practice is to include decoder_input_ids (= right-shifted labels) example['decoder_input_ids'] = torch.full_like(example['labels'], pad_token_id) example['decoder_input_ids'][1:len(tokens_labels)] = tokens_labels[:-1] return example def _format_tokens_for_decoder_only( tokens_inputs: 
torch.LongTensor, tokens_labels: torch.LongTensor, max_seq_length: int, pad_token_id: int, padding_side: str, ) -> Dict[str, torch.Tensor]: """Package the input/label sequence for an decoder-only model.""" example = {} # Re-populate with an empty, padded example example['input_ids'] = torch.full((max_seq_length,), pad_token_id, dtype=torch.int32) example['labels'] = torch.full((max_seq_length,), _HF_IGNORE_INDEX, dtype=torch.int32) example['attention_mask'] = torch.full((max_seq_length,), 0, dtype=torch.bool) example['bidirectional_mask'] = torch.full((max_seq_length,), 0, dtype=torch.bool) n_input = len(tokens_inputs) n_label = len(tokens_labels) n_concat = n_input + n_label assert n_concat <= max_seq_length, f'{n_concat=}, {n_input=}, {n_label=}' tokens_concat = torch.concat([tokens_inputs, tokens_labels], dim=0) # Fill in with the processed results if padding_side == 'left': example['input_ids'][-n_concat:] = tokens_concat # `labels` copies `input_ids` but with -100 at # non-loss-generating tokens. `labels` will be shifted in the # model code when computing loss. example['labels'][-n_concat:] = tokens_concat example['labels'][-n_concat:-n_label] = _HF_IGNORE_INDEX example['attention_mask'][-n_concat:] = 1 example['bidirectional_mask'][-n_concat:-n_label] = 1 else: example['input_ids'][:n_concat] = tokens_concat # See above comment regarding `labels` example['labels'][:n_concat] = tokens_concat example['labels'][:n_input] = _HF_IGNORE_INDEX example['attention_mask'][:n_concat] = 1 example['bidirectional_mask'][:n_input] = 1 return example # Helpful to test if your dataloader is working locally # Run `python denoising.py [local] [remote, optional]` and verify that batches # are printed out if __name__ == '__main__': from llmfoundry.utils.builders import build_tokenizer local = sys.argv[1] if len(sys.argv) > 2: remote = sys.argv[2] else: remote = local print(f'Reading val split from {remote} -> {local}') decoder_only = True cfg = { 'name': 'text_denoising', 'dataset': { 'local': local, 'remote': remote, 'split': 'val', # 'val_small', 'shuffle': False, 'max_seq_len': 2048 if decoder_only else 1024, 'packing_ratio': 4.5, 'predownload': 1000, 'keep_zip': True, # in case we need compressed files after testing }, 'mixture_of_denoisers': { 'decoder_only_format': decoder_only, 'span_mean_lengths_and_ratios': [[3, .15], [8, .5]], 'sequence_mask_ratios': 0.25, }, 'drop_last': False, 'num_workers': 0, } cfg = om.create(cfg) device_batch_size = 2 tokenizer_cfg = { 'name': 'EleutherAI/gpt-neox-20b' if decoder_only else 't5-base', 'kwargs': {} } tokenizer_cfg['kwargs'] = {'model_max_length': cfg.dataset.max_seq_len} tokenizer_cfg = om.create(tokenizer_cfg) tokenizer = build_tokenizer(tokenizer_cfg) loader = build_text_denoising_dataloader(cfg, tokenizer, device_batch_size) print( f'\n\nTRUNCATING TO: {loader.dataset.max_seq_len}\n\n') # type: ignore packing = cfg.dataset.get('packing_ratio') is not None if packing: tokenizer = loader.collate_fn.base_collator.tokenizer else: tokenizer = loader.collate_fn.tokenizer batch_ix = 0 for batch in loader: if batch_ix >= 50: batch_ix += 1 break if batch_ix >= 5: if not packing: break batch_ix += 1 continue print('\n') print('#' * 20, f'Batch {batch_ix}', '#' * 20) for k, v in batch.items(): print(k, v.shape, v.dtype) for sample_ix, token_sample in enumerate(batch['input_ids']): if cfg.mixture_of_denoisers.decoder_only_format: labels = batch['labels'][sample_ix] attn_inputs = batch['bidirectional_mask'][sample_ix].to( torch.bool) attn_full = 
batch['attention_mask'][sample_ix].to(torch.bool) attn_labels = torch.logical_xor(attn_inputs, attn_full) print('-' * 20, f' Sample {sample_ix} ', '-' * 20) if packing: for subseq in range( int(batch['sequence_id'][sample_ix].max()) + 1): is_subseq = batch['sequence_id'][sample_ix] == subseq print( '\033[93m{}\033[00m\n'.format('Input: '), tokenizer.decode(token_sample[torch.logical_and( is_subseq, attn_inputs)])) print( '\033[92m{}\033[00m\n'.format('Target: '), tokenizer.decode(labels[torch.logical_and( is_subseq, attn_labels)])) else: print('\033[91m{}\033[00m\n'.format('Full: '), tokenizer.decode(token_sample[attn_full])) print('\033[93m{}\033[00m\n'.format('Input: '), tokenizer.decode(token_sample[attn_inputs])) print('\033[92m{}\033[00m\n'.format('Target: '), tokenizer.decode(labels[attn_labels])) else: labels = batch['labels'][sample_ix] attn_inputs = batch['attention_mask'][sample_ix].to(torch.bool) attn_labels = batch['decoder_attention_mask'][sample_ix].to( torch.bool) print('-' * 20, f' Sample {sample_ix} ', '-' * 20) print('\033[93m{}\033[00m\n'.format('Input: '), tokenizer.decode(token_sample[attn_inputs])) print('\033[92m{}\033[00m\n'.format('Target: '), tokenizer.decode(labels[attn_labels])) batch_ix += 1 if packing: print(f'Padding = {100*(1-loader.collate_fn.efficiency):5.2f}%') print(f'Waste = {100*loader.collate_fn.waste:5.2f}%')
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/data/denoising.py
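Outside of `build_text_denoising_dataloader`, the collator can also be exercised on its own. The sketch below is not from the source; the `t5-small` tokenizer, the tiny `max_seq_length`, and the hand-made examples are assumptions chosen to mirror the UL2-style mixture described in the docstring:

```python
# Sketch: run MixtureOfDenoisersCollator directly on a few pre-tokenized examples.
from transformers import AutoTokenizer

from llmfoundry.data.denoising import MixtureOfDenoisersCollator

tokenizer = AutoTokenizer.from_pretrained('t5-small')
collator = MixtureOfDenoisersCollator(
    tokenizer=tokenizer,
    max_seq_length=128,
    decoder_only_format=False,                        # produce encoder-decoder batches
    span_mean_lengths_and_ratios=[[3, 0.15], [8, 0.5]],
    sequence_mask_ratios=0.25,
)

# Examples are pre-tokenized dicts with input_ids/attention_mask, as a
# StreamingTextDataset would yield.
examples = [tokenizer('The quick brown fox jumps over the lazy dog.')] * 4
batch = collator(examples)
print({k: v.shape for k, v in batch.items()})
# Expect input_ids/attention_mask plus labels, decoder_input_ids, decoder_attention_mask.
```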
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """Datasets for converting to MDS Shards.""" import os import warnings from typing import Dict, Iterable, Union import datasets as hf_datasets import numpy as np from torch.utils.data import IterableDataset from transformers import PreTrainedTokenizerBase class NoConcatDataset(IterableDataset): """An IterableDataset that returns text samples for MDSWriter. Returns dicts of {'text': bytes} """ def __init__(self, hf_dataset: Union[hf_datasets.IterableDataset, hf_datasets.Dataset]): self.hf_dataset = hf_dataset def __iter__(self) -> Iterable[Dict[str, bytes]]: for sample in self.hf_dataset: # print(sample) # convert to bytes to store in MDS binary format yield {'text': sample['text'].encode('utf-8')} class ConcatTokensDataset(IterableDataset): """An IterableDataset that returns token samples for MDSWriter. Returns dicts of {'tokens': bytes} To use data created by this class and written to MDS format: ```python import torch from streaming.base import StreamingDataset from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('your/tokenizer') ds = StreamingDataset(local='mds-data-folder', split='val') # note, you need to copy the numpy array because the original is non-writeable # and torch does not support non-writeable tensors, so you get a scary warning and # if you do try to write to the tensor you get undefined behavior tokens = torch.from_numpy(np.frombuffer(ds[0]['tokens'], dtype=np.int64).copy()) print(tokenizer.decode(tokens)) ``` """ def __init__( self, hf_dataset: Union[hf_datasets.IterableDataset, hf_datasets.Dataset], tokenizer: PreTrainedTokenizerBase, max_length: int, bos_text: str, eos_text: str, no_wrap: bool, ): self.hf_dataset = hf_dataset self.tokenizer = tokenizer os.environ['TOKENIZERS_PARALLELISM'] = 'false' self.max_length = max_length self.bos_text = bos_text self.eos_text = eos_text self.should_wrap = not no_wrap self.bos_tokens = self.tokenizer(self.bos_text, truncation=False, padding=False, add_special_tokens=False)['input_ids'] if len(self.bos_tokens) > 1: warnings.warn( f'You specified --concat_tokens with --bos_text, but your BOS text is not tokenizing to one token\ , instead we got {self.bos_tokens}. Quit if this was in error.') self.eos_tokens = self.tokenizer(self.eos_text, truncation=False, padding=False, add_special_tokens=False)['input_ids'] if len(self.eos_tokens) > 1: warnings.warn( f'You specified --concat_tokens with --eos_text, but your EOS text is not tokenizing to one token\ , instead we got {self.eos_tokens}. Quit if this was in error.') eos_text_provided = self.eos_text != '' bos_text_provided = self.bos_text != '' test_text = self.tokenizer('') if len(test_text['input_ids']) > 0 and (eos_text_provided or bos_text_provided): message = 'both eos and bos' if eos_text_provided and bos_text_provided else ( 'eos_text' if eos_text_provided else 'bos_text') warnings.warn( f'The provided tokenizer adds special tokens, but you also specified {message}. This may result ' 'in duplicated special tokens. Please be sure this is what you intend.' 
) def __iter__(self) -> Iterable[Dict[str, bytes]]: buffer = [] for sample in self.hf_dataset: encoded = self.tokenizer(sample['text'], truncation=False, padding=False) iids = encoded['input_ids'] buffer = buffer + self.bos_tokens + iids + self.eos_tokens while len(buffer) >= self.max_length: concat_sample = buffer[:self.max_length] buffer = buffer[self.max_length:] if self.should_wrap else [] yield { # convert to bytes to store in MDS binary format 'tokens': np.asarray(concat_sample).tobytes() }
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/data/datasets.py
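`ConcatTokensDataset` is normally fed into an MDS writer, but it can be iterated directly. A small sketch (the in-memory dataset contents and the `gpt2` tokenizer are assumptions, not part of the source):

```python
# Sketch: build a tiny HF dataset in memory and stream fixed-length token samples from it.
import datasets as hf_datasets
import numpy as np
from transformers import AutoTokenizer

from llmfoundry.data.datasets import ConcatTokensDataset

tokenizer = AutoTokenizer.from_pretrained('gpt2')
raw = hf_datasets.Dataset.from_dict({'text': ['hello world', 'goodbye world'] * 64})

ds = ConcatTokensDataset(
    hf_dataset=raw,
    tokenizer=tokenizer,
    max_length=32,
    bos_text='',
    eos_text='<|endoftext|>',  # single token for the GPT-2 vocab, so no warning is raised
    no_wrap=False,
)

for sample in ds:
    tokens = np.frombuffer(sample['tokens'], dtype=np.int64)
    print(len(tokens), tokenizer.decode(tokens))  # each emitted sample is max_length tokens
    break
```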
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 from llmfoundry.data.datasets import ConcatTokensDataset, NoConcatDataset from llmfoundry.data.denoising import (MixtureOfDenoisersCollator, build_text_denoising_dataloader) from llmfoundry.data.finetuning import (Seq2SeqFinetuningCollator, build_finetuning_dataloader) from llmfoundry.data.text_data import (StreamingTextDataset, build_text_dataloader) __all__ = [ 'MixtureOfDenoisersCollator', 'build_text_denoising_dataloader', 'Seq2SeqFinetuningCollator', 'build_finetuning_dataloader', 'StreamingTextDataset', 'build_text_dataloader', 'NoConcatDataset', 'ConcatTokensDataset', ]
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/data/__init__.py
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import os from typing import Callable, Dict, List, Literal, Optional, Tuple import numpy as np import torch class BinPackWrapper: """Utility collator for packing to reduce padding.""" def __init__(self, collator: Callable, target_batch_size: int, max_seq_len: int, pad_token_id: int, padding_side: Literal['left', 'right'], max_leftover_bins_to_keep: Optional[int] = None): self.base_collator = collator self.out_size = int(target_batch_size) self.max_seq_len = int(max_seq_len) self.pad_token_id = int(pad_token_id) self.padding_side = padding_side if self.out_size <= 0: raise ValueError(f'{target_batch_size=} must be >0.') if self.max_seq_len <= 0: raise ValueError(f'{max_seq_len=} must be >0.') if self.pad_token_id < 0: raise ValueError(f'{pad_token_id=} must be >=0.') if max_leftover_bins_to_keep is None: self.max_leftover_bins_to_keep = int(10 * self.out_size) elif max_leftover_bins_to_keep < 0: raise ValueError( f'{max_leftover_bins_to_keep=} must be >=0 or None.') else: self.max_leftover_bins_to_keep = int(max_leftover_bins_to_keep) self.n_packed_tokens = 0 self.n_total_tokens = 0 self.n_packed_examples = 0 self._leftover_bins: List[Tuple[int, Dict[str, torch.Tensor]]] = [] @property def waste(self): return 1 - (self.n_packed_tokens / self.n_total_tokens) @property def efficiency(self): return self.n_packed_tokens / (self.max_seq_len * self.n_packed_examples) def __call__( self, examples: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]: batch = self.base_collator(examples) assert 'attention_mask' in batch assert 'input_ids' in batch for key in batch.keys(): assert key in [ 'input_ids', 'labels', 'attention_mask', 'bidirectional_mask', ] # Cut everything down to size sizes, trimmed_examples = [], [] for idx in range(batch['attention_mask'].shape[0]): size, trimmed_example = extract_trim_batch_idx(batch, idx) sizes.append(size) trimmed_examples.append(trimmed_example) # Apply our CS 101 bin packing algorithm. packed_examples, n_packed_tokens, n_total_tokens, leftover_bins = first_fit_bin_packing( sizes=sizes, examples=trimmed_examples, num_bins=self.out_size, max_bin_size=self.max_seq_len, existing_bins=self._leftover_bins, ) self.n_packed_tokens += n_packed_tokens self.n_total_tokens += n_total_tokens self.n_packed_examples += self.out_size self._leftover_bins = leftover_bins[:self.max_leftover_bins_to_keep] # Re-pad to max_seq_len and batch batch = repad(packed_examples, max_seq_len=self.max_seq_len, pad_token_id=self.pad_token_id, padding_side=self.padding_side) return batch def extract_trim_batch_idx(batch: Dict[str, torch.Tensor], idx: int): example = {k: v[idx] for k, v in batch.items()} keep = example['attention_mask'] == 1 size = int(keep.sum()) trim_example = {k: v[keep] for k, v in example.items()} trim_example['sequence_id'] = torch.zeros_like(trim_example['input_ids']) return size, trim_example def combine_in_place(example: Dict[str, torch.Tensor], add_on: Dict[str, torch.Tensor]): if 'labels' in add_on: # Prevents the last token in example from being trained to # predict the first token in add_on, which would make no sense. 
add_on['labels'][0] = -100 for k in example.keys(): if k == 'sequence_id': example[k] = torch.cat( [example[k], add_on[k] + 1 + torch.max(example[k])]) else: example[k] = torch.cat([example[k], add_on[k]]) return example def first_fit_bin_packing( sizes: List[int], examples: List[Dict[str, torch.Tensor]], num_bins: int, max_bin_size: int, existing_bins: List[Tuple[int, Dict[str, torch.Tensor]]] ) -> Tuple[List[Dict[str, torch.Tensor]], int, int, List[Tuple[int, Dict[ str, torch.Tensor]]]]: # Will contain tuples (bin_size_size, packed_example) bins: List[Tuple[int, Dict[str, torch.Tensor]]] = existing_bins starting_total_bin_sizes = sum([bin_size for bin_size, _ in bins]) sizes_and_examples = [ (size, example) for size, example in zip(sizes, examples) ] sorted_sizes_and_examples = sorted(sizes_and_examples, key=lambda x: x[0], reverse=True) required_num_examples = max(0, num_bins - len(bins)) num_examples = len(sizes) if num_examples < required_num_examples: for size, example in sorted_sizes_and_examples: # Can't keep packing. All remaining items get their own bin. bins.append((size, example)) total_bin_sizes = sum([bin_size for bin_size, _ in bins]) total_new_bin_sizes = total_bin_sizes - starting_total_bin_sizes total_example_sizes = sum(sizes) if total_new_bin_sizes != total_example_sizes: raise AssertionError( f'Error in packing. {total_example_sizes=} does not equal {total_new_bin_sizes=}.' ) sorted_bins = sorted(bins, key=lambda x: x[0], reverse=True) bin_sizes, packed_examples = [], [] for bin_size, packed_example in sorted_bins: bin_sizes.append(bin_size) packed_examples.append(packed_example) # Return: # - the num_bins largest packed examples # - the total tokens in those examples # - the total size of all new examples # - leftover bins return packed_examples[:num_bins], sum( bin_sizes[:num_bins]), sum(sizes), sorted_bins[num_bins:] # Go through each item from longest to shortest. # Note: all items will either go into an existing or new bin. for i, (size, example) in enumerate(sorted_sizes_and_examples): # If we can't keep packing, all remaining items get their own bin. required_num_examples = max(0, num_bins - len(bins)) n_remaining = num_examples - i assert n_remaining >= required_num_examples if n_remaining == required_num_examples: # Can't keep packing. All remaining items get their own bin. bins.append((size, example)) continue # Add it to the first bin it fits in added = False for bidx in range(len(bins)): if bins[bidx][0] + size <= max_bin_size: bin_size, packed_example = bins.pop(bidx) bin_size = bin_size + size packed_example = combine_in_place(packed_example, example) bins.append((bin_size, packed_example)) added = True break # If it didn't fit anywhere, open a new bin if not added: bins.append((size, example)) total_bin_sizes = sum([bin_size for bin_size, _ in bins]) total_new_bin_sizes = total_bin_sizes - starting_total_bin_sizes total_example_sizes = sum(sizes) if total_new_bin_sizes != total_example_sizes: raise AssertionError( f'Error in packing. {total_example_sizes=} does not equal {total_new_bin_sizes=}.' 
) sorted_bins = sorted(bins, key=lambda x: x[0], reverse=True) bin_sizes, packed_examples = [], [] for bin_size, packed_example in sorted_bins: bin_sizes.append(bin_size) packed_examples.append(packed_example) # Return: # - the num_bins largest packed examples # - the total tokens in those examples # - the total size of all new examples # - leftover bins return packed_examples[:num_bins], sum( bin_sizes[:num_bins]), sum(sizes), sorted_bins[num_bins:] def repad(packed_examples: List[Dict[str, torch.Tensor]], max_seq_len: int, pad_token_id: int, padding_side: str) -> Dict[str, torch.Tensor]: def pad_tensor(tensor: torch.Tensor, pad_value: int): if len(tensor) == max_seq_len: return tensor t = torch.full((max_seq_len,), pad_value, dtype=tensor.dtype, device=tensor.device) if padding_side == 'left': t[-len(tensor):] = tensor elif padding_side == 'right': t[:len(tensor)] = tensor else: raise ValueError(f'Unknown {padding_side=}') return t pad_vals = { 'input_ids': pad_token_id, 'labels': -100, 'attention_mask': 0, 'bidirectional_mask': 0, 'sequence_id': -1, } keys = packed_examples[0].keys() batch = {} for key in keys: batch[key] = torch.stack([ pad_tensor(example[key], pad_vals[key]) for example in packed_examples ]) return batch if __name__ == '__main__': from argparse import ArgumentParser, Namespace from omegaconf import OmegaConf as om from llmfoundry import (build_finetuning_dataloader, build_text_denoising_dataloader) from llmfoundry.data import build_text_dataloader from llmfoundry.utils import build_tokenizer def parse_args() -> Namespace: """Parse commandline arguments.""" parser = ArgumentParser( description= 'Profile packing_ratio choices for a particular workload.') parser.add_argument( '--yaml-path', type=str, required=True, help='Path to the YAML that defines the workload to profile.') parser.add_argument('--num-devices', type=int, default=None, help='How many devices your run will use.') parser.add_argument('--min', type=float, required=True, help='Smallest packing_ratio to test. Must be >=1.') parser.add_argument( '--max', type=float, required=True, help='Largest packing_ratio to test. Must be larger than `min`.') parser.add_argument( '--num-packing-ratios', type=int, default=10, help= 'Number of packing_ratio values (spaced between `min` and `max) to try.' 
) args = parser.parse_args() if not os.path.isfile(args.yaml_path): raise FileNotFoundError( '`yaml_path` does not correspond to any existing file.') if args.num_devices < 1: raise ValueError('`num_devices` must be a positive integer.') if args.min < 1.0: raise ValueError('`min` must be >=1.0.') if args.max < args.min: raise ValueError('`max` cannot be less than `min`.') if args.num_packing_ratios < 1: raise ValueError('`num_packing_ratios` must be a positive integer.') return args def build_dataloader(cfg, tokenizer, device_batch_size): if cfg.name == 'text': return build_text_dataloader(cfg, tokenizer, device_batch_size) elif cfg.name == 'text_denoising': return build_text_denoising_dataloader(cfg, tokenizer, device_batch_size) elif cfg.name == 'finetuning': return build_finetuning_dataloader(cfg, tokenizer, device_batch_size) else: raise ValueError( f'Not sure how to build dataloader with config: {cfg}') args = parse_args() with open(args.yaml_path) as f: cfg = om.load(f) if 'parameters' in cfg: cfg = om.to_container(cfg.parameters) cfg = om.create(cfg) device_batch_size = cfg.global_train_batch_size // args.num_devices # Determine the packing_ratio values we'll try packing_ratios, raw_batch_sizes = [], [] for packing_ratio in np.linspace(args.min, args.max, args.num_packing_ratios, endpoint=True): packing_ratio = np.round(10 * packing_ratio) / 10 raw_batch_size = int(packing_ratio * device_batch_size) if raw_batch_size not in raw_batch_sizes: packing_ratios.append(packing_ratio) raw_batch_sizes.append(raw_batch_size) # Fetch a bunch of raw examples once, which we'll re-use if 'train_loader' not in cfg: raise ValueError('config must define train_loader') dataloader_cfg = cfg.train_loader max_leftovers_to_keep = dataloader_cfg.dataset.get('max_leftovers_to_keep', None) # build tokenizer if 'tokenizer' not in cfg: raise ValueError('config must define tokenizer') tokenizer = build_tokenizer(cfg.tokenizer) # Turn off packing for the dataloader (we want raw, pre-packed examples) dataloader_cfg.dataset.packing_ratio = None dataloader_cfg.dataset.max_leftovers_to_keep = None train_dataloader = build_dataloader(dataloader_cfg, tokenizer, max(raw_batch_sizes) * 100) # Get a bunch of raw examples big_batch = next(iter(train_dataloader)) def split_big_batch(raw_batch_size: int) -> List: input_ids = big_batch['input_ids'].split(raw_batch_size) batches = [{'input_ids': x} for x in input_ids] for key in big_batch.keys(): if key == 'input_ids': continue for idx, split in enumerate(big_batch[key].split(raw_batch_size)): batches[idx].update({key: split}) return batches def profile_packing(raw_batch_size: int) -> Tuple[float, float]: packer = BinPackWrapper( collator=lambda x: x, target_batch_size=device_batch_size, max_seq_len=dataloader_cfg.dataset.max_seq_len, pad_token_id=0, # <-- Doesn't need to be correct for profiling padding_side='left', # <-- Doesn't need to be correct for profiling max_leftover_bins_to_keep=max_leftovers_to_keep) # Simulate feeding the packing collator a bunch of data for batch in split_big_batch(raw_batch_size): if batch['input_ids'].shape[0] < device_batch_size: continue _ = packer(batch) # Return the padding / waste stats over that bunch of data padding_percent = 100 * (1 - packer.efficiency) waste_percent = 100 * packer.waste return padding_percent, waste_percent header = '\n\n\n packing_ratio | % PADDING | % WASTE' fstr = ' {:5.1f} | {:5.2f}% | {:6.2f}%' print(header) print('-' * len(header)) for packing_ratio, raw_batch_size in zip(packing_ratios, raw_batch_sizes): 
padding, waste = profile_packing(raw_batch_size) print(fstr.format(packing_ratio, padding, waste))
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/data/packing.py
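A minimal sketch (not from the repo) of exercising BinPackWrapper directly on toy, already-padded examples. The stack_collator stand-in, the made-up token ids, and the toy sizes are assumptions for illustration only; the import path assumes the module layout shown above.

import torch
from llmfoundry.data.packing import BinPackWrapper

MAX_SEQ_LEN = 8
PAD_ID = 0

def stack_collator(examples):
    # Stand-in base collator: stack per-example dicts into one batch dict.
    return {k: torch.stack([e[k] for e in examples]) for k in examples[0]}

def make_example(n_tokens: int):
    ids = torch.full((MAX_SEQ_LEN,), PAD_ID, dtype=torch.long)
    ids[:n_tokens] = torch.arange(1, n_tokens + 1)
    mask = torch.zeros(MAX_SEQ_LEN, dtype=torch.long)
    mask[:n_tokens] = 1
    return {'input_ids': ids, 'labels': ids.clone(), 'attention_mask': mask}

packer = BinPackWrapper(
    collator=stack_collator,
    target_batch_size=2,      # number of packed bins per output batch
    max_seq_len=MAX_SEQ_LEN,
    pad_token_id=PAD_ID,
    padding_side='right',
)

# Four raw examples (sizes 5, 3, 4, 4) pack perfectly into 2 bins of length 8.
raw = [make_example(n) for n in (5, 3, 4, 4)]
packed = packer(raw)
print(packed['input_ids'].shape)                                   # torch.Size([2, 8])
print(f'waste={packer.waste:.2f} efficiency={packer.efficiency:.2f}')  # 0.00 / 1.00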
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """Build a StreamingTextDataset dataset and dataloader for training.""" import os from itertools import islice from typing import Any, Callable, Dict, List, Optional, Sequence, Union import numpy as np import torch import transformers from omegaconf import DictConfig from omegaconf import OmegaConf as om from streaming import Stream, StreamingDataset from torch.utils.data import DataLoader from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] class StreamingTextDataset(StreamingDataset): """Generic text dataset using MosaicML's StreamingDataset. Args: tokenizer (Tokenizer): HuggingFace tokenizer to tokenize samples. max_seq_len (int): The max sequence length of each sample. streams (Sequence[Stream], optional): One or more Streams to stream/cache samples from, which may be upsampled or downsampled. StreamingDataset uses either ``streams`` or ``remote``/``local``. Defaults to ``None``. remote (str, optional): Remote path or directory to download the dataset from. If ``None``, its data must exist locally. StreamingDataset uses either ``streams`` or ``remote``/``local``. Defaults to ``None``. local (str, optional): Local working directory to download shards to. This is where shards are cached while they are being used. Uses a temp directory if not set. StreamingDataset uses either ``streams`` or ``remote``/``local``. Defaults to ``None``. split (str, optional): Which dataset split to use, if any. If provided, we stream from/to the ``split`` subdirs of ``remote`` and ``local``. Defaults to ``None``. download_retry (int): Number of download re-attempts before giving up. Defaults to ``2``. download_timeout (float): Number of seconds to wait for a shard to download before raising an exception. Defaults to ``60``. validate_hash (str, optional): Optional hash or checksum algorithm to use to validate shards. Defaults to ``None``. keep_zip (bool): Whether to keep or delete the compressed form when decompressing downloaded shards. If ``False``, keep iff remote is local or no remote. Defaults to `False``. keep_raw (bool): Whether to keep or delete the decompressed form (or only form) of shards after all their samples have been yielded this epoch. If ``False``, keep iff remote is local or no remote and no compression. Defaults to ``True``. samples_per_epoch (int, optional): Provide this field iff you are weighting sub-datasets proportionally. Defaults to ``None``. predownload (int, optional): Target number of samples ahead to download the shards of while iterating. Defaults to ``100_000``. partition_algo (str): Which partitioning algorithm to use. Defaults to ``orig``. num_canonical_nodes (int, optional): Canonical number of nodes for shuffling with resumption. Defaults to ``None``, which is interpreted as the number of nodes of the initial run. batch_size (int, optional): Batch size of its DataLoader, which affects how the dataset is partitioned over the workers. Defaults to ``None``. shuffle (bool): Whether to iterate over the samples in randomized order. Defaults to ``False``. shuffle_algo (str): Which shuffling algorithm to use. Defaults to ``py1s``. shuffle_seed (int): Seed for Deterministic data shuffling. Defaults to ``9176``. 
""" def __init__(self, tokenizer: Tokenizer, max_seq_len: int, streams: Optional[Sequence[Stream]] = None, remote: Optional[str] = None, local: Optional[str] = None, split: Optional[str] = None, download_retry: int = 2, download_timeout: float = 60, validate_hash: Optional[str] = None, keep_zip: bool = False, keep_raw: bool = True, samples_per_epoch: Optional[int] = None, predownload: int = 100_000, partition_algo: str = 'orig', num_canonical_nodes: Optional[int] = None, batch_size: Optional[int] = None, shuffle: bool = False, shuffle_algo: str = 'py1s', shuffle_seed: int = 9176, **kwargs: Dict[str, Any]): group_method = kwargs.pop('group_method', None) if group_method is not None: raise NotImplementedError( 'group_method is deprecated and has been removed.\nTo ' + 'concatenate, use the --concat_tokens ' + 'argument when creating your MDS dataset with concat_c4.py') if kwargs is not None and len(kwargs) > 0: raise ValueError( f'StreamingTextDataset() got an unexpected keyword argument: {kwargs}' ) if local is not None and (remote is None or (local == remote)): if os.path.isdir(local): contents = set(os.listdir(local)) if split not in contents: raise ValueError( f'local directory {local} does not contain split {split}' ) # Build Dataset super().__init__( streams=streams, remote=remote, local=local, split=split, download_retry=download_retry, download_timeout=download_timeout, validate_hash=validate_hash, keep_zip=keep_zip, keep_raw=keep_raw, samples_per_epoch=samples_per_epoch, predownload=predownload, partition_algo=partition_algo, num_canonical_nodes=num_canonical_nodes, batch_size=batch_size, shuffle=shuffle, shuffle_algo=shuffle_algo, shuffle_seed=shuffle_seed, ) self.tokenizer = tokenizer self.max_seq_len = max_seq_len # How to tokenize a text sample to a token sample def _tokenize(self, text_sample): if self.tokenizer._pad_token is None: # Some tokenizers (e.g. GPT2 tokenizer) have no padding token which causes bugs raise RuntimeError( 'If tokenizing on-the-fly, tokenizer must have a pad_token_id') return self.tokenizer(text_sample['text'], truncation=True, padding='max_length', max_length=self.max_seq_len) def _read_binary_tokenized_sample(self, sample): return torch.from_numpy( np.frombuffer(sample['tokens'], dtype=np.int64)[:self.max_seq_len].copy()) # How to process a sample def __getitem__(self, idx: int): sample = super().__getitem__(idx) if 'text' in sample: token_sample = self._tokenize(sample) elif 'tokens' in sample: token_sample = self._read_binary_tokenized_sample(sample) else: raise RuntimeError( 'StreamingTextDataset needs samples to have a `text` or `tokens` column' ) return token_sample class ConcatenatedSequenceCollatorWrapper: """Collator wrapper to add sequence_id to batch.""" def __init__( self, base_collator: Callable, eos_token_id=None, bos_token_id=None, ): self.base_collator = base_collator if (eos_token_id is None) and (bos_token_id is None): raise ValueError( 'Must supply a value for either eos_token_id or bos_token_id, but got None for both.' ) if (eos_token_id is not None) and (bos_token_id is not None): raise ValueError( 'Cannot use *both* EOS and BOS tokens for detecting sequence boundaries. ' +\ 'Please supply `eos_token_id` if sequences end with an EOS token, or use ' +\ '`bos_token_id` if sequences start with a BOS token.' 
) self.split_token_id = eos_token_id self.bos_mode = False if eos_token_id is None: self.split_token_id = bos_token_id self.bos_mode = True def __call__(self, examples: List[Any]) -> Dict[str, torch.Tensor]: batch = self.base_collator(examples) batch['sequence_id'] = self.get_sequence_id_from_batch(batch) return batch def get_sequence_id_from_batch( self, batch: Dict[str, torch.Tensor]) -> torch.Tensor: is_separator = torch.eq(batch['input_ids'], self.split_token_id) # type: ignore cumulative_sep = torch.cumsum(is_separator, dim=1).to(batch['input_ids'].dtype) # If separator token is bos, we're already done if self.bos_mode: return cumulative_sep # If separator token is eos, right shift 1 space left_zeros = cumulative_sep.new_zeros((cumulative_sep.shape[0], 1)) return torch.cat([left_zeros, cumulative_sep[:, :-1]], dim=1) def build_text_dataloader( cfg: DictConfig, tokenizer: Tokenizer, device_batch_size: int, ): assert cfg.name == 'text', f'Tried to build text dataloader with cfg.name={cfg.name}' if cfg.dataset.get('group_method', None) is not None: raise NotImplementedError( 'group_method is deprecated and has been removed.\nTo ' + 'concatenate, use the --concat_tokens ' + 'argument when creating your MDS dataset with convert_dataset_hf.py' ) # build streams streams_dict = cfg.dataset.get('streams', None) streams = None if streams_dict is not None: streams = [] for _, stream in streams_dict.items(): streams.append( Stream( remote=stream.get('remote', None) or cfg.dataset.get('remote', None), local=stream.get('local', None) or cfg.dataset.get('local', None), split=stream.get('split', None) or cfg.dataset.get('split', None), proportion=stream.get('proportion', None), repeat=stream.get('repeat', None), samples=stream.get('samples', None), download_retry=stream.get('download_retry', None) or cfg.dataset.get('download_retry', 2), download_timeout=stream.get('download_timeout', None) or cfg.dataset.get('download_timeout', 60), validate_hash=stream.get('validate_hash', None) or cfg.dataset.get('validate_hash', None), keep_zip=stream.get('keep_zip', None) or cfg.dataset.get('keep_zip', False), keep_raw=stream.get('keep_raw', None) or cfg.dataset.get('keep_raw', True), )) # build dataset potentially with streams dataset = StreamingTextDataset( tokenizer=tokenizer, max_seq_len=cfg.dataset.max_seq_len, streams=streams, remote=cfg.dataset.get('remote', None), local=cfg.dataset.get('local', None), split=cfg.dataset.get('split', None), download_retry=cfg.dataset.get('download_retry', 2), download_timeout=cfg.dataset.get('download_timeout', 60), validate_hash=cfg.dataset.get('validate_hash', None), keep_zip=cfg.dataset.get('keep_zip', False), keep_raw=cfg.dataset.get('keep_raw', True), samples_per_epoch=cfg.dataset.get('samples_per_epoch', None), predownload=cfg.dataset.get('predownload', 100_000), partition_algo=cfg.dataset.get('partition_algo', 'orig'), num_canonical_nodes=cfg.dataset.get('num_canonical_nodes', 128), batch_size=device_batch_size, shuffle=cfg.dataset.get('shuffle', False), shuffle_algo=cfg.dataset.get('shuffle_algo', 'py1s'), shuffle_seed=cfg.dataset.get('shuffle_seed', 9176), ) mlm_probability = cfg.dataset.get('mlm_probability', None) collate_fn = transformers.DataCollatorForLanguageModeling( tokenizer=dataset.tokenizer, mlm=mlm_probability is not None, mlm_probability=mlm_probability) eos_token_id = cfg.dataset.get('eos_token_id') bos_token_id = cfg.dataset.get('bos_token_id') if (eos_token_id is not None) or (bos_token_id is not None): # Note: Will raise an error if both are 
non-None collate_fn = ConcatenatedSequenceCollatorWrapper( base_collator=collate_fn, eos_token_id=eos_token_id, bos_token_id=bos_token_id) return DataLoader( dataset, collate_fn=collate_fn, batch_size=device_batch_size, drop_last=cfg.drop_last, num_workers=cfg.num_workers, pin_memory=cfg.get('pin_memory', True), prefetch_factor=cfg.get('prefetch_factor', 2), persistent_workers=cfg.get('persistent_workers', True), timeout=cfg.get('timeout', 0), ) # Helpful to test if your dataloader is working locally # Run `python data.py --local_path [local] [--remote_path remote, optional]` and verify that batches are printed out if __name__ == '__main__': import argparse from llmfoundry.utils.builders import build_tokenizer parser = argparse.ArgumentParser() parser.add_argument('--tokenizer', type=str, default='EleutherAI/gpt-neox-20b', help='the name of the tokenizer to use') parser.add_argument('--local_path', type=str, required=True, help='the path to the local copy of the dataset') parser.add_argument( '--remote_path', type=str, default=None, help='the path to the remote copy to stream from (optional)') parser.add_argument('--split', type=str, default='val', help='which split of the dataset to use') parser.add_argument('--max_seq_len', type=int, default=32, help='max sequence length to test') args = parser.parse_args() if args.remote_path is not None: print( f'Reading {args.split} split from {args.local_path} <- streamed from <- {args.remote_path}' ) else: print(f'Reading {args.split} split from {args.local_path}') cfg = { 'name': 'text', 'dataset': { 'local': args.local_path, 'remote': args.remote_path, 'split': args.split, 'shuffle': False, 'max_seq_len': args.max_seq_len, 'keep_zip': True, # in case we need compressed files after testing }, 'drop_last': False, 'num_workers': 4, } cfg = om.create(cfg) device_batch_size = 2 tokenizer_cfg = {'name': args.tokenizer, 'kwargs': {}} tokenizer_cfg['kwargs'] = {'model_max_length': args.max_seq_len} tokenizer_cfg = om.create(tokenizer_cfg) tokenizer = build_tokenizer(tokenizer_cfg) loader = build_text_dataloader(cfg, tokenizer, device_batch_size) tokenizer = loader.dataset.tokenizer # type: ignore for batch_ix, batch in enumerate(islice(loader, 5)): print('\n') print('#' * 20, f'Batch {batch_ix}', '#' * 20) for k, v in batch.items(): print(k, v.shape, v.dtype) for sample_ix, token_sample in enumerate(batch['input_ids']): print('-' * 20, f' Sample {sample_ix} ', '-' * 20) print(tokenizer.decode(token_sample))
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/data/text_data.py
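A small sketch (not part of the repo) of how ConcatenatedSequenceCollatorWrapper derives sequence_id when concatenated sequences are delimited by an EOS token. The token ids and the pass-through base collator below are made up for illustration.

import torch
from llmfoundry.data.text_data import ConcatenatedSequenceCollatorWrapper

EOS = 0

def passthrough_collator(examples):
    # Stand-in base collator: examples already hold token-id tensors.
    return {'input_ids': torch.stack([e['input_ids'] for e in examples])}

wrapper = ConcatenatedSequenceCollatorWrapper(base_collator=passthrough_collator,
                                              eos_token_id=EOS)

# Two concatenated sequences: [5, 6, EOS] followed by [7, 8, 9, EOS].
examples = [{'input_ids': torch.tensor([5, 6, EOS, 7, 8, 9, EOS])}]
batch = wrapper(examples)
print(batch['sequence_id'])  # tensor([[0, 0, 0, 1, 1, 1, 1]]) -- cumsum of EOS hits, right-shifted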
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """Includes code for task-specific seq-to-seq data formatting. This file provides some templates/examples of preprocessing functions that format examples for use in seq-to-seq finetuning tasks. These preprocessing functions take individual examples that contain raw text and process them into formatted examples. These functions have this basic structure: def preprocessing_fn(example: Dict) -> Dict[str, str]: # code to extract prompt/response from `example` ... return { 'prompt': <prompt>, 'response': <response>, } where `<prompt>` is a placeholder for the prompt text string that you extracted from the input example, and '<response>' is a placeholder for the response text string. Just to be clear, "prompt" represents the text you would give the model at inference time, and "response" represents the text you are training it to produce given the prompt. The key requirement of these functions is that they return a dictionary with "prompt" and "response" keys, and that the values associated with those keys are strings (i.e. text). """ import importlib import os from typing import Any, Callable, Dict, Optional, Union import datasets from omegaconf import DictConfig from streaming import StreamingDataset from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast __all__ = ['dataset_constructor'] Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] def _tokenize_formatted_example(example: Dict[str, Any], tokenizer: Tokenizer): if ('prompt' not in example) or ('response' not in example): raise KeyError( 'Unable to tokenize example because it has not been properly formatted. ' +\ '"prompt" and "response" are required keys but at least one was missing ' +\ f'from {example=}.' ) return tokenizer(text=example['prompt'], text_target=example['response']) class StreamingFinetuningDataset(StreamingDataset): """Finetuning dataset with flexible tokenization using StreamingDataset. Args: local (str): Local dataset directory where shards are cached by split. tokenizer (Tokenizer): The name of the HuggingFace tokenizer to use to tokenize samples. remote (str, optional): Download shards from this remote path or directory. If None, this rank and worker's partition of the dataset must all exist locally. Defaults to ``None``. split (str, optional): Which dataset split to use, if any. Defaults to ``None``. shuffle (bool): Whether to iterate over the samples in randomized order. Defaults to ``False``. predownload (int, optional): Target number of samples ahead to download the shards of while iterating. Defaults to ``100_000``. keep_zip (bool, optional): Whether to keep or delete the compressed file when decompressing downloaded shards. If set to None, keep if remote is local. Defaults to ``None``. download_retry (int): Number of download re-attempts before giving up. Defaults to ``2``. download_timeout (float): Number of seconds to wait for a shard to download before raising an exception. Defaults to ``60``. validate_hash (str, optional): Optional hash or checksum algorithm to use to validate shards. Defaults to ``None``. shuffle_seed (int): Seed for Deterministic data shuffling. Defaults to ``9176``. num_canonical_nodes (int, optional): Canonical number of nodes for shuffling with resumption. If ``None``, defaults to the number of nodes of the initial run. Defaults to 128. batch_size (int, optional): Batch size of its DataLoader, which affects how the dataset is partitioned over the workers. Defaults to ``None``. 
""" def __init__(self, local: str, tokenizer: Tokenizer, remote: Optional[str] = None, split: Optional[str] = None, shuffle: bool = False, predownload: Optional[int] = 100_000, keep_zip: Optional[bool] = None, download_retry: int = 2, download_timeout: float = 60, validate_hash: Optional[str] = None, shuffle_seed: int = 9176, num_canonical_nodes: Optional[int] = 128, batch_size: Optional[int] = None, **kwargs: Any): if len(kwargs) > 0: raise ValueError( f'StreamingTextDataset() got an unexpected keyword argument: {kwargs}' ) if remote is None or (local == remote): if os.path.isdir(local): contents = set(os.listdir(local)) if split not in contents: raise ValueError( f'local directory {local} does not contain split {split}' ) # Build Dataset super().__init__(local=local, remote=remote, split=split, shuffle=shuffle, predownload=predownload, keep_zip=keep_zip, download_retry=download_retry, download_timeout=download_timeout, validate_hash=validate_hash, shuffle_seed=shuffle_seed, num_canonical_nodes=num_canonical_nodes, batch_size=batch_size) self.tokenizer = tokenizer # How to process a sample def __getitem__(self, idx: int) -> Dict[str, Any]: sample = super().__getitem__(idx) return _tokenize_formatted_example(sample, tokenizer=self.tokenizer) class DatasetConstructor: def __init__(self): self._task_preprocessing_registry: Dict[str, Callable] = {} def register(self, *names: str): """Decorator for registering preprocessing functions.""" def _register_func(name: str, func: Callable) -> None: if name in self._task_preprocessing_registry: raise ValueError( f'A tokenization function has already been registered with {name=}.' ) self._task_preprocessing_registry[name] = func return def wrapper(func: Callable) -> Callable: for name in names: _register_func(name, func) return func return wrapper def print_registered_tasks(self): tasks = sorted(self._task_preprocessing_registry.keys()) print('\n'.join(tasks)) def get_preprocessing_fn_from_str(self, preprocessor: Optional[str], dataset_name: Optional[str] = None, verbose: bool = False): """Get a preprocessing function from a string. String can be either a registered function or an import path. Args: preprocessor (Optional[str]): The name of the preprocessing function, or an import path. dataset_name (Optional[str]): The dataset name to look up in the registry. verbose (bool): Whether to print verbose messages or not. Returns: Callable: The preprocessing function or None if not found. Raises: ValueError: If the preprocessing function import from the provided string fails. """ if preprocessor is None: if dataset_name is None: return None if dataset_name in self._task_preprocessing_registry: if verbose: print( f'Re-formatting dataset with "{dataset_name}" preprocessing function.' ) return self._task_preprocessing_registry[dataset_name] else: if verbose: print( 'No preprocessor was supplied and no preprocessing function ' +\ f'is registered for dataset name "{dataset_name}". No additional ' +\ 'preprocessing will be applied. If the dataset is already formatted ' +\ 'correctly, you can ignore this message.' ) return None if preprocessor in self._task_preprocessing_registry: if verbose: print( f'Re-formatting dataset with "{preprocessor}" preprocessing function.' 
) return self._task_preprocessing_registry[preprocessor] try: import_path, function_name = preprocessor.split(':', maxsplit=1) if verbose: print( f'Importing preprocessing function via: `from {import_path} import {function_name}`' ) module = importlib.import_module(import_path) preprocessing_fn = getattr(module, function_name) except Exception as e: raise ValueError( f'Failed to import preprocessing function from string = {preprocessor}.' ) from e return preprocessing_fn def build_from_hf(self, cfg: DictConfig, tokenizer: Tokenizer): """Load a HuggingFace Datasets, preprocess, and tokenize. Args: cfg (DictConfig): The dataset configuration. tokenizer (Tokenizer): The tokenizer to be used for tokenizing the dataset. Returns: Dataset: The tokenized dataset. """ dataset_name = cfg.hf_name split = cfg.split kwargs = cfg.get('hf_kwargs', {}) preprocessing_fn = self.get_preprocessing_fn_from_str( cfg.get('preprocessing_fn'), dataset_name, verbose=True) dataset = datasets.load_dataset(dataset_name, split=split, **kwargs) def dataset_mapper(example: Dict): if preprocessing_fn is not None: example = preprocessing_fn(example) return _tokenize_formatted_example(example, tokenizer) columns_to_remove = list(dataset[0].keys()) tokenized_dataset = dataset.map( dataset_mapper, batched=False, remove_columns=columns_to_remove, ) return tokenized_dataset def build_from_streaming(self, *args: Any, **kwargs: Any): return StreamingFinetuningDataset(*args, **kwargs) dataset_constructor = DatasetConstructor() @dataset_constructor.register('tatsu-lab/alpaca') def alpaca_preprocessing_function(inp: Dict): """Split out prompt/response from text.""" try: prompt, response = inp['text'].split('### Response:') prompt += '### Response:' except Exception as e: raise ValueError( f"Unable to extract prompt/response from 'text'={inp['text']}" ) from e return {'prompt': prompt, 'response': response} @dataset_constructor.register('HuggingFaceH4/databricks_dolly_15k') def dolly_preprocessing_function(inp: Dict): """Format the text string.""" PROMPT_FORMAT = 'Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:\n' try: if inp['input'] != '': instruction = inp['instruction'] + '\n' + inp['input'] else: instruction = inp['instruction'] prompt = PROMPT_FORMAT.format(instruction=instruction) response = inp['output'] except Exception as e: raise ValueError( f'Unable to extract prompt/response from {inp=}') from e return {'prompt': prompt, 'response': response} @dataset_constructor.register('bigscience/P3') def p3_preprocessing_function(inp: Dict): """Format the already-split example.""" return { 'prompt': inp['inputs'] + ':', 'response': inp['targets'], } # Muennighoff's P3 and flan datasets share a similar convention @dataset_constructor.register('Muennighoff/P3', 'Muennighoff/flan') def muennighoff_tokenize_function(inp: Dict): """Format the already-split example.""" try: prompt: str = inp['inputs'] response: str = inp['targets'] # Put a space before the response if needed transitions = (' ', '\n', '\t') if not (prompt.endswith(transitions) or response.startswith(transitions)): response = ' ' + response except Exception as e: raise ValueError( f'Unable to process prompt/response from {inp=}') from e return {'prompt': prompt, 'response': response}
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/data/finetuning/tasks.py
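A hypothetical example of registering a custom preprocessing function with the dataset_constructor registry so a finetuning config can refer to it by dataset name. The dataset name 'my-org/my-sft-data' and the 'question'/'answer' columns are assumptions, not a real dataset.

from typing import Dict
from llmfoundry.data.finetuning.tasks import dataset_constructor

@dataset_constructor.register('my-org/my-sft-data')  # hypothetical dataset name
def my_preprocessing_function(inp: Dict) -> Dict[str, str]:
    # Map this dataset's raw columns onto the required prompt/response keys.
    return {
        'prompt': inp['question'] + '\n### Answer:\n',
        'response': inp['answer'],
    }

# The same function could instead be referenced from a config as an import path
# string, e.g. preprocessing_fn: "my_module.preprocessing:my_preprocessing_function".
dataset_constructor.print_registered_tasks()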
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import logging import warnings from typing import Any, Dict, List, Optional, Union import torch from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast log = logging.getLogger(__name__) # HuggingFace hardcodes the ignore index to -100 _HF_IGNORE_INDEX = -100 class Seq2SeqFinetuningCollator: """A general-purpose collator for sequence-to-sequence training/evaluation. Args: tokenizer: A HuggingFace tokenizer. Must have a pad_token set. max_seq_len (int): The maximum sequence length of the combined context/target sequence (decoder-only format) or of each the context sequence and target sequence (encoder-decoder format). decoder_only_format (bool): Whether to format the batches for a decoder-only model (if True) or an encoder-decoder model (if False). allow_pad_trimming (bool, optional): Whether to allow the collator to trim padding, which may result in smaller but inconsistent batch sizes. Default: ``False`` ensures that all sequences are max_seq_len. separator_text (str | bool, optional): If a string is provided, it will be used to separate the context and target sequences (appended to end of context). If ``True``, will use the tokenizer's sep_token, which must be defined. Only applicable for decoder-only formatting. format_for_generation (bool, optional): Whether to format the batch such that context and target sequences remain separated, which is useful when using the context to generate text which should be compared to the target (e.g., during evaluation). Default: ``False``. batch_metadata (dict, optional): A dictionary of metadata which will be added to the batch. """ def __init__( self, tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast], max_seq_len: int, decoder_only_format: bool, allow_pad_trimming: bool = False, separator_text: Optional[Union[str, bool]] = None, format_for_generation: bool = False, batch_metadata: Optional[Dict[str, Any]] = None, ): self.tokenizer = tokenizer self.max_seq_len = max_seq_len self.decoder_only_format = decoder_only_format self.format_for_generation = format_for_generation self.batch_metadata = batch_metadata or {} # Trimming will always be skipped on at least the first __call__ self._allow_pad_trimming = allow_pad_trimming self._seen_first_batch = False illegal_keys = [ 'input_ids', 'labels', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'generate_output' ] found_keys = [] for illegal_key in illegal_keys: if illegal_key in self.batch_metadata: found_keys.append(illegal_key) if found_keys: raise ValueError( f'The following keys are in batch_metadata but are not allowed: {", ".join(found_keys)}.\n' +\ f'You cannot use keys that are used directly by the models. The prohibited keys are:\n' +\ f'{", ".join(illegal_keys)}' ) if self.format_for_generation: self.batch_metadata['generate_output'] = True if (max_seq_len % 8) != 0: log.warning( 'For performance, a max_seq_len as a multiple of 8 is recommended.' ) if self.tokenizer.pad_token_id is None: raise ValueError( f'{self.__class__.__name__} requires that the tokenizer has the pad token set, but it is None' ) self.separator_tokens = [] if separator_text and decoder_only_format: if separator_text == True: # Use the tokenizer's sep token or throw an error if undefined if self.tokenizer.sep_token_id is None: raise ValueError( 'Setting separator_text=True requires that the tokenizer has sep_token_id but it has not been set. 
' +\ 'Please pass a string argument for separator_text or set sep_token_id in the tokenizer.' ) self.separator_tokens = [self.tokenizer.sep_token_id] else: # Convert the string separator_text into token(s) self.separator_tokens = tokenizer( separator_text, add_special_tokens=False).input_ids self._warned_context = False self._warned_target = False def __call__(self, examples: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]: for check_key in ['input_ids', 'labels', 'attention_mask']: if check_key not in examples[0]: raise KeyError( f'Examples returned by dataset do not include required key: {check_key}' ) if self.decoder_only_format: batch = self._process_and_batch_decoder_only(examples) else: batch = self._process_and_batch_encoder_decoder(examples) # Add any batch_metadata batch_size = batch['input_ids'].shape[0] batch.update({ k: torch.tensor([v] * batch_size) for k, v in self.batch_metadata.items() }) return batch def _process_and_batch_decoder_only( self, examples: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]: # Steps explained in comments processed_examples = [] for example in examples: context = ensure_list(example['input_ids']) target = ensure_list(example['labels']) # First, get rid of any padding tokens context = [t for t in context if t != self.tokenizer.pad_token_id] target = [t for t in target if t != self.tokenizer.pad_token_id] # Second, append any separator tokens to the context tokens if self.separator_tokens: context = context + self.separator_tokens # Third, ensure that the target text ends with an eos tag if target[-1] != self.tokenizer.eos_token_id: target = target + [self.tokenizer.eos_token_id] n_context = len(context) n_target = len(target) if n_context >= self.max_seq_len: if not self._warned_context: warnings.warn( f'Skipping example because CONTEXT length={n_context} leaves no room ' +\ f'for TARGET tokens because max_seq_len={self.max_seq_len}. ' +\ f'If this causes downstream issues because of inconsistent batch sizes, ' +\ f'consider increasing max_seq_len or using example packing.' ) self._warned_context = True continue if self.format_for_generation: # When formatting for generation, we need to keep input_ids and # labels separate. The input_ids (context) will be fed into the # generator and the labels will be used by the eval metric. input_ids = context[-self.max_seq_len:] n_context = len(input_ids) attention_mask = [1] * n_context bidirectional_mask = [1] * n_context # Annoyingly, we need to pad the everything but input_ids # and attention_mask ourselves i_pad = [self.tokenizer.pad_token_id ] * (self.max_seq_len - n_target) z_pad = [0] * (self.max_seq_len - n_context) if self.tokenizer.padding_side == 'left': labels = i_pad + target bidirectional_mask = z_pad + bidirectional_mask else: labels = target + i_pad bidirectional_mask = bidirectional_mask + z_pad else: # We need to concatenate the context and target to get the # full input sequence, cutting off any excess tokens from the # end of the target if n_context + n_target > self.max_seq_len: old_n_target = int(n_target) n_target = self.max_seq_len - n_context if not self._warned_target: warnings.warn( f'Truncating TARGET sequence of length={old_n_target} to length={n_target}, ' +\ f'so context+target fit max_seq_len={self.max_seq_len}. 
If truncation is ' +\ f'a problem, consider increasing max_seq_len.') self._warned_target = True target = target[-n_target:] target[-1] = self.tokenizer.eos_token_id n_total = n_context + n_target input_ids = context + target labels = ([_HF_IGNORE_INDEX] * n_context) + target attention_mask = [1] * n_total # bidirectional_mask is used by our prefix lm model variants bidirectional_mask = ([1] * n_context) + ([0] * n_target) # Annoyingly, we need to pad the everything but input_ids # and attention_mask ourselves i_pad = [_HF_IGNORE_INDEX] * (self.max_seq_len - n_total) z_pad = [0] * (self.max_seq_len - n_total) if self.tokenizer.padding_side == 'left': labels = i_pad + labels bidirectional_mask = z_pad + bidirectional_mask else: labels = labels + i_pad bidirectional_mask = bidirectional_mask + z_pad # Update the example example['input_ids'] = input_ids example['labels'] = labels example['attention_mask'] = attention_mask example['bidirectional_mask'] = bidirectional_mask processed_examples.append(example) batch = self.tokenizer.pad( processed_examples, padding='max_length', max_length=self.max_seq_len, return_tensors='pt', ) # This logic prevents trimming on at least the first batch if not (self._allow_pad_trimming and self._seen_first_batch): self._seen_first_batch = True return batch self._seen_first_batch = True # The batch is ready, but we can trim padding for efficiency multiple_of = 8 n_non_padding = batch['attention_mask'].sum(dim=1).max() keep_tokens = int(multiple_of * torch.ceil(n_non_padding / multiple_of)) for k, v in batch.items(): if len(v.shape) < 2: continue if k == 'labels' and self.format_for_generation: continue if self.tokenizer.padding_side == 'left': batch[k] = v[:, -keep_tokens:].contiguous() else: batch[k] = v[:, :keep_tokens].contiguous() return batch def _process_and_batch_encoder_decoder( self, examples: List[Dict[str, Any]]) -> Dict[str, torch.Tensor]: # The encoder-decoder case is has some gotchas. # Steps are explained in comments. processed_examples = [] for example in examples: context = ensure_list(example['input_ids']) target = ensure_list(example['labels']) # ... first, get rid of any padding that was already applied context = [t for t in context if t != self.tokenizer.pad_token_id] target = [t for t in target if t != self.tokenizer.pad_token_id] # ... second, ensure that the target text ends with an eos tag if target[-1] != self.tokenizer.eos_token_id: target = target + [self.tokenizer.eos_token_id] # ... third, we need to pad labels ourselves. Because HF. if len(target) < self.max_seq_len: i_pad = [_HF_IGNORE_INDEX] * (self.max_seq_len - len(target)) target = target + i_pad else: if not self._warned_target: warnings.warn( f'Truncating TARGET sequence of length={len(target)} ' +\ f'to max_seq_len={self.max_seq_len}. If truncation is ' +\ f'a problem, consider increasing max_seq_len.') self._warned_target = True target = target[:self.max_seq_len - 1] + [self.tokenizer.eos_token_id] # We might need to truncate the context. Preserve the beginning. if len(context) > self.max_seq_len: if not self._warned_context: warnings.warn( f'Truncating CONTEXT sequence of length={len(context)} ' +\ f'to max_seq_len={self.max_seq_len}. 
If truncation is ' +\ f'a problem, consider increasing max_seq_len.') self._warned_context = True context = context[:self.max_seq_len - 1] + [self.tokenizer.eos_token_id] # Back into the example example['input_ids'] = context example['attention_mask'] = [1] * len(context) example['labels'] = target processed_examples.append(example) # Batch examples into a single dict (this also pads) batch = self.tokenizer.pad( processed_examples, padding='max_length', max_length=self.max_seq_len, return_tensors='pt', ) # We're still missing decoder_input_ids and decoder_attention_mask batch['decoder_input_ids'] = torch.cat([ torch.full((len(processed_examples), 1), self.tokenizer.pad_token_id), batch['labels'][:, :-1] ], dim=1) batch['decoder_input_ids'].masked_fill_( batch['decoder_input_ids'] == _HF_IGNORE_INDEX, self.tokenizer.pad_token_id) batch['decoder_attention_mask'] = torch.not_equal( batch['labels'], _HF_IGNORE_INDEX) # This logic prevents trimming on at least the first batch if not (self._allow_pad_trimming and self._seen_first_batch): self._seen_first_batch = True return batch self._seen_first_batch = True # The batch is now valid, but we can trim padding for efficiency multiple_of = 8 # (first for the encoder) n_non_padding = batch['attention_mask'].sum(dim=1).max() keep_tokens = int(multiple_of * torch.ceil(n_non_padding / multiple_of)) for k in ['input_ids', 'attention_mask']: batch[k] = batch[k][:, :keep_tokens].contiguous() # (then for the decoder) n_non_padding = batch['decoder_attention_mask'].sum(dim=1).max() keep_tokens = int(multiple_of * torch.ceil(n_non_padding / multiple_of)) for k in ['decoder_input_ids', 'decoder_attention_mask', 'labels']: batch[k] = batch[k][:, :keep_tokens].contiguous() return batch def ensure_list(x: Union[List, torch.Tensor]): if isinstance(x, torch.Tensor): x = list(x.flatten()) assert isinstance(x, list) return x
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/data/finetuning/collator.py
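A toy illustration (plain Python, not repo code) of the decoder-only label layout the collator produces: context tokens are masked to -100 so loss is computed only on the target, and bidirectional_mask is 1 over the context for prefix-LM variants. The token ids are made up.

_HF_IGNORE_INDEX = -100

context = [11, 12, 13]   # made-up prompt token ids
target = [21, 22, 2]     # made-up response ids ending in an eos id (2)

input_ids = context + target
labels = [_HF_IGNORE_INDEX] * len(context) + target
attention_mask = [1] * len(input_ids)
bidirectional_mask = [1] * len(context) + [0] * len(target)

print(input_ids)           # [11, 12, 13, 21, 22, 2]
print(labels)              # [-100, -100, -100, 21, 22, 2]
print(bidirectional_mask)  # [1, 1, 1, 0, 0, 0]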
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 from llmfoundry.data.finetuning.collator import Seq2SeqFinetuningCollator from llmfoundry.data.finetuning.dataloader import build_finetuning_dataloader __all__ = ['Seq2SeqFinetuningCollator', 'build_finetuning_dataloader']
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/data/finetuning/__init__.py
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import logging from typing import Union import torch from composer.utils import dist from omegaconf import DictConfig from torch.utils.data import DataLoader from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast from llmfoundry.data.finetuning.collator import Seq2SeqFinetuningCollator from llmfoundry.data.finetuning.tasks import dataset_constructor from llmfoundry.data.packing import BinPackWrapper log = logging.getLogger(__name__) # HuggingFace hardcodes the ignore index to -100 _HF_IGNORE_INDEX = -100 Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] def build_finetuning_dataloader(cfg: DictConfig, tokenizer: Tokenizer, device_batch_size: int) -> DataLoader: """Builds a finetuning dataloader for training or evaluating. The underlying dataset can be built through one of two code paths: 1. As a HuggingFace dataset, via `datasets.load_dataset(...)` 2. As a streaming dataset You will need to set slightly different dataset config fields depending on which you intend to use, as explained below. Args: cfg (DictConfig): An omegaconf dictionary used to configure the loader: cfg.name (str): The type of dataloader to build. Must = "finetuning". --- *** HuggingFace dataset config fields *** cfg.dataset.hf_name (str, optional): The name of the HuggingFace dataset to use. cfg.dataset.hf_kwargs (DictConfig, optional): Additional kwargs to pass to `datasets.load_dataset`, which can be used to load a dataset from local files. cfg.dataset.preprocessing_fn (str, optional): The name/import path of the preprocessing function to use for formatting the data examples. If ``None`` (default), the builder will use the preprocessing function registered under `hf_name` (see `tasks.py`), if one exists, otherwise it will skip preprocessing. If `preprocessing_fn` corresponds to a registered preprocessing function in `tasks.py`, the builder will use that. Otherwise, it will interpret `preprocessing_fn` as a "import.path:function_name" import path; e.g., it will call `from import.path import function_name` and use the imported function as the preprocessing function. *** Streaming dataset config fields *** cfg.dataset.remote (str, optional): Location of a MDS-formatted streaming dataset to use. Setting this will tell the builder to create a streaming dataset rather than a HuggingFace dataset. cfg.dataset.local (str, optional): Local path where remote data will be streamed to. Only valid if `cfg.dataset.remote` has also been set. *** Shared dataset configs fields *** cfg.dataset.max_seq_len (int): The maximum length of sequences in the batch. See :class:`Seq2SeqFinetuningCollator` docstring for details. cfg.dataset.decoder_only_format (bool): Whether to format the examples for a decoder-only model. See :class:`Seq2SeqFinetuningCollator` docstring for details. cfg.dataset.allow_pad_trimming (bool, optional): Whether to allow the collator to trim padding. See :class:`Seq2SeqFinetuningCollator` docstring for details. Default: ``False``. cfg.dataset.packing_ratio (float, optional): If provided, this invokes a collator wrapper that packs `device_batch_size*packing_ratio` raw examples into `device_batch_size` packed examples. This helps minimize padding while preserving sequence integrity. This adds `sequence_id` to the batch, which indicates which unique sequence each token belongs to. 
Note: Using this feature will not change device_batch_size but it will determine the number of raw examples consumed by the dataloader per batch. Some examples may be discarded if they do not fit when packing. Select `packing_ratio` **carefully** based on the dataset statistics, `max_seq_len`, and tolerance for discarding samples! The packing code in `../packing.py` provides a script that can help you choose the best `packing_ratio`. cfg.dataset.shuffle (bool): Whether to shuffle the dataset. ___ See :class:`StreamingTextDataset` for info on other standard config options within `cfg.dataset` that will be passed as kwargs if using the streaming codepath. --- See :class:`DataLoader` for standard argument options to the pytorch dataloader, such as `cfg.drop_last`, `cfg.num_workers`, etc. tokenizer (transformers.PreTrainedTokenizer): The tokenizer used to prepare the data from raw text. Any missing sentinel tokens will be added by the collator. device_batch_size (int): The size of the batches (number of examples) that the dataloader will produce. Returns: A pytorch dataloader Note: You can run the script inside `../packing.py` to quickly test the padding/waste rates for different `cfg.dataset.packing_ratio` choices, given a starting workload YAML. """ _validate_config(cfg.dataset) # Use EOS as the pad token if none exists if tokenizer.pad_token is None: # type: ignore tokenizer.pad_token = tokenizer.eos_token if cfg.dataset.get('remote') is not None: dataset = dataset_constructor.build_from_streaming( tokenizer=tokenizer, local=cfg.dataset.local, remote=cfg.dataset.get('remote', None), split=cfg.dataset.get('split'), shuffle=cfg.dataset.get('shuffle', False), predownload=cfg.dataset.get('predownload', 100_000), keep_zip=cfg.dataset.get('keep_zip', None), download_retry=cfg.dataset.get('download_retry', 2), download_timeout=cfg.dataset.get('download_timeout', 60), validate_hash=cfg.dataset.get('validate_hash', None), shuffle_seed=cfg.dataset.get('shuffle_seed', 9176), num_canonical_nodes=cfg.dataset.get('num_canonical_nodes', 128), batch_size=device_batch_size, ) collate_fn, dataloader_batch_size = _build_collate_fn( cfg.dataset, tokenizer, device_batch_size) return DataLoader( dataset, collate_fn=collate_fn, batch_size=dataloader_batch_size, drop_last=cfg.drop_last, num_workers=cfg.num_workers, pin_memory=cfg.get('pin_memory', True), prefetch_factor=cfg.get('prefetch_factor', 2), persistent_workers=cfg.get('persistent_workers', True), timeout=cfg.get('timeout', 0), ) else: dataset = dataset_constructor.build_from_hf(cfg.dataset, tokenizer) collate_fn, dataloader_batch_size = _build_collate_fn( cfg.dataset, tokenizer, device_batch_size) return DataLoader( dataset, collate_fn=collate_fn, batch_size=dataloader_batch_size, sampler=dist.get_sampler(dataset, drop_last=cfg.drop_last, shuffle=cfg.dataset.shuffle), num_workers=cfg.num_workers, pin_memory=cfg.get('pin_memory', True), prefetch_factor=cfg.get('prefetch_factor', 2), persistent_workers=cfg.get('persistent_workers', True), timeout=cfg.get('timeout', 0), ) def _validate_config(dataset_cfg: DictConfig): """Validates the dataset configuration. Makes sure that the dataset is properly configured for either a HuggingFace dataset or a streaming dataset. Must be valid for one or the other. Args: dataset_cfg (DictConfig): The dataset configuration to be validated. Raises: ValueError: If the dataset configuration does not meet the requirements. 
""" if dataset_cfg.get('hf_name') is not None: # Using the HuggingFace dataset codepath illegal_keys = ['local', 'remote'] discovered_illegal_keys = [] for key in illegal_keys: if dataset_cfg.get(key) is not None: discovered_illegal_keys.append('`' + key + '`') if discovered_illegal_keys: raise ValueError( 'The dataset config sets a value for `hf_name` as well as the ' +\ f'following keys: {", ".join(discovered_illegal_keys)}.\n' +\ 'Those keys are used when building from a streaming dataset, but ' +\ 'setting `hf_name` instructs the dataset to build from a HuggingFace dataset.' ) elif dataset_cfg.get('remote') is not None: # Using the streaming dataset codepath illegal_keys = ['hf_name', 'hf_kwargs', 'preprocessing_fn'] discovered_illegal_keys = [] for key in illegal_keys: if dataset_cfg.get(key) is not None: discovered_illegal_keys.append('`' + key + '`') if discovered_illegal_keys: raise ValueError( 'The dataset config sets a value for `remote` as well as the ' +\ f'following keys: {", ".join(discovered_illegal_keys)}.\n' +\ 'Those keys are used when building from a HuggingFace dataset, but ' +\ 'setting `remote` instructs the dataset to build from a streaming dataset.' ) if dataset_cfg.get('local') is not None: raise ValueError( 'Using a streaming dataset requires setting both `remote` and `local`, ' +\ 'but dataset.local is None.' ) else: raise ValueError( 'In the dataset config, you must set either `hf_name` to use a ' +\ 'HuggingFace dataset or set `remote` to use a streaming ' +\ 'dataset, but both were None.' ) def _build_collate_fn(dataset_cfg: DictConfig, tokenizer: Tokenizer, device_batch_size: int): collate_fn = Seq2SeqFinetuningCollator( tokenizer=tokenizer, max_seq_len=dataset_cfg.max_seq_len, decoder_only_format=dataset_cfg.decoder_only_format, allow_pad_trimming=dataset_cfg.get('allow_pad_trimming', False), ) packing_ratio = dataset_cfg.get('packing_ratio') if packing_ratio is None: if dataset_cfg.get('max_leftover_bins_to_keep') is not None: raise ValueError( 'dataset.max_leftover_bins_to_keep has been defined, ' +\ 'but dataset.packing_ratio has not been set. Please set ' +\ 'the latter to turn on packing or remove the former from the config.') return collate_fn, device_batch_size if packing_ratio == 1.0: return collate_fn, device_batch_size elif packing_ratio < 1.0: raise ValueError('packing_ratio must be >= 1, if supplied') if not dataset_cfg.decoder_only_format: raise NotImplementedError( 'On-the-fly packing is currently only supported for decoder-only formats.' 
) collate_fn = BinPackWrapper( collator=collate_fn, target_batch_size=device_batch_size, max_seq_len=dataset_cfg.max_seq_len, pad_token_id=tokenizer.pad_token_id, padding_side=tokenizer.padding_side, max_leftover_bins_to_keep=dataset_cfg.get('max_leftover_bins_to_keep'), ) n_examples_to_pack = int(device_batch_size * packing_ratio) return collate_fn, n_examples_to_pack if __name__ == '__main__': import torch from omegaconf import OmegaConf as om from llmfoundry.utils import build_tokenizer cfg = om.create({ 'dataset': { 'hf_name': 'tatsu-lab/alpaca', 'preprocessing_fn': 'llmfoundry.data.finetuning.tasks:alpaca_preprocessing_function', 'split': 'train', 'packing_ratio': 18.0, 'max_seq_len': 2048, 'decoder_only_format': True, 'separator_text': False, 'allow_pad_trimming': False, 'num_canonical_nodes': 472, 'shuffle': True, }, 'drop_last': False, 'num_workers': 0, 'pin_memory': False, 'prefetch_factor': 2, 'persistent_workers': False, 'timeout': 0 }) tokenizer_cfg = {'name': 'EleutherAI/gpt-neox-20b', 'kwargs': {}} tokenizer_cfg['kwargs'] = {'model_max_length': cfg.dataset.max_seq_len} tokenizer_cfg = om.create(tokenizer_cfg) tokenizer = build_tokenizer(tokenizer_cfg) device_batch_size = 2 dataloader = build_finetuning_dataloader(cfg, tokenizer, device_batch_size) packing = cfg.dataset.get('packing_ratio') is not None for i, batch in enumerate(dataloader): if i >= 5: break print(f'-----Batch {i}-----') for k, v in batch.items(): if isinstance(v, torch.Tensor): print(k, v.shape) else: print(k, v) for j in range(device_batch_size): print(f'--- Sample {j} ---') if cfg.dataset.decoder_only_format: if packing: for subseq in range(int(batch['sequence_id'][j].max()) + 1): is_subseq = batch['sequence_id'][j] == subseq print( '\033[93m{}\033[00m\n'.format('INPUT IDS:'), tokenizer.decode(batch['input_ids'][ j, torch.logical_and( is_subseq, batch['attention_mask'][j] == 1)], skip_special_tokens=False)) print( '\033[92m{}\033[00m\n'.format('CONTEXT: '), tokenizer.decode(batch['input_ids'][ j, torch.logical_and( is_subseq, batch['bidirectional_mask'][j] == 1)], skip_special_tokens=False)) print( '\033[91m{}\033[00m\n'.format('TARGET: '), tokenizer.decode(batch['input_ids'][ j, torch.logical_and( is_subseq, batch['labels'][j] != _HF_IGNORE_INDEX)], skip_special_tokens=False)) else: print( '\033[93m{}\033[00m\n'.format('INPUT IDS:'), tokenizer.decode( batch['input_ids'][j, batch['attention_mask'][j] == 1], skip_special_tokens=False)) print( '\033[92m{}\033[00m\n'.format('CONTEXT: '), tokenizer.decode(batch['input_ids'][ j, batch['bidirectional_mask'][j] == 1], skip_special_tokens=False)) print( '\033[91m{}\033[00m\n'.format('TARGET: '), tokenizer.decode(batch['input_ids'][ j, batch['labels'][j] != _HF_IGNORE_INDEX], skip_special_tokens=False)) else: print( '\033[92m{}\033[00m\n'.format('CONTEXT: '), tokenizer.decode( batch['input_ids'][j, batch['attention_mask'][j] == 1], skip_special_tokens=False)) print( '\033[91m{}\033[00m\n'.format('TARGET: '), tokenizer.decode(batch['labels'][ j, batch['decoder_attention_mask'][j] == 1], skip_special_tokens=False)) print(' ')
EXA-1-master
exa/libraries/llm-foundry/llmfoundry/data/finetuning/dataloader.py
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import copy import os import warnings from typing import cast from unittest import mock import pytest import torch import torch.nn as nn from composer.core.precision import get_precision_context from composer.optim import DecoupledAdamW from composer.utils import get_device, reproducibility from omegaconf import DictConfig from omegaconf import OmegaConf as om from transformers.modeling_outputs import CausalLMOutputWithPast from transformers.models.bloom.modeling_bloom import build_alibi_tensor from llmfoundry import (COMPOSER_MODEL_REGISTRY, ComposerHFCausalLM, ComposerHFPrefixLM) from llmfoundry.models.layers import NORM_CLASS_REGISTRY, build_alibi_bias from llmfoundry.models.mpt import MPTConfig, MPTForCausalLM def get_config(conf_path='scripts/train/yamls/mpt/testing.yaml') -> DictConfig: os.environ['TOKENIZERS_PARALLELISM'] = 'false' print(conf_path) with open(conf_path) as f: test_cfg = om.load(f) return cast(DictConfig, test_cfg) def get_objs(conf_path='scripts/train/yamls/mpt/testing.yaml'): warnings.filterwarnings( action='ignore', message='Torchmetrics v0.9 introduced a new argument class property') test_cfg = get_config(conf_path=conf_path) reproducibility.seed_all(test_cfg.seed) # Read FSDP Config as a dict fsdp_config = test_cfg.get('fsdp_config', None) fsdp_config = om.to_container(fsdp_config, resolve=True) if fsdp_config else None # Build Model # For fast initialization, use `meta` device print('Initializing model...') device = 'cpu' test_cfg.precision = 'fp32' test_cfg.model.attn_config = { 'attn_impl': 'torch', } # device = 'cuda' # test_cfg.precision = 'amp' test_cfg.model.init_device = device test_cfg.device = device test_cfg.global_train_batch_size = 2 test_cfg.device_eval_batch_size = 2 test_cfg.device_train_microbatch_size = 2 model = COMPOSER_MODEL_REGISTRY[test_cfg.model.name](test_cfg.model, test_cfg.tokenizer) # Optimizer assert test_cfg.optimizer.name == 'decoupled_adamw' optimizer = DecoupledAdamW(model.parameters(), lr=test_cfg.optimizer.lr, betas=test_cfg.optimizer.betas, eps=test_cfg.optimizer.eps, weight_decay=test_cfg.optimizer.weight_decay) return test_cfg, model, optimizer def gen_random_batch(batch_size, test_cfg): # generate input batch of random data, suitable for a Causal or Prefix LM batch = {} batch['input_ids'] = torch.randint( low=0, high=test_cfg.model.vocab_size, size=(batch_size, test_cfg.max_seq_len)).to(test_cfg.device) batch['labels'] = torch.randint(low=0, high=test_cfg.model.vocab_size, size=(batch_size, test_cfg.max_seq_len)).to( test_cfg.device) batch['attention_mask'] = torch.ones(size=(batch_size, test_cfg.max_seq_len), dtype=torch.int64).to(test_cfg.device) batch['bidirectional_mask'] = batch['attention_mask'].clone() batch['bidirectional_mask'][:, (test_cfg.max_seq_len // 2):] = 0 return batch def gen_random_enc_dec_batch(batch_size, vocab_size, max_seq_len, device): # generate input batch of random data, suitable for a T5 batch = {} batch['input_ids'] = torch.randint(low=0, high=vocab_size, size=(batch_size, max_seq_len)).to(device) batch['labels'] = torch.randint(low=0, high=vocab_size, size=(batch_size, max_seq_len)).to(device) batch['decoder_input_ids'] = torch.zeros_like(batch['labels']) batch['decoder_input_ids'][:, 1:] = batch['labels'][:, :-1] batch['attention_mask'] = torch.ones(size=(batch_size, max_seq_len), dtype=torch.int64).to(device) batch['decoder_attention_mask'] = batch['attention_mask'].clone() return batch def 
test_full_forward_and_backward(batch_size=2): test_cfg, model, optimizer = get_objs( conf_path='scripts/train/yamls/mpt/testing.yaml') batch = gen_random_batch(batch_size, test_cfg) assert batch['input_ids'].shape == torch.Size( [batch_size, test_cfg.max_seq_len]) model.train() original_params = next(model.parameters()).clone().data outputs = model(batch) loss = model.loss(outputs, batch) loss.backward() optimizer.step() updated_params = next(model.parameters()).clone().data assert not torch.equal(original_params, updated_params) def test_attention_mechanism(batch_size=2): test_cfg, model, _ = get_objs( conf_path='scripts/train/yamls/mpt/testing.yaml') batch = gen_random_batch(batch_size, test_cfg) model.eval() # run a partial forward where we explicitly inspect the attention_mask from the causal_attn block input_ids, attention_mask = batch['input_ids'], batch[ 'attention_mask'].bool() _, S = input_ids.size() assert ( S <= test_cfg.max_seq_len ), f'Cannot forward input with seq_len={S}, this model only supports seq_len<={test_cfg.max_seq_len}' pos = torch.arange(0, S, dtype=torch.long, device=input_ids.device).unsqueeze(0) tok_emb = model.model.transformer.wte(input_ids) pos_emb = model.model.transformer.wpe(pos) x = model.model.transformer.emb_drop(tok_emb + pos_emb) # basically the attention mask should be a tensor shape (bsz, seqlen, seqlen) # wih -inf along the upper triangle as well as wherever there are any pad tokens # and with 0 everywhere else expected_zerod_weights = nn.Transformer.generate_square_subsequent_mask(test_cfg.max_seq_len)\ .reshape(1, test_cfg.max_seq_len, test_cfg.max_seq_len) expected_zerod_weights = torch.isneginf( # type: ignore torch.cat(batch_size * [expected_zerod_weights])) torch_key_padding = torch.cat( # type: ignore test_cfg.max_seq_len * [(~attention_mask).reshape(batch_size, 1, test_cfg.max_seq_len)], axis=1) expected_zerod_weights |= torch_key_padding attn_bias, attention_mask = model.model.transformer._attn_bias( device=x.device, dtype=x.dtype, attention_mask=attention_mask) for block in model.model.transformer.blocks: a = block.norm_1(x) b, attention_weights, _ = block.attn( a, past_key_value=None, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=model.model.transformer.is_causal, needs_weights=True) zerod_weights = (attention_weights == 0) assert torch.equal(expected_zerod_weights.expand(*zerod_weights.shape), zerod_weights) x = x + block.resid_attn_dropout(b) m = block.norm_2(x) n = block.ffn(m) x = x + block.resid_ffn_dropout(n) @pytest.mark.parametrize('prefixlm', [False, True]) def test_full_forward_and_backward_gpt2_small(prefixlm, batch_size=2): warnings.filterwarnings( action='ignore', message='Torchmetrics v0.9 introduced a new argument class property') conf_path = 'scripts/train/yamls/hf_causal_lm/gpt2-small.yaml' with open(conf_path) as f: neo_cfg = om.load(f) device = 'cpu' neo_cfg.device = device neo_cfg.max_seq_len = 256 if prefixlm: neo_cfg.model.name = 'hf_prefix_lm' else: neo_cfg.model.name = 'hf_causal_lm' model = COMPOSER_MODEL_REGISTRY[neo_cfg.model.name]( neo_cfg.model, neo_cfg.tokenizer).to(device) assert neo_cfg.optimizer.name == 'decoupled_adamw' optimizer = DecoupledAdamW(model.parameters(), lr=neo_cfg.optimizer.lr, betas=neo_cfg.optimizer.betas, eps=neo_cfg.optimizer.eps, weight_decay=neo_cfg.optimizer.weight_decay) # set vocab size using model num_embeddings neo_cfg.model.vocab_size = model.model.transformer.wte.num_embeddings batch = gen_random_batch(batch_size, neo_cfg) assert batch['input_ids'].shape == 
torch.Size( [batch_size, neo_cfg.max_seq_len]) model.train() original_params = next(model.parameters()).clone().data outputs = model(batch) loss = model.loss(outputs, batch) loss.backward() optimizer.step() updated_params = next(model.parameters()).clone().data assert not torch.equal(original_params, updated_params) def test_full_forward_and_backward_t5_small(batch_size=2): warnings.filterwarnings( action='ignore', message='Torchmetrics v0.9 introduced a new argument class property') from omegaconf import OmegaConf cfg = OmegaConf.create({ 'model': { 'pretrained_model_name_or_path': 't5-small', 'pretrained': False, 'z_loss': 0.0001, }, 'optimizer': { 'lr': 0.0001, 'betas': [0.9, 0.99], 'eps': 1e-6, 'weight_decay': 0.00001 }, 'tokenizer': { 'name': 't5-small', } }) device = 'cpu' max_seq_len = 16 model = COMPOSER_MODEL_REGISTRY['hf_t5'](cfg.model, cfg.tokenizer).to(device) optimizer = DecoupledAdamW(model.parameters(), lr=cfg.optimizer.lr, betas=cfg.optimizer.betas, eps=cfg.optimizer.eps, weight_decay=cfg.optimizer.weight_decay) # set vocab size using model num_embeddings batch = gen_random_enc_dec_batch(batch_size, model.model.config.vocab_size, max_seq_len, device) assert batch['input_ids'].shape == torch.Size([batch_size, max_seq_len]) model.train() original_params = next(model.parameters()).clone().data outputs = model(batch) loss = model.loss(outputs, batch) loss.backward() optimizer.step() updated_params = next(model.parameters()).clone().data assert not torch.equal(original_params, updated_params) @pytest.mark.parametrize( 'attn_impl,precision', [('torch', torch.float16), ('torch', torch.bfloat16), pytest.param('flash', torch.float16, marks=pytest.mark.gpu), pytest.param('flash', torch.bfloat16, marks=pytest.mark.gpu)]) def test_determinism(attn_impl: str, precision): if not torch.cuda.is_available(): pytest.skip( 'This test requires CUDA to be available in order to run with bfloat16 precision.' ) reproducibility.seed_all(1111) conf_path = 'scripts/train/yamls/mpt/testing.yaml' with open(conf_path) as f: test_cfg = om.load(f) test_cfg.model.attn_config = { 'attn_impl': attn_impl, } test_cfg.model.init_device = 'cuda:0' test_cfg.device = 'cuda:0' model_1 = COMPOSER_MODEL_REGISTRY[test_cfg.model.name](test_cfg.model, test_cfg.tokenizer) model_2 = copy.deepcopy(model_1) optimizer_1 = DecoupledAdamW(model_1.parameters(), lr=test_cfg.optimizer.lr, betas=test_cfg.optimizer.betas, eps=test_cfg.optimizer.eps, weight_decay=test_cfg.optimizer.weight_decay) optimizer_2 = DecoupledAdamW(model_2.parameters(), lr=test_cfg.optimizer.lr, betas=test_cfg.optimizer.betas, eps=test_cfg.optimizer.eps, weight_decay=test_cfg.optimizer.weight_decay) for i in range(5): with torch.cuda.amp.autocast(True, precision): batch = gen_random_batch(2, test_cfg) output_1 = model_1(batch) output_2 = model_2(batch) assert output_1.logits.allclose(output_2.logits, rtol=0.0, atol=0.0), f'differed at step {i}' loss_1 = model_1.loss(output_1, batch) loss_2 = model_2.loss(output_2, batch) assert loss_1 == loss_2 loss_1.backward() loss_2.backward() optimizer_1.step() optimizer_2.step() @pytest.mark.gpu def test_loss_fn(): """Tests the Fused CrossEntropy vs torch.nn.CrossEntropy loss function. We provide non-zero tolerances to account for small numerics differences between the two loss implementations. 
""" try: from flash_attn.losses.cross_entropy import CrossEntropyLoss as FusedCrossEntropyLoss # type: ignore # isort: skip except: pytest.skip('Fused cross entropy was not installed') reproducibility.seed_all(1111) conf_path = 'scripts/train/yamls/mpt/testing.yaml' with open(conf_path) as f: test_cfg = om.load(f) test_cfg.device = 'cuda:0' test_cfg.model.init_device = 'cuda:0' test_cfg.model.init_config = { 'name': 'baseline_', 'init_std': 0.02, } model_1 = COMPOSER_MODEL_REGISTRY[test_cfg.model.name](test_cfg.model, test_cfg.tokenizer) model_2 = copy.deepcopy(model_1) assert isinstance(model_1.loss_fn, torch.nn.CrossEntropyLoss) model_2.loss_fn = FusedCrossEntropyLoss(ignore_index=-100) optimizer_1 = DecoupledAdamW(model_1.parameters(), lr=test_cfg.optimizer.lr, betas=test_cfg.optimizer.betas, eps=test_cfg.optimizer.eps, weight_decay=test_cfg.optimizer.weight_decay) optimizer_2 = DecoupledAdamW(model_2.parameters(), lr=test_cfg.optimizer.lr, betas=test_cfg.optimizer.betas, eps=test_cfg.optimizer.eps, weight_decay=test_cfg.optimizer.weight_decay) for i in range(25): batch = gen_random_batch(2, test_cfg) output_1 = model_1(batch) output_2 = model_2(batch) assert output_1.logits.allclose(output_2.logits, rtol=1e-4, atol=1e-4), f'differed at step {i}' loss_1 = model_1.loss(output_1, batch) loss_2 = model_2.loss(output_2, batch) assert loss_1.allclose(loss_2, rtol=1e-3, atol=1e-3), f'differed at step {i}' loss_1.backward() loss_2.backward() optimizer_1.step() optimizer_2.step() for p1, p2 in zip(model_1.parameters(), model_2.parameters()): assert p1.data.shape == p2.data.shape assert p1.data.allclose(p2.data, rtol=1e-5, atol=1e-4), f'differed at step {i}' @pytest.mark.parametrize('prefixlm', [False, True]) def test_opt_wrapping(prefixlm): conf = { 'model': { 'name': 'hf_prefix_lm' if prefixlm else 'hf_causal_lm', 'pretrained_model_name_or_path': 'facebook/opt-125m', 'pretrained': 'false' }, 'tokenizer': { 'name': 'facebook/opt-125m' } } config = DictConfig(conf) if prefixlm: model = ComposerHFPrefixLM(config.model, config.tokenizer) else: model = ComposerHFCausalLM(config.model, config.tokenizer) # check that all the modules we except are blocked from FSDP wrapping assert not model.model.model._fsdp_wrap assert not model.model.model.decoder._fsdp_wrap assert not model.model.model.decoder.embed_tokens._fsdp_wrap assert not model.model.lm_head._fsdp_wrap @pytest.mark.parametrize('norm_type', NORM_CLASS_REGISTRY.keys()) @pytest.mark.parametrize('no_bias', [False, True]) def test_mpt_creation(norm_type, no_bias): # Test that the config constructs the model as expected. 
hf_config = MPTConfig( init_device='cpu', d_model=128, n_heads=4, n_layers=2, expansion_ratio=2, max_seq_len=2048, emb_pdrop=0.1, resid_pdrop=0.2, attn_config={ 'attn_impl': 'torch', }, norm_type=norm_type, no_bias=no_bias, ) mpt = MPTForCausalLM(hf_config) assert mpt.config.d_model == 128 assert mpt.config.n_heads == 4 assert mpt.config.n_layers == 2 assert mpt.config.expansion_ratio == 2 assert mpt.config.max_seq_len == 2048 assert mpt.transformer.wte.weight.shape == torch.Size( # type: ignore [hf_config.vocab_size, hf_config.d_model]) assert mpt.transformer.wpe.weight.shape == torch.Size( # type: ignore [hf_config.max_seq_len, hf_config.d_model]) assert mpt.transformer.emb_drop.p == 0.1 # type: ignore assert len(mpt.transformer.blocks) == 2 # type: ignore d_model = hf_config.d_model for block in mpt.transformer.blocks: # type: ignore assert block.norm_1.weight.shape == torch.Size([d_model ]) # type: ignore assert block.norm_2.weight.shape == torch.Size([d_model ]) # type: ignore assert block.ffn.up_proj.weight.shape == torch.Size( # type: ignore [hf_config.d_model * hf_config.expansion_ratio, hf_config.d_model]) assert block.ffn.down_proj.weight.shape == torch.Size( # type: ignore [hf_config.d_model, hf_config.d_model * hf_config.expansion_ratio]) assert block.resid_attn_dropout.p == 0.2 # type: ignore assert block.resid_ffn_dropout.p == 0.2 # type: ignore @pytest.mark.parametrize('attention_impl,device', [('torch', 'cpu'), ('flash', 'gpu'), ('triton', 'gpu'), ('torch', 'gpu')]) @pytest.mark.parametrize('alibi', [True, False]) def test_forward_with_padding(attention_impl, device, alibi): # Test that different placement of padding does not affect the output. if not torch.cuda.is_available() and device == 'gpu': pytest.skip( f'This test requires CUDA to be available in order to run with {attention_impl} attention.' 
) if alibi and attention_impl == 'flash': pytest.skip(f'alibi only implemented with torch and triton attention.') reproducibility.seed_all(1234) device = get_device(device) hf_config = MPTConfig( init_device='cpu', d_model=128, n_heads=1, n_layers=2, expansion_ratio=2, max_seq_len=2048, emb_pdrop=0.1, resid_pdrop=0.2, attn_config={ 'attn_impl': attention_impl, 'alibi': alibi, }, init_config={ 'name': 'baseline_', 'init_std': 0.02, }, ) mpt = MPTForCausalLM(hf_config) mpt.eval() mpt = device.module_to_device(mpt) with get_precision_context('amp_bf16' if device.name == 'gpu' else 'fp32'): # padding on the right side of the input right_padding_input_ids = torch.tensor( [[11274, 16390, 11, 50256, 50256, 50256], [11274, 16390, 11, 50256, 50256, 50256]]) right_padding_input_ids = device.tensor_to_device( right_padding_input_ids) right_padding_attention_mask = torch.tensor([[1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0]]).bool() right_padding_attention_mask = device.tensor_to_device( right_padding_attention_mask) # padding in the middle of the input middle_padding_input_ids = torch.tensor( [[11274, 16390, 50256, 50256, 50256, 11], [11274, 16390, 50256, 50256, 50256, 11]]) middle_padding_input_ids = device.tensor_to_device( middle_padding_input_ids) middle_padding_attention_mask = torch.tensor([[1, 1, 0, 0, 0, 1], [1, 1, 0, 0, 0, 1]]).bool() middle_padding_attention_mask = device.tensor_to_device( middle_padding_attention_mask) # padding on the left side of the input left_padding_input_ids = torch.tensor( [[50256, 50256, 50256, 11274, 16390, 11], [50256, 50256, 50256, 11274, 16390, 11]]) left_padding_input_ids = device.tensor_to_device(left_padding_input_ids) left_padding_attention_mask = torch.tensor([[0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1]]).bool() left_padding_attention_mask = device.tensor_to_device( left_padding_attention_mask) # a single batch with padding in different places batched_input_ids = torch.tensor([ [11274, 16390, 11, 50256, 50256, 50256], # right padding [11274, 16390, 50256, 50256, 50256, 11] ]) # middle padding batched_input_ids = device.tensor_to_device(batched_input_ids) batched_attention_mask = torch.tensor([[1, 1, 1, 0, 0, 0], [1, 1, 0, 0, 0, 1]]).bool() batched_attention_mask = device.tensor_to_device(batched_attention_mask) right_padding_output = mpt( right_padding_input_ids, attention_mask=right_padding_attention_mask).logits middle_padding_output = mpt( middle_padding_input_ids, attention_mask=middle_padding_attention_mask).logits left_padding_output = mpt( left_padding_input_ids, attention_mask=left_padding_attention_mask).logits batched_output = mpt(batched_input_ids, attention_mask=batched_attention_mask).logits # check that right padding and left padding produce the same output assert torch.allclose(right_padding_output[0, :3], left_padding_output[0, 3:], atol=1e-6 if attention_impl == 'torch' else 1e-8) if not alibi: # check that right padding and middle padding produce the same output # Note: alibi not implemented for middle padding. assert torch.allclose( right_padding_output[0, :3], middle_padding_output[0, [0, 1, 5]], atol=1e-6 if attention_impl == 'torch' else 1e-8) # check that right padding and right padding in a batch produce the same output assert torch.allclose(right_padding_output[0, :3], batched_output[0, :3], atol=1e-6 if attention_impl == 'torch' else 1e-8) if not alibi: # check that middle padding and middle padding in a batch produce the same output # Note: alibi not implemented for middle padding. 
assert torch.allclose( middle_padding_output[0], batched_output[1, :], atol=1e-6 if attention_impl == 'torch' else 1e-8) @pytest.mark.parametrize('attention_impl', ['torch', 'triton']) def test_advanced_mask_building(attention_impl): # Test that the correct attention mask is created when both # prefix_mask and sequence_id are used hf_config = MPTConfig( init_device='cpu', d_model=16, n_heads=1, n_layers=1, expansion_ratio=1, max_seq_len=256, emb_pdrop=0.0, resid_pdrop=0.0, attn_config={ 'attn_impl': attention_impl, 'prefix_lm': True, 'attn_uses_sequence_id': True, 'alibi': False, }, ) mpt = MPTForCausalLM(hf_config) mpt.eval() prefix_mask = torch.ByteTensor([[1, 1, 0, 0, 1, 1, 1, 0]]) sequence_id = torch.LongTensor([[0, 0, 0, 0, 1, 1, 1, 1]]) attn_bias, _ = mpt.transformer._attn_bias(device=mpt.device, dtype=torch.float32, attention_mask=None, prefix_mask=prefix_mask, sequence_id=sequence_id) assert isinstance(attn_bias, torch.Tensor) assert attn_bias.shape == torch.Size([1, 1, 8, 8]) # We'll construct the expected value of attn_bias and then compare. can_attend = torch.tensor([ [1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 1, 1, 1, 0], [0, 0, 0, 0, 1, 1, 1, 1], ]) can_attend = can_attend.bool().view(1, 1, 8, 8) expected_attn_bias = torch.zeros_like(attn_bias) expected_attn_bias = expected_attn_bias.masked_fill( torch.logical_not(can_attend), torch.finfo(attn_bias.dtype).min) assert torch.equal(attn_bias, expected_attn_bias) @pytest.mark.parametrize('attention_impl,device', [('torch', 'cpu'), ('flash', 'gpu'), ('triton', 'gpu'), ('torch', 'gpu')]) @pytest.mark.parametrize('alibi', [True, False]) def test_generate(attention_impl, device, alibi): # Test that generate works, and produces the same output with or without # padding in the input. if not torch.cuda.is_available() and device == 'gpu': pytest.skip( f'This test requires CUDA to be available in order to run with {attention_impl} attention.' 
) if alibi and attention_impl == 'flash': pytest.skip(f'alibi only implemented with torch and triton attention.') reproducibility.seed_all(1234) device = get_device(device) hf_config = MPTConfig( init_device='cpu', d_model=128, n_heads=4, n_layers=2, expansion_ratio=2, max_seq_len=2048, emb_pdrop=0.1, resid_pdrop=0.2, attn_config={ 'attn_impl': attention_impl, 'alibi': alibi, }, ) mpt = MPTForCausalLM(hf_config) mpt.eval() mpt = device.module_to_device(mpt) # padding on the left of the input left_padding_input_ids = torch.tensor( [[50256, 50256, 50256, 11274, 16390, 11], [50256, 50256, 50256, 11274, 16390, 11]]) left_padding_input_ids = device.tensor_to_device(left_padding_input_ids) left_padding_attention_mask = torch.tensor([[0, 0, 0, 1, 1, 1], [0, 0, 0, 1, 1, 1]]) left_padding_attention_mask = device.tensor_to_device( left_padding_attention_mask) # no padding in the input no_padding_input_ids = torch.tensor([[11274, 16390, 11], [11274, 16390, 11]]) no_padding_input_ids = device.tensor_to_device(no_padding_input_ids) no_padding_attention_mask = torch.tensor([[1, 1, 1], [1, 1, 1]]) no_padding_attention_mask = device.tensor_to_device( no_padding_attention_mask) # a single batch with different amounts of left padding in the input batched_input_ids = torch.tensor([[50256, 50256, 50256, 11274, 16390, 11], [50256, 50256, 16, 11274, 16390, 11]]) batched_input_ids = device.tensor_to_device(batched_input_ids) batched_attention_mask = torch.tensor([[0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 1, 1]]).bool() batched_attention_mask = device.tensor_to_device(batched_attention_mask) with get_precision_context('amp_bf16' if device.name == 'gpu' else 'fp32'): # check that a batch with different amounts of padding doesn't crash # and produces the right output shape batched_generation = mpt.generate(input_ids=batched_input_ids, attention_mask=batched_attention_mask, max_new_tokens=5, use_cache=False) assert batched_generation.shape == (2, 6 + 5) reproducibility.seed_all(1234) generation_with_left_padding = mpt.generate( input_ids=left_padding_input_ids, attention_mask=left_padding_attention_mask, max_new_tokens=5, use_cache=False) assert generation_with_left_padding.shape == (2, 6 + 5) reproducibility.seed_all(1234) generation_with_no_padding = mpt.generate( input_ids=no_padding_input_ids, attention_mask=no_padding_attention_mask, max_new_tokens=5, use_cache=False) assert generation_with_no_padding.shape == (2, 3 + 5) # check that left padding and no padding produce the same output assert generation_with_no_padding[:, 3:].equal( generation_with_left_padding[:, 6:]) def check_hf_model_equivalence(model1, model2): # Checks that two huggingface models are equivalent (config and # parameters) expected_model_config_dict = model1.config.to_dict() new_model_config_dict = model2.config.to_dict() # this key just says the folder it was loaded from, which is a tmp dir during pytest del expected_model_config_dict['_name_or_path'] del new_model_config_dict['_name_or_path'] assert expected_model_config_dict == new_model_config_dict assert sum(p.numel() for p in model1.parameters()) == sum( p.numel() for p in model2.parameters()) assert all( type(module1) == type(module2) for module1, module2 in zip(model1.modules(), model2.modules())) for p1, p2 in zip(model1.parameters(), model2.parameters()): torch.testing.assert_close(p1, p2) def test_save_from_pretrained(tmp_path): # Test that MPT can be used with the HuggingFace # save_pretrained/from_pretrained api. 
hf_config = MPTConfig( init_device='cpu', d_model=128, n_heads=4, n_layers=2, expansion_ratio=2, max_seq_len=2048, emb_pdrop=0.1, resid_pdrop=0.2, attn_config={ 'attn_impl': 'torch', }, ) mpt = MPTForCausalLM(hf_config) mpt.save_pretrained(tmp_path / 'test-save-pretrained') mpt2 = MPTForCausalLM.from_pretrained(tmp_path / 'test-save-pretrained') check_hf_model_equivalence(mpt, mpt2) @pytest.mark.parametrize('alibi', [True, False]) def test_forward_with_cache_and_padding(alibi): # Tests that the result is the same with or without padding when using kv caching hf_config = MPTConfig( init_device='cpu', d_model=128, n_heads=4, n_layers=2, expansion_ratio=2, max_seq_len=2048, emb_pdrop=0.1, resid_pdrop=0.2, attn_config={ 'attn_impl': 'torch', 'alibi': alibi, }, use_cache=True, init_config={ 'name': 'baseline_', 'init_std': 0.02, }, ) mpt = MPTForCausalLM(hf_config) mpt.eval() first_input_ids_no_padding = torch.tensor([[11274, 16390, 11]]) first_attention_mask_no_padding = torch.tensor([[1, 1, 1]]).bool() # start with passing the first three tokens through (no padding) first_output_no_padding = mpt( first_input_ids_no_padding, attention_mask=first_attention_mask_no_padding) second_input_ids_no_padding = torch.tensor([[11274, 16390, 11, 11274]]) second_attention_mask_no_padding = torch.tensor([[1, 1, 1, 1]]).bool() # pass through the fourth token by itself, using the key-value cache (no padding) second_output_no_padding = mpt( second_input_ids_no_padding[:, -1].unsqueeze(-1), attention_mask=second_attention_mask_no_padding, past_key_values=first_output_no_padding.past_key_values) first_input_ids_padding = torch.tensor([[50256, 11274, 16390, 11]]) first_attention_mask_padding = torch.tensor([[0, 1, 1, 1]]).bool() # start with passing the first three tokens through (with left padding) first_output_padding = mpt(first_input_ids_padding, attention_mask=first_attention_mask_padding) second_input_ids_padding = torch.tensor([[50256, 11274, 16390, 11, 11274]]) second_attention_mask_padding = torch.tensor([[0, 1, 1, 1, 1]]).bool() # pass through the fourth token by itself, using the key-value cache (with left padding) second_output_padding = mpt( second_input_ids_padding[:, -1].unsqueeze(-1), attention_mask=second_attention_mask_padding, past_key_values=first_output_padding.past_key_values) # check that the outputs are the same with or without padding torch.testing.assert_close(second_output_no_padding.logits, second_output_padding.logits[:, -1, :].unsqueeze(1), atol=1e-6, rtol=1e-6) @pytest.mark.parametrize('attention_impl,device', [('torch', 'cpu'), ('flash', 'gpu'), ('triton', 'gpu'), ('torch', 'gpu')]) @pytest.mark.parametrize('alibi', [True, False]) def test_forward_with_cache(attention_impl, device, alibi): # Test that model forward with and without the key-value cache produces the # same output. if not torch.cuda.is_available() and device == 'gpu': pytest.skip( f'This test requires CUDA to be available in order to run with {attention_impl} attention.' 
) if alibi and attention_impl == 'flash': pytest.skip(f'alibi only implemented with torch and triton attention.') device = get_device(device) hf_config = MPTConfig( init_device='cpu', d_model=128, n_heads=4, n_layers=2, expansion_ratio=2, max_seq_len=2048, emb_pdrop=0.1, resid_pdrop=0.2, attn_config={ 'attn_impl': attention_impl, 'alibi': alibi, }, attn_impl=attention_impl, alibi=alibi, use_cache=True, init_config={ 'name': 'baseline_', 'init_std': 0.02, }, ) reproducibility.seed_all(1234) mpt = MPTForCausalLM(hf_config) mpt.eval() mpt = device.module_to_device(mpt) with get_precision_context('amp_bf16' if device.name == 'gpu' else 'fp32'): reproducibility.seed_all(1234) first_input_ids = torch.tensor([[11274, 16390, 11]]) first_input_ids = device.tensor_to_device(first_input_ids) first_attention_mask = torch.tensor([[1, 1, 1]]).bool() first_attention_mask = device.tensor_to_device(first_attention_mask) # start with passing the first three tokens through first_output = mpt(first_input_ids, attention_mask=first_attention_mask) assert first_output.logits.shape == (1, 3, hf_config.vocab_size) assert len(first_output.past_key_values) == 2 assert all( len(past_key_value) == 2 for past_key_value in first_output.past_key_values) assert all(past_key_value[0].shape == (1, 3, 128) for past_key_value in first_output.past_key_values) assert all(past_key_value[1].shape == (1, 3, 128) for past_key_value in first_output.past_key_values) reproducibility.seed_all(1234) second_input_ids = torch.tensor([[11274, 16390, 11, 11274]]) second_input_ids = device.tensor_to_device(second_input_ids) second_attention_mask = torch.tensor([[1, 1, 1, 1]]).bool() second_attention_mask = device.tensor_to_device(second_attention_mask) # pass through the fourth token by itself, using the key-value cache second_output = mpt(second_input_ids[:, -1].unsqueeze(-1), attention_mask=second_attention_mask, past_key_values=first_output.past_key_values) assert second_output.logits.shape == (1, 1, hf_config.vocab_size) assert len(second_output.past_key_values) == 2 assert all( len(past_key_value) == 2 for past_key_value in second_output.past_key_values) assert all(past_key_value[0].shape == (1, 4, 128) for past_key_value in second_output.past_key_values) assert all(past_key_value[1].shape == (1, 4, 128) for past_key_value in second_output.past_key_values) reproducibility.seed_all(1234) # pass through the first four tokens without the key-value cache full_output = mpt(second_input_ids, attention_mask=second_attention_mask) # check that the output is the same whether using the key-value cache or not torch.testing.assert_close( second_output.logits, full_output.logits[:, -1, :].unsqueeze(1), atol=1e-2, rtol=1e-2, ) @pytest.mark.parametrize('alibi', [True, False]) def test_generate_with_past_kv(alibi): hf_config = MPTConfig( init_device='cpu', d_model=128, n_heads=4, n_layers=2, expansion_ratio=2, max_seq_len=2048, emb_pdrop=0.1, resid_pdrop=0.2, attn_config={ 'attn_impl': 'torch', 'alibi': alibi, }, use_cache=True, init_config={ 'name': 'baseline_', 'init_std': 0.02, }, ) mpt = MPTForCausalLM(hf_config) mpt.eval() # no padding in the input no_padding_input_ids = torch.tensor([[11274, 16390, 11]]) no_padding_attention_mask = torch.tensor([[1, 1, 1]]) with mock.patch.object(MPTForCausalLM, 'forward', autospec=True) as forward_mocked: forward_mocked.return_value = CausalLMOutputWithPast( logits=torch.randn((1, 3, hf_config.vocab_size)), past_key_values=[(torch.randn(1, 3, hf_config.d_model), torch.randn(1, 3, hf_config.d_model)) for _ in 
range(hf_config.n_layers)]) _ = mpt.generate(input_ids=no_padding_input_ids, attention_mask=no_padding_attention_mask, max_new_tokens=2) assert forward_mocked.call_count == 2 _, _, kwargs = forward_mocked.mock_calls[0] assert kwargs['past_key_values'] is None _, _, kwargs = forward_mocked.mock_calls[1] assert kwargs['past_key_values'] is not None assert len(kwargs['past_key_values']) == hf_config.n_layers assert kwargs['past_key_values'][0][0].shape == (1, 3, hf_config.d_model) @pytest.mark.parametrize('generation_kwargs', [{ 'max_new_tokens': 2, 'num_beams': 4 }, { 'max_new_tokens': 2, 'top_k': 5, 'penalty_alpha': 0.4 }, { 'do_sample': True, 'top_p': 0.95 }]) @pytest.mark.parametrize('alibi', [True, False]) def test_generation_kwargs_dont_crash(generation_kwargs, alibi): hf_config = MPTConfig( init_device='cpu', d_model=128, n_heads=4, n_layers=2, expansion_ratio=2, max_seq_len=2048, emb_pdrop=0.1, resid_pdrop=0.2, attn_config={ 'attn_impl': 'torch', 'alibi': alibi, }, use_cache=True, ) mpt = MPTForCausalLM(hf_config) mpt.eval() # no padding in the input no_padding_input_ids = torch.tensor([[11274, 16390, 11]]) no_padding_attention_mask = torch.tensor([[1, 1, 1]]) _ = mpt.generate(input_ids=no_padding_input_ids, attention_mask=no_padding_attention_mask, **generation_kwargs) @pytest.mark.gpu @pytest.mark.parametrize('attention_impl', ['torch', 'flash', 'triton']) @pytest.mark.parametrize('alibi', [True, False]) def test_model_to(attention_impl, alibi): # test that moving the model to diff devices and dtypes in diff ways does not break the model if not torch.cuda.is_available(): pytest.skip( f'This test requires CUDA to be available in order to run with {attention_impl} attention.' ) if alibi and attention_impl == 'flash': pytest.skip(f'alibi only implemented with torch and triton attention.') hf_config = MPTConfig( init_device='cpu', d_model=128, n_heads=4, n_layers=2, expansion_ratio=2, max_seq_len=2048, emb_pdrop=0.1, resid_pdrop=0.2, attn_config={ 'attn_impl': attention_impl, 'alibi': alibi, }, use_cache=True, init_config={ 'name': 'baseline_', 'init_std': 0.02, }, ) reproducibility.seed_all(1234) mpt = MPTForCausalLM(hf_config) mpt = mpt.bfloat16() mpt = mpt.to('cuda') mpt.eval() # gen input data input_ids = torch.tensor([[11274, 16390, 11]]).to('cuda') attention_mask = torch.tensor([[1, 1, 1]]).bool().to('cuda') _ = mpt(input_ids, attention_mask=attention_mask) # move the model around using different methods mpt = mpt.to('cpu') # verify the model still works if attention_impl == 'torch': _ = mpt(input_ids.to('cpu'), attention_mask=attention_mask.to('cpu')) mpt = mpt.cuda() # verify the model still works if attention_impl == 'torch': _ = mpt(input_ids, attention_mask=attention_mask) mpt = mpt.to('cpu') mpt = mpt.float() # verify the model still works if attention_impl == 'torch': _ = mpt(input_ids.to('cpu'), attention_mask=attention_mask.to('cpu')) mpt = mpt.half() mpt = mpt.to(0) # move to rank0 mpt = mpt.bfloat16() # verify the model still works _ = mpt(input_ids, attention_mask=attention_mask) def test_alibi_vs_hf(): # compare alibi-bias generation vs HF Bloom model alibi-bias for diff seq len and n_heads for n_heads in range(1, 64): for seq_len in [1, 2, 8, 13, 64, 195, 256]: # hf bloom alibi bais alibi_bias_hf = build_alibi_tensor( torch.ones(seq_len)[None, ...], n_heads, torch.float32) alibi_bias_hf = alibi_bias_hf - alibi_bias_hf.max( dim=2, keepdim=True).values # mosaicml alibi bais alibi_bias_m = build_alibi_bias(n_heads, seq_len, dtype=torch.float32) alibi_bias_m = 
alibi_bias_m[0] torch.testing.assert_close(alibi_bias_hf, alibi_bias_m)
EXA-1-master
exa/libraries/llm-foundry/tests/test_model.py
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0

import os
from typing import List, Optional

import pytest
from composer.utils import reproducibility

# Allowed options for pytest.mark.world_size()
# Important: when updating this list, make sure to also update ./.ci/test.sh
# (so tests of all world sizes will be executed) and tests/README.md
# (so the documentation is correct)
WORLD_SIZE_OPTIONS = (1, 2)

# Enforce deterministic mode before any tests start.
reproducibility.configure_deterministic_mode()


def _add_option(parser: pytest.Parser,
                name: str,
                help: str,
                choices: Optional[List[str]] = None):
    parser.addoption(
        f'--{name}',
        default=None,
        type=str,
        choices=choices,
        help=help,
    )
    parser.addini(
        name=name,
        help=help,
        type='string',
        default=None,
    )


def pytest_addoption(parser: pytest.Parser) -> None:
    _add_option(parser,
                'seed',
                help="""\
Rank zero seed to use. `reproducibility.seed_all(seed + dist.get_global_rank())`
will be invoked before each test.""")


def _get_world_size(item: pytest.Item):
    """Returns the world_size of a test, defaults to 1."""
    _default = pytest.mark.world_size(1).mark
    return item.get_closest_marker('world_size', default=_default).args[0]


def pytest_collection_modifyitems(config: pytest.Config,
                                  items: List[pytest.Item]) -> None:
    """Filter tests by world_size (for multi-GPU tests)"""
    world_size = int(os.environ.get('WORLD_SIZE', '1'))

    conditions = [
        lambda item: _get_world_size(item) == world_size,
    ]

    # keep items that satisfy all conditions
    remaining = []
    deselected = []
    for item in items:
        if all([condition(item) for condition in conditions]):
            remaining.append(item)
        else:
            deselected.append(item)

    if deselected:
        config.hook.pytest_deselected(items=deselected)
        items[:] = remaining


def pytest_sessionfinish(session: pytest.Session, exitstatus: int):
    if exitstatus == 5:
        session.exitstatus = 0  # Ignore no-test-ran errors
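

# A minimal usage sketch, not part of the original conftest.py: the collection
# hook above keeps only the tests whose `world_size` marker matches the
# WORLD_SIZE environment variable set by the launcher. A multi-GPU test in a
# regular test module opts in as below; the name and body are illustrative
# placeholders (this reuses the `pytest` import at the top of this file).
@pytest.mark.world_size(2)
def test_example_requires_two_ranks():
    # Deselected by pytest_collection_modifyitems unless WORLD_SIZE=2.
    assert True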
EXA-1-master
exa/libraries/llm-foundry/tests/conftest.py
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import os import shutil import sys import tempfile from argparse import Namespace import pytest import torch from omegaconf import OmegaConf as om from llmfoundry import (build_finetuning_dataloader, build_text_denoising_dataloader) from llmfoundry.data.text_data import (ConcatenatedSequenceCollatorWrapper, build_text_dataloader) from llmfoundry.utils.builders import build_tokenizer # Add repo root to path so we can import scripts and test it repo_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) sys.path.append(repo_dir) from scripts.data_prep.convert_dataset_hf import main as main_hf def get_config(conf_path='yamls/mpt/125m.yaml'): os.environ['TOKENIZERS_PARALLELISM'] = 'false' with open(conf_path) as f: test_cfg = om.load(f) return test_cfg def get_data_local(tokenizer_name, pretokenize): return f'my-copy-c4-{tokenizer_name}-pretokenize-{pretokenize}' def get_abs_data_path(data_local): return os.path.join(os.getcwd(), data_local) @pytest.mark.parametrize('tokenizer_name', ['gpt2', 'facebook/opt-125m']) @pytest.mark.parametrize('pretokenize', [False, True]) def test_correct_padding(tokenizer_name, pretokenize, batch_size=4): if tokenizer_name == 'gpt2' and not pretokenize: pytest.xfail('Must pretokenize data if using "gpt2" tokenizer') data_local = get_data_local(tokenizer_name, pretokenize) split = 'val_xsmall' eos_text = '' bos_text = '' if tokenizer_name == 'gpt2': eos_text = '<|endoftext|>' elif tokenizer_name == 'facebook/opt-125m': bos_text = '</s>' path = get_abs_data_path(data_local) shutil.rmtree(path, ignore_errors=True) if pretokenize: main_hf( Namespace( **{ 'dataset': 'c4', 'data_subset': 'en', 'splits': [split], 'out_root': path, 'compression': None, 'concat_tokens': 2048, 'tokenizer': tokenizer_name, 'bos_text': bos_text, 'eos_text': eos_text, 'no_wrap': False })) else: main_hf( Namespace( **{ 'dataset': 'c4', 'data_subset': 'en', 'splits': [split], 'out_root': path, 'compression': None, 'concat_tokens': None, 'tokenizer': tokenizer_name, 'bos_text': bos_text, 'eos_text': eos_text, 'no_wrap': False })) if not os.path.isdir(path): raise RuntimeError(f'c4 dataset at {path} not set up as expected') test_cfg = get_config(conf_path='scripts/train/yamls/mpt/125m.yaml') test_cfg.data_local = data_local test_cfg.eval_loader.dataset.split = split test_cfg.dataset = om.create({ 'num_canonical_nodes': 1, 'predownload': 3000, }) tokenizer = build_tokenizer( om.create({ 'name': tokenizer_name, 'kwargs': {} })) # Dataloaders eval_loader = build_text_dataloader( test_cfg.eval_loader, tokenizer, batch_size, ) batch = next(iter(eval_loader)) assert batch['input_ids'].shape == torch.Size([batch_size, 2048]) assert batch['input_ids'].type() == 'torch.LongTensor' # we follow the convention (from huggingface) that non-attended tokens are 0 in the attn mask and -100 in the labels attention_mask = batch.get( 'attention_mask', torch.ones_like(batch['input_ids'], dtype=torch.bool)) a = attention_mask == 0 b = batch['labels'] == -100 assert torch.equal(a, b) @pytest.mark.parametrize(('eos_token_id', 'bos_token_id'), [(5, None), (None, 5), pytest.param(5, 5, marks=pytest.mark.xfail)]) def test_sequence_id_wrapper(eos_token_id, bos_token_id): wrapper = ConcatenatedSequenceCollatorWrapper( lambda x: x, # placeholder eos_token_id=eos_token_id, bos_token_id=bos_token_id, ) batch = {'input_ids': torch.Tensor([[0, 1, 2, 5, 0, 1, 5, 0, 6]])} sequence_id = wrapper.get_sequence_id_from_batch(batch) if 
eos_token_id is not None: assert torch.equal(sequence_id, torch.Tensor([[0, 0, 0, 0, 1, 1, 1, 2, 2]])) elif bos_token_id is not None: assert torch.equal(sequence_id, torch.Tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2]])) else: raise NotImplementedError() @pytest.mark.parametrize('decoder_only_format', [True, False]) @pytest.mark.parametrize('pretokenize', [True, False]) @pytest.mark.parametrize('packing_ratio', [None, 5.5]) def test_denoising_dataloader(decoder_only_format, pretokenize, packing_ratio): # Use the datasets just built in the last test tokenizer_name = 'facebook/opt-125m' data_local = get_data_local(tokenizer_name, pretokenize) path = get_abs_data_path(data_local) max_seq_len = 256 if decoder_only_format else 128 if (decoder_only_format is False) and (packing_ratio is not None): pytest.xfail('packing_ratio only supported for decoder-only format.') with tempfile.TemporaryDirectory() as tmpdir: cfg = { 'name': 'text_denoising', 'dataset': { 'local': tmpdir, 'remote': path, 'split': 'val_xsmall', 'shuffle': False, 'max_seq_len': max_seq_len, 'packing_ratio': packing_ratio, 'predownload': 1000, 'keep_zip': False, }, 'mixture_of_denoisers': { 'decoder_only_format': decoder_only_format, 'span_mean_lengths_and_ratios': [[3, .15], [8, .5]], 'sequence_mask_ratios': 0.25, }, 'drop_last': False, 'num_workers': 0, } cfg = om.create(cfg) device_batch_size = 2 expected_keys = ['input_ids', 'attention_mask', 'labels'] if decoder_only_format: expected_keys += ['bidirectional_mask'] else: expected_keys += ['decoder_attention_mask', 'decoder_input_ids'] if packing_ratio is not None: expected_keys += ['sequence_id'] tokenizer = build_tokenizer( om.create({ 'name': tokenizer_name, 'kwargs': { 'model_max_length': max_seq_len } })) loader = build_text_denoising_dataloader(cfg, tokenizer, device_batch_size) batch_ix = 0 for batch in loader: for k in expected_keys: assert k in batch t = batch[k] assert t.shape[0] == device_batch_size assert t.shape[1] <= max_seq_len batch_ix += 1 if batch_ix >= 5: break @pytest.mark.parametrize('decoder_only_format', [True, False]) @pytest.mark.parametrize('allow_pad_trimming', [True, False]) @pytest.mark.parametrize('packing_ratio', [10.0, None]) def test_finetuning_dataloader(decoder_only_format, allow_pad_trimming, packing_ratio): # Use the datasets just built in the last test tokenizer_name = 'gpt2' if decoder_only_format else 't5-base' max_seq_len = 2048 if decoder_only_format else 1024 if (decoder_only_format is False) and (packing_ratio is not None): pytest.xfail('packing_ratio only supported for decoder-only format.') cfg = { 'name': 'finetuning', 'dataset': { 'hf_name': 'tatsu-lab/alpaca', 'split': 'train', 'max_seq_len': max_seq_len, 'decoder_only_format': decoder_only_format, 'allow_pad_trimming': allow_pad_trimming, 'packing_ratio': packing_ratio, 'shuffle': True, }, 'drop_last': False, 'num_workers': 0, 'pin_memory': False, 'prefetch_factor': 2, 'persistent_workers': False, 'timeout': 0 } cfg = om.create(cfg) tokenizer = build_tokenizer( om.create({ 'name': tokenizer_name, 'kwargs': { 'model_max_length': max_seq_len } })) device_batch_size = 2 expected_keys = ['input_ids', 'attention_mask', 'labels'] if decoder_only_format: expected_keys += ['bidirectional_mask'] else: expected_keys += ['decoder_attention_mask', 'decoder_input_ids'] loader = build_finetuning_dataloader(cfg, tokenizer, device_batch_size) batch_ix = 0 for batch in loader: for k in expected_keys: assert k in batch t = batch[k] assert t.shape[ 0] == device_batch_size, f'{k} has incorrect batch size' 
assert t.shape[1] <= max_seq_len, f'{k} exceeds max_seq_len' batch_ix += 1 if batch_ix >= 3: break
EXA-1-master
exa/libraries/llm-foundry/tests/test_dataloader.py
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import pytest import torch from composer.utils import reproducibility from omegaconf import OmegaConf as om def allclose_helper(t0, t1, rtol=1e-2, atol=1e-2): return torch.allclose(t0, t1, rtol=rtol, atol=atol) @pytest.mark.gpu @pytest.mark.parametrize('attn_impl_0', ['flash', 'triton', 'torch']) @pytest.mark.parametrize('attn_impl_1', ['flash', 'triton', 'torch']) @pytest.mark.parametrize('clip_qkv', [True, False]) @pytest.mark.parametrize('qk_ln', [True, False]) @pytest.mark.parametrize('alibi', [True, False]) @pytest.mark.parametrize('multiquery', [True, False]) def test_attn_impl(attn_impl_0, attn_impl_1, clip_qkv, qk_ln, alibi, multiquery, device='cuda'): """Compare all attn impl with each other. Includes testing with and without attn_clip_qkv, attn_qk_ln, and alibi. """ from llmfoundry.models.layers import attention if alibi and (attn_impl_0 == 'flash' or attn_impl_1 == 'flash'): pytest.xfail('flash attn does not support alibi') reproducibility.seed_all(7) cfg = om.create({ 'attn_impl': 'flash', 'd_model': 128, 'n_heads': 2, 'attn_pdrop': 0, 'clip_qkv': clip_qkv, 'qk_ln': qk_ln, }) n, s, f = 2, 16, cfg.d_model cfg.attn_impl = attn_impl_0 if multiquery: attn0 = attention.MultiQueryAttention(**cfg).to(device) else: attn0 = attention.MultiheadAttention(**cfg).to(device) cfg.attn_impl = attn_impl_1 if multiquery: attn1 = attention.MultiQueryAttention(**cfg).to(device) else: attn1 = attention.MultiheadAttention(**cfg).to(device) attn1.load_state_dict(attn0.state_dict()) attention_mask = torch.ones(n, s).to(device).bool() def gen_bias(attn_impl): causal = True attn_bias = None bs = attention.attn_bias_shape(attn_impl, cfg.n_heads, s, alibi, prefix_lm=False, use_sequence_id=False, causal=causal) if bs is not None: attn_bias = torch.zeros(*bs, device=device) attn_bias = attention.build_attn_bias( attn_impl, attn_bias, cfg.n_heads, s, causal=causal, alibi=alibi, alibi_bias_max=8, ) return attn_bias x0 = torch.randn(n, s, f).to(device) x1 = x0.clone().detach() x0.requires_grad = True x1.requires_grad = True with torch.autocast(x0.device.type): attn_bias = gen_bias(attn0.attn_impl) y0, _, _ = attn0(x0, past_key_value=None, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=True) attn_bias = gen_bias(attn1.attn_impl) y1, _, _ = attn1(x1, past_key_value=None, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=True) y0 *= attention_mask.unsqueeze(-1) y1 *= attention_mask.unsqueeze(-1) loss0 = y0.sum() loss1 = y1.sum() loss0.backward() loss1.backward() assert allclose_helper(y0, y1) torch_name_param_map = {n: p for n, p in attn1.named_parameters()} for n, p in attn0.named_parameters(): tp = torch_name_param_map[n] assert allclose_helper(p, tp) assert allclose_helper(p.grad, tp.grad) assert allclose_helper(x0.grad, x1.grad) @pytest.mark.gpu @pytest.mark.parametrize('attn_impl', ['flash', 'triton', 'torch']) def test_vs_mha(attn_impl, device='cuda'): """Compare diff attn_impl to torch.nn.MultiheadAttention.""" from llmfoundry.models.layers import attention reproducibility.seed_all(17) cfg = om.create({ 'attn_impl': attn_impl, 'd_model': 256, 'n_heads': 2, 'attn_pdrop': 0, 'clip_qkv': False, 'qk_ln': False, }) n, s, f = 2, 16, cfg.d_model mmhsa = attention.MultiheadAttention(**cfg).to(device) tmhsa = torch.nn.MultiheadAttention( embed_dim=cfg.d_model, num_heads=cfg.n_heads, dropout=cfg.attn_pdrop, bias=True, batch_first=True, device=device, ) def gen_tca_mask(): # generate causal mask for torch 
attn ms = (s, s) attn_mask = torch.empty(*ms).to(device) attn_mask.fill_(float('-inf')) attn_mask.masked_fill_(attn_mask.to(torch.bool).fill_(1).tril_(), 0.) return attn_mask # clone weights tmhsa.in_proj_weight.data = mmhsa.Wqkv.weight.data.clone().detach() tmhsa.in_proj_bias.data = mmhsa.Wqkv.bias.data.clone().detach() tmhsa.out_proj.weight.data = mmhsa.out_proj.weight.data.clone().detach() tmhsa.out_proj.bias.data = mmhsa.out_proj.bias.data.clone().detach() attention_mask = torch.ones(n, s).to(device).bool() x0 = torch.randn(n, s, f).to(device) x1 = x0.clone().detach() x0.requires_grad = True x1.requires_grad = True with torch.autocast(x0.device.type): y0, _, _ = mmhsa(x0, past_key_value=None, attn_bias=None, attention_mask=attention_mask, is_causal=True) y1, _ = tmhsa(x1, x1, x1, attn_mask=gen_tca_mask(), key_padding_mask=~attention_mask, need_weights=True) y0 *= attention_mask.unsqueeze(-1) y1 *= attention_mask.unsqueeze(-1) loss0 = y0.sum() loss1 = y1.sum() loss0.backward() loss1.backward() assert allclose_helper(y0, y1) assert allclose_helper(tmhsa.out_proj.bias.grad, mmhsa.out_proj.bias.grad) assert allclose_helper(tmhsa.out_proj.weight.grad, mmhsa.out_proj.weight.grad) assert allclose_helper(tmhsa.in_proj_bias.grad, mmhsa.Wqkv.bias.grad) assert allclose_helper(tmhsa.in_proj_weight.grad, mmhsa.Wqkv.weight.grad) assert allclose_helper(x0.grad, x1.grad)
EXA-1-master
exa/libraries/llm-foundry/tests/test_flash_triton_torch.py
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 from omegaconf import OmegaConf as om from transformers import AutoTokenizer def get_config(conf_path='scripts/train/yamls/mpt/125m.yaml'): with open(conf_path) as f: test_cfg = om.load(f) return test_cfg def test_load_tokenizer(): test_cfg = get_config(conf_path='scripts/train/yamls/mpt/125m.yaml') truncation = True padding = 'max_length' resolved_om_tokenizer_config = om.to_container(test_cfg.tokenizer, resolve=True) tokenizer_kwargs = resolved_om_tokenizer_config.get( # type: ignore 'kwargs', {}) tokenizer_name = resolved_om_tokenizer_config['name'] # type: ignore tokenizer = AutoTokenizer.from_pretrained(tokenizer_name, **tokenizer_kwargs) tokenizer.pad_token = tokenizer.eos_token assert tokenizer.vocab_size == 50254 assert tokenizer.name_or_path == 'EleutherAI/gpt-neox-20b' # HuggingFace overrides model_max_length, so this check would fail. We explicitly reset the # model_max_length in ComposerMPTCausalLM # assert tokenizer.model_max_length == resolved_om_tokenizer_config['kwargs']['model_max_length'] in_str = 'hello\n\nhello' out_token_key = [25521, 187, 187, 25521] # test explicitly call tokenizer out = tokenizer.encode(in_str) assert out == out_token_key # tokenizer __call__ out = tokenizer(in_str)['input_ids'] assert out == out_token_key # tokenizer __call__ with kwargs padded_tokenize = tokenizer( in_str, truncation=truncation, padding=padding, max_length=tokenizer.model_max_length)['input_ids'] out_pad_tokens = out_token_key + [0] * (tokenizer.model_max_length - 4) assert padded_tokenize == out_pad_tokens # wrapper class __call__ out = tokenizer(in_str)['input_ids'] assert out == out_token_key # wrapper class __call__ with kwargs padded_tokenize = tokenizer( in_str, truncation=truncation, padding=padding, max_length=tokenizer.model_max_length)['input_ids'] assert padded_tokenize == out_pad_tokens # check attn mask attention_mask = tokenizer( in_str, truncation=truncation, padding=padding, max_length=tokenizer.model_max_length)['attention_mask'] attn_mask_key = [1, 1, 1, 1] + [0] * (tokenizer.model_max_length - 4) assert attention_mask == attn_mask_key
EXA-1-master
exa/libraries/llm-foundry/tests/test_tokenizer.py
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import math from collections import OrderedDict from collections.abc import Sequence from functools import partial import pytest import torch from composer.utils import reproducibility from omegaconf import OmegaConf as om from torch import nn from llmfoundry.models.utils import MODEL_INIT_REGISTRY, generic_param_init_fn_ class MLP(nn.Module): def __init__(self, cfg): super().__init__() self.fc1 = nn.Linear(cfg.in_features, cfg.out_features, bias=True) self.ln_1 = nn.LayerNorm(cfg.out_features) self.fc2 = nn.Linear(cfg.out_features, cfg.out_features, bias=True) self.fc2._is_residual = True # type: ignore def forward(self, x): y = self.ln_1(self.fc1(x)) res = y y = self.fc2(y) y = y + res return y @pytest.mark.parametrize('is_residual', [True, False]) def test_div_is_residual(is_residual: bool): reproducibility.seed_all(7) in_features, out_features = 8, 32 cfg = om.create({ 'in_features': in_features, 'out_features': out_features, 'n_layers': 2, }) cfg.init_div_is_residual = is_residual model = MLP(cfg) model.apply(partial(generic_param_init_fn_, init_fn_=nn.init.ones_, **cfg)) # verify layer norm is init to bias=0 and weight=1 assert (model.ln_1.weight == 1).all() if model.ln_1.bias is not None: assert (model.ln_1.bias == 0).all() # verify _is_residual works expected_value = 1 / math.sqrt(2 * cfg.n_layers) if is_residual else 1 for n, p in model.named_parameters(): if n == 'bias': assert (p == 0).all() elif n == 'weight': assert (p == expected_value).all() @pytest.mark.parametrize('fused', [True, False]) def test_fused_init_helper(fused): reproducibility.seed_all(7) in_features, out_features = 8, 32 cfg = om.create({ 'in_features': in_features, 'out_features': out_features, 'n_layers': 2, }) fc = nn.Linear(cfg.in_features, cfg.out_features, bias=True) fc.train() if fused: fc._fused = (0, (cfg.out_features // 2,)) # type: ignore def init_fn_(weight): # dummy init based on layer width with torch.no_grad(): out_features, _ = weight.shape[:2] weight.fill_(1 / out_features) fc.apply(partial(generic_param_init_fn_, init_fn_=init_fn_, **cfg)) expected_value = 1 / cfg.out_features if fused: expected_value *= 2 for n, p in fc.named_parameters(): if n == 'bias': assert (p == 0).all() elif n == 'weight': assert (p == expected_value).all() @pytest.mark.parametrize('module', [ nn.Linear(8, 16), nn.Embedding(8, 16), pytest.param(nn.LayerNorm(8), marks=pytest.mark.xfail( reason='LayerNorm is skipped by init_fn_', strict=True)), pytest.param(nn.Conv2d(8, 16, 3), marks=pytest.mark.xfail( reason='generic_param_init_fn_ does not init Conv layers', strict=True)), ]) def test_all_params_init(module): fill_val = torch.finfo(torch.float16).max def max_fill_init_(weight): # init param with max value with torch.no_grad(): weight.fill_(fill_val) cfg = om.create({ 'n_layers': 2, }) module.apply(partial(generic_param_init_fn_, init_fn_=max_fill_init_, **cfg)) for n, p in module.named_parameters(): if n == 'bias': assert (p == 0).all() elif n == 'weight': assert (p == fill_val).all() @pytest.mark.parametrize('emb_init_cfg', [ None, ('emb_init_std', 5), ('emb_init_std', 0), ('emb_init_uniform_lim', 2), ('emb_init_uniform_lim', [-1, 4]), ('emb_init_uniform_lim', 0), ('emb_init_uniform_lim', [1, 1]) ]) def test_emb_init(emb_init_cfg): reproducibility.seed_all(7) cfg = { 'vocab_size': 64, 'in_features': 16, 'out_features': 32, 'n_layers': 2, } if emb_init_cfg is not None: cfg[emb_init_cfg[0]] = emb_init_cfg[1] cfg = om.create(cfg) model = 
nn.Sequential( OrderedDict([ ('emb', nn.Embedding(cfg.vocab_size, cfg.in_features)), ('fc1', nn.Linear(cfg.in_features, cfg.out_features, bias=True)), ('ln1', nn.LayerNorm(cfg.out_features)), ('act1', nn.ReLU()), ('fc2', nn.Linear(cfg.out_features, cfg.out_features, bias=True)), ])) model.apply(partial(MODEL_INIT_REGISTRY['kaiming_normal_'], **cfg)) if cfg.get('emb_init_std') is not None: emb_init_std = cfg.get('emb_init_std') if emb_init_std == 0: assert (model.emb.weight == 0).all() # type: ignore elif cfg.get('emb_init_uniform_lim') is not None: emb_init_uniform_lim = cfg.get('emb_init_uniform_lim') if emb_init_uniform_lim == 0: assert (model.emb.weight == 0).all() # type: ignore elif isinstance(emb_init_uniform_lim, Sequence): assert len(emb_init_uniform_lim) <= 2 if len(emb_init_uniform_lim ) == 2 and emb_init_uniform_lim[0] == emb_init_uniform_lim[1]: assert ( model.emb.weight == emb_init_uniform_lim[0] # type: ignore ).all()
EXA-1-master
exa/libraries/llm-foundry/tests/test_init_fn.py
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import torch from composer.utils import reproducibility from transformers import AutoConfig, AutoModelForCausalLM from llmfoundry import MPTConfig, MPTForCausalLM def gen_random_batch(batch_size: int, vocab_size: int, max_seq_len: int): # generate input batch of random data batch = { 'input_ids': torch.randint( low=0, high=vocab_size, size=(batch_size, max_seq_len), dtype=torch.int64, ), 'attention_mask': torch.ones(size=(batch_size, max_seq_len), dtype=torch.bool) } return batch def test_onnx_export(tmp_path): reproducibility.seed_all(42) AutoConfig.register('mpt', MPTConfig) AutoModelForCausalLM.register(MPTConfig, MPTForCausalLM) hf_config = MPTConfig( init_device='cpu', d_model=128, n_heads=4, n_layers=2, expansion_ratio=2, max_seq_len=2048, emb_pdrop=0.0, resid_pdrop=0.0, attn_config={ 'attn_impl': 'torch', 'alibi': True, }, use_cache=True, vocab_size=50368, norm_type='layernorm', ) mpt = MPTForCausalLM(hf_config) mpt.eval() print('Creating random batch...') sample_input = gen_random_batch( 1, 50368, 2048, ) with torch.no_grad(): mpt(**sample_input) torch.onnx.export( mpt, (sample_input,), str(tmp_path / 'mpt.onnx'), input_names=['input_ids', 'attention_mask'], output_names=['output'], opset_version=16, ) with torch.no_grad(): orig_out = mpt(**sample_input) import onnx # type: ignore import onnx.checker # type: ignore import onnxruntime as ort # type: ignore _ = onnx.load(str(tmp_path / 'mpt.onnx')) onnx.checker.check_model(str(tmp_path / 'mpt.onnx')) ort_session = ort.InferenceSession(str(tmp_path / 'mpt.onnx')) for key, value in sample_input.items(): sample_input[key] = value.cpu().numpy() loaded_model_out = ort_session.run(None, sample_input) torch.testing.assert_close( orig_out.logits.detach().numpy(), loaded_model_out[0], rtol=1e-4, atol=1e-4, msg=f'output mismatch between the orig and onnx exported model', )
EXA-1-master
exa/libraries/llm-foundry/tests/test_onnx.py
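Once a checkpoint has been exported the way test_onnx_export does it, the file can be served with onnxruntime alone. A minimal sketch, assuming an mpt.onnx produced with the same input names, shapes, and vocab size as above:

import numpy as np
import onnxruntime as ort  # type: ignore

session = ort.InferenceSession('mpt.onnx')  # path assumed from the export above
feed = {
    'input_ids': np.random.randint(0, 50368, size=(1, 2048), dtype=np.int64),
    'attention_mask': np.ones((1, 2048), dtype=bool),
}
logits = session.run(None, feed)[0]
print(logits.shape)  # expected (1, 2048, 50368) for this config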
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import os import random import shutil from pathlib import Path import pytest from omegaconf import OmegaConf as om from transformers import AutoTokenizer from llmfoundry.utils.builders import build_icl_evaluators def load_icl_config(conf_path='tests/test_tasks.yaml'): with open(conf_path) as f: test_cfg = om.load(f) return test_cfg @pytest.fixture(autouse=True, scope='function') def tmp_dir(): TMP_FOLDER = 'tmp_data' + str(random.randint(0, 100_000)) dirpath = Path(TMP_FOLDER) if dirpath.exists() and dirpath.is_dir(): shutil.rmtree(dirpath) os.mkdir(TMP_FOLDER) yield TMP_FOLDER dirpath = Path(TMP_FOLDER) if dirpath.exists() and dirpath.is_dir(): shutil.rmtree(dirpath) def run_test(dir, tokenizer, bos_tok=''): task_cfg = load_icl_config() evaluators, _ = build_icl_evaluators(task_cfg.icl_tasks, tokenizer, 1024, 8, destination_dir=f'{os.getcwd()}/{dir}') for e in evaluators: batch = next(e.dataloader.dataloader.__iter__()) inputs = batch['input_ids'][0] if 'continuation_indices' in batch: continuation_indices = list(batch['continuation_indices'][0]) full_example = tokenizer.decode(inputs[0:continuation_indices[-1]]) answer = tokenizer.decode( inputs[continuation_indices[0]:continuation_indices[-1]]) else: if tokenizer.pad_token_id is not None: start_idx = ( inputs == tokenizer.pad_token_id).tolist().index(False) else: start_idx = ( inputs == tokenizer.eos_token_id).tolist().index(False) full_example = tokenizer.decode(inputs[start_idx:]) answer = batch['labels'][0][0] if e.label == 'jeopardy/0-shot/american_history': assert full_example == bos_tok + 'AMERICAN HISTORY: On May 29, 1765 Patrick Henrys Stamp Act protest was interrupted with this one word\nAnswer: Treason' assert answer == ' Treason' elif e.label == 'jeopardy/1-shot/american_history': assert full_example == bos_tok + 'AMERICAN HISTORY: Witchcraft trials held in this town in 1692 led to the hangings of 19 people\nAnswer: Salem\nAMERICAN HISTORY: On May 29, 1765 Patrick Henrys Stamp Act protest was interrupted with this one word\nAnswer: Treason' assert answer == ' Treason' elif e.label == 'triviaqa/0-shot': assert full_example == bos_tok + 'Question: Who was the man behind The Chipmunks?\nAnswer:' assert answer == 'David Seville' elif e.label == 'triviaqa/1-shot': assert full_example == bos_tok + 'Question: High Willhays is the highest point of what National Park?\nAnswer: DARTMOOR\nQuestion: Who was the man behind The Chipmunks?\nAnswer:' assert answer == 'David Seville' elif e.label == 'copa/0-shot': assert full_example == bos_tok + 'The man turned on the faucet, therefore the toilet filled with water' assert answer == ' the toilet filled with water' elif e.label == 'copa/1-shot': assert full_example == bos_tok + 'The woman was in a bad mood, therefore she told her friend to leave her alone.\nThe man turned on the faucet, therefore the toilet filled with water' assert answer == ' the toilet filled with water' elif e.label == 'winograd/0-shot': assert full_example == bos_tok + 'The city councilmen refused the demonstrators a permit because the city councilmen feared violence' assert answer == ' feared violence' elif e.label == 'winograd/1-shot': assert full_example == bos_tok + "Tom gave Ralph a lift to school so Ralph wouldn't have to walk.\nThe city councilmen refused the demonstrators a permit because the city councilmen feared violence" assert answer == ' feared violence' def test_icl_task_loading_gpt2_tokenizer(tmp_dir): tokenizer = 
AutoTokenizer.from_pretrained('gpt2') run_test(tmp_dir, tokenizer) def test_icl_task_loading_gptj_tokenizer(tmp_dir): tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-j-6b') run_test(tmp_dir, tokenizer) def test_icl_task_loading_opt_tokenizer(tmp_dir): tokenizer = AutoTokenizer.from_pretrained('facebook/opt-6.7b') run_test(tmp_dir, tokenizer, '</s>') def test_icl_task_loading_gptneox_tokenizer(tmp_dir): tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b') run_test(tmp_dir, tokenizer)
EXA-1-master
exa/libraries/llm-foundry/tests/test_icl_datasets.py
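Outside of pytest, the same evaluator construction can be sketched directly; the tokenizer name, YAML path, sequence length, and batch size below are the ones the tests use, while the destination directory is illustrative:

import os

from omegaconf import OmegaConf as om
from transformers import AutoTokenizer

from llmfoundry.utils.builders import build_icl_evaluators

tokenizer = AutoTokenizer.from_pretrained('gpt2')
task_cfg = om.load('tests/test_tasks.yaml')
evaluators, _ = build_icl_evaluators(task_cfg.icl_tasks,
                                     tokenizer,
                                     1024,
                                     8,
                                     destination_dir=os.getcwd())
for e in evaluators:
    print(e.label)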
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import warnings import pytest import torch from composer.utils import reproducibility from omegaconf import OmegaConf as om from llmfoundry import COMPOSER_MODEL_REGISTRY @pytest.mark.gpu @pytest.mark.xfail(reason='CUDA OOM expected, needs to be fixed.') @pytest.mark.parametrize('attn_impl,dropout,alibi,mask_val,no_attn_mask', [ ('flash', 0.0, False, 1, False), ('flash', 0.1, False, 1, False), ('torch', 0.0, False, 1, False), ('triton', 0.0, False, 1, False), ('triton', 0.1, False, 1, False), pytest.param('torch', 0.0, True, 1, False, marks=pytest.mark.xfail( reason='hf model is not implemented with alibi')), pytest.param('triton', 0.1, True, 1, False, marks=pytest.mark.xfail( reason='hf model is not implemented with alibi')), ('torch', 0.0, False, 0, False), ('triton', 0.0, False, 0, False), ('triton', 0.1, False, 0, False), ('flash', 0.0, False, None, True), ('torch', 0.0, False, None, True), ('triton', 0.0, False, None, True), ]) def test_compare_hf_v_mpt(attn_impl, dropout, alibi, mask_val, no_attn_mask): warnings.filterwarnings( action='ignore', message='Torchmetrics v0.9 introduced a new argument class property') warnings.filterwarnings(action='ignore', message='Using Fused Cross Entropy Loss.') conf_path = 'scripts/train/yamls/mpt/125m.yaml' # set cfg path batch_size = 2 # set batch size device = 'cuda' # set decive # ensure reproducibility seed = 17 reproducibility.seed_all(seed) # set seed # get hf gpt2 cfg hf_cfg = om.create({ 'model': { 'name': 'hf_causal_lm', 'pretrained_model_name_or_path': 'gpt2', 'device': 'cpu', 'pretrained': False, }, 'tokenizer': { 'name': 'gpt2' }, }) # get hf gpt2 model print(hf_cfg) hf_model = COMPOSER_MODEL_REGISTRY[hf_cfg.model.name]( hf_cfg.model, hf_cfg.tokenizer).to(device) hf_n_params = sum(p.numel() for p in hf_model.parameters()) hf_model.model.config.embd_pdrop = dropout hf_model.model.transformer.drop.p = dropout hf_model.model.config.resid_pdrop = dropout for b in hf_model.model.transformer.h: b.mlp.dropout.p = dropout for b in hf_model.model.transformer.h: b.attn.resid_dropout.p = dropout # in mosaic gpt, attn_dropout is integrated into the FlashMHA kernel # and will therefore generate different drop idx when compared to nn.Dropout # reguradless of if rng is seeded # attn_dropout must be set to 0 for numerical comparisons. hf_model.model.config.attn_pdrop = 0.0 for b in hf_model.model.transformer.h: b.attn.attn_dropout.p = 0.0 # get mosaic 125m config with open(conf_path) as f: cfg = om.load(f) # extract model cfg model_cfg = cfg.model # use triton attn implementation model_cfg.attn_impl = attn_impl model_cfg.alibi = alibi # modify cfg for HF GPT2 compatibility model_cfg.max_seq_len = hf_model.model.config.n_ctx model_cfg.init_device = device model_cfg.vocab_size = hf_model.model.config.vocab_size # set dropout prob model_cfg.resid_pdrop = hf_model.model.config.resid_pdrop model_cfg.emb_pdrop = hf_model.model.config.embd_pdrop # attn_dropout is integrated into the FlashMHA kernel # given this, it will generate different drop idx when compared to nn.Dropout # reguradless of if rng is seeded. 
model_cfg.attn_pdrop = hf_model.model.config.attn_pdrop # Build Model print('Initializing model...') print(model_cfg) model = COMPOSER_MODEL_REGISTRY[model_cfg.name](model_cfg, cfg.tokenizer).to(device) n_params = sum(p.numel() for p in model.parameters()) if alibi: assert hf_n_params != n_params else: assert hf_n_params == n_params # generate random input branch batch = {} batch['input_ids'] = torch.randint(low=0, high=model_cfg.vocab_size, size=(batch_size, model_cfg.max_seq_len)).to(device) batch['labels'] = torch.randint(low=0, high=model_cfg.vocab_size, size=(batch_size, model_cfg.max_seq_len)).to(device) kpm = None if no_attn_mask: if 'attention_mask' in batch.keys(): _ = batch.pop('attention_mask') else: batch['attention_mask'] = torch.ones(size=(batch_size, model_cfg.max_seq_len), dtype=torch.int64).to(device) # mask out some tokens batch['attention_mask'][:, model_cfg.max_seq_len // 2:] = mask_val kpm = batch['attention_mask'].view(*batch['attention_mask'].shape, 1) hf_model.train() model.train() # UTIL: can be used to verify that models are not the same at init with torch.autocast(device_type='cuda', dtype=torch.float16): torch.manual_seed(seed) hf_model_fwd = hf_model(batch)['logits'] if kpm is not None: hf_model_fwd *= kpm torch.manual_seed(seed) model_fwd = model(batch).logits if kpm is not None: model_fwd *= kpm print(f'{hf_model_fwd.mean().item() = }\n{model_fwd.mean().item() = }') if hf_model_fwd.mean().allclose(model_fwd.mean()): warn_msg = f'WARNING: model_fwd ({model_fwd}) and hf_model_fwd ({hf_model_fwd}) are very close at init.' raise warnings.warn(warn_msg) # type: ignore hf_model_statedict = hf_model.state_dict() # convert hf gpt statedict to mosaic gpt statedict # HF keys which are ignored hf_keys_ignore = ['.attn.masked_bias', '.attn.bias', 'lm_head'] # HF params which need to be transposed _transpose = [ '.attn.c_attn.', '.attn.c_proj.', '.mlp.c_fc.', '.mlp.c_proj.' ] # HF keys which need to be replaced by the associated value hf_2_mosaic_key_mods = { 'model.transformer.h.': 'model.transformer.blocks.', '.mlp.c_fc.': '.mlp.mlp_up.', '.mlp.c_proj.': '.mlp.mlp_down.', '.attn.c_attn.': '.attn.Wqkv.', '.attn.c_proj.': '.attn.out_proj.', '.ln_': '.norm_', } # convert hf gpt statedict to mosaic gpt statedict using the dict and list above _hf_model_statedict = {} for k, v in hf_model_statedict.items(): skip = False for _k in hf_keys_ignore: if _k in k: skip = True continue for _k in _transpose: if _k in k: v = v.t() for _k, _v in hf_2_mosaic_key_mods.items(): if _k in k: k = k.replace(_k, _v) if not skip: _hf_model_statedict[k] = v # load hf model weights into mosaic gpt model model.load_state_dict(_hf_model_statedict) with torch.autocast(device_type=device, dtype=torch.float16): torch.manual_seed(seed) hf_model_fwd = hf_model(batch)['logits'] if kpm is not None: hf_model_fwd *= kpm torch.manual_seed(seed) model_fwd = model(batch).logits if kpm is not None: model_fwd *= kpm print(f'{hf_model_fwd.mean().item() = }\n{model_fwd.mean().item() = }') print(f'{hf_model_fwd = }\n{model_fwd = }') # given dropout seeded the same way, the mean of the outputs is extremely similar assert hf_model_fwd.mean().allclose(model_fwd.mean(), rtol=1e-04, atol=1e-06) assert hf_model_fwd.allclose(model_fwd, rtol=1e-02, atol=1e-02)
EXA-1-master
exa/libraries/llm-foundry/tests/test_hf_v_mpt.py
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import os import sys import warnings import pytest import torch from omegaconf import OmegaConf as om # Add repo root to path so we can import scripts and test it repo_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) sys.path.append(repo_dir) from scripts.train.train import main def gpt_tiny_cfg(conf_path='scripts/train/yamls/mpt/125m.yaml'): """Create gpt tiny cfg.""" with open(conf_path) as f: test_cfg = om.load(f) # removes requirement to download / process train set test_cfg.train_loader.dataset = test_cfg.eval_loader.dataset test_cfg.global_train_batch_size = 8 test_cfg.device_eval_batch_size = 4 test_cfg.device_train_microbatch_size = 4 test_cfg.max_duration = '4ba' test_cfg.eval_interval = '4ba' test_cfg.eval_loader.eval_subset_num_batches = 2 test_cfg.save_interval = '4ba' test_cfg.run_name = 'gpt-mini-integration-test' test_cfg.model.d_model = 32 test_cfg.model.n_heads = 2 test_cfg.model.n_layers = 2 test_cfg.max_seq_len = 256 test_cfg.model.max_seq_len = test_cfg.max_seq_len test_cfg.tokenizer.kwargs.model_max_length = test_cfg.max_seq_len test_cfg.train_loader.dataset.max_seq_len = test_cfg.max_seq_len test_cfg.eval_loader.dataset.max_seq_len = test_cfg.max_seq_len return test_cfg @pytest.mark.parametrize('device', [ 'cpu', pytest.param('cuda', marks=pytest.mark.skipif( not torch.cuda.is_available(), reason='testing with cuda requires GPU')), ]) @pytest.mark.parametrize('logit_scale', [None, 0.036, 'inv_sqrt_d_model']) def test_train(device, logit_scale): if not os.path.isdir('./my-copy-c4/val'): pytest.xfail('c4 dataset not set up as expected') warnings.filterwarnings( action='ignore', category=DeprecationWarning, message= "Using the 'grad_clip_norm' field in Trainer is deprecated. Please usethe GradientClipping Algorithm in composer.algorithms.gradient_clipping." ) test_cfg = gpt_tiny_cfg(conf_path='scripts/train/yamls/mpt/125m.yaml') test_cfg.eval_subset_num_batches = 2 if logit_scale: test_cfg.model.logit_scale = logit_scale if device == 'cpu': pytest.xfail( 'FSDP in PyTorch 1.13 does not support precision `Precision.FP32` with sharding_strategy `FULL_SHARD.`' ) test_cfg.model.init_device = 'cpu' test_cfg.model.attn_impl = 'torch' test_cfg.precision = 'fp32' main(test_cfg)
EXA-1-master
exa/libraries/llm-foundry/tests/test_training.py
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import os import shutil import sys from argparse import Namespace # Add repo root to path so we can import scripts and test it repo_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) sys.path.append(repo_dir) from scripts.data_prep.convert_dataset_hf import main as main_hf from scripts.data_prep.convert_dataset_json import main as main_json def test_download_script_from_api(): # test calling it directly path = os.path.join(os.getcwd(), 'my-copy-c4-1') shutil.rmtree(path, ignore_errors=True) main_hf( Namespace( **{ 'dataset': 'c4', 'data_subset': 'en', 'splits': ['val_xsmall'], 'out_root': './my-copy-c4-1', 'compression': None, 'concat_tokens': None, 'bos_text': None, 'eos_text': None, 'no_wrap': False })) assert os.path.exists(path) shutil.rmtree(path, ignore_errors=False) def test_json_script_from_api(): # test calling it directly path = os.path.join(os.getcwd(), 'my-copy-c4-3') shutil.rmtree(path, ignore_errors=True) main_json( Namespace( **{ 'path': 'scripts/data_prep/example_data/arxiv.jsonl', 'out_root': './my-copy-c4-3', 'compression': None, 'split': 'train', 'concat_tokens': None, 'bos_text': None, 'eos_text': None, 'no_wrap': False })) assert os.path.exists(path) shutil.rmtree(path, ignore_errors=False)
EXA-1-master
exa/libraries/llm-foundry/tests/test_data_prep_scripts.py
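The tests call the converters' main functions directly with a Namespace; the rough command-line equivalents, using only flags defined in the scripts' parse_args (output paths illustrative), are sketched below:

# python scripts/data_prep/convert_dataset_hf.py \
#     --dataset c4 --data_subset en \
#     --splits val_xsmall \
#     --out_root ./my-copy-c4-1
#
# python scripts/data_prep/convert_dataset_json.py \
#     --path scripts/data_prep/example_data/arxiv.jsonl \
#     --split train \
#     --out_root ./my-copy-c4-3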
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import os import tempfile from argparse import ArgumentParser, Namespace from collections import OrderedDict from copy import deepcopy from pathlib import Path from typing import Any, Dict, Optional, Union import torch from composer.utils import (get_file, maybe_create_object_store_from_uri, safe_torch_load) from llmfoundry.models.mpt.configuration_mpt import (attn_config_defaults, init_config_defaults) # define state dict key changes # old_state_dict_key: new_state_dict_key v004_to_llmfoundry_key_conversion = OrderedDict([ ('.ln_', '.norm_'), ('.mlp_up.', '.up_proj.'), ('.mlp_down.', '.down_proj.'), ('.mlp.', '.ffn.'), ]) def convert_examples_ckpt_state_dict( state_dict: Dict[str, Any], conversion_dict: Dict[str, str], ) -> Dict[str, Any]: # map old keys to new keys key_mappings = OrderedDict() for k in state_dict.keys(): key_mappings[k] = k for k, v in key_mappings.items(): _v = v for old, new in conversion_dict.items(): _v = _v.replace(old, new) key_mappings[k] = _v # generate state dict with new keys new_state_dict = OrderedDict() for k, v in state_dict.items(): if key_mappings[k] != k: print(f'Updating state dict key: {k} -> {key_mappings[k]}') new_state_dict[key_mappings[k]] = v return new_state_dict def convert_examples_ckpt( checkpoint_path: Union[Path, str], output_path: Union[Path, str], conversion_dict: Dict[str, str], local_ckpt_path: Optional[Union[Path, str]] = None, ) -> None: """Convert a ckpt created in examples repo to an llmfoundry compat ckpt. Args: checkpoint_path (Union[Path, str]): Path to the composer checkpoint, can be a local path, or a remote path beginning with ``s3://``, or another backend supported by :meth:`composer.utils.maybe_create_object_store_from_uri`. output_path (Union[Path, str]): Path to the folder to write the output to. Can be a local path, or a remote path beginning with ``s3://``, or another backend supported by :meth:`composer.utils.maybe_create_object_store_from_uri`. conversion_dict (Dict): defines state dict key changes local_ckpt_path (Optional[Union[Path, str]], optional): If specified, where to save the checkpoint file to locally. If the input ``checkpoint_path`` is already a local path, this will be a symlink. Defaults to None, which will use a temporary file. 
""" # default local path to a tempfile if path is not provided if local_ckpt_path is None: tmp_dir = tempfile.TemporaryDirectory() local_ckpt_path = Path(tmp_dir.name) / 'local-composer-checkpoint.pt' # create object store if output_path object_store = maybe_create_object_store_from_uri(str(output_path)) if object_store is not None: local_output_path = tempfile.TemporaryDirectory().name else: local_output_path = output_path # create folder os.makedirs(local_output_path) # download the checkpoint file print(f'Downloading checkpoint from {checkpoint_path} -> {local_ckpt_path}') get_file(str(checkpoint_path), str(local_ckpt_path)) # Load the Composer checkpoint state dict print('Loading checkpoint into CPU RAM...') composer_state_dict = safe_torch_load(local_ckpt_path) # Convert examples model state dict to llm-foundry model_state = convert_examples_ckpt_state_dict( composer_state_dict['state']['model'], conversion_dict, ) composer_state_dict['state']['model'] = model_state # Convert HF config in state dict if 'huggingface' in composer_state_dict['state']['integrations']: hf_config = composer_state_dict['state']['integrations']['huggingface'][ 'model']['config']['content'] if hf_config['model_type'] == 'mosaic_gpt': hf_config['model_type'] = 'mpt' if 'mlp_ratio' in hf_config: hf_config['expansion_ratio'] = hf_config.pop('mlp_ratio') # Convert attention config if 'attn_config' not in hf_config: hf_config['attn_config'] = deepcopy(attn_config_defaults) hf_config['attn_config']['attn_type'] = 'multihead_attention' hf_config['attn_config']['qk_ln'] = hf_config.pop( 'attn_qk_ln', attn_config_defaults['qk_ln']) hf_config['attn_config']['clip_qkv'] = hf_config.pop( 'attn_clip_qkv', attn_config_defaults['clip_qkv']) for k in [ 'attn_pdrop', 'attn_impl', 'softmax_scale', 'prefix_lm', 'attn_uses_sequence_id', 'alibi', 'alibi_bias_max' ]: if k in hf_config: hf_config['attn_config'][k] = hf_config.pop(k) # convert norm config if 'low_precision_layernorm' in hf_config: if hf_config.pop('low_precision_layernorm'): hf_config['norm_type'] = 'low_precision_layernorm' else: hf_config['norm_type'] = 'layernorm' # Convert init config if 'init_config' not in hf_config: hf_config['init_config'] = deepcopy(init_config_defaults) hf_config['init_config']['name'] = hf_config.pop('param_init_fn') for k in [ 'fan_mode', 'init_nonlinearity', 'init_gain', 'init_std', 'init_div_is_residual', 'emb_init_std', 'emb_init_uniform_lim' ]: if k in hf_config: hf_config['init_config'][k] = hf_config.pop(k) print(f'Setting hf_config: {hf_config}') composer_state_dict['state']['integrations']['huggingface']['model'][ 'config']['content'] = hf_config # Convert optimizer state dict if 'optimizers' in composer_state_dict['state'].keys(): print(f'Updating optimizer state dict') for opt in composer_state_dict['state']['optimizers'].keys(): opt_state = convert_examples_ckpt_state_dict( composer_state_dict['state']['optimizers'][opt]['state'], conversion_dict, ) composer_state_dict['state']['optimizers'][opt]['state'] = opt_state for pg_idx in range( len(composer_state_dict['state']['optimizers'][opt] ['param_groups'])): for param_idx in range( len(composer_state_dict['state']['optimizers'][opt] ['param_groups'][pg_idx]['params'])): param_name = composer_state_dict['state']['optimizers'][ opt]['param_groups'][pg_idx]['params'][param_idx] for old, new in conversion_dict.items(): param_name = param_name.replace(old, new) composer_state_dict['state']['optimizers'][opt][ 'param_groups'][pg_idx]['params'][ param_idx] = param_name # Save weights 
torch.save(composer_state_dict, Path(local_output_path) / checkpoint_path.split('/')[-1]) def main(args: Namespace) -> None: convert_examples_ckpt( checkpoint_path=args.checkpoint_path, output_path=args.output_path, conversion_dict=v004_to_llmfoundry_key_conversion, local_ckpt_path=args.local_ckpt_path, ) if __name__ == '__main__': parser = ArgumentParser( description= 'Convert ckpt created with the examples repo into one usable by llmfoundry.' ) parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) parser.add_argument('--local_ckpt_path', type=str, default=None) args = parser.parse_args() main(args)
EXA-1-master
exa/libraries/llm-foundry/scripts/misc/convert_examples_ckpt.py
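A hedged example invocation of the checkpoint converter; the three flags are the ones registered on the parser above, and the paths (local or s3://, per the docstring) are placeholders:

# python scripts/misc/convert_examples_ckpt.py \
#     --checkpoint_path s3://my-bucket/old-run/latest-rank0.pt \
#     --output_path s3://my-bucket/old-run-llmfoundry/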
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import os import platform from argparse import ArgumentParser, Namespace from typing import Dict, Iterable, List, Optional, Union import datasets as hf_datasets from streaming import MDSWriter from torch.utils.data import DataLoader, IterableDataset from tqdm import tqdm from llmfoundry.data.finetuning.tasks import dataset_constructor def parse_args() -> Namespace: """Parse commandline arguments.""" parser = ArgumentParser(description='Convert dataset into MDS format.') parser.add_argument( '--dataset', type=str, required=True, help= 'Name/path of the dataset (e.g., first argument to `datasets.load_dataset`)' ) parser.add_argument('--data_subset', type=str, default=None, help='(Optional) subset of data to use.') parser.add_argument('--splits', nargs='+', default=['train', 'validation'], help='Which splits of the dataset to convert.') parser.add_argument('--preprocessor', type=str, default=None, help='Name or import path of function used to preprocess (reformat) the dataset. ' +\ 'See README for additional details.') parser.add_argument( '--skip-preprocessing', action='store_true', help= 'Whether to skip preprocesing (e.g., if the dataset is already formatted correctly)' ) parser.add_argument( '--out_root', type=str, required=True, help= 'Root path of output directory where MDS shards will be stored. Can be a remote URI.' ) parser.add_argument( '--local', type=str, default=None, help= '(Optional) root path of local directory if you want to keep a local copy when out_root is remote.' ) parser.add_argument('--compression', type=str, default=None, help='(Optional) name of compression algorithm to use.') parsed = parser.parse_args() if os.path.isdir(parsed.out_root) and len( set(os.listdir(parsed.out_root)).intersection(set( parsed.splits))) > 0: raise ValueError( f'--out_root={parsed.out_root} contains {os.listdir(parsed.out_root)} which cannot overlap with the requested splits {parsed.splits}.' ) return parsed class SimpleDataset(IterableDataset): """An IterableDataset that returns text samples for MDSWriter. Returns dicts of {'key': bytes} for each 'key' in `columns` """ def __init__(self, dataset_name: str, data_subset: Union[str, None], split: str, columns: List[str]): self.hf_dataset = hf_datasets.load_dataset(path=dataset_name, name=data_subset, split=split, streaming=True) self.columns = columns def __iter__(self) -> Iterable[Dict[str, bytes]]: for sample in self.hf_dataset: # convert to bytes to store in MDS binary format yield {key: sample[key].encode('utf-8') for key in self.columns} def build_dataloader(dataset: SimpleDataset, batch_size: int) -> DataLoader: # Multiple workers is only supported on linux machines if 'linux' in platform.platform().lower(): num_workers = min(64, dataset.hf_dataset.n_shards) # type: ignore else: num_workers = 0 # If using multiple workers, configure each worker to prefetch as many samples as it can, up to # the aggregate device batch size # If not using workers, the torch DataLoader expects the default value for prefetch_factor, # which non-intuitively must be 2. prefetch_factor = max(1, 2 * batch_size // num_workers) if num_workers > 0 else 2 return DataLoader( dataset=dataset, sampler=None, batch_size=batch_size, num_workers=num_workers, prefetch_factor=prefetch_factor, ) def generate_samples( loader: DataLoader, truncate_num_samples: Optional[int] = None ) -> Iterable[Dict[str, bytes]]: """Generator over samples of a dataloader. 
Args: loader (DataLoader): A dataloader emitting batches like {key: [sample0_bytes, sample1_bytes, sample2_bytes, ...]} truncate_num_samples (Optional[int]): An optional # of samples to stop at. Yields: Sample dicts. """ n_samples = 0 for batch in loader: keys = list(batch.keys()) current_bs = len(batch[keys[0]]) for idx in range(current_bs): if truncate_num_samples is not None and n_samples == truncate_num_samples: return n_samples += 1 yield {k: v[idx] for k, v in batch.items()} def main(args: Namespace) -> None: """Main: create a streaming dataset. Args: args (Namespace): Commandline arguments. """ if args.skip_preprocessing: preprocessing_fn = lambda x: x # Just an identity function else: preprocessor_str = args.preprocessor preprocessing_fn = dataset_constructor.get_preprocessing_fn_from_str( preprocessor=preprocessor_str, dataset_name=args.dataset, verbose=True) if preprocessing_fn is None: raise ValueError( '`args.preprocessor` was not set and no preprocessing function ' +\ 'has been registered for `args.dataset`. If this was intentional ' +\ '(e.g., because your dataset is already correctly formatted), ' +\ 'include the "--skip-preprocessing" flag to avoid this error.' ) columns = ['prompt', 'response'] for split_name in args.splits: dataset = hf_datasets.load_dataset(path=args.dataset, name=args.data_subset, split=split_name, streaming=True) loader = build_dataloader(dataset=dataset, batch_size=512) samples = generate_samples(loader) # Write samples print(f'Converting {split_name} to MDS format...') out = os.path.join(args.out_root, split_name) if args.local is not None: out = (os.path.join(args.local, split_name), out) keep_local = True else: keep_local = False with MDSWriter(columns={key: 'str' for key in columns}, out=out, compression=args.compression, keep_local=keep_local) as out: for sample in tqdm(samples, desc=split_name): formatted_sample = preprocessing_fn(sample) if ('prompt' not in formatted_sample) or ('response' not in formatted_sample): raise KeyError( 'Unable to tokenize example because it has not been properly formatted. ' +\ '"prompt" and "response" are required keys but at least one was missing ' +\ f'from {formatted_sample=}.' ) encoded_sample = { key: formatted_sample[key].encode('utf-8') for key in columns } out.write(encoded_sample) if __name__ == '__main__': """Example for converting Muennighoff/P3: >>> python convert_finetuning_dataset.py \ >>> --dataset "Muennighoff/P3" \ >>> --splits train validation \ >>> --preprocessor llmfoundry.data.finetuning.tasks:p3_preprocessing_function \ >>> --out_root s3://<bucket>/muennighoff-p3 """ main(parse_args())
EXA-1-master
exa/libraries/llm-foundry/scripts/data_prep/convert_finetuning_dataset.py
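The main loop above requires every preprocessed sample to contain 'prompt' and 'response' keys (see the KeyError raised otherwise). A minimal custom preprocessor, importable and passed via --preprocessor as a module:function path like the P3 example in __main__, might look like this; the raw field names 'question' and 'answer' are hypothetical:

def my_preprocessing_function(sample: dict) -> dict:
    # 'question' and 'answer' are hypothetical raw-dataset columns;
    # the returned keys must be exactly 'prompt' and 'response'.
    return {
        'prompt': sample['question'] + '\n',
        'response': sample['answer'],
    }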
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """Streaming dataset conversion scripts for json files.""" import os import platform from argparse import ArgumentParser, Namespace from enum import Enum from glob import glob from typing import Dict, Iterable, Optional import datasets as hf_datasets import numpy as np from streaming import MDSWriter from torch.utils.data import DataLoader, IterableDataset from tqdm import tqdm from transformers import AutoTokenizer, PreTrainedTokenizerBase from llmfoundry.data.datasets import ConcatTokensDataset, NoConcatDataset class ConcatMode(Enum): NO_CONCAT = 'NO_CONCAT' CONCAT_TOKENS = 'CONCAT_TOKENS' def parse_args() -> Namespace: """Parse commandline arguments.""" parser = ArgumentParser( description= 'Convert dataset into MDS format, optionally concatenating and tokenizing' ) parser.add_argument('--path', type=str, required=True) parser.add_argument('--out_root', type=str, required=True) parser.add_argument('--compression', type=str, default=None) group = parser.add_mutually_exclusive_group(required=False) group.add_argument( '--concat_tokens', type=int, help='Convert text to tokens and concatenate up to this many tokens') parser.add_argument('--split', type=str, default='train') parser.add_argument('--tokenizer', type=str, required=False, default=None) parser.add_argument('--bos_text', type=str, required=False, default=None) parser.add_argument('--eos_text', type=str, required=False, default=None) parser.add_argument('--no_wrap', default=False, action='store_true') parsed = parser.parse_args() if os.path.isdir(parsed.out_root) and len( set(os.listdir(parsed.out_root)).intersection(set( parsed.split))) > 0: raise ValueError( f'--out_root={parsed.out_root} contains {os.listdir(parsed.out_root)} which cannot overlap with the requested splits {parsed.splits}.' ) # Make sure we have needed concat options if (parsed.concat_tokens is not None and isinstance(parsed.concat_tokens, int) and parsed.tokenizer is None): parser.error( 'When setting --concat_tokens, you must specify a --tokenizer') # now that we have validated them, change BOS/EOS to strings if parsed.bos_text is None: parsed.bos_text = '' if parsed.eos_text is None: parsed.eos_text = '' return parsed def build_hf_dataset( path: str, split: str, mode: ConcatMode, max_length: Optional[int] = None, bos_text: str = '', eos_text: str = '', no_wrap: bool = False, tokenizer: PreTrainedTokenizerBase = None, ) -> IterableDataset: """Build an IterableDataset over the HF C4 or pile source data. Args: dataset_name (str): Dataset name split (str): Split name. mode (ConcatMode): NO_CONCAT, or CONCAT_TOKENS max_length (int): The length of concatenated tokens bos_text (str): text to insert at the beginning of each sequence eos_text (str): text to insert at the end of each sequence no_wrap (bool): if concatenating, whether to wrap text across `max_length` boundaries tokenizer (PreTrainedTokenizerBase): if mode is CONCAT_TOKENS, the tokenizer to use data_subset (str): Referred to as "name" in HuggingFace datasets.load_dataset. Typically "all" (The Pile) or "en" (c4). Returns: An IterableDataset. 
""" if os.path.isdir(path): data_files = glob(f'{path}/*') else: data_files = path hf_dataset = hf_datasets.load_dataset('json', data_files=data_files, split=split) if mode == ConcatMode.NO_CONCAT: dataset = NoConcatDataset(hf_dataset) else: if not isinstance(tokenizer, PreTrainedTokenizerBase): raise ValueError( f'{tokenizer=} must be of type PreTrainedTokenizerBase') if max_length is None: raise ValueError(f'max_length must be set.') if bos_text + eos_text == '': test_tokens = tokenizer('test') if test_tokens['input_ids'][ 0] != tokenizer.bos_token_id and test_tokens['input_ids'][ -1] != tokenizer.eos_token_id: tok_error_msg = 'This tokenizer does not insert an EOS nor BOS token. ' tok_error_msg += 'Concatenating with this tokenizer will result in sequences being ' tok_error_msg += 'attached without a separating token. Please use another tokenizer, ' tok_error_msg += 'such as facebook/opt-125m, or specify EOS/BOS text with e.g. ' tok_error_msg += '--bos_text=<|endoftext|>.' raise ValueError(tok_error_msg) dataset = ConcatTokensDataset(hf_dataset=hf_dataset, tokenizer=tokenizer, max_length=max_length, bos_text=bos_text, eos_text=eos_text, no_wrap=no_wrap) return dataset def _est_progress_denominator(total_samples: int, chars_per_sample: int, chars_per_token: int, mode: ConcatMode, max_length: int): est_tokens_per_sample = chars_per_sample // chars_per_token if mode == ConcatMode.NO_CONCAT: return total_samples elif mode == ConcatMode.CONCAT_TOKENS: return total_samples * est_tokens_per_sample // max_length def build_dataloader(dataset, batch_size) -> DataLoader: # Multiple workers is only supported on linux machines if 'linux' in platform.platform().lower(): num_workers = 64 else: num_workers = 0 # If using multiple workers, configure each worker to prefetch as many samples as it can, up to # the aggregate device batch size # If not using workers, the torch DataLoader expects the default value for prefetch_factor, # which non-intuitively must be 2. prefetch_factor = max(1, 2 * batch_size // num_workers) if num_workers > 0 else 2 return DataLoader( dataset=dataset, sampler=None, batch_size=batch_size, num_workers=num_workers, prefetch_factor=prefetch_factor, ) def generate_samples( loader: DataLoader, truncate_num_samples: Optional[int] = None ) -> Iterable[Dict[str, bytes]]: """Generator over samples of a dataloader. Args: loader (DataLoader): A dataloader emitting batches like {key: [sample0_bytes, sample1_bytes, sample2_bytes, ...]} truncate_num_samples (Optional[int]): An optional # of samples to stop at. Yields: Sample dicts. """ n_samples = 0 for batch in loader: keys = list(batch.keys()) current_bs = len(batch[keys[0]]) for idx in range(current_bs): if truncate_num_samples is not None and n_samples == truncate_num_samples: return n_samples += 1 yield {k: v[idx] for k, v in batch.items()} def main(args: Namespace) -> None: """Main: create C4/pile streaming dataset. Args: args (Namespace): Commandline arguments. 
""" if args.concat_tokens is not None: mode = ConcatMode.CONCAT_TOKENS tokenizer = AutoTokenizer.from_pretrained(args.tokenizer) # we will enforce length, so suppress warnings about sequences too long for the model tokenizer.model_max_length = int(1e30) columns = {'tokens': 'bytes'} else: mode = ConcatMode.NO_CONCAT tokenizer = None columns = {'text': 'str'} # Get samples dataset = build_hf_dataset(path=args.path, split=args.split, mode=mode, max_length=args.concat_tokens, bos_text=args.bos_text, eos_text=args.eos_text, no_wrap=args.no_wrap, tokenizer=tokenizer) print('here') # Write samples print(f'Converting to MDS format...') print( f'Note that the progress bar is based on the dataset length before tokenization.' ) print(f'It will finish at a value below 100% if tokenizing') with MDSWriter(columns=columns, out=os.path.join(args.out_root), compression=args.compression) as out: for sample in tqdm(dataset): out.write(sample) if __name__ == '__main__': main(parse_args())
EXA-1-master
exa/libraries/llm-foundry/scripts/data_prep/convert_dataset_json.py
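The test in tests/test_data_prep_scripts.py only exercises the no-concatenation path of this script. A hedged sketch of the tokenizing path; per parse_args, --concat_tokens requires --tokenizer, and the BOS/EOS check above suggests supplying separator text for tokenizers such as gpt2 (values illustrative):

# python scripts/data_prep/convert_dataset_json.py \
#     --path scripts/data_prep/example_data/arxiv.jsonl \
#     --split train \
#     --out_root ./arxiv-concat-mds \
#     --concat_tokens 2048 \
#     --tokenizer gpt2 \
#     --eos_text '<|endoftext|>'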
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """Streaming dataset conversion scripts for C4 and The Pile.""" import os import platform from argparse import ArgumentParser, Namespace from dataclasses import dataclass from enum import Enum from typing import Dict, Iterable, Optional, Union import datasets as hf_datasets import numpy as np from streaming import MDSWriter from torch.utils.data import DataLoader, IterableDataset from tqdm import tqdm from transformers import AutoTokenizer, PreTrainedTokenizerBase from llmfoundry.data.datasets import ConcatTokensDataset, NoConcatDataset class ConcatMode(Enum): NO_CONCAT = 'NO_CONCAT' CONCAT_TOKENS = 'CONCAT_TOKENS' def parse_args() -> Namespace: """Parse commandline arguments.""" parser = ArgumentParser( description= 'Convert dataset into MDS format, optionally concatenating and tokenizing' ) parser.add_argument('--dataset', type=str, required=True) parser.add_argument('--data_subset', type=str, default=None, help='E.g. "all" or "en"') parser.add_argument( '--splits', nargs='+', default=['train', 'train_small', 'val', 'val_small', 'val_xsmall']) parser.add_argument('--out_root', type=str, required=True) parser.add_argument('--compression', type=str, default=None) group = parser.add_mutually_exclusive_group(required=False) group.add_argument( '--concat_tokens', type=int, help='Convert text to tokens and concatenate up to this many tokens') parser.add_argument('--tokenizer', type=str, required=False, default=None) parser.add_argument('--bos_text', type=str, required=False, default=None) parser.add_argument('--eos_text', type=str, required=False, default=None) parser.add_argument('--no_wrap', default=False, action='store_true') parsed = parser.parse_args() if os.path.isdir(parsed.out_root) and len( set(os.listdir(parsed.out_root)).intersection(set( parsed.splits))) > 0: raise ValueError( f'--out_root={parsed.out_root} contains {os.listdir(parsed.out_root)} which cannot overlap with the requested splits {parsed.splits}.' 
) # Make sure we have needed concat options if (parsed.concat_tokens is not None and isinstance(parsed.concat_tokens, int) and parsed.tokenizer is None): parser.error( 'When setting --concat_tokens, you must specify a --tokenizer') # now that we have validated them, change BOS/EOS to strings if parsed.bos_text is None: parsed.bos_text = '' if parsed.eos_text is None: parsed.eos_text = '' return parsed @dataclass class DataSplitConstants: hf_split: str folder_split: str raw_samples: int truncated_samples: Union[int, None] @dataclass class DatasetConstants: chars_per_sample: int chars_per_token: int splits = {} def __iter__(self): for _, v in self.splits.items(): yield v class TrainSmallConstants(DataSplitConstants): def __init__(self, hf_split: str = 'train', folder_split: str = 'train_small', raw_samples: int = 1000000, truncated_samples: int = 100000): super().__init__(hf_split, folder_split, raw_samples, truncated_samples) class ValSmallConstants(DataSplitConstants): def __init__(self, hf_split: str = 'validation', folder_split: str = 'val_small', raw_samples: int = 10000, truncated_samples: int = 10000): super().__init__(hf_split, folder_split, raw_samples, truncated_samples) class ValXSmallConstants(DataSplitConstants): def __init__(self, hf_split: str = 'validation', folder_split: str = 'val_xsmall', raw_samples: int = 3000, truncated_samples: int = 3000): super().__init__(hf_split, folder_split, raw_samples, truncated_samples) pileconstants = DatasetConstants( chars_per_sample=6212, # Computed over validation set chars_per_token=4 # OpenAI estimate ) pileconstants.splits['train'] = DataSplitConstants(hf_split='train', folder_split='train', raw_samples=210607728, truncated_samples=None) pileconstants.splits['train_small'] = DataSplitConstants( hf_split='train', folder_split='train_small', raw_samples=1000000, truncated_samples=100000) pileconstants.splits['val'] = DataSplitConstants(hf_split='validation', folder_split='val', raw_samples=214670, truncated_samples=None) pileconstants.splits['val_small'] = DataSplitConstants(hf_split='validation', folder_split='val_small', raw_samples=10000, truncated_samples=10000) pileconstants.splits['val_xsmall'] = DataSplitConstants( hf_split='validation', folder_split='val_xsmall', raw_samples=3000, truncated_samples=3000) c4constants = DatasetConstants( chars_per_sample=2163, # Computed over validation set chars_per_token=4 # OpenAI estimate ) c4constants.splits['train'] = DataSplitConstants(hf_split='train', folder_split='train', raw_samples=364868892, truncated_samples=None) c4constants.splits['train_small'] = DataSplitConstants( hf_split='train', folder_split='train_small', raw_samples=1000000, truncated_samples=100000) c4constants.splits['val'] = DataSplitConstants(hf_split='validation', folder_split='val', raw_samples=364608, truncated_samples=None) c4constants.splits['val_small'] = DataSplitConstants(hf_split='validation', folder_split='val_small', raw_samples=10000, truncated_samples=10000) c4constants.splits['val_xsmall'] = DataSplitConstants(hf_split='validation', folder_split='val_xsmall', raw_samples=3000, truncated_samples=3000) CONSTS = {'c4': c4constants, 'the_pile': pileconstants} def build_hf_dataset( dataset_name: str, split: str, mode: ConcatMode, max_length: Optional[int] = None, bos_text: str = '', eos_text: str = '', no_wrap: bool = False, tokenizer: PreTrainedTokenizerBase = None, data_subset: Union[str, None] = None, ) -> IterableDataset: """Build an IterableDataset over the HF C4 or pile source data. 
Args: dataset_name (str): Dataset name split (str): Split name. mode (ConcatMode): NO_CONCAT, or CONCAT_TOKENS max_length (int): The length of concatenated tokens bos_text (str): text to insert at the beginning of each sequence eos_text (str): text to insert at the end of each sequence no_wrap (bool): if concatenating, whether to wrap text across `max_length` boundaries tokenizer (PreTrainedTokenizerBase): if mode is CONCAT_TOKENS, the tokenizer to use data_subset (str): Referred to as "name" in HuggingFace datasets.load_dataset. Typically "all" (The Pile) or "en" (c4). Returns: An IterableDataset. """ hf_dataset = hf_datasets.load_dataset(path=dataset_name, name=data_subset, split=split, streaming=True) if mode == ConcatMode.NO_CONCAT: dataset = NoConcatDataset(hf_dataset) else: if not isinstance(tokenizer, PreTrainedTokenizerBase): raise ValueError( f'{tokenizer=} must be of type PreTrainedTokenizerBase') if max_length is None: raise ValueError(f'max_length must be set.') if bos_text + eos_text == '': test_tokens = tokenizer('test') if test_tokens['input_ids'][ 0] != tokenizer.bos_token_id and test_tokens['input_ids'][ -1] != tokenizer.eos_token_id: tok_error_msg = 'This tokenizer does not insert an EOS nor BOS token. ' tok_error_msg += 'Concatenating with this tokenizer will result in sequences being ' tok_error_msg += 'attached without a separating token. Please use another tokenizer, ' tok_error_msg += 'such as facebook/opt-125m, or specify EOS/BOS text with e.g. ' tok_error_msg += '--bos_text=<|endoftext|>.' raise ValueError(tok_error_msg) dataset = ConcatTokensDataset(hf_dataset=hf_dataset, tokenizer=tokenizer, max_length=max_length, bos_text=bos_text, eos_text=eos_text, no_wrap=no_wrap) return dataset def _est_progress_denominator(total_samples: int, chars_per_sample: int, chars_per_token: int, mode: ConcatMode, max_length: int): est_tokens_per_sample = chars_per_sample // chars_per_token if mode == ConcatMode.NO_CONCAT: return total_samples elif mode == ConcatMode.CONCAT_TOKENS: return total_samples * est_tokens_per_sample // max_length def build_dataloader(dataset, batch_size) -> DataLoader: # Multiple workers is only supported on linux machines if 'linux' in platform.platform().lower(): num_workers = min(64, dataset.hf_dataset.n_shards) # type: ignore else: num_workers = 0 # If using multiple workers, configure each worker to prefetch as many samples as it can, up to # the aggregate device batch size # If not using workers, the torch DataLoader expects the default value for prefetch_factor, # which non-intuitively must be 2. prefetch_factor = max(1, 2 * batch_size // num_workers) if num_workers > 0 else 2 return DataLoader( dataset=dataset, sampler=None, batch_size=batch_size, num_workers=num_workers, prefetch_factor=prefetch_factor, ) def generate_samples( loader: DataLoader, truncate_num_samples: Optional[int] = None ) -> Iterable[Dict[str, bytes]]: """Generator over samples of a dataloader. Args: loader (DataLoader): A dataloader emitting batches like {key: [sample0_bytes, sample1_bytes, sample2_bytes, ...]} truncate_num_samples (Optional[int]): An optional # of samples to stop at. Yields: Sample dicts. """ n_samples = 0 for batch in loader: keys = list(batch.keys()) current_bs = len(batch[keys[0]]) for idx in range(current_bs): if truncate_num_samples is not None and n_samples == truncate_num_samples: return n_samples += 1 yield {k: v[idx] for k, v in batch.items()} def main(args: Namespace) -> None: """Main: create C4/pile streaming dataset. 
Args: args (Namespace): Commandline arguments. """ try: dataset_constants = CONSTS[args.dataset] except KeyError: raise ValueError( f'Constants for dataset "{args.dataset}" not found. Currently only "the_pile" and "c4" are supported.' ) if args.concat_tokens is not None: mode = ConcatMode.CONCAT_TOKENS tokenizer = AutoTokenizer.from_pretrained(args.tokenizer) # we will enforce length, so suppress warnings about sequences too long for the model tokenizer.model_max_length = int(1e30) columns = {'tokens': 'bytes'} else: mode = ConcatMode.NO_CONCAT tokenizer = None columns = {'text': 'str'} for split_name in args.splits: try: split = dataset_constants.splits[split_name] except KeyError: raise KeyError(f'Constants not defined for split {split_name}.') hf_split = split.hf_split folder_split = split.folder_split expected_num_samples = split.raw_samples truncate_num_samples = split.truncated_samples # Only generate the splits requested if folder_split not in args.splits: continue # Get samples dataset = build_hf_dataset(dataset_name=args.dataset, data_subset=args.data_subset, split=hf_split, mode=mode, max_length=args.concat_tokens, bos_text=args.bos_text, eos_text=args.eos_text, no_wrap=args.no_wrap, tokenizer=tokenizer) loader = build_dataloader(dataset=dataset, batch_size=512) samples = generate_samples(loader, truncate_num_samples=truncate_num_samples) if expected_num_samples is not None: denominator = truncate_num_samples if truncate_num_samples is not None else _est_progress_denominator( total_samples=expected_num_samples, chars_per_sample=dataset_constants.chars_per_sample, chars_per_token=dataset_constants.chars_per_token, mode=mode, max_length=args.concat_tokens, ) else: denominator = None # Write samples print(f'Converting {folder_split} to MDS format...') print( f'Note that the progress bar is based on the dataset length before tokenization.' ) print(f'It will finish at a value below 100% if tokenizing') with MDSWriter(columns=columns, out=os.path.join(args.out_root, folder_split), compression=args.compression) as out: if denominator is not None: for sample in tqdm(samples, desc=folder_split, total=denominator): out.write(sample) else: for sample in tqdm(samples, desc=folder_split): out.write(sample) if __name__ == '__main__': main(parse_args())
EXA-1-master
exa/libraries/llm-foundry/scripts/data_prep/convert_dataset_hf.py
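A hedged example of the concatenating mode for C4; the split names come from the constants defined above, while the tokenizer, sequence length, and separator text are illustrative:

# python scripts/data_prep/convert_dataset_hf.py \
#     --dataset c4 --data_subset en \
#     --splits train_small val_small \
#     --out_root ./my-copy-c4 \
#     --concat_tokens 2048 \
#     --tokenizer gpt2 \
#     --eos_text '<|endoftext|>'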
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import contextlib import os import sys import warnings from composer import Trainer from composer.core import Evaluator from composer.utils import dist, get_device, reproducibility from omegaconf import OmegaConf as om from llmfoundry import (COMPOSER_MODEL_REGISTRY, build_finetuning_dataloader, build_text_denoising_dataloader) from llmfoundry.data.text_data import build_text_dataloader from llmfoundry.models.utils import init_empty_weights from llmfoundry.utils.builders import (build_algorithm, build_callback, build_icl_evaluators, build_logger, build_optimizer, build_scheduler, build_tokenizer) from llmfoundry.utils.config_utils import log_config, update_batch_size_info def validate_config(cfg): """Validates compatible model and dataloader selection.""" loaders = [cfg.train_loader] if 'eval_loader' in cfg: loaders.append(cfg.eval_loader) for loader in loaders: if loader.name == 'text': if cfg.model.name in ['hf_prefix_lm', 'hf_t5']: raise ValueError( f'Model type "{cfg.model.name}" is not supported when using the "text " ' +\ f'dataloader. Please use the "text_denoising" dataloader to pre-train that model type.') elif loader.name == 'text_denoising': if cfg.model.name == 'hf_causal_lm': raise ValueError( f'Model type "{cfg.model.name}" is not supported when using the "text_denoising" ' +\ f'dataloader. Please use the "text" dataloader to pre-train that model type.') if loader.mixture_of_denoisers.decoder_only_format and cfg.model.name == 'hf_t5': warnings.warn( 'Model type "hf_t5" requires `decoder_only_format` to be ``False``. ' +\ 'Overriding `decoder_only_format` from ``True`` to ``False``.') loader.mixture_of_denoisers.decoder_only_format = False if (not loader.mixture_of_denoisers.decoder_only_format ) and cfg.model.name == 'hf_prefix_lm': warnings.warn( 'Model type "hf_prefix_lm" requires `decoder_only_format` to be ``True``. ' +\ 'Overriding `decoder_only_format` from ``False`` to ``True``.') loader.mixture_of_denoisers.decoder_only_format = True if 'icl_tasks' in cfg: if cfg.model.name == 'hf_t5': raise ValueError( 'ICL evaluation does not currently support Encoder-Decoder models, such as "hf_t5".' 
) def build_composer_model(model_cfg, tokenizer): warnings.filterwarnings( action='ignore', message='Torchmetrics v0.9 introduced a new argument class property') if model_cfg.name not in COMPOSER_MODEL_REGISTRY: raise ValueError( f'Not sure how to build model with name={model_cfg.name}') return COMPOSER_MODEL_REGISTRY[model_cfg.name](model_cfg, tokenizer) def build_dataloader(cfg, tokenizer, device_batch_size): if cfg.name == 'text': return build_text_dataloader( cfg, tokenizer, device_batch_size, ) elif cfg.name == 'text_denoising': return build_text_denoising_dataloader( cfg, tokenizer, device_batch_size, ) elif cfg.name == 'finetuning': return build_finetuning_dataloader( cfg, tokenizer, device_batch_size, ) else: raise ValueError(f'Not sure how to build dataloader with config: {cfg}') def main(cfg): # Check for incompatibilities between the model and data loaders validate_config(cfg) # Filter deprecation warning from torch internal usage warnings.filterwarnings( action='ignore', category=UserWarning, message= f'torch.distributed.*_base is a private function and will be deprecated.*' ) cfg.dist_timeout = cfg.get('dist_timeout', 600.0) reproducibility.seed_all(cfg.seed) dist.initialize_dist(get_device(None), timeout=cfg.dist_timeout) # Run Name if cfg.get('run_name') is None: cfg.run_name = os.environ.get('COMPOSER_RUN_NAME', 'llm') # Get batch size info cfg = update_batch_size_info(cfg) # Read FSDP Config as a dict fsdp_config = cfg.get('fsdp_config', None) fsdp_config = om.to_container(fsdp_config, resolve=True) if fsdp_config else None # Restrict model init_device to 'meta' and 'cpu', # using 'cuda' vs. 'cuda:id' is tricky and can lead to common user errors # when multiple GPUs are available. # Also 'meta' is only valid when using FSDP init_device = cfg.model.get('init_device', 'cpu') assert init_device in ['meta', 'cpu'] if fsdp_config is None and init_device == 'meta': warnings.warn( "Using `cfg.model.init_device='meta'` is only valid when using FSDP! 
" +\ "Reverting to `cfg.model.init_device='cpu'`.") cfg.model.init_device = 'cpu' # build tokenizer tokenizer = build_tokenizer(cfg.tokenizer) # Build Model print('Initializing model...') init_context = contextlib.nullcontext() if init_device == 'meta': init_context = init_empty_weights() with init_context: model = build_composer_model(cfg.model, tokenizer) cfg.n_params = sum(p.numel() for p in model.parameters()) print(f'{cfg.n_params=:.2e}') # Dataloaders print('Building train loader...') train_loader = build_dataloader( cfg.train_loader, tokenizer, cfg.device_train_batch_size, ) print('Building eval loader...') evaluators = [] if 'eval_loader' in cfg: eval_loader = Evaluator(label='eval', dataloader=build_dataloader( cfg.eval_loader, tokenizer, cfg.device_eval_batch_size), metric_names=list(model.train_metrics.keys())) evaluators.append(eval_loader) if 'icl_tasks' in cfg: icl_evaluators, _ = build_icl_evaluators(cfg.icl_tasks, tokenizer, cfg.max_seq_len, cfg.device_eval_batch_size) evaluators.extend(icl_evaluators) # Optimizer optimizer = build_optimizer(cfg.optimizer, model) # Scheduler scheduler = build_scheduler(cfg.scheduler) # Loggers loggers = [ build_logger(name, logger_cfg) for name, logger_cfg in (cfg.get('loggers') or {}).items() ] # Callbacks callbacks = [ build_callback(name, callback_cfg) for name, callback_cfg in (cfg.get('callbacks') or {}).items() ] # Algorithms algorithms = [ build_algorithm(name, algorithm_cfg) for name, algorithm_cfg in (cfg.get('algorithms') or {}).items() ] # Build the Trainer print('Building trainer...') trainer = Trainer( run_name=cfg.run_name, seed=cfg.seed, model=model, train_dataloader=train_loader, eval_dataloader=evaluators, optimizers=optimizer, schedulers=scheduler, max_duration=cfg.max_duration, eval_interval=cfg.eval_interval, eval_subset_num_batches=cfg.get('eval_subset_num_batches', -1), progress_bar=cfg.get('progress_bar', False), log_to_console=cfg.get('log_to_console', True), console_log_interval=cfg.get('console_log_interval', '1ba'), loggers=loggers, callbacks=callbacks, precision=cfg.precision, algorithms=algorithms, device_train_microbatch_size=cfg.get('device_train_microbatch_size', 'auto'), fsdp_config=fsdp_config, # type: ignore save_folder=cfg.get('save_folder', None), save_filename=cfg.get('save_filename', 'ep{epoch}-ba{batch}-rank{rank}.pt'), save_latest_filename=cfg.get('save_latest_filename', 'latest-rank{rank}.pt'), save_interval=cfg.get('save_interval', '1000ba'), save_num_checkpoints_to_keep=cfg.get('save_num_checkpoints_to_keep', -1), save_overwrite=cfg.get('save_overwrite', False), load_path=cfg.get('load_path', None), load_weights_only=cfg.get('load_weights_only', False), load_ignore_keys=cfg.get('load_ignore_keys', None), autoresume=cfg.get('autoresume', False), python_log_level=cfg.get('python_log_level', None), dist_timeout=cfg.dist_timeout, ) print('Logging config...') log_config(cfg) if cfg.get('eval_first', False) and trainer.state.timestamp.batch.value == 0: trainer.eval() print('Starting training...') trainer.fit() print('Done.') if __name__ == '__main__': yaml_path, args_list = sys.argv[1], sys.argv[2:] with open(yaml_path) as f: yaml_cfg = om.load(f) cli_cfg = om.from_cli(args_list) cfg = om.merge(yaml_cfg, cli_cfg) main(cfg)
EXA-1-master
exa/libraries/llm-foundry/scripts/train/train.py
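Per the __main__ block above, train.py takes a YAML path as its first argument and treats the remaining key=value arguments as OmegaConf overrides merged on top of it. A hedged single-process example, using the config path and override keys that appear in tests/test_training.py and assuming the C4 data referenced by that YAML has already been prepared; multi-GPU runs would normally go through a distributed launcher such as composer, which is not shown here:

# python scripts/train/train.py scripts/train/yamls/mpt/125m.yaml \
#     model.n_layers=2 max_duration=4ba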
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import argparse import math import os import requests import yaml from mcli.models.run_config import SchedulingConfig from mcli.sdk import RunConfig, create_run, get_clusters def _get_cluster_info(): clusters = get_clusters() cluster_info = {} for cluster in clusters: cluster_info[cluster.name] = [(ci.gpu_type, max(ci.gpu_nums)) for ci in cluster.cluster_instances if ci.gpu_type is not None] return cluster_info CLUSTER_INFO = _get_cluster_info() def str_to_bool(value): # helper fn if isinstance(value, bool): return value if value.lower() in {'false', 'f', '0', 'no', 'n'}: return False elif value.lower() in {'true', 't', '1', 'yes', 'y'}: return True raise ValueError(f'{value} is not a valid boolean value') def parse_args(): parser = argparse.ArgumentParser( description= 'Generate and run configurations to test MPT training throughput on Mosaic Cloud.' ) parser.add_argument('--project', type=str, default='tput') parser.add_argument( '--image', type=str, default='mosaicml/pytorch:1.13.1_cu117-python3.10-ubuntu20.04') parser.add_argument('--git_branch', type=str, default=None, help='what git branch to use.') parser.add_argument('--git_commit', type=str, default=None, help='what git commit to use.') parser.add_argument('-t', '--precisions', '--types', type=str, default=['bf16'], nargs='+', choices=['bf16', 'fp16']) parser.add_argument('--fsdp_config_mixed_precision', type=str, default='PURE') parser.add_argument('--fsdp_config_activation_checkpointing', type=str_to_bool, nargs='?', const=True, default=None) parser.add_argument( '-s', '--seq_len_exp', type=int, default=[11, 11], nargs=2, help='exponent of seq lengths to be tested (default: [11, 11] = 2048)') parser.add_argument( '-b', '--batch_size_exp', type=int, default=None, nargs=2, help= 'exponent of batch size (in tokens) to be tested (default: [19, 23] = 2^19 to 2^23)' ) parser.add_argument( '--batch_sizes', type=int, nargs='+', default=[], help='batch sizes to run.', ) parser.add_argument( '--accum', type=int, default=None, help='batch sizes multiplier (accumulations before step).', ) parser.add_argument('-m', '--model_yamls', type=str, default=[ '125m.yaml', '350m.yaml', '760m.yaml', '1b.yaml', '3b.yaml', '7b.yaml', '13b.yaml', '30b.yaml', '70b.yaml' ], choices=[ '125m.yaml', '350m.yaml', '760m.yaml', '1b.yaml', '3b.yaml', '7b.yaml', '13b.yaml', '30b.yaml', '70b.yaml' ], nargs='+', help='model sizes to test') parser.add_argument('--attn_impl', type=str, default='triton') parser.add_argument('-c', '--clusters', type=str, default=['r7z2'], nargs='+', choices=CLUSTER_INFO.keys()) known_args = parser.parse_known_args()[0] _gpu_types = get_gpu_types(known_args.clusters) parser.add_argument('--gpu_types', type=str, default=['a100_40gb'], nargs='+', choices=_gpu_types) known_args = parser.parse_known_args()[0] _gpu_nums = get_gpu_nums(known_args.clusters, known_args.gpu_types) parser.add_argument('-g', '--gpu_nums', type=int, default=[16], nargs='+', choices=_gpu_nums) parser.add_argument('--microbatch_size', type=int, default=None, help='set microbatch_size') parser.add_argument('--pad_vocab_multiple', type=int, default=None) parser.add_argument('--data_remote', type=str, default=None, help='optional data remote path for streaming data') parser.add_argument('--wandb', type=str_to_bool, nargs='?', const=True, default=True) parser.add_argument('--priority', type=str, default='low') parser.add_argument('--RUN', type=str_to_bool, nargs='?', const=True, 
default=False) return parser.parse_args() def get_max_seq_lens(pows=[9, 14]): return [2**n for n in range(pows[0], pows[1] + 1)] def get_global_train_batch_sizes(max_seq_len, pows, batch_sizes=[]): if pows: # global batch size in tokens (defualt: .5M thru 8M) global_train_token_counts = [2**n for n in range(pows[0], pows[1] + 1)] batch_sizes += [t // max_seq_len for t in global_train_token_counts ] # global batch size in samples return batch_sizes def get_parameters(yaml_file): local_yamls = False if 'https' in yaml_file else True if local_yamls: # Load the YAML into a parameters dictionary with open(yaml_file) as f: parameters = yaml.safe_load(f) else: # Download parameter yaml req = requests.get(yaml_file) # Load the YAML into a parameters dictionary parameters = yaml.safe_load(req.text) return parameters def get_cluster_gpu_types(cluster): return [gpu_info[0] for gpu_info in CLUSTER_INFO[cluster]] def get_gpu_types(clusters): gpu_types = set() for c in clusters: for g in get_cluster_gpu_types(c): gpu_types.add(g) return gpu_types def get_gpu_nums(clusters, gpu_types): max_gpus_per_run = 1 for c in clusters: for gpu_info in CLUSTER_INFO[c]: if gpu_info[0] in gpu_types: max_gpus_per_run = max(max_gpus_per_run, gpu_info[1]) gpu_nums = [1] while gpu_nums[-1] < max_gpus_per_run: gpu_nums += [2 * gpu_nums[-1]] return gpu_nums def get_valid_gpu_lim(cluster, gpu_type): for gpu_info in CLUSTER_INFO[cluster]: if gpu_info[0] == gpu_type: return gpu_info[1] raise ValueError def mod_parameters(parameters, max_seq_len, global_train_batch_size, precision, fsdp_config_mixed_precision='DEFAULT', fsdp_config_activation_checkpointing=None, run_name='', data_remote=None, max_duration='30ba', eval_interval=0, microbatch_size=None, wandb=True, pad_vocab_multiple=None): if run_name: parameters['run_name'] = run_name if data_remote is not None: parameters['data_remote'] = data_remote parameters['train_loader']['dataset']['remote'] = parameters[ 'data_remote'] parameters['eval_loader']['dataset']['remote'] = parameters[ 'data_remote'] parameters['data_local'] = '/tmp/c4' parameters['train_loader']['dataset']['local'] = parameters[ 'data_local'] parameters['eval_loader']['dataset']['local'] = parameters['data_local'] else: parameters['train_loader']['dataset'][ 'split'] = 'train_small' # for throughput testing purposes parameters['eval_loader']['dataset'][ 'split'] = 'val_small' # for throughput testing purposes # set max_seq_len parameters['max_seq_len'] = max_seq_len parameters['model']['max_seq_len'] = max_seq_len parameters['model']['attn_impl'] = args.attn_impl parameters['model']['low_precision_layernorm'] = True # Pad vocab size to multiple of N for A100 perf if pad_vocab_multiple: vocab_size = parameters['model']['vocab_size'] parameters['model']['vocab_size'] = math.ceil( vocab_size / pad_vocab_multiple) * pad_vocab_multiple parameters['tokenizer']['kwargs']['model_max_length'] = max_seq_len parameters['train_loader']['dataset']['max_seq_len'] = max_seq_len parameters['eval_loader']['dataset']['max_seq_len'] = max_seq_len parameters['global_train_batch_size'] = global_train_batch_size if microbatch_size is not None: parameters['device_train_microbatch_size'] = microbatch_size # update eval batch size based on change in seq len parameters['device_eval_batch_size'] = max( 1, int(parameters['device_eval_batch_size'] / ((max_seq_len / 2048)**2))) parameters['eval_loader'][ 'eval_subset_num_batches'] = 2 # for throughput testing purposes parameters['max_duration'] = max_duration parameters['eval_interval'] = 
eval_interval parameters['precision'] = precision parameters['fsdp_config']['mixed_precision'] = fsdp_config_mixed_precision if fsdp_config_activation_checkpointing is not None: parameters['fsdp_config'][ 'activation_checkpointing'] = fsdp_config_activation_checkpointing parameters['fsdp_config']['activation_checkpointing_reentrant'] = False parameters['fsdp_config']['limit_all_gathers'] = True if wandb: # add wandb parameters['loggers'] = {'wandb': {}} return parameters def get_integrations(project, git_branch=None, git_commit=None, wandb=True): integrations = [] if git_branch and git_commit: raise ValueError(f'{git_branch=} and {git_commit=} cannot both be set!') git_integration = { k: v for k, v in { 'git_branch': git_branch, 'git_commit': git_commit, }.items() if v is not None } git_integration.update({ 'integration_type': 'git_repo', 'git_repo': 'mosaicml/examples', 'pip_install': '-e .[gpu]' }) integrations = [git_integration] if wandb: integrations += [{ 'integration_type': 'wandb', 'entity': 'mosaic-ml', 'project': project }] return integrations def run_config(config, args): model_yaml, max_seq_len, global_train_batch_size, cluster, gpu_type, gpu_num, precision = config integrations = get_integrations( args.project, git_branch=args.git_branch, git_commit=args.git_commit, wandb=args.wandb) # point to git repo and potentially wandb # Define our command if args.data_remote is not None: command = """ cd examples/scripts composer train/train.py /mnt/config/parameters.yaml """ else: command = f""" cd examples/scripts python data_prep/convert_dataset_hf.py --dataset c4 --data_subset en --out_root ./my-copy-c4 --splits train_small val_small --concat_tokens {max_seq_len} --tokenizer gpt2 --eos_text '<|endoftext|>' composer train/train.py /mnt/config/parameters.yaml """ path = os.path.join('../yamls/mpt', model_yaml) parameters = get_parameters(path) model_name = '-'.join(model_yaml.split('.')[-2].split('/')[-2:]).replace( '_', '-') model_name = model_name.split('-') if 'mosaic' in model_name: model_name.pop(model_name.index('mosaic')) model_name = ''.join(model_name) name = f"{args.project}-{cluster}-{model_name}-{gpu_num}x{gpu_type}-s{max_seq_len}b{global_train_batch_size}{precision.replace('amp_', '')}".replace( '_', '-') name_len_lim = 54 - 7 if len(name) > name_len_lim: _name = name name = name[:name_len_lim] print(f'Shortening {_name} to {name} ({name_len_lim} chars)') microbatch_size = args.microbatch_size or 'auto' parameters = mod_parameters( parameters, max_seq_len, global_train_batch_size, precision, fsdp_config_mixed_precision=args.fsdp_config_mixed_precision, fsdp_config_activation_checkpointing=args. 
fsdp_config_activation_checkpointing, run_name=name, data_remote=args.data_remote, microbatch_size=microbatch_size, wandb=args.wandb, pad_vocab_multiple=args.pad_vocab_multiple) # Create run config mcli sdk/api config = RunConfig(run_name=name, name=name, gpu_type=gpu_type, gpu_num=gpu_num, cpus=None, platform=None, cluster=cluster, image=args.image, optimization_level=0, integrations=integrations, command=command, parameters=parameters, scheduling=SchedulingConfig(priority=args.priority)) if args.RUN: # Create the run from a config run = create_run(config) print(f'Launching run {run.name}') else: print(f'run = {name}') def run_check_capacity(model_yaml, gpu_num, gpu_type, p_multiplier=16): _params = model_yaml.replace('.yaml', '') params, mult = int(_params[:-1]), _params[-1] if mult == 'm': b_params = params / 1000 elif mult == 'b': b_params = params else: raise ValueError gpu_mem = int(gpu_type.split('_')[-1][:-2]) if p_multiplier * b_params > gpu_num * gpu_mem: print( f'WARNING: will not be running {model_yaml=} on {gpu_num=} {gpu_type=} since it probably will not fit into memory' ) return False return True def run_check_dtms(num_gpus, dtms, batch_size): if num_gpus * dtms > batch_size: print( f'WARNING: Cannot run with {batch_size=} on {num_gpus=} with {dtms=} ({num_gpus*dtms=}).' ) return False return True if __name__ == '__main__': args = parse_args() n_jobs = 0 for max_seq_len in get_max_seq_lens(args.seq_len_exp): for cluster in args.clusters: for gpu_type in get_cluster_gpu_types(cluster): ng_lim = get_valid_gpu_lim(cluster, gpu_type) _gpu_nums = [ng for ng in args.gpu_nums if ng <= ng_lim] for gpu_num in _gpu_nums: global_train_batch_sizes = get_global_train_batch_sizes( max_seq_len, args.batch_size_exp, args.batch_sizes) if not global_train_batch_sizes and args.microbatch_size is not None: accum = args.accum or 1 global_train_batch_sizes = [ accum * gpu_num * args.microbatch_size ] for global_train_batch_size in global_train_batch_sizes: for precision in args.precisions: for model_yaml in args.model_yamls: run = run_check_capacity(model_yaml, gpu_num, gpu_type, p_multiplier=4) if args.microbatch_size is not None: run = run and run_check_dtms( gpu_num, args.microbatch_size, global_train_batch_size) if run: config = (model_yaml, max_seq_len, global_train_batch_size, cluster, gpu_type, gpu_num, precision) print(config) run_config(config, args) n_jobs += 1 print(f'{n_jobs=}')
EXA-1-master
exa/libraries/llm-foundry/scripts/train/benchmarking/submit_benchmarks.py
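# A minimal sketch of how submit_benchmarks.py above maps token-count exponents to
# per-run global batch sizes in samples: sizes are specified as powers of two in
# tokens and divided by the sequence length. The exponent range and sequence length
# below are illustrative assumptions taken from the script's --batch_size_exp help text.

def token_exp_to_sample_batch_sizes(max_seq_len: int, pows: list) -> list:
    token_counts = [2**n for n in range(pows[0], pows[1] + 1)]
    return [t // max_seq_len for t in token_counts]

# 2^19..2^23 tokens at a 2048-token sequence length -> [256, 512, 1024, 2048, 4096]
print(token_exp_to_sample_batch_sizes(2048, [19, 23]))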
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import argparse import csv import math from typing import Any, Dict from mcli import sdk as msdk GPU_AVAILABLE_FLOPS = 312_000_000_000_000 def str_to_bool(value): # helper fn if isinstance(value, bool): return value if value.lower() in {'false', 'f', '0', 'no', 'n'}: return False elif value.lower() in {'true', 't', '1', 'yes', 'y'}: return True raise ValueError(f'{value} is not a valid boolean value') def parse_args(): parser = argparse.ArgumentParser(description=""" Parse run configs to get MPT training throughput. MFU and HFU are defined in https://arxiv.org/abs/2205.05198 All FLOP calculations do not include norm, act, residual, etc. """) parser.add_argument('--project', type=str, default='tput') parser.add_argument('--filters', type=str, default=[], nargs='+') parser.add_argument('-s', '--save-path', type=str, default='benchmark_results') parser.add_argument('-p', '--print-results', type=str_to_bool, nargs='?', const=True, default=False) return parser.parse_args() def get_runs(args): runs = [r for r in msdk.get_runs() if args.project in r.name] for filter in args.filters: runs = [r for r in runs if filter in r.name] def sort_key(r): model_name = r.name.split('-')[2] num_gpu = r.config.gpu_num if model_name[-1] == 'm': model_name_size = 1e6 elif model_name[-1] == 'b': model_name_size = 1e9 else: print(model_name) raise ValueError model_size = int(model_name[:-1]) return (model_name_size, model_size, r.config.parameters['max_seq_len'], num_gpu, r.config.parameters['global_train_batch_size']) runs.sort(reverse=True, key=sort_key) return runs def filter_runs(runs): pop_runs = [] for run in runs: if run.status == msdk.RunStatus('FAILED'): print( f"run {run.name} has FAILED (likely due to OOM error but we'd recommend checking.)" ) pop_runs.append(run) for run in pop_runs: runs.pop(runs.index(run)) pop_runs = [] for run in runs: if run.status in [ msdk.RunStatus('FAILED_PULL'), msdk.RunStatus('PENDING'), msdk.RunStatus('QUEUED'), msdk.RunStatus('RUNNING'), msdk.RunStatus('SCHEDULED'), msdk.RunStatus('STARTING'), msdk.RunStatus('STOPPED'), msdk.RunStatus('STOPPING'), msdk.RunStatus('TERMINATING'), ]: print(f'run {run.name} has run status {run.status}') pop_runs.append(run) for run in pop_runs: runs.pop(runs.index(run)) return runs def parse_run(run) -> Dict[str, Any]: n_params = micro_batchsize = throughput = -1 model_name = run.name.split('-')[2] gpu_num = run.config.gpu_num gpu_type = run.config.gpu_type fsdp_config = run.config.parameters['fsdp_config'] seq_len = run.config.parameters['max_seq_len'] global_train_batch_size = run.config.parameters['global_train_batch_size'] activation_checkpointing = fsdp_config['activation_checkpointing'] logs = msdk.get_run_logs(run) lines = '' for line in logs: lines += line lines = lines.split('\n') for line in lines: if line.startswith('n_params'): n_params = int(line.split(' ')[-1]) break lines.reverse() for line in lines: if 'trainer/device_train_microbatch_size' in line: micro_batchsize = int(line.split(' ')[-1]) break for line in lines: if 'throughput/samples_per_sec' in line: throughput = float(line.split(' ')[-1]) break d_model = run.config.parameters['model']['d_model'] n_layers = run.config.parameters['model']['n_layers'] # mfu is approximated using thoughtput and param count # the number of paramters is approximately the number of multiply-accumulates (MAC) in the network # each MAC has 2 FLOPs - we multiply by 2 ie 2 * n_param # there are 3 passes of a NN (fwd, 
bwd, delta) - we multiply by 3 ie 2 * 3 * n_param # this gets us FLOPs / token flops_per_token = 2 * n_params flops_per_seq = flops_per_token * seq_len # there are 2 FLOPS per mac; there is A=Q*K^T and out=A*V ops (ie mult by 2) attn_flops_per_seq = n_layers * 2 * 2 * (d_model * (seq_len**2)) # there are 2 ops in bwd pass and 1 in fwd pass so we mult by 3 mfu_w_attn = (3 * flops_per_seq + 3 * attn_flops_per_seq) * throughput / ( gpu_num * GPU_AVAILABLE_FLOPS) if activation_checkpointing: hfu_w_attn = (4 * flops_per_seq + 4 * attn_flops_per_seq ) * throughput / (gpu_num * GPU_AVAILABLE_FLOPS) else: hfu_w_attn = mfu_w_attn return { 'Model': model_name, 'SeqLen (T)': seq_len, '# GPUs': gpu_num, 'GPU': gpu_type, 'MFU': round(mfu_w_attn * 100, 2), 'HFU': round(hfu_w_attn * 100, 2), 'MicroBatchSize': micro_batchsize, 'GradAccum': math.ceil(global_train_batch_size / gpu_num / micro_batchsize), 'GlobalBatchSize': global_train_batch_size, 'Throughput (S/s)': int(throughput), 'Throughput (T/s)': int(throughput * seq_len), 'Throughput (T/s/GPU)': int(throughput * seq_len / gpu_num), 'GlobalBatchSize (T)': global_train_batch_size * seq_len, 'Precision': run.config.parameters['precision'], 'MP Mode': fsdp_config['mixed_precision'], 'Sharding Strategy': fsdp_config['sharding_strategy'], 'Activation Checkpointing': activation_checkpointing, 'Activation CPUOffload': str(fsdp_config['activation_cpu_offload']), 'NumParams': n_params, } def main(args): runs = get_runs(args) runs = filter_runs(runs) results = [] for run in runs: try: results.append(parse_run(run)) except Exception as e: print(f'{run.name=} not parsed') print(e) if results: csv_name = args.save_path + '.csv' with open(csv_name, 'w') as f: writer = csv.DictWriter(f, fieldnames=results[0].keys()) writer.writeheader() for result in results: writer.writerow(result) md_name = args.save_path + '.md' fieldnames = results[0].keys() with open(md_name, 'w') as f: fmt = '| ' + ' {} |' * len(fieldnames) + '\n' f.write(fmt.format(*fieldnames)) f.write(fmt.format(*['---' for _ in fieldnames])) if args.print_results: print(fmt.format(*fieldnames), end='') print(fmt.format(*['---' for _ in fieldnames]), end='') for result in results: if args.print_results: print(fmt.format(*result.values()), end='') f.write(fmt.format(*result.values())) else: print('WARNING: No results parsed.') if __name__ == '__main__': args = parse_args() main(args)
EXA-1-master
exa/libraries/llm-foundry/scripts/train/benchmarking/collect_results.py
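# A hedged sketch of the MFU estimate computed in collect_results.py above. The
# 312 TFLOP/s constant mirrors the script's hard-coded A100 peak; the model shape
# and throughput numbers below are illustrative assumptions, not measured results.

GPU_PEAK_FLOPS = 312e12


def approx_mfu(n_params, d_model, n_layers, seq_len, samples_per_sec, n_gpus):
    flops_per_token = 2 * n_params  # ~2 FLOPs per multiply-accumulate
    flops_per_seq = flops_per_token * seq_len
    # attention adds Q*K^T and A*V matmuls per layer (2 matmuls x 2 FLOPs/MAC)
    attn_flops_per_seq = n_layers * 2 * 2 * (d_model * seq_len**2)
    # fwd + bwd passes ~ 3x the forward FLOPs (the script uses 4x for its HFU
    # number when activation checkpointing is on)
    model_flops_per_sec = (3 * flops_per_seq +
                           3 * attn_flops_per_seq) * samples_per_sec
    return model_flops_per_sec / (n_gpus * GPU_PEAK_FLOPS)


# Illustrative, roughly 7B-scale shapes:
print(approx_mfu(n_params=6.7e9, d_model=4096, n_layers=32,
                 seq_len=2048, samples_per_sec=8.0, n_gpus=8))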
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import time import warnings from argparse import ArgumentParser, ArgumentTypeError, Namespace from typing import Any, Dict, Tuple, Union import torch from transformers import (AutoConfig, AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast) Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast] def str2bool(v): if isinstance(v, bool): return v if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise ArgumentTypeError('Boolean value expected.') def str_or_bool(v): if isinstance(v, bool): return v if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: return v SYSTEM_PROMPT = """<|im_start|>system - You are a helpful assistant chatbot trained by MosaicML. - You answer questions. - You are excited to be able to help the user, but will refuse to do anything that could be considered harmful to the user. - You are more than just an information source, you are also able to write poetry, short stories, and make jokes.<|im_end|>\n""" USER_MSG_FMT = '<|im_start|>user {}<|im_end|>\n' ASSISTANT_MSG_FMT = '<|im_start|>assistant {}<|im_end|>\n' def parse_args() -> Namespace: """Parse commandline arguments.""" parser = ArgumentParser( description='Load a HF CausalLM Model and use it to generate text.') parser.add_argument('-n', '--name_or_path', type=str, required=True) parser.add_argument('--max_new_tokens', type=int, default=512) parser.add_argument('--temperature', type=float, default=1.0) parser.add_argument('--top_k', type=int, default=50) parser.add_argument('--top_p', type=float, default=1.0) parser.add_argument('--do_sample', type=str2bool, nargs='?', const=True, default=True) parser.add_argument('--use_cache', type=str2bool, nargs='?', const=True, default=True) parser.add_argument('--eos_token_id', type=str, default=None) parser.add_argument('--pad_token_id', type=str, default=None) parser.add_argument('--dtype', type=str, choices=['fp32', 'fp16', 'bf16'], default='bf16') parser.add_argument('--autocast', type=str2bool, nargs='?', const=True, default=False) parser.add_argument('--warmup', type=str2bool, nargs='?', const=True, default=True) parser.add_argument('--trust_remote_code', type=str2bool, nargs='?', const=True, default=True) parser.add_argument('--use_auth_token', type=str_or_bool, nargs='?', const=True, default=None) parser.add_argument('--revision', type=str, default=None) parser.add_argument('--device', type=str, default=None) parser.add_argument('--seed', type=int, default=42) parser.add_argument('--system_prompt', type=str, default=SYSTEM_PROMPT) parser.add_argument('--user_msg_fmt', type=str, default=USER_MSG_FMT) parser.add_argument('--assistant_msg_fmt', type=str, default=ASSISTANT_MSG_FMT) return parser.parse_args() def maybe_synchronize(): if torch.cuda.is_available(): torch.cuda.synchronize() def conversation(model, tokenizer: Tokenizer, user_inp: str, history: str, **generate_kwargs: Dict[str, Any]) -> Tuple[str, str, float]: if history != '': user_inp = USER_MSG_FMT.format(user_inp) conversation = history + user_inp else: conversation = SYSTEM_PROMPT + USER_MSG_FMT.format(user_inp) input_ids = tokenizer(conversation, return_tensors='pt').input_ids input_ids = input_ids.to(model.device) maybe_synchronize() start = time.time() with torch.no_grad(): output_ids = model.generate(input_ids, **generate_kwargs) 
maybe_synchronize() end = time.time() # Slice the output_ids tensor to get only new tokens new_tokens = output_ids[0, len(input_ids[0]):] output_text = tokenizer.decode(new_tokens, skip_special_tokens=True) conversation = conversation + ASSISTANT_MSG_FMT.format(output_text) return output_text, conversation, end - start def have_conversation(model, tokenizer: Tokenizer, **generate_kwargs: Dict[str, Any]) -> None: history = '' while True: print( "Enter your message below.\n- Type 'EOF' on a new line to send input to the model\n" + "- Type 'clear' to restart the conversation\n- Type 'history' to see the conversation\n" + "- Type 'quit' to end:") user_inp_lines = [] while True: line = input() if line.strip() == 'EOF': break user_inp_lines.append(line) user_inp = '\n'.join(user_inp_lines) if user_inp.lower() == 'quit': break elif user_inp.lower() == 'clear': history = '' continue elif user_inp == 'history': print(f'history: {history}\n') continue assistant_resp, history, time_taken = conversation( model, tokenizer, user_inp, history, **generate_kwargs) print(f'Assistant: {assistant_resp} ({time_taken:.3f}s)\n') def main(args: Namespace) -> None: print('Loading HF model...') from_pretrained_kwargs = { 'use_auth_token': args.use_auth_token, 'trust_remote_code': args.trust_remote_code, 'revision': args.revision, } model = AutoModelForCausalLM.from_pretrained(args.name_or_path, **from_pretrained_kwargs) model.eval() print(f'n_params={sum(p.numel() for p in model.parameters())}') print('\nLoading HF tokenizer...') tokenizer = AutoTokenizer.from_pretrained(args.name_or_path, **from_pretrained_kwargs) if tokenizer.pad_token_id is None: warnings.warn( 'pad_token_id is not set for the tokenizer. Using eos_token_id as pad_token_id.' ) tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = 'left' generate_kwargs = { 'max_new_tokens': args.max_new_tokens, 'temperature': args.temperature, 'top_p': args.top_p, 'top_k': args.top_k, 'use_cache': args.use_cache, 'do_sample': args.do_sample, 'eos_token_id': args.eos_token_id or tokenizer.eos_token_id, 'pad_token_id': args.pad_token_id, } if args.dtype == 'fp32': dtype = torch.float32 elif args.dtype == 'fp16': dtype = torch.float16 elif args.dtype == 'bf16': dtype = torch.bfloat16 else: raise ValueError(f'Invalid dtype: {args.dtype}') if args.device is None: device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') else: device = torch.device(args.device) if args.autocast: autocast = torch.cuda.amp.autocast else: autocast = torch.no_grad model.to(device, dtype=dtype) if args.warmup: print('Warming up...') with autocast(): conversation(model, tokenizer, 'hello', '', **generate_kwargs) print('Starting conversation...') with autocast(): have_conversation(model, tokenizer, **generate_kwargs) if __name__ == '__main__': main(parse_args())
EXA-1-master
exa/libraries/llm-foundry/scripts/inference/hf_chat.py
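# A minimal sketch of the ChatML-style prompt that hf_chat.py above assembles before
# each call to model.generate. The system prompt is abbreviated and the user/assistant
# text is illustrative; the real formats live in SYSTEM_PROMPT, USER_MSG_FMT and
# ASSISTANT_MSG_FMT in the script.

SYSTEM_PROMPT = '<|im_start|>system - You are a helpful assistant chatbot.<|im_end|>\n'
USER_MSG_FMT = '<|im_start|>user {}<|im_end|>\n'
ASSISTANT_MSG_FMT = '<|im_start|>assistant {}<|im_end|>\n'

history = ''
user_inp = 'What is MosaicML?'
prompt = (history + USER_MSG_FMT.format(user_inp)
          if history else SYSTEM_PROMPT + USER_MSG_FMT.format(user_inp))
# After generation, the assistant reply is appended so the next turn sees the
# full conversation as its history.
history = prompt + ASSISTANT_MSG_FMT.format('An ML efficiency company.')
print(history)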
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0

__all__ = []
EXA-1-master
exa/libraries/llm-foundry/scripts/inference/__init__.py
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """Basic HuggingFace -> ONNX export script. This scripts show a basic HuggingFace -> ONNX export workflow. This works for a MPT model that has been saved using `MPT.save_pretrained`. For more details and examples of exporting and working with HuggingFace models with ONNX, see https://huggingface.co/docs/transformers/serialization#export-to-onnx. Example usage: 1) Local export python convert_hf_to_onnx.py --pretrained_model_name_or_path local/path/to/huggingface/folder --output_folder local/folder 2) Remote export python convert_hf_to_onnx.py --pretrained_model_name_or_path local/path/to/huggingface/folder --output_folder s3://bucket/remote/folder 3) Verify the exported model python convert_hf_to_onnx.py --pretrained_model_name_or_path local/path/to/huggingface/folder --output_folder local/folder --verify_export 4) Change the batch size or max sequence length python convert_hf_to_onnx.py --pretrained_model_name_or_path local/path/to/huggingface/folder --output_folder local/folder --export_batch_size 1 --max_seq_len 32000 """ import argparse import os from argparse import ArgumentTypeError from pathlib import Path from typing import Optional import torch from composer.utils import (maybe_create_object_store_from_uri, parse_uri, reproducibility) from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer def str2bool(v): if isinstance(v, bool): return v if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise ArgumentTypeError('Boolean value expected.') def str_or_bool(v): if isinstance(v, bool): return v if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: return v def gen_random_batch(batch_size: int, vocab_size: int, max_seq_len: int): # generate input batch of random data batch = { 'input_ids': torch.randint( low=0, high=vocab_size, size=(batch_size, max_seq_len), dtype=torch.int64, ), 'attention_mask': torch.ones(size=(batch_size, max_seq_len), dtype=torch.bool) } return batch def export_to_onnx( pretrained_model_name_or_path: str, output_folder: str, export_batch_size: int, max_seq_len: Optional[int], verify_export: bool, from_pretrained_kwargs: dict, ): reproducibility.seed_all(42) save_object_store = maybe_create_object_store_from_uri(output_folder) _, _, parsed_save_path = parse_uri(output_folder) print('Loading HF config/model/tokenizer...') tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, **from_pretrained_kwargs) config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **from_pretrained_kwargs) # specifically for MPT, switch to the torch version of attention for ONNX export if hasattr(config, 'attn_config'): config.attn_config['attn_impl'] = 'torch' model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, config=config, **from_pretrained_kwargs) model.eval() if max_seq_len is None and not hasattr(model.config, 'max_seq_len'): raise ValueError( 'max_seq_len must be specified in either the model config or as an argument to this function.' 
) elif max_seq_len is None: max_seq_len = model.config.max_seq_len assert isinstance(max_seq_len, int) # pyright print('Creating random batch...') sample_input = gen_random_batch( export_batch_size, len(tokenizer), max_seq_len, ) with torch.no_grad(): model(**sample_input) output_file = Path(parsed_save_path) / 'model.onnx' os.makedirs(parsed_save_path, exist_ok=True) print('Exporting the model with ONNX...') torch.onnx.export( model, (sample_input,), str(output_file), input_names=['input_ids', 'attention_mask'], output_names=['output'], opset_version=16, ) if verify_export: with torch.no_grad(): orig_out = model(**sample_input) import onnx # type: ignore import onnx.checker # type: ignore import onnxruntime as ort # type: ignore _ = onnx.load(str(output_file)) onnx.checker.check_model(str(output_file)) ort_session = ort.InferenceSession(str(output_file)) for key, value in sample_input.items(): sample_input[key] = value.cpu().numpy() loaded_model_out = ort_session.run(None, sample_input) torch.testing.assert_close( orig_out.logits.detach().numpy(), loaded_model_out[0], rtol=1e-2, atol=1e-2, msg=f'output mismatch between the orig and onnx exported model', ) print('exported model ouptut matches with unexported model!!') if save_object_store is not None: print('Uploading files to object storage...') for filename in os.listdir(parsed_save_path): full_path = str(Path(parsed_save_path) / filename) save_object_store.upload_object(full_path, full_path) def parse_args(): parser = argparse.ArgumentParser(description='Convert HF model to ONNX',) parser.add_argument( '--pretrained_model_name_or_path', type=str, required=True, ) parser.add_argument( '--output_folder', type=str, required=True, ) parser.add_argument( '--export_batch_size', type=int, default=8, ) parser.add_argument( '--max_seq_len', type=int, default=None, ) parser.add_argument( '--verify_export', action='store_true', ) parser.add_argument('--trust_remote_code', type=str2bool, nargs='?', const=True, default=True) parser.add_argument('--use_auth_token', type=str_or_bool, nargs='?', const=True, default=None) parser.add_argument('--revision', type=str, default=None) return parser.parse_args() def main(args: argparse.Namespace): from_pretrained_kwargs = { 'use_auth_token': args.use_auth_token, 'trust_remote_code': args.trust_remote_code, 'revision': args.revision, } export_to_onnx( pretrained_model_name_or_path=args.pretrained_model_name_or_path, output_folder=args.output_folder, export_batch_size=args.export_batch_size, max_seq_len=args.max_seq_len, verify_export=args.verify_export, from_pretrained_kwargs=from_pretrained_kwargs) if __name__ == '__main__': main(parse_args())
EXA-1-master
exa/libraries/llm-foundry/scripts/inference/convert_hf_to_onnx.py
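# A small sketch of loading the model.onnx file written by convert_hf_to_onnx.py
# above and running it with onnxruntime, mirroring the script's own --verify_export
# path. The output folder, vocab size, and sequence length here are illustrative.

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('local/folder/model.onnx')
batch = {
    'input_ids': np.random.randint(0, 50368, size=(1, 32), dtype=np.int64),
    'attention_mask': np.ones((1, 32), dtype=bool),
}
logits = session.run(None, batch)[0]
print(logits.shape)  # (batch, seq_len, vocab_size)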
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import ast import importlib import json import os import tempfile from argparse import ArgumentParser, Namespace from pathlib import Path from typing import Any, Dict, List, Optional, Union import sentencepiece as spm import torch import transformers from composer.utils import (get_file, maybe_create_object_store_from_uri, parse_uri, safe_torch_load) from transformers import (AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer) from llmfoundry import MPTConfig, MPTForCausalLM # TODO: maybe move this functionality to Composer def get_hf_config_from_composer_state_dict( state_dict: Dict[str, Any]) -> PretrainedConfig: hf_config_dict = state_dict['state']['integrations']['huggingface'][ 'model']['config']['content'] # Always set init_device='cpu' hf_config_dict['init_device'] = 'cpu' AutoConfig.register('mpt', MPTConfig) # backwards compatibility changes if hf_config_dict['model_type'] == 'mosaic_gpt': hf_config_dict['model_type'] = 'mpt' if 'attn_config' not in hf_config_dict: attn_config = {} attn_config['attn_type'] = 'multihead_attention' attn_config['attn_pdrop'] = hf_config_dict['attn_pdrop'] del hf_config_dict['attn_pdrop'] attn_config['attn_impl'] = hf_config_dict['attn_impl'] del hf_config_dict['attn_impl'] attn_config['qk_ln'] = hf_config_dict['attn_qk_ln'] del hf_config_dict['attn_qk_ln'] attn_config['clip_qkv'] = hf_config_dict['attn_clip_qkv'] del hf_config_dict['attn_clip_qkv'] attn_config['softmax_scale'] = hf_config_dict['softmax_scale'] del hf_config_dict['softmax_scale'] attn_config['prefix_lm'] = hf_config_dict['prefix_lm'] del hf_config_dict['prefix_lm'] attn_config['attn_uses_sequence_id'] = hf_config_dict[ 'attn_uses_sequence_id'] del hf_config_dict['attn_uses_sequence_id'] attn_config['alibi'] = hf_config_dict['alibi'] del hf_config_dict['alibi'] attn_config['alibi_bias_max'] = hf_config_dict['alibi_bias_max'] del hf_config_dict['alibi_bias_max'] hf_config_dict['attn_config'] = attn_config if 'init_config' not in hf_config_dict: init_config = {} init_config['name'] = hf_config_dict['param_init_fn'] del hf_config_dict['param_init_fn'] init_config['fan_mode'] = hf_config_dict['fan_mode'] del hf_config_dict['fan_mode'] init_config['init_nonlinearity'] = hf_config_dict['init_nonlinearity'] del hf_config_dict['init_nonlinearity'] init_config['init_gain'] = hf_config_dict['init_gain'] del hf_config_dict['init_gain'] init_config['init_std'] = hf_config_dict['init_std'] del hf_config_dict['init_std'] init_config['init_div_is_residual'] = hf_config_dict[ 'init_div_is_residual'] del hf_config_dict['init_div_is_residual'] init_config['emb_init_std'] = hf_config_dict['emb_init_std'] del hf_config_dict['emb_init_std'] init_config['emb_init_uniform_lim'] = hf_config_dict[ 'emb_init_uniform_lim'] del hf_config_dict['emb_init_uniform_lim'] hf_config_dict['init_config'] = init_config if 'mlp_ratio' in hf_config_dict: hf_config_dict['expansion_ratio'] = hf_config_dict['mlp_ratio'] del hf_config_dict['mlp_ratio'] if 'low_precision_layernorm' in hf_config_dict: if hf_config_dict['low_precision_layernorm']: hf_config_dict['norm_type'] = 'low_precision_layernorm' else: hf_config_dict['norm_type'] = 'layernorm' del hf_config_dict['low_precision_layernorm'] return AutoConfig.for_model(**hf_config_dict) # TODO: maybe move this functionality to Composer def get_hf_tokenizer_from_composer_state_dict( state_dict: Dict[str, Any]) -> Optional[PreTrainedTokenizer]: hf_tokenizer_state = 
state_dict['state']['integrations']['huggingface'][ 'tokenizer'] hf_tokenizer = None if hf_tokenizer_state != {}: with tempfile.TemporaryDirectory() as _tmp_dir: for filename, saved_content in hf_tokenizer_state.items(): tokenizer_file_path = Path( _tmp_dir) / f'{filename}{saved_content["file_extension"]}' if saved_content['file_extension'] == '.json': with open(tokenizer_file_path, 'w') as _tmp_file: json.dump(saved_content['content'], _tmp_file) elif saved_content['file_extension'] == '.txt': with open(tokenizer_file_path, 'w') as _tmp_file: for line in saved_content['content']: _tmp_file.write(line) _tmp_file.write('\n') elif saved_content['file_extension'] == '.model': s = spm.SentencePieceProcessor() s.load_from_serialized_proto(saved_content['content']) with open(tokenizer_file_path, 'wb') as _tmp_file: _tmp_file.write(s.serialized_model_proto()) hf_tokenizer = AutoTokenizer.from_pretrained(_tmp_dir) # remove 'name_or_path' hf_tokenizer.name_or_path = '' hf_tokenizer.init_kwargs['name_or_path'] = '' return hf_tokenizer def write_huggingface_pretrained_from_composer_checkpoint( checkpoint_path: Union[Path, str], output_path: Union[Path, str], output_precision: str = 'fp32', local_checkpoint_save_location: Optional[Union[Path, str]] = None) -> None: """Convert a Composer checkpoint to a pretrained HF checkpoint folder. Write a ``config.json`` and ``pytorch_model.bin``, like :meth:`transformers.PreTrainedModel.from_pretrained` expects, from a composer checkpoint. .. note:: This function will not work properly if you used surgery algorithms when you trained your model. In that case you will want to load the model weights using the Composer :class:`~composer.Trainer` with the ``load_path`` argument. .. testsetup:: import torch dataset = RandomTextClassificationDataset(size=16, use_keys=True) train_dataloader = torch.utils.data.DataLoader(dataset, batch_size=8) eval_dataloader = torch.utils.data.DataLoader(dataset, batch_size=8) import transformers from composer.models import HuggingFaceModel from composer.trainer import Trainer hf_model = transformers.AutoModelForSequenceClassification.from_pretrained('prajjwal1/bert-tiny', num_labels=2) hf_tokenizer = transformers.AutoTokenizer.from_pretrained('prajjwal1/bert-tiny') composer_model = HuggingFaceModel(hf_model, tokenizer=hf_tokenizer, metrics=[], use_logits=True) trainer = Trainer(model=composer_model, train_dataloader=train_dataloader, save_filename='composer-hf-checkpoint.pt', max_duration='1ep', save_folder='./') trainer.fit() trainer.close() Example: .. testcode:: from composer.models import write_huggingface_pretrained_from_composer_checkpoint write_huggingface_pretrained_from_composer_checkpoint('composer-hf-checkpoint.pt', './hf-save-pretrained-output') loaded_model = transformers.AutoModelForSequenceClassification.from_pretrained('./hf-save-pretrained-output') Args: checkpoint_path (Union[Path, str]): Path to the composer checkpoint, can be a local path, or a remote path beginning with ``s3://``, or another backend supported by :meth:`composer.utils.maybe_create_object_store_from_uri`. output_path (Union[Path, str]): Path to the folder to write the output to. Can be a local path, or a remote path beginning with ``s3://``, or another backend supported by :meth:`composer.utils.maybe_create_object_store_from_uri`. output_precision (str, optional): The precision of the output weights saved to `pytorch_model.bin`. Can be one of ``fp32``, ``fp16``, or ``bf16``. 
local_checkpoint_save_location (Optional[Union[Path, str]], optional): If specified, where to save the checkpoint file to locally. If the input ``checkpoint_path`` is already a local path, this will be a symlink. Defaults to None, which will use a temporary file. """ dtype = { 'fp32': torch.float32, 'fp16': torch.float16, 'bf16': torch.bfloat16, }[output_precision] # default local path to a tempfile if path is not provided if local_checkpoint_save_location is None: tmp_dir = tempfile.TemporaryDirectory() local_checkpoint_save_location = Path( tmp_dir.name) / 'local-composer-checkpoint.pt' # create object store if output_path object_store = maybe_create_object_store_from_uri(str(output_path)) if object_store is not None: local_output_path = tempfile.TemporaryDirectory().name else: local_output_path = output_path # create folder os.makedirs(local_output_path) # download the checkpoint file print( f'Downloading checkpoint from {checkpoint_path} -> {local_checkpoint_save_location}' ) get_file(str(checkpoint_path), str(local_checkpoint_save_location)) # Load the Composer checkpoint state dict print('Loading checkpoint into CPU RAM...') composer_state_dict = safe_torch_load(local_checkpoint_save_location) # Build and save HF Config print('#' * 30) print('Saving HF Model Config...') hf_config = get_hf_config_from_composer_state_dict(composer_state_dict) hf_config.torch_dtype = dtype hf_config.save_pretrained(local_output_path) print(hf_config) # Extract and save the HF tokenizer print('#' * 30) print('Saving HF Tokenizer...') hf_tokenizer = get_hf_tokenizer_from_composer_state_dict( composer_state_dict) if hf_tokenizer is not None: hf_tokenizer.save_pretrained(local_output_path) print(hf_tokenizer) else: print('Warning! No HF Tokenizer found!') # Extract the HF model weights print('#' * 30) print('Saving HF Model Weights...') weights_state_dict = composer_state_dict if 'state' in weights_state_dict: weights_state_dict = weights_state_dict['state']['model'] torch.nn.modules.utils.consume_prefix_in_state_dict_if_present( weights_state_dict, prefix='model.') # Convert weights to desired dtype for k, v in weights_state_dict.items(): if isinstance(v, torch.Tensor): weights_state_dict[k] = v.to(dtype=dtype) # Save weights torch.save(weights_state_dict, Path(local_output_path) / 'pytorch_model.bin') print('#' * 30) print(f'HF checkpoint folder successfully created at {local_output_path}.') if object_store is not None: print( f'Uploading HF checkpoint folder from {local_output_path} -> {output_path}' ) for file in os.listdir(local_output_path): _, _, prefix = parse_uri(str(output_path)) remote_file = os.path.join(prefix, file) local_file = os.path.join(local_output_path, file) object_store.upload_object(remote_file, local_file) print('Done.') print('#' * 30) class DeleteSpecificNodes(ast.NodeTransformer): def __init__(self, nodes_to_remove: List[ast.AST]): self.nodes_to_remove = nodes_to_remove def visit(self, node: ast.AST): if node in self.nodes_to_remove: return None return super().visit(node) def convert_to_relative_import(module_name: str) -> str: parts = module_name.split('.') return '.' 
+ parts[-1] def find_module_file(module_name: str) -> str: module = importlib.import_module(module_name) module_file = module.__file__ return module_file def process_file(file_path: str, folder_path: str) -> List[str]: with open(file_path, 'r') as f: source = f.read() tree = ast.parse(source) new_files_to_process = [] nodes_to_remove = [] for node in ast.walk(tree): # convert any llmfoundry imports into relative imports if isinstance(node, ast.ImportFrom) and node.module.startswith('llmfoundry'): module_path = find_module_file(node.module) node.module = convert_to_relative_import(node.module) # recursively process any llmfoundry files new_files_to_process.append(module_path) # remove any imports from composer or omegaconf elif isinstance( node, ast.ImportFrom) and (node.module.startswith('composer') or node.module.startswith('omegaconf')): nodes_to_remove.append(node) # remove the Composer* class elif isinstance(node, ast.ClassDef) and node.name.startswith('Composer'): nodes_to_remove.append(node) # remove the __all__ declaration in any __init__.py files, whose enclosing module # will be converted to a single file of the same name elif isinstance(node, ast.Assign) and len(node.targets) == 1 and isinstance( node.targets[0], ast.Name) and node.targets[0].id == '__all__': nodes_to_remove.append(node) transformer = DeleteSpecificNodes(nodes_to_remove) new_tree = transformer.visit(tree) new_filename = os.path.basename(file_path) # special case for __init__.py to mimic the original submodule if new_filename == '__init__.py': new_filename = file_path.split('/')[-2] + '.py' new_file_path = os.path.join(folder_path, new_filename) with open(new_file_path, 'w') as f: f.write(ast.unparse(new_tree)) return new_files_to_process def edit_files_for_hf_compatibility(folder: str): files_to_process = [ os.path.join(folder, filename) for filename in os.listdir(folder) if filename.endswith('.py') ] files_processed_and_queued = set(files_to_process) while len(files_to_process) > 0: to_process = files_to_process.pop() if os.path.isfile(to_process) and to_process.endswith('.py'): to_add = process_file(to_process, folder) for file in to_add: if file not in files_processed_and_queued: files_to_process.append(file) files_processed_and_queued.add(file) def parse_args() -> Namespace: """Parse commandline arguments.""" parser = ArgumentParser( description= 'Convert Composer checkpoint and Omegaconf model config into a standard HuggingFace checkpoint folder, and optionally upload to the hub.' 
) parser.add_argument('--composer_path', type=str, required=True) parser.add_argument('--hf_output_path', type=str, required=True) parser.add_argument('--local_checkpoint_save_location', type=str, default=None) parser.add_argument('--output_precision', type=str, choices=['fp32', 'fp16', 'bf16'], default='fp32') parser.add_argument('--hf_repo_for_upload', type=str, default=None) parser.add_argument('--test_uploaded_model', action='store_true') return parser.parse_args() def main(args: Namespace) -> None: write_huggingface_pretrained_from_composer_checkpoint( checkpoint_path=args.composer_path, output_path=args.hf_output_path, output_precision=args.output_precision, local_checkpoint_save_location=args.local_checkpoint_save_location) dtype = { 'fp32': torch.float32, 'fp16': torch.float16, 'bf16': torch.bfloat16, }[args.output_precision] # register config auto class MPTConfig.register_for_auto_class() # register model auto class MPTForCausalLM.register_for_auto_class('AutoModelForCausalLM') print(f'Loading model from {args.hf_output_path}') config = MPTConfig.from_pretrained(args.hf_output_path) # You have to edit the config this way, because attn_config is a nested dictionary config.attn_config['attn_impl'] = 'torch' loaded_hf_model = MPTForCausalLM.from_pretrained(args.hf_output_path, config=config, torch_dtype=dtype) delattr(loaded_hf_model.config, '_name_or_path') loaded_hf_model.save_pretrained(args.hf_output_path) print(f'Loading tokenizer from {args.hf_output_path}') tokenizer = transformers.AutoTokenizer.from_pretrained(args.hf_output_path) tokenizer.save_pretrained(args.hf_output_path) print('Editing files for HF compatibility...') edit_files_for_hf_compatibility(args.hf_output_path) if args.hf_repo_for_upload is not None: from huggingface_hub import HfApi api = HfApi() print( f'Uploading {args.hf_output_path} to HuggingFace Hub at {args.hf_repo_for_upload}' ) api.create_repo(repo_id=args.hf_repo_for_upload, use_auth_token=True, repo_type='model', private=True, exist_ok=True) print('Repo created.') # ignore the full checkpoint file if we now have sharded checkpoint files ignore_patterns = [] if any( f.startswith('pytorch_model-00001') for f in os.listdir(args.hf_output_path)): ignore_patterns.append('pytorch_model.bin') api.upload_folder(folder_path=args.hf_output_path, repo_id=args.hf_repo_for_upload, use_auth_token=True, repo_type='model', ignore_patterns=ignore_patterns) print('Folder uploaded.') if args.test_uploaded_model: print('Testing uploaded model...') hub_model = transformers.AutoModelForCausalLM.from_pretrained( args.hf_repo_for_upload, trust_remote_code=True, use_auth_token=True, torch_dtype=dtype) hub_tokenizer = transformers.AutoTokenizer.from_pretrained( args.hf_repo_for_upload, trust_remote_code=True, use_auth_token=True) assert sum(p.numel() for p in hub_model.parameters()) == sum( p.numel() for p in loaded_hf_model.parameters()) assert all( str(type(module1)).split('.')[-2:] == str(type(module2)).split( '.')[-2:] for module1, module2 in zip( hub_model.modules(), loaded_hf_model.modules())) assert next( hub_model.parameters() ).dtype == dtype, f'Expected model dtype to be {dtype}, but got {next(hub_model.parameters()).dtype}' print( hub_tokenizer.batch_decode( hub_model.generate(hub_tokenizer( 'MosaicML is', return_tensors='pt').input_ids, max_new_tokens=10))) if __name__ == '__main__': main(parse_args())
EXA-1-master
exa/libraries/llm-foundry/scripts/inference/convert_composer_to_hf.py
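# A minimal sketch of loading the HF checkpoint folder that convert_composer_to_hf.py
# above produces, mirroring its --test_uploaded_model check. The local output path is
# illustrative; trust_remote_code is needed because the MPT modeling code is saved
# alongside the weights.

import transformers

model = transformers.AutoModelForCausalLM.from_pretrained(
    './hf-output-folder', trust_remote_code=True)
tokenizer = transformers.AutoTokenizer.from_pretrained('./hf-output-folder')
inputs = tokenizer('MosaicML is', return_tensors='pt')
print(tokenizer.batch_decode(
    model.generate(inputs.input_ids, max_new_tokens=10)))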
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 import random import time import warnings from argparse import ArgumentParser, ArgumentTypeError, Namespace from contextlib import nullcontext import torch from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer def get_dtype(dtype): if dtype == 'fp32': return torch.float32 elif dtype == 'fp16': return torch.float16 elif dtype == 'bf16': return torch.bfloat16 else: raise NotImplementedError( f'dtype {dtype} is not supported. ' f'We only support fp32, fp16, and bf16 currently') def str2bool(v): if isinstance(v, bool): return v if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise ArgumentTypeError('Boolean value expected.') def str_or_bool(v): if isinstance(v, bool): return v if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: return v def parse_args() -> Namespace: """Parse commandline arguments.""" parser = ArgumentParser( description='Load a HF CausalLM Model and use it to generate text.') parser.add_argument('-n', '--name_or_path', type=str, required=True) parser.add_argument( '-p', '--prompts', nargs='+', default=[ 'My name is', 'This is an explanation of deep learning to a five year old. Deep learning is', ]) parser.add_argument('--max_new_tokens', type=int, default=100) parser.add_argument('--max_seq_len', type=int, default=None) parser.add_argument('--temperature', type=float, default=1.0) parser.add_argument('--top_k', type=int, default=50) parser.add_argument('--top_p', type=float, default=1.0) parser.add_argument('--do_sample', type=str2bool, nargs='?', const=True, default=True) parser.add_argument('--use_cache', type=str2bool, nargs='?', const=True, default=True) parser.add_argument('--eos_token_id', type=int, default=None) parser.add_argument('--pad_token_id', type=int, default=None) parser.add_argument('--model_dtype', type=str, choices=['fp32', 'fp16', 'bf16'], default=None) parser.add_argument('--autocast_dtype', type=str, choices=['fp32', 'fp16', 'bf16'], default=None) parser.add_argument('--warmup', type=str2bool, nargs='?', const=True, default=True) parser.add_argument('--trust_remote_code', type=str2bool, nargs='?', const=True, default=True) parser.add_argument('--use_auth_token', type=str_or_bool, nargs='?', const=True, default=None) parser.add_argument('--revision', type=str, default=None) parser.add_argument('--device', type=str, default=None) parser.add_argument('--attn_impl', type=str, default=None) parser.add_argument('--seed', type=int, default=42) return parser.parse_args() def maybe_synchronize(): if torch.cuda.is_available(): torch.cuda.synchronize() def main(args: Namespace) -> None: # Grab config first print(f'Loading HF Config...') from_pretrained_kwargs = { 'use_auth_token': args.use_auth_token, 'trust_remote_code': args.trust_remote_code, 'revision': args.revision, } try: config = AutoConfig.from_pretrained(args.name_or_path, **from_pretrained_kwargs) except Exception as e: raise RuntimeError( 'If you are having auth problems, try logging in via `huggingface-cli login` ' 'or by setting the environment variable `export HUGGING_FACE_HUB_TOKEN=... ' 'using your access token from https://huggingface.co/settings/tokens.' 
) from e # Set device and model_dtype if args.device is not None: device = args.device else: device = 'cuda:0' if torch.cuda.is_available() else 'cpu' if args.model_dtype is not None: model_dtype = get_dtype(args.model_dtype) else: model_dtype = config.torch_dtype or torch.float32 # Load HF Model print(f'Loading HF model to device={device} and dtype={model_dtype}...') model_kwargs = { 'attn_impl': args.attn_impl, 'max_seq_len': args.max_seq_len, 'torch_dtype': model_dtype, } model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None} try: model = AutoModelForCausalLM.from_pretrained(args.name_or_path, **from_pretrained_kwargs, **model_kwargs) model.to(device) model.eval() print(f'n_params={sum(p.numel() for p in model.parameters())}') except Exception as e: raise RuntimeError( 'If you are having auth problems, try logging in via `huggingface-cli login` ' 'or by setting the environment variable `export HUGGING_FACE_HUB_TOKEN=... ' 'using your access token from https://huggingface.co/settings/tokens.' ) from e print('\nLoading HF tokenizer...') tokenizer = AutoTokenizer.from_pretrained(args.name_or_path, **from_pretrained_kwargs) if tokenizer.pad_token_id is None: warnings.warn( 'pad_token_id is not set for the tokenizer. Using eos_token_id as pad_token_id.' ) tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = 'left' generate_kwargs = { 'max_new_tokens': args.max_new_tokens, 'temperature': args.temperature, 'top_p': args.top_p, 'top_k': args.top_k, 'use_cache': args.use_cache, 'do_sample': args.do_sample, 'eos_token_id': args.eos_token_id or tokenizer.eos_token_id, 'pad_token_id': args.pad_token_id or tokenizer.pad_token_id, } print(f'\nGenerate kwargs:\n{generate_kwargs}') print(f'\nTokenizing prompts...') maybe_synchronize() encode_start = time.time() encoded_inp = tokenizer(args.prompts, return_tensors='pt', padding=True) for key, value in encoded_inp.items(): encoded_inp[key] = value.to(device) maybe_synchronize() encode_end = time.time() input_tokens = torch.sum(encoded_inp['input_ids'] != tokenizer.pad_token_id, axis=1).numpy(force=True) # type: ignore # Autocast if args.autocast_dtype is not None: autocast_dtype = get_dtype(args.autocast_dtype) autocast_context = torch.autocast(device, autocast_dtype) print(f'Using autocast with dtype={autocast_dtype}...') else: autocast_context = nullcontext() print('NOT using autocast...') # Generate function with correct context managers def _generate(encoded_inp): with torch.no_grad(): with autocast_context: return model.generate( input_ids=encoded_inp['input_ids'], attention_mask=encoded_inp['attention_mask'], **generate_kwargs, ) # Warmup if args.warmup: print('Warming up...') _ = _generate(encoded_inp) # Seed randomness random.seed(args.seed) torch.manual_seed(args.seed) # Run HF generate print('Generating responses...') maybe_synchronize() gen_start = time.time() encoded_gen = _generate(encoded_inp) maybe_synchronize() gen_end = time.time() decode_start = time.time() decoded_gen = tokenizer.batch_decode(encoded_gen, skip_special_tokens=True) maybe_synchronize() decode_end = time.time() gen_tokens = torch.sum(encoded_gen != tokenizer.pad_token_id, axis=1).numpy(force=True) # type: ignore # Print generations delimiter = '#' * 100 for prompt, gen in zip(args.prompts, decoded_gen): continuation = gen[len(prompt):] print(delimiter) print('\033[92m' + prompt + '\033[0m' + continuation) print(delimiter) # Print timing info bs = len(args.prompts) output_tokens = gen_tokens - input_tokens total_input_tokens = 
input_tokens.sum() total_output_tokens = output_tokens.sum() encode_latency = 1000 * (encode_end - encode_start) gen_latency = 1000 * (gen_end - gen_start) decode_latency = 1000 * (decode_end - decode_start) total_latency = encode_latency + gen_latency + decode_latency latency_per_output_token = total_latency / total_output_tokens output_tok_per_sec = 1000 / latency_per_output_token print(f'{bs=}, {input_tokens=}, {output_tokens=}') print(f'{total_input_tokens=}, {total_output_tokens=}') print( f'{encode_latency=:.2f}ms, {gen_latency=:.2f}ms, {decode_latency=:.2f}ms, {total_latency=:.2f}ms' ) print(f'{latency_per_output_token=:.2f}ms/tok') print(f'{output_tok_per_sec=:.2f}tok/sec') if __name__ == '__main__': main(parse_args())
EXA-1-master
exa/libraries/llm-foundry/scripts/inference/hf_generate.py
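# A short sketch of how hf_generate.py above counts prompt tokens per sequence in a
# left-padded batch: non-pad positions are summed row by row. The tokenizer and
# prompts here are illustrative stand-ins.

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('gpt2')
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = 'left'

encoded = tokenizer(['My name is', 'This is an explanation of deep learning'],
                    return_tensors='pt',
                    padding=True)
input_tokens = torch.sum(encoded['input_ids'] != tokenizer.pad_token_id, axis=1)
print(input_tokens)  # prompt length of each row, excluding padding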
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0

import contextlib
import sys
import time

import numpy as np
import torch
# You can use this to load the model weights
from omegaconf import OmegaConf as om

from llmfoundry import COMPOSER_MODEL_REGISTRY


def get_precision(precision):
    if precision == 'fp32':
        return torch.float32
    elif precision == 'fp16':
        return torch.float16
    elif precision == 'bf16':
        return torch.bfloat16
    else:
        raise NotImplementedError(
            f'Precision of type {precision} is not supported. '
            f'We only support fp32, fp16, and bf16 currently')


def compare_precision(precision, param_dtype):
    if precision != param_dtype:
        raise ValueError(
            f'Precision type is: {precision} but model dtype is: {param_dtype}. '
            f"The expected precision and model precision don't match.")


def main(config):
    model_dtype = get_precision(config.model_dtype)
    autocast_precision = None
    if config.autocast_precision is not None:
        autocast_precision = get_precision(config.autocast_precision)

    inference_config = {
        'replace_with_kernel_inject': True,
        'dtype': model_dtype,
        'replace_method': 'auto',
        'enable_cuda_graph': False,
        'tensor_parallel': {
            'tp_size': 0
        },
    }

    composer_model = COMPOSER_MODEL_REGISTRY[config.model.name](
        config.model, config.tokenizer)
    model = composer_model.model
    model.eval()

    if config.use_deepspeed:
        import deepspeed  # type: ignore
        model = deepspeed.init_inference(model, config=inference_config)

        # Checking if deepspeed casts dtypes correctly
        for _, p in model.named_parameters():
            compare_precision(model_dtype, p.dtype)
            break
    else:
        model.to(torch.cuda.current_device())
        model.to(model_dtype)

    n_params = sum(p.numel() for p in model.parameters())
    print('n_params is: ', n_params)

    print('name, latency (s), tokens / s, output token time (ms)')
    print('=' * 75)

    stats = []
    for batch_size in config.batch_sizes:
        for input_length in config.input_lengths:
            for output_length in config.output_lengths:
                times = []

                batch = torch.randint(
                    0,
                    config.model.vocab_size - 1,
                    size=(batch_size, input_length)).to(
                        f'cuda:{torch.cuda.current_device()}')
                # We're just going to have generate eos, padding tokens be
                # ignored by HF generate
                batch = batch.to(torch.long)
                attention_mask = torch.ones_like(batch)

                torch.cuda.synchronize()

                for i in range(config.num_runs + 1):
                    start_time = time.time()
                    with torch.no_grad():
                        precision_context = contextlib.nullcontext()
                        # autocast_precision is already a torch dtype here, so
                        # compare against torch dtypes rather than strings
                        if autocast_precision is not None and autocast_precision in [
                                torch.float16, torch.bfloat16
                        ]:
                            precision_context = torch.cuda.amp.autocast(
                                True, dtype=autocast_precision)

                        with precision_context:
                            model.generate(batch,
                                           max_new_tokens=output_length,
                                           use_cache=True,
                                           attention_mask=attention_mask,
                                           eos_token_id=None,
                                           pad_token_id=None)

                    torch.cuda.synchronize()

                    # We noticed there sometimes might be a small bit of startup time
                    # so we only start to benchmark after some number of batches
                    if i >= config.num_warmup_batches:
                        times.append(time.time() - start_time)

                num_output_tokens = output_length * batch_size
                mean_time = np.mean(times)
                tokens_per_second = num_output_tokens / float(mean_time)
                ms_per_seq_output_token = float(
                    mean_time) * 1000 / num_output_tokens

                result = (
                    f'{config.benchmark_name}_{batch_size}_{input_length}_{output_length}',
                    f'{mean_time:.3f}', f'{tokens_per_second:.3f}',
                    f'{ms_per_seq_output_token:.3f}')

                run_name, latency, tokens_per_second, ms_per_seq_output_token = result
                print(
                    f'{run_name}, {latency}, {tokens_per_second}, {ms_per_seq_output_token}'
                )
                stats.append(result)

    print('=' * 75)
    print('name, latency (s), tokens / s, output token time (ms)')
    for val in stats:
        run_name, latency, tokens_per_second, ms_per_seq_output_token = val
        print(
            f'{run_name}, latency (s) {latency}, tokens per second {tokens_per_second}, output token time (ms) {ms_per_seq_output_token}'
        )


if __name__ == '__main__':
    yaml_path, args_list = sys.argv[1], sys.argv[2:]
    with open(yaml_path) as f:
        yaml_config = om.load(f)
    cli_config = om.from_cli(args_list)
    config = om.merge(yaml_config, cli_config)
    main(config)
EXA-1-master
exa/libraries/llm-foundry/scripts/inference/benchmarking/benchmark.py
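# A hedged sketch of the kind of OmegaConf config that benchmark.py above consumes,
# inferred from the keys the script reads. The model and tokenizer sub-configs are
# placeholders; in practice they come from the YAMLs shipped with the repo.

from omegaconf import OmegaConf as om

cfg = om.create({
    'benchmark_name': 'mpt_generate',
    'model_dtype': 'bf16',
    'autocast_precision': None,
    'use_deepspeed': False,
    'num_runs': 5,
    'num_warmup_batches': 1,
    'batch_sizes': [1, 8],
    'input_lengths': [128],
    'output_lengths': [128],
    'model': {'name': 'mpt_causal_lm', 'vocab_size': 50368},  # placeholder sub-config
    'tokenizer': {'name': 'EleutherAI/gpt-neox-20b'},  # placeholder sub-config
})
print(om.to_yaml(cfg))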
# Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0

import sys
import time
from typing import List

import torch
from composer.loggers import InMemoryLogger, LoggerDestination
from composer.trainer import Trainer
from composer.utils import dist, get_device, reproducibility
from omegaconf import DictConfig
from omegaconf import OmegaConf as om

from llmfoundry.models.model_registry import COMPOSER_MODEL_REGISTRY
from llmfoundry.utils.builders import (build_icl_evaluators, build_logger,
                                       build_tokenizer)


def main(cfg):
    cfg.dist_timeout = cfg.get('dist_timeout', 600.0)

    reproducibility.seed_all(cfg.seed)
    dist.initialize_dist(get_device(None), timeout=cfg.dist_timeout)

    # Build tokenizer and model
    tokenizer = build_tokenizer(cfg.tokenizer)
    composer_model = COMPOSER_MODEL_REGISTRY[cfg.model.name](cfg.model,
                                                             tokenizer)

    evaluators, logger_keys = build_icl_evaluators(cfg.icl_tasks, tokenizer,
                                                   cfg.max_seq_len,
                                                   cfg.device_eval_batch_size)

    in_memory_logger = InMemoryLogger()  # track metrics in the in_memory_logger
    loggers: List[LoggerDestination] = [
        build_logger(name, logger_cfg)
        for name, logger_cfg in (cfg.get('loggers') or {}).items()
    ]
    loggers.append(in_memory_logger)

    fsdp_config = cfg.get('fsdp_config', None)
    fsdp_config = om.to_container(
        fsdp_config, resolve=True) if fsdp_config is not None else None

    load_path = cfg.get('load_path', None)

    trainer = Trainer(
        model=composer_model,
        loggers=loggers,
        fsdp_config=fsdp_config,  # type: ignore
        load_path=load_path,
        load_weights_only=True,
        progress_bar=False,
        log_to_console=True,
        dist_timeout=cfg.dist_timeout,
    )

    if torch.cuda.is_available():
        torch.cuda.synchronize()
    a = time.time()
    trainer.eval(eval_dataloader=evaluators)
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    b = time.time()
    print(f'Ran eval in: {b-a} seconds')

    for key in logger_keys:
        if key in in_memory_logger.data:
            result = in_memory_logger.data[key][0][1].item()
            print(f'{key}: {result}')


if __name__ == '__main__':
    yaml_path, args_list = sys.argv[1], sys.argv[2:]
    with open(yaml_path) as f:
        yaml_cfg = om.load(f)
    cli_cfg = om.from_cli(args_list)
    cfg = om.merge(yaml_cfg, cli_cfg)
    main(cfg)
EXA-1-master
exa/libraries/llm-foundry/scripts/eval/eval.py
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 # Copyright 2022 MosaicML Composer authors # SPDX-License-Identifier: Apache-2.0 """Run pytest using MCP.""" import argparse import time from mcli.sdk import (RunConfig, RunStatus, create_run, follow_run_logs, stop_run, wait_for_run_status) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--name', type=str, default='mcp-pytest', help='Base name of run') parser.add_argument('--cluster', type=str, default='r1z4', help='Cluster to use') parser.add_argument('--gpu_type', type=str, default='a100_40gb', help='Type of GPU to use') parser.add_argument('--gpu_num', type=int, default=2, help='Number of the GPU to use') parser.add_argument('--image', type=str, default='mosaicml/pytorch:latest', help='Docker image to use') parser.add_argument('--git_branch', type=str, help='Git branch to check out') parser.add_argument( '--git_commit', type=str, help='Git commit to check out. Overrides git_branch if specified') parser.add_argument( '--pr_number', type=int, help= 'PR number to check out. Overrides git_branch/git_commit if specified') parser.add_argument('--pytest_markers', type=str, help='Markers to pass to pytest') parser.add_argument('--pytest_command', type=str, help='Command to run pytest') parser.add_argument('--timeout', type=int, default=1800, help='Timeout for run (in seconds)') args = parser.parse_args() name = args.name git_integration = { 'integration_type': 'git_repo', 'git_repo': 'mosaicml/llm-foundry', 'ssh_clone': 'False', } if args.git_branch is not None and args.git_commit is None: name += f'-branch-{args.git_branch}' git_integration['git_branch'] = args.git_branch if args.git_commit is not None: name += f'-commit-{args.git_commit}' git_integration['git_commit'] = args.git_commit command = 'cd llm-foundry' # Checkout a specific PR if specified if args.pr_number is not None: name += f'-pr-{args.pr_number}' command += f''' git fetch origin pull/{args.pr_number}/head:pr_branch git checkout pr_branch ''' # Shorten name if too long if len(name) > 56: name = name[:56] command += f''' pip install --upgrade --user .[all] export COMMON_ARGS="-v --durations=20 -m '{args.pytest_markers}'" make test PYTEST='{args.pytest_command}' EXTRA_ARGS="$COMMON_ARGS --codeblocks" make test-dist PYTEST='{args.pytest_command}' EXTRA_ARGS="$COMMON_ARGS" WORLD_SIZE=2 python -m coverage combine python -m coverage report ''' config = RunConfig( name=name, cluster=args.cluster, gpu_type=args.gpu_type, gpu_num=args.gpu_num, image=args.image, integrations=[git_integration], command=command, ) # Create run run = create_run(config) print(f'[GHA] Run created: {run.name}') # Wait until run starts before fetching logs run = wait_for_run_status(run, status='running') start_time = time.time() print('[GHA] Run started. Following logs...') # Print logs for line in follow_run_logs(run): print(line, end='') # Check if args.timeout seconds have elapsed if time.time() - start_time > args.timeout: print( f'[GHA] Run timed out and did not complete in {args.timeout/60} minutes.' ) run = stop_run(run) print('[GHA] Run stopped.') break print('[GHA] Run completed. Waiting for run to finish...') run = wait_for_run_status(run, status='completed') # Fail if command exited with non-zero exit code or timed out assert run.status == RunStatus.COMPLETED
EXA-1-master
exa/libraries/llm-foundry/.github/mcp/mcp_pytest.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from setuptools import setup, find_namespace_packages import platform DEPENDENCY_LINKS = [] if platform.system() == "Windows": DEPENDENCY_LINKS.append("https://download.pytorch.org/whl/torch_stable.html") def fetch_requirements(filename): with open(filename) as f: return [ln.strip() for ln in f.read().split("\n")] setup( name="salesforce-lavis", version="1.0.1", author="Dongxu Li, Junnan Li, Hung Le, Guangsen Wang, Silvio Savarese, Steven C.H. Hoi", description="LAVIS - A One-stop Library for Language-Vision Intelligence", long_description=open("README.md", "r", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="Vision-Language, Multimodal, Image Captioning, Generative AI, Deep Learning, Library, PyTorch", license="3-Clause BSD", packages=find_namespace_packages(include="lavis.*"), install_requires=fetch_requirements("requirements.txt"), python_requires=">=3.7.0", include_package_data=True, dependency_links=DEPENDENCY_LINKS, zip_safe=False, )
EXA-1-master
exa/libraries/LAVIS/setup.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import argparse import os import random import numpy as np import torch import torch.backends.cudnn as cudnn import lavis.tasks as tasks from lavis.common.config import Config from lavis.common.dist_utils import get_rank, init_distributed_mode from lavis.common.logger import setup_logger from lavis.common.optims import ( LinearWarmupCosineLRScheduler, LinearWarmupStepLRScheduler, ) from lavis.common.registry import registry from lavis.common.utils import now # imports modules for registration from lavis.datasets.builders import * from lavis.models import * from lavis.processors import * from lavis.runners import * from lavis.tasks import * def parse_args(): parser = argparse.ArgumentParser(description="Training") parser.add_argument("--cfg-path", required=True, help="path to configuration file.") parser.add_argument( "--options", nargs="+", help="override some settings in the used config, the key-value pair " "in xxx=yyy format will be merged into config file (deprecate), " "change to --cfg-options instead.", ) args = parser.parse_args() # if 'LOCAL_RANK' not in os.environ: # os.environ['LOCAL_RANK'] = str(args.local_rank) return args def setup_seeds(config): seed = config.run_cfg.seed + get_rank() random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark = False cudnn.deterministic = True def get_runner_class(cfg): """ Get runner class from config. Default to epoch-based runner. """ runner_cls = registry.get_runner_class(cfg.run_cfg.get("runner", "runner_base")) return runner_cls def main(): # allow auto-dl completes on main process without timeout when using NCCL backend. # os.environ["NCCL_BLOCKING_WAIT"] = "1" # set before init_distributed_mode() to ensure the same job_id shared across all ranks. job_id = now() cfg = Config(parse_args()) init_distributed_mode(cfg.run_cfg) setup_seeds(cfg) # set after init_distributed_mode() to only log on master. setup_logger() cfg.pretty_print() task = tasks.setup_task(cfg) datasets = task.build_datasets(cfg) model = task.build_model(cfg) runner = get_runner_class(cfg)( cfg=cfg, job_id=job_id, task=task, model=model, datasets=datasets ) runner.train() if __name__ == "__main__": main()
EXA-1-master
exa/libraries/LAVIS/train.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import argparse import random import numpy as np import torch import torch.backends.cudnn as cudnn import lavis.tasks as tasks from lavis.common.config import Config from lavis.common.dist_utils import get_rank, init_distributed_mode from lavis.common.logger import setup_logger from lavis.common.optims import ( LinearWarmupCosineLRScheduler, LinearWarmupStepLRScheduler, ) from lavis.common.utils import now # imports modules for registration from lavis.datasets.builders import * from lavis.models import * from lavis.processors import * from lavis.runners.runner_base import RunnerBase from lavis.tasks import * def parse_args(): parser = argparse.ArgumentParser(description="Training") parser.add_argument("--cfg-path", required=True, help="path to configuration file.") parser.add_argument( "--options", nargs="+", help="override some settings in the used config, the key-value pair " "in xxx=yyy format will be merged into config file (deprecate), " "change to --cfg-options instead.", ) args = parser.parse_args() # if 'LOCAL_RANK' not in os.environ: # os.environ['LOCAL_RANK'] = str(args.local_rank) return args def setup_seeds(config): seed = config.run_cfg.seed + get_rank() random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark = False cudnn.deterministic = True def main(): # allow auto-dl completes on main process without timeout when using NCCL backend. # os.environ["NCCL_BLOCKING_WAIT"] = "1" # set before init_distributed_mode() to ensure the same job_id shared across all ranks. job_id = now() cfg = Config(parse_args()) init_distributed_mode(cfg.run_cfg) setup_seeds(cfg) # set after init_distributed_mode() to only log on master. setup_logger() cfg.pretty_print() task = tasks.setup_task(cfg) datasets = task.build_datasets(cfg) model = task.build_model(cfg) runner = RunnerBase( cfg=cfg, job_id=job_id, task=task, model=model, datasets=datasets ) runner.evaluate(skip_reload=True) if __name__ == "__main__": main()
EXA-1-master
exa/libraries/LAVIS/evaluate.py
""" # Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from PIL import Image import requests import torch import os from lavis.common.registry import registry from lavis.processors import * from lavis.models import * from lavis.common.utils import build_default_model device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def load_demo_image(): img_url = ( "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" ) raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image def read_img(filepath): raw_image = Image.open(filepath).convert("RGB") return raw_image # model model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base.pth" feature_extractor = BlipFeatureExtractor(pretrained=model_url) feature_extractor.eval() feature_extractor = feature_extractor.to(device) # preprocessors vis_processor = BlipImageEvalProcessor(image_size=224) text_processor = BlipCaptionProcessor() # files to process # file_root = "/export/home/.cache/lavis/coco/images/val2014" file_root = "/export/home/.cache/lavis/coco/images/train2014" filepaths = os.listdir(file_root) print(len(filepaths)) caption = "dummy" path2feat = dict() bsz = 256 images_in_batch = [] filepaths_in_batch = [] for i, filename in enumerate(filepaths): if i % bsz == 0 and i > 0: images_in_batch = torch.cat(images_in_batch, dim=0).to(device) with torch.no_grad(): image_features = feature_extractor( images_in_batch, caption, mode="image", normalized=True )[:, 0] for filepath, image_feat in zip(filepaths_in_batch, image_features): path2feat[os.path.basename(filepath)] = image_feat.detach().cpu() images_in_batch = [] filepaths_in_batch = [] print(len(path2feat), image_features.shape) else: filepath = os.path.join(file_root, filename) image = read_img(filepath) image = vis_processor(image).unsqueeze(0) images_in_batch.append(image) filepaths_in_batch.append(filepath) torch.save(path2feat, "path2feat_coco_train2014.pth")
EXA-1-master
exa/libraries/LAVIS/app/calculate_coco_features.py
""" # Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import plotly.graph_objects as go import requests import streamlit as st import torch from lavis.models import load_model from lavis.processors import load_processor from lavis.processors.blip_processors import BlipCaptionProcessor from PIL import Image from app import device, load_demo_image from app.utils import load_blip_itm_model from lavis.processors.clip_processors import ClipImageEvalProcessor @st.cache() def load_demo_image(img_url=None): if not img_url: img_url = "https://img.atlasobscura.com/yDJ86L8Ou6aIjBsxnlAy5f164w1rjTgcHZcx2yUs4mo/rt:fit/w:1200/q:81/sm:1/scp:1/ar:1/aHR0cHM6Ly9hdGxh/cy1kZXYuczMuYW1h/em9uYXdzLmNvbS91/cGxvYWRzL3BsYWNl/X2ltYWdlcy85MDll/MDRjOS00NTJjLTQx/NzQtYTY4MS02NmQw/MzI2YWIzNjk1ZGVk/MGZhMTJiMTM5MmZi/NGFfUmVhcl92aWV3/X29mX3RoZV9NZXJs/aW9uX3N0YXR1ZV9h/dF9NZXJsaW9uX1Bh/cmssX1NpbmdhcG9y/ZSxfd2l0aF9NYXJp/bmFfQmF5X1NhbmRz/X2luX3RoZV9kaXN0/YW5jZV8tXzIwMTQw/MzA3LmpwZw.jpg" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image @st.cache( hash_funcs={ torch.nn.parameter.Parameter: lambda parameter: parameter.data.detach() .cpu() .numpy() }, allow_output_mutation=True, ) def load_model_cache(model_type, device): if model_type == "blip": model = load_model( "blip_feature_extractor", model_type="base", is_eval=True, device=device ) elif model_type == "albef": model = load_model( "albef_feature_extractor", model_type="base", is_eval=True, device=device ) elif model_type == "CLIP_ViT-B-32": model = load_model( "clip_feature_extractor", "ViT-B-32", is_eval=True, device=device ) elif model_type == "CLIP_ViT-B-16": model = load_model( "clip_feature_extractor", "ViT-B-16", is_eval=True, device=device ) elif model_type == "CLIP_ViT-L-14": model = load_model( "clip_feature_extractor", "ViT-L-14", is_eval=True, device=device ) return model def app(): model_type = st.sidebar.selectbox( "Model:", ["ALBEF", "BLIP_Base", "CLIP_ViT-B-32", "CLIP_ViT-B-16", "CLIP_ViT-L-14"], ) score_type = st.sidebar.selectbox("Score type:", ["Cosine", "Multimodal"]) # ===== layout ===== st.markdown( "<h1 style='text-align: center;'>Zero-shot Classification</h1>", unsafe_allow_html=True, ) instructions = """Try the provided image or upload your own:""" file = st.file_uploader(instructions) st.header("Image") if file: raw_img = Image.open(file).convert("RGB") else: raw_img = load_demo_image() st.image(raw_img) # , use_column_width=True) col1, col2 = st.columns(2) col1.header("Categories") cls_0 = col1.text_input("category 1", value="merlion") cls_1 = col1.text_input("category 2", value="sky") cls_2 = col1.text_input("category 3", value="giraffe") cls_3 = col1.text_input("category 4", value="fountain") cls_4 = col1.text_input("category 5", value="marina bay") cls_names = [cls_0, cls_1, cls_2, cls_3, cls_4] cls_names = [cls_nm for cls_nm in cls_names if len(cls_nm) > 0] if len(cls_names) != len(set(cls_names)): st.error("Please provide unique class names") return button = st.button("Submit") col2.header("Prediction") # ===== event ===== if button: if model_type.startswith("BLIP"): text_processor = BlipCaptionProcessor(prompt="A picture of ") cls_prompt = [text_processor(cls_nm) for cls_nm in cls_names] if score_type == "Cosine": vis_processor = load_processor("blip_image_eval").build(image_size=224) img = 
vis_processor(raw_img).unsqueeze(0).to(device) feature_extractor = load_model_cache(model_type="blip", device=device) sample = {"image": img, "text_input": cls_prompt} with torch.no_grad(): image_features = feature_extractor.extract_features( sample, mode="image" ).image_embeds_proj[:, 0] text_features = feature_extractor.extract_features( sample, mode="text" ).text_embeds_proj[:, 0] sims = (image_features @ text_features.t())[ 0 ] / feature_extractor.temp else: vis_processor = load_processor("blip_image_eval").build(image_size=384) img = vis_processor(raw_img).unsqueeze(0).to(device) model = load_blip_itm_model(device) output = model(img, cls_prompt, match_head="itm") sims = output[:, 1] sims = torch.nn.Softmax(dim=0)(sims) inv_sims = [sim * 100 for sim in sims.tolist()[::-1]] elif model_type.startswith("ALBEF"): vis_processor = load_processor("blip_image_eval").build(image_size=224) img = vis_processor(raw_img).unsqueeze(0).to(device) text_processor = BlipCaptionProcessor(prompt="A picture of ") cls_prompt = [text_processor(cls_nm) for cls_nm in cls_names] feature_extractor = load_model_cache(model_type="albef", device=device) sample = {"image": img, "text_input": cls_prompt} with torch.no_grad(): image_features = feature_extractor.extract_features( sample, mode="image" ).image_embeds_proj[:, 0] text_features = feature_extractor.extract_features( sample, mode="text" ).text_embeds_proj[:, 0] st.write(image_features.shape) st.write(text_features.shape) sims = (image_features @ text_features.t())[0] / feature_extractor.temp sims = torch.nn.Softmax(dim=0)(sims) inv_sims = [sim * 100 for sim in sims.tolist()[::-1]] elif model_type.startswith("CLIP"): if model_type == "CLIP_ViT-B-32": model = load_model_cache(model_type="CLIP_ViT-B-32", device=device) elif model_type == "CLIP_ViT-B-16": model = load_model_cache(model_type="CLIP_ViT-B-16", device=device) elif model_type == "CLIP_ViT-L-14": model = load_model_cache(model_type="CLIP_ViT-L-14", device=device) else: raise ValueError(f"Unknown model type {model_type}") if score_type == "Cosine": # image_preprocess = ClipImageEvalProcessor(image_size=336) image_preprocess = ClipImageEvalProcessor(image_size=224) img = image_preprocess(raw_img).unsqueeze(0).to(device) sample = {"image": img, "text_input": cls_names} with torch.no_grad(): clip_features = model.extract_features(sample) image_features = clip_features.image_embeds_proj text_features = clip_features.text_embeds_proj sims = (100.0 * image_features @ text_features.T)[0].softmax(dim=-1) inv_sims = sims.tolist()[::-1] else: st.warning("CLIP does not support multimodal scoring.") return fig = go.Figure( go.Bar( x=inv_sims, y=cls_names[::-1], text=["{:.2f}".format(s) for s in inv_sims], orientation="h", ) ) fig.update_traces( textfont_size=12, textangle=0, textposition="outside", cliponaxis=False, ) col2.plotly_chart(fig, use_container_width=True)
EXA-1-master
exa/libraries/LAVIS/app/classification.py
""" # Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import math import numpy as np import streamlit as st from lavis.models.blip_models.blip_image_text_matching import compute_gradcam from lavis.processors import load_processor from PIL import Image from app import device, load_demo_image from app.utils import getAttMap, init_bert_tokenizer, load_blip_itm_model def app(): model_type = st.sidebar.selectbox("Model:", ["BLIP_base", "BLIP_large"]) values = list(range(1, 12)) default_layer_num = values.index(7) layer_num = ( st.sidebar.selectbox("Layer number", values, index=default_layer_num) - 1 ) st.markdown( "<h1 style='text-align: center;'>Text Localization</h1>", unsafe_allow_html=True ) vis_processor = load_processor("blip_image_eval").build(image_size=384) text_processor = load_processor("blip_caption") tokenizer = init_bert_tokenizer() instructions = "Try the provided image and text or use your own ones." file = st.file_uploader(instructions) query = st.text_input( "Try a different input.", "A girl playing with her dog on the beach." ) submit_button = st.button("Submit") col1, col2 = st.columns(2) if file: raw_img = Image.open(file).convert("RGB") else: raw_img = load_demo_image() col1.header("Image") w, h = raw_img.size scaling_factor = 720 / w resized_image = raw_img.resize((int(w * scaling_factor), int(h * scaling_factor))) col1.image(resized_image, use_column_width=True) col2.header("GradCam") if submit_button: if model_type.startswith("BLIP"): blip_type = model_type.split("_")[1] model = load_blip_itm_model(device, model_type=blip_type) img = vis_processor(raw_img).unsqueeze(0).to(device) qry = text_processor(query) qry_tok = tokenizer(qry, return_tensors="pt").to(device) norm_img = np.float32(resized_image) / 255 gradcam, _ = compute_gradcam(model, img, qry, qry_tok, block_num=layer_num) avg_gradcam = getAttMap(norm_img, gradcam[0][1], blur=True) col2.image(avg_gradcam, use_column_width=True, clamp=True) num_cols = 4.0 num_tokens = len(qry_tok.input_ids[0]) - 2 num_rows = int(math.ceil(num_tokens / num_cols)) gradcam_iter = iter(gradcam[0][2:-1]) token_id_iter = iter(qry_tok.input_ids[0][1:-1]) for _ in range(num_rows): with st.container(): for col in st.columns(int(num_cols)): token_id = next(token_id_iter, None) if not token_id: break gradcam_img = next(gradcam_iter) word = tokenizer.decode([token_id]) gradcam_todraw = getAttMap(norm_img, gradcam_img, blur=True) new_title = ( '<p style="text-align: center; font-size: 25px;">{}</p>'.format( word ) ) col.markdown(new_title, unsafe_allow_html=True) # st.image(image, channels="BGR") col.image(gradcam_todraw, use_column_width=True, clamp=True)
EXA-1-master
exa/libraries/LAVIS/app/text_localization.py
""" # Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from PIL import Image import requests import streamlit as st import torch @st.cache() def load_demo_image(): img_url = ( "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg" ) raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") return raw_image device = torch.device("cuda" if torch.cuda.is_available() else "cpu") cache_root = "/export/home/.cache/lavis/"
EXA-1-master
exa/libraries/LAVIS/app/__init__.py
""" # Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import numpy as np import streamlit as st import torch from lavis.models.blip_models.blip_image_text_matching import compute_gradcam from lavis.processors import load_processor from PIL import Image from app import device, load_demo_image from app.utils import getAttMap, init_bert_tokenizer, load_blip_itm_model def app(): model_type = st.sidebar.selectbox("Model:", ["BLIP_base", "BLIP_large"]) if model_type.startswith("BLIP"): blip_type = model_type.split("_")[1] model = load_blip_itm_model(device, model_type=blip_type) vis_processor = load_processor("blip_image_eval").build(image_size=384) st.markdown( "<h1 style='text-align: center;'>Image Text Matching</h1>", unsafe_allow_html=True, ) values = list(range(1, 12)) default_layer_num = values.index(7) layer_num = ( st.sidebar.selectbox("Layer number", values, index=default_layer_num) - 1 ) instructions = """Try the provided image or upload your own:""" file = st.file_uploader(instructions) col1, col2 = st.columns(2) col1.header("Image") col2.header("GradCam") if file: raw_img = Image.open(file).convert("RGB") else: raw_img = load_demo_image() w, h = raw_img.size scaling_factor = 720 / w resized_image = raw_img.resize((int(w * scaling_factor), int(h * scaling_factor))) col1.image(resized_image, use_column_width=True) col3, col4 = st.columns(2) col3.header("Text") user_question = col3.text_input( "Input your sentence!", "a woman sitting on the beach with a dog" ) submit_button = col3.button("Submit") col4.header("Matching score") if submit_button: tokenizer = init_bert_tokenizer() img = vis_processor(raw_img).unsqueeze(0).to(device) text_processor = load_processor("blip_caption").build() qry = text_processor(user_question) norm_img = np.float32(resized_image) / 255 qry_tok = tokenizer(qry, return_tensors="pt").to(device) gradcam, output = compute_gradcam(model, img, qry, qry_tok, block_num=layer_num) avg_gradcam = getAttMap(norm_img, gradcam[0][1], blur=True) col2.image(avg_gradcam, use_column_width=True, clamp=True) # output = model(img, question) itm_score = torch.nn.functional.softmax(output, dim=1) new_title = ( '<p style="text-align: left; font-size: 25px;">\n{:.3f}%</p>'.format( itm_score[0][1].item() * 100 ) ) col4.markdown(new_title, unsafe_allow_html=True)
EXA-1-master
exa/libraries/LAVIS/app/image_text_match.py
""" # Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import random from collections import OrderedDict from functools import reduce from tkinter import N import streamlit as st from lavis.common.registry import registry from lavis.datasets.builders import dataset_zoo, load_dataset from lavis.datasets.builders.base_dataset_builder import load_dataset_config from PIL import Image IMAGE_LAYOUT = 3, 4 VIDEO_LAYOUT = 1, 2 PREV_STR = "Prev" NEXT_STR = "Next" def sample_dataset(dataset, indices): samples = [dataset.displ_item(idx) for idx in indices] return samples def get_concat_v(im1, im2): margin = 5 canvas_size = (im1.width + im2.width + margin, max(im1.height, im2.height)) canvas = Image.new("RGB", canvas_size, "White") canvas.paste(im1, (0, 0)) canvas.paste(im2, (im1.width + margin, 0)) return canvas def resize_img_w(raw_img, new_w=224): if isinstance(raw_img, list): resized_imgs = [resize_img_w(img, 196) for img in raw_img] # concatenate images resized_image = reduce(get_concat_v, resized_imgs) else: w, h = raw_img.size scaling_factor = new_w / w resized_image = raw_img.resize( (int(w * scaling_factor), int(h * scaling_factor)) ) return resized_image def get_visual_key(dataset): if "image" in dataset[0]: return "image" elif "image0" in dataset[0]: # NLVR2 dataset return "image" elif "video" in dataset[0]: return "video" else: raise ValueError("Visual key not found.") def gather_items(samples, exclude=[]): gathered = [] for s in samples: ns = OrderedDict() for k in s.keys(): if k not in exclude: ns[k] = s[k] gathered.append(ns) return gathered @st.cache(allow_output_mutation=True) def load_dataset_cache(name): return load_dataset(name) def format_text(text): md = "\n\n".join([f"**{k}**: {v}" for k, v in text.items()]) return md def show_samples(dataset, offset=0, is_next=False): visual_key = get_visual_key(dataset) num_rows, num_cols = IMAGE_LAYOUT if visual_key == "image" else VIDEO_LAYOUT n_samples = num_rows * num_cols if not shuffle: if is_next: start = min(int(start_idx) + offset + n_samples, len(dataset) - n_samples) else: start = max(0, int(start_idx) + offset - n_samples) st.session_state.last_start = start end = min(start + n_samples, len(dataset)) indices = list(range(start, end)) else: indices = random.sample(range(len(dataset)), n_samples) samples = sample_dataset(dataset, indices) visual_info = ( iter([resize_img_w(s[visual_key]) for s in samples]) if visual_key == "image" # else iter([s[visual_key] for s in samples]) else iter([s["file"] for s in samples]) ) text_info = gather_items(samples, exclude=["image", "video"]) text_info = iter([format_text(s) for s in text_info]) st.markdown( """<hr style="height:1px;border:none;color:#c7ccd4;background-color:#c7ccd4;"/> """, unsafe_allow_html=True, ) for _ in range(num_rows): with st.container(): for col in st.columns(num_cols): # col.text(next(text_info)) # col.caption(next(text_info)) try: col.markdown(next(text_info)) if visual_key == "image": col.image(next(visual_info), use_column_width=True, clamp=True) elif visual_key == "video": col.markdown( "![Alt Text](https://media.giphy.com/media/vFKqnCdLPNOKc/giphy.gif)" ) except StopIteration: break st.markdown( """<hr style="height:1px;border:none;color:#c7ccd4;background-color:#c7ccd4;"/> """, unsafe_allow_html=True, ) st.session_state.n_display = n_samples if __name__ == "__main__": st.set_page_config( 
page_title="LAVIS Dataset Explorer", # layout="wide", initial_sidebar_state="expanded", ) dataset_name = st.sidebar.selectbox("Dataset:", dataset_zoo.get_names()) function = st.sidebar.selectbox("Function:", ["Browser"], index=0) if function == "Browser": shuffle = st.sidebar.selectbox("Shuffled:", [True, False], index=0) dataset = load_dataset_cache(dataset_name) split = st.sidebar.selectbox("Split:", dataset.keys()) dataset_len = len(dataset[split]) st.success( f"Loaded {dataset_name}/{split} with **{dataset_len}** records. **Image/video directory**: {dataset[split].vis_root}" ) if "last_dataset" not in st.session_state: st.session_state.last_dataset = dataset_name st.session_state.last_split = split if "last_start" not in st.session_state: st.session_state.last_start = 0 if "start_idx" not in st.session_state: st.session_state.start_idx = 0 if "shuffle" not in st.session_state: st.session_state.shuffle = shuffle if "first_run" not in st.session_state: st.session_state.first_run = True elif ( st.session_state.last_dataset != dataset_name or st.session_state.last_split != split ): st.session_state.first_run = True st.session_state.last_dataset = dataset_name st.session_state.last_split = split elif st.session_state.shuffle != shuffle: st.session_state.shuffle = shuffle st.session_state.first_run = True if not shuffle: n_col, p_col = st.columns([0.05, 1]) prev_button = n_col.button(PREV_STR) next_button = p_col.button(NEXT_STR) else: next_button = st.button(NEXT_STR) if not shuffle: start_idx = st.sidebar.text_input(f"Begin from (total {dataset_len})", 0) if not start_idx.isdigit(): st.error(f"Input to 'Begin from' must be digits, found {start_idx}.") else: if int(start_idx) != st.session_state.start_idx: st.session_state.start_idx = int(start_idx) st.session_state.last_start = int(start_idx) if prev_button: show_samples( dataset[split], offset=st.session_state.last_start - st.session_state.start_idx, is_next=False, ) if next_button: show_samples( dataset[split], offset=st.session_state.last_start - st.session_state.start_idx, is_next=True, ) if st.session_state.first_run: st.session_state.first_run = False show_samples( dataset[split], offset=st.session_state.last_start - st.session_state.start_idx, is_next=True, )
EXA-1-master
exa/libraries/LAVIS/app/dataset_browser.py
""" # Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import streamlit as st from app import device, load_demo_image from app.utils import load_model_cache from lavis.processors import load_processor from PIL import Image def app(): # ===== layout ===== model_type = st.sidebar.selectbox("Model:", ["BLIP_base", "BLIP_large"]) sampling_method = st.sidebar.selectbox( "Sampling method:", ["Beam search", "Nucleus sampling"] ) st.markdown( "<h1 style='text-align: center;'>Image Description Generation</h1>", unsafe_allow_html=True, ) instructions = """Try the provided image or upload your own:""" file = st.file_uploader(instructions) use_beam = sampling_method == "Beam search" col1, col2 = st.columns(2) if file: raw_img = Image.open(file).convert("RGB") else: raw_img = load_demo_image() col1.header("Image") w, h = raw_img.size scaling_factor = 720 / w resized_image = raw_img.resize((int(w * scaling_factor), int(h * scaling_factor))) col1.image(resized_image, use_column_width=True) col2.header("Description") cap_button = st.button("Generate") # ==== event ==== vis_processor = load_processor("blip_image_eval").build(image_size=384) if cap_button: if model_type.startswith("BLIP"): blip_type = model_type.split("_")[1].lower() model = load_model_cache( "blip_caption", model_type=f"{blip_type}_coco", is_eval=True, device=device, ) img = vis_processor(raw_img).unsqueeze(0).to(device) captions = generate_caption( model=model, image=img, use_nucleus_sampling=not use_beam ) col2.write("\n\n".join(captions), use_column_width=True) def generate_caption( model, image, use_nucleus_sampling=False, num_beams=3, max_length=40, min_length=5 ): samples = {"image": image} captions = [] if use_nucleus_sampling: for _ in range(5): caption = model.generate( samples, use_nucleus_sampling=True, max_length=max_length, min_length=min_length, top_p=0.9, ) captions.append(caption[0]) else: caption = model.generate( samples, use_nucleus_sampling=False, num_beams=num_beams, max_length=max_length, min_length=min_length, ) captions.append(caption[0]) return captions
EXA-1-master
exa/libraries/LAVIS/app/caption.py
""" # Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import numpy as np import streamlit as st import torch from lavis.models import BlipBase, load_model from matplotlib import pyplot as plt from PIL import Image from scipy.ndimage import filters from skimage import transform as skimage_transform def resize_img(raw_img): w, h = raw_img.size scaling_factor = 240 / w resized_image = raw_img.resize((int(w * scaling_factor), int(h * scaling_factor))) return resized_image def read_img(filepath): raw_image = Image.open(filepath).convert("RGB") return raw_image @st.cache( hash_funcs={ torch.nn.parameter.Parameter: lambda parameter: parameter.data.detach() .cpu() .numpy() }, allow_output_mutation=True, ) def load_model_cache(name, model_type, is_eval, device): return load_model(name, model_type, is_eval, device) @st.cache(allow_output_mutation=True) def init_bert_tokenizer(): tokenizer = BlipBase.init_tokenizer() return tokenizer def getAttMap(img, attMap, blur=True, overlap=True): attMap -= attMap.min() if attMap.max() > 0: attMap /= attMap.max() attMap = skimage_transform.resize(attMap, (img.shape[:2]), order=3, mode="constant") if blur: attMap = filters.gaussian_filter(attMap, 0.02 * max(img.shape[:2])) attMap -= attMap.min() attMap /= attMap.max() cmap = plt.get_cmap("jet") attMapV = cmap(attMap) attMapV = np.delete(attMapV, 3, 2) if overlap: attMap = ( 1 * (1 - attMap**0.7).reshape(attMap.shape + (1,)) * img + (attMap**0.7).reshape(attMap.shape + (1,)) * attMapV ) return attMap @st.cache( hash_funcs={ torch.nn.parameter.Parameter: lambda parameter: parameter.data.detach() .cpu() .numpy() }, allow_output_mutation=True, ) def load_blip_itm_model(device, model_type="base"): model = load_model( "blip_image_text_matching", model_type, is_eval=True, device=device ) return model
EXA-1-master
exa/libraries/LAVIS/app/utils.py
""" # Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os import numpy as np import streamlit as st import torch import torch.nn.functional as F from app import cache_root, device from app.utils import ( getAttMap, init_bert_tokenizer, load_blip_itm_model, read_img, resize_img, ) from lavis.models import load_model from lavis.processors import load_processor @st.cache( hash_funcs={ torch.nn.parameter.Parameter: lambda parameter: parameter.data.detach() .cpu() .numpy() }, allow_output_mutation=True, ) def load_feat(): from lavis.common.utils import download_url dirname = os.path.join(os.path.dirname(__file__), "assets") filename = "path2feat_coco_train2014.pth" filepath = os.path.join(dirname, filename) url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/path2feat_coco_train2014.pth" if not os.path.exists(filepath): download_url(url=url, root=dirname, filename="path2feat_coco_train2014.pth") path2feat = torch.load(filepath) paths = sorted(path2feat.keys()) all_img_feats = torch.stack([path2feat[k] for k in paths], dim=0).to(device) return path2feat, paths, all_img_feats @st.cache( hash_funcs={ torch.nn.parameter.Parameter: lambda parameter: parameter.data.detach() .cpu() .numpy() }, allow_output_mutation=True, ) def load_feature_extractor_model(device): model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base.pth" model = load_model( "blip_feature_extractor", model_type="base", is_eval=True, device=device ) model.load_from_pretrained(model_url) return model def app(): # === layout === model_type = st.sidebar.selectbox("Model:", ["BLIP_base", "BLIP_large"]) file_root = os.path.join(cache_root, "coco/images/train2014/") values = [12, 24, 48] default_layer_num = values.index(24) num_display = st.sidebar.selectbox( "Number of images:", values, index=default_layer_num ) show_gradcam = st.sidebar.selectbox("Show GradCam:", [True, False], index=1) itm_ranking = st.sidebar.selectbox("Multimodal re-ranking:", [True, False], index=0) # st.title('Multimodal Search') st.markdown( "<h1 style='text-align: center;'>Multimodal Search</h1>", unsafe_allow_html=True ) # === event === vis_processor = load_processor("blip_image_eval").build(image_size=384) text_processor = load_processor("blip_caption") user_question = st.text_input( "Search query", "A dog running on the grass.", help="Type something to search." 
) user_question = text_processor(user_question) feature_extractor = load_feature_extractor_model(device) # ======= ITC ========= sample = {"text_input": user_question} with torch.no_grad(): text_feature = feature_extractor.extract_features( sample, mode="text" ).text_embeds_proj[0, 0] path2feat, paths, all_img_feats = load_feat() all_img_feats.to(device) all_img_feats = F.normalize(all_img_feats, dim=1) num_cols = 4 num_rows = int(num_display / num_cols) similarities = text_feature @ all_img_feats.T indices = torch.argsort(similarities, descending=True)[:num_display] top_paths = [paths[ind.detach().cpu().item()] for ind in indices] sorted_similarities = [similarities[idx] for idx in indices] filenames = [os.path.join(file_root, p) for p in top_paths] # ========= ITM and GradCam ========== bsz = 4 # max number of images to avoid cuda oom if model_type.startswith("BLIP"): blip_type = model_type.split("_")[1] itm_model = load_blip_itm_model(device, model_type=blip_type) tokenizer = init_bert_tokenizer() queries_batch = [user_question] * bsz queries_tok_batch = tokenizer(queries_batch, return_tensors="pt").to(device) num_batches = int(num_display / bsz) avg_gradcams = [] all_raw_images = [] itm_scores = [] for i in range(num_batches): filenames_in_batch = filenames[i * bsz : (i + 1) * bsz] raw_images, images = read_and_process_images(filenames_in_batch, vis_processor) gradcam, itm_output = compute_gradcam_batch( itm_model, images, queries_batch, queries_tok_batch ) all_raw_images.extend([resize_img(r_img) for r_img in raw_images]) norm_imgs = [np.float32(r_img) / 255 for r_img in raw_images] for norm_img, grad_cam in zip(norm_imgs, gradcam): avg_gradcam = getAttMap(norm_img, grad_cam[0], blur=True) avg_gradcams.append(avg_gradcam) with torch.no_grad(): itm_score = torch.nn.functional.softmax(itm_output, dim=1) itm_scores.append(itm_score) # ========= ITM re-ranking ========= itm_scores = torch.cat(itm_scores)[:, 1] if itm_ranking: itm_scores_sorted, indices = torch.sort(itm_scores, descending=True) avg_gradcams_sorted = [] all_raw_images_sorted = [] for idx in indices: avg_gradcams_sorted.append(avg_gradcams[idx]) all_raw_images_sorted.append(all_raw_images[idx]) avg_gradcams = avg_gradcams_sorted all_raw_images = all_raw_images_sorted if show_gradcam: images_to_show = iter(avg_gradcams) else: images_to_show = iter(all_raw_images) for _ in range(num_rows): with st.container(): for col in st.columns(num_cols): col.image(next(images_to_show), use_column_width=True, clamp=True) def read_and_process_images(image_paths, vis_processor): raw_images = [read_img(path) for path in image_paths] images = [vis_processor(r_img) for r_img in raw_images] images_tensors = torch.stack(images).to(device) return raw_images, images_tensors def compute_gradcam_batch(model, visual_input, text_input, tokenized_text, block_num=6): model.text_encoder.base_model.base_model.encoder.layer[ block_num ].crossattention.self.save_attention = True output = model({"image": visual_input, "text_input": text_input}, match_head="itm") loss = output[:, 1].sum() model.zero_grad() loss.backward() with torch.no_grad(): mask = tokenized_text.attention_mask.view( tokenized_text.attention_mask.size(0), 1, -1, 1, 1 ) # (bsz,1,token_len, 1,1) token_length = mask.sum() - 2 token_length = token_length.cpu() # grads and cams [bsz, num_head, seq_len, image_patch] grads = model.text_encoder.base_model.base_model.encoder.layer[ block_num ].crossattention.self.get_attn_gradients() cams = model.text_encoder.base_model.base_model.encoder.layer[ 
block_num ].crossattention.self.get_attention_map() # assume using vit large with 576 num image patch cams = cams[:, :, :, 1:].reshape(visual_input.size(0), 12, -1, 24, 24) * mask grads = ( grads[:, :, :, 1:].clamp(0).reshape(visual_input.size(0), 12, -1, 24, 24) * mask ) gradcam = cams * grads # [enc token gradcam, average gradcam across token, gradcam for individual token] # gradcam = torch.cat((gradcam[0:1,:], gradcam[1:token_length+1, :].sum(dim=0, keepdim=True)/token_length, gradcam[1:, :])) gradcam = gradcam.mean(1).cpu().detach() gradcam = ( gradcam[:, 1 : token_length + 1, :].sum(dim=1, keepdim=True) / token_length ) return gradcam, output
EXA-1-master
exa/libraries/LAVIS/app/multimodal_search.py
""" # Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ """ This file is the framework for generating multiple Streamlit applications through an object oriented framework. """ # Import necessary libraries import streamlit as st # Define the multipage class to manage the multiple apps in our program class MultiPage: """Framework for combining multiple streamlit applications.""" def __init__(self) -> None: """Constructor class to generate a list which will store all our applications as an instance variable.""" self.pages = [] def add_page(self, title, func) -> None: """Class Method to Add pages to the project Args: title ([str]): The title of page which we are adding to the list of apps func: Python function to render this page in Streamlit """ self.pages.append({"title": title, "function": func}) def run(self): # Drodown to select the page to run page = st.sidebar.selectbox( "Navigation", self.pages, format_func=lambda page: page["title"] ) # run the app function page["function"]()
EXA-1-master
exa/libraries/LAVIS/app/multipage.py
""" # Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import streamlit as st from app import load_demo_image, device from app.utils import load_model_cache from lavis.processors import load_processor from PIL import Image def app(): model_type = st.sidebar.selectbox("Model:", ["BLIP"]) # ===== layout ===== st.markdown( "<h1 style='text-align: center;'>Visual Question Answering</h1>", unsafe_allow_html=True, ) instructions = """Try the provided image or upload your own:""" file = st.file_uploader(instructions) col1, col2 = st.columns(2) col1.header("Image") if file: raw_img = Image.open(file).convert("RGB") else: raw_img = load_demo_image() w, h = raw_img.size scaling_factor = 720 / w resized_image = raw_img.resize((int(w * scaling_factor), int(h * scaling_factor))) col1.image(resized_image, use_column_width=True) col2.header("Question") user_question = col2.text_input("Input your question!", "What are objects there?") qa_button = st.button("Submit") col2.header("Answer") # ===== event ===== vis_processor = load_processor("blip_image_eval").build(image_size=480) text_processor = load_processor("blip_question").build() if qa_button: if model_type.startswith("BLIP"): model = load_model_cache( "blip_vqa", model_type="vqav2", is_eval=True, device=device ) img = vis_processor(raw_img).unsqueeze(0).to(device) question = text_processor(user_question) vqa_samples = {"image": img, "text_input": [question]} answers = model.predict_answers(vqa_samples, inference_method="generate") col2.write("\n".join(answers), use_column_width=True)
EXA-1-master
exa/libraries/LAVIS/app/vqa.py
""" # Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from app.multipage import MultiPage from app import vqa, caption from app import image_text_match as itm from app import text_localization as tl from app import multimodal_search as ms from app import classification as cl if __name__ == "__main__": app = MultiPage() app.add_page("Image Description Generation", caption.app) app.add_page("Multimodal Search", ms.app) app.add_page("Visual Question Answering", vqa.app) app.add_page("Image Text Matching", itm.app) app.add_page("Text Localization", tl.app) app.add_page("Classification", cl.app) app.run()
EXA-1-master
exa/libraries/LAVIS/app/main.py
#!/usr/bin/env python # coding: utf-8 # <a href="https://colab.research.google.com/github/anthonytmh/lavis-pnpvqa/blob/pnp_vqa/projects/pnp-vqa/pnp_vqa.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # ## Img2Prompt-VQA: Inference Demo # In[2]: # install requirements import sys # if 'google.colab' in sys.modules: # print('Running in Colab.') # get_ipython().system('git clone https://github.com/salesforce/LAVIS') # get_ipython().run_line_magic('cd', 'LAVIS') # get_ipython().system('pip install .') # get_ipython().system('pip3 install https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.0.0/en_core_web_sm-3.0.0.tar.gz') #else: # get_ipython().system('pip install omegaconf') # get_ipython().run_line_magic('cd', '../..') # get_ipython().system('pip install .') # 'pip3 install https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.0.0/en_core_web_sm-3.0.0.tar.gz') # In[3]: import torch import requests from PIL import Image from matplotlib import pyplot as plt import numpy as np from lavis.common.gradcam import getAttMap from lavis.models import load_model_and_preprocess # ### Load an example image and question # In[ ]: #img_url = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/projects/pnp-vqa/demo.png' #raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB') raw_image = Image.open("./demo.png").convert("RGB") #display(raw_image.resize((400, 300))) question = "What item s are spinning which can be used to control electric?" print(question) # In[ ]: # setup device to use device = 'cpu' #device = torch.device("cuda" if torch.cuda.is_available() else "cpu") device # ### Load Img2Prompt-VQA model # In[ ]: model, vis_processors, txt_processors = load_model_and_preprocess(name="img2prompt_vqa", model_type="base", is_eval=True, device=device) # ### Preprocess image and text inputs # In[ ]: image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) question = txt_processors["eval"](question) samples = {"image": image, "text_input": [question]} # In[ ]: # ### Img2Prompt-VQA utilizes 4 submodels to perform VQA: # #### 1. Image-Question Matching # Compute the relevancy score of image patches with respect to the question using GradCAM # In[ ]: samples = model.forward_itm(samples=samples) # In[ ]: # Gradcam visualisation dst_w = 720 w, h = raw_image.size scaling_factor = dst_w / w resized_img = raw_image.resize((int(w * scaling_factor), int(h * scaling_factor))) norm_img = np.float32(resized_img) / 255 gradcam = samples['gradcams'].reshape(24,24) avg_gradcam = getAttMap(norm_img, gradcam, blur=True) fig, ax = plt.subplots(1, 1, figsize=(5, 5)) ax.imshow(avg_gradcam) ax.set_yticks([]) ax.set_xticks([]) print('Question: {}'.format(question)) # #### 2. Image Captioning # Generate question-guided captions based on the relevancy score # In[ ]: samples = model.forward_cap(samples=samples, num_captions=50, num_patches=20) print('Examples of question-guided captions: ') print(samples['captions'][0][:5]) # #### 3. Question Generation # Generate synthetic questions using the captions # In[ ]: samples = model.forward_qa_generation(samples) print('Sample Question: {} \nSample Answer: {}'.format(samples['questions'][:5], samples['answers'][:5])) # #### 4. 
Prompt Construction # Prepare the prompts for LLM # ### Generate answer by calling `predict_answers()` directly # # In[ ]: Img2Prompt = model.prompts_construction(samples) from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, AutoModelForSeq2SeqLM def load_model(model_selection): model = AutoModelForCausalLM.from_pretrained(model_selection) tokenizer = AutoTokenizer.from_pretrained(model_selection, use_fast=False) return model,tokenizer def postprocess_Answer(text): for i, ans in enumerate(text): for j, w in enumerate(ans): if w == '.' or w == '\n': ans = ans[:j].lower() break return ans model,tokenizer = load_model('facebook/opt-6.7b') Img2Prompt_input = tokenizer(Img2Prompt, padding='longest', truncation=True, return_tensors="pt").to( device) assert (len(Img2Prompt_input.input_ids[0])+20) <=2048 # print(len(question_input.attention_mask[0])) outputs_list = [] outputs = model.generate(input_ids=Img2Prompt_input.input_ids, attention_mask=Img2Prompt_input.attention_mask, max_length=20+len(Img2Prompt_input.input_ids[0]), return_dict_in_generate=True, output_scores = True ) outputs_list.append(outputs) pred_answer = tokenizer.batch_decode(outputs.sequences[:, len(Img2Prompt_input.input_ids[0]):]) pred_answer = postprocess_Answer(pred_answer) print({"question": question, "answer": pred_answer}) #pred_answers, caption, gradcam = model.predict_answers(samples, num_captions=50, num_patches=20) #print('Question: {} \nPredicted answer: {}'.format(question, pred_answers[0]))
EXA-1-master
exa/libraries/LAVIS/projects/img2llm-vqa/img2llm_vqa.py
""" # # Copyright (c) 2022 salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause # Integration tests for PNP-VQA model. """ import pytest import torch from lavis.models import load_model, load_model_and_preprocess from PIL import Image # setup device to use device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # load sample image raw_image = Image.open("docs/_static/merlion.png").convert("RGB") precision = 1e-1 class TestPNPVQA: def test_vqa(self): # loads PNP-VQA base model, with BLIP_itm_large, BLIP_caption_large, Unifiedqav2_base # this also loads the associated image processors and text processors model, vis_processors, txt_processors = load_model_and_preprocess( name="pnp_vqa", model_type="base", is_eval=True, device=device ) # ask a random question. question = "Which city is this photo taken?" image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) question = txt_processors["eval"](question) samples = {"image": image, "text_input": [question]} answer, caption, gradcam = model.predict_answers( samples=samples, inference_method="generate", num_captions=5, num_patches=20, ) assert isinstance(answer, list) assert isinstance(caption, list) assert isinstance(gradcam, torch.Tensor) assert len(answer) == 1 assert len(caption) == 1 assert len(caption[0]) == 5 assert gradcam.size() == torch.Size([1,576]) def test_itm(self): # loads PNP-VQA base model, with BLIP_itm_large, BLIP_caption_large, Unifiedqav2_base # this also loads the associated image processors and text processors model, vis_processors, txt_processors = load_model_and_preprocess( name="pnp_vqa", model_type="base", is_eval=True, device=device ) # ask a random question. question = "Which city is this photo taken?" image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) question = txt_processors["eval"](question) samples = {"image": image, "text_input": [question]} samples = model.forward_itm(samples=samples) assert samples['gradcams'].size() == torch.Size([1,576]) def test_caption(self): # loads PNP-VQA base model, with BLIP_itm_large, BLIP_caption_large, Unifiedqav2_base # this also loads the associated image processors and text processors model, vis_processors, txt_processors = load_model_and_preprocess( name="pnp_vqa", model_type="base", is_eval=True, device=device ) # ask a random question. question = "Which city is this photo taken?" image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) question = txt_processors["eval"](question) samples = {"image": image, "text_input": [question]} samples['gradcams'] = torch.rand(1,576) samples = model.forward_cap(samples=samples, num_captions=5, num_patches=20) assert len(samples['captions']) == 1 assert len(samples['captions'][0]) == 5 def test_qa(self): # loads PNP-VQA base model, with BLIP_itm_large, BLIP_caption_large, Unifiedqav2_base # this also loads the associated image processors and text processors model, vis_processors, txt_processors = load_model_and_preprocess( name="pnp_vqa", model_type="base", is_eval=True, device=device ) # ask a random question. question = "Which city is this photo taken?" 
image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) question = txt_processors["eval"](question) samples = {"image": image, "text_input": [question]} samples['captions'] = [['the city is singapore', 'the picture is taken in singapore']] answer = model.forward_qa(samples=samples, num_captions=2) assert isinstance(answer, list) assert len(answer) == 1 assert answer[0]== 'singapore'
EXA-1-master
exa/libraries/LAVIS/tests/models/test_pnp_vqa.py
""" # # Copyright (c) 2022 salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause # Integration tests for ALBEF models. """ import pytest import torch from lavis.models import load_model, load_model_and_preprocess from PIL import Image # setup device to use device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # load sample image raw_image = Image.open("docs/_static/merlion.png").convert("RGB") precision = 1e-3 class TestAlbef: def test_vqa(self): model, vis_processors, txt_processors = load_model_and_preprocess( name="albef_vqa", model_type="vqav2", is_eval=True, device=device ) # ask a random question. question = "Which city is this photo taken?" image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) question = txt_processors["eval"](question) samples = {"image": image, "text_input": question} answer_list = ["Singapore", "London", "Palo Alto", "Tokyo"] answers = model.predict_answers(samples, answer_list=answer_list) assert answers == ["Singapore"] def test_vqa_forward(self): model = load_model("albef_vqa", model_type="vqav2", is_eval=True) samples = { "image": torch.rand(2, 3, 384, 384), "text_input": ["What is this?", "What is that?"], "answer": ["cat", "cat", "dog"], "weight": torch.tensor([1.0, 1.0, 1.0]), "n_answers": torch.tensor([2, 1]), "epoch": 0, "iters": 0, "num_iters_per_epoch": 1000, } output = model(samples) # odict_keys(['image_embeds', 'image_embeds_m', 'encoder_output', 'encoder_output_m', 'decoder_output', 'decoder_labels']) assert output.intermediate_output.image_embeds.shape == torch.Size( [2, 577, 768] ) assert output.intermediate_output.image_embeds_m.shape == torch.Size( [2, 577, 768] ) assert ( output.intermediate_output.encoder_output.last_hidden_state.shape == torch.Size([2, 6, 768]) ) assert ( output.intermediate_output.encoder_output_m.last_hidden_state.shape == torch.Size([2, 6, 768]) ) assert output.intermediate_output.decoder_output.logits.shape == torch.Size( [3, 3, 30522] ) assert output.intermediate_output.decoder_labels.shape == torch.Size([3, 3]) def test_retrieval(self): model = load_model("albef_retrieval", "coco", is_eval=True, device=device) images = torch.randn(4, 3, 384, 384).to(device) text_input = [ "caption of image 1", "another caption of image 1", "caption of image 2", "caption of image 3", ] image_id = torch.LongTensor([1, 1, 2, 3]).to(device) samples = { "image": images, "text_input": text_input, "image_id": image_id, "epoch": 0, "iters": 0, "num_iters_per_epoch": 100, } output = model(samples) assert output.intermediate_output.image_embeds.shape == torch.Size( [4, 577, 768] ) assert output.intermediate_output.text_embeds.shape == torch.Size([4, 30, 768]) assert output.intermediate_output.image_embeds_m.shape == torch.Size( [4, 577, 768] ) assert output.intermediate_output.text_embeds_m.shape == torch.Size( [4, 30, 768] ) assert ( output.intermediate_output.encoder_output.last_hidden_state.shape == torch.Size([4, 30, 768]) ) assert output.intermediate_output.itm_logits.shape == torch.Size([12, 2]) assert all( output.intermediate_output.itm_labels == torch.LongTensor([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]).to(device) ) def test_pretrain(self): model = load_model( "albef_pretrain", model_type="base", is_eval=True, device=device ) images = torch.randn(4, 3, 224, 224).to(device) text_input = [ "caption of image 1", "another caption of image 1", "caption of image 2", "caption of 
image 3", ] samples = { "image": images, "text_input": text_input, "epoch": 0, "iters": 0, "num_iters_per_epoch": 100, } output = model(samples) assert output.intermediate_output.image_embeds.shape == torch.Size( [4, 197, 768] ) assert output.intermediate_output.text_embeds.shape == torch.Size([4, 30, 768]) assert output.intermediate_output.itm_logits.shape == torch.Size([12, 2]) assert all( output.intermediate_output.itm_labels == torch.LongTensor([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]).to(device) )
EXA-1-master
exa/libraries/LAVIS/tests/models/test_albef.py
""" # # Copyright (c) 2022 salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause # Integration tests for BLIP2 models. """ import pytest import torch from lavis.models import load_model, load_model_and_preprocess from PIL import Image # setup device to use device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # load sample image raw_image = Image.open("docs/_static/merlion.png").convert("RGB") class TestBlip2: def test_blip2_opt2p7b(self): # loads BLIP2-OPT-2.7b caption model, without finetuning on coco. model, vis_processors, _ = load_model_and_preprocess( name="blip2_opt", model_type="pretrain_opt2.7b", is_eval=True, device=device ) # preprocess the image # vis_processors stores image transforms for "train" and "eval" (validation / testing / inference) image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) # generate caption caption = model.generate({"image": image}) assert caption == ["the merlion fountain in singapore"] # generate multiple captions captions = model.generate({"image": image}, num_captions=3) assert len(captions) == 3 def test_blip2_opt2p7b_coco(self): # loads BLIP2-OPT-2.7b caption model, model, vis_processors, _ = load_model_and_preprocess( name="blip2_opt", model_type="caption_coco_opt2.7b", is_eval=True, device=device, ) # preprocess the image # vis_processors stores image transforms for "train" and "eval" (validation / testing / inference) image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) # generate caption caption = model.generate({"image": image}) assert caption == ["a statue of a mermaid spraying water into the air"] # generate multiple captions captions = model.generate({"image": image}, num_captions=3) assert len(captions) == 3 def test_blip2_opt6p7b(self): # loads BLIP2-OPT-2.7b caption model, model, vis_processors, _ = load_model_and_preprocess( name="blip2_opt", model_type="pretrain_opt6.7b", is_eval=True, device=device ) # preprocess the image # vis_processors stores image transforms for "train" and "eval" (validation / testing / inference) image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) # generate caption caption = model.generate({"image": image}) assert caption == ["a statue of a merlion in front of a water fountain"] # generate multiple captions captions = model.generate({"image": image}, num_captions=3) assert len(captions) == 3 def test_blip2_opt6p7b_coco(self): # loads BLIP2-OPT-2.7b caption model, model, vis_processors, _ = load_model_and_preprocess( name="blip2_opt", model_type="caption_coco_opt6.7b", is_eval=True, device=device, ) # preprocess the image # vis_processors stores image transforms for "train" and "eval" (validation / testing / inference) image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) # generate caption caption = model.generate({"image": image}) assert caption == ["a large fountain spraying water into the air"] # generate multiple captions captions = model.generate({"image": image}, num_captions=3) assert len(captions) == 3 def test_blip2_flant5xl(self): # loads BLIP2-FLAN-T5XL caption model, model, vis_processors, _ = load_model_and_preprocess( name="blip2_t5", model_type="pretrain_flant5xl", is_eval=True, device=device ) # preprocess the image # vis_processors stores image transforms for "train" and "eval" (validation / testing / inference) image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) # generate 
caption caption = model.generate({"image": image}) assert caption == ["marina bay sands, singapore"] # generate multiple captions captions = model.generate({"image": image}, num_captions=3) assert len(captions) == 3 def test_blip2_flant5xxl(self): # loads BLIP2-FLAN-T5XXL caption model, model, vis_processors, _ = load_model_and_preprocess( name="blip2_t5", model_type="pretrain_flant5xxl", is_eval=True, device=device, ) # preprocess the image # vis_processors stores image transforms for "train" and "eval" (validation / testing / inference) image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) # generate caption caption = model.generate({"image": image}) assert caption == ["the merlion statue in singapore"] # generate multiple captions captions = model.generate({"image": image}, num_captions=3) assert len(captions) == 3
EXA-1-master
exa/libraries/LAVIS/tests/models/test_blip2.py
""" # # Copyright (c) 2022 salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause # Integration tests for BLIP models. """ import pytest import torch from lavis.models import load_model, load_model_and_preprocess from PIL import Image # setup device to use device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # load sample image raw_image = Image.open("docs/_static/merlion.png").convert("RGB") precision = 1e-1 class TestBlip: def test_caption(self): # loads BLIP caption base model, with finetuned checkpoints on MSCOCO captioning dataset. # this also loads the associated image processors model, vis_processors, _ = load_model_and_preprocess( name="blip_caption", model_type="base_coco", is_eval=True, device=device ) # preprocess the image # vis_processors stores image transforms for "train" and "eval" (validation / testing / inference) image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) # generate caption caption = model.generate({"image": image}) assert caption == ["a large fountain spewing water into the air"] # generate multiple captions captions = model.generate({"image": image}, num_captions=3) assert len(captions) == 3 def test_caption_large(self): # loads BLIP caption base model, with finetuned checkpoints on MSCOCO captioning dataset. # this also loads the associated image processors model, vis_processors, _ = load_model_and_preprocess( name="blip_caption", model_type="large_coco", is_eval=True, device=device ) # preprocess the image # vis_processors stores image transforms for "train" and "eval" (validation / testing / inference) image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) # generate caption caption = model.generate({"image": image}) assert caption == ["a large statue of a person spraying water from a fountain"] # generate multiple captions captions = model.generate({"image": image}, num_captions=3) assert len(captions) == 3 def test_caption_forward(self): model, vis_processors, _ = load_model_and_preprocess( name="blip_caption", model_type="base_coco", is_eval=True, device=device ) # preprocess the image # vis_processors stores image transforms for "train" and "eval" (validation / testing / inference) image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) text_input = ["a large statue of a person spraying water from a fountain"] samples = {"image": image, "text_input": text_input} output = model(samples) assert output.intermediate_output.image_embeds.shape == torch.Size( [1, 577, 768] ) assert output.intermediate_output.decoder_labels.shape == torch.Size([1, 13]) assert pytest.approx(2.7152, precision) == output.loss.item() assert ( pytest.approx(-0.0200, precision) == torch.mean(output.intermediate_output.image_embeds).item() ) assert all( output.intermediate_output.decoder_labels[0] == torch.LongTensor( [ -100, -100, -100, -100, 1997, 1037, 2711, 29035, 2300, 2013, 1037, 9545, 102, ] ).to(device) ) def test_vqa(self): model, vis_processors, txt_processors = load_model_and_preprocess( name="blip_vqa", model_type="vqav2", is_eval=True, device=device ) # ask a random question. question = "Which city is this photo taken?" 
image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) question = txt_processors["eval"](question) samples = {"image": image, "text_input": question} answer = model.predict_answers( samples=samples, inference_method="generate", ) assert answer == ["singapore"] answer_list = ["Singapore", "London", "Palo Alto", "Tokyo"] answers = model.predict_answers(samples, answer_list=answer_list) assert answers == ["Singapore"] def test_retrieval(self): model = load_model("blip_retrieval", "coco", is_eval=True, device=device) images = torch.randn(4, 3, 384, 384).to(device) text_input = [ "caption of image 1", "another caption of image 1", "caption of image 2", "caption of image 3", ] image_id = torch.tensor([1, 1, 2, 3]).to(device) samples = { "image": images, "text_input": text_input, "image_id": image_id, "epoch": 0, "iters": 0, "num_iters_per_epoch": 100, } output = model(samples) assert output.intermediate_output.image_embeds.shape == torch.Size( [4, 577, 768] ) assert output.intermediate_output.text_embeds.shape == torch.Size([4, 35, 768]) assert output.intermediate_output.image_embeds_m.shape == torch.Size( [4, 577, 768] ) assert output.intermediate_output.text_embeds_m.shape == torch.Size( [4, 35, 768] ) assert ( output.intermediate_output.encoder_output.last_hidden_state.shape == torch.Size([4, 35, 768]) ) assert output.intermediate_output.itm_logits.shape == torch.Size([12, 2]) assert all( output.intermediate_output.itm_labels == torch.LongTensor([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]).to(device) ) def test_pretrain(self): model = load_model("blip_pretrain", "base", is_eval=True, device=device) images = torch.randn(4, 3, 224, 224).to(device) text_input = [ "caption of image 1", "another caption of image 1", "caption of image 2", "caption of image 3", ] samples = { "image": images, "text_input": text_input, "epoch": 0, "iters": 0, "num_iters_per_epoch": 100, } output = model(samples) assert output.intermediate_output.image_embeds.shape == torch.Size( [4, 197, 768] ) assert output.intermediate_output.text_embeds.shape == torch.Size([4, 30, 768]) assert output.intermediate_output.itm_logits.shape == torch.Size([12, 2]) assert all( output.intermediate_output.itm_labels == torch.LongTensor([1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]).to(device) ) assert output.intermediate_output.decoder_labels.shape == torch.Size([4, 30]) assert output.intermediate_output.decoder_output.logits.shape == torch.Size( [4, 30, 30524] ) def test_feature_extractor(self): from PIL import Image from lavis.models import load_model_and_preprocess raw_image = Image.open("docs/_static/merlion.png").convert("RGB") caption = "a large fountain spewing water into the air" model, vis_processors, txt_processors = load_model_and_preprocess( "blip_feature_extractor", model_type="base", is_eval=True, device=device ) image = vis_processors["eval"](raw_image).unsqueeze(0).to(device) text_input = txt_processors["eval"](caption) sample = {"image": image, "text_input": [text_input]} features_multimodal = model.extract_features(sample) features_text = model.extract_features(sample, mode="text") features_image = model.extract_features(sample, mode="image") assert features_multimodal.image_embeds.shape == torch.Size([1, 197, 768]) assert features_multimodal.multimodal_embeds.shape == torch.Size([1, 12, 768]) assert features_text.text_embeds.shape == torch.Size([1, 12, 768]) assert features_text.text_embeds_proj.shape == torch.Size([1, 12, 256]) assert features_image.image_embeds.shape == torch.Size([1, 197, 768]) assert 
features_image.image_embeds_proj.shape == torch.Size([1, 197, 256]) assert torch.mean(features_multimodal.image_embeds).item() == pytest.approx( -0.02032, precision ) assert torch.mean( features_multimodal.multimodal_embeds ).item() == pytest.approx(-0.00095, precision) assert torch.mean(features_text.text_embeds).item() == pytest.approx( -6.6098e-5, precision ) assert torch.mean(features_text.text_embeds_proj).item() == pytest.approx( -0.002149, precision ) assert torch.mean(features_image.image_embeds).item() == pytest.approx( -0.02032, precision ) assert torch.mean(features_image.image_embeds_proj).item() == pytest.approx( -0.0023, precision ) def test_itm(self): from PIL import Image from lavis.models import load_model_and_preprocess def compute_itm(): img = vis_processors["eval"](raw_image).unsqueeze(0).to(device) txt = txt_processors["eval"](caption) itm_output = model({"image": img, "text_input": [txt]}, match_head="itm") itm_scores = torch.nn.functional.softmax(itm_output, dim=1) return itm_scores def compute_itc(): img = vis_processors["eval"](raw_image).unsqueeze(0).to(device) txt = txt_processors["eval"](caption) itc_score = model({"image": img, "text_input": [txt]}, match_head="itc") return itc_score raw_image = Image.open("docs/_static/merlion.png").convert("RGB") model, vis_processors, txt_processors = load_model_and_preprocess( "blip_image_text_matching", model_type="base", is_eval=True, device=device ) caption = "merlion in Singapore" itm_scores = compute_itm() itc_score = compute_itc() assert itm_scores[:, 1].item() == pytest.approx(0.98613, abs=1e-5) assert itc_score.item() == pytest.approx(0.4633, abs=1e-4) caption = "a random irrelevant caption" itm_scores = compute_itm() itc_score = compute_itc() assert itm_scores[:, 1].item() == pytest.approx(0.05704, abs=1e-5) assert itc_score.item() == pytest.approx(0.23282, abs=1e-5) # test BLIP ITM large model, vis_processors, txt_processors = load_model_and_preprocess( "blip_image_text_matching", model_type="large", is_eval=True, device=device ) caption = "merlion in Singapore" itm_scores = compute_itm() itc_score = compute_itc() assert itm_scores[:, 1].item() == pytest.approx(0.99466, abs=1e-5) assert itc_score.item() == pytest.approx(0.4474, abs=1e-4) caption = "a random irrelevant caption" itm_scores = compute_itm() itc_score = compute_itc() assert itm_scores[:, 1].item() == pytest.approx(0.04744, abs=1e-5) assert itc_score.item() == pytest.approx(0.12821, abs=1e-5)
EXA-1-master
exa/libraries/LAVIS/tests/models/test_blip.py
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))


# -- Project information -----------------------------------------------------

project = "LAVIS"
copyright = "2022, salesforce.com inc."
author = (
    "Dongxu Li, Junnan Li, Hung Le, Guangsen Wang, Silvio Savarese, Steven C.H. Hoi"
)


# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["nbsphinx"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []


# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
# html_theme = "alabaster"
html_theme = "sphinx_rtd_theme"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# pygments_style = "sphinx"
EXA-1-master
exa/libraries/LAVIS/docs/conf.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os import sys from omegaconf import OmegaConf from lavis.common.registry import registry from lavis.datasets.builders import * from lavis.models import * from lavis.processors import * from lavis.tasks import * root_dir = os.path.dirname(os.path.abspath(__file__)) default_cfg = OmegaConf.load(os.path.join(root_dir, "configs/default.yaml")) registry.register_path("library_root", root_dir) repo_root = os.path.join(root_dir, "..") registry.register_path("repo_root", repo_root) cache_root = os.path.join(repo_root, default_cfg.env.cache_root) registry.register_path("cache_root", cache_root) registry.register("MAX_INT", sys.maxsize) registry.register("SPLIT_NAMES", ["train", "val", "test"])
EXA-1-master
exa/libraries/LAVIS/lavis/__init__.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import os import torch import torch.distributed as dist from lavis.common.dist_utils import get_rank, get_world_size, is_main_process, is_dist_avail_and_initialized from lavis.common.logger import MetricLogger, SmoothedValue from lavis.common.registry import registry from lavis.datasets.data_utils import prepare_sample class BaseTask: def __init__(self, **kwargs): super().__init__() self.inst_id_key = "instance_id" @classmethod def setup_task(cls, **kwargs): return cls() def build_model(self, cfg): model_config = cfg.model_cfg model_cls = registry.get_model_class(model_config.arch) return model_cls.from_config(model_config) def build_datasets(self, cfg): """ Build a dictionary of datasets, keyed by split 'train', 'valid', 'test'. Download dataset and annotations automatically if not exist. Args: cfg (common.config.Config): _description_ Returns: dict: Dictionary of torch.utils.data.Dataset objects by split. """ datasets = dict() datasets_config = cfg.datasets_cfg assert len(datasets_config) > 0, "At least one dataset has to be specified." for name in datasets_config: dataset_config = datasets_config[name] builder = registry.get_builder_class(name)(dataset_config) dataset = builder.build_datasets() datasets[name] = dataset return datasets def train_step(self, model, samples): loss = model(samples)["loss"] return loss def valid_step(self, model, samples): raise NotImplementedError def before_evaluation(self, model, dataset, **kwargs): model.before_evaluation(dataset=dataset, task_type=type(self)) def after_evaluation(self, **kwargs): pass def inference_step(self): raise NotImplementedError def evaluation(self, model, data_loader, cuda_enabled=True): metric_logger = MetricLogger(delimiter=" ") header = "Evaluation" # TODO make it configurable print_freq = 10 results = [] for samples in metric_logger.log_every(data_loader, print_freq, header): samples = prepare_sample(samples, cuda_enabled=cuda_enabled) eval_output = self.valid_step(model=model, samples=samples) results.extend(eval_output) if is_dist_avail_and_initialized(): dist.barrier() return results def train_epoch( self, epoch, model, data_loader, optimizer, lr_scheduler, scaler=None, cuda_enabled=False, log_freq=50, accum_grad_iters=1, ): return self._train_inner_loop( epoch=epoch, iters_per_epoch=len(data_loader), model=model, data_loader=data_loader, optimizer=optimizer, scaler=scaler, lr_scheduler=lr_scheduler, log_freq=log_freq, cuda_enabled=cuda_enabled, accum_grad_iters=accum_grad_iters, ) def train_iters( self, epoch, start_iters, iters_per_inner_epoch, model, data_loader, optimizer, lr_scheduler, scaler=None, cuda_enabled=False, log_freq=50, accum_grad_iters=1, ): return self._train_inner_loop( epoch=epoch, start_iters=start_iters, iters_per_epoch=iters_per_inner_epoch, model=model, data_loader=data_loader, optimizer=optimizer, scaler=scaler, lr_scheduler=lr_scheduler, log_freq=log_freq, cuda_enabled=cuda_enabled, accum_grad_iters=accum_grad_iters, ) def _train_inner_loop( self, epoch, iters_per_epoch, model, data_loader, optimizer, lr_scheduler, scaler=None, start_iters=None, log_freq=50, cuda_enabled=False, accum_grad_iters=1, ): """ An inner training loop compatible with both epoch-based and iter-based training. 
When using epoch-based, training stops after one epoch; when using iter-based, training stops after #iters_per_epoch iterations. """ use_amp = scaler is not None if not hasattr(data_loader, "__next__"): # convert to iterator if not already data_loader = iter(data_loader) metric_logger = MetricLogger(delimiter=" ") metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}")) metric_logger.add_meter("loss", SmoothedValue(window_size=1, fmt="{value:.4f}")) # if iter-based runner, schedule lr based on inner epoch. logging.info( "Start training epoch {}, {} iters per inner epoch.".format( epoch, iters_per_epoch ) ) header = "Train: data epoch: [{}]".format(epoch) if start_iters is None: # epoch-based runner inner_epoch = epoch else: # In iter-based runner, we schedule the learning rate based on iterations. inner_epoch = start_iters // iters_per_epoch header = header + "; inner epoch [{}]".format(inner_epoch) for i in metric_logger.log_every(range(iters_per_epoch), log_freq, header): # if using iter-based runner, we stop after iters_per_epoch iterations. if i >= iters_per_epoch: break samples = next(data_loader) samples = prepare_sample(samples, cuda_enabled=cuda_enabled) samples.update( { "epoch": inner_epoch, "num_iters_per_epoch": iters_per_epoch, "iters": i, } ) lr_scheduler.step(cur_epoch=inner_epoch, cur_step=i) with torch.cuda.amp.autocast(enabled=use_amp): loss = self.train_step(model=model, samples=samples) # after_train_step() if use_amp: scaler.scale(loss).backward() else: loss.backward() # update gradients every accum_grad_iters iterations if (i + 1) % accum_grad_iters == 0: if use_amp: scaler.step(optimizer) scaler.update() else: optimizer.step() optimizer.zero_grad() metric_logger.update(loss=loss.item()) metric_logger.update(lr=optimizer.param_groups[0]["lr"]) # after train_epoch() # gather the stats from all processes metric_logger.synchronize_between_processes() logging.info("Averaged stats: " + str(metric_logger.global_avg())) return { k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items() } @staticmethod def save_result(result, result_dir, filename, remove_duplicate=""): import json result_file = os.path.join( result_dir, "%s_rank%d.json" % (filename, get_rank()) ) final_result_file = os.path.join(result_dir, "%s.json" % filename) json.dump(result, open(result_file, "w")) if is_dist_avail_and_initialized(): dist.barrier() if is_main_process(): logging.warning("rank %d starts merging results." % get_rank()) # combine results from all processes result = [] for rank in range(get_world_size()): result_file = os.path.join( result_dir, "%s_rank%d.json" % (filename, rank) ) res = json.load(open(result_file, "r")) result += res if remove_duplicate: result_new = [] id_list = [] for res in result: if res[remove_duplicate] not in id_list: id_list.append(res[remove_duplicate]) result_new.append(res) result = result_new json.dump(result, open(final_result_file, "w")) print("result file saved to %s" % final_result_file) return final_result_file
EXA-1-master
exa/libraries/LAVIS/lavis/tasks/base_task.py
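The _train_inner_loop method above interleaves mixed-precision loss scaling with gradient accumulation: the loss is backpropagated on every iteration, but the optimizer only steps (and zeroes gradients) every accum_grad_iters iterations. The following is a minimal, self-contained sketch of that update pattern only; the model, data, and hyperparameters are toy stand-ins, not LAVIS objects.

import torch

# Toy setup standing in for the real model and data loader (assumed, not from LAVIS).
device = "cuda" if torch.cuda.is_available() else "cpu"
use_amp = device == "cuda"

model = torch.nn.Linear(8, 1).to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
accum_grad_iters = 4

for i in range(16):
    x = torch.randn(2, 8, device=device)
    y = torch.randn(2, 1, device=device)

    with torch.cuda.amp.autocast(enabled=use_amp):
        loss = torch.nn.functional.mse_loss(model(x), y)

    # Backward on every iteration; gradients accumulate in the .grad buffers.
    if use_amp:
        scaler.scale(loss).backward()
    else:
        loss.backward()

    # Step and reset only every `accum_grad_iters` iterations, which emulates
    # an effective batch size that is accum_grad_iters times larger.
    if (i + 1) % accum_grad_iters == 0:
        if use_amp:
            scaler.step(optimizer)
            scaler.update()
        else:
            optimizer.step()
        optimizer.zero_grad()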
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.common.registry import registry from lavis.tasks.base_task import BaseTask from lavis.tasks.captioning import CaptionTask from lavis.tasks.image_text_pretrain import ImageTextPretrainTask from lavis.tasks.multimodal_classification import ( MultimodalClassificationTask, ) from lavis.tasks.retrieval import RetrievalTask from lavis.tasks.vqa import VQATask, GQATask, AOKVQATask from lavis.tasks.vqa_reading_comprehension import VQARCTask, GQARCTask from lavis.tasks.dialogue import DialogueTask def setup_task(cfg): assert "task" in cfg.run_cfg, "Task name must be provided." task_name = cfg.run_cfg.task task = registry.get_task_class(task_name).setup_task(cfg=cfg) assert task is not None, "Task {} not properly registered.".format(task_name) return task __all__ = [ "BaseTask", "AOKVQATask", "RetrievalTask", "CaptionTask", "VQATask", "GQATask", "VQARCTask", "GQARCTask", "MultimodalClassificationTask", # "VideoQATask", # "VisualEntailmentTask", "ImageTextPretrainTask", "DialogueTask", ]
EXA-1-master
exa/libraries/LAVIS/lavis/tasks/__init__.py
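setup_task above resolves a task class from the registry via run_cfg.task and delegates construction to that class's setup_task. A hedged usage sketch follows; the "captioning" name and the config fields mirror the registered CaptionTask shown later in this dump, but the exact config schema used in real LAVIS runs may differ.

from omegaconf import OmegaConf

import lavis.tasks as tasks

# Minimal config carrying only what setup_task and CaptionTask.setup_task read.
# Field values are illustrative, not a recommended training configuration.
cfg = OmegaConf.create(
    {
        "run_cfg": {
            "task": "captioning",
            "num_beams": 3,
            "max_len": 30,
            "min_len": 8,
            "evaluate": True,
        }
    }
)

task = tasks.setup_task(cfg)
print(type(task).__name__)  # expected: CaptionTask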
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import json import logging import os import numpy as np import torch from lavis.common.dist_utils import is_main_process from lavis.common.registry import registry from lavis.tasks.base_task import BaseTask @registry.register_task("retrieval") class RetrievalTask(BaseTask): def __init__(self, cfg): super().__init__() self.cfg = cfg @classmethod def setup_task(cls, cfg): run_cfg = cfg.run_cfg return cls(cfg=run_cfg) def evaluation(self, model, data_loader, **kwargs): # score_i2t, score_t2i = model.compute_sim_matrix(model, data_loader) score_i2t, score_t2i = model.compute_sim_matrix(data_loader, task_cfg=self.cfg) if is_main_process(): eval_result = self._report_metrics( score_i2t, score_t2i, data_loader.dataset.txt2img, data_loader.dataset.img2txt, ) logging.info(eval_result) else: eval_result = None return eval_result def after_evaluation(self, val_result, **kwargs): return val_result @staticmethod @torch.no_grad() def _report_metrics(scores_i2t, scores_t2i, txt2img, img2txt): # Images->Text ranks = np.zeros(scores_i2t.shape[0]) for index, score in enumerate(scores_i2t): inds = np.argsort(score)[::-1] # Score rank = 1e20 for i in img2txt[index]: tmp = np.where(inds == i)[0][0] if tmp < rank: rank = tmp ranks[index] = rank # Compute metrics tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks) tr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks) tr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks) # Text->Images ranks = np.zeros(scores_t2i.shape[0]) for index, score in enumerate(scores_t2i): inds = np.argsort(score)[::-1] ranks[index] = np.where(inds == txt2img[index])[0][0] # Compute metrics ir1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks) ir5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks) ir10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks) tr_mean = (tr1 + tr5 + tr10) / 3 ir_mean = (ir1 + ir5 + ir10) / 3 r_mean = (tr_mean + ir_mean) / 2 agg_metrics = (tr1 + tr5 + tr10) / 3 eval_result = { "txt_r1": tr1, "txt_r5": tr5, "txt_r10": tr10, "txt_r_mean": tr_mean, "img_r1": ir1, "img_r5": ir5, "img_r10": ir10, "img_r_mean": ir_mean, "r_mean": r_mean, "agg_metrics": agg_metrics, } with open( os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a" ) as f: f.write(json.dumps(eval_result) + "\n") return eval_result
EXA-1-master
exa/libraries/LAVIS/lavis/tasks/retrieval.py
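RetrievalTask._report_metrics above derives recall@1/5/10 from the rank of the best-scoring ground-truth match in each row of the similarity matrix. A small NumPy sketch of the same computation on synthetic scores; the shapes and the img2txt mapping here are made up for illustration, not LAVIS data.

import numpy as np

# Toy similarity matrix: 3 images x 6 captions (random values).
scores_i2t = np.random.rand(3, 6)
# Ground-truth caption indices per image (toy mapping, mirrors img2txt).
img2txt = {0: [0, 1], 1: [2, 3], 2: [4, 5]}

ranks = np.zeros(scores_i2t.shape[0])
for index, score in enumerate(scores_i2t):
    inds = np.argsort(score)[::-1]  # captions sorted by descending score
    # Rank of the best-ranked ground-truth caption for this image.
    ranks[index] = min(np.where(inds == i)[0][0] for i in img2txt[index])

# recall@k = fraction of queries whose best ground-truth match lands in the top k.
r1, r5, r10 = (100.0 * np.mean(ranks < k) for k in (1, 5, 10))
print(r1, r5, r10)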
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.common.registry import registry from lavis.tasks.base_task import BaseTask @registry.register_task("image_text_pretrain") class ImageTextPretrainTask(BaseTask): def __init__(self): super().__init__() def evaluation(self, model, data_loader, cuda_enabled=True): pass
EXA-1-master
exa/libraries/LAVIS/lavis/tasks/image_text_pretrain.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import json import os from lavis.common.dist_utils import main_process from lavis.common.logger import MetricLogger from lavis.common.registry import registry from lavis.tasks.base_task import BaseTask from lavis.datasets.data_utils import prepare_sample import numpy as np @registry.register_task("dialogue") class DialogueTask(BaseTask): def __init__(self, num_beams, max_len, min_len, evaluate, report_metric=True): super().__init__() self.num_beams = num_beams self.max_len = max_len self.min_len = min_len self.evaluate = evaluate self.report_metric = report_metric @classmethod def setup_task(cls, cfg): run_cfg = cfg.run_cfg num_beams = run_cfg.num_beams max_len = run_cfg.max_len min_len = run_cfg.min_len evaluate = run_cfg.evaluate report_metric = run_cfg.get("report_metric", True) return cls( num_beams=num_beams, max_len=max_len, min_len=min_len, evaluate=evaluate, report_metric=report_metric, ) def valid_step(self, model, samples): results = [] loss = model(samples)["loss"].item() return [loss] def after_evaluation(self, val_result, split_name, epoch, **kwargs): if self.report_metric: avg_loss = np.mean(val_result) metrics = {"agg_metrics": avg_loss} else: metrics = {"agg_metrics": 0.0} return metrics @main_process def _report_metrics(self, eval_result_file, split_name): # TODO better way to define this coco_gt_root = os.path.join(registry.get_path("cache_root"), "coco_gt") coco_val = coco_dialogue_eval(coco_gt_root, eval_result_file, split_name) agg_metrics = coco_val.eval["CIDEr"] + coco_val.eval["Bleu_4"] log_stats = {split_name: {k: v for k, v in coco_val.eval.items()}} with open( os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a" ) as f: f.write(json.dumps(log_stats) + "\n") coco_res = {k: v for k, v in coco_val.eval.items()} coco_res["agg_metrics"] = agg_metrics return coco_res # TODO better structure for this. from pycocoevalcap.eval import COCOEvalCap from pycocotools.coco import COCO from torchvision.datasets.utils import download_url def coco_dialogue_eval(coco_gt_root, results_file, split): urls = { "val": "https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json", "test": "https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json", } filenames = { "val": "coco_karpathy_val_gt.json", "test": "coco_karpathy_test_gt.json", } download_url(urls[split], coco_gt_root) annotation_file = os.path.join(coco_gt_root, filenames[split]) # create coco object and coco_result object coco = COCO(annotation_file) coco_result = coco.loadRes(results_file) # create coco_eval object by taking coco and coco_result coco_eval = COCOEvalCap(coco, coco_result) # evaluate on a subset of images by setting # coco_eval.params['image_id'] = coco_result.getImgIds() # please remove this line when evaluating the full validation set # coco_eval.params['image_id'] = coco_result.getImgIds() # evaluate results # SPICE will take a few minutes the first time, but speeds up due to caching coco_eval.evaluate() # print output evaluation scores for metric, score in coco_eval.eval.items(): print(f"{metric}: {score:.3f}") return coco_eval
EXA-1-master
exa/libraries/LAVIS/lavis/tasks/dialogue.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import json import os from lavis.common.dist_utils import main_process from lavis.common.registry import registry from lavis.tasks.base_task import BaseTask @registry.register_task("captioning") class CaptionTask(BaseTask): def __init__(self, num_beams, max_len, min_len, evaluate, report_metric=True): super().__init__() self.num_beams = num_beams self.max_len = max_len self.min_len = min_len self.evaluate = evaluate self.report_metric = report_metric @classmethod def setup_task(cls, cfg): run_cfg = cfg.run_cfg num_beams = run_cfg.num_beams max_len = run_cfg.max_len min_len = run_cfg.min_len evaluate = run_cfg.evaluate report_metric = run_cfg.get("report_metric", True) return cls( num_beams=num_beams, max_len=max_len, min_len=min_len, evaluate=evaluate, report_metric=report_metric, ) def valid_step(self, model, samples): results = [] # run_cfg = slf.cfg.run_cfg captions = model.generate( samples, use_nucleus_sampling=False, num_beams=self.num_beams, max_length=self.max_len, min_length=self.min_len, ) img_ids = samples["image_id"] for caption, img_id in zip(captions, img_ids): results.append({"caption": caption, "image_id": int(img_id)}) return results def after_evaluation(self, val_result, split_name, epoch, **kwargs): eval_result_file = self.save_result( result=val_result, result_dir=registry.get_path("result_dir"), filename="{}_epoch{}".format(split_name, epoch), remove_duplicate="image_id", ) if self.report_metric: metrics = self._report_metrics( eval_result_file=eval_result_file, split_name=split_name ) else: metrics = {"agg_metrics": 0.0} return metrics @main_process def _report_metrics(self, eval_result_file, split_name): # TODO better way to define this coco_gt_root = os.path.join(registry.get_path("cache_root"), "coco_gt") coco_val = coco_caption_eval(coco_gt_root, eval_result_file, split_name) agg_metrics = coco_val.eval["CIDEr"] + coco_val.eval["Bleu_4"] log_stats = {split_name: {k: v for k, v in coco_val.eval.items()}} with open( os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a" ) as f: f.write(json.dumps(log_stats) + "\n") coco_res = {k: v for k, v in coco_val.eval.items()} coco_res["agg_metrics"] = agg_metrics return coco_res # TODO better structure for this. 
from pycocoevalcap.eval import COCOEvalCap from pycocotools.coco import COCO from torchvision.datasets.utils import download_url def coco_caption_eval(coco_gt_root, results_file, split): urls = { "val": "https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json", "test": "https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json", } filenames = { "val": "coco_karpathy_val_gt.json", "test": "coco_karpathy_test_gt.json", } download_url(urls[split], coco_gt_root) annotation_file = os.path.join(coco_gt_root, filenames[split]) # create coco object and coco_result object coco = COCO(annotation_file) coco_result = coco.loadRes(results_file) # create coco_eval object by taking coco and coco_result coco_eval = COCOEvalCap(coco, coco_result) # evaluate on a subset of images by setting # coco_eval.params['image_id'] = coco_result.getImgIds() # please remove this line when evaluating the full validation set # coco_eval.params['image_id'] = coco_result.getImgIds() # evaluate results # SPICE will take a few minutes the first time, but speeds up due to caching coco_eval.evaluate() # print output evaluation scores for metric, score in coco_eval.eval.items(): print(f"{metric}: {score:.3f}") return coco_eval
EXA-1-master
exa/libraries/LAVIS/lavis/tasks/captioning.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import json import os import lavis.common.dist_utils as dist_utils from lavis.common.registry import registry from lavis.common.vqa_tools.vqa import VQA from lavis.common.vqa_tools.vqa_eval import VQAEval from lavis.tasks.base_task import BaseTask @registry.register_task("vqa") class VQATask(BaseTask): def __init__( self, num_beams, max_len, min_len, evaluate, num_ans_candidates, inference_method="rank", prompt="", ): super().__init__() self.num_beams = num_beams self.max_len = max_len self.min_len = min_len self.evaluate = evaluate self.inference_method = inference_method self.num_ans_candidates = num_ans_candidates self.prompt = prompt self.answer_list = None self.ques_files = dict() self.anno_files = dict() @classmethod def setup_task(cls, cfg): run_cfg = cfg.run_cfg num_beams = run_cfg.get("num_beams", 3) max_len = run_cfg.get("max_len", 10) min_len = run_cfg.get("min_len", 1) evaluate = run_cfg.get("evaluate", False) inference_method = run_cfg.get("inference_method", "rank") num_ans_candidates = run_cfg.get("num_ans_candidates", 128) prompt = run_cfg.get("prompt", "") return cls( num_beams=num_beams, max_len=max_len, min_len=min_len, evaluate=evaluate, num_ans_candidates=num_ans_candidates, inference_method=inference_method, prompt=prompt, ) def build_datasets(self, cfg): datasets = super().build_datasets(cfg) # get question file, annotation file and anwser list in COCO format for dataset in datasets.values(): for split in dataset: if ( hasattr(dataset[split], "coco_fmt_qust_file") and dataset[split].coco_fmt_qust_file is not None ): self.ques_files[split] = dataset[split].coco_fmt_qust_file self.anno_files[split] = dataset[split].coco_fmt_anno_file try: self.answer_list = dataset[split].answer_list except AttributeError: # if answer_list is not provided, then set it to None pass if len(self.ques_files) > 0: assert len(self.ques_files) == len( self.anno_files ), "Only support one split for evaluation." return datasets def valid_step(self, model, samples): answers = model.predict_answers( samples=samples, answer_list=self.answer_list, inference_method=self.inference_method, num_beams=self.num_beams, max_len=self.max_len, min_len=self.min_len, num_ans_candidates=self.num_ans_candidates, prompt=self.prompt, ) pred_qa_pairs = [] question_id = samples["question_id"] for answer, ques_id in zip(answers, question_id): ques_id = int(ques_id.item()) pred_qa_pairs.append({"question_id": ques_id, "answer": answer}) return pred_qa_pairs def after_evaluation(self, val_result, split_name, **kwargs): result_file = self.save_result( val_result, result_dir=registry.get_path("result_dir"), filename=f"{split_name}_vqa_result", remove_duplicate="question_id", ) metrics = self._report_metrics(result_file=result_file, split=split_name) return metrics @dist_utils.main_process def _report_metrics(self, result_file, split): """ Use official VQA evaluation script to report metrics. 
""" metrics = {} if split in self.ques_files and split in self.anno_files: vqa = VQA(self.anno_files[split], self.ques_files[split]) vqa_result = vqa.loadRes( resFile=result_file, quesFile=self.ques_files[split] ) # create vqaEval object by taking vqa and vqaRes # n is precision of accuracy (number of places after decimal), default is 2 vqa_scorer = VQAEval(vqa, vqa_result, n=2) logging.info("Start VQA evaluation.") vqa_scorer.evaluate() # print accuracies overall_acc = vqa_scorer.accuracy["overall"] metrics["agg_metrics"] = overall_acc logging.info("Overall Accuracy is: %.02f\n" % overall_acc) logging.info("Per Answer Type Accuracy is the following:") for ans_type in vqa_scorer.accuracy["perAnswerType"]: logging.info( "%s : %.02f" % (ans_type, vqa_scorer.accuracy["perAnswerType"][ans_type]) ) metrics[ans_type] = vqa_scorer.accuracy["perAnswerType"][ans_type] with open( os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a" ) as f: f.write(json.dumps(metrics) + "\n") return metrics @registry.register_task("gqa") class GQATask(VQATask): def valid_step(self, model, samples): answers = model.predict_answers( samples=samples, answer_list=self.answer_list, inference_method=self.inference_method, num_beams=self.num_beams, max_len=self.max_len, min_len=self.min_len, num_ans_candidates=self.num_ans_candidates, prompt=self.prompt, ) pred_qa_pairs = [] question_id = samples["question_id"] gt_answers = samples["answer"] for answer, ques_id, gt_answer in zip(answers, question_id, gt_answers): ques_id = int(ques_id.item()) pred_qa_pairs.append({"question_id": ques_id, "pred_ans": answer, "gt_ans": gt_answer}) return pred_qa_pairs @dist_utils.main_process def _report_metrics(self, result_file, split): """ TODO: add other evaluation metrics for GQA """ results = json.load(open(result_file, "r")) acc = [] vqa_tool = VQAEval() for res in results: if res["gt_ans"] is None: # prepare test results for leaderboard evaluation self._save_result_leaderboard(results) return gt_ans = res["gt_ans"] pred = res["pred_ans"] if self.inference_method == "generate": pred = vqa_tool.processPunctuation(pred) pred = vqa_tool.processDigitArticle(pred) vqa_acc = 1 if pred == gt_ans else 0 acc.append(vqa_acc) accuracy = sum(acc) / len(acc) * 100 metrics = {"agg_metrics": accuracy, "acc": accuracy} with open( os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a" ) as f: f.write(json.dumps(metrics) + "\n") logging.info(metrics) return metrics @registry.register_task("aok_vqa") class AOKVQATask(VQATask): def valid_step(self, model, samples): answers = model.predict_answers( samples=samples, answer_list=self.answer_list, inference_method=self.inference_method, num_beams=self.num_beams, max_len=self.max_len, min_len=self.min_len, num_ans_candidates=self.num_ans_candidates, ) pred_qa_pairs = [] question_id = samples["question_id"] gt_answers = samples["direct_answers"] for pred_answer, ques_id, gt_answer in zip(answers, question_id, gt_answers): pred_qa_pairs.append( {"question_id": ques_id, "pred_ans": pred_answer, "gt_ans": gt_answer} ) return pred_qa_pairs @dist_utils.main_process def _report_metrics(self, result_file, split): """ Implementing accuracy computation for AOKVQA, see https://github.com/allenai/aokvqa/blob/main/evaluation/eval_predictions.py#L45 for details. 
""" # TODO add evaluation for multi-choice results = json.load(open(result_file, "r")) acc = [] for res in results: if res["gt_ans"] is None: # prepare test results for leaderboard evaluation self._save_result_leaderboard(results) return pred = res["pred_ans"] gt_ans = res["gt_ans"] num_match = sum([pred == gt for gt in gt_ans]) vqa_acc = min(1.0, num_match / 3.0) acc.append(vqa_acc) accuracy = sum(acc) / len(acc) * 100 metrics = {"agg_metrics": accuracy, "acc": accuracy} with open( os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a" ) as f: f.write(json.dumps(metrics) + "\n") logging.info(metrics) return metrics @dist_utils.main_process def _save_result_leaderboard(self, results): """ Saving the results in the format required for leaderboard evaluation. [TODO] add support for multi-choice. """ result_leaderboard = dict() for res in results: result_leaderboard[res["question_id"]] = { "direct_answer": res["pred_ans"], "multiple_choice": "", } result_file = registry.get_path("result_dir") + "_leaderboard.json" with open(result_file, "w") as f: json.dump(result_leaderboard, f) logging.info(f"Saved results for leaderboard evaluation at {result_file}")
EXA-1-master
exa/libraries/LAVIS/lavis/tasks/vqa.py
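AOKVQATask._report_metrics above scores each prediction with the soft VQA rule min(1, number of matching ground-truth answers / 3). A short standalone illustration with made-up answers (not real AOKVQA data):

# Toy (prediction, ground-truth answer list) pairs, assumed for illustration only.
examples = [
    ("bench", ["bench", "bench", "seat", "bench"]),  # 3 matches -> accuracy 1.0
    ("seat", ["bench", "bench", "seat", "bench"]),   # 1 match   -> accuracy 1/3
    ("table", ["bench", "bench", "seat", "bench"]),  # 0 matches -> accuracy 0.0
]

acc = []
for pred, gt_answers in examples:
    num_match = sum(pred == gt for gt in gt_answers)
    acc.append(min(1.0, num_match / 3.0))

accuracy = sum(acc) / len(acc) * 100
print(f"{accuracy:.2f}")  # (100 + 33.33 + 0) / 3 is roughly 44.44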
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import json import os import torch import torch.distributed as dist from itertools import chain import lavis.common.dist_utils as dist_utils from lavis.common.dist_utils import get_rank, get_world_size, is_main_process from lavis.common.registry import registry from lavis.common.vqa_tools.vqa_eval import VQAEval as VQATool from lavis.tasks.vqa import VQATask @registry.register_task("vqa_reading_comprehension") class VQARCTask(VQATask): def __init__( self, num_beams, max_len, min_len, evaluate, num_ans_candidates, inference_method="rank", **kwargs, ): super().__init__(num_beams, max_len, min_len, evaluate, num_ans_candidates, inference_method) self.config = kwargs.get('config') @classmethod def setup_task(cls, cfg): run_cfg = cfg.run_cfg num_beams = run_cfg.get("num_beams", 3) max_len = run_cfg.get("max_len", 10) min_len = run_cfg.get("min_len", 1) evaluate = run_cfg.get("evaluate", False) inference_method = run_cfg.get("inference_method", "rank") num_ans_candidates = run_cfg.get("num_ans_candidates", 128) return cls( num_beams=num_beams, max_len=max_len, min_len=min_len, evaluate=evaluate, num_ans_candidates=num_ans_candidates, inference_method=inference_method, config=run_cfg, ) def valid_step(self, model, samples): answers, captions, gradcams = model.predict_answers( samples=samples, inference_method=self.inference_method, num_beams=self.num_beams, max_len=self.max_len, min_len=self.min_len, internal_bsz_fid=self.config['internal_bsz_fid'], num_captions=self.config['num_captions'], num_captions_fid=self.config['num_captions_fid'], cap_max_length=self.config['cap_max_length'], cap_min_length=self.config['cap_min_length'], top_k=self.config['top_k'], top_p=self.config['top_p'], repetition_penalty=self.config['repetition_penalty'], num_patches=self.config['num_patches'], block_num=self.config['block_num'], ) pred_qa_pairs = [] sample_captions = [] sample_gradcams = [] question_id = samples["question_id"] for answer, caption, gradcam, ques_id in zip(answers, captions, gradcams, question_id): ques_id = int(ques_id.item()) pred_qa_pairs.append({"question_id": ques_id, "answer": answer}) sample_captions.append({"question_id": ques_id, "caption": caption}) sample_gradcams.append({"question_id": ques_id, "gradcam": gradcam}) return [sample_gradcams, sample_captions, pred_qa_pairs] def after_evaluation(self, val_result, split_name, **kwargs): result_ = list(chain(*val_result[0::3])) result_file = self.save_gradcam( result_, result_dir=registry.get_path("result_dir"), filename=f"{split_name}_gradcam_result", remove_duplicate="question_id", ) result_ = list(chain(*val_result[1::3])) result_file = self.save_result( result_, result_dir=registry.get_path("result_dir"), filename=f"{split_name}_caption_result", remove_duplicate="question_id", ) result_ = list(chain(*val_result[2::3])) result_file = self.save_result( result_, result_dir=registry.get_path("result_dir"), filename=f"{split_name}_vqa_result", remove_duplicate="question_id", ) metrics = self._report_metrics(result_file=result_file, split=split_name) return metrics def save_gradcam(self, result, result_dir, filename, remove_duplicate=""): result_file = os.path.join(result_dir, '%s_rank%d.pth' % (filename, get_rank())) final_result_file = os.path.join(result_dir, '%s.pth' % filename) torch.save({'result': result}, 
result_file) dist.barrier() if is_main_process(): logging.warning("rank %d starts merging results." % get_rank()) # combine results from all processes result = [] for rank in range(get_world_size()): result_file = os.path.join(result_dir, '%s_rank%d.pth' % (filename, rank)) res_ckpt = torch.load(result_file, map_location='cpu') res = res_ckpt['result'] result += res if remove_duplicate: result_new = [] id_list = [] for res in result: if res[remove_duplicate] not in id_list: id_list.append(res[remove_duplicate]) result_new.append(res) result = result_new torch.save({'result': result}, final_result_file) print("result file saved to %s" % final_result_file) return final_result_file @registry.register_task("gqa_reading_comprehension") class GQARCTask(VQARCTask): def valid_step(self, model, samples): answers, captions, gradcams = model.predict_answers( samples=samples, inference_method=self.inference_method, num_beams=self.num_beams, max_len=self.max_len, min_len=self.min_len, internal_bsz_fid=self.config['internal_bsz_fid'], num_captions=self.config['num_captions'], num_captions_fid=self.config['num_captions_fid'], cap_max_length=self.config['cap_max_length'], cap_min_length=self.config['cap_min_length'], top_k=self.config['top_k'], top_p=self.config['top_p'], repetition_penalty=self.config['repetition_penalty'], num_patches=self.config['num_patches'], block_num=self.config['block_num'], ) pred_qa_pairs = [] sample_captions = [] sample_gradcams = [] question_id = samples["question_id"] gt_answers = samples["answer"] for pred_answer, caption, gradcam, ques_id, gt_answer in zip(answers, captions, gradcams, question_id, gt_answers): ques_id = int(ques_id.item()) pred_qa_pairs.append({"question_id": ques_id, "pred_ans": pred_answer, "gt_ans": gt_answer}) sample_captions.append({"question_id": ques_id, "caption": caption}) sample_gradcams.append({"question_id": ques_id, "gradcam": gradcam}) return [sample_gradcams, sample_captions, pred_qa_pairs] @dist_utils.main_process def _report_metrics(self, result_file, split): """ TODO: add other evaluation metrics for GQA """ results = json.load(open(result_file, "r")) acc = [] vqa_tool = VQATool() for res in results: if res["gt_ans"] is None: # prepare test results for leaderboard evaluation self._save_result_leaderboard(results) return gt_ans = res["gt_ans"] pred = res["pred_ans"] if self.inference_method == "generate": pred = vqa_tool.processPunctuation(pred) pred = vqa_tool.processDigitArticle(pred) vqa_acc = 1 if pred == gt_ans else 0 acc.append(vqa_acc) accuracy = sum(acc) / len(acc) * 100 metrics = {"agg_metrics": accuracy, "acc": accuracy} with open( os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a" ) as f: f.write(json.dumps(metrics) + "\n") logging.info(metrics) return metrics @dist_utils.main_process def _save_result_leaderboard(self, results): """ Saving the results in the format required for leaderboard evaluation. """ result_leaderboard = [] for res in results: result_leaderboard.append({ "questionId": str(res['question_id']), "prediction": str(res["pred_ans"]), }) result_file = registry.get_path("result_dir") + "_leaderboard.json" with open(result_file, "w") as f: json.dump(result_leaderboard, f) logging.info(f"Saved results for leaderboard evaluation at {result_file}")
EXA-1-master
exa/libraries/LAVIS/lavis/tasks/vqa_reading_comprehension.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import json import os import logging import numpy as np import torch from lavis.common.dist_utils import main_process from lavis.common.registry import registry from lavis.tasks.base_task import BaseTask @registry.register_task("multimodal_classification") class MultimodalClassificationTask(BaseTask): def __init__(self): super().__init__() def valid_step(self, model, samples): results = [] outputs = model.predict(samples) predictions = outputs["predictions"] targets = outputs["targets"] predictions = predictions.max(1)[1].cpu().numpy() targets = targets.cpu().numpy() indices = samples[self.inst_id_key] for pred, tgt, index in zip(predictions, targets, indices): if isinstance(index, torch.Tensor): index = index.item() results.append( { self.inst_id_key: index, "prediction": pred.item(), "target": tgt.item(), } ) return results def after_evaluation(self, val_result, split_name, epoch, **kwargs): eval_result_file = self.save_result( result=val_result, result_dir=registry.get_path("result_dir"), filename="{}_epoch{}".format(split_name, epoch), remove_duplicate=self.inst_id_key, ) metrics = self._report_metrics( eval_result_file=eval_result_file, split_name=split_name ) return metrics @main_process def _report_metrics(self, eval_result_file, split_name): results = json.load(open(eval_result_file)) predictions = np.array([res["prediction"] for res in results]) targets = np.array([res["target"] for res in results]) accuracy = (targets == predictions).sum() / targets.shape[0] metrics = {"agg_metrics": accuracy, "acc": accuracy} log_stats = {split_name: {k: v for k, v in metrics.items()}} with open( os.path.join(registry.get_path("output_dir"), "evaluate.txt"), "a" ) as f: f.write(json.dumps(log_stats) + "\n") logging.info(metrics) return metrics
EXA-1-master
exa/libraries/LAVIS/lavis/tasks/multimodal_classification.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import gzip import logging import os import random as rnd import tarfile import zipfile import decord import webdataset as wds import numpy as np import torch from torch.utils.data.dataset import IterableDataset, ChainDataset from decord import VideoReader from lavis.common.registry import registry from lavis.datasets.datasets.base_dataset import ConcatDataset from tqdm import tqdm decord.bridge.set_bridge("torch") MAX_INT = registry.get("MAX_INT") def load_video(video_path, n_frms=MAX_INT, height=-1, width=-1, sampling="uniform"): vr = VideoReader(uri=video_path, height=height, width=width) vlen = len(vr) start, end = 0, vlen n_frms = min(n_frms, vlen) if sampling == "uniform": indices = np.arange(start, end, vlen / n_frms).astype(int) elif sampling == "headtail": indices_h = sorted(rnd.sample(range(vlen // 2), n_frms // 2)) indices_t = sorted(rnd.sample(range(vlen // 2, vlen), n_frms // 2)) indices = indices_h + indices_t else: raise NotImplementedError # get_batch -> T, H, W, C frms = vr.get_batch(indices).permute(3, 0, 1, 2).float() # (C, T, H, W) return frms def apply_to_sample(f, sample): if len(sample) == 0: return {} def _apply(x): if torch.is_tensor(x): return f(x) elif isinstance(x, dict): return {key: _apply(value) for key, value in x.items()} elif isinstance(x, list): return [_apply(x) for x in x] else: return x return _apply(sample) def move_to_cuda(sample): def _move_to_cuda(tensor): return tensor.cuda() return apply_to_sample(_move_to_cuda, sample) def prepare_sample(samples, cuda_enabled=True): if cuda_enabled: samples = move_to_cuda(samples) # TODO fp16 support return samples def reorg_datasets_by_split(datasets): """ Organizes datasets by split. Args: datasets: dict of torch.utils.data.Dataset objects by name. Returns: Dict of datasets by split {split_name: List[Datasets]}. """ # if len(datasets) == 1: # return datasets[list(datasets.keys())[0]] # else: reorg_datasets = dict() # reorganize by split for _, dataset in datasets.items(): for split_name, dataset_split in dataset.items(): if split_name not in reorg_datasets: reorg_datasets[split_name] = [dataset_split] else: reorg_datasets[split_name].append(dataset_split) return reorg_datasets def concat_datasets(datasets): """ Concatenates multiple datasets into a single dataset. It supports may-style datasets and DataPipeline from WebDataset. Currently, does not support generic IterableDataset because it requires creating separate samplers. Now only supports conctenating training datasets and assuming validation and testing have only a single dataset. This is because metrics should not be computed on the concatenated datasets. Args: datasets: dict of torch.utils.data.Dataset objects by split. Returns: Dict of concatenated datasets by split, "train" is the concatenation of multiple datasets, "val" and "test" remain the same. If the input training datasets contain both map-style and DataPipeline datasets, returns a tuple, where the first element is a concatenated map-style dataset and the second element is a chained DataPipeline dataset. 
""" # concatenate datasets in the same split for split_name in datasets: if split_name != "train": assert ( len(datasets[split_name]) == 1 ), "Do not support multiple {} datasets.".format(split_name) datasets[split_name] = datasets[split_name][0] else: iterable_datasets, map_datasets = [], [] for dataset in datasets[split_name]: if isinstance(dataset, wds.DataPipeline): logging.info( "Dataset {} is IterableDataset, can't be concatenated.".format( dataset ) ) iterable_datasets.append(dataset) elif isinstance(dataset, IterableDataset): raise NotImplementedError( "Do not support concatenation of generic IterableDataset." ) else: map_datasets.append(dataset) # if len(iterable_datasets) > 0: # concatenate map-style datasets and iterable-style datasets separately chained_datasets = ( ChainDataset(iterable_datasets) if len(iterable_datasets) > 0 else None ) concat_datasets = ( ConcatDataset(map_datasets) if len(map_datasets) > 0 else None ) train_datasets = concat_datasets, chained_datasets train_datasets = tuple([x for x in train_datasets if x is not None]) train_datasets = ( train_datasets[0] if len(train_datasets) == 1 else train_datasets ) datasets[split_name] = train_datasets return datasets def extract_archive(from_path, to_path=None, overwrite=False): """Extract archive. Args: from_path: the path of the archive. to_path: the root path of the extracted files (directory of from_path) overwrite: overwrite existing files (False) Returns: List of paths to extracted files even if not overwritten. Examples: >>> url = 'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz' >>> from_path = './validation.tar.gz' >>> to_path = './' >>> torchtext.utils.download_from_url(url, from_path) >>> torchtext.utils.extract_archive(from_path, to_path) >>> ['.data/val.de', '.data/val.en'] >>> torchtext.utils.download_from_url(url, from_path) >>> torchtext.utils.extract_archive(from_path, to_path) >>> ['.data/val.de', '.data/val.en'] """ if to_path is None: to_path = os.path.dirname(from_path) if from_path.endswith((".tar.gz", ".tgz")): logging.info("Opening tar file {} to {}.".format(from_path, to_path)) with tarfile.open(from_path, "r") as tar: files = [] for file_ in tqdm(tar): file_path = os.path.join(to_path, file_.name) if file_.isfile(): files.append(file_path) if os.path.exists(file_path): logging.info("{} already extracted.".format(file_path)) if not overwrite: continue tar.extract(file_, to_path) logging.info("Finished extracting tar file {}.".format(from_path)) return files elif from_path.endswith(".zip"): assert zipfile.is_zipfile(from_path), from_path logging.info("Opening zip file {} to {}.".format(from_path, to_path)) with zipfile.ZipFile(from_path, "r") as zfile: files = [] for file_ in tqdm(zfile.namelist()): file_path = os.path.join(to_path, file_) files.append(file_path) if os.path.exists(file_path): logging.info("{} already extracted.".format(file_path)) if not overwrite: continue zfile.extract(file_, to_path) files = [f for f in files if os.path.isfile(f)] logging.info("Finished extracting zip file {}.".format(from_path)) return files elif from_path.endswith(".gz"): logging.info("Opening gz file {} to {}.".format(from_path, to_path)) default_block_size = 65536 filename = from_path[:-3] files = [filename] with gzip.open(from_path, "rb") as gzfile, open(filename, "wb") as d_file: while True: block = gzfile.read(default_block_size) if not block: break else: d_file.write(block) d_file.write(block) logging.info("Finished extracting gz file {}.".format(from_path)) return files else: 
        raise NotImplementedError(
            "We currently only support tar.gz, .tgz, .gz and zip archives."
        )


def save_frames_grid(img_array, out_path):
    import torch
    from PIL import Image
    from torchvision.utils import make_grid

    if len(img_array.shape) == 3:
        img_array = img_array.unsqueeze(0)
    elif len(img_array.shape) == 5:
        b, t, c, h, w = img_array.shape
        img_array = img_array.view(-1, c, h, w)
    elif len(img_array.shape) == 4:
        pass
    else:
        raise NotImplementedError(
            "Supports only (b,t,c,h,w)-shaped inputs. First two dimensions can be ignored."
        )

    assert img_array.shape[1] == 3, "Expecting RGB inputs, i.e. a channel dimension of size 3."

    grid = make_grid(img_array)

    ndarr = grid.permute(1, 2, 0).to("cpu", torch.uint8).numpy()
    img = Image.fromarray(ndarr)

    img.save(out_path)
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/data_utils.py
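For reference, the frame-index selection inside load_video above can be exercised on its own. The sketch below mirrors the "uniform" and "headtail" branches for a hypothetical clip length, so it runs without decord or an actual video file.

# Minimal sketch of the index selection used in load_video above, reproduced
# standalone. The clip length (vlen) and n_frms values here are hypothetical.
import random as rnd

import numpy as np

vlen, n_frms = 300, 8
n_frms = min(n_frms, vlen)

# "uniform": evenly spaced indices across the clip.
uniform = np.arange(0, vlen, vlen / n_frms).astype(int)

# "headtail": half the frames drawn at random from the first half of the clip,
# half from the second half, each kept in temporal order.
head = sorted(rnd.sample(range(vlen // 2), n_frms // 2))
tail = sorted(rnd.sample(range(vlen // 2, vlen), n_frms // 2))
headtail = head + tail

print(uniform)   # [  0  37  75 112 150 187 225 262]
print(headtail)  # 4 random indices from [0, 150) followed by 4 from [150, 300)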
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os from lavis.common.registry import registry from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.imagefolder_dataset import ImageFolderDataset @registry.register_builder("imagenet") class ImageNetBuilder(BaseDatasetBuilder): train_dataset_cls = ImageFolderDataset eval_dataset_cls = ImageFolderDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/imagenet/defaults.yaml"} def _download_ann(self): pass def build(self): self.build_processors() build_info = self.config.build_info vis_info = build_info.get(self.data_type) datasets = dict() for split in build_info.splits: assert split in [ "train", "val", ], "Invalid split name {}, must be one of 'train', 'val' and 'test'." is_train = split == "train" vis_processor = ( self.vis_processors["train"] if is_train else self.vis_processors["eval"] ) vis_path = os.path.join(vis_info.storage, split) # create datasets dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls datasets[split] = dataset_cls( vis_processor=vis_processor, vis_root=vis_path, classnames=imagenet_classnames, ) return datasets imagenet_classnames = [ "tench", "goldfish", "great white shark", "tiger shark", "hammerhead shark", "electric ray", "stingray", "rooster", "hen", "ostrich", "brambling", "goldfinch", "house finch", "junco", "indigo bunting", "American robin", "bulbul", "jay", "magpie", "chickadee", "American dipper", "kite (bird of prey)", "bald eagle", "vulture", "great grey owl", "fire salamander", "smooth newt", "newt", "spotted salamander", "axolotl", "American bullfrog", "tree frog", "tailed frog", "loggerhead sea turtle", "leatherback sea turtle", "mud turtle", "terrapin", "box turtle", "banded gecko", "green iguana", "Carolina anole", "desert grassland whiptail lizard", "agama", "frilled-necked lizard", "alligator lizard", "Gila monster", "European green lizard", "chameleon", "Komodo dragon", "Nile crocodile", "American alligator", "triceratops", "worm snake", "ring-necked snake", "eastern hog-nosed snake", "smooth green snake", "kingsnake", "garter snake", "water snake", "vine snake", "night snake", "boa constrictor", "African rock python", "Indian cobra", "green mamba", "sea snake", "Saharan horned viper", "eastern diamondback rattlesnake", "sidewinder rattlesnake", "trilobite", "harvestman", "scorpion", "yellow garden spider", "barn spider", "European garden spider", "southern black widow", "tarantula", "wolf spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse", "prairie grouse", "peafowl", "quail", "partridge", "african grey parrot", "macaw", "sulphur-crested cockatoo", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "duck", "red-breasted merganser", "goose", "black swan", "tusker", "echidna", "platypus", "wallaby", "koala", "wombat", "jellyfish", "sea anemone", "brain coral", "flatworm", "nematode", "conch", "snail", "slug", "sea slug", "chiton", "chambered nautilus", "Dungeness crab", "rock crab", "fiddler crab", "red king crab", "American lobster", "spiny lobster", "crayfish", "hermit crab", "isopod", "white stork", "black stork", "spoonbill", "flamingo", "little blue heron", "great egret", "bittern bird", "crane bird", "limpkin", "common gallinule", "American coot", "bustard", "ruddy turnstone", "dunlin", 
"common redshank", "dowitcher", "oystercatcher", "pelican", "king penguin", "albatross", "grey whale", "killer whale", "dugong", "sea lion", "Chihuahua", "Japanese Chin", "Maltese", "Pekingese", "Shih Tzu", "King Charles Spaniel", "Papillon", "toy terrier", "Rhodesian Ridgeback", "Afghan Hound", "Basset Hound", "Beagle", "Bloodhound", "Bluetick Coonhound", "Black and Tan Coonhound", "Treeing Walker Coonhound", "English foxhound", "Redbone Coonhound", "borzoi", "Irish Wolfhound", "Italian Greyhound", "Whippet", "Ibizan Hound", "Norwegian Elkhound", "Otterhound", "Saluki", "Scottish Deerhound", "Weimaraner", "Staffordshire Bull Terrier", "American Staffordshire Terrier", "Bedlington Terrier", "Border Terrier", "Kerry Blue Terrier", "Irish Terrier", "Norfolk Terrier", "Norwich Terrier", "Yorkshire Terrier", "Wire Fox Terrier", "Lakeland Terrier", "Sealyham Terrier", "Airedale Terrier", "Cairn Terrier", "Australian Terrier", "Dandie Dinmont Terrier", "Boston Terrier", "Miniature Schnauzer", "Giant Schnauzer", "Standard Schnauzer", "Scottish Terrier", "Tibetan Terrier", "Australian Silky Terrier", "Soft-coated Wheaten Terrier", "West Highland White Terrier", "Lhasa Apso", "Flat-Coated Retriever", "Curly-coated Retriever", "Golden Retriever", "Labrador Retriever", "Chesapeake Bay Retriever", "German Shorthaired Pointer", "Vizsla", "English Setter", "Irish Setter", "Gordon Setter", "Brittany dog", "Clumber Spaniel", "English Springer Spaniel", "Welsh Springer Spaniel", "Cocker Spaniel", "Sussex Spaniel", "Irish Water Spaniel", "Kuvasz", "Schipperke", "Groenendael dog", "Malinois", "Briard", "Australian Kelpie", "Komondor", "Old English Sheepdog", "Shetland Sheepdog", "collie", "Border Collie", "Bouvier des Flandres dog", "Rottweiler", "German Shepherd Dog", "Dobermann", "Miniature Pinscher", "Greater Swiss Mountain Dog", "Bernese Mountain Dog", "Appenzeller Sennenhund", "Entlebucher Sennenhund", "Boxer", "Bullmastiff", "Tibetan Mastiff", "French Bulldog", "Great Dane", "St. 
Bernard", "husky", "Alaskan Malamute", "Siberian Husky", "Dalmatian", "Affenpinscher", "Basenji", "pug", "Leonberger", "Newfoundland dog", "Great Pyrenees dog", "Samoyed", "Pomeranian", "Chow Chow", "Keeshond", "brussels griffon", "Pembroke Welsh Corgi", "Cardigan Welsh Corgi", "Toy Poodle", "Miniature Poodle", "Standard Poodle", "Mexican hairless dog (xoloitzcuintli)", "grey wolf", "Alaskan tundra wolf", "red wolf or maned wolf", "coyote", "dingo", "dhole", "African wild dog", "hyena", "red fox", "kit fox", "Arctic fox", "grey fox", "tabby cat", "tiger cat", "Persian cat", "Siamese cat", "Egyptian Mau", "cougar", "lynx", "leopard", "snow leopard", "jaguar", "lion", "tiger", "cheetah", "brown bear", "American black bear", "polar bear", "sloth bear", "mongoose", "meerkat", "tiger beetle", "ladybug", "ground beetle", "longhorn beetle", "leaf beetle", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant", "grasshopper", "cricket insect", "stick insect", "cockroach", "praying mantis", "cicada", "leafhopper", "lacewing", "dragonfly", "damselfly", "red admiral butterfly", "ringlet butterfly", "monarch butterfly", "small white butterfly", "sulphur butterfly", "gossamer-winged butterfly", "starfish", "sea urchin", "sea cucumber", "cottontail rabbit", "hare", "Angora rabbit", "hamster", "porcupine", "fox squirrel", "marmot", "beaver", "guinea pig", "common sorrel horse", "zebra", "pig", "wild boar", "warthog", "hippopotamus", "ox", "water buffalo", "bison", "ram (adult male sheep)", "bighorn sheep", "Alpine ibex", "hartebeest", "impala (antelope)", "gazelle", "arabian camel", "llama", "weasel", "mink", "European polecat", "black-footed ferret", "otter", "skunk", "badger", "armadillo", "three-toed sloth", "orangutan", "gorilla", "chimpanzee", "gibbon", "siamang", "guenon", "patas monkey", "baboon", "macaque", "langur", "black-and-white colobus", "proboscis monkey", "marmoset", "white-headed capuchin", "howler monkey", "titi monkey", "Geoffroy's spider monkey", "common squirrel monkey", "ring-tailed lemur", "indri", "Asian elephant", "African bush elephant", "red panda", "giant panda", "snoek fish", "eel", "silver salmon", "rock beauty fish", "clownfish", "sturgeon", "gar fish", "lionfish", "pufferfish", "abacus", "abaya", "academic gown", "accordion", "acoustic guitar", "aircraft carrier", "airliner", "airship", "altar", "ambulance", "amphibious vehicle", "analog clock", "apiary", "apron", "trash can", "assault rifle", "backpack", "bakery", "balance beam", "balloon", "ballpoint pen", "Band-Aid", "banjo", "baluster / handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel", "wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "swimming cap", "bath towel", "bathtub", "station wagon", "lighthouse", "beaker", "military hat (bearskin or shako)", "beer bottle", "beer glass", "bell tower", "baby bib", "tandem bicycle", "bikini", "ring binder", "binoculars", "birdhouse", "boathouse", "bobsleigh", "bolo tie", "poke bonnet", "bookcase", "bookstore", "bottle cap", "hunting bow", "bow tie", "brass memorial plaque", "bra", "breakwater", "breastplate", "broom", "bucket", "buckle", "bulletproof vest", "high-speed train", "butcher shop", "taxicab", "cauldron", "candle", "cannon", "canoe", "can opener", "cardigan", "car mirror", "carousel", "tool kit", "cardboard box / carton", "car wheel", "automated teller machine", "cassette", "cassette player", "castle", "catamaran", "CD player", "cello", "mobile phone", "chain", "chain-link fence", "chain mail", "chainsaw", 
"storage chest", "chiffonier", "bell or wind chime", "china cabinet", "Christmas stocking", "church", "movie theater", "cleaver", "cliff dwelling", "cloak", "clogs", "cocktail shaker", "coffee mug", "coffeemaker", "spiral or coil", "combination lock", "computer keyboard", "candy store", "container ship", "convertible", "corkscrew", "cornet", "cowboy boot", "cowboy hat", "cradle", "construction crane", "crash helmet", "crate", "infant bed", "Crock Pot", "croquet ball", "crutch", "cuirass", "dam", "desk", "desktop computer", "rotary dial telephone", "diaper", "digital clock", "digital watch", "dining table", "dishcloth", "dishwasher", "disc brake", "dock", "dog sled", "dome", "doormat", "drilling rig", "drum", "drumstick", "dumbbell", "Dutch oven", "electric fan", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso machine", "face powder", "feather boa", "filing cabinet", "fireboat", "fire truck", "fire screen", "flagpole", "flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster bed", "freight car", "French horn", "frying pan", "fur coat", "garbage truck", "gas mask or respirator", "gas pump", "goblet", "go-kart", "golf ball", "golf cart", "gondola", "gong", "gown", "grand piano", "greenhouse", "radiator grille", "grocery store", "guillotine", "hair clip", "hair spray", "half-track", "hammer", "hamper", "hair dryer", "hand-held computer", "handkerchief", "hard disk drive", "harmonica", "harp", "combine harvester", "hatchet", "holster", "home theater", "honeycomb", "hook", "hoop skirt", "gymnastic horizontal bar", "horse-drawn vehicle", "hourglass", "iPod", "clothes iron", "carved pumpkin", "jeans", "jeep", "T-shirt", "jigsaw puzzle", "rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat", "ladle", "lampshade", "laptop computer", "lawn mower", "lens cap", "letter opener", "library", "lifeboat", "lighter", "limousine", "ocean liner", "lipstick", "slip-on shoe", "lotion", "music speaker", "loupe magnifying glass", "sawmill", "magnetic compass", "messenger bag", "mailbox", "tights", "one-piece bathing suit", "manhole cover", "maraca", "marimba", "mask", "matchstick", "maypole", "maze", "measuring cup", "medicine cabinet", "megalith", "microphone", "microwave oven", "military uniform", "milk can", "minibus", "miniskirt", "minivan", "missile", "mitten", "mixing bowl", "mobile home", "ford model t", "modem", "monastery", "monitor", "moped", "mortar and pestle", "graduation cap", "mosque", "mosquito net", "vespa", "mountain bike", "tent", "computer mouse", "mousetrap", "moving van", "muzzle", "metal nail", "neck brace", "necklace", "baby pacifier", "notebook computer", "obelisk", "oboe", "ocarina", "odometer", "oil filter", "pipe organ", "oscilloscope", "overskirt", "bullock cart", "oxygen mask", "product packet / packaging", "paddle", "paddle wheel", "padlock", "paintbrush", "pajamas", "palace", "pan flute", "paper towel", "parachute", "parallel bars", "park bench", "parking meter", "railroad car", "patio", "payphone", "pedestal", "pencil case", "pencil sharpener", "perfume", "Petri dish", "photocopier", "plectrum", "Pickelhaube", "picket fence", "pickup truck", "pier", "piggy bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate ship", "drink pitcher", "block plane", "planetarium", "plastic bag", "plate rack", "farm plow", "plunger", "Polaroid camera", "pole", "police van", "poncho", "pool table", "soda bottle", "plant pot", "potter's wheel", "power drill", "prayer rug", "printer", "prison", 
"missile", "projector", "hockey puck", "punching bag", "purse", "quill", "quilt", "race car", "racket", "radiator", "radio", "radio telescope", "rain barrel", "recreational vehicle", "fishing casting reel", "reflex camera", "refrigerator", "remote control", "restaurant", "revolver", "rifle", "rocking chair", "rotisserie", "eraser", "rugby ball", "ruler measuring stick", "sneaker", "safe", "safety pin", "salt shaker", "sandal", "sarong", "saxophone", "scabbard", "weighing scale", "school bus", "schooner", "scoreboard", "CRT monitor", "screw", "screwdriver", "seat belt", "sewing machine", "shield", "shoe store", "shoji screen / room divider", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "balaclava ski mask", "sleeping bag", "slide rule", "sliding door", "slot machine", "snorkel", "snowmobile", "snowplow", "soap dispenser", "soccer ball", "sock", "solar thermal collector", "sombrero", "soup bowl", "keyboard space bar", "space heater", "space shuttle", "spatula", "motorboat", "spider web", "spindle", "sports car", "spotlight", "stage", "steam locomotive", "through arch bridge", "steel drum", "stethoscope", "scarf", "stone wall", "stopwatch", "stove", "strainer", "tram", "stretcher", "couch", "stupa", "submarine", "suit", "sundial", "sunglasses", "sunglasses", "sunscreen", "suspension bridge", "mop", "sweatshirt", "swim trunks / shorts", "swing", "electrical switch", "syringe", "table lamp", "tank", "tape player", "teapot", "teddy bear", "television", "tennis ball", "thatched roof", "front curtain", "thimble", "threshing machine", "throne", "tile roof", "toaster", "tobacco shop", "toilet seat", "torch", "totem pole", "tow truck", "toy store", "tractor", "semi-trailer truck", "tray", "trench coat", "tricycle", "trimaran", "tripod", "triumphal arch", "trolleybus", "trombone", "hot tub", "turnstile", "typewriter keyboard", "umbrella", "unicycle", "upright piano", "vacuum cleaner", "vase", "vaulted or arched ceiling", "velvet fabric", "vending machine", "vestment", "viaduct", "violin", "volleyball", "waffle iron", "wall clock", "wallet", "wardrobe", "military aircraft", "sink", "washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "hair wig", "window screen", "window shade", "Windsor tie", "wine bottle", "airplane wing", "wok", "wooden spoon", "wool", "split-rail fence", "shipwreck", "sailboat", "yurt", "website", "comic book", "crossword", "traffic or street sign", "traffic light", "dust jacket", "menu", "plate", "guacamole", "consomme", "hot pot", "trifle", "ice cream", "popsicle", "baguette", "bagel", "pretzel", "cheeseburger", "hot dog", "mashed potatoes", "cabbage", "broccoli", "cauliflower", "zucchini", "spaghetti squash", "acorn squash", "butternut squash", "cucumber", "artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith apple", "strawberry", "orange", "lemon", "fig", "pineapple", "banana", "jackfruit", "cherimoya (custard apple)", "pomegranate", "hay", "carbonara", "chocolate syrup", "dough", "meatloaf", "pizza", "pot pie", "burrito", "red wine", "espresso", "tea cup", "eggnog", "mountain", "bubble", "cliff", "coral reef", "geyser", "lakeshore", "promontory", "sandbar", "beach", "valley", "volcano", "baseball player", "bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper", "corn", "acorn", "rose hip", "horse chestnut seed", "coral fungus", "agaric", "gyromitra", "stinkhorn mushroom", "earth star fungus", "hen of the woods mushroom", "bolete", "corn cob", "toilet paper", ]
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/builders/imagefolder_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import os import shutil import warnings import lavis.common.utils as utils import torch.distributed as dist from lavis.common.dist_utils import is_dist_avail_and_initialized, is_main_process from lavis.common.registry import registry from lavis.datasets.data_utils import extract_archive from lavis.processors.base_processor import BaseProcessor from omegaconf import OmegaConf from torchvision.datasets.utils import download_url class BaseDatasetBuilder: train_dataset_cls, eval_dataset_cls = None, None def __init__(self, cfg=None): super().__init__() if cfg is None: # help to create datasets from default config. self.config = load_dataset_config(self.default_config_path()) elif isinstance(cfg, str): self.config = load_dataset_config(cfg) else: # when called from task.build_dataset() self.config = cfg self.data_type = self.config.data_type self.vis_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} self.text_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} def build_datasets(self): # download, split, etc... # only called on 1 GPU/TPU in distributed if is_main_process(): self._download_data() if is_dist_avail_and_initialized(): dist.barrier() # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") datasets = self.build() # dataset['train'/'val'/'test'] return datasets def build_processors(self): vis_proc_cfg = self.config.get("vis_processor") txt_proc_cfg = self.config.get("text_processor") if vis_proc_cfg is not None: vis_train_cfg = vis_proc_cfg.get("train") vis_eval_cfg = vis_proc_cfg.get("eval") self.vis_processors["train"] = self._build_proc_from_cfg(vis_train_cfg) self.vis_processors["eval"] = self._build_proc_from_cfg(vis_eval_cfg) if txt_proc_cfg is not None: txt_train_cfg = txt_proc_cfg.get("train") txt_eval_cfg = txt_proc_cfg.get("eval") self.text_processors["train"] = self._build_proc_from_cfg(txt_train_cfg) self.text_processors["eval"] = self._build_proc_from_cfg(txt_eval_cfg) @staticmethod def _build_proc_from_cfg(cfg): return ( registry.get_processor_class(cfg.name).from_config(cfg) if cfg is not None else None ) @classmethod def default_config_path(cls, type="default"): return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type]) def _download_data(self): self._download_ann() self._download_vis() def _download_ann(self): """ Download annotation files if necessary. All the vision-language datasets should have annotations of unified format. storage_path can be: (1) relative/absolute: will be prefixed with env.cache_root to make full path if relative. (2) basename/dirname: will be suffixed with base name of URL if dirname is provided. Local annotation paths should be relative. """ anns = self.config.build_info.annotations splits = anns.keys() cache_root = registry.get_path("cache_root") for split in splits: info = anns[split] urls, storage_paths = info.get("url", None), info.storage if isinstance(urls, str): urls = [urls] if isinstance(storage_paths, str): storage_paths = [storage_paths] assert len(urls) == len(storage_paths) for url_or_filename, storage_path in zip(urls, storage_paths): # if storage_path is relative, make it full by prefixing with cache_root. 
if not os.path.isabs(storage_path): storage_path = os.path.join(cache_root, storage_path) dirname = os.path.dirname(storage_path) if not os.path.exists(dirname): os.makedirs(dirname) if os.path.isfile(url_or_filename): src, dst = url_or_filename, storage_path if not os.path.exists(dst): shutil.copyfile(src=src, dst=dst) else: logging.info("Using existing file {}.".format(dst)) else: if os.path.isdir(storage_path): # if only dirname is provided, suffix with basename of URL. raise ValueError( "Expecting storage_path to be a file path, got directory {}".format( storage_path ) ) else: filename = os.path.basename(storage_path) download_url(url=url_or_filename, root=dirname, filename=filename) def _download_vis(self): storage_path = self.config.build_info.get(self.data_type).storage storage_path = utils.get_cache_path(storage_path) if not os.path.exists(storage_path): warnings.warn( f""" The specified path {storage_path} for visual inputs does not exist. Please provide a correct path to the visual inputs or refer to datasets/download_scripts/README.md for downloading instructions. """ ) def build(self): """ Create by split datasets inheriting torch.utils.data.Datasets. # build() can be dataset-specific. Overwrite to customize. """ self.build_processors() build_info = self.config.build_info ann_info = build_info.annotations vis_info = build_info.get(self.data_type) datasets = dict() for split in ann_info.keys(): if split not in ["train", "val", "test"]: continue is_train = split == "train" # processors vis_processor = ( self.vis_processors["train"] if is_train else self.vis_processors["eval"] ) text_processor = ( self.text_processors["train"] if is_train else self.text_processors["eval"] ) # annotation path ann_paths = ann_info.get(split).storage if isinstance(ann_paths, str): ann_paths = [ann_paths] abs_ann_paths = [] for ann_path in ann_paths: if not os.path.isabs(ann_path): ann_path = utils.get_cache_path(ann_path) abs_ann_paths.append(ann_path) ann_paths = abs_ann_paths # visual data storage path vis_path = vis_info.storage if not os.path.isabs(vis_path): # vis_path = os.path.join(utils.get_cache_path(), vis_path) vis_path = utils.get_cache_path(vis_path) if not os.path.exists(vis_path): warnings.warn("storage path {} does not exist.".format(vis_path)) # create datasets dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls datasets[split] = dataset_cls( vis_processor=vis_processor, text_processor=text_processor, ann_paths=ann_paths, vis_root=vis_path, ) return datasets def load_dataset_config(cfg_path): cfg = OmegaConf.load(cfg_path).datasets cfg = cfg[list(cfg.keys())[0]] return cfg
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/builders/base_dataset_builder.py
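As a quick orientation, the concrete builders elsewhere in this package all follow the same pattern on top of BaseDatasetBuilder. The sketch below is a schematic example of that pattern; the registry key "my_pairs", the builder class, and the YAML path are placeholders rather than real entries in the repo.

# Schematic example of how concrete builders extend BaseDatasetBuilder.
# "my_pairs" and the config path are hypothetical, not real LAVIS entries.
from lavis.common.registry import registry
from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from lavis.datasets.datasets.image_text_pair_datasets import ImageTextPairDataset


@registry.register_builder("my_pairs")  # hypothetical registry key
class MyPairsBuilder(BaseDatasetBuilder):
    train_dataset_cls = ImageTextPairDataset
    eval_dataset_cls = ImageTextPairDataset

    # Maps config "type" -> YAML file; "default" is resolved by default_config_path().
    DATASET_CONFIG_DICT = {"default": "configs/datasets/my_pairs/defaults.yaml"}


# builder = MyPairsBuilder()            # loads the default YAML config
# datasets = builder.build_datasets()   # downloads on rank 0 only, then builds splits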
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.common.registry import registry from lavis.common.utils import get_cache_path from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.video_vqa_datasets import VideoQADataset class VideoQABuilder(BaseDatasetBuilder): train_dataset_cls = VideoQADataset eval_dataset_cls = VideoQADataset def build(self): datasets = super().build() ans2label = self.config.build_info.annotations.get("ans2label") if ans2label is None: raise ValueError("ans2label is not specified in build_info.") ans2label = get_cache_path(ans2label.storage) for split in datasets: datasets[split]._build_class_labels(ans2label) return datasets @registry.register_builder("msrvtt_qa") class MSRVTTQABuilder(VideoQABuilder): DATASET_CONFIG_DICT = { "default": "configs/datasets/msrvtt/defaults_qa.yaml", } @registry.register_builder("msvd_qa") class MSVDQABuilder(VideoQABuilder): DATASET_CONFIG_DICT = { "default": "configs/datasets/msvd/defaults_qa.yaml", }
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/builders/video_qa_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.datasets.builders.base_dataset_builder import load_dataset_config from lavis.datasets.builders.caption_builder import ( COCOCapBuilder, MSRVTTCapBuilder, MSVDCapBuilder, VATEXCapBuilder, ) from lavis.datasets.builders.image_text_pair_builder import ( ConceptualCaption12MBuilder, ConceptualCaption3MBuilder, VGCaptionBuilder, SBUCaptionBuilder, ) from lavis.datasets.builders.classification_builder import ( NLVRBuilder, SNLIVisualEntailmentBuilder, ) from lavis.datasets.builders.imagefolder_builder import ImageNetBuilder from lavis.datasets.builders.video_qa_builder import MSRVTTQABuilder, MSVDQABuilder from lavis.datasets.builders.vqa_builder import ( COCOVQABuilder, OKVQABuilder, VGVQABuilder, GQABuilder, ) from lavis.datasets.builders.retrieval_builder import ( MSRVTTRetrievalBuilder, DiDeMoRetrievalBuilder, COCORetrievalBuilder, Flickr30kBuilder, ) from lavis.datasets.builders.dialogue_builder import AVSDDialBuilder from lavis.common.registry import registry __all__ = [ "COCOCapBuilder", "COCORetrievalBuilder", "COCOVQABuilder", "ConceptualCaption12MBuilder", "ConceptualCaption3MBuilder", "DiDeMoRetrievalBuilder", "Flickr30kBuilder", "GQABuilder", "ImageNetBuilder", "MSRVTTCapBuilder", "MSRVTTQABuilder", "MSRVTTRetrievalBuilder", "MSVDCapBuilder", "MSVDQABuilder", "NLVRBuilder", "OKVQABuilder", "SBUCaptionBuilder", "SNLIVisualEntailmentBuilder", "VATEXCapBuilder", "VGCaptionBuilder", "VGVQABuilder", "AVSDDialBuilder", ] def load_dataset(name, cfg_path=None, vis_path=None, data_type=None): """ Example >>> dataset = load_dataset("coco_caption", cfg=None) >>> splits = dataset.keys() >>> print([len(dataset[split]) for split in splits]) """ if cfg_path is None: cfg = None else: cfg = load_dataset_config(cfg_path) try: builder = registry.get_builder_class(name)(cfg) except TypeError: print( f"Dataset {name} not found. Available datasets:\n" + ", ".join([str(k) for k in dataset_zoo.get_names()]) ) exit(1) if vis_path is not None: if data_type is None: # use default data type in the config data_type = builder.config.data_type assert ( data_type in builder.config.build_info ), f"Invalid data_type {data_type} for {name}." builder.config.build_info.get(data_type).storage = vis_path dataset = builder.build_datasets() return dataset class DatasetZoo: def __init__(self) -> None: self.dataset_zoo = { k: list(v.DATASET_CONFIG_DICT.keys()) for k, v in sorted(registry.mapping["builder_name_mapping"].items()) } def get_names(self): return list(self.dataset_zoo.keys()) dataset_zoo = DatasetZoo()
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/builders/__init__.py
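A minimal usage sketch for the helpers above; the visual-input path is a placeholder and the call assumes the annotation files can be fetched or found in the default cache.

# Usage sketch for dataset_zoo and load_dataset defined above.
from lavis.datasets.builders import dataset_zoo, load_dataset

print(dataset_zoo.get_names())  # all registered builder names

# Build every split of a registered dataset, pointing the visual inputs at a
# custom location instead of the default cache path (placeholder path below).
coco_caption = load_dataset("coco_caption", vis_path="/path/to/coco/images")
print({split: len(ds) for split, ds in coco_caption.items()})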
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.retrieval_datasets import ( RetrievalDataset, RetrievalEvalDataset, VideoRetrievalDataset, VideoRetrievalEvalDataset, ) from lavis.common.registry import registry @registry.register_builder("msrvtt_retrieval") class MSRVTTRetrievalBuilder(BaseDatasetBuilder): train_dataset_cls = VideoRetrievalDataset eval_dataset_cls = VideoRetrievalEvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/msrvtt/defaults_ret.yaml"} @registry.register_builder("didemo_retrieval") class DiDeMoRetrievalBuilder(BaseDatasetBuilder): train_dataset_cls = VideoRetrievalDataset eval_dataset_cls = VideoRetrievalEvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/didemo/defaults_ret.yaml"} @registry.register_builder("coco_retrieval") class COCORetrievalBuilder(BaseDatasetBuilder): train_dataset_cls = RetrievalDataset eval_dataset_cls = RetrievalEvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/coco/defaults_ret.yaml"} @registry.register_builder("flickr30k") class Flickr30kBuilder(BaseDatasetBuilder): train_dataset_cls = RetrievalDataset eval_dataset_cls = RetrievalEvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/flickr30k/defaults.yaml"}
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/builders/retrieval_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.common.registry import registry from lavis.datasets.datasets.aok_vqa_datasets import AOKVQADataset, AOKVQAEvalDataset from lavis.datasets.datasets.coco_vqa_datasets import COCOVQADataset, COCOVQAEvalDataset from lavis.datasets.datasets.vg_vqa_datasets import VGVQADataset from lavis.datasets.datasets.gqa_datasets import GQADataset, GQAEvalDataset @registry.register_builder("coco_vqa") class COCOVQABuilder(BaseDatasetBuilder): train_dataset_cls = COCOVQADataset eval_dataset_cls = COCOVQAEvalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco/defaults_vqa.yaml", "eval": "configs/datasets/coco/eval_vqa.yaml", } @registry.register_builder("vg_vqa") class VGVQABuilder(BaseDatasetBuilder): train_dataset_cls = VGVQADataset DATASET_CONFIG_DICT = {"default": "configs/datasets/vg/defaults_vqa.yaml"} @registry.register_builder("ok_vqa") class OKVQABuilder(COCOVQABuilder): DATASET_CONFIG_DICT = { "default": "configs/datasets/okvqa/defaults.yaml", } @registry.register_builder("aok_vqa") class AOKVQABuilder(BaseDatasetBuilder): train_dataset_cls = AOKVQADataset eval_dataset_cls = AOKVQAEvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/aokvqa/defaults.yaml"} @registry.register_builder("gqa") class GQABuilder(BaseDatasetBuilder): train_dataset_cls = GQADataset eval_dataset_cls = GQAEvalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/gqa/defaults.yaml", "balanced_val": "configs/datasets/gqa/balanced_val.yaml", "balanced_testdev": "configs/datasets/gqa/balanced_testdev.yaml", }
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/builders/vqa_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.common.registry import registry from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.avsd_dialogue_datasets import ( AVSDDialDataset, AVSDDialEvalDataset, ) @registry.register_builder("avsd_dialogue") class AVSDDialBuilder(BaseDatasetBuilder): train_dataset_cls = AVSDDialDataset eval_dataset_cls = AVSDDialEvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/avsd/defaults_dial.yaml"}
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/builders/dialogue_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os from lavis.common.registry import registry from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.image_text_pair_datasets import ImageTextPairDataset from lavis.datasets.datasets.laion_dataset import LaionDataset @registry.register_builder("conceptual_caption_3m") class ConceptualCaption3MBuilder(BaseDatasetBuilder): train_dataset_cls = ImageTextPairDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/conceptual_caption/defaults_3m.yaml" } @registry.register_builder("conceptual_caption_12m") class ConceptualCaption12MBuilder(BaseDatasetBuilder): train_dataset_cls = ImageTextPairDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/conceptual_caption/defaults_12m.yaml" } @registry.register_builder("sbu_caption") class SBUCaptionBuilder(BaseDatasetBuilder): train_dataset_cls = ImageTextPairDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/sbu_caption/defaults.yaml"} @registry.register_builder("vg_caption") class VGCaptionBuilder(BaseDatasetBuilder): train_dataset_cls = ImageTextPairDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/vg/defaults_caption.yaml"} @registry.register_builder("laion2B_multi") class Laion2BMultiBuilder(BaseDatasetBuilder): train_dataset_cls = LaionDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults_2B_multi.yaml"} def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" # laion dataset only has train split # create datasets # [NOTE] return inner_datasets (wds.DataPipeline) dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], location=build_info.storage, ).inner_dataset return datasets
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/builders/image_text_pair_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.coco_caption_datasets import ( COCOCapDataset, COCOCapEvalDataset, NoCapsEvalDataset, ) from lavis.common.registry import registry from lavis.datasets.datasets.video_caption_datasets import ( VideoCaptionDataset, VideoCaptionEvalDataset, ) @registry.register_builder("coco_caption") class COCOCapBuilder(BaseDatasetBuilder): train_dataset_cls = COCOCapDataset eval_dataset_cls = COCOCapEvalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/coco/defaults_cap.yaml", } @registry.register_builder("nocaps") class COCOCapBuilder(BaseDatasetBuilder): eval_dataset_cls = NoCapsEvalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/nocaps/defaults.yaml", } @registry.register_builder("msrvtt_caption") class MSRVTTCapBuilder(BaseDatasetBuilder): train_dataset_cls = VideoCaptionDataset eval_dataset_cls = VideoCaptionEvalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/msrvtt/defaults_cap.yaml", } @registry.register_builder("msvd_caption") class MSVDCapBuilder(BaseDatasetBuilder): train_dataset_cls = VideoCaptionDataset eval_dataset_cls = VideoCaptionEvalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/msvd/defaults_cap.yaml", } @registry.register_builder("vatex_caption") class VATEXCapBuilder(BaseDatasetBuilder): train_dataset_cls = VideoCaptionDataset eval_dataset_cls = VideoCaptionEvalDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/vatex/defaults_cap.yaml", }
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/builders/caption_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from lavis.common.registry import registry from lavis.datasets.builders.base_dataset_builder import BaseDatasetBuilder from lavis.datasets.datasets.nlvr_datasets import NLVRDataset, NLVREvalDataset from lavis.datasets.datasets.snli_ve_datasets import SNLIVisualEntialmentDataset @registry.register_builder("nlvr") class NLVRBuilder(BaseDatasetBuilder): train_dataset_cls = NLVRDataset eval_dataset_cls = NLVREvalDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/nlvr/defaults.yaml"} @registry.register_builder("snli_ve") class SNLIVisualEntailmentBuilder(BaseDatasetBuilder): train_dataset_cls = SNLIVisualEntialmentDataset eval_dataset_cls = SNLIVisualEntialmentDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/snli_ve/defaults.yaml"}
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/builders/classification_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os from collections import OrderedDict from lavis.datasets.datasets.base_dataset import BaseDataset from PIL import Image from torchvision import datasets class ImageFolderDataset(BaseDataset): def __init__(self, vis_processor, vis_root, classnames=[], **kwargs): super().__init__(vis_processor=vis_processor, vis_root=vis_root) self.inner_dataset = datasets.ImageFolder(vis_root) self.annotation = [ {"image": elem[0], "label": elem[1], "image_id": elem[0]} for elem in self.inner_dataset.imgs ] self.classnames = classnames self._add_instance_ids() def __len__(self): return len(self.inner_dataset) def __getitem__(self, index): ann = self.annotation[index] img_fn = ann["image"] image_path = os.path.join(self.vis_root, img_fn) image = Image.open(image_path).convert("RGB") image = self.vis_processor(image) return { "image": image, "label": ann["label"], "image_id": ann["image_id"], "instance_id": ann["instance_id"], } def displ_item(self, index): sample, ann = self.__getitem__(index), self.annotation[index] return OrderedDict( { "file": ann["image"], "label": self.classnames[ann["label"]], "image": sample["image"], } )
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/datasets/imagefolder_dataset.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import webdataset as wds from lavis.datasets.datasets.base_dataset import BaseDataset class LaionDataset(BaseDataset): def __init__(self, vis_processor, text_processor, location): super().__init__(vis_processor=vis_processor, text_processor=text_processor) self.inner_dataset = wds.DataPipeline( wds.ResampledShards(location), wds.tarfile_to_samples(handler=wds.warn_and_continue), wds.shuffle(1000, handler=wds.warn_and_continue), wds.decode("pilrgb", handler=wds.warn_and_continue), wds.to_tuple("jpg", "json", handler=wds.warn_and_continue), wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue), wds.map(self.to_dict, handler=wds.warn_and_continue), ) def to_dict(self, sample): return { "image": sample[0], "text_input": self.text_processor(sample[1]["caption"]), } if __name__ == "__main__": from torchvision import transforms def to_image_text_pair(sample): return sample[0], sample[1]["caption"] normalize = transforms.Normalize( (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711) ) transform_train = transforms.Compose( [ transforms.RandomResizedCrop(256, scale=(0.2, 1.0)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize, ] ) dataset = LaionDataset( vis_processor=transform_train, text_processor=lambda x: x, location="/export/laion/laion2B-multi/part-00000/{00000..01743}.tar", ) import torch loader = torch.utils.data.DataLoader(dataset.inner_dataset, batch_size=2) print(next(iter(loader))["text_input"])
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/datasets/laion_dataset.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import json from typing import Iterable from torch.utils.data import Dataset, ConcatDataset from torch.utils.data.dataloader import default_collate class BaseDataset(Dataset): def __init__( self, vis_processor=None, text_processor=None, vis_root=None, ann_paths=[] ): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file """ self.vis_root = vis_root self.annotation = [] for ann_path in ann_paths: self.annotation.extend(json.load(open(ann_path, "r"))) self.vis_processor = vis_processor self.text_processor = text_processor self._add_instance_ids() def __len__(self): return len(self.annotation) def collater(self, samples): return default_collate(samples) def set_processors(self, vis_processor, text_processor): self.vis_processor = vis_processor self.text_processor = text_processor def _add_instance_ids(self, key="instance_id"): for idx, ann in enumerate(self.annotation): ann[key] = str(idx) class ConcatDataset(ConcatDataset): def __init__(self, datasets: Iterable[Dataset]) -> None: super().__init__(datasets) def collater(self, samples): # TODO For now only supports datasets with same underlying collater implementations all_keys = set() for s in samples: all_keys.update(s) shared_keys = all_keys for s in samples: shared_keys = shared_keys & set(s.keys()) samples_shared_keys = [] for s in samples: samples_shared_keys.append({k: s[k] for k in s.keys() if k in shared_keys}) return self.datasets[0].collater(samples_shared_keys)
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/datasets/base_dataset.py
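The shared-key behaviour of ConcatDataset.collater above is easy to miss: keys not present in every sample are dropped before collation. The self-contained sketch below (with made-up sample dicts) replays that logic with default_collate.

# Sketch of what ConcatDataset.collater does with heterogeneous samples: keep
# only the keys shared by every sample, then delegate to a collater
# (default_collate here). The sample dicts are made up for illustration.
import torch
from torch.utils.data.dataloader import default_collate

samples = [
    {"image": torch.zeros(3, 4, 4), "text_input": "a", "label": 0},
    {"image": torch.ones(3, 4, 4), "text_input": "b"},  # no "label" key
]

shared_keys = set(samples[0])
for s in samples[1:]:
    shared_keys &= set(s)

batch = default_collate([{k: s[k] for k in shared_keys} for s in samples])
print(sorted(batch.keys()))  # ['image', 'text_input'] -- 'label' is dropped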
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os import random from collections import OrderedDict from lavis.datasets.datasets.multimodal_classification_datasets import ( MultimodalClassificationDataset, ) from PIL import Image class __DisplMixin: def displ_item(self, index): sample, ann = self.__getitem__(index), self.annotation[index] return OrderedDict( { "file_L": ann["images"][0], "file_R": ann["images"][1], "sentence": ann["sentence"], "label": ann["label"], "image": [sample["image0"], sample["image1"]], } ) class NLVRDataset(MultimodalClassificationDataset, __DisplMixin): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): super().__init__(vis_processor, text_processor, vis_root, ann_paths) self.class_labels = self._build_class_labels() def _build_class_labels(self): return {"False": 0, "True": 1} @staticmethod def _flip(samples): sentence = samples["text_input"] image0, image1 = samples["image0"], samples["image1"] if "left" not in sentence and "right" not in sentence: if random.random() < 0.5: image0, image1 = image1, image0 else: if random.random() < 0.5: sentence = sentence.replace("left", "[TEMP_TOKEN]") sentence = sentence.replace("right", "left") sentence = sentence.replace("[TEMP_TOKEN]", "right") image0, image1 = image1, image0 samples["text_input"] = sentence samples["image0"] = image0 samples["image1"] = image1 return samples def __getitem__(self, index): ann = self.annotation[index] image0_path = os.path.join(self.vis_root, ann["images"][0]) image0 = Image.open(image0_path).convert("RGB") image0 = self.vis_processor(image0) image1_path = os.path.join(self.vis_root, ann["images"][1]) image1 = Image.open(image1_path).convert("RGB") image1 = self.vis_processor(image1) sentence = self.text_processor(ann["sentence"]) label = self.class_labels[ann["label"]] return self._flip( { "image0": image0, "image1": image1, "text_input": sentence, "label": label, # "image_id": ann["image_id"], "instance_id": ann["instance_id"], } ) class NLVREvalDataset(NLVRDataset): @staticmethod def _flip(samples): return samples
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/datasets/nlvr_datasets.py
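The left/right-aware flip in NLVRDataset._flip is the only non-obvious step in this file: when the sentence mentions "left" or "right", swapping the two images also swaps the words so the label stays consistent. The sketch below replays the sentence rewrite on a made-up example.

# Standalone sketch of the word swap performed by NLVRDataset._flip.
sentence = "the dog is to the left of the cat"

flipped = sentence.replace("left", "[TEMP_TOKEN]")
flipped = flipped.replace("right", "left")
flipped = flipped.replace("[TEMP_TOKEN]", "right")

print(flipped)  # "the dog is to the right of the cat"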
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import time import random import torch from lavis.datasets.data_utils import move_to_cuda from torch.utils.data import DataLoader class MultiIterLoader: """ A simple wrapper for iterating over multiple iterators. Args: loaders (List[Loader]): List of Iterator loaders. ratios (List[float]): List of ratios to sample from each loader. If None, all loaders are sampled uniformly. """ def __init__(self, loaders, ratios=None): # assert all loaders has __next__ method for loader in loaders: assert hasattr( loader, "__next__" ), "Loader {} has no __next__ method.".format(loader) if ratios is None: ratios = [1.0] * len(loaders) else: assert len(ratios) == len(loaders) ratios = [float(ratio) / sum(ratios) for ratio in ratios] self.loaders = loaders self.ratios = ratios def __next__(self): # random sample from each loader by ratio loader_idx = random.choices(range(len(self.loaders)), self.ratios, k=1)[0] return next(self.loaders[loader_idx]) class PrefetchLoader(object): """ Modified from https://github.com/ChenRocks/UNITER. overlap compute and cuda data transfer (copied and then modified from nvidia apex) """ def __init__(self, loader): self.loader = loader self.stream = torch.cuda.Stream() def __iter__(self): loader_it = iter(self.loader) self.preload(loader_it) batch = self.next(loader_it) while batch is not None: is_tuple = isinstance(batch, tuple) if is_tuple: task, batch = batch if is_tuple: yield task, batch else: yield batch batch = self.next(loader_it) def __len__(self): return len(self.loader) def preload(self, it): try: self.batch = next(it) except StopIteration: self.batch = None return # if record_stream() doesn't work, another option is to make sure # device inputs are created on the main stream. # self.next_input_gpu = torch.empty_like(self.next_input, # device='cuda') # self.next_target_gpu = torch.empty_like(self.next_target, # device='cuda') # Need to make sure the memory allocated for next_* is not still in use # by the main stream at the time we start copying to next_*: # self.stream.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(self.stream): self.batch = move_to_cuda(self.batch) # more code for the alternative if record_stream() doesn't work: # copy_ will record the use of the pinned source tensor in this # side stream. # self.next_input_gpu.copy_(self.next_input, non_blocking=True) # self.next_target_gpu.copy_(self.next_target, non_blocking=True) # self.next_input = self.next_input_gpu # self.next_target = self.next_target_gpu def next(self, it): torch.cuda.current_stream().wait_stream(self.stream) batch = self.batch if batch is not None: record_cuda_stream(batch) self.preload(it) return batch def __getattr__(self, name): method = self.loader.__getattribute__(name) return method def record_cuda_stream(batch): if isinstance(batch, torch.Tensor): batch.record_stream(torch.cuda.current_stream()) elif isinstance(batch, list) or isinstance(batch, tuple): for t in batch: record_cuda_stream(t) elif isinstance(batch, dict): for t in batch.values(): record_cuda_stream(t) else: pass class IterLoader: """ A wrapper to convert DataLoader as an infinite iterator. 
Modified from: https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/iter_based_runner.py """ def __init__(self, dataloader: DataLoader, use_distributed: bool = False): self._dataloader = dataloader self.iter_loader = iter(self._dataloader) self._use_distributed = use_distributed self._epoch = 0 @property def epoch(self) -> int: return self._epoch def __next__(self): try: data = next(self.iter_loader) except StopIteration: self._epoch += 1 if hasattr(self._dataloader.sampler, "set_epoch") and self._use_distributed: self._dataloader.sampler.set_epoch(self._epoch) time.sleep(2) # Prevent possible deadlock during epoch transition self.iter_loader = iter(self._dataloader) data = next(self.iter_loader) return data def __iter__(self): return self def __len__(self): return len(self._dataloader)
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/datasets/dataloader_utils.py
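A usage sketch for IterLoader and MultiIterLoader above, built on two toy DataLoaders; PrefetchLoader is left out because it requires a CUDA device.

# Usage sketch for the loader wrappers defined above. The datasets below are
# toy tensors, purely for illustration.
import torch
from torch.utils.data import DataLoader, TensorDataset

from lavis.datasets.datasets.dataloader_utils import IterLoader, MultiIterLoader

small = DataLoader(TensorDataset(torch.arange(4)), batch_size=2)
large = DataLoader(TensorDataset(torch.arange(100)), batch_size=2)

# IterLoader restarts its underlying DataLoader when exhausted, so next() can
# be called indefinitely; MultiIterLoader then samples between the two streams
# with probability proportional to the given ratios.
loader = MultiIterLoader(
    loaders=[IterLoader(small), IterLoader(large)],
    ratios=[1, 9],  # ~10% of batches from `small`, ~90% from `large`
)

for _ in range(5):
    print(next(loader))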
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os from PIL import Image from lavis.datasets.datasets.vqa_datasets import VQADataset class VGVQADataset(VQADataset): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): super().__init__(vis_processor, text_processor, vis_root, ann_paths) def __getitem__(self, index): ann = self.annotation[index] image_path = os.path.join(self.vis_root, ann["image"]) image = Image.open(image_path).convert("RGB") image = self.vis_processor(image) question = self.text_processor(ann["question"]) answers = [ann["answer"]] # TODO this should be configured better weights = [0.2] return { "image": image, "text_input": question, "answers": answers, "weights": weights, }
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/datasets/vg_vqa_datasets.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from abc import abstractmethod from lavis.datasets.datasets.base_dataset import BaseDataset class MultimodalClassificationDataset(BaseDataset): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): super().__init__(vis_processor, text_processor, vis_root, ann_paths) self.class_labels = None @abstractmethod def _build_class_labels(self): pass
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/datasets/multimodal_classification_datasets.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os from collections import OrderedDict from lavis.datasets.datasets.base_dataset import BaseDataset from PIL import Image class __DisplMixin: def displ_item(self, index): sample, ann = self.__getitem__(index), self.annotation[index] return OrderedDict( { "file": os.path.basename(ann["image"]), "caption": ann["caption"], "image": sample["image"], } ) class ImageTextPairDataset(BaseDataset, __DisplMixin): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file """ super().__init__(vis_processor, text_processor, vis_root, ann_paths) def __getitem__(self, index): # TODO this assumes image input, not general enough ann = self.annotation[index] image_path = os.path.join(self.vis_root, ann["image"]) image = Image.open(image_path).convert("RGB") image = self.vis_processor(image) caption = self.text_processor(ann["caption"]) return {"image": image, "text_input": caption}
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/datasets/image_text_pair_datasets.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os from lavis.datasets.datasets.base_dataset import BaseDataset from lavis.datasets.datasets.caption_datasets import CaptionDataset class VideoCaptionDataset(CaptionDataset): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file split (string): val or test """ super().__init__(vis_processor, text_processor, vis_root, ann_paths) def __getitem__(self, index): ann = self.annotation[index] vname = ann["video"] video_path = os.path.join(self.vis_root, vname) video = self.vis_processor(video_path) caption = self.text_processor(ann["caption"]) # "image_id" is kept to stay compatible with the COCO evaluation format return { "video": video, "text_input": caption, "image_id": self.img_ids[ann["image_id"]], } class VideoCaptionEvalDataset(BaseDataset): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file split (string): val or test """ super().__init__(vis_processor, text_processor, vis_root, ann_paths) def __getitem__(self, index): ann = self.annotation[index] vname = ann["video"] video_path = os.path.join(self.vis_root, vname) video = self.vis_processor(video_path) return { "video": video, "image_id": ann["image_id"], "instance_id": ann["instance_id"], }
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/datasets/video_caption_datasets.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import json import os from collections import OrderedDict from lavis.datasets.datasets.multimodal_classification_datasets import ( MultimodalClassificationDataset, ) class __DisplMixin: def displ_item(self, index): ann = self.annotation[index] vname = ann["video"] vpath = os.path.join(self.vis_root, vname) return OrderedDict( {"file": vpath, "question": ann["question"], "answer": ann["answer"]} ) class VideoQADataset(MultimodalClassificationDataset, __DisplMixin): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): super().__init__(vis_processor, text_processor, vis_root, ann_paths) def _build_class_labels(self, ans_path): ans2label = json.load(open(ans_path)) self.class_labels = ans2label def _get_answer_label(self, answer): if answer in self.class_labels: return self.class_labels[answer] else: return len(self.class_labels) def __getitem__(self, index): assert ( self.class_labels ), f"class_labels of {__class__.__name__} is not built yet." ann = self.annotation[index] vname = ann["video"] vpath = os.path.join(self.vis_root, vname) frms = self.vis_processor(vpath) question = self.text_processor(ann["question"]) return { "video": frms, "text_input": question, "answers": self._get_answer_label(ann["answer"]), "question_id": ann["question_id"], "instance_id": ann["instance_id"], }
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/datasets/video_vqa_datasets.py
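The answer-to-label convention in VideoQADataset (_build_class_labels and _get_answer_label) reserves one extra index for out-of-vocabulary answers. The sketch below reproduces that mapping with a made-up ans2label dictionary.

# Sketch of the answer-to-label mapping: in-vocabulary answers get their
# index, anything unseen maps to len(class_labels), i.e. an "unknown" bucket.
# The mapping here is made up for illustration.
class_labels = {"yes": 0, "no": 1, "blue": 2}

def get_answer_label(answer):
    return class_labels.get(answer, len(class_labels))

print(get_answer_label("no"))       # 1
print(get_answer_label("penguin"))  # 3 -> out-of-vocabulary bucket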
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os from collections import OrderedDict from PIL import Image from lavis.datasets.datasets.base_dataset import BaseDataset import json import copy class __DisplMixin: def displ_item(self, index): sample, ann = self.__getitem__(index), self.annotation[index] return OrderedDict( { "file": ann["image"], "dialogue": ann["dialogue"], "image": sample["image"], } ) class DialogueDataset(BaseDataset, __DisplMixin): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file """ self.vis_root = vis_root self.annotation = [] for ann_path in ann_paths: dialogs = json.load(open(ann_path, "r"))["dialogs"] for dialog in dialogs: all_turns = dialog["dialog"] dialogue_context = [] for turn in all_turns: dialog_instance = copy.deepcopy(dialog) question = turn["question"] answer = turn["answer"] dialog_instance["dialog"] = copy.deepcopy(dialogue_context) dialog_instance["question"] = question dialog_instance["answer"] = answer self.annotation.append(dialog_instance) dialogue_context.append(turn) self.vis_processor = vis_processor self.text_processor = text_processor self._add_instance_ids() self.img_ids = {} n = 0 for ann in self.annotation: img_id = ann["image_id"] if img_id not in self.img_ids.keys(): self.img_ids[img_id] = n n += 1 def __getitem__(self, index): ann = self.annotation[index] image_path = os.path.join(self.vis_root, ann["image"]) image = Image.open(image_path).convert("RGB") image = self.vis_processor(image) caption = self.text_processor(ann["caption"]) return { "image": image, "text_input": caption, "image_id": self.img_ids[ann["image_id"]], } class DialogueEvalDataset(BaseDataset, __DisplMixin): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file split (string): val or test """ self.vis_root = vis_root self.annotation = [] for ann_path in ann_paths: dialogs = json.load(open(ann_path, "r"))["dialogs"] for dialog in dialogs: all_turns = dialog["dialog"] dialogue_context = all_turns[:-1] last_turn = all_turns[-1] question = last_turn["question"] answer = last_turn["answer"] dialog["dialog"] = dialogue_context dialog["question"] = question dialog["answer"] = answer self.annotation.append(dialog) self.vis_processor = vis_processor self.text_processor = text_processor self._add_instance_ids() self.img_ids = {} n = 0 for ann in self.annotation: img_id = ann["image_id"] if img_id not in self.img_ids.keys(): self.img_ids[img_id] = n n += 1 def __getitem__(self, index): ann = self.annotation[index] image_path = os.path.join(self.vis_root, ann["image"]) image = Image.open(image_path).convert("RGB") image = self.vis_processor(image) return { "image": image, "image_id": ann["image_id"], "instance_id": ann["instance_id"], }
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/datasets/dialogue_datasets.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os import json from PIL import Image from PIL import ImageFile ImageFile.LOAD_TRUNCATED_IMAGES = True from lavis.datasets.datasets.caption_datasets import CaptionDataset, CaptionEvalDataset COCOCapDataset = CaptionDataset class COCOCapEvalDataset(CaptionEvalDataset): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file split (string): val or test """ super().__init__(vis_processor, text_processor, vis_root, ann_paths) def __getitem__(self, index): ann = self.annotation[index] image_path = os.path.join(self.vis_root, ann["image"]) image = Image.open(image_path).convert("RGB") image = self.vis_processor(image) img_id = ann["image"].split("/")[-1].strip(".jpg").split("_")[-1] return { "image": image, "image_id": img_id, "instance_id": ann["instance_id"], } class NoCapsEvalDataset(CaptionEvalDataset): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file split (string): val or test """ super().__init__(vis_processor, text_processor, vis_root, ann_paths) def __getitem__(self, index): ann = self.annotation[index] image_path = os.path.join(self.vis_root, ann["image"]) image = Image.open(image_path).convert("RGB") image = self.vis_processor(image) img_id = ann["img_id"] return { "image": image, "image_id": img_id, "instance_id": ann["instance_id"], }
EXA-1-master
exa/libraries/LAVIS/lavis/datasets/datasets/coco_caption_datasets.py
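As a small sanity check on COCOCapEvalDataset.__getitem__, the image-id parsing can be traced for a typical COCO file name (the name below is just an example).

# Quick check of the image-id parsing in COCOCapEvalDataset.__getitem__.
fname = "coco/images/val2014/COCO_val2014_000000391895.jpg"

img_id = fname.split("/")[-1].strip(".jpg").split("_")[-1]
print(img_id)  # "000000391895"

# Note: str.strip(".jpg") strips a *set* of characters rather than the suffix;
# this happens to work for COCO-style names but would also trim trailing
# '.', 'j', 'p' or 'g' characters from other naming schemes.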