# Copyright 2024-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import warnings
from dataclasses import asdict
from enum import Enum
from typing import Optional
import torch
import torch.nn as nn
from tqdm import tqdm
from transformers.pytorch_utils import Conv1D
from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists
from peft.utils import TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _get_submodules
from .config import VBLoRAConfig
from .layer import Linear, VBLoRALayer
class VBLoRAModel(BaseTuner):
"""
Creates VBLoRA model from a pretrained transformers model.
The method is described in detail in https://arxiv.org/abs/2405.15179.
Args:
model ([`~transformers.PreTrainedModel`]): The model to be adapted.
config ([`VBLoRAConfig`]): The configuration of the VBLoRA model.
adapter_name (`str`): The name of the adapter, defaults to `"default"`.
low_cpu_mem_usage (`bool`, `optional`, defaults to `False`):
Create empty adapter weights on meta device. Useful to speed up the loading process.
Returns:
`torch.nn.Module`: The VBLoRA model.
Example:
```py
>>> from transformers import AutoModelForCausalLM
>>> from peft import VBLoRAConfig, get_peft_model
>>> base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
>>> config = VBLoRAConfig(
... task_type="SEQ_CLS",
... r=4,
... target_modules=["fc1", "fc2", "k_proj", "out_proj", "q_proj", "v_proj"],
... num_vectors=60,
... vector_length=256,
... save_only_topk_weights=True,
... )
>>> model = get_peft_model(base_model, config)
```
**Attributes**:
- **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.
- **peft_config** ([`VBLoRAConfig`]): The configuration of the VBLoRA model.
"""
prefix: str = "vblora_"
def __init__(self, model, config, adapter_name, low_cpu_mem_usage: bool = False) -> None:
super().__init__(model, config, adapter_name, low_cpu_mem_usage=low_cpu_mem_usage)
def _init_vblora_vector_bank(self, config: VBLoRAConfig, adapter_name: str) -> None:
vblora_vector_bank = torch.zeros(config.num_vectors, config.vector_length)
torch.nn.init.uniform_(vblora_vector_bank, -config.init_vector_bank_bound, config.init_vector_bank_bound)
self.vblora_vector_bank[adapter_name] = vblora_vector_bank
def _pre_injection_hook(self, model: nn.Module, config: VBLoRAConfig, adapter_name: str) -> None:
self.vblora_vector_bank = nn.ParameterDict({})
def _check_new_adapter_config(self, config: VBLoRAConfig) -> None:
"""
A helper method to check the config when a new adapter is being added.
Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters.
"""
# the below todo is copied from LoRA
# TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check
# does not fully correspond to the error message.
if (len(self.peft_config) > 1) and (config.bias != "none"):
raise ValueError(
f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, "
"set bias to 'none' for all adapters."
)
@staticmethod
def _check_target_module_exists(vblora_config, key):
return check_target_module_exists(vblora_config, key)
def _create_and_replace(
self,
vblora_config,
adapter_name,
target,
target_name,
parent,
current_key,
):
if current_key is None:
raise ValueError("Current Key shouldn't be `None`")
bias = hasattr(target, "bias") and target.bias is not None
kwargs = {
"fan_in_fan_out": vblora_config.fan_in_fan_out,
"bias": bias,
}
self._init_vblora_vector_bank(vblora_config, adapter_name)
# TODO: add quantization support
if isinstance(target, Linear):
target.update_layer(
adapter_name=adapter_name,
vblora_vector_bank=self.vblora_vector_bank,
r=vblora_config.r,
topk=vblora_config.topk,
num_vectors=vblora_config.num_vectors,
vector_length=vblora_config.vector_length,
vblora_dropout=vblora_config.vblora_dropout,
init_logits_std=vblora_config.init_logits_std,
)
else:
new_module = self._create_new_module(
vblora_config=vblora_config,
vblora_vector_bank=self.vblora_vector_bank,
adapter_name=adapter_name,
target=target,
**kwargs,
)
if adapter_name not in self.active_adapter:
# adding an additional adapter: it is not automatically trainable
new_module.requires_grad_(False)
self._replace_module(parent, target_name, new_module, target)
@staticmethod
def _replace_module(parent, child_name, new_module, child):
setattr(parent, child_name, new_module)
# It's not necessary to set requires_grad here, as that is handled by
# _mark_only_adapters_as_trainable
# child layer wraps the original module, unpack it
if hasattr(child, "base_layer"):
child = child.base_layer
if not hasattr(new_module, "base_layer"):
new_module.weight = child.weight
if hasattr(child, "bias"):
new_module.bias = child.bias
if getattr(child, "state", None) is not None:
if hasattr(new_module, "base_layer"):
new_module.base_layer.state = child.state
else:
new_module.state = child.state
new_module.to(child.weight.device)
meta = torch.device("meta")
# dispatch to correct device
for name, module in new_module.named_modules():
if "vblora_" in name:
if not any(p.device == meta for p in module.parameters()):
module.to(child.weight.device)
def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None:
for n, p in model.named_parameters():
if self.prefix not in n:
p.requires_grad = False
for active_adapter in self.active_adapters:
bias = self.peft_config[active_adapter].bias
if bias == "none":
continue
if bias == "all":
for n, p in model.named_parameters():
if "bias" in n:
p.requires_grad = True
elif bias == "vblora_only":
for m in model.modules():
if isinstance(m, VBLoRALayer) and hasattr(m, "bias") and m.bias is not None:
m.bias.requires_grad = True
else:
raise NotImplementedError(f"Requested bias: {bias}, is not implemented.")
@staticmethod
def _create_new_module(vblora_config, vblora_vector_bank, adapter_name, target, **kwargs):
if isinstance(target, BaseTunerLayer):
target_base_layer = target.get_base_layer()
else:
target_base_layer = target
if isinstance(target_base_layer, torch.nn.Linear):
if kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. "
"Setting fan_in_fan_out to False."
)
kwargs["fan_in_fan_out"] = vblora_config.fan_in_fan_out = False
elif isinstance(target_base_layer, Conv1D):
kwargs["is_target_conv_1d_layer"] = True
if not kwargs["fan_in_fan_out"]:
warnings.warn(
"fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True."
)
kwargs["fan_in_fan_out"] = vblora_config.fan_in_fan_out = True
else:
raise ValueError(
f"Target module {target} is not supported. Currently, only the following modules are supported: "
"`torch.nn.Linear`, `transformers.pytorch_utils.Conv1D`."
)
new_module = Linear(
base_layer=target,
vblora_vector_bank=vblora_vector_bank,
adapter_name=adapter_name,
r=vblora_config.r,
num_vectors=vblora_config.num_vectors,
vector_length=vblora_config.vector_length,
topk=vblora_config.topk,
vblora_dropout=vblora_config.vblora_dropout,
init_logits_std=vblora_config.init_logits_std,
**kwargs,
)
return new_module
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
if name == "model": # see #1892: prevent infinite recursion if class is not initialized
raise
return getattr(self.model, name)
def get_peft_config_as_dict(self, inference: bool = False):
config_dict = {}
for key, value in self.peft_config.items():
config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}
if inference:
config["inference_mode"] = True
config_dict[key] = config
return config_dict
def _set_adapter_layers(self, enabled: bool = True) -> None:
for module in self.model.modules():
if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)):
module.enable_adapters(enabled)
def enable_adapter_layers(self) -> None:
"""Enable all adapters.
Call this if you have previously disabled all adapters and want to re-enable them.
"""
self._set_adapter_layers(enabled=True)
def disable_adapter_layers(self) -> None:
"""Disable all adapters.
When disabling all adapters, the model output corresponds to the output of the base model.
"""
for active_adapter in self.active_adapters:
val = self.peft_config[active_adapter].bias
if val != "none":
msg = (
f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same "
"output as the the base model would without adaption."
)
warnings.warn(msg)
self._set_adapter_layers(enabled=False)
def set_adapter(self, adapter_name: str | list[str]) -> None:
"""Set the active adapter(s).
Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is
not desired, use the following code.
```py
>>> for name, param in model_peft.named_parameters():
... if ...: # some check on name (ex. if 'lora' in name)
... param.requires_grad = False
```
Args:
adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated.
"""
for module in self.model.modules():
if isinstance(module, VBLoRALayer):
if module.merged:
warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.")
module.unmerge()
module.set_adapter(adapter_name)
self.active_adapter = adapter_name
@staticmethod
def _prepare_adapter_config(peft_config, model_config):
if peft_config.target_modules is None:
if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING:
raise ValueError("Please specify `target_modules` in `peft_config`")
peft_config.target_modules = set(
TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING[model_config["model_type"]]
)
return peft_config
def _unload_and_optionally_merge(
self,
merge=True,
progressbar: bool = False,
safe_merge: bool = False,
adapter_names: Optional[list[str]] = None,
):
key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
desc = "Unloading " + ("and merging " if merge else "") + "model"
for key in tqdm(key_list, disable=not progressbar, desc=desc):
try:
parent, target, target_name = _get_submodules(self.model, key)
except AttributeError:
continue
if hasattr(target, "base_layer"):
if merge:
target.merge(safe_merge=safe_merge, adapter_names=adapter_names)
self._replace_module(parent, target_name, target.get_base_layer(), target)
elif isinstance(target, ModulesToSaveWrapper):
# save any additional trainable modules part of `modules_to_save`
setattr(parent, target_name, target.modules_to_save[target.active_adapter])
return self.model
def delete_adapter(self, adapter_name: str) -> None:
"""
Deletes an existing adapter.
Args:
adapter_name (str): Name of the adapter to be deleted.
"""
if adapter_name not in list(self.peft_config.keys()):
raise ValueError(f"Adapter {adapter_name} does not exist")
del self.peft_config[adapter_name]
key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key]
new_adapter = None
for key in key_list:
_, target, _ = _get_submodules(self.model, key)
if isinstance(target, VBLoRALayer):
target.delete_adapter(adapter_name)
if new_adapter is None:
new_adapter = target.active_adapter[:]
self.active_adapter = new_adapter or []
def merge_and_unload(
self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None
) -> torch.nn.Module:
r"""
This method merges the VBLoRA layers into the base model. This is needed if someone wants to use the base model
as a standalone model.
Args:
progressbar (`bool`):
whether to show a progressbar indicating the unload and merge process
safe_merge (`bool`):
whether to activate the safe merging check, verifying that the merged adapter weights do not contain NaNs
adapter_names (`list[str]`, *optional*):
The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults
to `None`.
Example:
```py
>>> from transformers import AutoModelForCausalLM
>>> from peft import PeftModel
>>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b")
>>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample"
>>> model = PeftModel.from_pretrained(base_model, peft_model_id)
>>> merged_model = model.merge_and_unload()
```
"""
return self._unload_and_optionally_merge(
progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names
)
def unload(self):
"""
Gets back the base model by removing all the VBLoRA modules without merging. This gives back the original base
model.
"""
return self._unload_and_optionally_merge(merge=False)
def get_nb_savable_parameters(self, adapter="default") -> tuple[int, int]:
r"""
Returns the number of savable VB-LoRA parameters and other savable parameters.
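
Example (illustrative; assumes `model` was created with `get_peft_model` as in the class-level example above):

```py
>>> vblora_params, other_params = model.get_nb_savable_parameters()
>>> print(f"VB-LoRA params: {vblora_params:,d} || other params: {other_params:,d}")
```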
"""
logits_params = 0
vector_bank_params = 0
other_params = 0
for name, param in self.named_parameters():
if "vblora_logits" in name:
logits_params += param.numel()
elif "vblora_vector_bank" in name:
vector_bank_params += param.numel()
elif param.requires_grad:
other_params += param.numel()
if self.peft_config[adapter].save_only_topk_weights:
num_vectors = self.peft_config[adapter].num_vectors
factor = 1 # factor to count float32-equivalent parameters
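# The factor approximates the storage cost of a single top-k index relative to a float32 value,
# assuming the indices are saved in the smallest integer dtype that can represent `num_vectors`:
# uint8 -> 0.25, int16 -> 0.5, int32 -> 1, int64 -> 2.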
if num_vectors < 2**8:
factor = 0.25
elif num_vectors < 2**15:
factor = 0.5
elif num_vectors < 2**31:
factor = 1
else:
factor = 2
topk_weight_params = (
logits_params / self.peft_config[adapter].num_vectors * (self.peft_config[adapter].topk - 1)
)
topk_indices_params = (
logits_params / self.peft_config[adapter].num_vectors * self.peft_config[adapter].topk * factor
)
vblora_params = int(vector_bank_params + topk_weight_params + topk_indices_params)
else:
vblora_params = vector_bank_params + logits_params
return vblora_params, other_params
def print_savable_parameters(self) -> None:
r"""
Prints the number of savable VB-LoRA parameters and total savable parameters.
"""
vblora_params, other_params = self.get_nb_savable_parameters()
print(
f"VB-LoRA params to-be-saved (float32-equivalent): {vblora_params:,d} "
f"|| total params to-be-saved: {(vblora_params + other_params):,d}"
)
# --- end of file: peft/src/peft/tuners/vblora/model.py ---
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Reference code: https://github.com/yxli2123/LoftQ/blob/main/utils.py
# Reference paper: https://arxiv.org/abs/2310.08659
from __future__ import annotations
import logging
import os
from typing import Callable, Optional, Union
import torch
from huggingface_hub import snapshot_download
from huggingface_hub.errors import HFValidationError, LocalEntryNotFoundError
from safetensors import SafetensorError, safe_open
from transformers.utils import cached_file
from transformers.utils.hub import get_checkpoint_shard_files
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
class NFQuantizer:
def __init__(self, num_bits=2, device="cuda", method="normal", block_size=64, *args, **kwargs):
super().__init__(*args, **kwargs)
self.num_bits = num_bits
self.device = device
self.method = method
self.block_size = block_size
if self.method == "normal":
self.norm_lookup_table = self.create_normal_map(num_bits=self.num_bits)
self.norm_lookup_table = self.norm_lookup_table.to(device)
elif self.method == "uniform":
self.norm_lookup_table = self.create_uniform_map(num_bits=self.num_bits)
self.norm_lookup_table = self.norm_lookup_table.to(device)
else:
raise NotImplementedError("Other quantization methods not supported yet.")
@staticmethod
def create_uniform_map(symmetric=False, num_bits=4):
if symmetric:
# print("symmetric uniform quantization")
negative = torch.linspace(-1, 0, 2 ** (num_bits - 1))
positive = torch.linspace(0, 1, 2 ** (num_bits - 1))
table = torch.cat([negative, positive[1:]])
else:
# print("asymmetric uniform quantization")
table = torch.linspace(-1, 1, 2**num_bits)
return table
@staticmethod
def create_normal_map(offset=0.9677083, symmetric=False, num_bits=2):
try:
from scipy.stats import norm
except ImportError:
raise ImportError("The required package 'scipy' is not installed. Please install it to continue.")
variations = 2**num_bits
if symmetric:
v = norm.ppf(torch.linspace(1 - offset, offset, variations + 1)).tolist()
values = []
for index in range(len(v) - 1):
values.append(0.5 * v[index] + 0.5 * v[index + 1])
v = values
else:
# one more positive value, this is an asymmetric type
v1 = norm.ppf(torch.linspace(offset, 0.5, variations // 2 + 1)[:-1]).tolist()
v2 = [0]
v3 = (-norm.ppf(torch.linspace(offset, 0.5, variations // 2)[:-1])).tolist()
v = v1 + v2 + v3
values = torch.Tensor(v)
values = values.sort().values
values /= values.max()
return values
def quantize_tensor(self, weight):
max_abs = torch.abs(weight).max()
weight_normed = weight / max_abs
weight_normed_expanded = weight_normed.unsqueeze(-1)
# Reshape the lookup table to broadcast against the expanded, normalized weights
L_reshaped = torch.tensor(self.norm_lookup_table).reshape(1, -1)
# Calculate the absolute difference between the normalized weights and each lookup table value
abs_diff = torch.abs(weight_normed_expanded - L_reshaped)
# Find the index of the minimum absolute difference for each element
qweight = torch.argmin(abs_diff, dim=-1)
return qweight, max_abs
def dequantize_tensor(self, qweight, max_abs):
qweight_flatten = qweight.flatten()
weight_normed = self.norm_lookup_table[qweight_flatten]
weight = weight_normed * max_abs
weight = weight.reshape(qweight.shape)
return weight
def quantize_block(self, weight):
if len(weight.shape) != 2:
raise ValueError(f"Only support 2D matrix, but your input has {len(weight.shape)} dimensions.")
if weight.shape[0] * weight.shape[1] % self.block_size != 0:
raise ValueError(
f"Weight with shape ({weight.shape[0]} x {weight.shape[1]}) "
f"is not dividable by block size {self.block_size}."
)
M, N = weight.shape
device = weight.device
# Quantization
weight_flatten = weight.flatten() # (M*N, )
weight_block = weight_flatten.reshape(-1, self.block_size) # (L, B), L = M * N / B
if self.method == "normal":
weight_max = weight_block.abs().max(dim=-1)[0] # (L, 1)
elif self.method == "uniform":
weight_max = weight_block.mean(dim=-1) + 2.5 * weight_block.std(dim=-1)
else:
raise NotImplementedError("Method not supported yet.")
weight_max = weight_max.unsqueeze(-1)
weight_divabs = weight_block / weight_max # (L, B)
weight_divabs = weight_divabs.unsqueeze(-1) # (L, B, 1)
L_reshaped = self.norm_lookup_table.reshape(1, -1) # (1, 2**K)
abs_diff = torch.abs(weight_divabs - L_reshaped) # (L, B, 2**K)
qweight = torch.argmin(abs_diff, dim=-1) # (L, B)
# Pack multiple k-bit into uint8
qweight = qweight.reshape(-1, 8 // self.num_bits)
qweight_pack = torch.zeros((M * N // 8 * self.num_bits, 1), dtype=torch.uint8, device=device)
# data format example:
# [1, 0, 3, 2] or [01, 00, 11, 10] -> [10110001], LIFO
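# In other words, with num_bits=2, four 2-bit codes fill one uint8: code i is shifted left by
# i * num_bits, so the first code lands in the least significant bits and is read back first
# in `dequantize_block`.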
for i in range(8 // self.num_bits):
qweight[:, i] = qweight[:, i] << i * self.num_bits
qweight_pack[:, 0] |= qweight[:, i]
return qweight_pack, weight_max, weight.shape
def dequantize_block(self, qweight, weight_max, weight_shape):
# unpack weight
device = qweight.device
weight = torch.zeros((qweight.shape[0], 8 // self.num_bits), dtype=torch.float32, device=device)
for i in range(8 // self.num_bits):
lookup_table_idx = qweight.to(torch.long) % 2**self.num_bits  # extract the rightmost num_bits bits
lookup_table_idx = lookup_table_idx.to(torch.long)
weight[:, i] = self.norm_lookup_table[lookup_table_idx].squeeze()
qweight = qweight >> self.num_bits  # shift right by num_bits to expose the next code
weight_block = weight.reshape(-1, self.block_size)
weight = weight_block * weight_max
weight = weight.reshape(weight_shape)
return weight
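

# Illustrative sketch (for exposition; not part of the reference implementation above): a
# per-tensor quantize/dequantize round trip with `NFQuantizer`. The CPU device, the 2-bit
# setting and the random 64x64 weight are arbitrary choices; `method="normal"` requires
# scipy for `create_normal_map`.
def _nf_quantizer_roundtrip_example():
    quantizer = NFQuantizer(num_bits=2, device="cpu", method="normal", block_size=64)
    weight = torch.randn(64, 64)
    # map each value to the index of the nearest entry in the normal-float lookup table
    qweight, max_abs = quantizer.quantize_tensor(weight)
    # reconstruct an approximation of the original weight from the indices and the scale
    weight_approx = quantizer.dequantize_tensor(qweight, max_abs)
    return (weight - weight_approx).abs().mean()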
def _low_rank_decomposition(weight, reduced_rank=32):
"""
:param weight: the matrix to decompose, of shape (H, W)
:param reduced_rank: the target rank of the decomposition
:return: a dict containing the low-rank factors ``L`` and ``R``, the full SVD components ``U``, ``S``, ``Vh``, and ``reduced_rank``
"""
matrix_dimension = len(weight.size())
if matrix_dimension != 2:
raise ValueError(f"Only support 2D matrix, but your input has {matrix_dimension} dimensions.")
# Use SVD to decompose a matrix, default full_matrices is False to save parameters
U, S, Vh = torch.linalg.svd(weight, full_matrices=False)
L = U @ (torch.sqrt(torch.diag(S)[:, 0:reduced_rank]))
R = torch.sqrt(torch.diag(S)[0:reduced_rank, :]) @ Vh
return {"L": L, "R": R, "U": U, "S": S, "Vh": Vh, "reduced_rank": reduced_rank}
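
# For reference: `L @ R` is the rank-`reduced_rank` truncated-SVD approximation of `weight`, e.g.
#   out = _low_rank_decomposition(torch.randn(128, 64), reduced_rank=8)
#   approx = out["L"] @ out["R"]  # shape (128, 64), rank <= 8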
@torch.no_grad()
def loftq_init(weight: Union[torch.Tensor, torch.nn.Parameter], num_bits: int, reduced_rank: int, num_iter=1):
if is_bnb_available():
import bitsandbytes as bnb
else:
raise ValueError("bitsandbytes is not available, please install it to use LoftQ.")
if num_bits not in [2, 4, 8]:
raise ValueError("Only support 2, 4, 8 bits quantization")
if num_iter <= 0:
raise ValueError("Number of iterations must be greater than 0")
out_feature, in_feature = weight.size()
device = weight.device
dtype = weight.dtype
logging.info(
f"Weight: ({out_feature}, {in_feature}) | Rank: {reduced_rank} | Num Iter: {num_iter} | Num Bits: {num_bits}"
)
if not is_bnb_4bit_available() or num_bits in [2, 8]:
quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64)
compute_device = device
else:
compute_device = "cuda"
weight = weight.to(device=compute_device, dtype=torch.float32)
res = weight.clone()
for i in range(num_iter):
torch.cuda.empty_cache()
# Quantization
if num_bits == 4 and is_bnb_4bit_available():
qweight = bnb.nn.Params4bit(
res.to("cpu"), requires_grad=False, compress_statistics=False, quant_type="nf4"
).to(compute_device)
dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state)
else:
quantized_weight, max_abs, shape = quantizer.quantize_block(res)
dequantized_weight = quantizer.dequantize_block(quantized_weight, max_abs, shape)
res = weight - dequantized_weight
# Decompose the residual by SVD
output = _low_rank_decomposition(res, reduced_rank=reduced_rank)
L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"]
res = weight - torch.mm(L, R)
lora_A, lora_B = R, L
return dequantized_weight.to(device=device, dtype=dtype), lora_A, lora_B
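
# For reference: after the final iteration, `weight` is approximately
# `dequantized_weight + lora_B @ lora_A`, since `lora_B = L` and `lora_A = R` come from the
# low-rank decomposition of the remaining quantization residual.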
@torch.no_grad()
def _loftq_init_new(qweight, weight, num_bits: int, reduced_rank: int):
import bitsandbytes as bnb
if num_bits != 4:
raise ValueError("Only 4 bit quantization supported at the moment.")
if not is_bnb_4bit_available():
raise ValueError("bitsandbytes 4bit quantization is not available.")
compute_device = "cuda"
dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state)
weight = weight.to(device=compute_device, dtype=torch.float32)
residual = weight - dequantized_weight
torch.cuda.empty_cache()
# Decompose the residual by SVD
output = _low_rank_decomposition(residual, reduced_rank=reduced_rank)
L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"]
return R, L
class _SafetensorLoader:
"""
Simple utility class that loads tensors with safetensors from a single file or sharded files.
Takes care of file name normalization etc.
"""
def __init__(self, peft_model, model_path):
if model_path is None:
try:
model_path = snapshot_download(peft_model.base_model.config._name_or_path, local_files_only=True)
except (AttributeError, HFValidationError) as exc:
raise ValueError(
"The provided model does not appear to be a transformers model or is a local model. In this case, "
"you must pass the model_path argument that points to the safetensors file."
) from exc
except LocalEntryNotFoundError as exc:
raise ValueError(
"The model.safetensors file must be present on disk, but it could not be found."
) from exc
suffix = "model.safetensors"
if not model_path.endswith(suffix):
model_path = os.path.join(model_path, suffix)
self.model_path = model_path
self.base_model_prefix = getattr(peft_model.get_base_model(), "base_model_prefix", None)
self.prefix = "base_model.model."
self.is_sharded = False
self.weight_map = None
if not os.path.exists(model_path):
# check if the file is sharded
par_dir = model_path.rpartition(os.path.sep)[0]
try:
resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(
par_dir, cached_file(par_dir, "model.safetensors.index.json")
)
except OSError as exc:
raise FileNotFoundError(
f"Could not find file for {model_path}, ensure that there is a (sharded) safetensors file of the model."
) from exc
self.is_sharded = True
# maps from 'model-X-of-Y.safetensors' to full file path
file_map = {k.rpartition(os.path.sep)[-1]: k for k in resolved_archive_file}
self.weight_map = {k: file_map[v] for k, v in sharded_metadata["weight_map"].items()}
def get_tensor(self, name):
if not self.is_sharded:
file_path = self.model_path
else:
file_path = self.weight_map[name]
with safe_open(file_path, framework="pt", device="cpu") as f:
try:
tensor = f.get_tensor(name)
except SafetensorError as exc:
# no matching key found, we probably need to remove the base model prefix
if self.base_model_prefix:
# remove 1 extra character for "."
name = name[len(self.base_model_prefix) + 1 :]
tensor = f.get_tensor(name)
else:
raise exc
return tensor
@torch.no_grad()
def replace_lora_weights_loftq(
peft_model,
model_path: Optional[str] = None,
adapter_name: str = "default",
callback: Optional[Callable[[torch.nn.Module, str], bool]] = None,
):
"""
Replace the LoRA weights of a model quantized with bitsandbytes, using the LoftQ technique.
The replacement is done on the fly by loading in the non-quantized weights from a locally stored safetensors model
file and initializing the LoRA weights such that the quantization error between the original and quantized weights
is minimized.
As lazy loading is not possible with pickle, normal PyTorch checkpoint files cannot be supported.
Depending on the model size, calling this function may take some time to finish.
Args:
peft_model (`PeftModel`):
The model to replace the weights of. Must be a quantized PEFT model with LoRA layers.
model_path (`Optional[str]`):
The path to the model safetensors file. If the model is a Hugging Face model, this will be inferred from
the model's config. Otherwise, it must be provided.
adapter_name (`str`):
The name of the adapter to replace the weights of. The default adapter name is "default".
callback (`Optional[Callable[[PeftModel, str], bool]]`):
A callback function that will be called after each module is replaced. The callback function should take
the model and the name of the current module as input and return a boolean indicating whether the
replacement should be kept. If the callback returns False, the replacement will be rolled back. This can be
very useful to confirm that the LoftQ initialization actually decreases the quantization error of the
model. As an example, this callback could generate logits for a given input and compare them with the logits
from the original, non-quantized model for the same input, and only return `True` if there is an
improvement. As this is a greedy optimization, it's possible that calling this function multiple times
yields incremental improvements.
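
Example (an illustrative callback sketch; `inputs` and `logits_base`, the logits obtained from the original,
non-quantized model, are placeholders the user is expected to prepare):

```py
>>> current_mse = float("inf")
>>> def my_callback(model, module_name):
...     global current_mse
...     logits = model(**inputs).logits
...     mse = ((logits_base - logits) ** 2).mean()
...     if mse < current_mse:
...         current_mse = mse
...         return True  # keep the replacement
...     return False  # roll back the replacement
>>> replace_lora_weights_loftq(peft_model, callback=my_callback)
```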
"""
if not is_bnb_4bit_available():
raise ValueError("bitsandbytes must be installed and the model must be quantized in 4bits.")
from peft.tuners.lora import Linear4bit
# model_path = _check_model_path_loftq(model_path, peft_model)
prefix = "base_model.model."
any_match = False
safetensor_loader = _SafetensorLoader(peft_model, model_path)
# if too slow, consider adding tqdm as an option
for name, module in peft_model.named_modules():
if not isinstance(module, Linear4bit):
continue
if not name.startswith(prefix):
raise TypeError("The passed model does not appear to be a valid PeftModel")
any_match = True
name = name[len(prefix) :]
tensor = safetensor_loader.get_tensor(name + ".weight")
reduced_rank = module.r[adapter_name]
lora_A, lora_B = _loftq_init_new(module.weight, tensor, num_bits=4, reduced_rank=reduced_rank)
if not callback:
module.lora_A[adapter_name].weight.data = lora_A
module.lora_B[adapter_name].weight.data = lora_B
continue
lora_A_before = module.lora_A[adapter_name].weight.data
lora_B_before = module.lora_B[adapter_name].weight.data
module.lora_A[adapter_name].weight.data = lora_A
module.lora_B[adapter_name].weight.data = lora_B
should_replace = callback(peft_model, name)
if not should_replace:
# roll back
module.lora_A[adapter_name].weight.data = lora_A_before
module.lora_B[adapter_name].weight.data = lora_B_before
del lora_A_before, lora_B_before
if not any_match:
raise ValueError("No bnb LoRA module found on the model")
# --- end of file: peft/src/peft/utils/loftq_utils.py ---
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
from unittest.mock import Mock, call, patch
import pytest
import torch
from datasets import load_dataset
from parameterized import parameterized
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
Trainer,
TrainingArguments,
)
from peft import (
AdaLoraConfig,
BOFTConfig,
BoneConfig,
HRAConfig,
LoraConfig,
OFTConfig,
PrefixTuningConfig,
PromptLearningConfig,
PromptTuningConfig,
PromptTuningInit,
get_peft_model,
)
from .testing_common import PeftCommonTester
from .testing_common import PeftTestConfigManagerForDecoderModels as PeftTestConfigManager
PEFT_DECODER_MODELS_TO_TEST = [
"hf-internal-testing/tiny-random-OPTForCausalLM",
"hf-internal-testing/tiny-random-GPTNeoXForCausalLM",
"hf-internal-testing/tiny-random-GPT2LMHeadModel",
"hf-internal-testing/tiny-random-BloomForCausalLM",
"hf-internal-testing/tiny-random-gpt_neo",
"hf-internal-testing/tiny-random-GPTJForCausalLM",
"hf-internal-testing/tiny-random-GPTBigCodeForCausalLM",
"trl-internal-testing/tiny-random-LlamaForCausalLM",
"peft-internal-testing/tiny-dummy-qwen2",
]
FULL_GRID = {
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"task_type": "CAUSAL_LM",
}
SMALL_GRID = {
"model_ids": [
"hf-internal-testing/tiny-random-gpt2",
"hf-internal-testing/tiny-random-OPTForCausalLM",
"hf-internal-testing/tiny-random-MistralForCausalLM",
"peft-internal-testing/tiny-dummy-qwen2",
"trl-internal-testing/tiny-random-LlamaForCausalLM",
],
"task_type": "CAUSAL_LM",
}
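
# The following filter functions are passed as `filter_params_func` to
# `PeftTestConfigManager.get_grid_parameters` to drop grid combinations that are
# expected not to work in these tests (mostly certain tuner configs paired with GPT2).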
def skip_adalora_and_gpt2(test_list):
return [test for test in test_list if not (("GPT2LMHeadModel" in test[1]) and (test[2] == AdaLoraConfig))]
def skip_oft_or_hra_and_gpt2(test_list):
return [
test
for test in test_list
if not (
("GPT2LMHeadModel" in test[1])
and ((test[2] == BOFTConfig) or (test[2] == HRAConfig) or (test[2] == OFTConfig))
or (test[2] == BoneConfig)
)
]
def skip_adalora_or_oft_or_hra_and_gpt2(test_list):
return [
test
for test in test_list
if not (
("GPT2LMHeadModel" in test[1])
and (
(test[2] == AdaLoraConfig)
or (test[2] == BOFTConfig)
or (test[2] == HRAConfig)
or (test[2] == OFTConfig)
or (test[2] == BoneConfig)
)
)
]
def only_prompt_learning_filter(test_list):
return [test for test in test_list if issubclass(test[2], PromptLearningConfig)]
class PeftDecoderModelTester(unittest.TestCase, PeftCommonTester):
r"""
Test if the PeftModel behaves as expected. This includes:
- test if the model has the expected methods
We use parameterized.expand for debugging purposes to test each model individually.
"""
transformers_class = AutoModelForCausalLM
def prepare_inputs_for_testing(self):
input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
input_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return input_dict
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_model_attr(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs):
self._test_adapter_name(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_prepare_for_training(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_prompt_tuning_text_prepare_for_training(self, test_name, model_id, config_cls, config_kwargs):
# Test that prompt tuning works with text init
if config_cls != PromptTuningConfig:
return pytest.skip(f"This test does not apply to {config_cls}")
config_kwargs = config_kwargs.copy()
config_kwargs["prompt_tuning_init"] = PromptTuningInit.TEXT
config_kwargs["prompt_tuning_init_text"] = "This is a test prompt."
config_kwargs["tokenizer_name_or_path"] = model_id
self._test_prepare_for_training(model_id, config_cls, config_kwargs)
def test_prompt_tuning_text_tokenizer_kwargs(self):
# Allow users to pass additional arguments to Tokenizer.from_pretrained
# Fix for #1032
mock = Mock()
orig_from_pretrained = AutoTokenizer.from_pretrained
def mock_autotokenizer_from_pretrained(*args, **kwargs):
mock(*args, **kwargs)
return orig_from_pretrained(config.tokenizer_name_or_path)
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
config = PromptTuningConfig(
base_model_name_or_path=model_id,
tokenizer_name_or_path=model_id,
num_virtual_tokens=10,
prompt_tuning_init=PromptTuningInit.TEXT,
task_type="CAUSAL_LM",
prompt_tuning_init_text="This is a test prompt.",
tokenizer_kwargs={"trust_remote_code": True, "foo": "bar"},
)
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
with patch("transformers.AutoTokenizer.from_pretrained", mock_autotokenizer_from_pretrained):
model = get_peft_model(model, config)
expected_call = call(model_id, trust_remote_code=True, foo="bar")
assert mock.call_args == expected_call
def test_prompt_tuning_config_invalid_args(self):
# Raise an error when tokenizer_kwargs is used with prompt_tuning_init!='TEXT', because this argument has no
# function in that case
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
with pytest.raises(ValueError, match="tokenizer_kwargs only valid when using prompt_tuning_init='TEXT'."):
PromptTuningConfig(
base_model_name_or_path=model_id,
tokenizer_name_or_path=model_id,
num_virtual_tokens=10,
task_type="CAUSAL_LM",
prompt_tuning_init_text="This is a test prompt.",
prompt_tuning_init=PromptTuningInit.RANDOM, # <= should not be used together with tokenizer_kwargs
tokenizer_kwargs={"trust_remote_code": True, "foo": "bar"},
)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_save_pretrained_pickle(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs, safe_serialization=False)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_save_pretrained_selected_adapters_pickle(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs, safe_serialization=False)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs):
self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"adalora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"oft_kwargs": {"init_weights": [False]},
"vera_kwargs": {"init_weights": [False]},
"fourierft_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"bone_kwargs": {"init_weights": [False]},
"task_type": "CAUSAL_LM",
},
)
)
def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
self._test_merge_layers(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"oft_kwargs": {"init_weights": [False]},
"vera_kwargs": {"init_weights": [False]},
"fourierft_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"bone_kwargs": {"init_weights": [False]},
"task_type": "CAUSAL_LM",
},
filter_params_func=skip_oft_or_hra_and_gpt2,
)
)
def test_merge_layers_multi(self, test_name, model_id, config_cls, config_kwargs):
self._test_merge_layers_multi(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"oft_kwargs": {"init_weights": [False]},
"bone_kwargs": {"init_weights": [False]},
"task_type": "CAUSAL_LM",
},
)
)
def test_merge_layers_nan(self, test_name, model_id, config_cls, config_kwargs):
self._test_merge_layers_nan(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"bone_kwargs": {"init_weights": [False]},
"task_type": "CAUSAL_LM",
},
)
)
def test_mixed_adapter_batches(self, test_name, model_id, config_cls, config_kwargs):
self._test_mixed_adapter_batches(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"task_type": "CAUSAL_LM",
},
)
)
def test_generate_with_mixed_adapter_batches(self, test_name, model_id, config_cls, config_kwargs):
self._test_generate_with_mixed_adapter_batches_and_beam_search(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_generate(self, test_name, model_id, config_cls, config_kwargs):
self._test_generate(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_generate_pos_args(self, test_name, model_id, config_cls, config_kwargs):
# positional args are supported for PeftModelForCausalLM
self._test_generate_pos_args(model_id, config_cls, config_kwargs, raises_err=False)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_merge_layers_fp16(self, test_name, model_id, config_cls, config_kwargs):
self._test_merge_layers_fp16(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs):
self._test_generate_half_prec(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_prefix_tuning_half_prec_conversion(self, test_name, model_id, config_cls, config_kwargs):
self._test_prefix_tuning_half_prec_conversion(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_training_decoders(self, test_name, model_id, config_cls, config_kwargs):
self._test_training(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training_decoders_layer_indexing(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_layer_indexing(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_training_decoders_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs):
self._test_inference_safetensors(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs):
self._test_peft_model_device_map(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_adding_multiple_adapters_with_bias_raises(self, test_name, model_id, config_cls, config_kwargs):
self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"adalora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"oft_kwargs": {"init_weights": [False]},
"vera_kwargs": {"init_weights": [False]},
"fourierft_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"bone_kwargs": {"init_weights": [False]},
"task_type": "CAUSAL_LM",
},
filter_params_func=skip_adalora_or_oft_or_hra_and_gpt2,
)
)
def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_unload_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"oft_kwargs": {"init_weights": [False]},
"bone_kwargs": {"init_weights": [False]},
"task_type": "CAUSAL_LM",
},
)
)
def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs):
self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"adalora_kwargs": {"init_lora_weights": [False]},
"boft_kwargs": {"init_weights": [False]},
"oft_kwargs": {"init_weights": [False]},
"vera_kwargs": {"init_weights": [False]},
"fourierft_kwargs": {"init_weights": [False]},
"hra_kwargs": {"init_weights": [False]},
"bone_kwargs": {"init_weights": [False]},
"task_type": "CAUSAL_LM",
},
filter_params_func=skip_oft_or_hra_and_gpt2,
)
)
def test_disable_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_disable_adapter(model_id, config_cls, config_kwargs)
def test_generate_adalora_no_dropout(self):
# test for issue #730
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
config_kwargs = {
"target_modules": None,
"task_type": "CAUSAL_LM",
"lora_dropout": 0.0,
"total_step": 1,
}
self._test_generate(model_id, AdaLoraConfig, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_oft_or_hra_and_gpt2)
)
def test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs):
self._test_passing_input_embeds_works(test_name, model_id, config_cls, config_kwargs)
def test_lora_layer_replication(self):
model_id = "trl-internal-testing/tiny-random-LlamaForCausalLM"
config_kwargs = {
"target_modules": ["down_proj", "up_proj"],
"task_type": "CAUSAL_LM",
"lora_dropout": 0.0,
"layer_replication": [[0, 1], [0, 2], [1, 2]],
}
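# layer_replication=[[0, 1], [0, 2], [1, 2]] concatenates the base layer slices 0:1, 0:2 and 1:2,
# i.e. base layers [0, 0, 1, 1]: four adapted layers in total, where layers 0-1 and 2-3 share the
# frozen base weights (checked by the asserts below).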
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
config = LoraConfig(
base_model_name_or_path=model_id,
**config_kwargs,
)
assert len(model.model.layers) == 2, "Expected 2 layers in original model."
model = get_peft_model(model, config)
layers = model.base_model.model.model.layers
assert len(layers) == 4, "Expected 4 layers in adapted model."
assert (
layers[0].mlp.up_proj.base_layer.weight.data.storage().data_ptr()
== layers[1].mlp.up_proj.base_layer.weight.data.storage().data_ptr()
and layers[2].mlp.up_proj.base_layer.weight.data.storage().data_ptr()
== layers[3].mlp.up_proj.base_layer.weight.data.storage().data_ptr()
), "Expected layers 0-1 and 2-3 to share weights"
assert (
layers[0].mlp.up_proj.base_layer.weight.data.storage().data_ptr()
!= layers[2].mlp.up_proj.base_layer.weight.data.storage().data_ptr()
), "Expected layers 0 and 2 to have different weights"
assert (
layers[0].mlp.up_proj.lora_A.default.weight.data.storage().data_ptr()
!= layers[1].mlp.up_proj.lora_A.default.weight.data.storage().data_ptr()
and layers[2].mlp.up_proj.lora_A.default.weight.data.storage().data_ptr()
!= layers[3].mlp.up_proj.lora_A.default.weight.data.storage().data_ptr()
), "Expected all LoRA adapters to have distinct weights"
assert len([n for n, _ in model.named_parameters() if ".lora_A." in n]) == 8, (
"Expected 8 LoRA adapters since we are adding one each for up and down."
)
self._test_prepare_for_training(model_id, LoraConfig, config_kwargs)
self._test_generate(model_id, LoraConfig, config_kwargs)
def test_prompt_learning_with_grouped_query_attention(self):
# See 1901, fixes a bug with handling GQA
model_id = "peft-internal-testing/tiny-dummy-qwen2"
base_model = AutoModelForCausalLM.from_pretrained(model_id)
peft_config = PrefixTuningConfig(num_virtual_tokens=10, task_type="CAUSAL_LM")
model = get_peft_model(base_model, peft_config)
x = torch.tensor([[1, 2, 3]])
# does not raise
model(x)
def test_prefix_tuning_mistral(self):
# See issue 869, 1962
model_id = "hf-internal-testing/tiny-random-MistralForCausalLM"
base_model = AutoModelForCausalLM.from_pretrained(model_id)
peft_config = PrefixTuningConfig(num_virtual_tokens=10, task_type="CAUSAL_LM")
model = get_peft_model(base_model, peft_config)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token
def process(samples):
tokenized = tokenizer(samples["quote"], truncation=True, max_length=128)
return tokenized
data = load_dataset("ybelkada/english_quotes_copy")
data = data.map(process, batched=True)
with tempfile.TemporaryDirectory() as tmp_dirname:
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
num_train_epochs=1,
max_steps=5,
per_device_train_batch_size=4,
output_dir=tmp_dirname,
),
data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(SMALL_GRID, filter_params_func=only_prompt_learning_filter)
)
def test_prompt_learning_with_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs):
# See issue 869
# Test prompt learning methods with gradient checkpointing in a semi realistic setting.
# Prefix tuning does not work if the model uses the new caching implementation. In that case, a helpful error
# should be raised.
# skip if multi GPU, since this results in DataParallel usage by Trainer, which fails with "CUDA device
# assertion", breaking subsequent tests
if torch.cuda.device_count() > 1:
pytest.skip("Skip prompt_learning_with_gradient_checkpointing test on multi-GPU setups")
peft_config = config_cls(
base_model_name_or_path=model_id,
**config_kwargs,
)
base_model = self.transformers_class.from_pretrained(model_id)
base_model.gradient_checkpointing_enable()
try:
model = get_peft_model(base_model, peft_config)
except ValueError as exc:
# Some methods will raise a helpful error. After this, exit the test, as training would fail.
assert config_cls == PrefixTuningConfig
assert "Prefix tuning does not work with gradient checkpointing" in str(exc)
return
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token
def process(samples):
tokenized = tokenizer(samples["quote"], truncation=True, max_length=128)
return tokenized
data = load_dataset("ybelkada/english_quotes_copy")
data = data.map(process, batched=True)
with tempfile.TemporaryDirectory() as tmp_dirname:
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
num_train_epochs=1,
max_steps=3,
per_device_train_batch_size=4,
output_dir=tmp_dirname,
),
data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()
# --- end of file: peft/tests/test_decoder_models.py ---
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch import nn
from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification
from peft import LoraConfig, PeftModel, get_peft_model
from peft.utils.other import ModulesToSaveWrapper
class ModelWithModuleDict(nn.Module):
def __init__(self):
super().__init__()
self.other_layer = nn.Linear(10, 10)
self.module = nn.ModuleDict({"foo": nn.Linear(10, 10)})
def forward(self):
return self.module["foo"](torch.rand(1, 10))
class ModelWithModuleList(nn.Module):
def __init__(self):
super().__init__()
self.other_layer = nn.Linear(10, 10)
self.module = nn.ModuleList([nn.Linear(10, 10)])
def forward(self):
return self.module[0](torch.rand(1, 10))
class ModelWithParameterDict(nn.Module):
def __init__(self):
super().__init__()
self.other_layer = nn.Linear(10, 10)
self.module = nn.ParameterDict({"foo": nn.Parameter(torch.rand(10, 10))})
def forward(self):
return self.module["foo"]
class ModelWithParameterList(nn.Module):
def __init__(self):
super().__init__()
self.other_layer = nn.Linear(10, 10)
self.module = nn.ParameterList([nn.Parameter(torch.rand(10, 10))])
def forward(self):
return self.module[0]
@pytest.mark.parametrize(
"cls", [ModelWithModuleDict, ModelWithModuleList, ModelWithParameterDict, ModelWithParameterList]
)
def test_modules_to_save_targets_module_dict_raises(cls):
model = cls()
peft_config = LoraConfig(
target_modules=["other_layer"],
modules_to_save=["module"],
)
model() # sanity check that the model would normally work
msg = "modules_to_save cannot be applied to modules of type"
with pytest.raises(TypeError, match=msg):
get_peft_model(model=model, peft_config=peft_config)
def test_modules_to_save_targets_tuner_layer_raises():
# See e.g. issue 2027
# Prevent users from (accidentally) targeting the same layer both with a tuner and modules_to_save. Normally, PEFT
# will not target the same layer with both a tuner and ModulesToSaveWrapper. However, if modules_to_save is
# automatically inferred, e.g. when using AutoModelForSequenceClassification, the ModulesToSaveWrapper is applied ex
# post, which can lead to the double wrapping.
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
model = AutoModelForSequenceClassification.from_pretrained(model_id)
# Note: target_modules="all-linear" would also work and is closer to the original issue, but let's explicitly target
# "score" here in case that "all-linear" will be fixed to no longer target the score layer.
peft_config = LoraConfig(target_modules=["score"], task_type="SEQ_CLS")
msg = "modules_to_save cannot be applied to modules of type"
with pytest.raises(TypeError, match=msg):
get_peft_model(model, peft_config)
def test_get_peft_model_revision_warning(tmp_path):
base_model_id = "peft-internal-testing/tiny-random-BertModel"
base_revision = "v2.0.0"
base_model = AutoModelForCausalLM.from_pretrained(base_model_id, revision=base_revision).eval()
lora_config = LoraConfig(revision=base_revision)
overwrite_revision = "main"
overwrite_warning = f"peft config has already set base model revision to {base_revision}, overwriting with revision {overwrite_revision}"
with pytest.warns(UserWarning, match=overwrite_warning):
_ = get_peft_model(base_model, lora_config, revision=overwrite_revision)
class TestModulesToSaveAttributeAccess:
"""Test attribute accces on the ModulesToSaveWrapper class.
When we have modules_to_save, the original module is wrapped. As long as only forward was called on this wrapped
module, we were good. However, if, for instance, model parameters were directly accessed by another module, this
would typically fail, as the wrapper does not have this attribute. We had special properties for weight and bias,
but this is not enough. Therefore, attribute access is now transiently delegated to the active adapter (or original
module, if the adapter is disabled).
For one example, see #2099.
"""
@pytest.fixture
def mlp(self):
class MLP(nn.Module):
def __init__(self):
super().__init__()
self.lin0 = nn.Linear(1, 2)
self.lin1 = nn.Linear(3, 4)
return MLP()
def test_transient_attribute_access_default_adapter(self, mlp):
config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
model = get_peft_model(mlp, config)
assert model.lin1.weight is model.lin1.modules_to_save["default"].weight
assert model.lin1.bias is model.lin1.modules_to_save["default"].bias
def test_transient_attribute_access_non_default_adapter(self, mlp):
config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
model = get_peft_model(mlp, config)
model.add_adapter("other", config)
# at this point, default is still active
assert model.lin1.weight is model.lin1.modules_to_save["default"].weight
assert model.lin1.bias is model.lin1.modules_to_save["default"].bias
assert model.lin1.weight is not model.lin1.modules_to_save["other"].weight
assert model.lin1.bias is not model.lin1.modules_to_save["other"].bias
model.set_adapter("other")
assert model.lin1.weight is not model.lin1.modules_to_save["default"].weight
assert model.lin1.bias is not model.lin1.modules_to_save["default"].bias
assert model.lin1.weight is model.lin1.modules_to_save["other"].weight
assert model.lin1.bias is model.lin1.modules_to_save["other"].bias
def test_transient_attribute_access_disabled_adapter(self, mlp):
config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
model = get_peft_model(mlp, config)
# at this point, default is still active
assert model.lin1.weight is model.lin1.modules_to_save["default"].weight
assert model.lin1.bias is model.lin1.modules_to_save["default"].bias
assert model.lin1.weight is not model.lin1.original_module.weight
assert model.lin1.bias is not model.lin1.original_module.bias
with model.disable_adapter():
assert model.lin1.weight is not model.lin1.modules_to_save["default"].weight
assert model.lin1.bias is not model.lin1.modules_to_save["default"].bias
assert model.lin1.weight is model.lin1.original_module.weight
assert model.lin1.bias is model.lin1.original_module.bias
def test_transient_attribute_access_uninitialized_adapter(self, mlp):
# ensure that there is no weird infinite recursion when accessing a non-existing attribute on the class itself
with pytest.raises(AttributeError, match="has no attribute 'original_module'"):
ModulesToSaveWrapper.original_module
def test_transient_attribute_access_attr_does_not_exist_on_modules_to_save(self, mlp):
# ensure that there is no weird infinite recursion when accessing a non-existing attribute on the
        # ModulesToSaveWrapper instance
config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
model = get_peft_model(mlp, config)
with pytest.raises(AttributeError, match="has no attribute 'foo'"):
model.lin1.foo
def test_transient_attribute_access_attr_does_not_exist_on_original_module(self, mlp):
# ensure that there is no weird infinite recursion when accessing a non-existing attribute on the
        # original module of the ModulesToSaveWrapper instance
config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
model = get_peft_model(mlp, config)
with pytest.raises(AttributeError, match="has no attribute 'foo'"):
with model.disable_adapter():
model.lin1.foo
def test_transient_attribute_access_non_existing_adapter(self, mlp):
# This should normally never happen, as the active adapter should always exist, but it's a failsafe
config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
model = get_peft_model(mlp, config)
model.base_model.model.lin1._active_adapter = "does-not-exist"
with pytest.raises(AttributeError, match="has no attribute 'weight'"):
model.lin1.weight
class TestModulesToSaveNameSubstringBug:
"""Test a bug that could occur with multiple modules to save where one adapter's name is a substring of another
adapter's name.
This bug was the result of an error in the logic of modifying the state_dict for modules_to_save in
set_peft_model_state_dict. The error in the logic was that it was checked if an entry from modules_to_save (a set
of strings) is a substring of a key of the state_dict. If it was, a new name was assigned to that key in the
    state_dict, which would allow the weight to be loaded later.
The issue that stems from the substring check occurs if there are multiple modules_to_save, and one of them has a
name that is a substring of another. So e.g. if one is named "classifier" and the other is named "classifier2",
there could be a false match.
This bug was reported in #2289.
"""
def get_model(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.lin = nn.Linear(5, 4)
# important: "classifier" is a substring of "classifier2", "classifier3", "classifier4"
self.classifier = nn.Linear(4, 2)
self.classifier2 = nn.Linear(4, 2)
self.classifier3 = nn.Linear(4, 2)
self.classifier4 = nn.Linear(4, 2)
def forward(self, x):
x = self.lin(x)
return self.classifier(x) + self.classifier2(x) + self.classifier3(x) + self.classifier4(x)
torch.manual_seed(0)
return MyModule()
@pytest.fixture
def path_merged_and_unmerged(self, tmp_path):
# Create 2 checkpoints:
# 1. merged: the model after calling merge_and_unload
# 2. unmerged: the PEFT model saved without calling merge_and_unload
path = tmp_path / "model.pt"
lora_config = LoraConfig(
target_modules=["lin"],
# important: "classifier" is a substring of "classifier2", "classifier3", "classifier4"
modules_to_save=["classifier", "classifier2", "classifier3", "classifier4"],
)
model = get_peft_model(self.get_model(), lora_config)
# mock training
for _ in range(5):
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
output = model(torch.randn(10, 5))
loss = output.sum()
loss.backward()
optimizer.step()
# save the peft model without merging
path_unmerged = tmp_path / "unmerged"
model.save_pretrained(path_unmerged)
# merge the model and save state_dict
path_merged = tmp_path / "merged"
merged = model.merge_and_unload()
state_dict = merged.state_dict()
torch.save(state_dict, path_merged)
return path_merged, path_unmerged
def test_load_merged_and_unmerged_same_weights(self, path_merged_and_unmerged):
        # Note that this test is quasi-flaky: it has a 1 in 4 chance of passing even without the bugfix. It passes when
# "classifier" happens to be the last element of the set model.modules_to_save. The order of the set is random.
        # It is not possible to just run this test multiple times to minimize the probability of this happening, because
# within the same process, the hash order is consistent. With the bug fix, this doesn't matter, as the test will
# always pass, but if there is a regression, there is a 1 in 4 chance of not catching it. Since the CI runs many
        # tests, it is overall very unlikely that none will catch it though. If you see this test failing in CI, be
# aware that some of the passing tests may just pass owing to randomness.
path_merged, path_unmerged = path_merged_and_unmerged
# load the merged model directly
state_dict = torch.load(path_merged, weights_only=True)
model = self.get_model()
model.load_state_dict(state_dict)
sd_merged = model.state_dict()
del model
# load the unmerged model and merge it
unmerged = PeftModel.from_pretrained(self.get_model(), path_unmerged)
sd_unmerged = unmerged.merge_and_unload().state_dict()
assert sd_merged.keys() == sd_unmerged.keys()
for key in sd_merged.keys():
param_merged = sd_merged[key]
param_unmerged = sd_unmerged[key]
assert torch.allclose(param_merged, param_unmerged)
| peft/tests/test_other.py/0 | {
"file_path": "peft/tests/test_other.py",
"repo_id": "peft",
"token_count": 5229
} |
- sections:
- local: index
title: Home
- local: quickstart
title: Quickstart
- local: installation
title: Installation
- local: changes
title: Changelog
title: Get started
- sections:
- local: feature_extraction
title: Using Pretrained Models as Feature Extractors
- local: training_script
title: Training With The Official Training Script
- local: hf_hub
title: Share and Load Models from the 🤗 Hugging Face Hub
title: Tutorials
- sections:
- local: models
title: Model Summaries
- local: results
title: Results
- local: models/adversarial-inception-v3
title: Adversarial Inception v3
- local: models/advprop
title: AdvProp (EfficientNet)
- local: models/big-transfer
title: Big Transfer (BiT)
- local: models/csp-darknet
title: CSP-DarkNet
- local: models/csp-resnet
title: CSP-ResNet
- local: models/csp-resnext
title: CSP-ResNeXt
- local: models/densenet
title: DenseNet
- local: models/dla
title: Deep Layer Aggregation
- local: models/dpn
title: Dual Path Network (DPN)
- local: models/ecaresnet
title: ECA-ResNet
- local: models/efficientnet
title: EfficientNet
- local: models/efficientnet-pruned
title: EfficientNet (Knapsack Pruned)
- local: models/ensemble-adversarial
title: Ensemble Adversarial Inception ResNet v2
- local: models/ese-vovnet
title: ESE-VoVNet
- local: models/fbnet
title: FBNet
- local: models/gloun-inception-v3
title: (Gluon) Inception v3
- local: models/gloun-resnet
title: (Gluon) ResNet
- local: models/gloun-resnext
title: (Gluon) ResNeXt
- local: models/gloun-senet
title: (Gluon) SENet
- local: models/gloun-seresnext
title: (Gluon) SE-ResNeXt
- local: models/gloun-xception
title: (Gluon) Xception
- local: models/hrnet
title: HRNet
- local: models/ig-resnext
title: Instagram ResNeXt WSL
- local: models/inception-resnet-v2
title: Inception ResNet v2
- local: models/inception-v3
title: Inception v3
- local: models/inception-v4
title: Inception v4
- local: models/legacy-se-resnet
title: (Legacy) SE-ResNet
- local: models/legacy-se-resnext
title: (Legacy) SE-ResNeXt
- local: models/legacy-senet
title: (Legacy) SENet
- local: models/mixnet
title: MixNet
- local: models/mnasnet
title: MnasNet
- local: models/mobilenet-v2
title: MobileNet v2
- local: models/mobilenet-v3
title: MobileNet v3
- local: models/nasnet
title: NASNet
- local: models/noisy-student
title: Noisy Student (EfficientNet)
- local: models/pnasnet
title: PNASNet
- local: models/regnetx
title: RegNetX
- local: models/regnety
title: RegNetY
- local: models/res2net
title: Res2Net
- local: models/res2next
title: Res2NeXt
- local: models/resnest
title: ResNeSt
- local: models/resnet
title: ResNet
- local: models/resnet-d
title: ResNet-D
- local: models/resnext
title: ResNeXt
- local: models/rexnet
title: RexNet
- local: models/se-resnet
title: SE-ResNet
- local: models/selecsls
title: SelecSLS
- local: models/seresnext
title: SE-ResNeXt
- local: models/skresnet
title: SK-ResNet
- local: models/skresnext
title: SK-ResNeXt
- local: models/spnasnet
title: SPNASNet
- local: models/ssl-resnet
title: SSL ResNet
- local: models/swsl-resnet
title: SWSL ResNet
- local: models/swsl-resnext
title: SWSL ResNeXt
- local: models/tf-efficientnet
title: (Tensorflow) EfficientNet
- local: models/tf-efficientnet-condconv
title: (Tensorflow) EfficientNet CondConv
- local: models/tf-efficientnet-lite
title: (Tensorflow) EfficientNet Lite
- local: models/tf-inception-v3
title: (Tensorflow) Inception v3
- local: models/tf-mixnet
title: (Tensorflow) MixNet
- local: models/tf-mobilenet-v3
title: (Tensorflow) MobileNet v3
- local: models/tresnet
title: TResNet
- local: models/wide-resnet
title: Wide ResNet
- local: models/xception
title: Xception
title: Model Pages
isExpanded: false
- sections:
- local: reference/models
title: Models
- local: reference/data
title: Data
- local: reference/optimizers
title: Optimizers
- local: reference/schedulers
title: Learning Rate Schedulers
title: Reference
| pytorch-image-models/hfdocs/source/_toctree.yml/0 | {
"file_path": "pytorch-image-models/hfdocs/source/_toctree.yml",
"repo_id": "pytorch-image-models",
"token_count": 1701
} |
# ECA-ResNet
An **ECA ResNet** is a variant on a [ResNet](https://paperswithcode.com/method/resnet) that utilises an [Efficient Channel Attention module](https://paperswithcode.com/method/efficient-channel-attention). Efficient Channel Attention is an architectural unit based on [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) that reduces model complexity without dimensionality reduction.
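The core idea of the ECA unit can be sketched in a few lines of PyTorch. The block below is a simplified illustration only (it fixes the 1D kernel size instead of deriving it from the channel count as the paper and `timm` do); it is not the exact `timm` implementation:

```py
>>> import torch
>>> import torch.nn as nn
>>> class ECA(nn.Module):
...     # minimal Efficient Channel Attention sketch (illustrative only)
...     def __init__(self, kernel_size=3):
...         super().__init__()
...         self.conv = nn.Conv1d(1, 1, kernel_size, padding=kernel_size // 2, bias=False)
...         self.gate = nn.Sigmoid()
...     def forward(self, x):                       # x: (B, C, H, W)
...         y = x.mean(dim=(2, 3)).unsqueeze(1)     # global average pool -> (B, 1, C)
...         y = self.gate(self.conv(y)).squeeze(1)  # per-channel attention weights -> (B, C)
...         return x * y.view(x.shape[0], -1, 1, 1) # rescale the input feature map
>>> out = ECA()(torch.randn(1, 64, 8, 8))
>>> print(out.shape)
>>> # prints: torch.Size([1, 64, 8, 8])
```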
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('ecaresnet101d', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `ecaresnet101d`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
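For instance, creating the model with `features_only=True` returns the intermediate feature maps directly (shown here with the preprocessed `tensor` from above):

```py
>>> feature_model = timm.create_model('ecaresnet101d', pretrained=True, features_only=True)
>>> feature_model.eval()
>>> with torch.no_grad():
...     features = feature_model(tensor)
>>> print([f.shape for f in features])  # one feature map per stage
```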
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('ecaresnet101d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{wang2020ecanet,
title={ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks},
author={Qilong Wang and Banggu Wu and Pengfei Zhu and Peihua Li and Wangmeng Zuo and Qinghua Hu},
year={2020},
eprint={1910.03151},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: ECAResNet
Paper:
Title: 'ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks'
URL: https://paperswithcode.com/paper/eca-net-efficient-channel-attention-for-deep
Models:
- Name: ecaresnet101d
In Collection: ECAResNet
Metadata:
FLOPs: 10377193728
Parameters: 44570000
File Size: 178815067
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Efficient Channel Attention
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x RTX 2080Ti GPUs
ID: ecaresnet101d
LR: 0.1
Epochs: 100
Layers: 101
Crop Pct: '0.875'
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1087
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet101D_281c5844.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.18%
Top 5 Accuracy: 96.06%
- Name: ecaresnet101d_pruned
In Collection: ECAResNet
Metadata:
FLOPs: 4463972081
Parameters: 24880000
File Size: 99852736
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Efficient Channel Attention
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: ecaresnet101d_pruned
Layers: 101
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1097
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45610/outputs/ECAResNet101D_P_75a3370e.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.82%
Top 5 Accuracy: 95.64%
- Name: ecaresnet50d
In Collection: ECAResNet
Metadata:
FLOPs: 5591090432
Parameters: 25580000
File Size: 102579290
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Efficient Channel Attention
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x RTX 2080Ti GPUs
ID: ecaresnet50d
LR: 0.1
Epochs: 100
Layers: 50
Crop Pct: '0.875'
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1045
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet50D_833caf58.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.61%
Top 5 Accuracy: 95.31%
- Name: ecaresnet50d_pruned
In Collection: ECAResNet
Metadata:
FLOPs: 3250730657
Parameters: 19940000
File Size: 79990436
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Efficient Channel Attention
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: ecaresnet50d_pruned
Layers: 50
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1055
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45899/outputs/ECAResNet50D_P_9c67f710.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.71%
Top 5 Accuracy: 94.88%
- Name: ecaresnetlight
In Collection: ECAResNet
Metadata:
FLOPs: 5276118784
Parameters: 30160000
File Size: 120956612
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Efficient Channel Attention
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: ecaresnetlight
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1077
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNetLight_4f34b35b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.46%
Top 5 Accuracy: 95.25%
--> | pytorch-image-models/hfdocs/source/models/ecaresnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/ecaresnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 3643
} |
# Inception v4
**Inception-v4** is a convolutional neural network architecture that builds on previous iterations of the Inception family by simplifying the architecture and using more inception modules than [Inception-v3](https://paperswithcode.com/method/inception-v3).
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('inception_v4', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `inception_v4`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('inception_v4', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
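As a rough illustration (not a replacement for the training script), a minimal loop over the `model` from the snippet above could look like this; `train_loader` is a placeholder for a `DataLoader` over your own dataset and the hyperparameters are arbitrary:

```py
>>> import torch
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
>>> criterion = torch.nn.CrossEntropyLoss()
>>> model.train()
>>> for images, targets in train_loader:  # train_loader: a DataLoader over your own dataset
...     optimizer.zero_grad()
...     loss = criterion(model(images), targets)
...     loss.backward()
...     optimizer.step()
```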
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{szegedy2016inceptionv4,
title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning},
author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi},
year={2016},
eprint={1602.07261},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: Inception v4
Paper:
Title: Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning
URL: https://paperswithcode.com/paper/inception-v4-inception-resnet-and-the-impact
Models:
- Name: inception_v4
In Collection: Inception v4
Metadata:
FLOPs: 15806527936
Parameters: 42680000
File Size: 171082495
Architecture:
- Average Pooling
- Dropout
- Inception-A
- Inception-B
- Inception-C
- Reduction-A
- Reduction-B
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 20x NVIDIA Kepler GPUs
ID: inception_v4
LR: 0.045
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_v4.py#L313
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/inceptionv4-8e4777a0.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 1.01%
Top 5 Accuracy: 16.85%
--> | pytorch-image-models/hfdocs/source/models/inception-v4.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/inception-v4.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1627
} |
# ResNet-D
**ResNet-D** is a modification on the [ResNet](https://paperswithcode.com/method/resnet) architecture that utilises an [average pooling](https://paperswithcode.com/method/average-pooling) tweak for downsampling. The motivation is that in the unmodified ResNet, the [1×1 convolution](https://paperswithcode.com/method/1x1-convolution) for the downsampling block ignores 3/4 of the input feature maps, so the shortcut is modified so that no information is ignored.
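Concretely, the tweak swaps the strided 1×1 convolution in the residual shortcut for a 2×2 average pool (which handles the stride) followed by a stride-1 1×1 convolution, so every input location contributes to the downsampled shortcut. A simplified sketch, not the exact `timm` implementation:

```py
>>> import torch.nn as nn
>>> def downsample_d(in_chs, out_chs, stride=2):
...     # ResNet-D shortcut: average pooling takes the stride, the 1x1 conv then sees all locations
...     return nn.Sequential(
...         nn.AvgPool2d(2, stride=stride, ceil_mode=True, count_include_pad=False),
...         nn.Conv2d(in_chs, out_chs, kernel_size=1, stride=1, bias=False),
...         nn.BatchNorm2d(out_chs),
...     )
```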
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('resnet101d', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `resnet101d`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('resnet101d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
## Citation
```BibTeX
@misc{he2018bag,
title={Bag of Tricks for Image Classification with Convolutional Neural Networks},
author={Tong He and Zhi Zhang and Hang Zhang and Zhongyue Zhang and Junyuan Xie and Mu Li},
year={2018},
eprint={1812.01187},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: ResNet-D
Paper:
Title: Bag of Tricks for Image Classification with Convolutional Neural Networks
URL: https://paperswithcode.com/paper/bag-of-tricks-for-image-classification-with
Models:
- Name: resnet101d
In Collection: ResNet-D
Metadata:
FLOPs: 13805639680
Parameters: 44570000
File Size: 178791263
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet101d
Crop Pct: '0.94'
Image Size: '256'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L716
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.31%
Top 5 Accuracy: 96.06%
- Name: resnet152d
In Collection: ResNet-D
Metadata:
FLOPs: 20155275264
Parameters: 60210000
File Size: 241596837
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet152d
Crop Pct: '0.94'
Image Size: '256'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L724
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 83.13%
Top 5 Accuracy: 96.35%
- Name: resnet18d
In Collection: ResNet-D
Metadata:
FLOPs: 2645205760
Parameters: 11710000
File Size: 46893231
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet18d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L649
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 72.27%
Top 5 Accuracy: 90.69%
- Name: resnet200d
In Collection: ResNet-D
Metadata:
FLOPs: 26034378752
Parameters: 64690000
File Size: 259662933
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet200d
Crop Pct: '0.94'
Image Size: '256'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L749
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 83.24%
Top 5 Accuracy: 96.49%
- Name: resnet26d
In Collection: ResNet-D
Metadata:
FLOPs: 3335276032
Parameters: 16010000
File Size: 64209122
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet26d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L683
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 76.69%
Top 5 Accuracy: 93.15%
- Name: resnet34d
In Collection: ResNet-D
Metadata:
FLOPs: 5026601728
Parameters: 21820000
File Size: 87369807
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet34d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L666
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.11%
Top 5 Accuracy: 93.38%
- Name: resnet50d
In Collection: ResNet-D
Metadata:
FLOPs: 5591002624
Parameters: 25580000
File Size: 102567109
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet50d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L699
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.55%
Top 5 Accuracy: 95.16%
--> | pytorch-image-models/hfdocs/source/models/resnet-d.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/resnet-d.mdx",
"repo_id": "pytorch-image-models",
"token_count": 3934
} |
""" ONNX-runtime validation script
This script was created to verify accuracy and performance of exported ONNX
models running with the onnxruntime. It utilizes the PyTorch dataloader/processing
pipeline for a fair comparison against the originals.
Copyright 2020 Ross Wightman
"""
import argparse
import numpy as np
import onnxruntime
from timm.data import create_loader, resolve_data_config, create_dataset
from timm.utils import AverageMeter
import time
parser = argparse.ArgumentParser(description='ONNX Validation')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--onnx-input', default='', type=str, metavar='PATH',
help='path to onnx model/weights file')
parser.add_argument('--onnx-output-opt', default='', type=str, metavar='PATH',
help='path to output optimized onnx graph')
parser.add_argument('--profile', action='store_true', default=False,
help='Enable profiler output.')
parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
help='number of data loading workers (default: 2)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--img-size', default=None, type=int,
metavar='N', help='Input image dimension, uses model default if empty')
parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override mean pixel value of dataset')
parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
                    help='Override std deviation of dataset')
parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT',
help='Override default crop pct of 0.875')
parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
help='Image resize interpolation type (overrides model)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
def main():
args = parser.parse_args()
args.gpu_id = 0
# Set graph optimization level
sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
if args.profile:
sess_options.enable_profiling = True
if args.onnx_output_opt:
sess_options.optimized_model_filepath = args.onnx_output_opt
session = onnxruntime.InferenceSession(args.onnx_input, sess_options)
data_config = resolve_data_config(vars(args))
loader = create_loader(
create_dataset('', args.data),
input_size=data_config['input_size'],
batch_size=args.batch_size,
use_prefetcher=False,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=args.workers,
crop_pct=data_config['crop_pct']
)
input_name = session.get_inputs()[0].name
batch_time = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
for i, (input, target) in enumerate(loader):
# run the net and return prediction
output = session.run([], {input_name: input.data.numpy()})
output = output[0]
# measure accuracy and record loss
prec1, prec5 = accuracy_np(output, target.numpy())
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print(
f'Test: [{i}/{len(loader)}]\t'
f'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {input.size(0) / batch_time.avg:.3f}/s, '
f'{100 * batch_time.avg / input.size(0):.3f} ms/sample) \t'
f'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
f'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'
)
print(f' * Prec@1 {top1.avg:.3f} ({100-top1.avg:.3f}) Prec@5 {top5.avg:.3f} ({100.-top5.avg:.3f})')
def accuracy_np(output, target):
max_indices = np.argsort(output, axis=1)[:, ::-1]
top5 = 100 * np.equal(max_indices[:, :5], target[:, np.newaxis]).sum(axis=1).mean()
top1 = 100 * np.equal(max_indices[:, 0], target).mean()
return top1, top5
if __name__ == '__main__':
main()
| pytorch-image-models/onnx_validate.py/0 | {
"file_path": "pytorch-image-models/onnx_validate.py",
"repo_id": "pytorch-image-models",
"token_count": 1960
} |
"""Run tests for all models
Tests that run on CI should have a specific marker, e.g. @pytest.mark.base. This
marker is used to parallelize the CI runs, with one runner for each marker.
If new tests are added, ensure that they use one of the existing markers
(documented in pyproject.toml > pytest > markers) or that a new marker is added
for this set of tests. If using a new marker, adjust the test matrix in
.github/workflows/tests.yml to run tests with this new marker, otherwise the
tests will be skipped on CI.
"""
import pytest
import torch
import platform
import os
import fnmatch
_IS_MAC = platform.system() == 'Darwin'
try:
from torchvision.models.feature_extraction import create_feature_extractor, get_graph_node_names, NodePathTracer
has_fx_feature_extraction = True
except ImportError:
has_fx_feature_extraction = False
import timm
from timm import list_models, list_pretrained, create_model, set_scriptable, get_pretrained_cfg_value
from timm.layers import Format, get_spatial_dim, get_channel_dim
from timm.models import get_notrace_modules, get_notrace_functions
import importlib
import os
torch_backend = os.environ.get('TORCH_BACKEND')
if torch_backend is not None:
importlib.import_module(torch_backend)
torch_device = os.environ.get('TORCH_DEVICE', 'cpu')
timeout = os.environ.get('TIMEOUT')
timeout120 = int(timeout) if timeout else 120
timeout240 = int(timeout) if timeout else 240
timeout360 = int(timeout) if timeout else 360
if hasattr(torch._C, '_jit_set_profiling_executor'):
# legacy executor is too slow to compile large models for unit tests
# no need for the fusion performance here
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(False)
# models with forward_intermediates() and support for FeatureGetterNet features_only wrapper
FEAT_INTER_FILTERS = [
'vision_transformer', 'vision_transformer_sam', 'vision_transformer_hybrid', 'vision_transformer_relpos',
'beit', 'mvitv2', 'eva', 'cait', 'xcit', 'volo', 'twins', 'deit', 'swin_transformer', 'swin_transformer_v2',
'swin_transformer_v2_cr', 'maxxvit', 'efficientnet', 'mobilenetv3', 'levit', 'efficientformer', 'resnet',
'regnet', 'byobnet', 'byoanet', 'mlp_mixer', 'hiera', 'fastvit', 'hieradet_sam2', 'aimv2*'
]
# transformer / hybrid models don't support full set of spatial / feature APIs and/or have spatial output.
NON_STD_FILTERS = [
'vit_*', 'tnt_*', 'pit_*', 'coat_*', 'cait_*', '*mixer_*', 'gmlp_*', 'resmlp_*', 'twins_*',
'convit_*', 'levit*', 'visformer*', 'deit*', 'xcit_*', 'crossvit_*', 'beit*', 'aimv2*',
'poolformer_*', 'volo_*', 'sequencer2d_*', 'mvitv2*', 'gcvit*', 'efficientformer*', 'sam_hiera*',
'eva_*', 'flexivit*', 'eva02*', 'samvit_*', 'efficientvit_m*', 'tiny_vit_*', 'hiera_*', 'vitamin*', 'test_vit*',
]
NUM_NON_STD = len(NON_STD_FILTERS)
# exclude models that cause specific test failures
if 'GITHUB_ACTIONS' in os.environ:
# GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models
EXCLUDE_FILTERS = [
'*efficientnet_l2*', '*resnext101_32x48d', '*in21k', '*152x4_bitm', '*101x3_bitm', '*50x3_bitm',
'*nfnet_f3*', '*nfnet_f4*', '*nfnet_f5*', '*nfnet_f6*', '*nfnet_f7*', '*efficientnetv2_xl*',
'*resnetrs350*', '*resnetrs420*', 'xcit_large_24_p8*', '*huge*', '*giant*', '*gigantic*',
'*enormous*', 'maxvit_xlarge*', 'regnet*1280', 'regnet*2560', '*_1b_*', '*_3b_*']
NON_STD_EXCLUDE_FILTERS = ['*huge*', '*giant*', '*gigantic*', '*enormous*', '*_1b_*', '*_3b_*']
else:
EXCLUDE_FILTERS = ['*enormous*']
NON_STD_EXCLUDE_FILTERS = ['*gigantic*', '*enormous*', '*_3b_*']
EXCLUDE_JIT_FILTERS = ['hiera_*']
TARGET_FWD_SIZE = MAX_FWD_SIZE = 384
TARGET_BWD_SIZE = 128
MAX_BWD_SIZE = 320
MAX_FWD_OUT_SIZE = 448
TARGET_JIT_SIZE = 128
MAX_JIT_SIZE = 320
TARGET_FFEAT_SIZE = 96
MAX_FFEAT_SIZE = 256
TARGET_FWD_FX_SIZE = 128
MAX_FWD_FX_SIZE = 256
TARGET_BWD_FX_SIZE = 128
MAX_BWD_FX_SIZE = 224
def _get_input_size(model=None, model_name='', target=None):
if model is None:
assert model_name, "One of model or model_name must be provided"
input_size = get_pretrained_cfg_value(model_name, 'input_size')
fixed_input_size = get_pretrained_cfg_value(model_name, 'fixed_input_size')
min_input_size = get_pretrained_cfg_value(model_name, 'min_input_size')
else:
default_cfg = model.default_cfg
input_size = default_cfg['input_size']
fixed_input_size = default_cfg.get('fixed_input_size', None)
min_input_size = default_cfg.get('min_input_size', None)
assert input_size is not None
if fixed_input_size:
return input_size
if min_input_size:
if target and max(input_size) > target:
input_size = min_input_size
else:
if target and max(input_size) > target:
input_size = tuple([min(x, target) for x in input_size])
return input_size
@pytest.mark.base
@pytest.mark.timeout(timeout240)
@pytest.mark.parametrize('model_name', list_pretrained('test_*'))
@pytest.mark.parametrize('batch_size', [1])
def test_model_inference(model_name, batch_size):
"""Run a single forward pass with each model"""
from PIL import Image
from huggingface_hub import snapshot_download
import tempfile
import safetensors
model = create_model(model_name, pretrained=True)
model.eval()
pp = timm.data.create_transform(**timm.data.resolve_data_config(model=model))
with tempfile.TemporaryDirectory() as temp_dir:
snapshot_download(
repo_id='timm/' + model_name, repo_type='model', local_dir=temp_dir, allow_patterns='test/*'
)
rand_tensors = safetensors.torch.load_file(os.path.join(temp_dir, 'test', 'rand_tensors.safetensors'))
owl_tensors = safetensors.torch.load_file(os.path.join(temp_dir, 'test', 'owl_tensors.safetensors'))
test_owl = Image.open(os.path.join(temp_dir, 'test', 'test_owl.jpg'))
with torch.no_grad():
rand_output = model(rand_tensors['input'])
rand_features = model.forward_features(rand_tensors['input'])
rand_pre_logits = model.forward_head(rand_features, pre_logits=True)
assert torch.allclose(rand_output, rand_tensors['output'], rtol=1e-3, atol=1e-4), 'rand output does not match'
assert torch.allclose(rand_features, rand_tensors['features'], rtol=1e-3, atol=1e-4), 'rand features do not match'
assert torch.allclose(rand_pre_logits, rand_tensors['pre_logits'], rtol=1e-3, atol=1e-4), 'rand pre_logits do not match'
def _test_owl(owl_input, tol=(1e-3, 1e-4)):
owl_output = model(owl_input)
owl_features = model.forward_features(owl_input)
owl_pre_logits = model.forward_head(owl_features.clone(), pre_logits=True)
assert owl_output.softmax(1).argmax(1) == 24 # owl
assert torch.allclose(owl_output, owl_tensors['output'], rtol=tol[0], atol=tol[1]), 'owl output does not match'
        assert torch.allclose(owl_features, owl_tensors['features'], rtol=tol[0], atol=tol[1]), 'owl features do not match'
        assert torch.allclose(owl_pre_logits, owl_tensors['pre_logits'], rtol=tol[0], atol=tol[1]), 'owl pre_logits do not match'
_test_owl(owl_tensors['input']) # test with original pp owl tensor
_test_owl(pp(test_owl).unsqueeze(0), tol=(1e-1, 1e-1)) # re-process from original jpg, Pillow output can change a lot btw ver
@pytest.mark.base
@pytest.mark.timeout(timeout120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward(model_name, batch_size):
"""Run a single forward pass with each model"""
model = create_model(model_name, pretrained=False)
model.eval()
input_size = _get_input_size(model=model, target=TARGET_FWD_SIZE)
if max(input_size) > MAX_FWD_SIZE:
pytest.skip("Fixed input size model > limit.")
inputs = torch.randn((batch_size, *input_size))
inputs = inputs.to(torch_device)
model.to(torch_device)
outputs = model(inputs)
assert outputs.shape[0] == batch_size
assert not torch.isnan(outputs).any(), 'Output included NaNs'
@pytest.mark.base
@pytest.mark.timeout(timeout120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS, name_matches_cfg=True))
@pytest.mark.parametrize('batch_size', [2])
def test_model_backward(model_name, batch_size):
"""Run a single forward pass with each model"""
input_size = _get_input_size(model_name=model_name, target=TARGET_BWD_SIZE)
if max(input_size) > MAX_BWD_SIZE:
pytest.skip("Fixed input size model > limit.")
model = create_model(model_name, pretrained=False, num_classes=42)
num_params = sum([x.numel() for x in model.parameters()])
model.train()
inputs = torch.randn((batch_size, *input_size))
inputs = inputs.to(torch_device)
model.to(torch_device)
outputs = model(inputs)
if isinstance(outputs, tuple):
outputs = torch.cat(outputs)
outputs.mean().backward()
for n, x in model.named_parameters():
assert x.grad is not None, f'No gradient for {n}'
num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None])
assert outputs.shape[-1] == 42
assert num_params == num_grad, 'Some parameters are missing gradients'
assert not torch.isnan(outputs).any(), 'Output included NaNs'
# models with extra conv/linear layers after pooling
EARLY_POOL_MODELS = (
timm.models.EfficientVit,
timm.models.EfficientVitLarge,
timm.models.HighPerfGpuNet,
timm.models.GhostNet,
timm.models.MetaNeXt, # InceptionNeXt
timm.models.MobileNetV3,
timm.models.RepGhostNet,
timm.models.VGG,
)
@pytest.mark.cfg
@pytest.mark.timeout(timeout360)
@pytest.mark.parametrize('model_name', list_models(
exclude_filters=EXCLUDE_FILTERS + NON_STD_FILTERS, include_tags=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_default_cfgs(model_name, batch_size):
"""Run a single forward pass with each model"""
model = create_model(model_name, pretrained=False)
model.eval()
model.to(torch_device)
assert getattr(model, 'num_classes') >= 0
assert getattr(model, 'num_features') > 0
assert getattr(model, 'head_hidden_size') > 0
state_dict = model.state_dict()
cfg = model.default_cfg
pool_size = cfg['pool_size']
input_size = model.default_cfg['input_size']
output_fmt = getattr(model, 'output_fmt', 'NCHW')
spatial_axis = get_spatial_dim(output_fmt)
assert len(spatial_axis) == 2 # TODO add 1D sequence support
feat_axis = get_channel_dim(output_fmt)
if all([x <= MAX_FWD_OUT_SIZE for x in input_size]) and \
not any([fnmatch.fnmatch(model_name, x) for x in EXCLUDE_FILTERS]):
# output sizes only checked if default res <= 448 * 448 to keep resource down
input_size = tuple([min(x, MAX_FWD_OUT_SIZE) for x in input_size])
input_tensor = torch.randn((batch_size, *input_size), device=torch_device)
# test forward_features (always unpooled) & forward_head w/ pre_logits
outputs = model.forward_features(input_tensor)
outputs_pre = model.forward_head(outputs, pre_logits=True)
assert outputs.shape[spatial_axis[0]] == pool_size[0], f'unpooled feature shape {outputs.shape} != config'
assert outputs.shape[spatial_axis[1]] == pool_size[1], f'unpooled feature shape {outputs.shape} != config'
assert outputs.shape[feat_axis] == model.num_features, f'unpooled feature dim {outputs.shape[feat_axis]} != model.num_features {model.num_features}'
assert outputs_pre.shape[1] == model.head_hidden_size, f'pre_logits feature dim {outputs_pre.shape[1]} != model.head_hidden_size {model.head_hidden_size}'
    # test forward after deleting the classifier, output should be pooled, size(-1) == model.num_features
model.reset_classifier(0)
assert model.num_classes == 0, f'Expected num_classes to be 0 after reset_classifier(0), but got {model.num_classes}'
model.to(torch_device)
outputs = model.forward(input_tensor)
assert len(outputs.shape) == 2
assert outputs.shape[1] == model.head_hidden_size, f'feature dim w/ removed classifier {outputs.shape[1]} != model.head_hidden_size {model.head_hidden_size}'
assert outputs.shape == outputs_pre.shape, f'output shape of pre_logits {outputs_pre.shape} does not match reset_head(0) {outputs.shape}'
# test model forward after removing pooling and classifier
if not isinstance(model, EARLY_POOL_MODELS):
model.reset_classifier(0, '') # reset classifier and disable global pooling
model.to(torch_device)
outputs = model.forward(input_tensor)
assert len(outputs.shape) == 4
assert outputs.shape[spatial_axis[0]] == pool_size[0] and outputs.shape[spatial_axis[1]] == pool_size[1]
# test classifier + global pool deletion via __init__
if 'pruned' not in model_name and not isinstance(model, EARLY_POOL_MODELS):
model = create_model(model_name, pretrained=False, num_classes=0, global_pool='').eval()
model.to(torch_device)
outputs = model.forward(input_tensor)
assert len(outputs.shape) == 4
assert outputs.shape[spatial_axis[0]] == pool_size[0] and outputs.shape[spatial_axis[1]] == pool_size[1]
# check classifier name matches default_cfg
if cfg.get('num_classes', None):
classifier = cfg['classifier']
if not isinstance(classifier, (tuple, list)):
classifier = classifier,
for c in classifier:
assert c + ".weight" in state_dict.keys(), f'{c} not in model params'
# check first conv(s) names match default_cfg
first_conv = cfg['first_conv']
if isinstance(first_conv, str):
first_conv = (first_conv,)
assert isinstance(first_conv, (tuple, list))
for fc in first_conv:
assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params'
@pytest.mark.cfg
@pytest.mark.timeout(timeout360)
@pytest.mark.parametrize('model_name', list_models(filter=NON_STD_FILTERS, exclude_filters=NON_STD_EXCLUDE_FILTERS, include_tags=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_default_cfgs_non_std(model_name, batch_size):
"""Run a single forward pass with each model"""
model = create_model(model_name, pretrained=False)
model.eval()
model.to(torch_device)
assert getattr(model, 'num_classes') >= 0
assert getattr(model, 'num_features') > 0
assert getattr(model, 'head_hidden_size') > 0
state_dict = model.state_dict()
cfg = model.default_cfg
input_size = _get_input_size(model=model)
if max(input_size) > 320: # FIXME const
pytest.skip("Fixed input size model > limit.")
input_tensor = torch.randn((batch_size, *input_size), device=torch_device)
feat_dim = getattr(model, 'feature_dim', None)
outputs = model.forward_features(input_tensor)
outputs_pre = model.forward_head(outputs, pre_logits=True)
if isinstance(outputs, (tuple, list)):
# cannot currently verify multi-tensor output.
pass
else:
if feat_dim is None:
feat_dim = -1 if outputs.ndim == 3 else 1
assert outputs.shape[feat_dim] == model.num_features
assert outputs_pre.shape[1] == model.head_hidden_size
    # test forward after deleting the classifier, output should be pooled, size(-1) == model.num_features
model.reset_classifier(0)
assert model.num_classes == 0, f'Expected num_classes to be 0 after reset_classifier(0), but got {model.num_classes}'
model.to(torch_device)
outputs = model.forward(input_tensor)
if isinstance(outputs, (tuple, list)):
outputs = outputs[0]
if feat_dim is None:
feat_dim = -1 if outputs.ndim == 3 else 1
assert outputs.shape[feat_dim] == model.head_hidden_size, 'pooled num_features != config'
assert outputs.shape == outputs_pre.shape
model = create_model(model_name, pretrained=False, num_classes=0).eval()
model.to(torch_device)
outputs = model.forward(input_tensor)
if isinstance(outputs, (tuple, list)):
outputs = outputs[0]
if feat_dim is None:
feat_dim = -1 if outputs.ndim == 3 else 1
assert outputs.shape[feat_dim] == model.num_features
# check classifier name matches default_cfg
if cfg.get('num_classes', None):
classifier = cfg['classifier']
if not isinstance(classifier, (tuple, list)):
classifier = classifier,
for c in classifier:
assert c + ".weight" in state_dict.keys(), f'{c} not in model params'
# check first conv(s) names match default_cfg
first_conv = cfg['first_conv']
if isinstance(first_conv, str):
first_conv = (first_conv,)
assert isinstance(first_conv, (tuple, list))
for fc in first_conv:
assert fc + ".weight" in state_dict.keys(), f'{fc} not in model params'
if 'GITHUB_ACTIONS' not in os.environ:
@pytest.mark.timeout(240)
@pytest.mark.parametrize('model_name', list_models(pretrained=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_load_pretrained(model_name, batch_size):
"""Create that pretrained weights load, verify support for in_chans != 3 while doing so."""
in_chans = 3 if 'pruned' in model_name else 1 # pruning not currently supported with in_chans change
create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=5)
create_model(model_name, pretrained=True, in_chans=in_chans, num_classes=0)
@pytest.mark.timeout(240)
@pytest.mark.parametrize('model_name', list_models(pretrained=True, exclude_filters=NON_STD_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_features_pretrained(model_name, batch_size):
"""Create that pretrained weights load when features_only==True."""
create_model(model_name, pretrained=True, features_only=True)
@pytest.mark.torchscript
@pytest.mark.timeout(timeout120)
@pytest.mark.parametrize(
'model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS, name_matches_cfg=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_torchscript(model_name, batch_size):
"""Run a single forward pass with each model"""
input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE)
if max(input_size) > MAX_JIT_SIZE:
pytest.skip("Fixed input size model > limit.")
with set_scriptable(True):
model = create_model(model_name, pretrained=False)
model.eval()
model = torch.jit.script(model)
model.to(torch_device)
outputs = model(torch.randn((batch_size, *input_size)))
assert outputs.shape[0] == batch_size
assert not torch.isnan(outputs).any(), 'Output included NaNs'
EXCLUDE_FEAT_FILTERS = [
'*pruned*', # hopefully fix at some point
] + NON_STD_FILTERS
if 'GITHUB_ACTIONS' in os.environ: # and 'Linux' in platform.system():
# GitHub Linux runner is slower and hits memory limits sooner than MacOS, exclude bigger models
EXCLUDE_FEAT_FILTERS += ['*resnext101_32x32d', '*resnext101_32x16d']
@pytest.mark.features
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FEAT_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_features(model_name, batch_size):
"""Run a single forward pass with each model in feature extraction mode"""
model = create_model(model_name, pretrained=False, features_only=True)
model.eval()
expected_channels = model.feature_info.channels()
expected_reduction = model.feature_info.reduction()
assert len(expected_channels) >= 3 # all models here should have at least 3 default feat levels
input_size = _get_input_size(model=model, target=TARGET_FFEAT_SIZE)
if max(input_size) > MAX_FFEAT_SIZE:
pytest.skip("Fixed input size model > limit.")
output_fmt = getattr(model, 'output_fmt', 'NCHW')
feat_axis = get_channel_dim(output_fmt)
spatial_axis = get_spatial_dim(output_fmt)
import math
outputs = model(torch.randn((batch_size, *input_size)))
assert len(expected_channels) == len(outputs)
spatial_size = input_size[-2:]
for e, r, o in zip(expected_channels, expected_reduction, outputs):
assert e == o.shape[feat_axis]
assert o.shape[spatial_axis[0]] <= math.ceil(spatial_size[0] / r) + 1
assert o.shape[spatial_axis[1]] <= math.ceil(spatial_size[1] / r) + 1
assert o.shape[0] == batch_size
assert not torch.isnan(o).any()
@pytest.mark.features
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(module=FEAT_INTER_FILTERS, exclude_filters=EXCLUDE_FILTERS + ['*pruned*']))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_intermediates_features(model_name, batch_size):
"""Run a single forward pass with each model in feature extraction mode"""
model = create_model(model_name, pretrained=False, features_only=True, feature_cls='getter')
model.eval()
expected_channels = model.feature_info.channels()
expected_reduction = model.feature_info.reduction()
input_size = _get_input_size(model=model, target=TARGET_FFEAT_SIZE)
if max(input_size) > MAX_FFEAT_SIZE:
pytest.skip("Fixed input size model > limit.")
output_fmt = getattr(model, 'output_fmt', 'NCHW')
feat_axis = get_channel_dim(output_fmt)
spatial_axis = get_spatial_dim(output_fmt)
import math
outputs = model(torch.randn((batch_size, *input_size)))
assert len(expected_channels) == len(outputs)
spatial_size = input_size[-2:]
for e, r, o in zip(expected_channels, expected_reduction, outputs):
print(o.shape)
assert e == o.shape[feat_axis]
assert o.shape[spatial_axis[0]] <= math.ceil(spatial_size[0] / r) + 1
assert o.shape[spatial_axis[1]] <= math.ceil(spatial_size[1] / r) + 1
assert o.shape[0] == batch_size
assert not torch.isnan(o).any()
@pytest.mark.features
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(module=FEAT_INTER_FILTERS, exclude_filters=EXCLUDE_FILTERS + ['*pruned*']))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_intermediates(model_name, batch_size):
"""Run a single forward pass with each model in feature extraction mode"""
model = create_model(model_name, pretrained=False)
model.eval()
feature_info = timm.models.FeatureInfo(model.feature_info, len(model.feature_info))
expected_channels = feature_info.channels()
expected_reduction = feature_info.reduction()
assert len(expected_channels) >= 3 # all models here should have at least 3 feature levels
input_size = _get_input_size(model=model, target=TARGET_FFEAT_SIZE)
if max(input_size) > MAX_FFEAT_SIZE:
pytest.skip("Fixed input size model > limit.")
output_fmt = 'NCHW' # NOTE output_fmt determined by forward_intermediates() arg, not model attribute
feat_axis = get_channel_dim(output_fmt)
spatial_axis = get_spatial_dim(output_fmt)
import math
output, intermediates = model.forward_intermediates(
torch.randn((batch_size, *input_size)),
output_fmt=output_fmt,
)
assert len(expected_channels) == len(intermediates)
spatial_size = input_size[-2:]
for e, r, o in zip(expected_channels, expected_reduction, intermediates):
assert e == o.shape[feat_axis]
assert o.shape[spatial_axis[0]] <= math.ceil(spatial_size[0] / r) + 1
assert o.shape[spatial_axis[1]] <= math.ceil(spatial_size[1] / r) + 1
assert o.shape[0] == batch_size
assert not torch.isnan(o).any()
def _create_fx_model(model, train=False):
# This block of code does a bit of juggling to handle any case where there are multiple outputs in train mode
# So we trace once and look at the graph, and get the indices of the nodes that lead into the original fx output
# node. Then we use those indices to select from train_nodes returned by torchvision get_graph_node_names
tracer_kwargs = dict(
leaf_modules=get_notrace_modules(),
autowrap_functions=get_notrace_functions(),
#enable_cpatching=True,
param_shapes_constant=True
)
train_nodes, eval_nodes = get_graph_node_names(model, tracer_kwargs=tracer_kwargs)
eval_return_nodes = [eval_nodes[-1]]
train_return_nodes = [train_nodes[-1]]
if train:
tracer = NodePathTracer(**tracer_kwargs)
graph = tracer.trace(model)
graph_nodes = list(reversed(graph.nodes))
output_node_names = [n.name for n in graph_nodes[0]._input_nodes.keys()]
graph_node_names = [n.name for n in graph_nodes]
output_node_indices = [-graph_node_names.index(node_name) for node_name in output_node_names]
train_return_nodes = [train_nodes[ix] for ix in output_node_indices]
fx_model = create_feature_extractor(
model,
train_return_nodes=train_return_nodes,
eval_return_nodes=eval_return_nodes,
tracer_kwargs=tracer_kwargs,
)
return fx_model
EXCLUDE_FX_FILTERS = ['vit_gi*', 'hiera*']
# not enough memory to run fx on more models than other tests
if 'GITHUB_ACTIONS' in os.environ:
EXCLUDE_FX_FILTERS += [
'beit_large*',
'mixer_l*',
'*nfnet_f2*',
'*resnext101_32x32d',
'resnetv2_152x2*',
'resmlp_big*',
'resnetrs270',
'swin_large*',
'vgg*',
'vit_large*',
'vit_base_patch8*',
'xcit_large*',
]
@pytest.mark.fxforward
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_fx(model_name, batch_size):
"""
Symbolically trace each model and run single forward pass through the resulting GraphModule
Also check that the output of a forward pass through the GraphModule is the same as that from the original Module
"""
if not has_fx_feature_extraction:
pytest.skip("Can't test FX. Torch >= 1.10 and Torchvision >= 0.11 are required.")
model = create_model(model_name, pretrained=False)
model.eval()
input_size = _get_input_size(model=model, target=TARGET_FWD_FX_SIZE)
if max(input_size) > MAX_FWD_FX_SIZE:
pytest.skip("Fixed input size model > limit.")
with torch.no_grad():
inputs = torch.randn((batch_size, *input_size))
outputs = model(inputs)
if isinstance(outputs, tuple):
outputs = torch.cat(outputs)
model = _create_fx_model(model)
fx_outputs = tuple(model(inputs).values())
if isinstance(fx_outputs, tuple):
fx_outputs = torch.cat(fx_outputs)
assert torch.all(fx_outputs == outputs)
assert outputs.shape[0] == batch_size
assert not torch.isnan(outputs).any(), 'Output included NaNs'
@pytest.mark.fxbackward
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', list_models(
exclude_filters=EXCLUDE_FILTERS + EXCLUDE_FX_FILTERS, name_matches_cfg=True))
@pytest.mark.parametrize('batch_size', [2])
def test_model_backward_fx(model_name, batch_size):
"""Symbolically trace each model and run single backward pass through the resulting GraphModule"""
if not has_fx_feature_extraction:
pytest.skip("Can't test FX. Torch >= 1.10 and Torchvision >= 0.11 are required.")
input_size = _get_input_size(model_name=model_name, target=TARGET_BWD_FX_SIZE)
if max(input_size) > MAX_BWD_FX_SIZE:
pytest.skip("Fixed input size model > limit.")
model = create_model(model_name, pretrained=False, num_classes=42)
model.train()
num_params = sum([x.numel() for x in model.parameters()])
if 'GITHUB_ACTIONS' in os.environ and num_params > 100e6:
pytest.skip("Skipping FX backward test on model with more than 100M params.")
model = _create_fx_model(model, train=True)
outputs = tuple(model(torch.randn((batch_size, *input_size))).values())
if isinstance(outputs, tuple):
outputs = torch.cat(outputs)
outputs.mean().backward()
for n, x in model.named_parameters():
assert x.grad is not None, f'No gradient for {n}'
num_grad = sum([x.grad.numel() for x in model.parameters() if x.grad is not None])
assert outputs.shape[-1] == 42
assert num_params == num_grad, 'Some parameters are missing gradients'
assert not torch.isnan(outputs).any(), 'Output included NaNs'
if 'GITHUB_ACTIONS' not in os.environ:
# FIXME this test is causing GitHub actions to run out of RAM and abruptly kill the test process
# reason: model is scripted after fx tracing, but beit has torch.jit.is_scripting() control flow
EXCLUDE_FX_JIT_FILTERS = [
'deit_*_distilled_patch16_224',
'levit*',
'pit_*_distilled_224',
] + EXCLUDE_FX_FILTERS
@pytest.mark.timeout(120)
@pytest.mark.parametrize(
'model_name', list_models(
exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS + EXCLUDE_FX_JIT_FILTERS, name_matches_cfg=True))
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_fx_torchscript(model_name, batch_size):
"""Symbolically trace each model, script it, and run single forward pass"""
if not has_fx_feature_extraction:
pytest.skip("Can't test FX. Torch >= 1.10 and Torchvision >= 0.11 are required.")
input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE)
if max(input_size) > MAX_JIT_SIZE:
pytest.skip("Fixed input size model > limit.")
with set_scriptable(True):
model = create_model(model_name, pretrained=False)
model.eval()
model = torch.jit.script(_create_fx_model(model))
with torch.no_grad():
outputs = tuple(model(torch.randn((batch_size, *input_size))).values())
if isinstance(outputs, tuple):
outputs = torch.cat(outputs)
assert outputs.shape[0] == batch_size
assert not torch.isnan(outputs).any(), 'Output included NaNs'
@pytest.mark.timeout(120)
@pytest.mark.parametrize('model_name', ["regnetx_002"])
@pytest.mark.parametrize('batch_size', [1])
def test_model_forward_torchscript_with_features_fx(model_name, batch_size):
"""Create a model with feature extraction based on fx, script it, and run
a single forward pass"""
if not has_fx_feature_extraction:
pytest.skip("Can't test FX. Torch >= 1.10 and Torchvision >= 0.11 are required.")
allowed_models = list_models(
exclude_filters=EXCLUDE_FILTERS + EXCLUDE_JIT_FILTERS + EXCLUDE_FX_JIT_FILTERS,
name_matches_cfg=True
)
assert model_name in allowed_models, f"{model_name=} not supported for this test"
input_size = _get_input_size(model_name=model_name, target=TARGET_JIT_SIZE)
assert max(input_size) <= MAX_JIT_SIZE, "Fixed input size model > limit. Pick a different model to run this test"
with set_scriptable(True):
model = create_model(model_name, pretrained=False, features_only=True, feature_cfg={"feature_cls": "fx"})
model.eval()
model = torch.jit.script(model)
with torch.no_grad():
outputs = model(torch.randn((batch_size, *input_size)))
assert isinstance(outputs, list)
for tensor in outputs:
assert tensor.shape[0] == batch_size
assert not torch.isnan(tensor).any(), 'Output included NaNs' | pytorch-image-models/tests/test_models.py/0 | {
"file_path": "pytorch-image-models/tests/test_models.py",
"repo_id": "pytorch-image-models",
"token_count": 12867
} |
import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
class OrderedDistributedSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
process can pass a DistributedSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, num_replicas=None, rank=None):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
def __iter__(self):
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
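# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original timm source). It shows how
# OrderedDistributedSampler pads and then stride-subsamples indices per rank.
# `_FakeDataset` is a hypothetical stand-in; num_replicas/rank are passed
# explicitly so torch.distributed does not need to be initialized.
def _demo_ordered_distributed_sampler():
    class _FakeDataset:
        def __len__(self):
            return 10  # deliberately not divisible by the 4 replicas below

    per_rank = []
    for rank in range(4):
        sampler = OrderedDistributedSampler(_FakeDataset(), num_replicas=4, rank=rank)
        per_rank.append(list(iter(sampler)))
    # total_size = ceil(10 / 4) * 4 = 12, so indices 0 and 1 are re-used as padding
    # and every rank yields exactly 3 ordered, interleaved indices.
    assert all(len(indices) == 3 for indices in per_rank)
    return per_rank  # e.g. rank 0 -> [0, 4, 8], rank 3 -> [3, 7, 1]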
class RepeatAugSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset for distributed,
with repeated augmentation.
    It ensures that each augmented version of a sample is visible to a
    different process (GPU). Heavily based on torch.utils.data.DistributedSampler.
    This sampler was taken from https://github.com/facebookresearch/deit/blob/0c4b8f60/samplers.py
    Copyright (c) 2015-present, Facebook, Inc.
"""
def __init__(
self,
dataset,
num_replicas=None,
rank=None,
shuffle=True,
num_repeats=3,
selected_round=256,
selected_ratio=0,
):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.shuffle = shuffle
self.num_repeats = num_repeats
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * num_repeats / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
# Determine the number of samples to select per epoch for each rank.
# num_selected logic defaults to be the same as original RASampler impl, but this one can be tweaked
# via selected_ratio and selected_round args.
selected_ratio = selected_ratio or num_replicas # ratio to reduce selected samples by, num_replicas if 0
if selected_round:
self.num_selected_samples = int(math.floor(
len(self.dataset) // selected_round * selected_round / selected_ratio))
else:
self.num_selected_samples = int(math.ceil(len(self.dataset) / selected_ratio))
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g)
else:
indices = torch.arange(start=0, end=len(self.dataset))
# produce repeats e.g. [0, 0, 0, 1, 1, 1, 2, 2, 2....]
if isinstance(self.num_repeats, float) and not self.num_repeats.is_integer():
# resample for repeats w/ non-integer ratio
repeat_size = math.ceil(self.num_repeats * len(self.dataset))
indices = indices[torch.tensor([int(i // self.num_repeats) for i in range(repeat_size)])]
else:
indices = torch.repeat_interleave(indices, repeats=int(self.num_repeats), dim=0)
indices = indices.tolist() # leaving as tensor thrashes dataloader memory
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
if padding_size > 0:
indices += indices[:padding_size]
assert len(indices) == self.total_size
# subsample per rank
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
# return up to num selected samples
return iter(indices[:self.num_selected_samples])
def __len__(self):
return self.num_selected_samples
def set_epoch(self, epoch):
self.epoch = epoch
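# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original timm source). With
# shuffle=False and num_repeats=3, each dataset index appears three times and
# the repeats are interleaved across ranks, so different augmented copies of a
# sample land on different processes. `_FakeDataset` is hypothetical;
# selected_round=0 keeps this tiny example from rounding num_selected_samples
# down to zero.
def _demo_repeat_aug_sampler():
    class _FakeDataset:
        def __len__(self):
            return 8

    selected = []
    for rank in range(2):
        sampler = RepeatAugSampler(
            _FakeDataset(), num_replicas=2, rank=rank, shuffle=False,
            num_repeats=3, selected_round=0)
        selected.append(list(iter(sampler)))
    # Each rank yields num_selected_samples = ceil(8 / 2) = 4 indices per epoch,
    # e.g. rank 0 -> [0, 0, 1, 2] and rank 1 -> [0, 1, 1, 2].
    assert all(len(indices) == 4 for indices in selected)
    return selected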
| pytorch-image-models/timm/data/distributed_sampler.py/0 | {
"file_path": "pytorch-image-models/timm/data/distributed_sampler.py",
"repo_id": "pytorch-image-models",
"token_count": 2276
} |
""" Dataset reader for webdataset
Hacked together by / Copyright 2022 Ross Wightman
"""
import io
import json
import logging
import math
import os
import random
import sys
from dataclasses import dataclass
from functools import partial
from itertools import islice
from typing import Any, Callable, Dict, List, Optional, Tuple
import torch
import torch.distributed as dist
import yaml
from PIL import Image
from torch.utils.data import Dataset, IterableDataset, get_worker_info
try:
import webdataset as wds
from webdataset.filters import _shuffle, getfirst
from webdataset.shardlists import expand_urls
from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample
except ImportError:
wds = None
expand_urls = None
from .class_map import load_class_map
from .reader import Reader
from .shared_count import SharedCount
_logger = logging.getLogger(__name__)
SAMPLE_SHUFFLE_SIZE = int(os.environ.get('WDS_SHUFFLE_SIZE', 8192))
SAMPLE_INITIAL_SIZE = int(os.environ.get('WDS_INITIAL_SIZE', 2048))
def _load_info(root, names=('_info.json', 'info.json')):
if isinstance(names, str):
names = (names,)
tried = []
err_str = ''
for n in names:
full_path = os.path.join(root, n)
try:
tried.append(full_path)
with wds.gopen(full_path) as f:
if n.endswith('.json'):
info_dict = json.load(f)
else:
info_dict = yaml.safe_load(f)
return info_dict
except Exception as e:
err_str = str(e)
_logger.warning(
f'Dataset info file not found at {tried}. Error: {err_str}. '
'Falling back to provided split and size arg.')
return {}
@dataclass
class SplitInfo:
num_samples: int
filenames: Tuple[str]
shard_lengths: Tuple[int] = ()
alt_label: str = ''
name: str = ''
def _parse_split_info(split: str, info: Dict):
def _info_convert(dict_info):
return SplitInfo(
num_samples=dict_info['num_samples'],
filenames=tuple(dict_info['filenames']),
shard_lengths=tuple(dict_info['shard_lengths']),
alt_label=dict_info.get('alt_label', ''),
name=dict_info['name'],
)
if 'tar' in split or '..' in split:
# split in WDS string braceexpand format, sample count can be included with a | separator
        # ex: `dataset-split-{0000..9999}.tar|100000` for 10,000 shards covering 100,000 samples
split = split.split('|')
num_samples = 0
split_name = ''
if len(split) > 1:
num_samples = int(split[1])
split = split[0]
if '::' not in split:
split_parts = split.split('-', 3)
split_idx = len(split_parts) - 1
if split_idx and 'splits' in info and split_parts[split_idx] in info['splits']:
split_name = split_parts[split_idx]
split_filenames = expand_urls(split)
if split_name:
split_info = info['splits'][split_name]
if not num_samples:
_fc = {f: c for f, c in zip(split_info['filenames'], split_info['shard_lengths'])}
num_samples = sum(_fc[f] for f in split_filenames)
split_info['filenames'] = tuple(_fc.keys())
split_info['shard_lengths'] = tuple(_fc.values())
split_info['num_samples'] = num_samples
split_info = _info_convert(split_info)
else:
split_info = SplitInfo(
name=split_name,
num_samples=num_samples,
filenames=split_filenames,
)
else:
if 'splits' not in info or split not in info['splits']:
raise RuntimeError(f"split {split} not found in info ({info.get('splits', {}).keys()})")
split_info = info['splits'][split]
split_info = _info_convert(split_info)
return split_info
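# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original timm source) of the
# non-brace-expand path through _parse_split_info: the split name is looked up
# in a hypothetical info dict shaped like the `_info.json` files that
# _load_info() returns.
def _demo_parse_split_info():
    info = {
        'splits': {
            'validation': {
                'name': 'validation',
                'num_samples': 50000,
                'filenames': ['validation-0000.tar', 'validation-0001.tar'],
                'shard_lengths': [25000, 25000],
            },
        },
    }
    split_info = _parse_split_info('validation', info)
    assert split_info.num_samples == 50000
    assert len(split_info.filenames) == len(split_info.shard_lengths) == 2
    return split_info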
def log_and_continue(exn):
"""Call in an exception handler to ignore exceptions, issue a warning, and continue."""
_logger.warning(f'Handling webdataset error ({repr(exn)}). Ignoring.')
# NOTE: try force an exit on errors that are clearly code / config and not transient
if isinstance(exn, TypeError):
raise exn
return True
def _decode(
sample,
image_key='jpg',
image_mode='RGB',
target_key='cls',
alt_label=''
):
""" Custom sample decode
* decode and convert PIL Image
* cls byte string label to int
* pass through JSON byte string (if it exists) without parse
"""
# decode class label, skip if alternate label not valid
if alt_label:
# alternative labels are encoded in json metadata
meta = json.loads(sample['json'])
class_label = int(meta[alt_label])
if class_label < 0:
# skipped labels currently encoded as -1, may change to a null/None value
return None
else:
class_label = int(sample[target_key])
# decode image
img = getfirst(sample, image_key)
with io.BytesIO(img) as b:
img = Image.open(b)
img.load()
if image_mode:
img = img.convert(image_mode)
# json passed through in undecoded state
decoded = dict(jpg=img, cls=class_label, json=sample.get('json', None))
return decoded
def pytorch_worker_seed():
"""get dataloader worker seed from pytorch"""
worker_info = get_worker_info()
if worker_info is not None:
# favour the seed already created for pytorch dataloader workers if it exists
return worker_info.seed
# fallback to wds rank based seed
return wds.utils.pytorch_worker_seed()
if wds is not None:
# conditional to avoid mandatory wds import (via inheritance of wds.PipelineStage)
class detshuffle2(wds.PipelineStage):
def __init__(
self,
bufsize=1000,
initial=100,
seed=0,
epoch=-1,
):
self.bufsize = bufsize
self.initial = initial
self.seed = seed
self.epoch = epoch
def run(self, src):
if isinstance(self.epoch, SharedCount):
epoch = self.epoch.value
else:
                # NOTE: this epoch tracking is problematic in a multiprocess (dataloader workers or train)
# situation as different workers may wrap at different times (or not at all).
self.epoch += 1
epoch = self.epoch
if self.seed < 0:
seed = pytorch_worker_seed() + epoch
else:
seed = self.seed + epoch
# _logger.info(f'shuffle seed: {self.seed}, {seed}, epoch: {epoch}') # FIXME temporary
rng = random.Random(seed)
return _shuffle(src, self.bufsize, self.initial, rng)
else:
detshuffle2 = None
class ResampledShards2(IterableDataset):
"""An iterable dataset yielding a list of urls."""
def __init__(
self,
urls,
nshards=sys.maxsize,
worker_seed=None,
deterministic=True,
epoch=-1,
):
"""Sample shards from the shard list with replacement.
:param urls: a list of URLs as a Python list or brace notation string
"""
super().__init__()
urls = wds.shardlists.expand_urls(urls)
self.urls = urls
assert isinstance(self.urls[0], str)
self.nshards = nshards
self.rng = random.Random()
self.worker_seed = pytorch_worker_seed if worker_seed is None else worker_seed
self.deterministic = deterministic
self.epoch = epoch
def __iter__(self):
"""Return an iterator over the shards."""
if isinstance(self.epoch, SharedCount):
epoch = self.epoch.value
else:
            # NOTE: this epoch tracking is problematic in a multiprocess (dataloader workers or train)
# situation as different workers may wrap at different times (or not at all).
self.epoch += 1
epoch = self.epoch
if self.deterministic:
# reset seed w/ epoch if deterministic, worker seed should be deterministic due to arg.seed
self.rng = random.Random(self.worker_seed() + epoch)
for _ in range(self.nshards):
index = self.rng.randint(0, len(self.urls) - 1)
yield dict(url=self.urls[index])
class ReaderWds(Reader):
def __init__(
self,
root: str,
name: Optional[str] = None,
split: str = 'train',
is_training: bool = False,
num_samples: Optional[int] = None,
batch_size: int = 1,
repeats: int = 0,
seed: int = 42,
class_map: Optional[dict] = None,
input_key: str = 'jpg;png;webp',
input_img_mode: str = 'RGB',
target_key: str = 'cls',
target_img_mode: str = '',
filename_key: str = 'filename',
sample_shuffle_size: Optional[int] = None,
sample_initial_size: Optional[int] = None,
):
super().__init__()
if wds is None:
raise RuntimeError(
'Please install webdataset 0.2.x package `pip install git+https://github.com/webdataset/webdataset`.')
self.root = root
self.is_training = is_training
self.batch_size = batch_size
self.repeats = repeats
self.common_seed = seed # a seed that's fixed across all worker / distributed instances
self.shard_shuffle_size = 500
self.sample_shuffle_size = sample_shuffle_size or SAMPLE_SHUFFLE_SIZE
self.sample_initial_size = sample_initial_size or SAMPLE_INITIAL_SIZE
self.input_key = input_key
self.input_img_mode = input_img_mode
self.target_key = target_key
self.filename_key = filename_key
self.key_ext = '.JPEG' # extension to add to key for original filenames (DS specific, default ImageNet)
self.info = _load_info(self.root)
self.split_info = _parse_split_info(split, self.info)
if num_samples is not None:
self.num_samples = num_samples
else:
self.num_samples = self.split_info.num_samples
if is_training and not self.num_samples:
            raise RuntimeError('Invalid split definition, num_samples not specified in train mode.')
self.remap_class = False
if class_map:
self.class_to_idx = load_class_map(class_map)
self.remap_class = True
else:
self.class_to_idx = {}
# Distributed world state
self.dist_rank = 0
self.dist_num_replicas = 1
if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1:
self.dist_rank = dist.get_rank()
self.dist_num_replicas = dist.get_world_size()
# Attributes that are updated in _lazy_init
self.worker_info = None
self.worker_id = 0
self.worker_seed = seed # seed unique to each worker instance
self.num_workers = 1
self.global_worker_id = 0
self.global_num_workers = 1
self.init_count = 0
self.epoch_count = SharedCount()
# DataPipeline is lazy init, the majority of WDS DataPipeline could be init here, BUT, shuffle seed
# is not handled in manner where it can be deterministic for each worker AND initialized up front
self.ds = None
def set_epoch(self, count):
self.epoch_count.value = count
def set_loader_cfg(
self,
num_workers: Optional[int] = None,
):
if self.ds is not None:
return
if num_workers is not None:
self.num_workers = num_workers
self.global_num_workers = self.dist_num_replicas * self.num_workers
def _lazy_init(self):
""" Lazily initialize worker (in worker processes)
"""
if self.worker_info is None:
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
self.worker_info = worker_info
self.worker_id = worker_info.id
self.worker_seed = worker_info.seed
self.num_workers = worker_info.num_workers
self.global_num_workers = self.dist_num_replicas * self.num_workers
self.global_worker_id = self.dist_rank * self.num_workers + self.worker_id
# init data pipeline
abs_shard_filenames = [os.path.join(self.root, f) for f in self.split_info.filenames]
pipeline = [wds.SimpleShardList(abs_shard_filenames)]
# at this point we have an iterator over all the shards
if self.is_training:
pipeline.extend([
detshuffle2(
self.shard_shuffle_size,
seed=self.common_seed,
epoch=self.epoch_count,
),
self._split_by_node_and_worker,
# at this point, we have an iterator over the shards assigned to each worker
wds.tarfile_to_samples(handler=log_and_continue),
wds.shuffle(
bufsize=self.sample_shuffle_size,
initial=self.sample_initial_size,
rng=random.Random(self.worker_seed) # this is why we lazy-init whole DataPipeline
),
])
else:
pipeline.extend([
self._split_by_node_and_worker,
# at this point, we have an iterator over the shards assigned to each worker
wds.tarfile_to_samples(handler=log_and_continue),
])
pipeline.extend([
wds.map(
partial(
_decode,
image_key=self.input_key,
image_mode=self.input_img_mode,
alt_label=self.split_info.alt_label,
),
handler=log_and_continue,
),
wds.rename(image=self.input_key, target=self.target_key)
])
self.ds = wds.DataPipeline(*pipeline)
def _split_by_node_and_worker(self, src):
if self.global_num_workers > 1:
for s in islice(src, self.global_worker_id, None, self.global_num_workers):
yield s
else:
for s in src:
yield s
def _num_samples_per_worker(self):
num_worker_samples = self.num_samples / max(self.global_num_workers, self.dist_num_replicas)
if self.is_training or self.dist_num_replicas > 1:
num_worker_samples = math.ceil(num_worker_samples)
if self.is_training:
num_worker_samples = math.ceil(num_worker_samples / self.batch_size) * self.batch_size
return int(num_worker_samples)
def __iter__(self):
if self.ds is None:
self._lazy_init()
num_worker_samples = self._num_samples_per_worker()
if self.is_training or self.dist_num_replicas > 1:
# NOTE: doing distributed validation w/ WDS is messy, hard to meet constraints that
# same # of batches needed across all replicas w/ seeing each sample once.
# with_epoch() is simple but could miss a shard's worth of samples in some workers,
# and duplicate in others. Best to keep num DL workers low and a divisor of #val shards.
ds = self.ds.with_epoch(num_worker_samples)
else:
ds = self.ds
i = 0
# _logger.info(f'start {i}, {self.worker_id}') # FIXME temporary debug
for sample in ds:
target = sample['target']
if self.remap_class:
target = self.class_to_idx[target]
yield sample['image'], target
i += 1
# _logger.info(f'end {i}, {self.worker_id}') # FIXME temporary debug
def __len__(self):
num_samples = self._num_samples_per_worker() * self.num_workers
return num_samples
def _filename(self, index, basename=False, absolute=False):
assert False, "Not supported" # no random access to examples
def filenames(self, basename=False, absolute=False):
""" Return all filenames in dataset, overrides base"""
if self.ds is None:
self._lazy_init()
names = []
for sample in self.ds:
if self.filename_key in sample:
name = sample[self.filename_key]
elif '__key__' in sample:
name = sample['__key__'] + self.key_ext
else:
assert False, "No supported name field present"
names.append(name)
if len(names) >= self.num_samples:
break # safety for ds.repeat() case
return names
| pytorch-image-models/timm/data/readers/reader_wds.py/0 | {
"file_path": "pytorch-image-models/timm/data/readers/reader_wds.py",
"repo_id": "pytorch-image-models",
"token_count": 7881
} |
""" Classifier head and layer factory
Hacked together by / Copyright 2020 Ross Wightman
"""
from collections import OrderedDict
from functools import partial
from typing import Optional, Union, Callable
import torch
import torch.nn as nn
from torch.nn import functional as F
from .adaptive_avgmax_pool import SelectAdaptivePool2d
from .create_act import get_act_layer
from .create_norm import get_norm_layer
def _create_pool(
num_features: int,
num_classes: int,
pool_type: str = 'avg',
use_conv: bool = False,
input_fmt: Optional[str] = None,
):
flatten_in_pool = not use_conv # flatten when we use a Linear layer after pooling
if not pool_type:
flatten_in_pool = False # disable flattening if pooling is pass-through (no pooling)
global_pool = SelectAdaptivePool2d(
pool_type=pool_type,
flatten=flatten_in_pool,
input_fmt=input_fmt,
)
num_pooled_features = num_features * global_pool.feat_mult()
return global_pool, num_pooled_features
def _create_fc(num_features, num_classes, use_conv=False):
if num_classes <= 0:
fc = nn.Identity() # pass-through (no classifier)
elif use_conv:
fc = nn.Conv2d(num_features, num_classes, 1, bias=True)
else:
fc = nn.Linear(num_features, num_classes, bias=True)
return fc
def create_classifier(
num_features: int,
num_classes: int,
pool_type: str = 'avg',
use_conv: bool = False,
input_fmt: str = 'NCHW',
drop_rate: Optional[float] = None,
):
global_pool, num_pooled_features = _create_pool(
num_features,
num_classes,
pool_type,
use_conv=use_conv,
input_fmt=input_fmt,
)
fc = _create_fc(
num_pooled_features,
num_classes,
use_conv=use_conv,
)
if drop_rate is not None:
dropout = nn.Dropout(drop_rate)
return global_pool, dropout, fc
return global_pool, fc
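# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original timm source): building a
# pooled linear classifier for a hypothetical 2048-channel NCHW feature map.
def _demo_create_classifier():
    global_pool, fc = create_classifier(num_features=2048, num_classes=10, pool_type='avg')
    x = torch.randn(2, 2048, 7, 7)   # (batch, channels, H, W)
    pooled = global_pool(x)          # adaptive avg pool + flatten -> (2, 2048)
    logits = fc(pooled)              # nn.Linear(2048, 10) -> (2, 10)
    assert logits.shape == (2, 10)
    return logits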
class ClassifierHead(nn.Module):
"""Classifier head w/ configurable global pooling and dropout."""
def __init__(
self,
in_features: int,
num_classes: int,
pool_type: str = 'avg',
drop_rate: float = 0.,
use_conv: bool = False,
input_fmt: str = 'NCHW',
):
"""
Args:
in_features: The number of input features.
num_classes: The number of classes for the final classifier layer (output).
pool_type: Global pooling type, pooling disabled if empty string ('').
drop_rate: Pre-classifier dropout rate.
"""
super(ClassifierHead, self).__init__()
self.in_features = in_features
self.use_conv = use_conv
self.input_fmt = input_fmt
global_pool, fc = create_classifier(
in_features,
num_classes,
pool_type,
use_conv=use_conv,
input_fmt=input_fmt,
)
self.global_pool = global_pool
self.drop = nn.Dropout(drop_rate)
self.fc = fc
self.flatten = nn.Flatten(1) if use_conv and pool_type else nn.Identity()
def reset(self, num_classes: int, pool_type: Optional[str] = None):
if pool_type is not None and pool_type != self.global_pool.pool_type:
self.global_pool, self.fc = create_classifier(
self.in_features,
num_classes,
pool_type=pool_type,
use_conv=self.use_conv,
input_fmt=self.input_fmt,
)
self.flatten = nn.Flatten(1) if self.use_conv and pool_type else nn.Identity()
else:
num_pooled_features = self.in_features * self.global_pool.feat_mult()
self.fc = _create_fc(
num_pooled_features,
num_classes,
use_conv=self.use_conv,
)
def forward(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.drop(x)
if pre_logits:
return self.flatten(x)
x = self.fc(x)
return self.flatten(x)
class NormMlpClassifierHead(nn.Module):
""" A Pool -> Norm -> Mlp Classifier Head for '2D' NCHW tensors
"""
def __init__(
self,
in_features: int,
num_classes: int,
hidden_size: Optional[int] = None,
pool_type: str = 'avg',
drop_rate: float = 0.,
norm_layer: Union[str, Callable] = 'layernorm2d',
act_layer: Union[str, Callable] = 'tanh',
):
"""
Args:
in_features: The number of input features.
num_classes: The number of classes for the final classifier layer (output).
hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None.
pool_type: Global pooling type, pooling disabled if empty string ('').
drop_rate: Pre-classifier dropout rate.
norm_layer: Normalization layer type.
act_layer: MLP activation layer type (only used if hidden_size is not None).
"""
super().__init__()
self.in_features = in_features
self.hidden_size = hidden_size
self.num_features = in_features
self.use_conv = not pool_type
norm_layer = get_norm_layer(norm_layer)
act_layer = get_act_layer(act_layer)
linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear
self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
self.norm = norm_layer(in_features)
self.flatten = nn.Flatten(1) if pool_type else nn.Identity()
if hidden_size:
self.pre_logits = nn.Sequential(OrderedDict([
('fc', linear_layer(in_features, hidden_size)),
('act', act_layer()),
]))
self.num_features = hidden_size
else:
self.pre_logits = nn.Identity()
self.drop = nn.Dropout(drop_rate)
self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def reset(self, num_classes: int, pool_type: Optional[str] = None):
if pool_type is not None:
self.global_pool = SelectAdaptivePool2d(pool_type=pool_type)
self.flatten = nn.Flatten(1) if pool_type else nn.Identity()
self.use_conv = self.global_pool.is_identity()
linear_layer = partial(nn.Conv2d, kernel_size=1) if self.use_conv else nn.Linear
if self.hidden_size:
if ((isinstance(self.pre_logits.fc, nn.Conv2d) and not self.use_conv) or
(isinstance(self.pre_logits.fc, nn.Linear) and self.use_conv)):
with torch.no_grad():
new_fc = linear_layer(self.in_features, self.hidden_size)
new_fc.weight.copy_(self.pre_logits.fc.weight.reshape(new_fc.weight.shape))
new_fc.bias.copy_(self.pre_logits.fc.bias)
self.pre_logits.fc = new_fc
self.fc = linear_layer(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.norm(x)
x = self.flatten(x)
x = self.pre_logits(x)
x = self.drop(x)
if pre_logits:
return x
x = self.fc(x)
return x
class ClNormMlpClassifierHead(nn.Module):
""" A Pool -> Norm -> Mlp Classifier Head for n-D NxxC tensors
"""
def __init__(
self,
in_features: int,
num_classes: int,
hidden_size: Optional[int] = None,
pool_type: str = 'avg',
drop_rate: float = 0.,
norm_layer: Union[str, Callable] = 'layernorm',
act_layer: Union[str, Callable] = 'gelu',
input_fmt: str = 'NHWC',
):
"""
Args:
in_features: The number of input features.
num_classes: The number of classes for the final classifier layer (output).
hidden_size: The hidden size of the MLP (pre-logits FC layer) if not None.
pool_type: Global pooling type, pooling disabled if empty string ('').
drop_rate: Pre-classifier dropout rate.
norm_layer: Normalization layer type.
act_layer: MLP activation layer type (only used if hidden_size is not None).
"""
super().__init__()
self.in_features = in_features
self.hidden_size = hidden_size
self.num_features = in_features
assert pool_type in ('', 'avg', 'max', 'avgmax')
self.pool_type = pool_type
assert input_fmt in ('NHWC', 'NLC')
self.pool_dim = 1 if input_fmt == 'NLC' else (1, 2)
norm_layer = get_norm_layer(norm_layer)
act_layer = get_act_layer(act_layer)
self.norm = norm_layer(in_features)
if hidden_size:
self.pre_logits = nn.Sequential(OrderedDict([
('fc', nn.Linear(in_features, hidden_size)),
('act', act_layer()),
]))
self.num_features = hidden_size
else:
self.pre_logits = nn.Identity()
self.drop = nn.Dropout(drop_rate)
self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def reset(self, num_classes: int, pool_type: Optional[str] = None, reset_other: bool = False):
if pool_type is not None:
self.pool_type = pool_type
if reset_other:
self.pre_logits = nn.Identity()
self.norm = nn.Identity()
self.fc = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def _global_pool(self, x):
if self.pool_type:
if self.pool_type == 'avg':
x = x.mean(dim=self.pool_dim)
elif self.pool_type == 'max':
x = x.amax(dim=self.pool_dim)
elif self.pool_type == 'avgmax':
x = 0.5 * (x.amax(dim=self.pool_dim) + x.mean(dim=self.pool_dim))
return x
def forward(self, x, pre_logits: bool = False):
x = self._global_pool(x)
x = self.norm(x)
x = self.pre_logits(x)
x = self.drop(x)
if pre_logits:
return x
x = self.fc(x)
return x
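# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original timm source): a
# channels-last (NHWC) head with a hidden pre-logits MLP; sizes here are
# arbitrary example values.
def _demo_cl_norm_mlp_head():
    head = ClNormMlpClassifierHead(in_features=64, num_classes=10, hidden_size=128, input_fmt='NHWC')
    x = torch.randn(2, 7, 7, 64)     # NHWC feature map
    logits = head(x)                 # mean over dims (1, 2) -> norm -> MLP -> (2, 10)
    assert logits.shape == (2, 10)
    return logits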
| pytorch-image-models/timm/layers/classifier.py/0 | {
"file_path": "pytorch-image-models/timm/layers/classifier.py",
"repo_id": "pytorch-image-models",
"token_count": 5047
} |
""" Gather-Excite Attention Block
Paper: `Gather-Excite: Exploiting Feature Context in CNNs` - https://arxiv.org/abs/1810.12348
Official code here, but it's only partial impl in Caffe: https://github.com/hujie-frank/GENet
I've tried to support all of the extent both w/ and w/o params. I don't believe I've seen another
impl that covers all of the cases.
NOTE: extent=0 + extra_params=False is equivalent to Squeeze-and-Excitation
Hacked together by / Copyright 2021 Ross Wightman
"""
import math
from torch import nn as nn
import torch.nn.functional as F
from .create_act import create_act_layer, get_act_layer
from .create_conv2d import create_conv2d
from .helpers import make_divisible
from .mlp import ConvMlp
class GatherExcite(nn.Module):
""" Gather-Excite Attention Module
"""
def __init__(
self, channels, feat_size=None, extra_params=False, extent=0, use_mlp=True,
rd_ratio=1./16, rd_channels=None, rd_divisor=1, add_maxpool=False,
act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, gate_layer='sigmoid'):
super(GatherExcite, self).__init__()
self.add_maxpool = add_maxpool
act_layer = get_act_layer(act_layer)
self.extent = extent
if extra_params:
self.gather = nn.Sequential()
if extent == 0:
assert feat_size is not None, 'spatial feature size must be specified for global extent w/ params'
self.gather.add_module(
'conv1', create_conv2d(channels, channels, kernel_size=feat_size, stride=1, depthwise=True))
if norm_layer:
                    self.gather.add_module('norm1', nn.BatchNorm2d(channels))
else:
assert extent % 2 == 0
num_conv = int(math.log2(extent))
for i in range(num_conv):
self.gather.add_module(
f'conv{i + 1}',
create_conv2d(channels, channels, kernel_size=3, stride=2, depthwise=True))
if norm_layer:
self.gather.add_module(f'norm{i + 1}', nn.BatchNorm2d(channels))
if i != num_conv - 1:
self.gather.add_module(f'act{i + 1}', act_layer(inplace=True))
else:
self.gather = None
if self.extent == 0:
self.gk = 0
self.gs = 0
else:
assert extent % 2 == 0
self.gk = self.extent * 2 - 1
self.gs = self.extent
if not rd_channels:
rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
self.mlp = ConvMlp(channels, rd_channels, act_layer=act_layer) if use_mlp else nn.Identity()
self.gate = create_act_layer(gate_layer)
def forward(self, x):
size = x.shape[-2:]
if self.gather is not None:
x_ge = self.gather(x)
else:
if self.extent == 0:
# global extent
x_ge = x.mean(dim=(2, 3), keepdims=True)
if self.add_maxpool:
# experimental codepath, may remove or change
x_ge = 0.5 * x_ge + 0.5 * x.amax((2, 3), keepdim=True)
else:
x_ge = F.avg_pool2d(
x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2, count_include_pad=False)
if self.add_maxpool:
# experimental codepath, may remove or change
x_ge = 0.5 * x_ge + 0.5 * F.max_pool2d(x, kernel_size=self.gk, stride=self.gs, padding=self.gk // 2)
x_ge = self.mlp(x_ge)
if x_ge.shape[-1] != 1 or x_ge.shape[-2] != 1:
x_ge = F.interpolate(x_ge, size=size)
return x * self.gate(x_ge)
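# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original timm source). extent=0 with
# extra_params=False behaves like a squeeze-and-excite global gate, while an even
# extent uses a strided average pool (kernel 2*extent-1, stride extent) to gather
# local context. torch is imported locally since only torch.nn/F are imported above.
def _demo_gather_excite():
    import torch
    ge_global = GatherExcite(channels=16, extent=0, extra_params=False)
    ge_local = GatherExcite(channels=16, extent=4, extra_params=False)
    x = torch.randn(2, 16, 32, 32)
    assert ge_global(x).shape == x.shape
    assert ge_local(x).shape == x.shape
    return True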
| pytorch-image-models/timm/layers/gather_excite.py/0 | {
"file_path": "pytorch-image-models/timm/layers/gather_excite.py",
"repo_id": "pytorch-image-models",
"token_count": 1956
} |
""" Bilinear-Attention-Transform and Non-Local Attention
Paper: `Non-Local Neural Networks With Grouped Bilinear Attentional Transforms`
- https://openaccess.thecvf.com/content_CVPR_2020/html/Chi_Non-Local_Neural_Networks_With_Grouped_Bilinear_Attentional_Transforms_CVPR_2020_paper.html
Adapted from original code: https://github.com/BA-Transform/BAT-Image-Classification
"""
import torch
from torch import nn
from torch.nn import functional as F
from .conv_bn_act import ConvNormAct
from .helpers import make_divisible
from .trace_utils import _assert
class NonLocalAttn(nn.Module):
"""Spatial NL block for image classification.
This was adapted from https://github.com/BA-Transform/BAT-Image-Classification
Their NonLocal impl inspired by https://github.com/facebookresearch/video-nonlocal-net.
"""
def __init__(self, in_channels, use_scale=True, rd_ratio=1/8, rd_channels=None, rd_divisor=8, **kwargs):
super(NonLocalAttn, self).__init__()
if rd_channels is None:
rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor)
self.scale = in_channels ** -0.5 if use_scale else 1.0
self.t = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True)
self.p = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True)
self.g = nn.Conv2d(in_channels, rd_channels, kernel_size=1, stride=1, bias=True)
self.z = nn.Conv2d(rd_channels, in_channels, kernel_size=1, stride=1, bias=True)
self.norm = nn.BatchNorm2d(in_channels)
self.reset_parameters()
def forward(self, x):
shortcut = x
t = self.t(x)
p = self.p(x)
g = self.g(x)
B, C, H, W = t.size()
t = t.view(B, C, -1).permute(0, 2, 1)
p = p.view(B, C, -1)
g = g.view(B, C, -1).permute(0, 2, 1)
att = torch.bmm(t, p) * self.scale
att = F.softmax(att, dim=2)
x = torch.bmm(att, g)
x = x.permute(0, 2, 1).reshape(B, C, H, W)
x = self.z(x)
x = self.norm(x) + shortcut
return x
def reset_parameters(self):
for name, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
if len(list(m.parameters())) > 1:
nn.init.constant_(m.bias, 0.0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 0)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight, 0)
nn.init.constant_(m.bias, 0)
class BilinearAttnTransform(nn.Module):
def __init__(self, in_channels, block_size, groups, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
super(BilinearAttnTransform, self).__init__()
self.conv1 = ConvNormAct(in_channels, groups, 1, act_layer=act_layer, norm_layer=norm_layer)
self.conv_p = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(block_size, 1))
self.conv_q = nn.Conv2d(groups, block_size * block_size * groups, kernel_size=(1, block_size))
self.conv2 = ConvNormAct(in_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer)
self.block_size = block_size
self.groups = groups
self.in_channels = in_channels
def resize_mat(self, x, t: int):
B, C, block_size, block_size1 = x.shape
_assert(block_size == block_size1, '')
if t <= 1:
return x
x = x.view(B * C, -1, 1, 1)
x = x * torch.eye(t, t, dtype=x.dtype, device=x.device)
x = x.view(B * C, block_size, block_size, t, t)
x = torch.cat(torch.split(x, 1, dim=1), dim=3)
x = torch.cat(torch.split(x, 1, dim=2), dim=4)
x = x.view(B, C, block_size * t, block_size * t)
return x
def forward(self, x):
_assert(x.shape[-1] % self.block_size == 0, '')
_assert(x.shape[-2] % self.block_size == 0, '')
B, C, H, W = x.shape
out = self.conv1(x)
rp = F.adaptive_max_pool2d(out, (self.block_size, 1))
cp = F.adaptive_max_pool2d(out, (1, self.block_size))
p = self.conv_p(rp).view(B, self.groups, self.block_size, self.block_size).sigmoid()
q = self.conv_q(cp).view(B, self.groups, self.block_size, self.block_size).sigmoid()
p = p / p.sum(dim=3, keepdim=True)
q = q / q.sum(dim=2, keepdim=True)
p = p.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size(
0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous()
p = p.view(B, C, self.block_size, self.block_size)
q = q.view(B, self.groups, 1, self.block_size, self.block_size).expand(x.size(
0), self.groups, C // self.groups, self.block_size, self.block_size).contiguous()
q = q.view(B, C, self.block_size, self.block_size)
p = self.resize_mat(p, H // self.block_size)
q = self.resize_mat(q, W // self.block_size)
y = p.matmul(x)
y = y.matmul(q)
y = self.conv2(y)
return y
class BatNonLocalAttn(nn.Module):
""" BAT
Adapted from: https://github.com/BA-Transform/BAT-Image-Classification
"""
def __init__(
self, in_channels, block_size=7, groups=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8,
drop_rate=0.2, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, **_):
super().__init__()
if rd_channels is None:
rd_channels = make_divisible(in_channels * rd_ratio, divisor=rd_divisor)
self.conv1 = ConvNormAct(in_channels, rd_channels, 1, act_layer=act_layer, norm_layer=norm_layer)
self.ba = BilinearAttnTransform(rd_channels, block_size, groups, act_layer=act_layer, norm_layer=norm_layer)
self.conv2 = ConvNormAct(rd_channels, in_channels, 1, act_layer=act_layer, norm_layer=norm_layer)
self.dropout = nn.Dropout2d(p=drop_rate)
def forward(self, x):
xl = self.conv1(x)
y = self.ba(xl)
y = self.conv2(y)
y = self.dropout(y)
return y + x
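# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original timm source). Both attention
# blocks preserve the input shape; BatNonLocalAttn additionally requires H and W to
# be divisible by its block_size (7 by default), hence the 14x14 example map.
def _demo_non_local_attn():
    x = torch.randn(2, 32, 14, 14)
    nl = NonLocalAttn(in_channels=32)
    bat = BatNonLocalAttn(in_channels=32)
    assert nl(x).shape == x.shape
    assert bat(x).shape == x.shape
    return True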
| pytorch-image-models/timm/layers/non_local_attn.py/0 | {
"file_path": "pytorch-image-models/timm/layers/non_local_attn.py",
"repo_id": "pytorch-image-models",
"token_count": 3028
} |
""" Convolution with Weight Standardization (StdConv and ScaledStdConv)
StdConv:
@article{weightstandardization,
author = {Siyuan Qiao and Huiyu Wang and Chenxi Liu and Wei Shen and Alan Yuille},
title = {Weight Standardization},
journal = {arXiv preprint arXiv:1903.10520},
year = {2019},
}
Code: https://github.com/joe-siyuan-qiao/WeightStandardization
ScaledStdConv:
Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets
Hacked together by / copyright Ross Wightman, 2021.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .padding import get_padding, get_padding_value, pad_same
class StdConv2d(nn.Conv2d):
"""Conv2d with Weight Standardization. Used for BiT ResNet-V2 models.
Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` -
https://arxiv.org/abs/1903.10520v2
"""
def __init__(
self, in_channel, out_channels, kernel_size, stride=1, padding=None,
dilation=1, groups=1, bias=False, eps=1e-6):
if padding is None:
padding = get_padding(kernel_size, stride, dilation)
super().__init__(
in_channel, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias)
self.eps = eps
def forward(self, x):
weight = F.batch_norm(
self.weight.reshape(1, self.out_channels, -1), None, None,
training=True, momentum=0., eps=self.eps).reshape_as(self.weight)
x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return x
class StdConv2dSame(nn.Conv2d):
"""Conv2d with Weight Standardization. TF compatible SAME padding. Used for ViT Hybrid model.
Paper: `Micro-Batch Training with Batch-Channel Normalization and Weight Standardization` -
https://arxiv.org/abs/1903.10520v2
"""
def __init__(
self, in_channel, out_channels, kernel_size, stride=1, padding='SAME',
dilation=1, groups=1, bias=False, eps=1e-6):
padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)
super().__init__(
in_channel, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.same_pad = is_dynamic
self.eps = eps
def forward(self, x):
if self.same_pad:
x = pad_same(x, self.kernel_size, self.stride, self.dilation)
weight = F.batch_norm(
self.weight.reshape(1, self.out_channels, -1), None, None,
training=True, momentum=0., eps=self.eps).reshape_as(self.weight)
x = F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return x
class ScaledStdConv2d(nn.Conv2d):
"""Conv2d layer with Scaled Weight Standardization.
Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` -
https://arxiv.org/abs/2101.08692
NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor.
"""
def __init__(
self, in_channels, out_channels, kernel_size, stride=1, padding=None,
dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0):
if padding is None:
padding = get_padding(kernel_size, stride, dilation)
super().__init__(
in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init))
self.scale = gamma * self.weight[0].numel() ** -0.5 # gamma * 1 / sqrt(fan-in)
self.eps = eps
def forward(self, x):
weight = F.batch_norm(
self.weight.reshape(1, self.out_channels, -1), None, None,
weight=(self.gain * self.scale).view(-1),
training=True, momentum=0., eps=self.eps).reshape_as(self.weight)
return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
class ScaledStdConv2dSame(nn.Conv2d):
"""Conv2d layer with Scaled Weight Standardization and Tensorflow-like SAME padding support
Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets` -
https://arxiv.org/abs/2101.08692
NOTE: the operations used in this impl differ slightly from the DeepMind Haiku impl. The impact is minor.
"""
def __init__(
self, in_channels, out_channels, kernel_size, stride=1, padding='SAME',
dilation=1, groups=1, bias=True, gamma=1.0, eps=1e-6, gain_init=1.0):
padding, is_dynamic = get_padding_value(padding, kernel_size, stride=stride, dilation=dilation)
super().__init__(
in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation,
groups=groups, bias=bias)
self.gain = nn.Parameter(torch.full((self.out_channels, 1, 1, 1), gain_init))
self.scale = gamma * self.weight[0].numel() ** -0.5
self.same_pad = is_dynamic
self.eps = eps
def forward(self, x):
if self.same_pad:
x = pad_same(x, self.kernel_size, self.stride, self.dilation)
weight = F.batch_norm(
self.weight.reshape(1, self.out_channels, -1), None, None,
weight=(self.gain * self.scale).view(-1),
training=True, momentum=0., eps=self.eps).reshape_as(self.weight)
return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
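# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original timm source). It re-applies
# the same F.batch_norm trick used in StdConv2d.forward to show that the effective
# convolution weight is standardized to zero mean per output channel.
def _demo_std_conv():
    conv = StdConv2d(8, 16, kernel_size=3)
    y = conv(torch.randn(2, 8, 32, 32))
    assert y.shape == (2, 16, 32, 32)
    w = F.batch_norm(
        conv.weight.reshape(1, conv.out_channels, -1), None, None,
        training=True, momentum=0., eps=conv.eps).reshape_as(conv.weight)
    assert torch.allclose(w.mean(dim=(1, 2, 3)), torch.zeros(16), atol=1e-5)
    return True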
| pytorch-image-models/timm/layers/std_conv.py/0 | {
"file_path": "pytorch-image-models/timm/layers/std_conv.py",
"repo_id": "pytorch-image-models",
"token_count": 2483
} |
""" PyTorch FX Based Feature Extraction Helpers
Using https://pytorch.org/vision/stable/feature_extraction.html
"""
from typing import Callable, Dict, List, Optional, Union, Tuple, Type
import torch
from torch import nn
from ._features import _get_feature_info, _get_return_layers
try:
# NOTE we wrap torchvision fns to use timm leaf / no trace definitions
from torchvision.models.feature_extraction import create_feature_extractor as _create_feature_extractor
from torchvision.models.feature_extraction import get_graph_node_names as _get_graph_node_names
has_fx_feature_extraction = True
except ImportError:
has_fx_feature_extraction = False
# Layers we want to treat as leaf modules
from timm.layers import Conv2dSame, ScaledStdConv2dSame, CondConv2d, StdConv2dSame, Format
from timm.layers import resample_abs_pos_embed, resample_abs_pos_embed_nhwc
from timm.layers.non_local_attn import BilinearAttnTransform
from timm.layers.pool2d_same import MaxPool2dSame, AvgPool2dSame
from timm.layers.norm_act import (
BatchNormAct2d,
SyncBatchNormAct,
FrozenBatchNormAct2d,
GroupNormAct,
GroupNorm1Act,
LayerNormAct,
LayerNormAct2d
)
__all__ = ['register_notrace_module', 'is_notrace_module', 'get_notrace_modules',
'register_notrace_function', 'is_notrace_function', 'get_notrace_functions',
'create_feature_extractor', 'get_graph_node_names', 'FeatureGraphNet', 'GraphExtractNet']
# NOTE: By default, any modules from timm.models.layers that we want to treat as leaf modules go here
# BUT modules from timm.models should use the registration mechanism below
_leaf_modules = {
BilinearAttnTransform, # reason: flow control t <= 1
# Reason: get_same_padding has a max which raises a control flow error
Conv2dSame, MaxPool2dSame, ScaledStdConv2dSame, StdConv2dSame, AvgPool2dSame,
CondConv2d, # reason: TypeError: F.conv2d received Proxy in groups=self.groups * B (because B = x.shape[0]),
BatchNormAct2d,
SyncBatchNormAct,
FrozenBatchNormAct2d,
GroupNormAct,
GroupNorm1Act,
LayerNormAct,
LayerNormAct2d,
}
try:
from timm.layers import InplaceAbn
_leaf_modules.add(InplaceAbn)
except ImportError:
pass
def register_notrace_module(module: Type[nn.Module]):
"""
Any module not under timm.models.layers should get this decorator if we don't want to trace through it.
"""
_leaf_modules.add(module)
return module
def is_notrace_module(module: Type[nn.Module]):
return module in _leaf_modules
def get_notrace_modules():
return list(_leaf_modules)
# Functions we want to autowrap (treat them as leaves)
_autowrap_functions = {
resample_abs_pos_embed,
resample_abs_pos_embed_nhwc,
}
def register_notrace_function(func: Callable):
"""
Decorator for functions which ought not to be traced through
"""
_autowrap_functions.add(func)
return func
def is_notrace_function(func: Callable):
return func in _autowrap_functions
def get_notrace_functions():
return list(_autowrap_functions)
def get_graph_node_names(model: nn.Module) -> Tuple[List[str], List[str]]:
return _get_graph_node_names(
model,
tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)}
)
def create_feature_extractor(model: nn.Module, return_nodes: Union[Dict[str, str], List[str]]):
assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction'
return _create_feature_extractor(
model, return_nodes,
tracer_kwargs={'leaf_modules': list(_leaf_modules), 'autowrap_functions': list(_autowrap_functions)}
)
class FeatureGraphNet(nn.Module):
""" A FX Graph based feature extractor that works with the model feature_info metadata
"""
return_dict: torch.jit.Final[bool]
def __init__(
self,
model: nn.Module,
out_indices: Tuple[int, ...],
out_map: Optional[Dict] = None,
output_fmt: str = 'NCHW',
return_dict: bool = False,
):
super().__init__()
assert has_fx_feature_extraction, 'Please update to PyTorch 1.10+, torchvision 0.11+ for FX feature extraction'
self.feature_info = _get_feature_info(model, out_indices)
if out_map is not None:
assert len(out_map) == len(out_indices)
self.output_fmt = Format(output_fmt)
return_nodes = _get_return_layers(self.feature_info, out_map)
self.graph_module = create_feature_extractor(model, return_nodes)
self.return_dict = return_dict
def forward(self, x):
out = self.graph_module(x)
if self.return_dict:
return out
return list(out.values())
class GraphExtractNet(nn.Module):
""" A standalone feature extraction wrapper that maps dict -> list or single tensor
NOTE:
* one can use feature_extractor directly if dictionary output is desired
* unlike FeatureGraphNet, this is intended to be used standalone and not with model feature_info
metadata for builtin feature extraction mode
* create_feature_extractor can be used directly if dictionary output is acceptable
Args:
model: model to extract features from
return_nodes: node names to return features from (dict or list)
squeeze_out: if only one output, and output in list format, flatten to single tensor
return_dict: return as dictionary from extractor with node names as keys, ignores squeeze_out arg
"""
return_dict: torch.jit.Final[bool]
def __init__(
self,
model: nn.Module,
return_nodes: Union[Dict[str, str], List[str]],
squeeze_out: bool = True,
return_dict: bool = False,
):
super().__init__()
self.squeeze_out = squeeze_out
self.graph_module = create_feature_extractor(model, return_nodes)
self.return_dict = return_dict
def forward(self, x) -> Union[List[torch.Tensor], torch.Tensor]:
out = self.graph_module(x)
if self.return_dict:
return out
out = list(out.values())
return out[0] if self.squeeze_out and len(out) == 1 else out
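# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original timm source); it assumes
# torchvision FX feature extraction is available. `_TinyNet` is a hypothetical
# model whose graph node names ('act1', 'act2') follow its attribute names.
def _demo_graph_extract_net():
    class _TinyNet(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = nn.Conv2d(3, 8, 3, padding=1)
            self.act1 = nn.ReLU()
            self.conv2 = nn.Conv2d(8, 16, 3, stride=2, padding=1)
            self.act2 = nn.ReLU()

        def forward(self, x):
            return self.act2(self.conv2(self.act1(self.conv1(x))))

    extractor = GraphExtractNet(_TinyNet(), return_nodes=['act1', 'act2'], squeeze_out=False)
    feats = extractor(torch.randn(1, 3, 32, 32))
    assert [tuple(f.shape) for f in feats] == [(1, 8, 32, 32), (1, 16, 16, 16)]
    return feats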
| pytorch-image-models/timm/models/_features_fx.py/0 | {
"file_path": "pytorch-image-models/timm/models/_features_fx.py",
"repo_id": "pytorch-image-models",
"token_count": 2402
} |
"""
CoaT architecture.
Paper: Co-Scale Conv-Attentional Image Transformers - https://arxiv.org/abs/2104.06399
Official CoaT code at: https://github.com/mlpc-ucsd/CoaT
Modified from timm/models/vision_transformer.py
"""
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, DropPath, to_2tuple, trunc_normal_, _assert, LayerNorm
from ._builder import build_model_with_cfg
from ._registry import register_model, generate_default_cfgs
__all__ = ['CoaT']
class ConvRelPosEnc(nn.Module):
""" Convolutional relative position encoding. """
def __init__(self, head_chs, num_heads, window):
"""
Initialization.
Ch: Channels per head.
h: Number of heads.
window: Window size(s) in convolutional relative positional encoding. It can have two forms:
                1. An integer of window size, which assigns the same window size to all
                    attention heads in ConvRelPosEnc.
                2. A dict mapping window size to #attention head splits
                    (e.g. {window size 1: #attention head split 1, window size 2: #attention head split 2}).
                    It will apply different window sizes to the attention head splits.
"""
super().__init__()
if isinstance(window, int):
# Set the same window size for all attention heads.
window = {window: num_heads}
self.window = window
elif isinstance(window, dict):
self.window = window
else:
            raise ValueError(f'Unsupported window type: {type(window)}')
self.conv_list = nn.ModuleList()
self.head_splits = []
for cur_window, cur_head_split in window.items():
dilation = 1
# Determine padding size.
# Ref: https://discuss.pytorch.org/t/how-to-keep-the-shape-of-input-and-output-same-when-dilation-conv/14338
padding_size = (cur_window + (cur_window - 1) * (dilation - 1)) // 2
cur_conv = nn.Conv2d(
cur_head_split * head_chs,
cur_head_split * head_chs,
kernel_size=(cur_window, cur_window),
padding=(padding_size, padding_size),
dilation=(dilation, dilation),
groups=cur_head_split * head_chs,
)
self.conv_list.append(cur_conv)
self.head_splits.append(cur_head_split)
self.channel_splits = [x * head_chs for x in self.head_splits]
def forward(self, q, v, size: Tuple[int, int]):
B, num_heads, N, C = q.shape
H, W = size
        _assert(N == 1 + H * W, 'sequence length must be 1 (cls token) + H * W')
# Convolutional relative position encoding.
q_img = q[:, :, 1:, :] # [B, h, H*W, Ch]
v_img = v[:, :, 1:, :] # [B, h, H*W, Ch]
v_img = v_img.transpose(-1, -2).reshape(B, num_heads * C, H, W)
v_img_list = torch.split(v_img, self.channel_splits, dim=1) # Split according to channels
conv_v_img_list = []
for i, conv in enumerate(self.conv_list):
conv_v_img_list.append(conv(v_img_list[i]))
conv_v_img = torch.cat(conv_v_img_list, dim=1)
conv_v_img = conv_v_img.reshape(B, num_heads, C, H * W).transpose(-1, -2)
EV_hat = q_img * conv_v_img
EV_hat = F.pad(EV_hat, (0, 0, 1, 0, 0, 0)) # [B, h, N, Ch].
return EV_hat
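# --- Editor's note: an illustrative shape walk-through, not part of the original source. ---
# With head_chs=8, num_heads=8 and window={3: 2, 5: 3, 7: 3}, channel_splits = [16, 24, 24],
# so the 64-channel value map is split into per-window-size groups, each filtered by its own
# depthwise conv before being recombined and gated by q:
#
#   crpe = ConvRelPosEnc(head_chs=8, num_heads=8, window={3: 2, 5: 3, 7: 3})
#   q = v = torch.randn(2, 8, 1 + 14 * 14, 8)     # [B, num_heads, 1 + H*W, head_chs]
#   out = crpe(q, v, size=(14, 14))               # same shape as q (CLS position zero-padded)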
class FactorAttnConvRelPosEnc(nn.Module):
""" Factorized attention with convolutional relative position encoding class. """
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
shared_crpe=None,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop) # Note: attn_drop is actually not used.
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
# Shared convolutional relative position encoding.
self.crpe = shared_crpe
def forward(self, x, size: Tuple[int, int]):
B, N, C = x.shape
# Generate Q, K, V.
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # [B, h, N, Ch]
# Factorized attention.
k_softmax = k.softmax(dim=2)
factor_att = k_softmax.transpose(-1, -2) @ v
factor_att = q @ factor_att
# Convolutional relative position encoding.
crpe = self.crpe(q, v, size=size) # [B, h, N, Ch]
# Merge and reshape.
x = self.scale * factor_att + crpe
x = x.transpose(1, 2).reshape(B, N, C) # [B, h, N, Ch] -> [B, N, h, Ch] -> [B, N, C]
# Output projection.
x = self.proj(x)
x = self.proj_drop(x)
return x
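# --- Editor's note: a conceptual sketch, not part of the original source. ---
# The factorized attention above applies softmax over the token axis of k and contracts
# k with v first, so per head the cost is O(N * Ch^2) instead of the O(N^2 * Ch) of
# standard attention. Ignoring the crpe term and where the scale is applied:
#
#   standard:   softmax(q @ k.transpose(-2, -1) * scale) @ v          # builds an N x N map
#   factorized: scale * q @ (k.softmax(dim=2).transpose(-2, -1) @ v)  # builds a Ch x Ch map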
class ConvPosEnc(nn.Module):
""" Convolutional Position Encoding.
Note: This module is similar to the conditional position encoding in CPVT.
"""
def __init__(self, dim, k=3):
super(ConvPosEnc, self).__init__()
self.proj = nn.Conv2d(dim, dim, k, 1, k//2, groups=dim)
def forward(self, x, size: Tuple[int, int]):
B, N, C = x.shape
H, W = size
        _assert(N == 1 + H * W, 'sequence length must be 1 (cls token) + H * W')
# Extract CLS token and image tokens.
cls_token, img_tokens = x[:, :1], x[:, 1:] # [B, 1, C], [B, H*W, C]
# Depthwise convolution.
feat = img_tokens.transpose(1, 2).view(B, C, H, W)
x = self.proj(feat) + feat
x = x.flatten(2).transpose(1, 2)
# Combine with CLS token.
x = torch.cat((cls_token, x), dim=1)
return x
class SerialBlock(nn.Module):
""" Serial block class.
    Note: In this implementation, each serial block only contains a conv-attention and an FFN (MLP) module. """
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
shared_cpe=None,
shared_crpe=None,
):
super().__init__()
# Conv-Attention.
self.cpe = shared_cpe
self.norm1 = norm_layer(dim)
self.factoratt_crpe = FactorAttnConvRelPosEnc(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
shared_crpe=shared_crpe,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# MLP.
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=proj_drop,
)
def forward(self, x, size: Tuple[int, int]):
# Conv-Attention.
x = self.cpe(x, size)
cur = self.norm1(x)
cur = self.factoratt_crpe(cur, size)
x = x + self.drop_path(cur)
# MLP.
cur = self.norm2(x)
cur = self.mlp(cur)
x = x + self.drop_path(cur)
return x
class ParallelBlock(nn.Module):
""" Parallel block class. """
def __init__(
self,
dims,
num_heads,
mlp_ratios=[],
qkv_bias=False,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
shared_crpes=None,
):
super().__init__()
# Conv-Attention.
self.norm12 = norm_layer(dims[1])
self.norm13 = norm_layer(dims[2])
self.norm14 = norm_layer(dims[3])
self.factoratt_crpe2 = FactorAttnConvRelPosEnc(
dims[1],
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
shared_crpe=shared_crpes[1],
)
self.factoratt_crpe3 = FactorAttnConvRelPosEnc(
dims[2],
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
shared_crpe=shared_crpes[2],
)
self.factoratt_crpe4 = FactorAttnConvRelPosEnc(
dims[3],
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
shared_crpe=shared_crpes[3],
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
# MLP.
self.norm22 = norm_layer(dims[1])
self.norm23 = norm_layer(dims[2])
self.norm24 = norm_layer(dims[3])
# In parallel block, we assume dimensions are the same and share the linear transformation.
assert dims[1] == dims[2] == dims[3]
assert mlp_ratios[1] == mlp_ratios[2] == mlp_ratios[3]
mlp_hidden_dim = int(dims[1] * mlp_ratios[1])
self.mlp2 = self.mlp3 = self.mlp4 = Mlp(
in_features=dims[1],
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=proj_drop,
)
def upsample(self, x, factor: float, size: Tuple[int, int]):
""" Feature map up-sampling. """
return self.interpolate(x, scale_factor=factor, size=size)
def downsample(self, x, factor: float, size: Tuple[int, int]):
""" Feature map down-sampling. """
return self.interpolate(x, scale_factor=1.0/factor, size=size)
def interpolate(self, x, scale_factor: float, size: Tuple[int, int]):
""" Feature map interpolation. """
B, N, C = x.shape
H, W = size
        _assert(N == 1 + H * W, 'sequence length must be 1 (cls token) + H * W')
cls_token = x[:, :1, :]
img_tokens = x[:, 1:, :]
img_tokens = img_tokens.transpose(1, 2).reshape(B, C, H, W)
img_tokens = F.interpolate(
img_tokens,
scale_factor=scale_factor,
recompute_scale_factor=False,
mode='bilinear',
align_corners=False,
)
img_tokens = img_tokens.reshape(B, C, -1).transpose(1, 2)
out = torch.cat((cls_token, img_tokens), dim=1)
return out
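    # --- Editor's note: an illustrative sketch, not part of the original source. ---
    # interpolate() strips the CLS token, reshapes the remaining H*W tokens into a feature
    # map, resizes bilinearly, then re-attaches the CLS token. E.g. for a ParallelBlock
    # instance `block` (name is an assumption), upsampling a 7x7 token grid by 2x:
    #
    #   x = torch.randn(2, 1 + 7 * 7, 256)
    #   y = block.interpolate(x, scale_factor=2.0, size=(7, 7))   # [2, 1 + 14*14, 256]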
def forward(self, x1, x2, x3, x4, sizes: List[Tuple[int, int]]):
_, S2, S3, S4 = sizes
cur2 = self.norm12(x2)
cur3 = self.norm13(x3)
cur4 = self.norm14(x4)
cur2 = self.factoratt_crpe2(cur2, size=S2)
cur3 = self.factoratt_crpe3(cur3, size=S3)
cur4 = self.factoratt_crpe4(cur4, size=S4)
upsample3_2 = self.upsample(cur3, factor=2., size=S3)
upsample4_3 = self.upsample(cur4, factor=2., size=S4)
upsample4_2 = self.upsample(cur4, factor=4., size=S4)
downsample2_3 = self.downsample(cur2, factor=2., size=S2)
downsample3_4 = self.downsample(cur3, factor=2., size=S3)
downsample2_4 = self.downsample(cur2, factor=4., size=S2)
cur2 = cur2 + upsample3_2 + upsample4_2
cur3 = cur3 + upsample4_3 + downsample2_3
cur4 = cur4 + downsample3_4 + downsample2_4
x2 = x2 + self.drop_path(cur2)
x3 = x3 + self.drop_path(cur3)
x4 = x4 + self.drop_path(cur4)
# MLP.
cur2 = self.norm22(x2)
cur3 = self.norm23(x3)
cur4 = self.norm24(x4)
cur2 = self.mlp2(cur2)
cur3 = self.mlp3(cur3)
cur4 = self.mlp4(cur4)
x2 = x2 + self.drop_path(cur2)
x3 = x3 + self.drop_path(cur3)
x4 = x4 + self.drop_path(cur4)
return x1, x2, x3, x4
class CoaT(nn.Module):
""" CoaT class. """
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
embed_dims=(64, 128, 320, 512),
serial_depths=(3, 4, 6, 3),
parallel_depth=0,
num_heads=8,
mlp_ratios=(4, 4, 4, 4),
qkv_bias=True,
drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=LayerNorm,
return_interm_layers=False,
out_features=None,
crpe_window=None,
global_pool='token',
):
super().__init__()
assert global_pool in ('token', 'avg')
crpe_window = crpe_window or {3: 2, 5: 3, 7: 3}
self.return_interm_layers = return_interm_layers
self.out_features = out_features
self.embed_dims = embed_dims
self.num_features = self.head_hidden_size = embed_dims[-1]
self.num_classes = num_classes
self.global_pool = global_pool
# Patch embeddings.
img_size = to_2tuple(img_size)
self.patch_embed1 = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans,
embed_dim=embed_dims[0], norm_layer=nn.LayerNorm)
self.patch_embed2 = PatchEmbed(
img_size=[x // 4 for x in img_size], patch_size=2, in_chans=embed_dims[0],
embed_dim=embed_dims[1], norm_layer=nn.LayerNorm)
self.patch_embed3 = PatchEmbed(
img_size=[x // 8 for x in img_size], patch_size=2, in_chans=embed_dims[1],
embed_dim=embed_dims[2], norm_layer=nn.LayerNorm)
self.patch_embed4 = PatchEmbed(
img_size=[x // 16 for x in img_size], patch_size=2, in_chans=embed_dims[2],
embed_dim=embed_dims[3], norm_layer=nn.LayerNorm)
# Class tokens.
self.cls_token1 = nn.Parameter(torch.zeros(1, 1, embed_dims[0]))
self.cls_token2 = nn.Parameter(torch.zeros(1, 1, embed_dims[1]))
self.cls_token3 = nn.Parameter(torch.zeros(1, 1, embed_dims[2]))
self.cls_token4 = nn.Parameter(torch.zeros(1, 1, embed_dims[3]))
# Convolutional position encodings.
self.cpe1 = ConvPosEnc(dim=embed_dims[0], k=3)
self.cpe2 = ConvPosEnc(dim=embed_dims[1], k=3)
self.cpe3 = ConvPosEnc(dim=embed_dims[2], k=3)
self.cpe4 = ConvPosEnc(dim=embed_dims[3], k=3)
# Convolutional relative position encodings.
self.crpe1 = ConvRelPosEnc(head_chs=embed_dims[0] // num_heads, num_heads=num_heads, window=crpe_window)
self.crpe2 = ConvRelPosEnc(head_chs=embed_dims[1] // num_heads, num_heads=num_heads, window=crpe_window)
self.crpe3 = ConvRelPosEnc(head_chs=embed_dims[2] // num_heads, num_heads=num_heads, window=crpe_window)
self.crpe4 = ConvRelPosEnc(head_chs=embed_dims[3] // num_heads, num_heads=num_heads, window=crpe_window)
# Disable stochastic depth.
dpr = drop_path_rate
assert dpr == 0.0
skwargs = dict(
num_heads=num_heads,
qkv_bias=qkv_bias,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr,
norm_layer=norm_layer,
)
# Serial blocks 1.
self.serial_blocks1 = nn.ModuleList([
SerialBlock(
dim=embed_dims[0],
mlp_ratio=mlp_ratios[0],
shared_cpe=self.cpe1,
shared_crpe=self.crpe1,
**skwargs,
)
for _ in range(serial_depths[0])]
)
# Serial blocks 2.
self.serial_blocks2 = nn.ModuleList([
SerialBlock(
dim=embed_dims[1],
mlp_ratio=mlp_ratios[1],
shared_cpe=self.cpe2,
shared_crpe=self.crpe2,
**skwargs,
)
for _ in range(serial_depths[1])]
)
# Serial blocks 3.
self.serial_blocks3 = nn.ModuleList([
SerialBlock(
dim=embed_dims[2],
mlp_ratio=mlp_ratios[2],
shared_cpe=self.cpe3,
shared_crpe=self.crpe3,
**skwargs,
)
for _ in range(serial_depths[2])]
)
# Serial blocks 4.
self.serial_blocks4 = nn.ModuleList([
SerialBlock(
dim=embed_dims[3],
mlp_ratio=mlp_ratios[3],
shared_cpe=self.cpe4,
shared_crpe=self.crpe4,
**skwargs,
)
for _ in range(serial_depths[3])]
)
# Parallel blocks.
self.parallel_depth = parallel_depth
if self.parallel_depth > 0:
self.parallel_blocks = nn.ModuleList([
ParallelBlock(
dims=embed_dims,
mlp_ratios=mlp_ratios,
shared_crpes=(self.crpe1, self.crpe2, self.crpe3, self.crpe4),
**skwargs,
)
for _ in range(parallel_depth)]
)
else:
self.parallel_blocks = None
# Classification head(s).
if not self.return_interm_layers:
if self.parallel_blocks is not None:
self.norm2 = norm_layer(embed_dims[1])
self.norm3 = norm_layer(embed_dims[2])
else:
self.norm2 = self.norm3 = None
self.norm4 = norm_layer(embed_dims[3])
if self.parallel_depth > 0:
# CoaT series: Aggregate features of last three scales for classification.
assert embed_dims[1] == embed_dims[2] == embed_dims[3]
self.aggregate = torch.nn.Conv1d(in_channels=3, out_channels=1, kernel_size=1)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
else:
# CoaT-Lite series: Use feature of last scale for classification.
self.aggregate = None
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# Initialize weights.
trunc_normal_(self.cls_token1, std=.02)
trunc_normal_(self.cls_token2, std=.02)
trunc_normal_(self.cls_token3, std=.02)
trunc_normal_(self.cls_token4, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'cls_token1', 'cls_token2', 'cls_token3', 'cls_token4'}
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem1=r'^cls_token1|patch_embed1|crpe1|cpe1',
serial_blocks1=r'^serial_blocks1\.(\d+)',
stem2=r'^cls_token2|patch_embed2|crpe2|cpe2',
serial_blocks2=r'^serial_blocks2\.(\d+)',
stem3=r'^cls_token3|patch_embed3|crpe3|cpe3',
serial_blocks3=r'^serial_blocks3\.(\d+)',
stem4=r'^cls_token4|patch_embed4|crpe4|cpe4',
serial_blocks4=r'^serial_blocks4\.(\d+)',
parallel_blocks=[ # FIXME (partially?) overlap parallel w/ serial blocks??
(r'^parallel_blocks\.(\d+)', None),
(r'^norm|aggregate', (99999,)),
]
)
return matcher
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('token', 'avg')
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x0):
B = x0.shape[0]
# Serial blocks 1.
x1 = self.patch_embed1(x0)
H1, W1 = self.patch_embed1.grid_size
x1 = insert_cls(x1, self.cls_token1)
for blk in self.serial_blocks1:
x1 = blk(x1, size=(H1, W1))
x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
# Serial blocks 2.
x2 = self.patch_embed2(x1_nocls)
H2, W2 = self.patch_embed2.grid_size
x2 = insert_cls(x2, self.cls_token2)
for blk in self.serial_blocks2:
x2 = blk(x2, size=(H2, W2))
x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous()
# Serial blocks 3.
x3 = self.patch_embed3(x2_nocls)
H3, W3 = self.patch_embed3.grid_size
x3 = insert_cls(x3, self.cls_token3)
for blk in self.serial_blocks3:
x3 = blk(x3, size=(H3, W3))
x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous()
# Serial blocks 4.
x4 = self.patch_embed4(x3_nocls)
H4, W4 = self.patch_embed4.grid_size
x4 = insert_cls(x4, self.cls_token4)
for blk in self.serial_blocks4:
x4 = blk(x4, size=(H4, W4))
x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous()
# Only serial blocks: Early return.
if self.parallel_blocks is None:
if not torch.jit.is_scripting() and self.return_interm_layers:
# Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2).
feat_out = {}
if 'x1_nocls' in self.out_features:
feat_out['x1_nocls'] = x1_nocls
if 'x2_nocls' in self.out_features:
feat_out['x2_nocls'] = x2_nocls
if 'x3_nocls' in self.out_features:
feat_out['x3_nocls'] = x3_nocls
if 'x4_nocls' in self.out_features:
feat_out['x4_nocls'] = x4_nocls
return feat_out
else:
# Return features for classification.
x4 = self.norm4(x4)
return x4
# Parallel blocks.
for blk in self.parallel_blocks:
x2, x3, x4 = self.cpe2(x2, (H2, W2)), self.cpe3(x3, (H3, W3)), self.cpe4(x4, (H4, W4))
x1, x2, x3, x4 = blk(x1, x2, x3, x4, sizes=[(H1, W1), (H2, W2), (H3, W3), (H4, W4)])
if not torch.jit.is_scripting() and self.return_interm_layers:
# Return intermediate features for down-stream tasks (e.g. Deformable DETR and Detectron2).
feat_out = {}
if 'x1_nocls' in self.out_features:
x1_nocls = remove_cls(x1).reshape(B, H1, W1, -1).permute(0, 3, 1, 2).contiguous()
feat_out['x1_nocls'] = x1_nocls
if 'x2_nocls' in self.out_features:
x2_nocls = remove_cls(x2).reshape(B, H2, W2, -1).permute(0, 3, 1, 2).contiguous()
feat_out['x2_nocls'] = x2_nocls
if 'x3_nocls' in self.out_features:
x3_nocls = remove_cls(x3).reshape(B, H3, W3, -1).permute(0, 3, 1, 2).contiguous()
feat_out['x3_nocls'] = x3_nocls
if 'x4_nocls' in self.out_features:
x4_nocls = remove_cls(x4).reshape(B, H4, W4, -1).permute(0, 3, 1, 2).contiguous()
feat_out['x4_nocls'] = x4_nocls
return feat_out
else:
x2 = self.norm2(x2)
x3 = self.norm3(x3)
x4 = self.norm4(x4)
return [x2, x3, x4]
def forward_head(self, x_feat: Union[torch.Tensor, List[torch.Tensor]], pre_logits: bool = False):
if isinstance(x_feat, list):
assert self.aggregate is not None
if self.global_pool == 'avg':
x = torch.cat([xl[:, 1:].mean(dim=1, keepdim=True) for xl in x_feat], dim=1) # [B, 3, C]
else:
x = torch.stack([xl[:, 0] for xl in x_feat], dim=1) # [B, 3, C]
x = self.aggregate(x).squeeze(dim=1) # Shape: [B, C]
else:
x = x_feat[:, 1:].mean(dim=1) if self.global_pool == 'avg' else x_feat[:, 0]
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x) -> torch.Tensor:
if not torch.jit.is_scripting() and self.return_interm_layers:
# Return intermediate features (for down-stream tasks).
return self.forward_features(x)
else:
# Return features for classification.
x_feat = self.forward_features(x)
x = self.forward_head(x_feat)
return x
def insert_cls(x, cls_token):
""" Insert CLS token. """
cls_tokens = cls_token.expand(x.shape[0], -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
return x
def remove_cls(x):
""" Remove CLS token. """
return x[:, 1:, :]
def checkpoint_filter_fn(state_dict, model):
out_dict = {}
state_dict = state_dict.get('model', state_dict)
for k, v in state_dict.items():
# original model had unused norm layers, removing them requires filtering pretrained checkpoints
if k.startswith('norm1') or \
(k.startswith('norm2') and getattr(model, 'norm2', None) is None) or \
(k.startswith('norm3') and getattr(model, 'norm3', None) is None) or \
(k.startswith('norm4') and getattr(model, 'norm4', None) is None) or \
(k.startswith('aggregate') and getattr(model, 'aggregate', None) is None) or \
(k.startswith('head') and getattr(model, 'head', None) is None):
continue
out_dict[k] = v
return out_dict
def _create_coat(variant, pretrained=False, default_cfg=None, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(
CoaT,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
**kwargs,
)
return model
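# --- Editor's note: a hedged usage sketch, not part of the original source. ---
# The registered variants below are normally built through timm's factory, e.g.:
#
#   import torch, timm
#   model = timm.create_model('coat_lite_mini', pretrained=False, num_classes=10)
#   logits = model(torch.randn(1, 3, 224, 224))   # [1, 10]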
def _cfg_coat(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed1.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'coat_tiny.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_mini.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_small.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_tiny.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_mini.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_small.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_medium.in1k': _cfg_coat(hf_hub_id='timm/'),
'coat_lite_medium_384.in1k': _cfg_coat(
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0, crop_mode='squash',
),
})
@register_model
def coat_tiny(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[152, 152, 152, 152], serial_depths=[2, 2, 2, 2], parallel_depth=6)
model = _create_coat('coat_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_mini(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[152, 216, 216, 216], serial_depths=[2, 2, 2, 2], parallel_depth=6)
model = _create_coat('coat_mini', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_small(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
        patch_size=4, embed_dims=[152, 320, 320, 320], serial_depths=[2, 2, 2, 2], parallel_depth=6)
model = _create_coat('coat_small', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_tiny(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[64, 128, 256, 320], serial_depths=[2, 2, 2, 2], mlp_ratios=[8, 8, 4, 4])
model = _create_coat('coat_lite_tiny', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_mini(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[2, 2, 2, 2], mlp_ratios=[8, 8, 4, 4])
model = _create_coat('coat_lite_mini', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_small(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[64, 128, 320, 512], serial_depths=[3, 4, 6, 3], mlp_ratios=[8, 8, 4, 4])
model = _create_coat('coat_lite_small', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_medium(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
patch_size=4, embed_dims=[128, 256, 320, 512], serial_depths=[3, 6, 10, 8])
model = _create_coat('coat_lite_medium', pretrained=pretrained, **dict(model_cfg, **kwargs))
return model
@register_model
def coat_lite_medium_384(pretrained=False, **kwargs) -> CoaT:
model_cfg = dict(
img_size=384, patch_size=4, embed_dims=[128, 256, 320, 512], serial_depths=[3, 6, 10, 8])
model = _create_coat('coat_lite_medium_384', pretrained=pretrained, **dict(model_cfg, **kwargs))
    return model
| pytorch-image-models/timm/models/coat.py/0 | {
"file_path": "pytorch-image-models/timm/models/coat.py",
"repo_id": "pytorch-image-models",
"token_count": 15701
} |
""" EfficientViT (by MSRA)
Paper: `EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention`
- https://arxiv.org/abs/2305.07027
Adapted from official impl at https://github.com/microsoft/Cream/tree/main/EfficientViT
"""
__all__ = ['EfficientVitMsra']
import itertools
from collections import OrderedDict
from typing import Dict, Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import SqueezeExcite, SelectAdaptivePool2d, trunc_normal_, _assert
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
class ConvNorm(torch.nn.Sequential):
def __init__(self, in_chs, out_chs, ks=1, stride=1, pad=0, dilation=1, groups=1, bn_weight_init=1):
super().__init__()
self.conv = nn.Conv2d(in_chs, out_chs, ks, stride, pad, dilation, groups, bias=False)
self.bn = nn.BatchNorm2d(out_chs)
torch.nn.init.constant_(self.bn.weight, bn_weight_init)
torch.nn.init.constant_(self.bn.bias, 0)
@torch.no_grad()
def fuse(self):
c, bn = self.conv, self.bn
w = bn.weight / (bn.running_var + bn.eps)**0.5
w = c.weight * w[:, None, None, None]
b = bn.bias - bn.running_mean * bn.weight / \
(bn.running_var + bn.eps)**0.5
m = torch.nn.Conv2d(
w.size(1) * self.conv.groups, w.size(0), w.shape[2:],
stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups)
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
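    # --- Editor's note, not part of the original source: fuse() above folds the BatchNorm
    # into the preceding (bias-free) conv at inference time using the standard identities
    #   w_fused = w_conv * gamma / sqrt(running_var + eps)
    #   b_fused = beta - running_mean * gamma / sqrt(running_var + eps)
    # so a single Conv2d reproduces conv+BN exactly, up to floating point error.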
class NormLinear(torch.nn.Sequential):
def __init__(self, in_features, out_features, bias=True, std=0.02, drop=0.):
super().__init__()
self.bn = nn.BatchNorm1d(in_features)
self.drop = nn.Dropout(drop)
self.linear = nn.Linear(in_features, out_features, bias=bias)
trunc_normal_(self.linear.weight, std=std)
if self.linear.bias is not None:
nn.init.constant_(self.linear.bias, 0)
@torch.no_grad()
def fuse(self):
bn, linear = self.bn, self.linear
w = bn.weight / (bn.running_var + bn.eps)**0.5
b = bn.bias - self.bn.running_mean * \
self.bn.weight / (bn.running_var + bn.eps)**0.5
w = linear.weight * w[None, :]
if linear.bias is None:
b = b @ self.linear.weight.T
else:
b = (linear.weight @ b[:, None]).view(-1) + self.linear.bias
m = torch.nn.Linear(w.size(1), w.size(0))
m.weight.data.copy_(w)
m.bias.data.copy_(b)
return m
class PatchMerging(torch.nn.Module):
def __init__(self, dim, out_dim):
super().__init__()
hid_dim = int(dim * 4)
self.conv1 = ConvNorm(dim, hid_dim, 1, 1, 0)
self.act = torch.nn.ReLU()
self.conv2 = ConvNorm(hid_dim, hid_dim, 3, 2, 1, groups=hid_dim)
self.se = SqueezeExcite(hid_dim, .25)
self.conv3 = ConvNorm(hid_dim, out_dim, 1, 1, 0)
def forward(self, x):
x = self.conv3(self.se(self.act(self.conv2(self.act(self.conv1(x))))))
return x
class ResidualDrop(torch.nn.Module):
def __init__(self, m, drop=0.):
super().__init__()
self.m = m
self.drop = drop
def forward(self, x):
if self.training and self.drop > 0:
return x + self.m(x) * torch.rand(
x.size(0), 1, 1, 1, device=x.device).ge_(self.drop).div(1 - self.drop).detach()
else:
return x + self.m(x)
class ConvMlp(torch.nn.Module):
def __init__(self, ed, h):
super().__init__()
self.pw1 = ConvNorm(ed, h)
self.act = torch.nn.ReLU()
self.pw2 = ConvNorm(h, ed, bn_weight_init=0)
def forward(self, x):
x = self.pw2(self.act(self.pw1(x)))
return x
class CascadedGroupAttention(torch.nn.Module):
attention_bias_cache: Dict[str, torch.Tensor]
r""" Cascaded Group Attention.
Args:
dim (int): Number of input channels.
key_dim (int): The dimension for query and key.
num_heads (int): Number of attention heads.
        attn_ratio (int): Multiplier applied to the query/key dim to get the value dimension.
        resolution (int): Input resolution; corresponds to the window size.
kernels (List[int]): The kernel size of the dw conv on query.
"""
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4,
resolution=14,
kernels=(5, 5, 5, 5),
):
super().__init__()
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.val_dim = int(attn_ratio * key_dim)
self.attn_ratio = attn_ratio
qkvs = []
dws = []
for i in range(num_heads):
qkvs.append(ConvNorm(dim // (num_heads), self.key_dim * 2 + self.val_dim))
dws.append(ConvNorm(self.key_dim, self.key_dim, kernels[i], 1, kernels[i] // 2, groups=self.key_dim))
self.qkvs = torch.nn.ModuleList(qkvs)
self.dws = torch.nn.ModuleList(dws)
self.proj = torch.nn.Sequential(
torch.nn.ReLU(),
ConvNorm(self.val_dim * num_heads, dim, bn_weight_init=0)
)
points = list(itertools.product(range(resolution), range(resolution)))
N = len(points)
attention_offsets = {}
idxs = []
for p1 in points:
for p2 in points:
offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
if offset not in attention_offsets:
attention_offsets[offset] = len(attention_offsets)
idxs.append(attention_offsets[offset])
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, len(attention_offsets)))
self.register_buffer('attention_bias_idxs', torch.LongTensor(idxs).view(N, N), persistent=False)
self.attention_bias_cache = {}
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x):
B, C, H, W = x.shape
feats_in = x.chunk(len(self.qkvs), dim=1)
feats_out = []
feat = feats_in[0]
attn_bias = self.get_attention_biases(x.device)
for head_idx, (qkv, dws) in enumerate(zip(self.qkvs, self.dws)):
if head_idx > 0:
feat = feat + feats_in[head_idx]
feat = qkv(feat)
q, k, v = feat.view(B, -1, H, W).split([self.key_dim, self.key_dim, self.val_dim], dim=1)
q = dws(q)
q, k, v = q.flatten(2), k.flatten(2), v.flatten(2)
q = q * self.scale
attn = q.transpose(-2, -1) @ k
attn = attn + attn_bias[head_idx]
attn = attn.softmax(dim=-1)
feat = v @ attn.transpose(-2, -1)
feat = feat.view(B, self.val_dim, H, W)
feats_out.append(feat)
x = self.proj(torch.cat(feats_out, 1))
return x
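# --- Editor's note: an illustrative sketch, not part of the original source. ---
# Cascaded group attention splits the input channels across heads (x.chunk above) and adds
# each head's output into the next head's input slice, so later heads refine earlier ones
# rather than all heads re-projecting the full-width input. Minimal shape check (values are
# illustrative assumptions):
#
#   attn = CascadedGroupAttention(dim=64, key_dim=16, num_heads=4, attn_ratio=4, resolution=7)
#   y = attn(torch.randn(2, 64, 7, 7))   # [2, 64, 7, 7]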
class LocalWindowAttention(torch.nn.Module):
r""" Local Window Attention.
Args:
dim (int): Number of input channels.
key_dim (int): The dimension for query and key.
num_heads (int): Number of attention heads.
        attn_ratio (int): Multiplier applied to the query/key dim to get the value dimension.
resolution (int): Input resolution.
window_resolution (int): Local window resolution.
kernels (List[int]): The kernel size of the dw conv on query.
"""
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4,
resolution=14,
window_resolution=7,
kernels=(5, 5, 5, 5),
):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.resolution = resolution
assert window_resolution > 0, 'window_size must be greater than 0'
self.window_resolution = window_resolution
window_resolution = min(window_resolution, resolution)
self.attn = CascadedGroupAttention(
dim, key_dim, num_heads,
attn_ratio=attn_ratio,
resolution=window_resolution,
kernels=kernels,
)
def forward(self, x):
H = W = self.resolution
B, C, H_, W_ = x.shape
# Only check this for classification models
_assert(H == H_, f'input feature has wrong size, expect {(H, W)}, got {(H_, W_)}')
_assert(W == W_, f'input feature has wrong size, expect {(H, W)}, got {(H_, W_)}')
if H <= self.window_resolution and W <= self.window_resolution:
x = self.attn(x)
else:
x = x.permute(0, 2, 3, 1)
pad_b = (self.window_resolution - H % self.window_resolution) % self.window_resolution
pad_r = (self.window_resolution - W % self.window_resolution) % self.window_resolution
x = torch.nn.functional.pad(x, (0, 0, 0, pad_r, 0, pad_b))
pH, pW = H + pad_b, W + pad_r
nH = pH // self.window_resolution
nW = pW // self.window_resolution
# window partition, BHWC -> B(nHh)(nWw)C -> BnHnWhwC -> (BnHnW)hwC -> (BnHnW)Chw
x = x.view(B, nH, self.window_resolution, nW, self.window_resolution, C).transpose(2, 3)
x = x.reshape(B * nH * nW, self.window_resolution, self.window_resolution, C).permute(0, 3, 1, 2)
x = self.attn(x)
# window reverse, (BnHnW)Chw -> (BnHnW)hwC -> BnHnWhwC -> B(nHh)(nWw)C -> BHWC
x = x.permute(0, 2, 3, 1).view(B, nH, nW, self.window_resolution, self.window_resolution, C)
x = x.transpose(2, 3).reshape(B, pH, pW, C)
x = x[:, :H, :W].contiguous()
x = x.permute(0, 3, 1, 2)
return x
class EfficientVitBlock(torch.nn.Module):
""" A basic EfficientVit building block.
Args:
dim (int): Number of input channels.
key_dim (int): Dimension for query and key in the token mixer.
num_heads (int): Number of attention heads.
        attn_ratio (int): Multiplier applied to the query/key dim to get the value dimension.
resolution (int): Input resolution.
window_resolution (int): Local window resolution.
kernels (List[int]): The kernel size of the dw conv on query.
"""
def __init__(
self,
dim,
key_dim,
num_heads=8,
attn_ratio=4,
resolution=14,
window_resolution=7,
kernels=[5, 5, 5, 5],
):
super().__init__()
self.dw0 = ResidualDrop(ConvNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0.))
self.ffn0 = ResidualDrop(ConvMlp(dim, int(dim * 2)))
self.mixer = ResidualDrop(
LocalWindowAttention(
dim, key_dim, num_heads,
attn_ratio=attn_ratio,
resolution=resolution,
window_resolution=window_resolution,
kernels=kernels,
)
)
self.dw1 = ResidualDrop(ConvNorm(dim, dim, 3, 1, 1, groups=dim, bn_weight_init=0.))
self.ffn1 = ResidualDrop(ConvMlp(dim, int(dim * 2)))
def forward(self, x):
return self.ffn1(self.dw1(self.mixer(self.ffn0(self.dw0(x)))))
class EfficientVitStage(torch.nn.Module):
def __init__(
self,
in_dim,
out_dim,
key_dim,
downsample=('', 1),
num_heads=8,
attn_ratio=4,
resolution=14,
window_resolution=7,
kernels=[5, 5, 5, 5],
depth=1,
):
super().__init__()
if downsample[0] == 'subsample':
self.resolution = (resolution - 1) // downsample[1] + 1
down_blocks = []
down_blocks.append((
'res1',
torch.nn.Sequential(
ResidualDrop(ConvNorm(in_dim, in_dim, 3, 1, 1, groups=in_dim)),
ResidualDrop(ConvMlp(in_dim, int(in_dim * 2))),
)
))
down_blocks.append(('patchmerge', PatchMerging(in_dim, out_dim)))
down_blocks.append((
'res2',
torch.nn.Sequential(
ResidualDrop(ConvNorm(out_dim, out_dim, 3, 1, 1, groups=out_dim)),
ResidualDrop(ConvMlp(out_dim, int(out_dim * 2))),
)
))
self.downsample = nn.Sequential(OrderedDict(down_blocks))
else:
assert in_dim == out_dim
self.downsample = nn.Identity()
self.resolution = resolution
blocks = []
for d in range(depth):
blocks.append(EfficientVitBlock(out_dim, key_dim, num_heads, attn_ratio, self.resolution, window_resolution, kernels))
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.blocks(x)
return x
class PatchEmbedding(torch.nn.Sequential):
def __init__(self, in_chans, dim):
super().__init__()
self.add_module('conv1', ConvNorm(in_chans, dim // 8, 3, 2, 1))
self.add_module('relu1', torch.nn.ReLU())
self.add_module('conv2', ConvNorm(dim // 8, dim // 4, 3, 2, 1))
self.add_module('relu2', torch.nn.ReLU())
self.add_module('conv3', ConvNorm(dim // 4, dim // 2, 3, 2, 1))
self.add_module('relu3', torch.nn.ReLU())
self.add_module('conv4', ConvNorm(dim // 2, dim, 3, 2, 1))
self.patch_size = 16
class EfficientVitMsra(nn.Module):
def __init__(
self,
img_size=224,
in_chans=3,
num_classes=1000,
embed_dim=(64, 128, 192),
key_dim=(16, 16, 16),
depth=(1, 2, 3),
num_heads=(4, 4, 4),
window_size=(7, 7, 7),
kernels=(5, 5, 5, 5),
down_ops=(('', 1), ('subsample', 2), ('subsample', 2)),
global_pool='avg',
drop_rate=0.,
):
super(EfficientVitMsra, self).__init__()
self.grad_checkpointing = False
self.num_classes = num_classes
self.drop_rate = drop_rate
# Patch embedding
self.patch_embed = PatchEmbedding(in_chans, embed_dim[0])
stride = self.patch_embed.patch_size
resolution = img_size // self.patch_embed.patch_size
attn_ratio = [embed_dim[i] / (key_dim[i] * num_heads[i]) for i in range(len(embed_dim))]
# Build EfficientVit blocks
self.feature_info = []
stages = []
pre_ed = embed_dim[0]
for i, (ed, kd, dpth, nh, ar, wd, do) in enumerate(
zip(embed_dim, key_dim, depth, num_heads, attn_ratio, window_size, down_ops)):
stage = EfficientVitStage(
in_dim=pre_ed,
out_dim=ed,
key_dim=kd,
downsample=do,
num_heads=nh,
attn_ratio=ar,
resolution=resolution,
window_resolution=wd,
kernels=kernels,
depth=dpth,
)
pre_ed = ed
if do[0] == 'subsample' and i != 0:
stride *= do[1]
resolution = stage.resolution
stages.append(stage)
self.feature_info += [dict(num_chs=ed, reduction=stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
if global_pool == 'avg':
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
else:
assert num_classes == 0
self.global_pool = nn.Identity()
self.num_features = self.head_hidden_size = embed_dim[-1]
self.head = NormLinear(
self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else torch.nn.Identity()
@torch.jit.ignore
def no_weight_decay(self):
return {x for x in self.state_dict().keys() if 'attention_biases' in x}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^patch_embed',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+).downsample', (0,)),
(r'^stages\.(\d+)\.\w+\.(\d+)', None),
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.linear
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
if global_pool == 'avg':
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool, flatten=True)
else:
assert num_classes == 0
self.global_pool = nn.Identity()
self.head = NormLinear(
self.num_features, num_classes, drop=self.drop_rate) if num_classes > 0 else torch.nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
x = self.global_pool(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
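# --- Editor's note: a hedged usage sketch, not part of the original source. ---
# The MSRA EfficientViT variants registered later in this file are built via timm's factory:
#
#   import torch, timm
#   model = timm.create_model('efficientvit_m0', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))   # [1, 1000]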
# def checkpoint_filter_fn(state_dict, model):
# if 'model' in state_dict.keys():
# state_dict = state_dict['model']
# tmp_dict = {}
# out_dict = {}
# target_keys = model.state_dict().keys()
# target_keys = [k for k in target_keys if k.startswith('stages.')]
#
# for k, v in state_dict.items():
# if 'attention_bias_idxs' in k:
# continue
# k = k.split('.')
# if k[-2] == 'c':
# k[-2] = 'conv'
# if k[-2] == 'l':
# k[-2] = 'linear'
# k = '.'.join(k)
# tmp_dict[k] = v
#
# for k, v in tmp_dict.items():
# if k.startswith('patch_embed'):
# k = k.split('.')
# k[1] = 'conv' + str(int(k[1]) // 2 + 1)
# k = '.'.join(k)
# elif k.startswith('blocks'):
# kw = '.'.join(k.split('.')[2:])
# find_kw = [a for a in list(sorted(tmp_dict.keys())) if kw in a]
# idx = find_kw.index(k)
# k = [a for a in target_keys if kw in a][idx]
# out_dict[k] = v
#
# return out_dict
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000,
'mean': IMAGENET_DEFAULT_MEAN,
'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv1.conv',
'classifier': 'head.linear',
'fixed_input_size': True,
'pool_size': (4, 4),
**kwargs,
}
default_cfgs = generate_default_cfgs({
'efficientvit_m0.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m0.pth'
),
'efficientvit_m1.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m1.pth'
),
'efficientvit_m2.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m2.pth'
),
'efficientvit_m3.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m3.pth'
),
'efficientvit_m4.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m4.pth'
),
'efficientvit_m5.r224_in1k': _cfg(
hf_hub_id='timm/',
#url='https://github.com/xinyuliu-jeffrey/EfficientVit_Model_Zoo/releases/download/v1.0/efficientvit_m5.pth'
),
})
def _create_efficientvit_msra(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', (0, 1, 2))
model = build_model_with_cfg(
EfficientVitMsra,
variant,
pretrained,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs
)
return model
@register_model
def efficientvit_m0(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[64, 128, 192],
depth=[1, 2, 3],
num_heads=[4, 4, 4],
window_size=[7, 7, 7],
kernels=[5, 5, 5, 5]
)
return _create_efficientvit_msra('efficientvit_m0', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m1(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[128, 144, 192],
depth=[1, 2, 3],
num_heads=[2, 3, 3],
window_size=[7, 7, 7],
kernels=[7, 5, 3, 3]
)
return _create_efficientvit_msra('efficientvit_m1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m2(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[128, 192, 224],
depth=[1, 2, 3],
num_heads=[4, 3, 2],
window_size=[7, 7, 7],
kernels=[7, 5, 3, 3]
)
return _create_efficientvit_msra('efficientvit_m2', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m3(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[128, 240, 320],
depth=[1, 2, 3],
num_heads=[4, 3, 4],
window_size=[7, 7, 7],
kernels=[5, 5, 5, 5]
)
return _create_efficientvit_msra('efficientvit_m3', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m4(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[128, 256, 384],
depth=[1, 2, 3],
num_heads=[4, 4, 4],
window_size=[7, 7, 7],
kernels=[7, 5, 3, 3]
)
return _create_efficientvit_msra('efficientvit_m4', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientvit_m5(pretrained=False, **kwargs):
model_args = dict(
img_size=224,
embed_dim=[192, 288, 384],
depth=[1, 3, 4],
num_heads=[3, 3, 4],
window_size=[7, 7, 7],
kernels=[7, 5, 3, 3]
)
return _create_efficientvit_msra('efficientvit_m5', pretrained=pretrained, **dict(model_args, **kwargs))
| pytorch-image-models/timm/models/efficientvit_msra.py/0 | {
"file_path": "pytorch-image-models/timm/models/efficientvit_msra.py",
"repo_id": "pytorch-image-models",
"token_count": 11892
} |
"""
InceptionNeXt paper: https://arxiv.org/abs/2303.16900
Original implementation & weights from: https://github.com/sail-sg/inceptionnext
"""
from functools import partial
from typing import Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import trunc_normal_, DropPath, to_2tuple, get_padding, SelectAdaptivePool2d
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['MetaNeXt']
class InceptionDWConv2d(nn.Module):
""" Inception depthwise convolution
"""
def __init__(
self,
in_chs,
square_kernel_size=3,
band_kernel_size=11,
branch_ratio=0.125,
dilation=1,
):
super().__init__()
        gc = int(in_chs * branch_ratio)  # number of channels in each convolution branch
square_padding = get_padding(square_kernel_size, dilation=dilation)
band_padding = get_padding(band_kernel_size, dilation=dilation)
self.dwconv_hw = nn.Conv2d(
gc, gc, square_kernel_size,
padding=square_padding, dilation=dilation, groups=gc)
self.dwconv_w = nn.Conv2d(
gc, gc, (1, band_kernel_size),
padding=(0, band_padding), dilation=(1, dilation), groups=gc)
self.dwconv_h = nn.Conv2d(
gc, gc, (band_kernel_size, 1),
padding=(band_padding, 0), dilation=(dilation, 1), groups=gc)
self.split_indexes = (in_chs - 3 * gc, gc, gc, gc)
def forward(self, x):
x_id, x_hw, x_w, x_h = torch.split(x, self.split_indexes, dim=1)
return torch.cat((
x_id,
self.dwconv_hw(x_hw),
self.dwconv_w(x_w),
self.dwconv_h(x_h)
), dim=1,
)
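# --- Editor's note: an illustrative sketch, not part of the original source. ---
# With the default branch_ratio=0.125 and in_chs=64, gc = 8 and split_indexes = (40, 8, 8, 8):
# 40 channels pass through untouched and the three 8-channel groups go through the square,
# 1 x k band and k x 1 band depthwise convs respectively, then everything is re-concatenated:
#
#   mixer = InceptionDWConv2d(64)
#   y = mixer(torch.randn(2, 64, 56, 56))   # [2, 64, 56, 56]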
class ConvMlp(nn.Module):
""" MLP using 1x1 convs that keeps spatial dims
copied from timm: https://github.com/huggingface/pytorch-image-models/blob/v0.6.11/timm/models/layers/mlp.py
"""
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.ReLU,
norm_layer=None,
bias=True,
drop=0.,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
bias = to_2tuple(bias)
self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=bias[0])
self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
self.act = act_layer()
self.drop = nn.Dropout(drop)
self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=bias[1])
def forward(self, x):
x = self.fc1(x)
x = self.norm(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
return x
class MlpClassifierHead(nn.Module):
""" MLP classification head
"""
def __init__(
self,
in_features,
num_classes=1000,
pool_type='avg',
mlp_ratio=3,
act_layer=nn.GELU,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
drop=0.,
bias=True
):
super().__init__()
self.use_conv = False
self.in_features = in_features
self.num_features = hidden_features = int(mlp_ratio * in_features)
assert pool_type, 'Cannot disable pooling'
self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True)
self.fc1 = nn.Linear(in_features * self.global_pool.feat_mult(), hidden_features, bias=bias)
self.act = act_layer()
self.norm = norm_layer(hidden_features)
self.fc2 = nn.Linear(hidden_features, num_classes, bias=bias)
self.drop = nn.Dropout(drop)
def reset(self, num_classes: int, pool_type: Optional[str] = None):
if pool_type is not None:
assert pool_type, 'Cannot disable pooling'
self.global_pool = SelectAdaptivePool2d(pool_type=pool_type, flatten=True)
self.fc2 = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward(self, x, pre_logits: bool = False):
x = self.global_pool(x)
x = self.fc1(x)
x = self.act(x)
x = self.norm(x)
x = self.drop(x)
return x if pre_logits else self.fc2(x)
class MetaNeXtBlock(nn.Module):
""" MetaNeXtBlock Block
Args:
dim (int): Number of input channels.
drop_path (float): Stochastic depth rate. Default: 0.0
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(
self,
dim,
dilation=1,
token_mixer=InceptionDWConv2d,
norm_layer=nn.BatchNorm2d,
mlp_layer=ConvMlp,
mlp_ratio=4,
act_layer=nn.GELU,
ls_init_value=1e-6,
drop_path=0.,
):
super().__init__()
self.token_mixer = token_mixer(dim, dilation=dilation)
self.norm = norm_layer(dim)
self.mlp = mlp_layer(dim, int(mlp_ratio * dim), act_layer=act_layer)
self.gamma = nn.Parameter(ls_init_value * torch.ones(dim)) if ls_init_value else None
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
shortcut = x
x = self.token_mixer(x)
x = self.norm(x)
x = self.mlp(x)
if self.gamma is not None:
x = x.mul(self.gamma.reshape(1, -1, 1, 1))
x = self.drop_path(x) + shortcut
return x
class MetaNeXtStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride=2,
depth=2,
dilation=(1, 1),
drop_path_rates=None,
ls_init_value=1.0,
token_mixer=InceptionDWConv2d,
act_layer=nn.GELU,
norm_layer=None,
mlp_ratio=4,
):
super().__init__()
self.grad_checkpointing = False
if stride > 1 or dilation[0] != dilation[1]:
self.downsample = nn.Sequential(
norm_layer(in_chs),
nn.Conv2d(
in_chs,
out_chs,
kernel_size=2,
stride=stride,
dilation=dilation[0],
),
)
else:
self.downsample = nn.Identity()
drop_path_rates = drop_path_rates or [0.] * depth
stage_blocks = []
for i in range(depth):
stage_blocks.append(MetaNeXtBlock(
dim=out_chs,
dilation=dilation[1],
drop_path=drop_path_rates[i],
ls_init_value=ls_init_value,
token_mixer=token_mixer,
act_layer=act_layer,
norm_layer=norm_layer,
mlp_ratio=mlp_ratio,
))
self.blocks = nn.Sequential(*stage_blocks)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class MetaNeXt(nn.Module):
r""" MetaNeXt
A PyTorch impl of : `InceptionNeXt: When Inception Meets ConvNeXt` - https://arxiv.org/abs/2303.16900
Args:
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
depths (tuple(int)): Number of blocks at each stage. Default: (3, 3, 9, 3)
dims (tuple(int)): Feature dimension at each stage. Default: (96, 192, 384, 768)
        token_mixers: Token mixer function. Default: InceptionDWConv2d
norm_layer: Normalization layer. Default: nn.BatchNorm2d
act_layer: Activation function for MLP. Default: nn.GELU
mlp_ratios (int or tuple(int)): MLP ratios. Default: (4, 4, 4, 3)
drop_rate (float): Head dropout rate
drop_path_rate (float): Stochastic depth rate. Default: 0.
ls_init_value (float): Init value for Layer Scale. Default: 1e-6.
"""
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
output_stride=32,
depths=(3, 3, 9, 3),
dims=(96, 192, 384, 768),
token_mixers=InceptionDWConv2d,
norm_layer=nn.BatchNorm2d,
act_layer=nn.GELU,
mlp_ratios=(4, 4, 4, 3),
drop_rate=0.,
drop_path_rate=0.,
ls_init_value=1e-6,
):
super().__init__()
num_stage = len(depths)
if not isinstance(token_mixers, (list, tuple)):
token_mixers = [token_mixers] * num_stage
if not isinstance(mlp_ratios, (list, tuple)):
mlp_ratios = [mlp_ratios] * num_stage
self.num_classes = num_classes
self.global_pool = global_pool
self.drop_rate = drop_rate
self.feature_info = []
self.stem = nn.Sequential(
nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
norm_layer(dims[0])
)
dp_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
prev_chs = dims[0]
curr_stride = 4
dilation = 1
# feature resolution stages, each consisting of multiple residual blocks
self.stages = nn.Sequential()
for i in range(num_stage):
stride = 2 if curr_stride == 2 or i > 0 else 1
if curr_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
curr_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
out_chs = dims[i]
self.stages.append(MetaNeXtStage(
prev_chs,
out_chs,
stride=stride if i > 0 else 1,
dilation=(first_dilation, dilation),
depth=depths[i],
drop_path_rates=dp_rates[i],
ls_init_value=ls_init_value,
act_layer=act_layer,
token_mixer=token_mixers[i],
norm_layer=norm_layer,
mlp_ratio=mlp_ratios[i],
))
prev_chs = out_chs
self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{i}')]
self.num_features = prev_chs
self.head = MlpClassifierHead(self.num_features, num_classes, pool_type=self.global_pool, drop=drop_rate)
self.head_hidden_size = self.head.num_features
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, (nn.Conv2d, nn.Linear)):
trunc_normal_(m.weight, std=.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.downsample', (0,)), # blocks
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
]
)
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc2
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def no_weight_decay(self):
return set()
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
        return self.head(x, pre_logits=pre_logits)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0', 'classifier': 'head.fc2',
**kwargs
}
default_cfgs = generate_default_cfgs({
'inception_next_atto.sail_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_atto.pth',
),
'inception_next_tiny.sail_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_tiny.pth',
),
'inception_next_small.sail_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_small.pth',
),
'inception_next_base.sail_in1k': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_base.pth',
crop_pct=0.95,
),
'inception_next_base.sail_in1k_384': _cfg(
hf_hub_id='timm/',
# url='https://github.com/sail-sg/inceptionnext/releases/download/model/inceptionnext_base_384.pth',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
})
def _create_inception_next(variant, pretrained=False, **kwargs):
model = build_model_with_cfg(
MetaNeXt, variant, pretrained,
feature_cfg=dict(out_indices=(0, 1, 2, 3), flatten_sequential=True),
**kwargs,
)
return model
@register_model
def inception_next_atto(pretrained=False, **kwargs):
model_args = dict(
depths=(2, 2, 6, 2), dims=(40, 80, 160, 320),
token_mixers=partial(InceptionDWConv2d, band_kernel_size=9, branch_ratio=0.25)
)
return _create_inception_next('inception_next_atto', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def inception_next_tiny(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 3, 9, 3), dims=(96, 192, 384, 768),
token_mixers=InceptionDWConv2d,
)
return _create_inception_next('inception_next_tiny', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def inception_next_small(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 3, 27, 3), dims=(96, 192, 384, 768),
token_mixers=InceptionDWConv2d,
)
return _create_inception_next('inception_next_small', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def inception_next_base(pretrained=False, **kwargs):
model_args = dict(
depths=(3, 3, 27, 3), dims=(128, 256, 512, 1024),
token_mixers=InceptionDWConv2d,
)
return _create_inception_next('inception_next_base', pretrained=pretrained, **dict(model_args, **kwargs))
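# --- Editor's note: a hedged usage sketch, not part of the original source. ---
#   import torch, timm
#   model = timm.create_model('inception_next_tiny', pretrained=False, features_only=True)
#   feats = model(torch.randn(1, 3, 224, 224))   # 4 feature maps at strides 4, 8, 16, 32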
| pytorch-image-models/timm/models/inception_next.py/0 | {
"file_path": "pytorch-image-models/timm/models/inception_next.py",
"repo_id": "pytorch-image-models",
"token_count": 7654
} |
""" Normalization Free Nets. NFNet, NF-RegNet, NF-ResNet (pre-activation) Models
Paper: `Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
Paper: `High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
Official Deepmind JAX code: https://github.com/deepmind/deepmind-research/tree/master/nfnets
Status:
* These models are a work in progress, experiments ongoing.
* Pretrained weights for two models so far, more to come.
* Model details updated to closer match official JAX code now that it's released
* NF-ResNet, NF-RegNet-B, and NFNet-F models supported
Hacked together by / copyright Ross Wightman, 2021.
"""
from collections import OrderedDict
from dataclasses import dataclass, replace
from functools import partial
from typing import Callable, Tuple, Optional
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ClassifierHead, DropPath, AvgPool2dSame, ScaledStdConv2d, ScaledStdConv2dSame, \
get_act_layer, get_act_fn, get_attn, make_divisible
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['NormFreeNet', 'NfCfg'] # model_registry will add each entrypoint fn to this
@dataclass
class NfCfg:
depths: Tuple[int, int, int, int]
channels: Tuple[int, int, int, int]
alpha: float = 0.2
stem_type: str = '3x3'
stem_chs: Optional[int] = None
group_size: Optional[int] = None
attn_layer: Optional[str] = None
    attn_kwargs: Optional[dict] = None
attn_gain: float = 2.0 # NF correction gain to apply if attn layer is used
width_factor: float = 1.0
bottle_ratio: float = 0.5
num_features: int = 0 # num out_channels for final conv, no final_conv if 0
    ch_div: int = 8  # round channels to a multiple of 8 to keep tensor-core use optimal
reg: bool = False # enables EfficientNet-like options used in RegNet variants, expand from in_chs, se in middle
extra_conv: bool = False # extra 3x3 bottleneck convolution for NFNet models
gamma_in_act: bool = False
same_padding: bool = False
std_conv_eps: float = 1e-5
skipinit: bool = False # disabled by default, non-trivial performance impact
zero_init_fc: bool = False
act_layer: str = 'silu'
class GammaAct(nn.Module):
def __init__(self, act_type='relu', gamma: float = 1.0, inplace=False):
super().__init__()
self.act_fn = get_act_fn(act_type)
self.gamma = gamma
self.inplace = inplace
def forward(self, x):
return self.act_fn(x, inplace=self.inplace).mul_(self.gamma)
def act_with_gamma(act_type, gamma: float = 1.):
def _create(inplace=False):
return GammaAct(act_type, gamma=gamma, inplace=inplace)
return _create
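# --- Editor's note: a hedged numeric example, not part of the original source. ---
# The gamma factor rescales the nonlinearity so its output is roughly unit variance for
# unit-variance Gaussian input (the signal-propagation correction from the NF-ResNet paper).
# For ReLU the analytic value is sqrt(2*pi / (pi - 1)) ~= 1.713:
#
#   act = act_with_gamma('relu', gamma=1.713)()
#   x = torch.randn(1_000_000)
#   act(x).std()   # ~1.0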
class DownsampleAvg(nn.Module):
def __init__(
self,
in_chs: int,
out_chs: int,
stride: int = 1,
dilation: int = 1,
first_dilation: Optional[int] = None,
conv_layer: Callable = ScaledStdConv2d,
):
""" AvgPool Downsampling as in 'D' ResNet variants. Support for dilation."""
super(DownsampleAvg, self).__init__()
avg_stride = stride if dilation == 1 else 1
if stride > 1 or dilation > 1:
avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d
self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False)
else:
self.pool = nn.Identity()
self.conv = conv_layer(in_chs, out_chs, 1, stride=1)
def forward(self, x):
return self.conv(self.pool(x))
@register_notrace_module # reason: mul_ causes FX to drop a relevant node. https://github.com/pytorch/pytorch/issues/68301
class NormFreeBlock(nn.Module):
"""Normalization-Free pre-activation block.
"""
def __init__(
self,
in_chs: int,
out_chs: Optional[int] = None,
stride: int = 1,
dilation: int = 1,
first_dilation: Optional[int] = None,
alpha: float = 1.0,
beta: float = 1.0,
bottle_ratio: float = 0.25,
group_size: Optional[int] = None,
ch_div: int = 1,
reg: bool = True,
extra_conv: bool = False,
skipinit: bool = False,
attn_layer: Optional[Callable] = None,
attn_gain: float = 2.0,
act_layer: Optional[Callable] = None,
conv_layer: Callable = ScaledStdConv2d,
drop_path_rate: float = 0.,
):
super().__init__()
first_dilation = first_dilation or dilation
out_chs = out_chs or in_chs
# RegNet variants scale bottleneck from in_chs, otherwise scale from out_chs like ResNet
mid_chs = make_divisible(in_chs * bottle_ratio if reg else out_chs * bottle_ratio, ch_div)
groups = 1 if not group_size else mid_chs // group_size
if group_size and group_size % ch_div == 0:
mid_chs = group_size * groups # correct mid_chs if group_size divisible by ch_div, otherwise error
self.alpha = alpha
self.beta = beta
self.attn_gain = attn_gain
if in_chs != out_chs or stride != 1 or dilation != first_dilation:
self.downsample = DownsampleAvg(
in_chs,
out_chs,
stride=stride,
dilation=dilation,
first_dilation=first_dilation,
conv_layer=conv_layer,
)
else:
self.downsample = None
self.act1 = act_layer()
self.conv1 = conv_layer(in_chs, mid_chs, 1)
self.act2 = act_layer(inplace=True)
self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups)
if extra_conv:
self.act2b = act_layer(inplace=True)
self.conv2b = conv_layer(mid_chs, mid_chs, 3, stride=1, dilation=dilation, groups=groups)
else:
self.act2b = None
self.conv2b = None
if reg and attn_layer is not None:
self.attn = attn_layer(mid_chs) # RegNet blocks apply attn btw conv2 & 3
else:
self.attn = None
self.act3 = act_layer()
self.conv3 = conv_layer(mid_chs, out_chs, 1, gain_init=1. if skipinit else 0.)
if not reg and attn_layer is not None:
self.attn_last = attn_layer(out_chs) # ResNet blocks apply attn after conv3
else:
self.attn_last = None
self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity()
self.skipinit_gain = nn.Parameter(torch.tensor(0.)) if skipinit else None
def forward(self, x):
out = self.act1(x) * self.beta
# shortcut branch
shortcut = x
if self.downsample is not None:
shortcut = self.downsample(out)
# residual branch
out = self.conv1(out)
out = self.conv2(self.act2(out))
if self.conv2b is not None:
out = self.conv2b(self.act2b(out))
if self.attn is not None:
out = self.attn_gain * self.attn(out)
out = self.conv3(self.act3(out))
if self.attn_last is not None:
out = self.attn_gain * self.attn_last(out)
out = self.drop_path(out)
if self.skipinit_gain is not None:
out.mul_(self.skipinit_gain)
out = out * self.alpha + shortcut
return out
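# Illustrative helper (not part of the original model code): reproduces the beta / expected-variance
# bookkeeping that NormFreeNet.__init__ performs below. Each block scales its residual branch by alpha
# and pre-scales its input activation by beta = 1 / sqrt(expected_var); expected_var grows by alpha**2
# per block and is reset to 1 after the first (transition) block of each stage.
def _nf_beta_schedule(depths=(1, 2, 6, 3), alpha=0.2):
    betas = []
    expected_var = 1.0
    for stage_depth in depths:
        for block_idx in range(stage_depth):
            betas.append(1. / expected_var ** 0.5)
            if block_idx == 0:
                expected_var = 1.  # reset after the first block of each stage
            expected_var += alpha ** 2
    return betas  # e.g. starts 1.0, ~0.981, ~0.981, ~0.962, ... for the defaults above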
def create_stem(
in_chs: int,
out_chs: int,
stem_type: str = '',
conv_layer: Optional[Callable] = None,
act_layer: Optional[Callable] = None,
preact_feature: bool = True,
):
stem_stride = 2
stem_feature = dict(num_chs=out_chs, reduction=2, module='stem.conv')
stem = OrderedDict()
assert stem_type in ('', 'deep', 'deep_tiered', 'deep_quad', '3x3', '7x7', 'deep_pool', '3x3_pool', '7x7_pool')
if 'deep' in stem_type:
if 'quad' in stem_type:
# 4 deep conv stack as in NFNet-F models
assert 'pool' not in stem_type
stem_chs = (out_chs // 8, out_chs // 4, out_chs // 2, out_chs)
strides = (2, 1, 1, 2)
stem_stride = 4
stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv3')
else:
if 'tiered' in stem_type:
stem_chs = (3 * out_chs // 8, out_chs // 2, out_chs) # 'T' resnets in resnet.py
else:
stem_chs = (out_chs // 2, out_chs // 2, out_chs) # 'D' ResNets
strides = (2, 1, 1)
stem_feature = dict(num_chs=out_chs // 2, reduction=2, module='stem.conv2')
last_idx = len(stem_chs) - 1
for i, (c, s) in enumerate(zip(stem_chs, strides)):
stem[f'conv{i + 1}'] = conv_layer(in_chs, c, kernel_size=3, stride=s)
if i != last_idx:
stem[f'act{i + 2}'] = act_layer(inplace=True)
in_chs = c
elif '3x3' in stem_type:
# 3x3 stem conv as in RegNet
stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=3, stride=2)
else:
# 7x7 stem conv as in ResNet
stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2)
if 'pool' in stem_type:
stem['pool'] = nn.MaxPool2d(3, stride=2, padding=1)
stem_stride = 4
return nn.Sequential(stem), stem_stride, stem_feature
# from https://github.com/deepmind/deepmind-research/tree/master/nfnets
_nonlin_gamma = dict(
identity=1.0,
celu=1.270926833152771,
elu=1.2716004848480225,
gelu=1.7015043497085571,
leaky_relu=1.70590341091156,
log_sigmoid=1.9193484783172607,
log_softmax=1.0002083778381348,
relu=1.7139588594436646,
relu6=1.7131484746932983,
selu=1.0008515119552612,
sigmoid=4.803835391998291,
silu=1.7881293296813965,
softsign=2.338853120803833,
softplus=1.9203323125839233,
tanh=1.5939117670059204,
)
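# Illustrative sanity check (not part of the original file): each gamma above is approximately
# 1 / std(act(x)) for x ~ N(0, 1), so that gamma * act(x) is roughly variance preserving.
def _estimate_nonlin_gamma(act_name='gelu', n=500_000, seed=0):
    g = torch.Generator().manual_seed(seed)
    x = torch.randn(n, generator=g)
    y = get_act_fn(act_name)(x)
    return 1. / y.std().item()  # e.g. ~1.70 for 'gelu', matching the tabulated 1.7015...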
class NormFreeNet(nn.Module):
""" Normalization-Free Network
As described in :
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
and
`High-Performance Large-Scale Image Recognition Without Normalization` - https://arxiv.org/abs/2102.06171
This model aims to cover both the NFRegNet-Bx models as detailed in the paper's code snippets and
the (preact) ResNet models described earlier in the paper.
There are a few differences:
* channels are rounded to be divisible by 8 by default (keeps tensor core kernels happy),
this changes channel dims and param counts slightly from the paper models
* the activation-correcting gamma constants are moved into the ScaledStdConv, where applying them
alongside the weight scaling has less performance impact in PyTorch. This likely wasn't a concern in the JAX impl.
* a config option `gamma_in_act` can be enabled to skip the gamma in StdConv as described above and
apply it in each activation instead. This is slightly slower and numerically different, but matches the official impl.
* skipinit is disabled by default; it seems to have a rather drastic impact on GPU memory use and throughput
for what it does (approx 8-10% throughput loss).
"""
def __init__(
self,
cfg: NfCfg,
num_classes: int = 1000,
in_chans: int = 3,
global_pool: str = 'avg',
output_stride: int = 32,
drop_rate: float = 0.,
drop_path_rate: float = 0.,
**kwargs,
):
"""
Args:
cfg: Model architecture configuration.
num_classes: Number of classifier classes.
in_chans: Number of input channels.
global_pool: Global pooling type.
output_stride: Output stride of network, one of (8, 16, 32).
drop_rate: Dropout rate.
drop_path_rate: Stochastic depth drop-path rate.
**kwargs: Extra kwargs overlayed onto cfg.
"""
super().__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
cfg = replace(cfg, **kwargs)
assert cfg.act_layer in _nonlin_gamma, f"Please add non-linearity constants for activation ({cfg.act_layer})."
conv_layer = ScaledStdConv2dSame if cfg.same_padding else ScaledStdConv2d
if cfg.gamma_in_act:
act_layer = act_with_gamma(cfg.act_layer, gamma=_nonlin_gamma[cfg.act_layer])
conv_layer = partial(conv_layer, eps=cfg.std_conv_eps)
else:
act_layer = get_act_layer(cfg.act_layer)
conv_layer = partial(conv_layer, gamma=_nonlin_gamma[cfg.act_layer], eps=cfg.std_conv_eps)
attn_layer = partial(get_attn(cfg.attn_layer), **cfg.attn_kwargs) if cfg.attn_layer else None
stem_chs = make_divisible((cfg.stem_chs or cfg.channels[0]) * cfg.width_factor, cfg.ch_div)
self.stem, stem_stride, stem_feat = create_stem(
in_chans,
stem_chs,
cfg.stem_type,
conv_layer=conv_layer,
act_layer=act_layer,
)
self.feature_info = [stem_feat]
drop_path_rates = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(cfg.depths)).split(cfg.depths)]
prev_chs = stem_chs
net_stride = stem_stride
dilation = 1
expected_var = 1.0
stages = []
for stage_idx, stage_depth in enumerate(cfg.depths):
stride = 1 if stage_idx == 0 and stem_stride > 2 else 2
if net_stride >= output_stride and stride > 1:
dilation *= stride
stride = 1
net_stride *= stride
first_dilation = 1 if dilation in (1, 2) else 2
blocks = []
for block_idx in range(cfg.depths[stage_idx]):
first_block = block_idx == 0 and stage_idx == 0
out_chs = make_divisible(cfg.channels[stage_idx] * cfg.width_factor, cfg.ch_div)
blocks += [NormFreeBlock(
in_chs=prev_chs, out_chs=out_chs,
alpha=cfg.alpha,
beta=1. / expected_var ** 0.5,
stride=stride if block_idx == 0 else 1,
dilation=dilation,
first_dilation=first_dilation,
group_size=cfg.group_size,
bottle_ratio=1. if cfg.reg and first_block else cfg.bottle_ratio,
ch_div=cfg.ch_div,
reg=cfg.reg,
extra_conv=cfg.extra_conv,
skipinit=cfg.skipinit,
attn_layer=attn_layer,
attn_gain=cfg.attn_gain,
act_layer=act_layer,
conv_layer=conv_layer,
drop_path_rate=drop_path_rates[stage_idx][block_idx],
)]
if block_idx == 0:
expected_var = 1. # expected var is reset after first block of each stage
expected_var += cfg.alpha ** 2 # Even if reset occurs, increment expected variance
first_dilation = dilation
prev_chs = out_chs
self.feature_info += [dict(num_chs=prev_chs, reduction=net_stride, module=f'stages.{stage_idx}')]
stages += [nn.Sequential(*blocks)]
self.stages = nn.Sequential(*stages)
if cfg.num_features:
# The paper NFRegNet models have an EfficientNet-like final head convolution.
self.num_features = make_divisible(cfg.width_factor * cfg.num_features, cfg.ch_div)
self.final_conv = conv_layer(prev_chs, self.num_features, 1)
self.feature_info[-1] = dict(num_chs=self.num_features, reduction=net_stride, module='final_conv')
else:
self.num_features = prev_chs
self.final_conv = nn.Identity()
self.final_act = act_layer(inplace=cfg.num_features > 0)
self.head_hidden_size = self.num_features
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=self.drop_rate,
)
for n, m in self.named_modules():
if 'fc' in n and isinstance(m, nn.Linear):
if cfg.zero_init_fc:
nn.init.zeros_(m.weight)
else:
nn.init.normal_(m.weight, 0., .01)
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='linear')
if m.bias is not None:
nn.init.zeros_(m.bias)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=[
(r'^stages\.(\d+)' if coarse else r'^stages\.(\d+)\.(\d+)', None),
(r'^final_conv', (99999,))
]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head.fc
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, global_pool)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
x = self.final_conv(x)
x = self.final_act(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
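# Construction sketch (illustrative only, config values chosen arbitrarily): builds a tiny NormFreeNet
# directly from an NfCfg and runs a forward pass. Real models are normally created via the registered
# entrypoints / configs defined further below.
def _example_tiny_normfreenet():
    cfg = NfCfg(depths=(1, 1, 1, 1), channels=(16, 32, 64, 128))
    model = NormFreeNet(cfg, num_classes=10)
    x = torch.randn(2, 3, 224, 224)
    return model(x).shape  # torch.Size([2, 10])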
def _nfres_cfg(
depths,
channels=(256, 512, 1024, 2048),
group_size=None,
act_layer='relu',
attn_layer=None,
attn_kwargs=None,
):
attn_kwargs = attn_kwargs or {}
cfg = NfCfg(
depths=depths,
channels=channels,
stem_type='7x7_pool',
stem_chs=64,
bottle_ratio=0.25,
group_size=group_size,
act_layer=act_layer,
attn_layer=attn_layer,
attn_kwargs=attn_kwargs,
)
return cfg
def _nfreg_cfg(depths, channels=(48, 104, 208, 440)):
num_features = 1280 * channels[-1] // 440
attn_kwargs = dict(rd_ratio=0.5)
cfg = NfCfg(
depths=depths,
channels=channels,
stem_type='3x3',
group_size=8,
width_factor=0.75,
bottle_ratio=2.25,
num_features=num_features,
reg=True,
attn_layer='se',
attn_kwargs=attn_kwargs,
)
return cfg
def _nfnet_cfg(
depths,
channels=(256, 512, 1536, 1536),
group_size=128,
bottle_ratio=0.5,
feat_mult=2.,
act_layer='gelu',
attn_layer='se',
attn_kwargs=None,
):
num_features = int(channels[-1] * feat_mult)
attn_kwargs = attn_kwargs if attn_kwargs is not None else dict(rd_ratio=0.5)
cfg = NfCfg(
depths=depths,
channels=channels,
stem_type='deep_quad',
stem_chs=128,
group_size=group_size,
bottle_ratio=bottle_ratio,
extra_conv=True,
num_features=num_features,
act_layer=act_layer,
attn_layer=attn_layer,
attn_kwargs=attn_kwargs,
)
return cfg
def _dm_nfnet_cfg(
depths,
channels=(256, 512, 1536, 1536),
act_layer='gelu',
skipinit=True,
):
cfg = NfCfg(
depths=depths,
channels=channels,
stem_type='deep_quad',
stem_chs=128,
group_size=128,
bottle_ratio=0.5,
extra_conv=True,
gamma_in_act=True,
same_padding=True,
skipinit=skipinit,
num_features=int(channels[-1] * 2.0),
act_layer=act_layer,
attn_layer='se',
attn_kwargs=dict(rd_ratio=0.5),
)
return cfg
model_cfgs = dict(
# NFNet-F models w/ GELU compatible with DeepMind weights
dm_nfnet_f0=_dm_nfnet_cfg(depths=(1, 2, 6, 3)),
dm_nfnet_f1=_dm_nfnet_cfg(depths=(2, 4, 12, 6)),
dm_nfnet_f2=_dm_nfnet_cfg(depths=(3, 6, 18, 9)),
dm_nfnet_f3=_dm_nfnet_cfg(depths=(4, 8, 24, 12)),
dm_nfnet_f4=_dm_nfnet_cfg(depths=(5, 10, 30, 15)),
dm_nfnet_f5=_dm_nfnet_cfg(depths=(6, 12, 36, 18)),
dm_nfnet_f6=_dm_nfnet_cfg(depths=(7, 14, 42, 21)),
# NFNet-F models w/ GELU
nfnet_f0=_nfnet_cfg(depths=(1, 2, 6, 3)),
nfnet_f1=_nfnet_cfg(depths=(2, 4, 12, 6)),
nfnet_f2=_nfnet_cfg(depths=(3, 6, 18, 9)),
nfnet_f3=_nfnet_cfg(depths=(4, 8, 24, 12)),
nfnet_f4=_nfnet_cfg(depths=(5, 10, 30, 15)),
nfnet_f5=_nfnet_cfg(depths=(6, 12, 36, 18)),
nfnet_f6=_nfnet_cfg(depths=(7, 14, 42, 21)),
nfnet_f7=_nfnet_cfg(depths=(8, 16, 48, 24)),
# Experimental 'light' versions of NFNet-F that are a little leaner, w/ SiLU act
nfnet_l0=_nfnet_cfg(
depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25,
attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'),
eca_nfnet_l0=_nfnet_cfg(
depths=(1, 2, 6, 3), feat_mult=1.5, group_size=64, bottle_ratio=0.25,
attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
eca_nfnet_l1=_nfnet_cfg(
depths=(2, 4, 12, 6), feat_mult=2, group_size=64, bottle_ratio=0.25,
attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
eca_nfnet_l2=_nfnet_cfg(
depths=(3, 6, 18, 9), feat_mult=2, group_size=64, bottle_ratio=0.25,
attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
eca_nfnet_l3=_nfnet_cfg(
depths=(4, 8, 24, 12), feat_mult=2, group_size=64, bottle_ratio=0.25,
attn_layer='eca', attn_kwargs=dict(), act_layer='silu'),
# EffNet influenced RegNet defs.
# NOTE: These aren't quite the official ver, ch_div=1 must be set for exact ch counts. I round to ch_div=8.
nf_regnet_b0=_nfreg_cfg(depths=(1, 3, 6, 6)),
nf_regnet_b1=_nfreg_cfg(depths=(2, 4, 7, 7)),
nf_regnet_b2=_nfreg_cfg(depths=(2, 4, 8, 8), channels=(56, 112, 232, 488)),
nf_regnet_b3=_nfreg_cfg(depths=(2, 5, 9, 9), channels=(56, 128, 248, 528)),
nf_regnet_b4=_nfreg_cfg(depths=(2, 6, 11, 11), channels=(64, 144, 288, 616)),
nf_regnet_b5=_nfreg_cfg(depths=(3, 7, 14, 14), channels=(80, 168, 336, 704)),
# ResNet (preact, D style deep stem/avg down) defs
nf_resnet26=_nfres_cfg(depths=(2, 2, 2, 2)),
nf_resnet50=_nfres_cfg(depths=(3, 4, 6, 3)),
nf_resnet101=_nfres_cfg(depths=(3, 4, 23, 3)),
nf_seresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)),
nf_seresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)),
nf_seresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='se', attn_kwargs=dict(rd_ratio=1/16)),
nf_ecaresnet26=_nfres_cfg(depths=(2, 2, 2, 2), attn_layer='eca', attn_kwargs=dict()),
nf_ecaresnet50=_nfres_cfg(depths=(3, 4, 6, 3), attn_layer='eca', attn_kwargs=dict()),
nf_ecaresnet101=_nfres_cfg(depths=(3, 4, 23, 3), attn_layer='eca', attn_kwargs=dict()),
test_nfnet=_nfnet_cfg(
depths=(1, 1, 1, 1), channels=(32, 64, 96, 128), feat_mult=1.5, group_size=8, bottle_ratio=0.25,
attn_kwargs=dict(rd_ratio=0.25, rd_divisor=8), act_layer='silu'),
)
def _create_normfreenet(variant, pretrained=False, **kwargs):
model_cfg = model_cfgs[variant]
feature_cfg = dict(flatten_sequential=True)
return build_model_with_cfg(
NormFreeNet,
variant,
pretrained,
model_cfg=model_cfg,
feature_cfg=feature_cfg,
**kwargs,
)
def _dcfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.9, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'dm_nfnet_f0.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f0-604f9c3a.pth',
pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), crop_pct=.9, crop_mode='squash'),
'dm_nfnet_f1.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f1-fc540f82.pth',
pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320), crop_pct=0.91, crop_mode='squash'),
'dm_nfnet_f2.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f2-89875923.pth',
pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352), crop_pct=0.92, crop_mode='squash'),
'dm_nfnet_f3.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f3-d74ab3aa.pth',
pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416), crop_pct=0.94, crop_mode='squash'),
'dm_nfnet_f4.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f4-0ac5b10b.pth',
pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512), crop_pct=0.951, crop_mode='squash'),
'dm_nfnet_f5.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f5-ecb20ab1.pth',
pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544), crop_pct=0.954, crop_mode='squash'),
'dm_nfnet_f6.dm_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-dnf-weights/dm_nfnet_f6-e0f12116.pth',
pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576), crop_pct=0.956, crop_mode='squash'),
'nfnet_f0': _dcfg(
url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256)),
'nfnet_f1': _dcfg(
url='', pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 320, 320)),
'nfnet_f2': _dcfg(
url='', pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 352, 352)),
'nfnet_f3': _dcfg(
url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 416, 416)),
'nfnet_f4': _dcfg(
url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 512, 512)),
'nfnet_f5': _dcfg(
url='', pool_size=(13, 13), input_size=(3, 416, 416), test_input_size=(3, 544, 544)),
'nfnet_f6': _dcfg(
url='', pool_size=(14, 14), input_size=(3, 448, 448), test_input_size=(3, 576, 576)),
'nfnet_f7': _dcfg(
url='', pool_size=(15, 15), input_size=(3, 480, 480), test_input_size=(3, 608, 608)),
'nfnet_l0.ra2_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nfnet_l0_ra2-45c6688d.pth',
pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0),
'eca_nfnet_l0.ra2_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l0_ra2-e3e9ac50.pth',
pool_size=(7, 7), input_size=(3, 224, 224), test_input_size=(3, 288, 288), test_crop_pct=1.0),
'eca_nfnet_l1.ra2_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l1_ra2-7dce93cd.pth',
pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 320, 320), test_crop_pct=1.0),
'eca_nfnet_l2.ra3_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ecanfnet_l2_ra3-da781a61.pth',
pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), test_crop_pct=1.0),
'eca_nfnet_l3': _dcfg(
url='',
pool_size=(11, 11), input_size=(3, 352, 352), test_input_size=(3, 448, 448), test_crop_pct=1.0),
'nf_regnet_b0': _dcfg(
url='', pool_size=(6, 6), input_size=(3, 192, 192), test_input_size=(3, 256, 256), first_conv='stem.conv'),
'nf_regnet_b1.ra2_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_regnet_b1_256_ra2-ad85cfef.pth',
pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), first_conv='stem.conv'), # NOT to paper spec
'nf_regnet_b2': _dcfg(
url='', pool_size=(8, 8), input_size=(3, 240, 240), test_input_size=(3, 272, 272), first_conv='stem.conv'),
'nf_regnet_b3': _dcfg(
url='', pool_size=(9, 9), input_size=(3, 288, 288), test_input_size=(3, 320, 320), first_conv='stem.conv'),
'nf_regnet_b4': _dcfg(
url='', pool_size=(10, 10), input_size=(3, 320, 320), test_input_size=(3, 384, 384), first_conv='stem.conv'),
'nf_regnet_b5': _dcfg(
url='', pool_size=(12, 12), input_size=(3, 384, 384), test_input_size=(3, 456, 456), first_conv='stem.conv'),
'nf_resnet26': _dcfg(url='', first_conv='stem.conv'),
'nf_resnet50.ra2_in1k': _dcfg(
hf_hub_id='timm/',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/nf_resnet50_ra2-9f236009.pth',
pool_size=(8, 8), input_size=(3, 256, 256), test_input_size=(3, 288, 288), crop_pct=0.94, first_conv='stem.conv'),
'nf_resnet101': _dcfg(url='', first_conv='stem.conv'),
'nf_seresnet26': _dcfg(url='', first_conv='stem.conv'),
'nf_seresnet50': _dcfg(url='', first_conv='stem.conv'),
'nf_seresnet101': _dcfg(url='', first_conv='stem.conv'),
'nf_ecaresnet26': _dcfg(url='', first_conv='stem.conv'),
'nf_ecaresnet50': _dcfg(url='', first_conv='stem.conv'),
'nf_ecaresnet101': _dcfg(url='', first_conv='stem.conv'),
'test_nfnet.r160_in1k': _dcfg(
hf_hub_id='timm/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5),
crop_pct=0.95, input_size=(3, 160, 160), pool_size=(5, 5)),
})
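# Usage sketch (illustrative only): once the entrypoints below are registered, these models are
# typically created via timm.create_model; the _create_normfreenet helper above can also be called
# directly with a variant name from model_cfgs.
def _example_create_nf_resnet50(pretrained=False):
    model = _create_normfreenet('nf_resnet50', pretrained=pretrained, num_classes=10)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 224, 224))
    return out.shape  # torch.Size([1, 10])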
@register_model
def dm_nfnet_f0(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F0 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f0', pretrained=pretrained, **kwargs)
@register_model
def dm_nfnet_f1(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F1 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f1', pretrained=pretrained, **kwargs)
@register_model
def dm_nfnet_f2(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F2 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f2', pretrained=pretrained, **kwargs)
@register_model
def dm_nfnet_f3(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F3 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f3', pretrained=pretrained, **kwargs)
@register_model
def dm_nfnet_f4(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F4 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f4', pretrained=pretrained, **kwargs)
@register_model
def dm_nfnet_f5(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F5 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f5', pretrained=pretrained, **kwargs)
@register_model
def dm_nfnet_f6(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F6 (DeepMind weight compatible)
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('dm_nfnet_f6', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f0(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F0
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f0', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f1(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F1
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f1', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f2(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F2
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f2', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f3(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F3
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f3', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f4(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F4
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f4', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f5(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F5
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f5', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f6(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F6
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f6', pretrained=pretrained, **kwargs)
@register_model
def nfnet_f7(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-F7
`High-Performance Large-Scale Image Recognition Without Normalization`
- https://arxiv.org/abs/2102.06171
"""
return _create_normfreenet('nfnet_f7', pretrained=pretrained, **kwargs)
@register_model
def nfnet_l0(pretrained=False, **kwargs) -> NormFreeNet:
""" NFNet-L0b w/ SiLU
My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & SE ratio
"""
return _create_normfreenet('nfnet_l0', pretrained=pretrained, **kwargs)
@register_model
def eca_nfnet_l0(pretrained=False, **kwargs) -> NormFreeNet:
""" ECA-NFNet-L0 w/ SiLU
My experimental 'light' model w/ F0 repeats, 1.5x final_conv mult, 64 group_size, .25 bottleneck & ECA attn
"""
return _create_normfreenet('eca_nfnet_l0', pretrained=pretrained, **kwargs)
@register_model
def eca_nfnet_l1(pretrained=False, **kwargs) -> NormFreeNet:
""" ECA-NFNet-L1 w/ SiLU
My experimental 'light' model w/ F1 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn
"""
return _create_normfreenet('eca_nfnet_l1', pretrained=pretrained, **kwargs)
@register_model
def eca_nfnet_l2(pretrained=False, **kwargs) -> NormFreeNet:
""" ECA-NFNet-L2 w/ SiLU
My experimental 'light' model w/ F2 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn
"""
return _create_normfreenet('eca_nfnet_l2', pretrained=pretrained, **kwargs)
@register_model
def eca_nfnet_l3(pretrained=False, **kwargs) -> NormFreeNet:
""" ECA-NFNet-L3 w/ SiLU
My experimental 'light' model w/ F3 repeats, 2.0x final_conv mult, 64 group_size, .25 bottleneck & ECA attn
"""
return _create_normfreenet('eca_nfnet_l3', pretrained=pretrained, **kwargs)
@register_model
def nf_regnet_b0(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free RegNet-B0
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b0', pretrained=pretrained, **kwargs)
@register_model
def nf_regnet_b1(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free RegNet-B1
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b1', pretrained=pretrained, **kwargs)
@register_model
def nf_regnet_b2(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free RegNet-B2
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b2', pretrained=pretrained, **kwargs)
@register_model
def nf_regnet_b3(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free RegNet-B3
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b3', pretrained=pretrained, **kwargs)
@register_model
def nf_regnet_b4(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free RegNet-B4
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b4', pretrained=pretrained, **kwargs)
@register_model
def nf_regnet_b5(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free RegNet-B5
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_regnet_b5', pretrained=pretrained, **kwargs)
@register_model
def nf_resnet26(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free ResNet-26
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_resnet26', pretrained=pretrained, **kwargs)
@register_model
def nf_resnet50(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free ResNet-50
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_resnet50', pretrained=pretrained, **kwargs)
@register_model
def nf_resnet101(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free ResNet-101
`Characterizing signal propagation to close the performance gap in unnormalized ResNets`
- https://arxiv.org/abs/2101.08692
"""
return _create_normfreenet('nf_resnet101', pretrained=pretrained, **kwargs)
@register_model
def nf_seresnet26(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free SE-ResNet26
"""
return _create_normfreenet('nf_seresnet26', pretrained=pretrained, **kwargs)
@register_model
def nf_seresnet50(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free SE-ResNet50
"""
return _create_normfreenet('nf_seresnet50', pretrained=pretrained, **kwargs)
@register_model
def nf_seresnet101(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free SE-ResNet101
"""
return _create_normfreenet('nf_seresnet101', pretrained=pretrained, **kwargs)
@register_model
def nf_ecaresnet26(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free ECA-ResNet26
"""
return _create_normfreenet('nf_ecaresnet26', pretrained=pretrained, **kwargs)
@register_model
def nf_ecaresnet50(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free ECA-ResNet50
"""
return _create_normfreenet('nf_ecaresnet50', pretrained=pretrained, **kwargs)
@register_model
def nf_ecaresnet101(pretrained=False, **kwargs) -> NormFreeNet:
""" Normalization-Free ECA-ResNet101
"""
return _create_normfreenet('nf_ecaresnet101', pretrained=pretrained, **kwargs)
@register_model
def test_nfnet(pretrained=False, **kwargs) -> NormFreeNet:
    return _create_normfreenet('test_nfnet', pretrained=pretrained, **kwargs)
# [end of pytorch-image-models/timm/models/nfnet.py]
""" Sequencer
Paper: `Sequencer: Deep LSTM for Image Classification` - https://arxiv.org/pdf/2205.01972.pdf
"""
# Copyright (c) 2022. Yuki Tatsunami
# Licensed under the Apache License, Version 2.0 (the "License");
import math
from functools import partial
from itertools import accumulate
from typing import Optional, Tuple
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT
from timm.layers import lecun_normal_, DropPath, Mlp, PatchEmbed, ClassifierHead
from ._builder import build_model_with_cfg
from ._manipulate import named_apply
from ._registry import register_model, generate_default_cfgs
__all__ = ['Sequencer2d'] # model_registry will add each entrypoint fn to this
def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False):
if isinstance(module, nn.Linear):
if name.startswith('head'):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
if flax:
# Flax defaults
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.RNN, nn.GRU, nn.LSTM)):
stdv = 1.0 / math.sqrt(module.hidden_size)
for weight in module.parameters():
nn.init.uniform_(weight, -stdv, stdv)
elif hasattr(module, 'init_weights'):
module.init_weights()
class RNNIdentity(nn.Module):
def __init__(self, *args, **kwargs):
super(RNNIdentity, self).__init__()
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, None]:
return x, None
class RNN2dBase(nn.Module):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
bidirectional: bool = True,
union="cat",
with_fc=True,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = 2 * hidden_size if bidirectional else hidden_size
self.union = union
self.with_vertical = True
self.with_horizontal = True
self.with_fc = with_fc
self.fc = None
if with_fc:
if union == "cat":
self.fc = nn.Linear(2 * self.output_size, input_size)
elif union == "add":
self.fc = nn.Linear(self.output_size, input_size)
elif union == "vertical":
self.fc = nn.Linear(self.output_size, input_size)
self.with_horizontal = False
elif union == "horizontal":
self.fc = nn.Linear(self.output_size, input_size)
self.with_vertical = False
else:
raise ValueError("Unrecognized union: " + union)
elif union == "cat":
pass
if 2 * self.output_size != input_size:
raise ValueError(f"The output channel {2 * self.output_size} is different from the input channel {input_size}.")
elif union == "add":
pass
if self.output_size != input_size:
raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.")
elif union == "vertical":
if self.output_size != input_size:
raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.")
self.with_horizontal = False
elif union == "horizontal":
if self.output_size != input_size:
raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.")
self.with_vertical = False
else:
raise ValueError("Unrecognized union: " + union)
self.rnn_v = RNNIdentity()
self.rnn_h = RNNIdentity()
def forward(self, x):
B, H, W, C = x.shape
if self.with_vertical:
v = x.permute(0, 2, 1, 3)
v = v.reshape(-1, H, C)
v, _ = self.rnn_v(v)
v = v.reshape(B, W, H, -1)
v = v.permute(0, 2, 1, 3)
else:
v = None
if self.with_horizontal:
h = x.reshape(-1, W, C)
h, _ = self.rnn_h(h)
h = h.reshape(B, H, W, -1)
else:
h = None
if v is not None and h is not None:
if self.union == "cat":
x = torch.cat([v, h], dim=-1)
else:
x = v + h
elif v is not None:
x = v
elif h is not None:
x = h
if self.fc is not None:
x = self.fc(x)
return x
class LSTM2d(RNN2dBase):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
bidirectional: bool = True,
union="cat",
with_fc=True,
):
super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc)
if self.with_vertical:
self.rnn_v = nn.LSTM(
input_size,
hidden_size,
num_layers,
batch_first=True,
bias=bias,
bidirectional=bidirectional,
)
if self.with_horizontal:
self.rnn_h = nn.LSTM(
input_size,
hidden_size,
num_layers,
batch_first=True,
bias=bias,
bidirectional=bidirectional,
)
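# Shape sketch (illustrative only): LSTM2d scans an NHWC feature map along H (vertical) and W
# (horizontal) with bidirectional LSTMs, fuses the two scans (union="cat" here) and projects back to
# the input channel dim, so the output shape matches the input shape.
def _example_lstm2d():
    layer = LSTM2d(input_size=192, hidden_size=48, bidirectional=True, union="cat", with_fc=True)
    x = torch.randn(2, 14, 14, 192)  # B, H, W, C
    return layer(x).shape  # torch.Size([2, 14, 14, 192])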
class Sequencer2dBlock(nn.Module):
def __init__(
self,
dim,
hidden_size,
mlp_ratio=3.0,
rnn_layer=LSTM2d,
mlp_layer=Mlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
num_layers=1,
bidirectional=True,
union="cat",
with_fc=True,
drop=0.,
drop_path=0.,
):
super().__init__()
channels_dim = int(mlp_ratio * dim)
self.norm1 = norm_layer(dim)
self.rnn_tokens = rnn_layer(
dim,
hidden_size,
num_layers=num_layers,
bidirectional=bidirectional,
union=union,
with_fc=with_fc,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.rnn_tokens(self.norm1(x)))
x = x + self.drop_path(self.mlp_channels(self.norm2(x)))
return x
class Shuffle(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
if self.training:
B, H, W, C = x.shape
r = torch.randperm(H * W)
x = x.reshape(B, -1, C)
x = x[:, r, :].reshape(B, H, W, -1)
return x
class Downsample2d(nn.Module):
def __init__(self, input_dim, output_dim, patch_size):
super().__init__()
self.down = nn.Conv2d(input_dim, output_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = x.permute(0, 3, 1, 2)
x = self.down(x)
x = x.permute(0, 2, 3, 1)
return x
class Sequencer2dStage(nn.Module):
def __init__(
self,
dim,
dim_out,
depth,
patch_size,
hidden_size,
mlp_ratio,
downsample=False,
block_layer=Sequencer2dBlock,
rnn_layer=LSTM2d,
mlp_layer=Mlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
num_layers=1,
bidirectional=True,
union="cat",
with_fc=True,
drop=0.,
drop_path=0.,
):
super().__init__()
if downsample:
self.downsample = Downsample2d(dim, dim_out, patch_size)
else:
assert dim == dim_out
self.downsample = nn.Identity()
blocks = []
for block_idx in range(depth):
blocks.append(block_layer(
dim_out,
hidden_size,
mlp_ratio=mlp_ratio,
rnn_layer=rnn_layer,
mlp_layer=mlp_layer,
norm_layer=norm_layer,
act_layer=act_layer,
num_layers=num_layers,
bidirectional=bidirectional,
union=union,
with_fc=with_fc,
drop=drop,
drop_path=drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path,
))
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.blocks(x)
return x
class Sequencer2d(nn.Module):
def __init__(
self,
num_classes=1000,
img_size=224,
in_chans=3,
global_pool='avg',
layers=(4, 3, 8, 3),
patch_sizes=(7, 2, 2, 1),
embed_dims=(192, 384, 384, 384),
hidden_sizes=(48, 96, 96, 96),
mlp_ratios=(3.0, 3.0, 3.0, 3.0),
block_layer=Sequencer2dBlock,
rnn_layer=LSTM2d,
mlp_layer=Mlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
num_rnn_layers=1,
bidirectional=True,
union="cat",
with_fc=True,
drop_rate=0.,
drop_path_rate=0.,
nlhb=False,
stem_norm=False,
):
super().__init__()
assert global_pool in ('', 'avg')
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.head_hidden_size = embed_dims[-1] # for consistency with other models
self.feature_dim = -1 # channel dim index for feature outputs (rank 4, NHWC)
self.output_fmt = 'NHWC'
self.feature_info = []
self.stem = PatchEmbed(
img_size=None,
patch_size=patch_sizes[0],
in_chans=in_chans,
embed_dim=embed_dims[0],
norm_layer=norm_layer if stem_norm else None,
flatten=False,
output_fmt='NHWC',
)
assert len(layers) == len(patch_sizes) == len(embed_dims) == len(hidden_sizes) == len(mlp_ratios)
reductions = list(accumulate(patch_sizes, lambda x, y: x * y))
stages = []
prev_dim = embed_dims[0]
for i, _ in enumerate(embed_dims):
stages += [Sequencer2dStage(
prev_dim,
embed_dims[i],
depth=layers[i],
downsample=i > 0,
patch_size=patch_sizes[i],
hidden_size=hidden_sizes[i],
mlp_ratio=mlp_ratios[i],
block_layer=block_layer,
rnn_layer=rnn_layer,
mlp_layer=mlp_layer,
norm_layer=norm_layer,
act_layer=act_layer,
num_layers=num_rnn_layers,
bidirectional=bidirectional,
union=union,
with_fc=with_fc,
drop=drop_rate,
drop_path=drop_path_rate,
)]
prev_dim = embed_dims[i]
self.feature_info += [dict(num_chs=prev_dim, reduction=reductions[i], module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.norm = norm_layer(embed_dims[-1])
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
input_fmt=self.output_fmt,
)
self.init_weights(nlhb=nlhb)
def init_weights(self, nlhb=False):
head_bias = -math.log(self.num_classes) if nlhb else 0.
named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=[
(r'^stages\.(\d+)', None),
(r'^norm', (99999,))
] if coarse else [
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^stages\.(\d+)\.downsample', (0,)),
(r'^norm', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
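# Construction sketch (illustrative only, config values chosen arbitrarily and much smaller than the
# registered variants below): builds a tiny Sequencer2d and runs a forward pass.
def _example_tiny_sequencer2d():
    model = Sequencer2d(
        num_classes=10,
        layers=(1, 1, 1, 1),
        patch_sizes=(7, 2, 2, 1),
        embed_dims=(32, 64, 64, 64),
        hidden_sizes=(8, 16, 16, 16),
        mlp_ratios=(3.0, 3.0, 3.0, 3.0),
    )
    x = torch.randn(2, 3, 224, 224)
    return model(x).shape  # torch.Size([2, 10])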
def checkpoint_filter_fn(state_dict, model):
""" Remap original checkpoints -> timm """
if 'stages.0.blocks.0.norm1.weight' in state_dict:
return state_dict # already translated checkpoint
if 'model' in state_dict:
state_dict = state_dict['model']
import re
out_dict = {}
for k, v in state_dict.items():
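        # The two substitutions below remap original-repo keys onto this implementation's module tree,
        # e.g. (hypothetical keys) 'blocks.1.0.down.proj.weight' -> 'stages.2.downsample.down.proj.weight'
        # and 'blocks.1.2.norm1.weight' -> 'stages.1.blocks.2.norm1.weight'.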
k = re.sub(r'blocks.([0-9]+).([0-9]+).down', lambda x: f'stages.{int(x.group(1)) + 1}.downsample.down', k)
k = re.sub(r'blocks.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
k = k.replace('head.', 'head.fc.')
out_dict[k] = v
return out_dict
def _create_sequencer2d(variant, pretrained=False, **kwargs):
default_out_indices = tuple(range(3))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
Sequencer2d,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': DEFAULT_CROP_PCT, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.proj', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'sequencer2d_s.in1k': _cfg(hf_hub_id='timm/'),
'sequencer2d_m.in1k': _cfg(hf_hub_id='timm/'),
'sequencer2d_l.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def sequencer2d_s(pretrained=False, **kwargs) -> Sequencer2d:
model_args = dict(
layers=[4, 3, 8, 3],
patch_sizes=[7, 2, 1, 1],
embed_dims=[192, 384, 384, 384],
hidden_sizes=[48, 96, 96, 96],
mlp_ratios=[3.0, 3.0, 3.0, 3.0],
rnn_layer=LSTM2d,
bidirectional=True,
union="cat",
with_fc=True,
)
model = _create_sequencer2d('sequencer2d_s', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def sequencer2d_m(pretrained=False, **kwargs) -> Sequencer2d:
model_args = dict(
layers=[4, 3, 14, 3],
patch_sizes=[7, 2, 1, 1],
embed_dims=[192, 384, 384, 384],
hidden_sizes=[48, 96, 96, 96],
mlp_ratios=[3.0, 3.0, 3.0, 3.0],
rnn_layer=LSTM2d,
bidirectional=True,
union="cat",
with_fc=True,
)
model = _create_sequencer2d('sequencer2d_m', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def sequencer2d_l(pretrained=False, **kwargs) -> Sequencer2d:
model_args = dict(
layers=[8, 8, 16, 4],
patch_sizes=[7, 2, 1, 1],
embed_dims=[192, 384, 384, 384],
hidden_sizes=[48, 96, 96, 96],
mlp_ratios=[3.0, 3.0, 3.0, 3.0],
rnn_layer=LSTM2d,
bidirectional=True,
union="cat",
with_fc=True,
)
model = _create_sequencer2d('sequencer2d_l', pretrained=pretrained, **dict(model_args, **kwargs))
return model
# [end of pytorch-image-models/timm/models/sequencer.py]
""" Vision OutLOoker (VOLO) implementation
Paper: `VOLO: Vision Outlooker for Visual Recognition` - https://arxiv.org/abs/2106.13112
Code adapted from official impl at https://github.com/sail-sg/volo, original copyright in comment below
Modifications and additions for timm by / Copyright 2022, Ross Wightman
"""
# Copyright 2021 Sea Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, Mlp, to_2tuple, to_ntuple, trunc_normal_, use_fused_attn
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import checkpoint
from ._registry import register_model, generate_default_cfgs
__all__ = ['VOLO'] # model_registry will add each entrypoint fn to this
class OutlookAttention(nn.Module):
def __init__(
self,
dim,
num_heads,
kernel_size=3,
padding=1,
stride=1,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
):
super().__init__()
head_dim = dim // num_heads
self.num_heads = num_heads
self.kernel_size = kernel_size
self.padding = padding
self.stride = stride
self.scale = head_dim ** -0.5
self.v = nn.Linear(dim, dim, bias=qkv_bias)
self.attn = nn.Linear(dim, kernel_size ** 4 * num_heads)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.unfold = nn.Unfold(kernel_size=kernel_size, padding=padding, stride=stride)
self.pool = nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True)
def forward(self, x):
B, H, W, C = x.shape
v = self.v(x).permute(0, 3, 1, 2) # B, C, H, W
h, w = math.ceil(H / self.stride), math.ceil(W / self.stride)
v = self.unfold(v).reshape(
B, self.num_heads, C // self.num_heads,
self.kernel_size * self.kernel_size, h * w).permute(0, 1, 4, 3, 2) # B,H,N,kxk,C/H
attn = self.pool(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
attn = self.attn(attn).reshape(
B, h * w, self.num_heads, self.kernel_size * self.kernel_size,
self.kernel_size * self.kernel_size).permute(0, 2, 1, 3, 4) # B,H,N,kxk,kxk
attn = attn * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).permute(0, 1, 4, 3, 2).reshape(B, C * self.kernel_size * self.kernel_size, h * w)
x = F.fold(x, output_size=(H, W), kernel_size=self.kernel_size, padding=self.padding, stride=self.stride)
x = self.proj(x.permute(0, 2, 3, 1))
x = self.proj_drop(x)
return x
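# Shape sketch (illustrative only): OutlookAttention keeps the NHWC spatial shape. Values are unfolded
# into k x k local windows, attention weights over each window are predicted directly from the (pooled)
# center features, and the re-weighted windows are folded back to the original resolution.
def _example_outlook_attention():
    attn = OutlookAttention(dim=192, num_heads=6, kernel_size=3, padding=1, stride=2)
    x = torch.randn(2, 28, 28, 192)  # B, H, W, C
    return attn(x).shape  # torch.Size([2, 28, 28, 192])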
class Outlooker(nn.Module):
def __init__(
self,
dim,
kernel_size,
padding,
stride=1,
num_heads=1,
mlp_ratio=3.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
qkv_bias=False,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = OutlookAttention(
dim,
num_heads,
kernel_size=kernel_size,
padding=padding,
stride=stride,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.attn(self.norm1(x)))
x = x + self.drop_path2(self.mlp(self.norm2(x)))
return x
class Attention(nn.Module):
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, H, W, C = x.shape
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, H, W, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Transformer(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.attn(self.norm1(x)))
x = x + self.drop_path2(self.mlp(self.norm2(x)))
return x
class ClassAttention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
head_dim=None,
qkv_bias=False,
attn_drop=0.,
proj_drop=0.,
):
super().__init__()
self.num_heads = num_heads
if head_dim is not None:
self.head_dim = head_dim
else:
head_dim = dim // num_heads
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.kv = nn.Linear(dim, self.head_dim * self.num_heads * 2, bias=qkv_bias)
self.q = nn.Linear(dim, self.head_dim * self.num_heads, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(self.head_dim * self.num_heads, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
kv = self.kv(x).reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
k, v = kv.unbind(0)
q = self.q(x[:, :1, :]).reshape(B, self.num_heads, 1, self.head_dim) * self.scale
attn = q @ k.transpose(-2, -1)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
cls_embed = (attn @ v).transpose(1, 2).reshape(B, 1, self.head_dim * self.num_heads)
cls_embed = self.proj(cls_embed)
cls_embed = self.proj_drop(cls_embed)
return cls_embed
class ClassBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
head_dim=None,
mlp_ratio=4.,
qkv_bias=False,
drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = ClassAttention(
dim,
num_heads=num_heads,
head_dim=head_dim,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=drop,
)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=drop,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
cls_embed = x[:, :1]
cls_embed = cls_embed + self.drop_path1(self.attn(self.norm1(x)))
cls_embed = cls_embed + self.drop_path2(self.mlp(self.norm2(cls_embed)))
return torch.cat([cls_embed, x[:, 1:]], dim=1)
def get_block(block_type, **kargs):
if block_type == 'ca':
return ClassBlock(**kargs)
def rand_bbox(size, lam, scale=1):
"""
get a random bounding box, as used for token labeling (https://github.com/zihangJiang/TokenLabeling)
return: bounding box coordinates (bbx1, bby1, bbx2, bby2)
"""
W = size[1] // scale
H = size[2] // scale
cut_rat = np.sqrt(1. - lam)
cut_w = (W * cut_rat).astype(int)
cut_h = (H * cut_rat).astype(int)
# uniform
cx = np.random.randint(W)
cy = np.random.randint(H)
bbx1 = np.clip(cx - cut_w // 2, 0, W)
bby1 = np.clip(cy - cut_h // 2, 0, H)
bbx2 = np.clip(cx + cut_w // 2, 0, W)
bby2 = np.clip(cy + cut_h // 2, 0, H)
return bbx1, bby1, bbx2, bby2
class PatchEmbed(nn.Module):
""" Image to Patch Embedding.
Unlike ViT, which uses a single conv layer, we use 4 conv layers to do patch embedding
"""
def __init__(
self,
img_size=224,
stem_conv=False,
stem_stride=1,
patch_size=8,
in_chans=3,
hidden_dim=64,
embed_dim=384,
):
super().__init__()
assert patch_size in [4, 8, 16]
if stem_conv:
self.conv = nn.Sequential(
nn.Conv2d(in_chans, hidden_dim, kernel_size=7, stride=stem_stride, padding=3, bias=False), # 112x112
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), # 112x112
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=1, padding=1, bias=False), # 112x112
nn.BatchNorm2d(hidden_dim),
nn.ReLU(inplace=True),
)
else:
self.conv = None
self.proj = nn.Conv2d(
hidden_dim, embed_dim, kernel_size=patch_size // stem_stride, stride=patch_size // stem_stride)
self.num_patches = (img_size // patch_size) * (img_size // patch_size)
def forward(self, x):
if self.conv is not None:
x = self.conv(x)
x = self.proj(x) # B, C, H, W
return x
class Downsample(nn.Module):
""" Image to Patch Embedding, downsampling between stage1 and stage2
"""
def __init__(self, in_embed_dim, out_embed_dim, patch_size=2):
super().__init__()
self.proj = nn.Conv2d(in_embed_dim, out_embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = x.permute(0, 3, 1, 2)
x = self.proj(x) # B, C, H, W
x = x.permute(0, 2, 3, 1)
return x
def outlooker_blocks(
block_fn,
index,
dim,
layers,
num_heads=1,
kernel_size=3,
padding=1,
stride=2,
mlp_ratio=3.,
qkv_bias=False,
attn_drop=0,
drop_path_rate=0.,
**kwargs,
):
"""
generate outlooker layers for stage 1
return: outlooker layers
"""
blocks = []
for block_idx in range(layers[index]):
block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1)
blocks.append(block_fn(
dim,
kernel_size=kernel_size,
padding=padding,
stride=stride,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
drop_path=block_dpr,
))
blocks = nn.Sequential(*blocks)
return blocks
def transformer_blocks(
block_fn,
index,
dim,
layers,
num_heads,
mlp_ratio=3.,
qkv_bias=False,
attn_drop=0,
drop_path_rate=0.,
**kwargs,
):
"""
generate transformer layers for stage 2
return: transformer layers
"""
blocks = []
for block_idx in range(layers[index]):
block_dpr = drop_path_rate * (block_idx + sum(layers[:index])) / (sum(layers) - 1)
blocks.append(block_fn(
dim,
num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
drop_path=block_dpr,
))
blocks = nn.Sequential(*blocks)
return blocks
class VOLO(nn.Module):
"""
Vision Outlooker, the main class of our model
"""
def __init__(
self,
layers,
img_size=224,
in_chans=3,
num_classes=1000,
global_pool='token',
patch_size=8,
stem_hidden_dim=64,
embed_dims=None,
num_heads=None,
downsamples=(True, False, False, False),
outlook_attention=(True, False, False, False),
mlp_ratio=3.0,
qkv_bias=False,
drop_rate=0.,
pos_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_layer=nn.LayerNorm,
post_layers=('ca', 'ca'),
use_aux_head=True,
use_mix_token=False,
pooling_scale=2,
):
super().__init__()
num_layers = len(layers)
mlp_ratio = to_ntuple(num_layers)(mlp_ratio)
img_size = to_2tuple(img_size)
self.num_classes = num_classes
self.global_pool = global_pool
self.mix_token = use_mix_token
self.pooling_scale = pooling_scale
self.num_features = self.head_hidden_size = embed_dims[-1]
if use_mix_token: # enable token mixing, see token labeling for details.
self.beta = 1.0
assert global_pool == 'token', "return all tokens if mix_token is enabled"
self.grad_checkpointing = False
self.patch_embed = PatchEmbed(
stem_conv=True,
stem_stride=2,
patch_size=patch_size,
in_chans=in_chans,
hidden_dim=stem_hidden_dim,
embed_dim=embed_dims[0],
)
r = patch_size
# initial positional encoding, we add positional encoding after outlooker blocks
patch_grid = (img_size[0] // patch_size // pooling_scale, img_size[1] // patch_size // pooling_scale)
self.pos_embed = nn.Parameter(torch.zeros(1, patch_grid[0], patch_grid[1], embed_dims[-1]))
self.pos_drop = nn.Dropout(p=pos_drop_rate)
# set the main block in network
self.stage_ends = []
self.feature_info = []
network = []
block_idx = 0
for i in range(len(layers)):
if outlook_attention[i]:
# stage 1
stage = outlooker_blocks(
Outlooker,
i,
embed_dims[i],
layers,
num_heads[i],
mlp_ratio=mlp_ratio[i],
qkv_bias=qkv_bias,
attn_drop=attn_drop_rate,
norm_layer=norm_layer,
)
else:
# stage 2
stage = transformer_blocks(
Transformer,
i,
embed_dims[i],
layers,
num_heads[i],
mlp_ratio=mlp_ratio[i],
qkv_bias=qkv_bias,
drop_path_rate=drop_path_rate,
attn_drop=attn_drop_rate,
norm_layer=norm_layer,
)
network.append(stage)
self.stage_ends.append(block_idx)
self.feature_info.append(dict(num_chs=embed_dims[i], reduction=r, module=f'network.{block_idx}'))
block_idx += 1
if downsamples[i]:
# downsampling between two stages
network.append(Downsample(embed_dims[i], embed_dims[i + 1], 2))
r *= 2
block_idx += 1
self.network = nn.ModuleList(network)
# set post block, for example, class attention layers
self.post_network = None
if post_layers is not None:
self.post_network = nn.ModuleList([
get_block(
post_layers[i],
dim=embed_dims[-1],
num_heads=num_heads[-1],
mlp_ratio=mlp_ratio[-1],
qkv_bias=qkv_bias,
attn_drop=attn_drop_rate,
drop_path=0.,
norm_layer=norm_layer)
for i in range(len(post_layers))
])
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims[-1]))
trunc_normal_(self.cls_token, std=.02)
# set output type
if use_aux_head:
self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
else:
self.aux_head = None
self.norm = norm_layer(self.num_features)
# Classifier head
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[
(r'^network\.(\d+)\.(\d+)', None),
(r'^network\.(\d+)', (0,)),
],
blocks2=[
(r'^cls_token', (0,)),
(r'^post_network\.(\d+)', None),
(r'^norm', (99999,))
],
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self) -> nn.Module:
return self.head
def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
if self.aux_head is not None:
self.aux_head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
def forward_tokens(self, x):
for idx, block in enumerate(self.network):
if idx == 2:
# add positional encoding after outlooker blocks
x = x + self.pos_embed
x = self.pos_drop(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(block, x)
else:
x = block(x)
B, H, W, C = x.shape
x = x.reshape(B, -1, C)
return x
def forward_cls(self, x):
B, N, C = x.shape
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat([cls_tokens, x], dim=1)
for block in self.post_network:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(block, x)
else:
x = block(x)
return x
def forward_train(self, x):
""" A separate forward fn for training with mix_token (if a train script supports).
Combining multiple modes in as single forward with different return types is torchscript hell.
"""
x = self.patch_embed(x)
x = x.permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C
# mix token, see token labeling for details.
if self.mix_token and self.training:
lam = np.random.beta(self.beta, self.beta)
patch_h, patch_w = x.shape[1] // self.pooling_scale, x.shape[2] // self.pooling_scale
bbx1, bby1, bbx2, bby2 = rand_bbox(x.size(), lam, scale=self.pooling_scale)
temp_x = x.clone()
sbbx1, sbby1 = self.pooling_scale * bbx1, self.pooling_scale * bby1
sbbx2, sbby2 = self.pooling_scale * bbx2, self.pooling_scale * bby2
temp_x[:, sbbx1:sbbx2, sbby1:sbby2, :] = x.flip(0)[:, sbbx1:sbbx2, sbby1:sbby2, :]
x = temp_x
else:
bbx1, bby1, bbx2, bby2 = 0, 0, 0, 0
# step2: tokens learning in the two stages
x = self.forward_tokens(x)
# step3: post network, apply class attention or not
if self.post_network is not None:
x = self.forward_cls(x)
x = self.norm(x)
if self.global_pool == 'avg':
x_cls = x.mean(dim=1)
elif self.global_pool == 'token':
x_cls = x[:, 0]
else:
x_cls = x
if self.aux_head is None:
return x_cls
x_aux = self.aux_head(x[:, 1:]) # generate classes in all feature tokens, see token labeling
if not self.training:
return x_cls + 0.5 * x_aux.max(1)[0]
if self.mix_token and self.training: # reverse "mix token", see token labeling for details.
x_aux = x_aux.reshape(x_aux.shape[0], patch_h, patch_w, x_aux.shape[-1])
temp_x = x_aux.clone()
temp_x[:, bbx1:bbx2, bby1:bby2, :] = x_aux.flip(0)[:, bbx1:bbx2, bby1:bby2, :]
x_aux = temp_x
x_aux = x_aux.reshape(x_aux.shape[0], patch_h * patch_w, x_aux.shape[-1])
# return these: 1. class token, 2. classes from all feature tokens, 3. bounding box
return x_cls, x_aux, (bbx1, bby1, bbx2, bby2)
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to all intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            A list of intermediate feature tensors, or a tuple of (final features, intermediates)
            if `intermediates_only` is False.
        """
assert output_fmt in ('NCHW',), 'Output format must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
take_indices = [self.stage_ends[i] for i in take_indices]
max_index = self.stage_ends[max_index]
# forward pass
B, _, height, width = x.shape
x = self.patch_embed(x).permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C
# step2: tokens learning in the two stages
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
network = self.network
else:
network = self.network[:max_index + 1]
for idx, block in enumerate(network):
if idx == 2:
# add positional encoding after outlooker blocks
x = x + self.pos_embed
x = self.pos_drop(x)
x = block(x)
if idx in take_indices:
if norm and idx >= 2:
x_inter = self.norm(x)
else:
x_inter = x
intermediates.append(x_inter.permute(0, 3, 1, 2))
if intermediates_only:
return intermediates
# NOTE not supporting return of class tokens
# step3: post network, apply class attention or not
B, H, W, C = x.shape
x = x.reshape(B, -1, C)
if self.post_network is not None:
x = self.forward_cls(x)
x = self.norm(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stage_ends), indices)
max_index = self.stage_ends[max_index]
self.network = self.network[:max_index + 1] # truncate blocks
if prune_norm:
self.norm = nn.Identity()
if prune_head:
self.post_network = nn.ModuleList() # prune token blocks with head
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.patch_embed(x).permute(0, 2, 3, 1) # B,C,H,W-> B,H,W,C
# step2: tokens learning in the two stages
x = self.forward_tokens(x)
# step3: post network, apply class attention or not
if self.post_network is not None:
x = self.forward_cls(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
out = x.mean(dim=1)
elif self.global_pool == 'token':
out = x[:, 0]
else:
out = x
x = self.head_drop(x)
if pre_logits:
return out
out = self.head(out)
if self.aux_head is not None:
# generate classes in all feature tokens, see token labeling
aux = self.aux_head(x[:, 1:])
out = out + 0.5 * aux.max(1)[0]
return out
def forward(self, x):
""" simplified forward (without mix token training) """
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_volo(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', 3)
return build_model_with_cfg(
VOLO,
variant,
pretrained,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .96, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.conv.0', 'classifier': ('head', 'aux_head'),
**kwargs
}
default_cfgs = generate_default_cfgs({
'volo_d1_224.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_224_84.2.pth.tar',
crop_pct=0.96),
'volo_d1_384.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d1_384_85.2.pth.tar',
crop_pct=1.0, input_size=(3, 384, 384)),
'volo_d2_224.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_224_85.2.pth.tar',
crop_pct=0.96),
'volo_d2_384.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d2_384_86.0.pth.tar',
crop_pct=1.0, input_size=(3, 384, 384)),
'volo_d3_224.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_224_85.4.pth.tar',
crop_pct=0.96),
'volo_d3_448.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d3_448_86.3.pth.tar',
crop_pct=1.0, input_size=(3, 448, 448)),
'volo_d4_224.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_224_85.7.pth.tar',
crop_pct=0.96),
'volo_d4_448.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d4_448_86.79.pth.tar',
crop_pct=1.15, input_size=(3, 448, 448)),
'volo_d5_224.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_224_86.10.pth.tar',
crop_pct=0.96),
'volo_d5_448.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_448_87.0.pth.tar',
crop_pct=1.15, input_size=(3, 448, 448)),
'volo_d5_512.sail_in1k': _cfg(
hf_hub_id='timm/',
url='https://github.com/sail-sg/volo/releases/download/volo_1/d5_512_87.07.pth.tar',
crop_pct=1.15, input_size=(3, 512, 512)),
})
@register_model
def volo_d1_224(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D1 model, Params: 27M """
model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs)
model = _create_volo('volo_d1_224', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d1_384(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D1 model, Params: 27M """
model_args = dict(layers=(4, 4, 8, 2), embed_dims=(192, 384, 384, 384), num_heads=(6, 12, 12, 12), **kwargs)
model = _create_volo('volo_d1_384', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d2_224(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D2 model, Params: 59M """
model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs)
model = _create_volo('volo_d2_224', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d2_384(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D2 model, Params: 59M """
model_args = dict(layers=(6, 4, 10, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs)
model = _create_volo('volo_d2_384', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d3_224(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D3 model, Params: 86M """
model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs)
model = _create_volo('volo_d3_224', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d3_448(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D3 model, Params: 86M """
model_args = dict(layers=(8, 8, 16, 4), embed_dims=(256, 512, 512, 512), num_heads=(8, 16, 16, 16), **kwargs)
model = _create_volo('volo_d3_448', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d4_224(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D4 model, Params: 193M """
model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs)
model = _create_volo('volo_d4_224', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d4_448(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D4 model, Params: 193M """
model_args = dict(layers=(8, 8, 16, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16), **kwargs)
model = _create_volo('volo_d4_448', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d5_224(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D5 model, Params: 296M
stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5
"""
model_args = dict(
layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16),
mlp_ratio=4, stem_hidden_dim=128, **kwargs)
model = _create_volo('volo_d5_224', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d5_448(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D5 model, Params: 296M
stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5
"""
model_args = dict(
layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16),
mlp_ratio=4, stem_hidden_dim=128, **kwargs)
model = _create_volo('volo_d5_448', pretrained=pretrained, **model_args)
return model
@register_model
def volo_d5_512(pretrained=False, **kwargs) -> VOLO:
""" VOLO-D5 model, Params: 296M
stem_hidden_dim=128, the dim in patch embedding is 128 for VOLO-D5
"""
model_args = dict(
layers=(12, 12, 20, 4), embed_dims=(384, 768, 768, 768), num_heads=(12, 16, 16, 16),
mlp_ratio=4, stem_hidden_dim=128, **kwargs)
model = _create_volo('volo_d5_512', pretrained=pretrained, **model_args)
return model
| pytorch-image-models/timm/models/volo.py/0 | {
"file_path": "pytorch-image-models/timm/models/volo.py",
"repo_id": "pytorch-image-models",
"token_count": 17707
} |
""" ADOPT PyTorch Optimizer
ADOPT: Modified Adam Can Converge with Any β2 with the Optimal Rate: https://arxiv.org/abs/2411.02853
Modified for reduced dependencies on PyTorch internals from original at: https://github.com/iShohei220/adopt
@inproceedings{taniguchi2024adopt,
author={Taniguchi, Shohei and Harada, Keno and Minegishi, Gouki and Oshima, Yuta and Jeong, Seong Cheol and Nagahara, Go and Iiyama, Tomoshi and Suzuki, Masahiro and Iwasawa, Yusuke and Matsuo, Yutaka},
booktitle = {Advances in Neural Information Processing Systems},
title = {ADOPT: Modified Adam Can Converge with Any β2 with the Optimal Rate},
year = {2024}
}
"""
from typing import cast, List, Optional, Tuple, Union
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
from ._types import ParamsT
__all__ = ["Adopt", "adopt"]
def _view_as_real(params, *state_and_grads):
for i, p in enumerate(params):
if torch.is_complex(p):
params[i] = torch.view_as_real(params[i])
for s in state_and_grads:
s[i] = torch.view_as_real(s[i])
def _get_scalar_dtype(is_fused=None):
if is_fused:
return torch.float32
return (
torch.float64 if torch.get_default_dtype() == torch.float64 else torch.float32
)
def _is_compiling():
if hasattr(torch, 'compiler') and hasattr(torch.compiler, 'is_compiling'):
return torch.compiler.is_compiling()
else:
return False
def _get_value(x):
# item is significantly faster than a cpu tensor in eager mode
if not torch.jit.is_scripting() and _is_compiling():
return x
else:
return x.item() if isinstance(x, torch.Tensor) else x
class Adopt(Optimizer):
"""
ADOPT: Modified Adam Can Converge with Any β2 with the Optimal Rate: https://arxiv.org/abs/2411.02853
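
    Example (a minimal sketch of a training step; `model`, `loss_fn`, `input` and `target`
    are placeholders, not defined in this file):

        >>> optimizer = Adopt(model.parameters(), lr=1e-3)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()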
"""
def __init__(
self,
params: ParamsT,
lr: Union[float, Tensor] = 1e-3,
betas: Tuple[float, float] = (0.9, 0.9999),
eps: float = 1e-6,
clip_exp: Optional[float] = 0.333,
weight_decay: float = 0.0,
decoupled: bool = False,
*,
caution: bool = False,
foreach: Optional[bool] = False,
maximize: bool = False,
capturable: bool = False,
differentiable: bool = False,
):
if isinstance(lr, Tensor):
if foreach and not capturable:
raise ValueError(
"lr as a Tensor is not supported for capturable=False and foreach=True"
)
if lr.numel() != 1:
raise ValueError("Tensor lr must be 1-element")
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
if not 0.0 <= weight_decay:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
clip_exp=clip_exp,
decoupled=decoupled,
caution=caution,
maximize=maximize,
foreach=foreach,
capturable=capturable,
differentiable=differentiable,
)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault("maximize", False)
group.setdefault("foreach", None)
group.setdefault("capturable", False)
group.setdefault("differentiable", False)
group.setdefault("clip_exp", None)
group.setdefault("caution", False)
for p in group["params"]:
p_state = self.state.get(p, [])
if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
step_val = float(p_state["step"])
p_state["step"] = (
torch.tensor(
step_val,
dtype=_get_scalar_dtype(),
device=p.device,
)
if group["capturable"]
else torch.tensor(step_val, dtype=_get_scalar_dtype())
)
def _init_group(
self,
group,
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
):
has_complex = False
for p in group["params"]:
if p.grad is None:
continue
has_complex |= torch.is_complex(p)
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError("ADOPT does not support sparse gradients")
grads.append(p.grad)
state = self.state[p]
# Lazy state initialization
if len(state) == 0:
# note(crcrpar): [special device hosting for step]
# Deliberately host `step` on CPU if both capturable and fused are off.
# This is because kernel launches are costly on CUDA and XLA.
state["step"] = (
torch.zeros((), dtype=_get_scalar_dtype(), device=p.grad.device)
if group["capturable"]
else torch.tensor(0.0, dtype=_get_scalar_dtype())
)
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p.grad, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p.grad, memory_format=torch.preserve_format)
exp_avgs.append(state["exp_avg"])
exp_avg_sqs.append(state["exp_avg_sq"])
if group["differentiable"] and state["step"].requires_grad:
raise RuntimeError("`requires_grad` is not supported for `step` in differentiable mode")
# Foreach without capturable does not support a tensor lr
if group["foreach"] and torch.is_tensor(group["lr"]) and not group["capturable"]:
raise RuntimeError("lr as a Tensor is not supported for capturable=False and foreach=True")
state_steps.append(state["step"])
return has_complex
#@_use_grad_for_differentiable # FIXME internal context mgr, can't use
@torch.no_grad()
def step(self, closure=None):
"""Perform a single optimization step.
Args:
closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
self._cuda_graph_capture_health_check()
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad: List[Tensor] = []
grads: List[Tensor] = []
exp_avgs: List[Tensor] = []
exp_avg_sqs: List[Tensor] = []
state_steps: List[Tensor] = []
beta1, beta2 = group["betas"]
has_complex = self._init_group(
group,
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
)
adopt(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
has_complex=has_complex,
beta1=beta1,
beta2=beta2,
lr=group["lr"],
weight_decay=group["weight_decay"],
clip_exp=group["clip_exp"],
decoupled=group["decoupled"],
eps=group["eps"],
caution=group["caution"],
maximize=group["maximize"],
foreach=group["foreach"],
capturable=group["capturable"],
differentiable=group["differentiable"],
grad_scale=getattr(self, "grad_scale", None),
found_inf=getattr(self, "found_inf", None),
)
return loss
def _single_tensor_adopt(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
grad_scale: Optional[Tensor],
found_inf: Optional[Tensor],
*,
has_complex: bool,
beta1: float,
beta2: float,
lr: Union[float, Tensor],
weight_decay: float,
clip_exp: Optional[float],
decoupled: bool,
eps: float,
caution: bool,
maximize: bool,
capturable: bool,
differentiable: bool,
):
assert grad_scale is None and found_inf is None
if torch.jit.is_scripting():
# this assert is due to JIT being dumb and not realizing that the ops below
# have overloads to handle both float and Tensor lrs, so we just assert it's
# a float since most people using JIT are using floats
assert isinstance(lr, float)
for i, param in enumerate(params):
grad = grads[i] if not maximize else -grads[i]
exp_avg = exp_avgs[i]
exp_avg_sq = exp_avg_sqs[i]
step_t = state_steps[i]
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if capturable and not _is_compiling():
from torch.optim.optimizer import _get_capturable_supported_devices
capturable_supported_devices = _get_capturable_supported_devices()
assert param.device.type == step_t.device.type and param.device.type in capturable_supported_devices,\
f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
# update step
step_t += 1
if torch.is_complex(param):
grad = torch.view_as_real(grad)
if exp_avg is not None:
exp_avg = torch.view_as_real(exp_avg)
if exp_avg_sq is not None:
exp_avg_sq = torch.view_as_real(exp_avg_sq)
param = torch.view_as_real(param)
if weight_decay != 0 and not decoupled:
grad = grad.add(param, alpha=weight_decay)
step = step_t if capturable or differentiable else _get_value(step_t)
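        # On the first step, only initialize the second-moment estimate; parameter updates begin at step 2.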
if step == 1:
exp_avg_sq.addcmul_(grad, grad.conj())
continue
if weight_decay != 0 and decoupled:
param.add_(param, alpha=-lr * weight_decay)
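        # Normalize the gradient by the second-moment estimate from the *previous* step
        # (exp_avg_sq is only updated at the end of this loop); this is ADOPT's key change vs Adam.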
denom = torch.clamp(exp_avg_sq.sqrt(), eps)
normed_grad = grad.div(denom)
if clip_exp is not None:
clip_val = (step - 1) ** clip_exp
normed_grad.clamp_(-clip_val, clip_val)
exp_avg.lerp_(normed_grad, 1 - beta1)
if caution:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
mask = (exp_avg * grad > 0).to(grad.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
exp_avg = exp_avg * mask
param.add_(exp_avg, alpha=-lr)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
def _multi_tensor_adopt(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
grad_scale: Optional[Tensor],
found_inf: Optional[Tensor],
*,
has_complex: bool,
beta1: float,
beta2: float,
lr: Union[float, Tensor],
weight_decay: float,
clip_exp: Optional[float],
decoupled: bool,
eps: float,
caution: bool,
maximize: bool,
capturable: bool,
differentiable: bool,
):
if len(params) == 0:
return
if isinstance(lr, Tensor) and not capturable:
raise RuntimeError(
"lr as a Tensor is not supported for capturable=False and foreach=True"
)
# If compiling, the compiler will handle cudagraph checks, see note [torch.compile x capturable]
if capturable and not _is_compiling():
from torch.optim.optimizer import _get_capturable_supported_devices
capturable_supported_devices = _get_capturable_supported_devices(
supports_xla=False
)
assert all(
p.device.type == step.device.type and p.device.type in capturable_supported_devices
for p, step in zip(params, state_steps)
), f"If capturable=True, params and state_steps must be on supported devices: {capturable_supported_devices}."
assert grad_scale is None and found_inf is None
assert not differentiable, "_foreach ops don't support autograd"
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
[params, grads, exp_avgs, exp_avg_sqs, state_steps] # type: ignore[list-item]
)
for (
device_params_,
device_grads_,
device_exp_avgs_,
device_exp_avg_sqs_,
device_state_steps_,
), _ in grouped_tensors.values():
device_params = cast(List[Tensor], device_params_)
device_grads = cast(List[Tensor], device_grads_)
device_exp_avgs = cast(List[Tensor], device_exp_avgs_)
device_exp_avg_sqs = cast(List[Tensor], device_exp_avg_sqs_)
device_state_steps = cast(List[Tensor], device_state_steps_)
# Handle complex parameters
if has_complex:
_view_as_real(device_params, device_grads, device_exp_avgs, device_exp_avg_sqs)
if maximize:
device_grads = torch._foreach_neg(device_grads) # type: ignore[assignment]
# Update steps
# If steps are on CPU, foreach will fall back to the slow path, which is a for-loop calling t.add(1) over
# and over. 1 will then be wrapped into a Tensor over and over again, which is slower than if we just
# wrapped it once now. The alpha is required to assure we go to the right overload.
if not _is_compiling() and device_state_steps[0].is_cpu:
torch._foreach_add_(device_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0)
else:
torch._foreach_add_(device_state_steps, 1)
if weight_decay != 0 and not decoupled:
# Re-use the intermediate memory (device_grads) already allocated for maximize
if maximize:
torch._foreach_add_(device_grads, device_params, alpha=weight_decay)
else:
device_grads = torch._foreach_add(device_grads, device_params, alpha=weight_decay)
if device_state_steps[0] == 1:
torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads)
continue
if weight_decay != 0 and decoupled:
torch._foreach_add_(device_params, device_params, alpha=-lr * weight_decay)
exp_avg_sq_sqrt = torch._foreach_sqrt(device_exp_avg_sqs)
torch._foreach_maximum_(exp_avg_sq_sqrt, eps)
normed_grad = torch._foreach_div(device_grads, exp_avg_sq_sqrt)
if clip_exp is not None:
clip_val = (device_state_steps[0] - 1) ** clip_exp
torch._foreach_maximum_(normed_grad, -clip_val)
torch._foreach_minimum_(normed_grad, clip_val)
torch._foreach_lerp_(device_exp_avgs, normed_grad, 1 - beta1)
if caution:
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
masks = torch._foreach_mul(device_exp_avgs, device_grads)
masks = [(m > 0).to(g.dtype) for m, g in zip(masks, device_grads)]
mask_scale = [m.mean() for m in masks]
torch._foreach_maximum_(mask_scale, 1e-3)
torch._foreach_div_(masks, mask_scale)
device_exp_avgs = torch._foreach_mul(device_exp_avgs, masks)
torch._foreach_add_(device_params, device_exp_avgs, alpha=-lr)
torch._foreach_mul_(device_exp_avg_sqs, beta2)
torch._foreach_addcmul_(device_exp_avg_sqs, device_grads, device_grads, value=1 - beta2)
#@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_adopt) # FIXME internal context mgr, can't use
def adopt(
params: List[Tensor],
grads: List[Tensor],
exp_avgs: List[Tensor],
exp_avg_sqs: List[Tensor],
state_steps: List[Tensor],
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
foreach: Optional[bool] = None,
capturable: bool = False,
differentiable: bool = False,
grad_scale: Optional[Tensor] = None,
found_inf: Optional[Tensor] = None,
has_complex: bool = False,
*,
beta1: float,
beta2: float,
lr: Union[float, Tensor],
weight_decay: float,
clip_exp: Optional[float],
decoupled: bool,
eps: float,
caution: bool,
maximize: bool,
):
r"""Functional API that performs ADOPT algorithm computation.
"""
if foreach is None:
foreach = False
# this check is slow during compilation, so we skip it
# if it's strictly needed we can add this check back in dynamo
if not _is_compiling() and not all(isinstance(t, torch.Tensor) for t in state_steps):
raise RuntimeError(
"API has changed, `state_steps` argument must contain a list of singleton tensors"
)
if foreach and torch.jit.is_scripting():
raise RuntimeError("torch.jit.script not supported with foreach optimizers")
if foreach and not torch.jit.is_scripting():
func = _multi_tensor_adopt
else:
func = _single_tensor_adopt
func(
params,
grads,
exp_avgs,
exp_avg_sqs,
state_steps,
has_complex=has_complex,
beta1=beta1,
beta2=beta2,
lr=lr,
weight_decay=weight_decay,
clip_exp=clip_exp,
decoupled=decoupled,
eps=eps,
caution=caution,
maximize=maximize,
capturable=capturable,
differentiable=differentiable,
grad_scale=grad_scale,
found_inf=found_inf,
)
| pytorch-image-models/timm/optim/adopt.py/0 | {
"file_path": "pytorch-image-models/timm/optim/adopt.py",
"repo_id": "pytorch-image-models",
"token_count": 9017
} |
from typing import List, Optional
import torch
from torch import Tensor
from torch.optim.optimizer import Optimizer
try:
from torch.optim.optimizer import _use_grad_for_differentiable, _default_to_fused_or_foreach
has_recent_pt = True
except ImportError:
has_recent_pt = False
from ._types import ParamsT
__all__ = ['SGDW', 'sgdw']
class SGDW(Optimizer):
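    """SGD with decoupled weight decay: the decay directly shrinks the parameters
    (scaled by lr) instead of being added to the gradient as in plain SGD.

    Example (a minimal sketch of a training step; `model`, `loss_fn`, `input` and `target`
    are placeholders, not defined in this file):

        >>> optimizer = SGDW(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()
    """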
def __init__(
self,
params: ParamsT,
lr: float = 1e-3,
momentum: float = 0.,
dampening: float = 0.,
weight_decay: float = 0.,
nesterov: bool = False,
*,
caution: bool = False,
maximize: bool = False,
foreach: Optional[bool] = None,
differentiable: bool = False,
):
if lr < 0.0:
raise ValueError(f"Invalid learning rate: {lr}")
if momentum < 0.0:
raise ValueError(f"Invalid momentum value: {momentum}")
if weight_decay < 0.0:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
defaults = dict(
lr=lr,
momentum=momentum,
dampening=dampening,
weight_decay=weight_decay,
nesterov=nesterov,
caution=caution,
maximize=maximize,
foreach=foreach,
differentiable=differentiable,
)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault('caution', False)
group.setdefault('nesterov', False)
group.setdefault('maximize', False)
group.setdefault('foreach', None)
group.setdefault('differentiable', False)
def _init_group(self, group, params_with_grad, grads, momentum_buffer_list):
has_sparse_grad = False
for p in group['params']:
if p.grad is not None:
params_with_grad.append(p)
grads.append(p.grad)
if p.grad.is_sparse:
has_sparse_grad = True
state = self.state[p]
if 'momentum_buffer' not in state:
momentum_buffer_list.append(None)
else:
momentum_buffer_list.append(state['momentum_buffer'])
return has_sparse_grad
# FIXME figure out how to make _use_grad_for_differentiable interchangeable with no_grad decorator
# without args, for backwards compatibility with old pytorch
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (Callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
momentum_buffer_list = []
has_sparse_grad = self._init_group(group, params_with_grad, grads, momentum_buffer_list)
sgdw(
params_with_grad,
grads,
momentum_buffer_list,
weight_decay=group['weight_decay'],
momentum=group['momentum'],
lr=group['lr'],
dampening=group['dampening'],
nesterov=group['nesterov'],
caution=group['caution'],
maximize=group['maximize'],
has_sparse_grad=has_sparse_grad,
foreach=group['foreach'],
)
# update momentum_buffers in state
for p, momentum_buffer in zip(params_with_grad, momentum_buffer_list):
state = self.state[p]
state['momentum_buffer'] = momentum_buffer
return loss
def sgdw(
params: List[Tensor],
grads: List[Tensor],
momentum_buffer_list: List[Optional[Tensor]],
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
has_sparse_grad: bool = None,
foreach: Optional[bool] = None,
*,
weight_decay: float,
momentum: float,
lr: float,
dampening: float,
nesterov: bool,
caution: bool,
maximize: bool
):
r"""Functional API that performs SGD algorithm computation.
See :class:`~torch.optim.SGD` for details.
"""
if has_recent_pt and hasattr(Optimizer, '_group_tensors_by_device_and_dtype'):
if foreach is None:
# why must we be explicit about an if statement for torch.jit.is_scripting here?
# because JIT can't handle Optionals nor fancy conditionals when scripting
if not torch.jit.is_scripting():
_, foreach = _default_to_fused_or_foreach(params, differentiable=False, use_fused=False)
else:
foreach = False
if foreach and torch.jit.is_scripting():
raise RuntimeError('torch.jit.script not supported with foreach optimizers')
else:
        foreach = False  # disable altogether for older pytorch, as the foreach path relies on _group_tensors_by_device_and_dtype
if foreach and not torch.jit.is_scripting():
func = _multi_tensor_sgdw
else:
func = _single_tensor_sgdw
func(
params,
grads,
momentum_buffer_list,
weight_decay=weight_decay,
momentum=momentum,
lr=lr,
dampening=dampening,
nesterov=nesterov,
caution=caution,
has_sparse_grad=has_sparse_grad,
maximize=maximize,
)
def _single_tensor_sgdw(
params: List[Tensor],
grads: List[Tensor],
momentum_buffer_list: List[Optional[Tensor]],
*,
weight_decay: float,
momentum: float,
lr: float,
dampening: float,
nesterov: bool,
caution: bool,
maximize: bool,
has_sparse_grad: bool
):
for i, param in enumerate(params):
grad = grads[i] if not maximize else -grads[i]
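        # Decoupled weight decay: shrink the parameter directly (scaled by lr) rather than adding wd * param to the gradient.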
param.mul_(1. - lr * weight_decay)
if momentum != 0:
buf = momentum_buffer_list[i]
if buf is None:
buf = torch.clone(grad).detach()
momentum_buffer_list[i] = buf
else:
buf.mul_(momentum).add_(grad, alpha=1 - dampening)
if caution:
if nesterov:
buf = grad.add(buf, alpha=momentum)
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
mask = (buf * grad > 0).to(grad.dtype)
mask.div_(mask.mean().clamp_(min=1e-3))
grad = buf * mask
else:
if nesterov:
grad = grad.add(buf, alpha=momentum)
else:
grad = buf
param.add_(grad, alpha=-lr)
def _multi_tensor_sgdw(
params: List[Tensor],
grads: List[Tensor],
momentum_buffer_list: List[Optional[Tensor]],
*,
weight_decay: float,
momentum: float,
lr: float,
dampening: float,
nesterov: bool,
caution: bool,
maximize: bool,
has_sparse_grad: bool
):
if len(params) == 0:
return
grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
[params, grads, momentum_buffer_list], with_indices=True)
for ((device_params, device_grads, device_momentum_buffer_list), indices) in grouped_tensors.values():
device_has_sparse_grad = has_sparse_grad and any(grad.is_sparse for grad in device_grads)
if maximize:
device_grads = torch._foreach_neg(device_grads)
        torch._foreach_mul_(device_params, 1. - lr * weight_decay)
if momentum != 0:
bufs = []
all_states_with_momentum_buffer = True
for i in range(len(device_momentum_buffer_list)):
if device_momentum_buffer_list[i] is None:
all_states_with_momentum_buffer = False
break
else:
bufs.append(device_momentum_buffer_list[i])
if all_states_with_momentum_buffer:
torch._foreach_mul_(bufs, momentum)
torch._foreach_add_(bufs, device_grads, alpha=1 - dampening)
else:
bufs = []
for i in range(len(device_momentum_buffer_list)):
if device_momentum_buffer_list[i] is None:
buf = device_momentum_buffer_list[i] = momentum_buffer_list[indices[i]] = \
torch.clone(device_grads[i]).detach()
else:
buf = device_momentum_buffer_list[i]
buf.mul_(momentum).add_(device_grads[i], alpha=1 - dampening)
bufs.append(buf)
if caution:
if nesterov:
# Can't do nesterov in-place if we want to compare against orig grad for caution
bufs = torch._foreach_add(device_grads, bufs, alpha=momentum)
# Apply caution as per 'Cautious Optimizers' - https://arxiv.org/abs/2411.16085
masks = torch._foreach_mul(bufs, device_grads)
masks = [(m > 0).to(g.dtype) for m, g in zip(masks, device_grads)]
mask_scale = [m.mean() for m in masks]
torch._foreach_maximum_(mask_scale, 1e-3)
torch._foreach_div_(masks, mask_scale)
device_grads = torch._foreach_mul(bufs, masks)
else:
if nesterov:
torch._foreach_add_(device_grads, bufs, alpha=momentum)
else:
device_grads = bufs
if not device_has_sparse_grad:
torch._foreach_add_(device_params, device_grads, alpha=-lr)
else:
# foreach APIs don't support sparse
for i in range(len(device_params)):
device_params[i].add_(device_grads[i], alpha=-lr)
| pytorch-image-models/timm/optim/sgdw.py/0 | {
"file_path": "pytorch-image-models/timm/optim/sgdw.py",
"repo_id": "pytorch-image-models",
"token_count": 5288
} |
""" CUDA / AMP utils
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
try:
from apex import amp
has_apex = True
except ImportError:
amp = None
has_apex = False
from .clip_grad import dispatch_clip_grad
class ApexScaler:
state_dict_key = "amp"
def __call__(
self,
loss,
optimizer,
clip_grad=None,
clip_mode='norm',
parameters=None,
create_graph=False,
need_update=True,
):
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward(create_graph=create_graph)
if need_update:
if clip_grad is not None:
dispatch_clip_grad(amp.master_params(optimizer), clip_grad, mode=clip_mode)
optimizer.step()
def state_dict(self):
if 'state_dict' in amp.__dict__:
return amp.state_dict()
def load_state_dict(self, state_dict):
if 'load_state_dict' in amp.__dict__:
amp.load_state_dict(state_dict)
class NativeScaler:
state_dict_key = "amp_scaler"
def __init__(self, device='cuda'):
try:
self._scaler = torch.amp.GradScaler(device=device)
except (AttributeError, TypeError) as e:
self._scaler = torch.cuda.amp.GradScaler()
def __call__(
self,
loss,
optimizer,
clip_grad=None,
clip_mode='norm',
parameters=None,
create_graph=False,
need_update=True,
):
self._scaler.scale(loss).backward(create_graph=create_graph)
if need_update:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
dispatch_clip_grad(parameters, clip_grad, mode=clip_mode)
self._scaler.step(optimizer)
self._scaler.update()
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
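

# Typical use of NativeScaler in an AMP training step (a rough sketch; `model`, `optimizer`,
# `loss_fn`, `input` and `target` are placeholders, not defined in this module):
#
#   scaler = NativeScaler()
#   with torch.autocast(device_type='cuda'):
#       loss = loss_fn(model(input), target)
#   scaler(loss, optimizer, clip_grad=1.0, parameters=model.parameters())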
| pytorch-image-models/timm/utils/cuda.py/0 | {
"file_path": "pytorch-image-models/timm/utils/cuda.py",
"repo_id": "pytorch-image-models",
"token_count": 1048
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Orchestrate a multi-agent system 🤖🤝🤖
[[open-in-colab]]
In this notebook we will make a **multi-agent web browser: an agentic system with several agents collaborating to solve problems using the web!**
It will be a simple hierarchy:
```
+----------------+
| Manager agent |
+----------------+
|
_______________|______________
| |
Code Interpreter +------------------+
tool | Web Search agent |
+------------------+
| |
Web Search tool |
Visit webpage tool
```
Let's set up this system.
Run the line below to install the required dependencies:
```
!pip install markdownify duckduckgo-search smolagents --upgrade -q
```
Let's log in so that we can call the HF Inference API:
```
from huggingface_hub import login
login()
```
⚡️ Our agent will be powered by [Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) using the `HfApiModel` class, which relies on HF's Inference API: the Inference API makes it quick and easy to run any open-source model.
_Note:_ The Inference API hosts models based on various criteria, and deployed models may be updated or replaced without prior notice. Learn more about it [here](https://huggingface.co/docs/api-inference/supported-models).
```py
model_id = "Qwen/Qwen2.5-Coder-32B-Instruct"
```
## 🔍 Create a web search tool
For web browsing, we can already use our pre-existing [`DuckDuckGoSearchTool`](https://github.com/huggingface/smolagents/blob/main/src/smolagents/default_tools.py#L151-L176) tool to provide a Google search equivalent.
But then we will also need to be able to peek into the pages found by the `DuckDuckGoSearchTool`.
To do so, we could import the library's built-in `VisitWebpageTool`, but we will build it again to see how it's done.
So let's create our `VisitWebpageTool` tool from scratch using `markdownify`.
```py
import re
import requests
from markdownify import markdownify
from requests.exceptions import RequestException
from smolagents import tool
@tool
def visit_webpage(url: str) -> str:
"""Visits a webpage at the given URL and returns its content as a markdown string.
Args:
url: The URL of the webpage to visit.
Returns:
The content of the webpage converted to Markdown, or an error message if the request fails.
"""
try:
# Send a GET request to the URL
response = requests.get(url)
response.raise_for_status() # Raise an exception for bad status codes
# Convert the HTML content to Markdown
markdown_content = markdownify(response.text).strip()
# Remove multiple line breaks
markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)
return markdown_content
except RequestException as e:
return f"Error fetching the webpage: {str(e)}"
except Exception as e:
return f"An unexpected error occurred: {str(e)}"
```
Ok, now let's initialize and test our tool!
```py
print(visit_webpage("https://en.wikipedia.org/wiki/Hugging_Face")[:500])
```
## Build our multi-agent system 🤖🤝🤖
Now that we have both tools, `search` and `visit_webpage`, we can use them to create the web agent.
Which configuration to choose for this agent?
- Web browsing is a single-timeline task that does not require parallel tool calls, so JSON tool calling works well for that. We thus choose a `ToolCallingAgent`.
- Also, since sometimes web search requires exploring many pages before finding the correct answer, we prefer to increase the number of `max_steps` to 10.
```py
from smolagents import (
CodeAgent,
ToolCallingAgent,
HfApiModel,
DuckDuckGoSearchTool,
LiteLLMModel,
)
model = HfApiModel(model_id)
web_agent = ToolCallingAgent(
tools=[DuckDuckGoSearchTool(), visit_webpage],
model=model,
max_steps=10,
name="search",
description="Runs web searches for you. Give it your query as an argument.",
)
```
Note that we gave this agent a `name` and a `description`, which are mandatory attributes for making the agent callable by its manager agent.
Then we create a manager agent, and upon initialization we pass our managed agent to it in its `managed_agents` argument.
Since this agent is the one tasked with the planning and thinking, advanced reasoning will be beneficial, so a `CodeAgent` will be the best choice.
Also, we want to ask a question that involves the current year and does additional data calculations: so let us add `additional_authorized_imports=["time", "numpy", "pandas"]`, just in case the agent needs these packages.
```py
manager_agent = CodeAgent(
tools=[],
model=model,
managed_agents=[web_agent],
additional_authorized_imports=["time", "numpy", "pandas"],
)
```
That's all! Now let's run our system! We select a question that requires both some calculation and research:
```py
answer = manager_agent.run("If LLM training continues to scale up at the current rhythm until 2030, what would be the electric power in GW required to power the biggest training runs by 2030? What would that correspond to, compared to some countries? Please provide a source for any numbers used.")
```
We get this report as the answer:
```
Based on current growth projections and energy consumption estimates, if LLM trainings continue to scale up at the
current rhythm until 2030:
1. The electric power required to power the biggest training runs by 2030 would be approximately 303.74 GW, which
translates to about 2,660,762 GWh/year.
2. Comparing this to countries' electricity consumption:
- It would be equivalent to about 34% of China's total electricity consumption.
- It would exceed the total electricity consumption of India (184%), Russia (267%), and Japan (291%).
- It would be nearly 9 times the electricity consumption of countries like Italy or Mexico.
3. Source of numbers:
- The initial estimate of 5 GW for future LLM training comes from AWS CEO Matt Garman.
- The growth projection used a CAGR of 79.80% from market research by Springs.
- Country electricity consumption data is from the U.S. Energy Information Administration, primarily for the year
2021.
```
Seems like we'll need some sizeable powerplants if the [scaling hypothesis](https://gwern.net/scaling-hypothesis) continues to hold true.
Our agents managed to efficiently collaborate towards solving the task! ✅
💡 You can easily extend this orchestration to more agents: one does the code execution, one the web search, one handles file loadings...
| smolagents/docs/source/en/examples/multiagents.md/0 | {
"file_path": "smolagents/docs/source/en/examples/multiagents.md",
"repo_id": "smolagents",
"token_count": 2350
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# How do multi-step agents work?
The ReAct framework ([Yao et al., 2022](https://huggingface.co/papers/2210.03629)) is currently the main approach to building agents.
The name is based on the combination of the two words "Reason" and "Act". Indeed, agents following this architecture will solve their task in as many steps as needed, each step consisting of a Reasoning step followed by an Action step, in which the agent formulates tool calls that bring it closer to solving the task.
The ReAct process involves keeping a memory of past steps.
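In pseudocode, this loop looks roughly as follows (a simplified sketch for illustration only, not the actual smolagents implementation; `llm_should_continue`, `llm_get_next_action` and `execute_action` are placeholder functions):
```py
memory = [user_defined_task]
while llm_should_continue(memory):  # this loop is the multi-step part
    action = llm_get_next_action(memory)  # this is the tool-calling part
    observations = execute_action(action)
    memory += [action, observations]
```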
> [!TIP]
> Read the [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) blog post to learn more about multi-step agents.
Here is a video overview of how it works:
<div class="flex justify-center">
<img
class="block dark:hidden"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif"
/>
<img
class="hidden dark:block"
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif"
/>
</div>

We implement two types of agents:
- [`ToolCallingAgent`] generates its tool calls as JSON in its output.
- [`CodeAgent`] is a new type of ToolCallingAgent that generates its tool calls as blobs of code, which works really well for LLMs that have strong coding performance (see the short sketch below).
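As a rough illustration (assuming the standard smolagents imports; the empty tool list and default model below are placeholders, not a recommended setup), both variants are instantiated the same way:
```py
from smolagents import CodeAgent, ToolCallingAgent, HfApiModel

model = HfApiModel()

# Emits its tool calls as JSON blobs
json_agent = ToolCallingAgent(tools=[], model=model)

# Writes its tool calls as Python code snippets
code_agent = CodeAgent(tools=[], model=model)

code_agent.run("What is the 10th Fibonacci number?")
```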
> [!TIP]
> We also provide an option to run agents in one-shot: just pass `single_step=True` when launching the agent, like `agent.run(your_task, single_step=True)` | smolagents/docs/source/hi/conceptual_guides/react.md/0 | {
"file_path": "smolagents/docs/source/hi/conceptual_guides/react.md",
"repo_id": "smolagents",
"token_count": 1968
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Orchestrate a multi-agent system 🤖🤝🤖
[[open-in-colab]]
In this notebook we will build a **multi-agent web browser: an agentic system with several agents collaborating to solve problems by searching the web!**
A `ManagedAgent` object will wrap the agent that handles the web search, forming a simple hierarchy:
```
+----------------+
| Manager agent |
+----------------+
|
_______________|______________
| |
Code interpreter +--------------------------------+
tool | Managed agent |
| +------------------+ |
| | Web Search agent | |
| +------------------+ |
| | | |
| Web Search tool | |
| Visit webpage tool |
+--------------------------------+
```
Let's build this system together. Run the code below to install the required dependencies:
```
!pip install markdownify duckduckgo-search smolagents --upgrade -q
```
We need to log in to the Hugging Face Hub in order to call the HF Inference API:
```
from huggingface_hub import login
login()
```
⚡️ HF's Inference API makes it quick and easy to run any open-source model, so our agent will use the
`HfApiModel` class from the Inference API to call the
[Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) model.
_Note:_ The Inference API hosts models based on various criteria, and deployed models may be updated or replaced without prior notice. Learn more [here](https://huggingface.co/docs/api-inference/supported-models).
```py
model_id = "Qwen/Qwen2.5-Coder-32B-Instruct"
```
## 🔍 Create a web search tool
Although we can already use the existing
[`DuckDuckGoSearchTool`](https://github.com/huggingface/smolagents/blob/main/src/smolagents/default_tools.py#L151-L176)
tool as a Google-search substitute for web browsing, we also need to be able to inspect the pages that
`DuckDuckGoSearchTool` finds. For that, we could directly import the library's built-in
`VisitWebpageTool`, but we will rebuild it here to understand how it works.
We will build our `VisitWebpageTool` from scratch using `markdownify`.
```py
import re
import requests
from markdownify import markdownify
from requests.exceptions import RequestException
from smolagents import tool
@tool
def visit_webpage(url: str) -> str:
"""Visits a webpage at the given URL and returns its content as a markdown string.
Args:
url: The URL of the webpage to visit.
Returns:
The content of the webpage converted to Markdown, or an error message if the request fails.
"""
try:
# Send a GET request to the URL
response = requests.get(url)
response.raise_for_status() # Raise an exception for bad status codes
# Convert the HTML content to Markdown
markdown_content = markdownify(response.text).strip()
# Remove multiple line breaks
markdown_content = re.sub(r"\n{3,}", "\n\n", markdown_content)
return markdown_content
except RequestException as e:
return f"Error fetching the webpage: {str(e)}"
except Exception as e:
return f"An unexpected error occurred: {str(e)}"
```
Now let's initialize this tool and test it!
```py
print(visit_webpage("https://en.wikipedia.org/wiki/Hugging_Face")[:500])
```
## Build our multi-agent system 🤖🤝🤖
Now that we have both tools, `search` and `visit_webpage`, we can use them to create the web agent.
What configuration should we choose for this agent?
- Web browsing is a single-timeline task that does not require parallel tool calls, so JSON tool calling works well for it. We therefore choose a `ToolCallingAgent`.
- Since web search sometimes requires exploring many pages before finding the correct answer, we prefer to increase `max_steps` to 10.
```py
from smolagents import (
CodeAgent,
ToolCallingAgent,
HfApiModel,
ManagedAgent,
DuckDuckGoSearchTool,
LiteLLMModel,
)
model = HfApiModel(model_id)
web_agent = ToolCallingAgent(
tools=[DuckDuckGoSearchTool(), visit_webpage],
model=model,
max_steps=10,
)
```
Then we wrap this agent in a `ManagedAgent` so that it can be called by its manager agent.
```py
managed_web_agent = ManagedAgent(
agent=web_agent,
name="search",
description="Runs web searches for you. Give it your query as an argument.",
)
```
Finally, we create a manager agent, and upon initialization we pass our managed agent to it in its `managed_agents` argument. Since this agent is the one in charge of planning and thinking, advanced reasoning will be beneficial, so a `CodeAgent` is the best choice. Also, we want to ask a question that involves the current year and does additional data calculations, so let's add `additional_authorized_imports=["time", "numpy", "pandas"]` in case the agent needs these packages.
```py
manager_agent = CodeAgent(
tools=[],
model=model,
managed_agents=[managed_web_agent],
additional_authorized_imports=["time", "numpy", "pandas"],
)
```
That's it! Now let's run our system! We pick a question that requires both some calculation and research:
```py
answer = manager_agent.run("If LLM training continues to scale up at the current rhythm until 2030, what would be the electric power in GW required to power the biggest training runs by 2030? What would that correspond to, compared to some countries? Please provide a source for any numbers used.")
```
We get this report as the answer:
```
Based on current growth projections and energy consumption estimates, if LLM trainings continue to scale up at the
current rhythm until 2030:
1. The electric power required to power the biggest training runs by 2030 would be approximately 303.74 GW, which
translates to about 2,660,762 GWh/year.
2. Comparing this to countries' electricity consumption:
- It would be equivalent to about 34% of China's total electricity consumption.
- It would exceed the total electricity consumption of India (184%), Russia (267%), and Japan (291%).
- It would be nearly 9 times the electricity consumption of countries like Italy or Mexico.
3. Source of numbers:
- The initial estimate of 5 GW for future LLM training comes from AWS CEO Matt Garman.
- The growth projection used a CAGR of 79.80% from market research by Springs.
- Country electricity consumption data is from the U.S. Energy Information Administration, primarily for the year
2021.
```
Seems like we'll need some sizeable power plants if the [scaling hypothesis](https://gwern.net/scaling-hypothesis) continues to hold true. Our agents managed to efficiently collaborate towards solving the task! ✅
💡 You can easily extend this orchestration to more agents: one executes the code, one does the web search, one handles file loading...
| smolagents/docs/source/zh/examples/multiagents.md/0 | {
"file_path": "smolagents/docs/source/zh/examples/multiagents.md",
"repo_id": "smolagents",
"token_count": 3596
} |
from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from smolagents import (
CodeAgent,
DuckDuckGoSearchTool,
HfApiModel,
ToolCallingAgent,
VisitWebpageTool,
)
# Let's setup the instrumentation first
trace_provider = TracerProvider()
trace_provider.add_span_processor(SimpleSpanProcessor(OTLPSpanExporter("http://0.0.0.0:6006/v1/traces")))
SmolagentsInstrumentor().instrument(tracer_provider=trace_provider, skip_dep_check=True)
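# Note: the exporter above assumes an OTLP-compatible trace collector (e.g. Arize Phoenix) is already listening on port 6006.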
# Then we run the agentic part!
model = HfApiModel()
search_agent = ToolCallingAgent(
tools=[DuckDuckGoSearchTool(), VisitWebpageTool()],
model=model,
name="search_agent",
description="This is an agent that can do web search.",
)
manager_agent = CodeAgent(
tools=[],
model=model,
managed_agents=[search_agent],
)
manager_agent.run("If the US keeps it 2024 growth rate, how many years would it take for the GDP to double?")
| smolagents/examples/inspect_multiagent_run.py/0 | {
"file_path": "smolagents/examples/inspect_multiagent_run.py",
"repo_id": "smolagents",
"token_count": 388
} |
import os
import datasets
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma
# from langchain_community.document_loaders import PyPDFLoader
from langchain_huggingface import HuggingFaceEmbeddings
from tqdm import tqdm
from transformers import AutoTokenizer
# from langchain_openai import OpenAIEmbeddings
from smolagents import LiteLLMModel, Tool
from smolagents.agents import CodeAgent
# from smolagents.agents import ToolCallingAgent
knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train")
source_docs = [
Document(page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]}) for doc in knowledge_base
]
## For your own PDFs, you can use the following code to load them into source_docs
# pdf_directory = "pdfs"
# pdf_files = [
# os.path.join(pdf_directory, f)
# for f in os.listdir(pdf_directory)
# if f.endswith(".pdf")
# ]
# source_docs = []
# for file_path in pdf_files:
# loader = PyPDFLoader(file_path)
#     source_docs.extend(loader.load())
text_splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer(
AutoTokenizer.from_pretrained("thenlper/gte-small"),
chunk_size=200,
chunk_overlap=20,
add_start_index=True,
strip_whitespace=True,
separators=["\n\n", "\n", ".", " ", ""],
)
# Split docs and keep only unique ones
print("Splitting documents...")
docs_processed = []
unique_texts = {}
for doc in tqdm(source_docs):
new_docs = text_splitter.split_documents([doc])
for new_doc in new_docs:
if new_doc.page_content not in unique_texts:
unique_texts[new_doc.page_content] = True
docs_processed.append(new_doc)
print("Embedding documents... This should take a few minutes (5 minutes on MacBook with M1 Pro)")
# Initialize embeddings and ChromaDB vector store
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
# embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
vector_store = Chroma.from_documents(docs_processed, embeddings, persist_directory="./chroma_db")
class RetrieverTool(Tool):
name = "retriever"
description = (
"Uses semantic search to retrieve the parts of documentation that could be most relevant to answer your query."
)
inputs = {
"query": {
"type": "string",
"description": "The query to perform. This should be semantically close to your target documents. Use the affirmative form rather than a question.",
}
}
output_type = "string"
def __init__(self, vector_store, **kwargs):
super().__init__(**kwargs)
self.vector_store = vector_store
def forward(self, query: str) -> str:
assert isinstance(query, str), "Your search query must be a string"
docs = self.vector_store.similarity_search(query, k=3)
return "\nRetrieved documents:\n" + "".join(
[f"\n\n===== Document {str(i)} =====\n" + doc.page_content for i, doc in enumerate(docs)]
)
retriever_tool = RetrieverTool(vector_store)
# Choose which LLM engine to use!
# from smolagents import HfApiModel
# model = HfApiModel(model_id="meta-llama/Llama-3.3-70B-Instruct")
# from smolagents import TransformersModel
# model = TransformersModel(model_id="meta-llama/Llama-3.2-3B-Instruct")
# For anthropic: change model_id below to 'anthropic/claude-3-5-sonnet-20240620' and also change 'os.environ.get("ANTHROPIC_API_KEY")'
model = LiteLLMModel(
model_id="groq/llama-3.3-70b-versatile",
api_key=os.environ.get("GROQ_API_KEY"),
)
# # You can also use the ToolCallingAgent class
# agent = ToolCallingAgent(
# tools=[retriever_tool],
# model=model,
# verbose=True,
# )
agent = CodeAgent(
tools=[retriever_tool],
model=model,
max_steps=4,
verbosity_level=2,
)
agent_output = agent.run("How can I push a model to the Hub?")
print("Final output:")
print(agent_output)
| smolagents/examples/rag_using_chromadb.py/0 | {
"file_path": "smolagents/examples/rag_using_chromadb.py",
"repo_id": "smolagents",
"token_count": 1499
} |
system_prompt: |-
You are an expert assistant who can solve any task using tool calls. You will be given a task to solve as best you can.
To do so, you have been given access to some tools.
The tool call you write is an action: after the tool is executed, you will get the result of the tool call as an "observation".
This Action/Observation can repeat N times, you should take several steps when needed.
You can use the result of the previous action as input for the next action.
The observation will always be a string: it can represent a file, like "image_1.jpg".
Then you can use it as input for the next action. You can do it for instance as follows:
Observation: "image_1.jpg"
Action:
{
"name": "image_transformer",
"arguments": {"image": "image_1.jpg"}
}
To provide the final answer to the task, use an action blob with the "final_answer" tool. It is the only way to complete the task, otherwise you will be stuck in a loop. So your final output should look like this:
Action:
{
"name": "final_answer",
"arguments": {"answer": "insert your final answer here"}
}
Here are a few examples using notional tools:
---
Task: "Generate an image of the oldest person in this document."
Action:
{
"name": "document_qa",
"arguments": {"document": "document.pdf", "question": "Who is the oldest person mentioned?"}
}
Observation: "The oldest person in the document is John Doe, a 55 year old lumberjack living in Newfoundland."
Action:
{
"name": "image_generator",
"arguments": {"prompt": "A portrait of John Doe, a 55-year-old man living in Canada."}
}
Observation: "image.png"
Action:
{
"name": "final_answer",
"arguments": "image.png"
}
---
Task: "What is the result of the following operation: 5 + 3 + 1294.678?"
Action:
{
"name": "python_interpreter",
"arguments": {"code": "5 + 3 + 1294.678"}
}
Observation: 1302.678
Action:
{
"name": "final_answer",
"arguments": "1302.678"
}
---
Task: "Which city has the highest population , Guangzhou or Shanghai?"
Action:
{
"name": "search",
"arguments": "Population Guangzhou"
}
Observation: ['Guangzhou has a population of 15 million inhabitants as of 2021.']
Action:
{
"name": "search",
"arguments": "Population Shanghai"
}
Observation: '26 million (2019)'
Action:
{
"name": "final_answer",
"arguments": "Shanghai"
}
The above examples were using notional tools that might not exist for you. You only have access to these tools:
{%- for tool in tools.values() %}
- {{ tool.name }}: {{ tool.description }}
Takes inputs: {{tool.inputs}}
Returns an output of type: {{tool.output_type}}
{%- endfor %}
{%- if managed_agents and managed_agents.values() | list %}
You can also give requests to team members.
Calling a team member works the same way as calling a tool: the only argument you can give in the call is 'request', a long string explaining your request.
Given that this team member is a real human, you should be very verbose in your request.
Here is a list of the team members that you can call:
{%- for agent in managed_agents.values() %}
- {{ agent.name }}: {{ agent.description }}
{%- endfor %}
{%- else %}
{%- endif %}
Here are the rules you should always follow to solve your task:
1. ALWAYS provide a tool call, else you will fail.
2. Always use the right arguments for the tools. Never use variable names as the action arguments, use the value instead.
3. Call a tool only when needed: do not call the search agent if you do not need information, try to solve the task yourself.
If no tool call is needed, use final_answer tool to return your answer.
4. Never re-do a tool call that you previously did with the exact same parameters.
Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000.
planning:
initial_facts: |-
Below I will present you a task.
You will now build a comprehensive preparatory survey of which facts we have at our disposal and which ones we still need.
To do so, you will have to read the task and identify things that must be discovered in order to successfully complete it.
Don't make any assumptions. For each item, provide a thorough reasoning. Here is how you will structure this survey:
---
### 1. Facts given in the task
List here the specific facts given in the task that could help you (there might be nothing here).
### 2. Facts to look up
List here any facts that we may need to look up.
Also list where to find each of these, for instance a website, a file... - maybe the task contains some sources that you should re-use here.
### 3. Facts to derive
List here anything that we want to derive from the above by logical reasoning, for instance computation or simulation.
Keep in mind that "facts" will typically be specific names, dates, values, etc. Your answer should use the below headings:
### 1. Facts given in the task
### 2. Facts to look up
### 3. Facts to derive
Do not add anything else.
initial_plan : |-
You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools.
Now for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts.
This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer.
Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS.
After writing the final step of the plan, write the '\n<end_plan>' tag and stop there.
Here is your task:
Task:
```
{{task}}
```
You can leverage these tools:
{%- for tool in tools.values() %}
- {{ tool.name }}: {{ tool.description }}
Takes inputs: {{tool.inputs}}
Returns an output of type: {{tool.output_type}}
{%- endfor %}
{%- if managed_agents and managed_agents.values() | list %}
You can also give requests to team members.
Calling a team member works the same way as calling a tool: the only argument you can give in the call is 'request', a long string explaining your request.
Given that this team member is a real human, you should be very verbose in your request.
Here is a list of the team members that you can call:
{%- for agent in managed_agents.values() %}
- {{ agent.name }}: {{ agent.description }}
{%- endfor %}
{%- else %}
{%- endif %}
List of facts that you know:
```
{{answer_facts}}
```
Now begin! Write your plan below.
update_facts_pre_messages: |-
You are a world expert at gathering known and unknown facts based on a conversation.
Below you will find a task, and a history of attempts made to solve the task. You will have to produce a list of these:
### 1. Facts given in the task
### 2. Facts that we have learned
### 3. Facts still to look up
### 4. Facts still to derive
Find the task and history below:
update_facts_post_messages: |-
Earlier we've built a list of facts.
But in your previous steps you may have learned useful new facts or invalidated some false ones.
Please update your list of facts based on the previous history, and provide these headings:
### 1. Facts given in the task
### 2. Facts that we have learned
### 3. Facts still to look up
### 4. Facts still to derive
Now write your new list of facts below.
update_plan_pre_messages: |-
You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools.
You have been given a task:
```
{{task}}
```
Find below the record of what has been tried so far to solve it. Then you will be asked to make an updated plan to solve the task.
If the previous tries so far have met some success, you can make an updated plan based on these actions.
If you are stalled, you can make a completely new plan starting from scratch.
update_plan_post_messages: |-
You're still working towards solving this task:
```
{{task}}
```
You can leverage these tools:
{%- for tool in tools.values() %}
- {{ tool.name }}: {{ tool.description }}
Takes inputs: {{tool.inputs}}
Returns an output of type: {{tool.output_type}}
{%- endfor %}
{%- if managed_agents and managed_agents.values() | list %}
You can also give requests to team members.
Calling a team member works the same way as calling a tool: the only argument you can give in the call is 'task'.
Given that this team member is a real human, you should be very verbose in your task; it should be a long string providing as much detailed information as necessary.
Here is a list of the team members that you can call:
{%- for agent in managed_agents.values() %}
- {{ agent.name }}: {{ agent.description }}
{%- endfor %}
{%- else %}
{%- endif %}
Here is the up to date list of facts that you know:
```
{{facts_update}}
```
Now for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts.
This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer.
Beware that you have {remaining_steps} steps remaining.
Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS.
After writing the final step of the plan, write the '\n<end_plan>' tag and stop there.
Now write your new plan below.
managed_agent:
task: |-
You're a helpful agent named '{{name}}'.
You have been submitted this task by your manager.
---
Task:
{{task}}
---
You're helping your manager solve a wider task: so make sure to not provide a one-line answer, but give as much information as possible to give them a clear understanding of the answer.
Your final_answer WILL HAVE to contain these parts:
### 1. Task outcome (short version):
### 2. Task outcome (extremely detailed version):
### 3. Additional context (if relevant):
Put all these in your final_answer tool, everything that you do not pass as an argument to final_answer will be lost.
And even if your task resolution is not successful, please return as much context as possible, so that your manager can act upon this feedback.
report: |-
Here is the final answer from your managed agent '{{name}}':
{{final_answer}} | smolagents/src/smolagents/prompts/toolcalling_agent.yaml/0 | {
"file_path": "smolagents/src/smolagents/prompts/toolcalling_agent.yaml",
"repo_id": "smolagents",
"token_count": 3229
} |
import pytest
from smolagents.agents import ToolCall
from smolagents.memory import (
ActionStep,
AgentMemory,
ChatMessage,
MemoryStep,
Message,
MessageRole,
PlanningStep,
SystemPromptStep,
TaskStep,
)
class TestAgentMemory:
def test_initialization(self):
system_prompt = "This is a system prompt."
memory = AgentMemory(system_prompt=system_prompt)
assert memory.system_prompt.system_prompt == system_prompt
assert memory.steps == []
class TestMemoryStep:
def test_initialization(self):
step = MemoryStep()
assert isinstance(step, MemoryStep)
def test_dict(self):
step = MemoryStep()
assert step.dict() == {}
def test_to_messages(self):
step = MemoryStep()
with pytest.raises(NotImplementedError):
step.to_messages()
def test_action_step_to_messages():
action_step = ActionStep(
model_input_messages=[Message(role=MessageRole.USER, content="Hello")],
tool_calls=[
ToolCall(id="id", name="get_weather", arguments={"location": "Paris"}),
],
start_time=0.0,
end_time=1.0,
step_number=1,
error=None,
duration=1.0,
model_output_message=ChatMessage(role=MessageRole.ASSISTANT, content="Hi"),
model_output="Hi",
observations="This is a nice observation",
observations_images=["image1.png"],
action_output="Output",
)
messages = action_step.to_messages()
assert len(messages) == 4
for message in messages:
assert isinstance(message, dict)
assert "role" in message
assert "content" in message
assert isinstance(message["role"], MessageRole)
assert isinstance(message["content"], list)
assistant_message = messages[0]
assert assistant_message["role"] == MessageRole.ASSISTANT
assert len(assistant_message["content"]) == 1
for content in assistant_message["content"]:
assert isinstance(content, dict)
assert "type" in content
assert "text" in content
message = messages[1]
assert message["role"] == MessageRole.ASSISTANT
assert len(message["content"]) == 1
text_content = message["content"][0]
assert isinstance(text_content, dict)
assert "type" in text_content
assert "text" in text_content
observation_message = messages[2]
assert observation_message["role"] == MessageRole.TOOL_RESPONSE
assert "Observation:\nThis is a nice observation" in observation_message["content"][0]["text"]
image_message = messages[3]
image_content = image_message["content"][1]
assert isinstance(image_content, dict)
assert "type" in image_content
assert "image" in image_content
def test_planning_step_to_messages():
planning_step = PlanningStep(
model_input_messages=[Message(role=MessageRole.USER, content="Hello")],
model_output_message_facts=ChatMessage(role=MessageRole.ASSISTANT, content="Facts"),
facts="These are facts.",
model_output_message_plan=ChatMessage(role=MessageRole.ASSISTANT, content="Plan"),
plan="This is a plan.",
)
messages = planning_step.to_messages(summary_mode=False)
assert len(messages) == 2
for message in messages:
assert isinstance(message, dict)
assert "role" in message
assert "content" in message
assert isinstance(message["role"], MessageRole)
assert message["role"] == MessageRole.ASSISTANT
assert isinstance(message["content"], list)
assert len(message["content"]) == 1
for content in message["content"]:
assert isinstance(content, dict)
assert "type" in content
assert "text" in content
def test_task_step_to_messages():
task_step = TaskStep(task="This is a task.", task_images=["task_image1.png"])
messages = task_step.to_messages(summary_mode=False)
assert len(messages) == 1
for message in messages:
assert isinstance(message, dict)
assert "role" in message
assert "content" in message
assert isinstance(message["role"], MessageRole)
assert message["role"] == MessageRole.USER
assert isinstance(message["content"], list)
assert len(message["content"]) == 2
text_content = message["content"][0]
assert isinstance(text_content, dict)
assert "type" in text_content
assert "text" in text_content
for image_content in message["content"][1:]:
assert isinstance(image_content, dict)
assert "type" in image_content
assert "image" in image_content
def test_system_prompt_step_to_messages():
system_prompt_step = SystemPromptStep(system_prompt="This is a system prompt.")
messages = system_prompt_step.to_messages(summary_mode=False)
assert len(messages) == 1
for message in messages:
assert isinstance(message, dict)
assert "role" in message
assert "content" in message
assert isinstance(message["role"], MessageRole)
assert message["role"] == MessageRole.SYSTEM
assert isinstance(message["content"], list)
assert len(message["content"]) == 1
for content in message["content"]:
assert isinstance(content, dict)
assert "type" in content
assert "text" in content
| smolagents/tests/test_memory.py/0 | {
"file_path": "smolagents/tests/test_memory.py",
"repo_id": "smolagents",
"token_count": 2092
} |
# Rust builder
FROM lukemathwalker/cargo-chef:latest-rust-1.84.0 AS chef
WORKDIR /usr/src
ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
FROM chef AS planner
COPY Cargo.lock Cargo.lock
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY backends backends
COPY launcher launcher
RUN cargo chef prepare --recipe-path recipe.json
FROM chef AS builder
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
python3.11-dev
RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \
unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \
unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \
rm -f $PROTOC_ZIP
COPY --from=planner /usr/src/recipe.json recipe.json
RUN cargo chef cook --profile release-opt --recipe-path recipe.json
ARG GIT_SHA
ARG DOCKER_LABEL
COPY Cargo.lock Cargo.lock
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY backends backends
COPY launcher launcher
RUN cargo build --profile release-opt --frozen
# Text Generation Inference base image for RoCm
FROM rocm/dev-ubuntu-22.04:6.2 AS base
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
ccache \
curl \
git \
make \
libmsgpack-dev \
libssl-dev \
llvm-dev \
g++ \
# Needed to build VLLM & flash.
rocthrust-dev \
hipsparse-dev \
hipblas-dev \
hipcub-dev \
rocblas-dev \
hiprand-dev \
hipfft-dev \
rocrand-dev \
miopen-hip-dev \
hipsolver-dev \
rccl-dev \
cmake \
python3.11-venv && \
rm -rf /var/lib/apt/lists/*
# Keep in sync with `server/pyproject.toml`
ARG MAMBA_VERSION=23.1.0-1
ARG PYTHON_VERSION='3.11.10'
# Automatically set by buildx
ARG TARGETPLATFORM
ENV PATH=/opt/conda/bin:$PATH
ARG PYTORCH_ROCM_ARCH="gfx90a;gfx942"
# TGI seems to require libssl.so.1.1 instead of libssl.so.3, so we can't use ubuntu 22.04. Ubuntu 20.04 has python==3.8, and TGI requires python>=3.9, hence the need for miniconda.
# Install mamba
# translating Docker's TARGETPLATFORM into mamba arches
RUN case ${TARGETPLATFORM} in \
"linux/arm64") MAMBA_ARCH=aarch64 ;; \
*) MAMBA_ARCH=x86_64 ;; \
esac && \
curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh"
RUN chmod +x ~/mambaforge.sh && \
bash ~/mambaforge.sh -b -p /opt/conda && \
mamba init && \
rm ~/mambaforge.sh
# RUN conda install intel::mkl-static intel::mkl-include
# Install pytorch
# On arm64 we exit with an error code
RUN case ${TARGETPLATFORM} in \
"linux/arm64") exit 1 ;; \
*) /opt/conda/bin/conda update -y conda && \
/opt/conda/bin/conda install -y "python=${PYTHON_VERSION}" ;; \
esac && \
/opt/conda/bin/conda clean -ya
# Install flash-attention, torch dependencies
RUN python3 -m pip install --upgrade pip uv && pip install numpy einops ninja joblib msgpack cmake --no-cache-dir && rm -rf /var/lib/apt/lists/*
RUN conda install mkl=2021
ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/opt/rocm/lib/:/opt/conda/lib/python3.11/site-packages/torch/lib:/opt/conda/lib/
ARG COMMON_WORKDIR=/
WORKDIR ${COMMON_WORKDIR}
# Install HIPBLASLt
FROM base AS build_hipblaslt
ARG HIPBLASLT_BRANCH="e6da924"
RUN git clone https://github.com/ROCm/hipBLASLt.git \
&& cd hipBLASLt \
&& git checkout ${HIPBLASLT_BRANCH} \
&& SCCACHE_IDLE_TIMEOUT=1800 ./install.sh --architecture ${PYTORCH_ROCM_ARCH} --legacy_hipblas_direct \
&& cd build/release \
&& make package
FROM scratch AS export_hipblaslt
ARG COMMON_WORKDIR
COPY --from=build_hipblaslt ${COMMON_WORKDIR}/hipBLASLt/build/release/*.deb /
# RCCL build stages
FROM base AS build_rccl
ARG RCCL_BRANCH="rocm-6.2.0"
RUN git clone https://github.com/ROCm/rccl \
&& cd rccl \
&& git checkout ${RCCL_BRANCH} \
&& ./install.sh -p --amdgpu_targets ${PYTORCH_ROCM_ARCH}
FROM scratch AS export_rccl
ARG COMMON_WORKDIR
COPY --from=build_rccl ${COMMON_WORKDIR}/rccl/build/release/*.deb /
# Triton build stages
FROM base AS build_triton
ARG TRITON_BRANCH="e192dba"
ARG TRITON_REPO="https://github.com/triton-lang/triton.git"
RUN python3 -m pip install ninja cmake wheel pybind11 && git clone ${TRITON_REPO} \
&& cd triton \
&& git checkout ${TRITON_BRANCH} \
&& cd python \
&& python3 setup.py bdist_wheel --dist-dir=dist
FROM scratch AS export_triton
ARG COMMON_WORKDIR
COPY --from=build_triton ${COMMON_WORKDIR}/triton/python/dist/*.whl /
# # AMD-SMI build stages
FROM base AS build_amdsmi
RUN cd /opt/rocm/share/amd_smi \
&& pip wheel . --wheel-dir=dist
FROM scratch AS export_amdsmi
COPY --from=build_amdsmi /opt/rocm/share/amd_smi/dist/*.whl /
FROM base AS build_pytorch
RUN --mount=type=bind,from=export_hipblaslt,src=/,target=/install \
if ls /install/*.deb; then \
dpkg -i /install/*.deb \
&& sed -i 's/, hipblaslt-dev \(.*\), hipcub-dev/, hipcub-dev/g' /var/lib/dpkg/status \
&& sed -i 's/, hipblaslt \(.*\), hipfft/, hipfft/g' /var/lib/dpkg/status; \
fi
ARG BUILD_ENVIRONMENT=pytorch-linux-jammy-rocm6.2-py3.11
ARG PYTORCH_ROCM_ARCH="gfx90a;gfx942"
# A commit to fix the output scaling factor issue in _scaled_mm
# Not yet in 2.5.0-rc1
ARG PYTORCH_BRANCH="cedc116"
ARG PYTORCH_VISION_BRANCH="v0.19.1"
ARG PYTORCH_REPO="https://github.com/ROCm/pytorch.git"
RUN git clone ${PYTORCH_REPO} pytorch \
&& cd pytorch && git checkout ${PYTORCH_BRANCH} && git submodule update --init --recursive \
&& pip install -r requirements.txt --no-cache-dir \
&& python tools/amd_build/build_amd.py \
&& CMAKE_PREFIX_PATH=$(python3 -c 'import sys; print(sys.prefix)') python3 setup.py bdist_wheel --dist-dir=dist
FROM scratch AS export_pytorch
ARG COMMON_WORKDIR
COPY --from=build_pytorch ${COMMON_WORKDIR}/pytorch/dist/*.whl /
FROM base AS install_deps
ARG COMMON_WORKDIR
# Install hipblaslt
RUN --mount=type=bind,from=export_hipblaslt,src=/,target=/install \
if ls /install/*.deb; then \
dpkg -i /install/*.deb \
&& sed -i 's/, hipblaslt-dev \(.*\), hipcub-dev/, hipcub-dev/g' /var/lib/dpkg/status \
&& sed -i 's/, hipblaslt \(.*\), hipfft/, hipfft/g' /var/lib/dpkg/status; \
fi
RUN --mount=type=bind,from=export_rccl,src=/,target=/install \
if ls /install/*.deb; then \
dpkg -i /install/*.deb \
# RCCL needs to be installed twice
&& dpkg -i /install/*.deb \
&& sed -i 's/, rccl-dev \(.*\), rocalution/, rocalution/g' /var/lib/dpkg/status \
&& sed -i 's/, rccl \(.*\), rocalution/, rocalution/g' /var/lib/dpkg/status; \
fi
RUN --mount=type=bind,from=export_triton,src=/,target=/install \
if ls /install/*.whl; then \
# Preemptively uninstall to prevent pip same-version no-installs
pip uninstall -y triton \
&& pip install /install/*.whl; \
fi
RUN --mount=type=bind,from=export_amdsmi,src=/,target=/install \
# Preemptively uninstall to prevent pip same-version no-installs
pip uninstall -y amdsmi \
&& pip install /install/*.whl;
RUN --mount=type=bind,from=export_pytorch,src=/,target=/install \
if ls /install/*.whl; then \
# Preemptively uninstall to prevent pip same-version no-installs
pip uninstall -y torch torchvision \
&& pip install /install/*.whl; \
fi
FROM install_deps AS kernel-builder
# # Build vllm kernels
FROM kernel-builder AS vllm-builder
WORKDIR /usr/src
COPY server/Makefile-vllm Makefile
RUN pip install setuptools_scm
# Build specific version of vllm
RUN make build-vllm-rocm
# Build Flash Attention v2 kernels
FROM kernel-builder AS flash-att-v2-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att-v2 Makefile
# Build specific version of flash attention v2
RUN make build-flash-attention-v2-rocm
# Build Transformers CUDA kernels (gpt-neox and bloom)
FROM kernel-builder AS custom-kernels-builder
WORKDIR /usr/src
COPY server/custom_kernels/ .
RUN python setup.py build
# Build exllama kernels
FROM kernel-builder AS exllama-kernels-builder
WORKDIR /usr/src
COPY server/exllama_kernels/ .
RUN python setup.py build
# Build exllama v2 kernels
FROM kernel-builder AS exllamav2-kernels-builder
WORKDIR /usr/src
COPY server/exllamav2_kernels/ .
RUN python setup.py build
FROM kernel-builder AS marlin-kernels
WORKDIR /usr/src
ENV MARLIN_KERNELS_BRANCH=v0.3.6
ENV VLLM_TARGET_DEVICE=rocm
RUN git clone https://github.com/danieldk/marlin-kernels.git && \
cd marlin-kernels && \
git checkout ${MARLIN_KERNELS_BRANCH} && \
python setup.py install
FROM kernel-builder AS moe-kernels
WORKDIR /usr/src
ENV MOE_KERNELS_BRANCH=v0.8.2
ENV VLLM_TARGET_DEVICE=rocm
RUN git clone https://github.com/danieldk/moe-kernels.git && \
cd moe-kernels && \
git checkout ${MOE_KERNELS_BRANCH} && \
python setup.py install
FROM install_deps AS base-copy
# Text Generation Inference base env
ENV HF_HOME=/data \
HF_HUB_ENABLE_HF_TRANSFER=1 \
PORT=80
# Copy build artifacts from vllm builder
COPY --from=vllm-builder /usr/src/vllm/build/lib.linux-x86_64-cpython-311 /opt/conda/lib/python3.11/site-packages
# Copy build artifacts from flash attention v2 builder
COPY --from=flash-att-v2-builder /usr/src/flash-attention-v2/build/lib.linux-x86_64-cpython-311 /opt/conda/lib/python3.11/site-packages
# Copy build artifacts from custom kernels builder
COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-311 /opt/conda/lib/python3.11/site-packages
# Copy build artifacts from exllama kernels builder
COPY --from=exllama-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-311 /opt/conda/lib/python3.11/site-packages
# Copy build artifacts from exllamav2 kernels builder
COPY --from=exllamav2-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-311 /opt/conda/lib/python3.11/site-packages
# Copy build artifacts from marlin kernels
COPY --from=marlin-kernels /usr/src/marlin-kernels/build/lib.linux-x86_64-cpython-311 /opt/conda/lib/python3.11/site-packages
# Copy build artifacts from moe kernels
COPY --from=moe-kernels /usr/src/moe-kernels/build/lib.linux-x86_64-cpython-311 /opt/conda/lib/python3.11/site-packages
# Install server
COPY proto proto
COPY server server
COPY server/Makefile server/Makefile
ENV UV_SYSTEM_PYTHON=1
RUN cd server && \
pip install -U pip uv && \
uv sync --frozen --extra gen --extra accelerate --extra compressed-tensors --extra quantize --extra peft --extra outlines --no-install-project && \
. ./.venv/bin/activate && \
make gen-server-raw
RUN cd server && \
uv sync --frozen --extra gen --extra accelerate --extra compressed-tensors --extra quantize --extra peft --extra outlines && \
. ./.venv/bin/activate && \
pwd && \
text-generation-server --help
# Install benchmarker
COPY --from=builder /usr/src/target/release-opt/text-generation-benchmark /usr/local/bin/text-generation-benchmark
# Install router
COPY --from=builder /usr/src/target/release-opt/text-generation-router /usr/local/bin/text-generation-router
# Install launcher
COPY --from=builder /usr/src/target/release-opt/text-generation-launcher /usr/local/bin/text-generation-launcher
ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/opt/conda/lib/"
# AWS Sagemaker compatible image
FROM base AS sagemaker
COPY sagemaker-entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh
ENTRYPOINT ["./entrypoint.sh"]
# Final image
FROM base-copy
# Set as recommended: https://github.com/ROCm/triton/wiki/A-script-to-set-program-execution-environment-in-ROCm
ENV HIP_FORCE_DEV_KERNARG=1
# On MI250 and MI300, performances for flash with Triton FA are slightly better than CK.
# However, Triton requires tuning for each prompt length, which is prohibitive.
ENV ROCM_USE_FLASH_ATTN_V2_TRITON=0
ENV ROCM_USE_CUSTOM_PAGED_ATTN=1
ENV PYTORCH_TUNABLEOP_TUNING_AFTER_WARMUP=0
ENV VLLM_MOE_PADDING=0
ENV ATTENTION=paged
ENV PREFIX_CACHING=0
ENV PREFILL_CHUNKING=0
ENV ROCM_USE_SKINNY_GEMM=1
COPY ./tgi-entrypoint.sh /tgi-entrypoint.sh
RUN chmod +x /tgi-entrypoint.sh
ENTRYPOINT ["/tgi-entrypoint.sh"]
CMD ["--json-output"]
| text-generation-inference/Dockerfile_amd/0 | {
"file_path": "text-generation-inference/Dockerfile_amd",
"repo_id": "text-generation-inference",
"token_count": 5134
} |
#ifndef TGI_HARDWARE_CUDA
#define TGI_HARDWARE_CUDA
#include <cstdint>
#include <optional>
#include <nvml.h>
namespace huggingface::tgi::hardware::cuda {
static constexpr auto VOLTA = std::make_tuple(7u, 0u);
static constexpr auto TURING = std::make_tuple(7u, 5u);
static constexpr auto AMPERE = std::make_tuple(8u, 0u);
static constexpr auto HOPPER = std::make_tuple(9u, 0u);
static constexpr auto ADA_LOVELACE = std::make_tuple(8u, 9u);
/**
* Get the number of GPUs on the local machine
* @return std::nullopt if no device is available, otherwise >= 1
*/
inline std::optional<size_t> get_device_count() {
uint32_t numGpus = 0;
if (nvmlDeviceGetCount_v2(&numGpus) == NVML_SUCCESS) {
return numGpus;
}
return std::nullopt;
}
/**
* Store information about the version of the CUDA Compute Capabilities detected on the device
*/
struct compute_capabilities_t {
int32_t major;
int32_t minor;
compute_capabilities_t(): compute_capabilities_t(0) {}
explicit compute_capabilities_t(size_t device_idx): major(-1), minor(-1) {
nvmlDevice_t device;
if (nvmlDeviceGetHandleByIndex_v2(device_idx, &device) == NVML_SUCCESS) {
nvmlDeviceGetCudaComputeCapability(device, &major, &minor);
}
};
compute_capabilities_t(int32_t major, int32_t minor): major(major), minor(minor) {}
/**
     * Evaluate whether the underlying compute capabilities are at least equal to the provided 2-tuple (major, minor)
* @param sm Architecture version (major, minor)
     * @return True if the underlying compute capabilities are greater than or equal to the provided version
*/
[[nodiscard]] constexpr auto is_at_least(std::tuple<uint32_t, uint32_t> sm) const -> decltype(auto) { return std::tie(major, minor) >= sm; }
/**
* Check if the capabilities match at least Volta architecture (sm_70)
* @return true if at least Volta (>= sm_70), false otherwise
*/
[[nodiscard]] constexpr bool is_at_least_volta() const { return is_at_least(VOLTA); }
/**
* Check if the capabilities match at least Turing architecture (sm_75)
* @return true if at least Turing (>= sm_75), false otherwise
*/
[[nodiscard]] constexpr bool is_at_least_turing() const { return is_at_least(TURING); }
/**
* Check if the capabilities match at least Ampere architecture (sm_80)
* @return true if at least Ampere (>= sm_80), false otherwise
*/
[[nodiscard]] constexpr bool is_at_least_ampere() const { return is_at_least(AMPERE); }
/**
* Check if the capabilities match at least Ada Lovelace architecture (sm_89)
* @return true if at least Ada Lovelace (>= sm_89), false otherwise
*/
[[nodiscard]] constexpr bool is_at_least_ada_lovelace() const { return is_at_least(ADA_LOVELACE); }
/**
* Check if the capabilities match at least Hopper architecture (sm_90)
* @return true if at least Hopper (>= sm_90), false otherwise
*/
[[nodiscard]] constexpr bool is_at_least_hopper() const { return is_at_least(HOPPER); }
};
}
#endif
| text-generation-inference/backends/trtllm/csrc/hardware.hpp/0 | {
"file_path": "text-generation-inference/backends/trtllm/csrc/hardware.hpp",
"repo_id": "text-generation-inference",
"token_count": 1383
} |
mod backend;
mod client;
mod queue;
use crate::client::{ClientError, ShardedClient};
pub(crate) use backend::BackendV2;
use serde::Serialize;
use thiserror::Error;
use utoipa::ToSchema;
#[derive(Clone, Debug, Serialize, ToSchema)]
pub struct BackendInfo {
/// Mandatory
#[schema(example = "cuda")]
pub model_device_type: String,
#[schema(example = "torch.float16")]
pub model_dtype: String,
/// Backend parameters
#[schema(example = "1")]
pub speculate: usize,
#[schema(example = "1.2")]
pub waiting_served_ratio: f32,
#[schema(example = "32000")]
pub max_batch_total_tokens: u32,
#[schema(example = "20")]
pub max_waiting_tokens: usize,
#[schema(nullable = true, example = "null")]
pub max_batch_size: Option<usize>,
}
#[allow(clippy::too_many_arguments)]
pub async fn connect_backend(
max_input_tokens: usize,
max_total_tokens: usize,
master_shard_uds_path: String,
waiting_served_ratio: f32,
max_batch_prefill_tokens: u32,
max_batch_total_tokens: Option<u32>,
max_waiting_tokens: usize,
max_batch_size: Option<usize>,
) -> Result<(BackendV2, BackendInfo), V2Error> {
// Helper function
let check_max_batch_total_tokens = |max_supported_batch_total_tokens: Option<u32>| {
match max_supported_batch_total_tokens {
// Older models do not support automatic max-batch-total-tokens
None => {
let max_batch_total_tokens = max_batch_total_tokens
.unwrap_or(16000.max((max_total_tokens as u32).max(max_batch_prefill_tokens)));
tracing::warn!("Model does not support automatic max batch total tokens");
Ok(max_batch_total_tokens)
}
// Flash attention models return their max supported total tokens
Some(max_supported_batch_total_tokens) => {
// Warn if user added his own max-batch-total-tokens as we will ignore it
if max_batch_total_tokens.is_some() {
tracing::warn!(
"`--max-batch-total-tokens` is deprecated for Flash \
Attention models."
);
tracing::warn!(
"Inferred max batch total tokens: {max_supported_batch_total_tokens}"
);
}
if max_total_tokens as u32 > max_supported_batch_total_tokens {
return Err(V2Error::NotEnoughMemory(max_total_tokens));
}
Ok(max_supported_batch_total_tokens)
}
}
};
let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path)
.await
.map_err(V2Error::Connection)?;
// server is running on v2
// Clear the cache; useful if the webserver rebooted
sharded_client
.clear_cache(None)
.await
.map_err(V2Error::Cache)?;
// Get info from the shard
let shard_info = sharded_client.info().await.map_err(V2Error::Info)?;
// Warmup model
tracing::info!("Warming up model");
let max_batch_total_tokens = check_max_batch_total_tokens(
sharded_client
.warmup(
max_input_tokens as u32,
max_batch_prefill_tokens,
max_total_tokens as u32,
max_batch_size,
)
.await
.map_err(V2Error::Warmup)?,
)?;
tracing::info!("Setting max batch total tokens to {max_batch_total_tokens}");
let backend_info = BackendInfo {
waiting_served_ratio,
max_batch_total_tokens,
max_waiting_tokens,
max_batch_size,
model_device_type: shard_info.device_type.clone(),
model_dtype: shard_info.dtype.clone(),
speculate: shard_info.speculate as usize,
};
let backend = BackendV2::new(
sharded_client,
waiting_served_ratio,
max_batch_prefill_tokens,
max_batch_total_tokens,
max_waiting_tokens,
max_batch_size,
shard_info.requires_padding,
shard_info.window_size,
shard_info.speculate,
);
tracing::info!("Using backend V3");
Ok((backend, backend_info))
}
#[derive(Debug, Error)]
pub enum V2Error {
#[error("Unable to clear the Python model shards cache: {0}")]
Cache(ClientError),
#[error("Unable to connect to the Python model shards: {0}")]
Connection(ClientError),
#[error("Unable to get the Python model shards info: {0}")]
Info(ClientError),
#[error("Unable to warmup the Python model shards: {0}")]
Warmup(ClientError),
#[error("Not enough memory to handle `max_total_tokens={0}`")]
NotEnoughMemory(usize),
}
| text-generation-inference/backends/v2/src/lib.rs/0 | {
"file_path": "text-generation-inference/backends/v2/src/lib.rs",
"repo_id": "text-generation-inference",
"token_count": 2252
} |
<div align="center">
# Text Generation Inference benchmarking tool

</div>
A lightweight benchmarking tool inspired by [oha](https://github.com/hatoo/oha)
and powered by [Ratatui](https://github.com/ratatui/ratatui).
## Install
```shell
make install-benchmark
```
## Run
First, start `text-generation-inference`:
```shell
text-generation-launcher --model-id bigscience/bloom-560m
```
Then run the benchmarking tool:
```shell
text-generation-benchmark --tokenizer-name bigscience/bloom-560m
```
| text-generation-inference/benchmark/README.md/0 | {
"file_path": "text-generation-inference/benchmark/README.md",
"repo_id": "text-generation-inference",
"token_count": 184
} |
import pytest
from text_generation import (
InferenceAPIClient,
InferenceAPIAsyncClient,
Client,
AsyncClient,
)
from text_generation.errors import NotSupportedError, NotFoundError
from text_generation.inference_api import check_model_support, deployed_models
def test_check_model_support(flan_t5_xxl, unsupported_model, fake_model):
assert check_model_support(flan_t5_xxl)
assert not check_model_support(unsupported_model)
with pytest.raises(NotFoundError):
check_model_support(fake_model)
def test_deployed_models():
deployed_models()
def test_client(flan_t5_xxl):
client = InferenceAPIClient(flan_t5_xxl)
assert isinstance(client, Client)
def test_client_unsupported_model(unsupported_model):
with pytest.raises(NotSupportedError):
InferenceAPIClient(unsupported_model)
def test_async_client(flan_t5_xxl):
client = InferenceAPIAsyncClient(flan_t5_xxl)
assert isinstance(client, AsyncClient)
def test_async_client_unsupported_model(unsupported_model):
with pytest.raises(NotSupportedError):
InferenceAPIAsyncClient(unsupported_model)
| text-generation-inference/clients/python/tests/test_inference_api.py/0 | {
"file_path": "text-generation-inference/clients/python/tests/test_inference_api.py",
"repo_id": "text-generation-inference",
"token_count": 411
} |
# Monitoring TGI server with Prometheus and Grafana dashboard
A TGI server deployment can easily be monitored through a Grafana dashboard consuming a Prometheus data collection. Examples of inspectable metrics are statistics on the effective batch sizes used by TGI, prefill/decode latencies, the number of generated tokens, etc.
In this tutorial, we look at how to set up a local Grafana dashboard to monitor TGI usage.

## Setup on the server machine
First, on your server machine, TGI needs to be launched as usual. TGI exposes [multiple](https://github.com/huggingface/text-generation-inference/discussions/1127#discussioncomment-7240527) metrics that can be collected by Prometheus monitoring server.
In the rest of this tutorial, we assume that TGI was launched through Docker with `--network host`.
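For reference, a minimal sketch of such a launch is shown below; the model id, data volume and image tag are placeholders to adapt to your setup.
```bash
# Launch TGI with host networking, so that Prometheus can scrape it directly on the host
model=HuggingFaceH4/zephyr-7b-beta   # placeholder model
volume=$PWD/data                     # placeholder cache volume
docker run --gpus all --shm-size 1g --network host -v $volume:/data \
    ghcr.io/huggingface/text-generation-inference:latest --model-id $model

# Sanity check: the metrics Prometheus will scrape are served on the /metrics route (port 80 by default)
curl 0.0.0.0:80/metrics | head
```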
On the server where TGI is hosted, a Prometheus server needs to be installed and launched. To do so, please follow [Prometheus installation instructions](https://prometheus.io/download/#prometheus). For example, at the time of writing on a Linux machine:
```
wget https://github.com/prometheus/prometheus/releases/download/v2.52.0/prometheus-2.52.0.linux-amd64.tar.gz
tar -xvzf prometheus-2.52.0.linux-amd64.tar.gz
cd prometheus-2.52.0.linux-amd64
```
Prometheus needs to be configured to listen on TGI's port. To do so, in Prometheus configuration file `prometheus.yml`, one needs to edit the lines:
```
static_configs:
- targets: ["0.0.0.0:80"]
```
to use the correct IP address and port.
We suggest trying `curl 0.0.0.0:80/generate -X POST -d '{"inputs":"hey chatbot, how are","parameters":{"max_new_tokens":15}}' -H 'Content-Type: application/json'` on the server side to make sure the correct IP and port are configured.
Once Prometheus is configured, Prometheus server can be launched on the same machine where TGI is launched:
```
./prometheus --config.file="prometheus.yml"
```
In this guide, Prometheus monitoring data will be consumed on a local computer. Hence, we need to forward Prometheus port (by default 9090) to the local computer. To do so, we can for example:
* Use ssh [local port forwarding](https://www.ssh.com/academy/ssh/tunneling-example) (see the example just below)
* Use ngrok port tunneling
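For example, with ssh local port forwarding, a minimal command (where `user@tgi-server` is a placeholder for your own SSH access to the TGI machine) would be:
```bash
# Forward the remote Prometheus port 9090 to localhost:9090 on the monitoring machine
ssh -N -L 9090:localhost:9090 user@tgi-server
```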
For simplicity, we will use [Ngrok](https://ngrok.com/docs/) in this guide to tunnel the Prometheus port from the TGI server to the outside world.
For that, you should follow the steps at https://dashboard.ngrok.com/get-started/setup/linux, and once Ngrok is installed, use:
```bash
ngrok http http://0.0.0.0:9090
```
As a sanity check, one can make sure that Prometheus server can be accessed at the URL given by Ngrok (in the style of https://d661-4-223-164-145.ngrok-free.app) from a local machine.
## Setup on the monitoring machine
Monitoring is typically done on a machine other than the server itself. We use a Grafana dashboard to monitor TGI's server usage.
Two options are available:
* Use Grafana Cloud for a hosted dashboard solution (https://grafana.com/products/cloud/).
* Self-host a Grafana dashboard.
In this tutorial, for simplicity, we will self-host the dashboard. We recommend installing the Grafana open-source edition following [the official install instructions](https://grafana.com/grafana/download?platform=linux&edition=oss), using the available Linux binaries. For example:
```bash
wget https://dl.grafana.com/oss/release/grafana-11.0.0.linux-amd64.tar.gz
tar -zxvf grafana-11.0.0.linux-amd64.tar.gz
cd grafana-11.0.0
./bin/grafana-server
```
Once the Grafana server is launched, the Grafana interface is available at http://localhost:3000. One needs to log in with the `admin` username and `admin` password.
Once logged in, the Prometheus data source for Grafana needs to be configured, in the option `Add your first data source`. There, a Prometheus data source needs to be added with the Ngrok address we got earlier, which exposes the Prometheus port (example: https://d661-4-223-164-145.ngrok-free.app).
Once Prometheus data source is configured, we can finally create our dashboard! From home, go to `Create your first dashboard` and then `Import dashboard`. There, we will use the recommended dashboard template [tgi_grafana.json](https://github.com/huggingface/text-generation-inference/blob/main/assets/tgi_grafana.json) for a dashboard ready to be used, but you may configure your own dashboard as you like.
Community contributed dashboard templates are also available, for example [here](https://grafana.com/grafana/dashboards/19831-text-generation-inference-dashboard/) or [here](https://grafana.com/grafana/dashboards/20246-text-generation-inference/).
Load your dashboard configuration, and your TGI dashboard should be ready to go!
| text-generation-inference/docs/source/basic_tutorials/monitoring.md/0 | {
"file_path": "text-generation-inference/docs/source/basic_tutorials/monitoring.md",
"repo_id": "text-generation-inference",
"token_count": 1376
} |
# Supported Models
Text Generation Inference enables serving optimized models. The following sections list which models (VLMs & LLMs) are supported.
- [Deepseek V2](https://huggingface.co/deepseek-ai/DeepSeek-V2)
- [Deepseek V3](https://huggingface.co/deepseek-ai/DeepSeek-V3)
- [Idefics 2](https://huggingface.co/HuggingFaceM4/idefics2-8b) (Multimodal)
- [Idefics 3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) (Multimodal)
- [Llava Next (1.6)](https://huggingface.co/llava-hf/llava-v1.6-vicuna-13b-hf) (Multimodal)
- [Llama](https://huggingface.co/collections/meta-llama/llama-31-669fc079a0c406a149a5738f)
- [Phi 3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct)
- [Granite](https://huggingface.co/ibm-granite/granite-3.0-8b-instruct)
- [Gemma](https://huggingface.co/google/gemma-7b)
- [PaliGemma](https://huggingface.co/google/paligemma-3b-pt-224)
- [Gemma2](https://huggingface.co/collections/google/gemma-2-release-667d6600fd5220e7b967f315)
- [Cohere](https://huggingface.co/CohereForAI/c4ai-command-r-plus)
- [Dbrx](https://huggingface.co/databricks/dbrx-instruct)
- [Mamba](https://huggingface.co/state-spaces/mamba-2.8b-slimpj)
- [Mistral](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407)
- [Mixtral](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1)
- [Gpt Bigcode](https://huggingface.co/bigcode/gpt_bigcode-santacoder)
- [Phi](https://huggingface.co/microsoft/phi-1_5)
- [PhiMoe](https://huggingface.co/microsoft/Phi-3.5-MoE-instruct)
- [Baichuan](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat)
- [Falcon](https://huggingface.co/tiiuae/falcon-7b-instruct)
- [StarCoder 2](https://huggingface.co/bigcode/starcoder2-15b-instruct-v0.1)
- [Qwen 2](https://huggingface.co/collections/Qwen/qwen2-6659360b33528ced941e557f)
- [Qwen 2 VL](https://huggingface.co/collections/Qwen/qwen2-vl-66cee7455501d7126940800d)
- [Opt](https://huggingface.co/facebook/opt-6.7b)
- [T5](https://huggingface.co/google/flan-t5-xxl)
- [Galactica](https://huggingface.co/facebook/galactica-120b)
- [SantaCoder](https://huggingface.co/bigcode/santacoder)
- [Bloom](https://huggingface.co/bigscience/bloom-560m)
- [Mpt](https://huggingface.co/mosaicml/mpt-7b-instruct)
- [Gpt2](https://huggingface.co/openai-community/gpt2)
- [Gpt Neox](https://huggingface.co/EleutherAI/gpt-neox-20b)
- [Gptj](https://huggingface.co/EleutherAI/gpt-j-6b)
- [Idefics](https://huggingface.co/HuggingFaceM4/idefics-9b) (Multimodal)
- [Mllama](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct) (Multimodal)
If the above list lacks the model you would like to serve, depending on the model's pipeline type, you can try to initialize and serve the model anyway to see how well it performs, but performance isn't guaranteed for non-optimized models:
```python
# for causal LMs/text-generation models
AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")
# or, for text-to-text generation models
AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")
```
If you wish to serve a supported model that already exists on a local folder, just point to the local folder.
```bash
text-generation-launcher --model-id <PATH-TO-LOCAL-BLOOM>
```
| text-generation-inference/docs/source/supported_models.md/0 | {
"file_path": "text-generation-inference/docs/source/supported_models.md",
"repo_id": "text-generation-inference",
"token_count": 1350
} |
{
"choices": [
{
"finish_reason": "length",
"index": 0,
"logprobs": null,
"text": " A Beginner’s Guide\nDeep learning is a subset"
}
],
"created": 1725876621,
"id": "",
"model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
"object": "text_completion",
"system_fingerprint": "2.2.1-dev0-native",
"usage": {
"completion_tokens": 10,
"prompt_tokens": 6,
"total_tokens": 16
}
}
| text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_single_prompt.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_single_prompt.json",
"repo_id": "text-generation-inference",
"token_count": 212
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 25,
"logprob": -2.9316406,
"special": false,
"text": ":"
},
{
"id": 330,
"logprob": -3.5136719,
"special": false,
"text": " \""
},
{
"id": 489,
"logprob": -0.7783203,
"special": false,
"text": " +"
},
{
"id": 1715,
"logprob": -1.2314453,
"special": false,
"text": " request"
},
{
"id": 489,
"logprob": -2.0019531,
"special": false,
"text": " +"
},
{
"id": 2990,
"logprob": -1.5009766,
"special": false,
"text": " \"\\"
},
{
"id": 77,
"logprob": -0.057434082,
"special": false,
"text": "n"
},
{
"id": 702,
"logprob": -1.4912109,
"special": false,
"text": "\"\n"
},
{
"id": 262,
"logprob": -1.2636719,
"special": false,
"text": " "
},
{
"id": 557,
"logprob": -2.4042969,
"special": false,
"text": " }\n\n"
}
],
"top_tokens": null
},
"generated_text": ": \" + request + \"\\n\"\n }\n\n"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_exl2/test_flash_llama_exl2.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_exl2/test_flash_llama_exl2.json",
"repo_id": "text-generation-inference",
"token_count": 880
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 5229,
"logprob": -0.6645508,
"special": false,
"text": " failed"
},
{
"id": 29901,
"logprob": 0.0,
"special": false,
"text": ":"
},
{
"id": 6527,
"logprob": -2.2324219,
"special": false,
"text": " Could"
},
{
"id": 451,
"logprob": 0.0,
"special": false,
"text": " not"
},
{
"id": 6088,
"logprob": -1.6074219,
"special": false,
"text": " parse"
},
{
"id": 1243,
"logprob": -1.6298828,
"special": false,
"text": " test"
},
{
"id": 1206,
"logprob": -0.72558594,
"special": false,
"text": " case"
},
{
"id": 1024,
"logprob": -0.40429688,
"special": false,
"text": " name"
},
{
"id": 515,
"logprob": 0.0,
"special": false,
"text": " from"
},
{
"id": 525,
"logprob": -1.2519531,
"special": false,
"text": " '"
}
],
"top_tokens": null
},
"generated_text": "Test request failed: Could not parse test case name from '"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_marlin_24/test_flash_llama_marlin24_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_marlin_24/test_flash_llama_marlin24_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 857
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [],
"seed": null,
"tokens": [
{
"id": 198,
"logprob": -2.9023438,
"special": false,
"text": "\n"
},
{
"id": 2,
"logprob": -2.9160156,
"special": false,
"text": "#"
},
{
"id": 4230,
"logprob": -3.1035156,
"special": false,
"text": " Create"
},
{
"id": 264,
"logprob": -1.1025391,
"special": false,
"text": " a"
},
{
"id": 1681,
"logprob": -1.6914062,
"special": false,
"text": " request"
},
{
"id": 198,
"logprob": -1.1953125,
"special": false,
"text": "\n"
},
{
"id": 2035,
"logprob": -1.3203125,
"special": false,
"text": "request"
},
{
"id": 284,
"logprob": -0.13537598,
"special": false,
"text": " ="
},
{
"id": 7388,
"logprob": -1.2402344,
"special": false,
"text": " requests"
},
{
"id": 670,
"logprob": -0.2775879,
"special": false,
"text": ".get"
}
],
"top_tokens": null
},
"generated_text": "\n# Create a request\nrequest = requests.get"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2/test_flash_qwen2.json",
"repo_id": "text-generation-inference",
"token_count": 865
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 60,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 222,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 222,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 40,
"logprob": -0.7944336,
"special": false,
"text": "#"
},
{
"id": 494,
"logprob": 0.0,
"special": false,
"text": " +"
},
{
"id": 447,
"logprob": -0.1796875,
"special": false,
"text": " ["
},
{
"id": 9009,
"logprob": 0.0,
"special": false,
"text": "markdown"
},
{
"id": 98,
"logprob": 0.0,
"special": false,
"text": "]"
},
{
"id": 37402,
"logprob": 0.0,
"special": false,
"text": " slideshow"
},
{
"id": 8492,
"logprob": 0.0,
"special": false,
"text": "={\""
},
{
"id": 7277,
"logprob": 0.0,
"special": false,
"text": "slide"
},
{
"id": 100,
"logprob": 0.0,
"special": false,
"text": "_"
},
{
"id": 700,
"logprob": 0.0,
"special": false,
"text": "type"
},
{
"id": 582,
"logprob": 0.0,
"special": false,
"text": "\":"
},
{
"id": 332,
"logprob": 0.0,
"special": false,
"text": " \""
},
{
"id": 7277,
"logprob": -0.06994629,
"special": false,
"text": "slide"
},
{
"id": 3667,
"logprob": 0.0,
"special": false,
"text": "\"}"
},
{
"id": 222,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 40,
"logprob": 0.0,
"special": false,
"text": "#"
},
{
"id": 607,
"logprob": -0.8261719,
"special": false,
"text": " #"
},
{
"id": 244,
"logprob": -1.8574219,
"special": false,
"text": " "
},
{
"id": 55,
"logprob": -1.4541016,
"special": false,
"text": "2"
},
{
"id": 51,
"logprob": 0.0,
"special": false,
"text": "."
},
{
"id": 6208,
"logprob": -0.9794922,
"special": false,
"text": " What"
},
{
"id": 458,
"logprob": 0.0,
"special": false,
"text": " is"
},
{
"id": 341,
"logprob": 0.0,
"special": false,
"text": " the"
},
{
"id": 10609,
"logprob": -0.69189453,
"special": false,
"text": " difference"
},
{
"id": 3761,
"logprob": 0.0,
"special": false,
"text": " between"
},
{
"id": 331,
"logprob": 0.0,
"special": false,
"text": " a"
},
{
"id": 1168,
"logprob": -0.27172852,
"special": false,
"text": " list"
},
{
"id": 480,
"logprob": 0.0,
"special": false,
"text": " and"
},
{
"id": 331,
"logprob": 0.0,
"special": false,
"text": " a"
},
{
"id": 8871,
"logprob": 0.0,
"special": false,
"text": " tuple"
},
{
"id": 68,
"logprob": 0.0,
"special": false,
"text": "?"
},
{
"id": 222,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 40,
"logprob": -1.3359375,
"special": false,
"text": "#"
},
{
"id": 222,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 40,
"logprob": 0.0,
"special": false,
"text": "#"
},
{
"id": 449,
"logprob": -0.03164673,
"special": false,
"text": " -"
},
{
"id": 418,
"logprob": -1.0947266,
"special": false,
"text": " A"
},
{
"id": 1168,
"logprob": 0.0,
"special": false,
"text": " list"
},
{
"id": 458,
"logprob": 0.0,
"special": false,
"text": " is"
},
{
"id": 331,
"logprob": -0.3305664,
"special": false,
"text": " a"
},
{
"id": 14792,
"logprob": 0.0,
"special": false,
"text": " mutable"
},
{
"id": 6645,
"logprob": -0.40478516,
"special": false,
"text": " sequence"
},
{
"id": 451,
"logprob": 0.0,
"special": false,
"text": " of"
},
{
"id": 4725,
"logprob": -0.50390625,
"special": false,
"text": " elements"
},
{
"id": 49,
"logprob": -2.1269531,
"special": false,
"text": ","
},
{
"id": 2236,
"logprob": -0.1427002,
"special": false,
"text": " while"
},
{
"id": 331,
"logprob": 0.0,
"special": false,
"text": " a"
},
{
"id": 8871,
"logprob": 0.0,
"special": false,
"text": " tuple"
},
{
"id": 458,
"logprob": 0.0,
"special": false,
"text": " is"
},
{
"id": 619,
"logprob": 0.0,
"special": false,
"text": " an"
},
{
"id": 26079,
"logprob": 0.0,
"special": false,
"text": " immutable"
},
{
"id": 6645,
"logprob": 0.0,
"special": false,
"text": " sequence"
},
{
"id": 451,
"logprob": 0.0,
"special": false,
"text": " of"
},
{
"id": 4725,
"logprob": 0.0,
"special": false,
"text": " elements"
},
{
"id": 51,
"logprob": 0.0,
"special": false,
"text": "."
},
{
"id": 222,
"logprob": 0.0,
"special": false,
"text": "\n"
},
{
"id": 40,
"logprob": 0.0,
"special": false,
"text": "#"
},
{
"id": 449,
"logprob": 0.0,
"special": false,
"text": " -"
}
],
"top_tokens": null
},
"generated_text": "\n\n# + [markdown] slideshow={\"slide_type\": \"slide\"}\n# # 2. What is the difference between a list and a tuple?\n#\n# - A list is a mutable sequence of elements, while a tuple is an immutable sequence of elements.\n# -"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder2_lora/test_flash_starcoder2_default_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder2_lora/test_flash_starcoder2_default_params.json",
"repo_id": "text-generation-inference",
"token_count": 4513
} |
{
"details": {
"best_of_sequences": null,
"finish_reason": "stop_sequence",
"generated_tokens": 6,
"prefill": [],
"seed": 0,
"tokens": [
{
"id": 13,
"logprob": -1.0654297,
"special": false,
"text": "\n"
},
{
"id": 1014,
"logprob": -2.7460938,
"special": false,
"text": "The"
},
{
"id": 6032,
"logprob": -1.359375,
"special": false,
"text": " purpose"
},
{
"id": 302,
"logprob": 0.0,
"special": false,
"text": " of"
},
{
"id": 456,
"logprob": 0.0,
"special": false,
"text": " this"
},
{
"id": 1369,
"logprob": -0.40063477,
"special": false,
"text": " test"
}
],
"top_tokens": null
},
"generated_text": "Test request\nThe purpose of this test"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_llava_next/test_flash_llava_next_all_params.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_llava_next/test_flash_llava_next_all_params.json",
"repo_id": "text-generation-inference",
"token_count": 563
} |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 6,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 259,
"logprob": -1.3798828,
"special": false,
"text": " "
},
{
"id": 39261,
"logprob": -0.36328125,
"special": false,
"text": "Because"
},
{
"id": 609,
"logprob": -1.0947266,
"special": false,
"text": " it"
},
{
"id": 339,
"logprob": -0.8286133,
"special": false,
"text": " is"
},
{
"id": 16017,
"logprob": -1.6826172,
"special": false,
"text": " blue"
},
{
"id": 1,
"logprob": -0.7290039,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Because it is blue"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 6,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 259,
"logprob": -1.3789062,
"special": false,
"text": " "
},
{
"id": 39261,
"logprob": -0.36279297,
"special": false,
"text": "Because"
},
{
"id": 609,
"logprob": -1.0966797,
"special": false,
"text": " it"
},
{
"id": 339,
"logprob": -0.8276367,
"special": false,
"text": " is"
},
{
"id": 16017,
"logprob": -1.6845703,
"special": false,
"text": " blue"
},
{
"id": 1,
"logprob": -0.72753906,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Because it is blue"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 6,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 259,
"logprob": -1.3789062,
"special": false,
"text": " "
},
{
"id": 39261,
"logprob": -0.36279297,
"special": false,
"text": "Because"
},
{
"id": 609,
"logprob": -1.0966797,
"special": false,
"text": " it"
},
{
"id": 339,
"logprob": -0.8276367,
"special": false,
"text": " is"
},
{
"id": 16017,
"logprob": -1.6845703,
"special": false,
"text": " blue"
},
{
"id": 1,
"logprob": -0.72753906,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Because it is blue"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "eos_token",
"generated_tokens": 6,
"prefill": [
{
"id": 0,
"logprob": null,
"text": "<pad>"
}
],
"seed": null,
"tokens": [
{
"id": 259,
"logprob": -1.3789062,
"special": false,
"text": " "
},
{
"id": 39261,
"logprob": -0.36279297,
"special": false,
"text": "Because"
},
{
"id": 609,
"logprob": -1.0966797,
"special": false,
"text": " it"
},
{
"id": 339,
"logprob": -0.8276367,
"special": false,
"text": " is"
},
{
"id": 16017,
"logprob": -1.6845703,
"special": false,
"text": " blue"
},
{
"id": 1,
"logprob": -0.72753906,
"special": true,
"text": "</s>"
}
]
},
"generated_text": "Because it is blue"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base_load.json",
"repo_id": "text-generation-inference",
"token_count": 2874
} |
import pytest
@pytest.fixture(scope="module")
def flash_llama_awq_handle_sharded(launcher):
with launcher(
"abhinavkulkarni/codellama-CodeLlama-7b-Python-hf-w4-g128-awq",
num_shard=2,
quantize="awq",
) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama_awq_sharded(flash_llama_awq_handle_sharded):
await flash_llama_awq_handle_sharded.health(300)
return flash_llama_awq_handle_sharded.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_llama_awq_sharded(flash_llama_awq_sharded, response_snapshot):
response = await flash_llama_awq_sharded.generate(
"What is Deep Learning?", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert (
response.generated_text
== "\nWhat is the difference between Deep Learning and Machine"
)
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
async def test_flash_llama_awq_load_sharded(
flash_llama_awq_sharded, generate_load, response_snapshot
):
responses = await generate_load(
flash_llama_awq_sharded, "What is Deep Learning?", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all(
[
r.generated_text
== "\nWhat is the difference between Deep Learning and Machine"
for r in responses
]
)
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_awq_sharded.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_awq_sharded.py",
"repo_id": "text-generation-inference",
"token_count": 624
} |
import pytest
@pytest.fixture(scope="module")
def flash_starcoder2_handle(launcher):
with launcher("bigcode/starcoder2-3b", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_starcoder2(flash_starcoder2_handle):
await flash_starcoder2_handle.health(300)
return flash_starcoder2_handle.client
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder2(flash_starcoder2, response_snapshot):
response = await flash_starcoder2.generate(
"def print_hello", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder2_default_params(flash_starcoder2, response_snapshot):
response = await flash_starcoder2.generate(
"def print_hello",
max_new_tokens=60,
temperature=0.2,
top_p=0.95,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 60
assert response == response_snapshot
@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_starcoder2_load(
flash_starcoder2, generate_load, response_snapshot
):
responses = await generate_load(
flash_starcoder2, "def print_hello", max_new_tokens=10, n=4
)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_starcoder2.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_starcoder2.py",
"repo_id": "text-generation-inference",
"token_count": 625
} |
import pytest
@pytest.fixture(scope="module")
def opt_sharded_handle(launcher):
with launcher("facebook/opt-6.7b", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def opt_sharded(opt_sharded_handle):
await opt_sharded_handle.health(300)
return opt_sharded_handle.client
@pytest.mark.release
@pytest.mark.asyncio
async def test_opt(opt_sharded):
pass
| text-generation-inference/integration-tests/models/test_opt.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_opt.py",
"repo_id": "text-generation-inference",
"token_count": 160
} |
import json
def main():
with open("./ShareGPT_V3_unfiltered_cleaned_split.json", "r") as f:
data = json.load(f)
# Select only the first 2k conversations that start with a human.
max = 2000
conversations = []
for conversation in data:
conv = conversation.get("conversations")
if conv and conv[0]["from"] == "human":
# Trim the rest of the output
conversation["conversations"] = conversation["conversations"][:1]
conversations.append(conversation)
            if len(conversations) >= max:
break
with open("./small.json", "w") as f:
        json.dump(conversations, f, indent=4)
if __name__ == "__main__":
main()
| text-generation-inference/load_tests/filter.py/0 | {
"file_path": "text-generation-inference/load_tests/filter.py",
"repo_id": "text-generation-inference",
"token_count": 307
} |
# Router
Also named `webserver` throughout the docs.
This router handles most of the batching logic: it decides when to pass new `prefill` requests,
when to pause `decode` requests, which requests go into which batch, and so on.
It uses gRPC to communicate with the shards, which can therefore be kept
much simpler and focus on making the forward passes as efficient as possible.
## Continuous batching
One important feature of `text-generation-inference` is enabled
by this `router`.
Continuous batching is the act of regularly running queries in the same
`forward` step of the LLM (a "batch") and also removing them when they are
finished.
In order for continuous batching to be useful, you need to have more compute available
relative to the memory requirements of your model. This is generally true for
LLMs, and the larger the model, the truer it gets (since you have to pool multiple
GPUs to load the model, you effectively have a lot of compute power at hand).
Static batching is the act of doing several queries at the same time, but usually
this is controlled by the client, and therefore the amount of batching is decided
beforehand.
For text generation, where LLMs are memory bound, we can try to use the available
compute much more efficiently: clients send us single queries, and the router
mixes and matches queries into and out of batches to use the compute as efficiently
as possible. This is possible because for LLMs the total compute for running the
model is much bigger than the cost of mixing and matching the batches themselves.
### Simple continuous batching
text-generation works by feeding a prompt to a model, and iteratively calling
`forward` on the model to produce new text, 1 token at a time.
The first idea is simple: when a query arrives, we start working on it directly.
When new queries arrive, we simply wait for the current `forward` to be finished,
then batch the currently running prompt with the new query, and call `forward` again.
Whenever a query is finished, either because the model produced an EOS (end of sequence) token
or because the query reached the allowed limit, we simply drop it from the batch, free
its allocated memory, and continue with the rest until nothing is left.
This simple idea generalizes very well and we could potentially stack many requests
in the same batch.
One thing to note is that queries can potentially run with different parameters,
meaning different ways to choose the next token (sampling or not, temperature, top_k, etc.). This is not a problem for the proposed approach; we just need to do the sampling
independently for each member of the batch. A minimal sketch of such a loop is shown below.
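To make this concrete, here is a minimal, self-contained Python sketch of such a loop. It is only an illustration, not the actual router (which is written in Rust and coordinates the shards over gRPC); the `Request` class, `fake_forward` function, and the `EOS`/`MAX_NEW_TOKENS` constants are invented for the example.

```python
from collections import deque
from dataclasses import dataclass, field
import random

EOS = -1             # made-up end-of-sequence token id
MAX_NEW_TOKENS = 16  # made-up per-request generation limit


@dataclass
class Request:
    prompt: list[int]
    generated: list[int] = field(default_factory=list)

    def is_finished(self) -> bool:
        # A request leaves the batch once it produced EOS or hit its limit.
        return bool(self.generated) and (
            self.generated[-1] == EOS or len(self.generated) >= MAX_NEW_TOKENS
        )


def fake_forward(batch: list[Request]) -> list[int]:
    # Stand-in for one batched `forward` step: one new token per running request.
    return [random.choice([EOS, 1, 2, 3]) for _ in batch]


def continuous_batching(incoming: deque) -> list[Request]:
    running, finished = [], []
    while incoming or running:
        # Admit waiting requests between forward steps: this is the "continuous" part.
        while incoming:
            running.append(incoming.popleft())
        # One forward step over the whole batch.
        for request, token in zip(running, fake_forward(running)):
            request.generated.append(token)
        # Evict finished requests so their slots are free for new ones.
        finished += [r for r in running if r.is_finished()]
        running = [r for r in running if not r.is_finished()]
    return finished


if __name__ == "__main__":
    queue = deque(Request(prompt=[1, 2, 3]) for _ in range(4))
    print([r.generated for r in continuous_batching(queue)])
```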
### Prefill, decode and past key values
In order to make LLMs and text-generation efficient, there's actually a very powerful
trick that can be used, which is the "caching" of some attention matrices. [More on that
in the first part of this blog](https://huggingface.co/blog/accelerated-inference#getting-to-the-first-10x-speedup)
What this means is that the first "pass" over a prompt is different from the subsequent
"forward" passes: for the first one we have to compute the entire attention matrix, whereas the follow-ups only need to compute attention for the newly generated token.
The first pass is called `prefill` throughout this codebase, whereas the follow-ups are called `decode`.
Since `prefill` is much more expensive than `decode`, we don't want to run it all the time,
but a currently running query is probably doing `decode`. If we want to do continuous
batching as explained previously, we need to run `prefill` at some point in order to create
the attention cache a new query needs before it can join the `decode` group; the sketch below illustrates the two phases.
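As a rough illustration of the two phases (not the server's actual implementation), the sketch below uses the `transformers` API directly; the choice of `gpt2` and of greedy decoding is arbitrary.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2").eval()

input_ids = tokenizer("Deep learning is", return_tensors="pt").input_ids

# Prefill: one pass over the full prompt, computing the whole attention matrix
# and keeping the key/value cache around.
with torch.no_grad():
    out = model(input_ids, use_cache=True)
past_key_values = out.past_key_values
next_token = out.logits[:, -1:].argmax(dim=-1)

# Decode: each step only feeds the single new token and reuses the cache,
# so only the new token's attention has to be computed.
for _ in range(5):
    with torch.no_grad():
        out = model(next_token, past_key_values=past_key_values, use_cache=True)
    past_key_values = out.past_key_values
    next_token = out.logits[:, -1:].argmax(dim=-1)
```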
`text-generation-inference` uses a bunch of different strategies and parameters in
order to enable you to find the sweet spot between exploiting the hardware and perceived latency.
With no continuous batching at all, latency is going to be super good, but throughput (meaning
the total number of requests allowed in a given timeframe) is going to be super bad (since it's essentially 1).
With static batching, you can probably reach the maximum throughput (by using the maximum total batch size applicable to your hardware), but the latency is super bad since in order to have maximum throughput you need to wait for requests to come in before processing.
With continuous batching you can find a sweet spot. In general latency is the most critical
parameter users care about. But a 2x latency slowdown for 10x more users on the same
hardware is an acceptable tradeoff.
## Token streaming
This is a very important aspect of client UX. As mentioned above, latency is the
most critical perceived quality of an LLM API.
With token streaming, the server can start answering right after the first `prefill` pass,
without waiting for all the generation to be done. For extremely long queries
this means clients can start to see something happening orders of magnitude before
the work is done. Seeing something in progress allows them to cut the generation short if it's not
what they wanted, and it also simply "feels" better. A minimal client-side sketch is shown below.
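As a minimal client-side sketch, assuming a TGI instance is already listening on `http://localhost:8080`, streamed tokens can be consumed with `huggingface_hub`:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(model="http://localhost:8080")

# Tokens are printed as soon as each `decode` step produces them,
# instead of waiting for the whole generation to finish.
for token_text in client.text_generation(
    "What is continuous batching?", max_new_tokens=64, stream=True
):
    print(token_text, end="", flush=True)
```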
| text-generation-inference/router/README.md/0 | {
"file_path": "text-generation-inference/router/README.md",
"repo_id": "text-generation-inference",
"token_count": 1175
} |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#ifndef _cuda_compat_cuh
#define _cuda_compat_cuh
// atomicAdd for half types, to support CC < 7.x
__device__ __forceinline__ void atomicAdd_half(half* address, half val)
{
unsigned int * address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2));
unsigned int old = *address_as_ui;
unsigned int assumed;
do
{
assumed = old;
__half_raw hsum;
hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
half tmpres = __hadd(hsum, val);
hsum = __half_raw(tmpres);
old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x;
old = atomicCAS(address_as_ui, assumed, old);
}
while (assumed != old);
}
// atomicAdd for half2 types
__device__ __forceinline__ void atomicAdd_half2(half2* address, half2 val)
{
unsigned int* address_as_ui = (unsigned int*)address;
unsigned int old = *address_as_ui;
unsigned int assumed;
do
{
assumed = old;
half2 old_val = *((half2*)&old);
half2 new_val = __hadd2(old_val, val);
old = atomicCAS(address_as_ui, assumed, *((unsigned int*)&new_val));
}
while (assumed != old);
}
//
#if defined(__CUDA_ARCH__) || defined(USE_ROCM)
#if __CUDA_ARCH__ < 700 || defined(USE_ROCM)
__device__ __forceinline__ void atomicAdd(half* address, half val) { atomicAdd_half(address, val); }
#if __CUDA_ARCH__ < 600 || defined(USE_ROCM)
__device__ __forceinline__ void atomicAdd(half2* address, half2 val) { atomicAdd_half2(address, val); }
#endif
#endif
#endif
#endif
| text-generation-inference/server/exllama_kernels/exllama_kernels/cu_compat.cuh/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cu_compat.cuh",
"repo_id": "text-generation-inference",
"token_count": 692
} |
#ifndef _util_h
#define _util_h
#define DBGS(__x) printf("%s\n", __x)
#define DBGI(__x) printf("%s: %i\n", #__x, __x)
#define DBGI2(__x, __y) printf("%s, %s: %i, %i\n", #__x, #__y, __x, __y)
#define DBGI3(__x, __y, __z) printf("%s, %s, %s: %i, %i, %i\n", #__x, #__y, #__z, __x, __y, __z)
#define DBGF(__x) printf("%s: %f\n", #__x, __x)
#define DBGF2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __x, __y)
#define DBGF3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __x, __y, __z)
#endif
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cpp/util.h/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cpp/util.h",
"repo_id": "text-generation-inference",
"token_count": 296
} |
#ifndef _util_cuh
#define _util_cuh
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#include <ATen/cuda/CUDAContext.h>
#define DIVIDE(x, size) (((x) + (size) - 1) / (size))
#define DBGS(__x) printf("%s\n", __x)
#define DBGI(__x) printf("%s: %i\n", #__x, __x)
#define DBGI2(__x, __y) printf("%s, %s: %i, %i\n", #__x, #__y, __x, __y)
#define DBGI3(__x, __y, __z) printf("%s, %s, %s: %i, %i, %i\n", #__x, #__y, #__z, __x, __y, __z)
#define DBGX(__x) printf("%s: %x\n", #__x, __x)
#define DBGX2(__x, __y) printf("%s, %s: %x, %x\n", #__x, #__y, __x, __y)
#define DBGX3(__x, __y, __z) printf("%s, %s, %s: %x, %x, %x\n", #__x, #__y, #__z, __x, __y, __z)
#define DBGF(__x) printf("%s: %f\n", #__x, __x)
#define DBGF2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __x, __y)
#define DBGF3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __x, __y, __z)
#define DBGH(__x) printf("%s: %f\n", #__x, __half2float(__x))
#define DBGH2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __half2float(__x), __half2float(__y))
#define DBGH3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __half2float(__x), __half2float(__y), __half2float(__z))
#define DBGIH(__x, __y) printf("%s, %s: %i, %f\n", #__x, #__y, __x, __half2float(__y))
#define DBGIH2(__x, __y, __z) printf("%s, %s, %s: %i, %f, %f\n", #__x, #__y, #__z, __x, __half2float(__y), __half2float(__z))
__forceinline__ __device__ half dq_scale_(const int qs, const half max_scale)
{
half qs_h = __hmul(__int2half_rn(qs + 1), __float2half_rn(1.0f / 16.0f));
qs_h = __hmul(qs_h, qs_h);
qs_h = __hmul(qs_h, max_scale);
return qs_h;
}
__forceinline__ __device__ float clamp(float x, float a, float b)
{
return fmaxf(a, fminf(b, x));
}
#define cuda_check(ans) { gpu_assert((ans), __FILE__, __LINE__); }
inline void gpu_assert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"CUDA error: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void print_global_mem(const half* ptr, int rows, int columns, int stride);
#endif
| text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh",
"repo_id": "text-generation-inference",
"token_count": 1115
} |
from text_generation_server.utils.hub import (
download_weights,
weight_hub_files,
weight_files,
)
from text_generation_server.utils.convert import convert_files
def test_convert_files():
model_id = "bigscience/bloom-560m"
pt_filenames = weight_hub_files(model_id, extension=".bin")
local_pt_files = download_weights(pt_filenames, model_id)
local_st_files = [
p.parent / f"{p.stem.lstrip('pytorch_')}.safetensors" for p in local_pt_files
]
convert_files(local_pt_files, local_st_files, discard_names=[])
found_st_files = weight_files(model_id)
assert all([p in found_st_files for p in local_st_files])
| text-generation-inference/server/tests/utils/test_convert.py/0 | {
"file_path": "text-generation-inference/server/tests/utils/test_convert.py",
"repo_id": "text-generation-inference",
"token_count": 259
} |
from dataclasses import dataclass
import torch
from typing import Optional
@dataclass
class Seqlen:
input_lengths: torch.Tensor
cache_lengths: torch.Tensor
cu_seqlen_q: Optional[torch.Tensor]
cu_seqlen_k: Optional[torch.Tensor]
max_q: int
max_k: int
def __init__(
self,
input_lengths,
cache_lengths,
cu_seqlen_q=None,
max_q=None,
max_k=None,
):
self.input_lengths = input_lengths
self.cache_lengths = cache_lengths
device = self.input_lengths.device
shape = self.input_lengths.shape
if cu_seqlen_q is None:
cu_seqlen_q = torch.arange(
shape[0] + 1,
device=device,
dtype=torch.int32,
)
max_q = 1
else:
assert max_q is not None
assert max_k is not None
cu_seqlen_k = torch.zeros(shape[-1] + 1, device=device, dtype=torch.int32)
# cuda graphs don't like this and this is necessary to clamp within mistral
# Although FA2 might not want the clamping
# cu_seqlen_k[0] = 0
total = self.input_lengths + self.cache_lengths
torch.cumsum(total, -1, out=cu_seqlen_k[1:])
self.cu_seqlen_q = cu_seqlen_q
self.cu_seqlen_k = cu_seqlen_k
self.max_q = max_q
self.max_k = max_k
def clamp(self, max):
# Flash decoding doesn't need to clamp
return self
| text-generation-inference/server/text_generation_server/layers/attention/common.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/attention/common.py",
"repo_id": "text-generation-inference",
"token_count": 739
} |
from typing import List, Union
import torch
from compressed_tensors.quantization import ActivationOrdering, QuantizationArgs
from loguru import logger
from text_generation_server.layers.marlin.gptq import repack_gptq_for_marlin
from text_generation_server.utils.log import log_once
from text_generation_server.utils.weights import Weights, WeightsLoader
class WNA16IntLoader(WeightsLoader):
"""
Loader for W4A16/W8A16 INT compressed-tensors parameters.
"""
def __init__(self, weights: QuantizationArgs):
self.weights = weights
self.desc_act = self.weights.actorder == ActivationOrdering.GROUP
self.groupsize = (
-1 if self.weights.group_size is None else self.weights.group_size
)
def __str__(self) -> str:
quantization_type = f"W{self.weights.num_bits}A16"
return f"{self.__class__.__name__} ({quantization_type})"
def get_weights(self, weights: Weights, prefix: str):
log_once(logger.info, "Using GPTQ-Marlin kernels")
try:
weight_packed = weights.get_tensor(f"{prefix}.weight_packed").t()
except RuntimeError:
raise RuntimeError(
f"Cannot load w{self.weights.num_bits}a16 weight, make sure the model is already quantized"
)
zero_point = None
if not self.weights.symmetric:
zero_point = weights.get_tensor(f"{prefix}.weight_zero_point").t()
g_idx = None
if self.desc_act:
g_idx = weights.get_tensor(f"{prefix}.weight_g_idx")
        scales = weights.get_tensor(f"{prefix}.weight_scale").t()
return repack_gptq_for_marlin(
qweight=weight_packed.contiguous(),
scales=scales,
qzeros=zero_point,
g_idx=g_idx,
bits=self.weights.num_bits,
desc_act=self.desc_act,
groupsize=self.groupsize,
quant_method="compressed-tensors",
sym=self.weights.symmetric,
sharded_infeatures=False,
)
def get_weights_col_packed(
self,
weights: Weights,
prefix: str,
block_sizes: Union[int, List[int]],
):
try:
weight_packed = weights.get_packed_sharded(
f"{prefix}.weight_packed", dim=0, block_sizes=block_sizes
).t()
except RuntimeError:
raise RuntimeError(
f"Cannot load w{self.weights.num_bits}a16 weight, make sure the model is already quantized"
)
scales = weights.get_packed_sharded(
f"{prefix}.weight_scale", dim=0, block_sizes=block_sizes
).t()
scales = scales.to(dtype=weights.dtype)
zero_point = None
if not self.weights.symmetric:
zero_point = weights.get_packed_sharded(
f"{prefix}.qzeros", dim=0, block_sizes=block_sizes
).t()
g_idx = None
if self.desc_act:
g_idx = weights.get_tensor(f"{prefix}.g_idx")
return repack_gptq_for_marlin(
qweight=weight_packed.contiguous(),
scales=scales,
qzeros=zero_point,
g_idx=g_idx,
bits=self.weights.num_bits,
desc_act=self.desc_act,
groupsize=self.groupsize,
quant_method="compressed-tensors",
sym=self.weights.symmetric,
sharded_infeatures=False,
)
def get_multi_weights_col(self, weights: Weights, prefixes: List[str], dim: int):
try:
weight_packed = torch.cat(
[
weights.get_sharded(f"{p}.weight_packed", dim=0).t()
for p in prefixes
],
dim=1,
)
except RuntimeError:
raise RuntimeError(
f"Cannot load w{self.weights.num_bits}a16 weight, make sure the model is already quantized"
)
scales = torch.cat(
[weights.get_sharded(f"{p}.weight_scale", dim=0).t() for p in prefixes],
dim=1,
)
zero_point = None
if not self.weights.symmetric:
zero_point = torch.cat(
[weights.get_sharded(f"{p}.qzeros", dim=0).t() for p in prefixes], dim=1
).t()
g_idx = None
if self.desc_act:
w = [weights.get_tensor(f"{p}.g_idx") for p in prefixes]
for w2 in w[1:]:
torch.testing.assert_close(w2, w[0])
g_idx = w[0]
return repack_gptq_for_marlin(
qweight=weight_packed.contiguous(),
scales=scales,
qzeros=zero_point,
g_idx=g_idx,
bits=self.weights.num_bits,
desc_act=self.desc_act,
groupsize=self.groupsize,
quant_method="compressed-tensors",
sym=self.weights.symmetric,
sharded_infeatures=False,
)
def get_weights_row(self, weights: Weights, prefix: str):
log_once(logger.info, "Using GPTQ-Marlin kernels")
try:
weight_packed = weights.get_sharded(f"{prefix}.weight_packed", dim=1).t()
except RuntimeError:
raise RuntimeError(
f"Cannot load `{self.quantize}` weight, make sure the model is already quantized."
)
zero_point = None
if not self.weights.symmetric:
if self.desc_act or self.groupsize == -1:
zero_point = weights.get_tensor(f"{prefix}.weight_zero_point").t()
else:
zero_point = weights.get_sharded(
f"{prefix}.weight_zero_point", dim=1
).t()
g_idx = None
if self.desc_act:
g_idx = weights.get_sharded(f"{prefix}.g_idx", dim=0)
if self.desc_act or self.groupsize == -1:
scales = weights.get_tensor(f"{prefix}.weight_scale").t()
else:
scales = weights.get_sharded(f"{prefix}.weight_scale", dim=1).t()
sharded_in_features = weights.process_group.size() > 1
return repack_gptq_for_marlin(
qweight=weight_packed.contiguous(),
scales=scales,
qzeros=zero_point,
g_idx=g_idx,
bits=self.weights.num_bits,
desc_act=self.desc_act,
groupsize=self.groupsize,
quant_method="compressed-tensors",
sym=self.weights.symmetric,
sharded_infeatures=sharded_in_features,
)
| text-generation-inference/server/text_generation_server/layers/compressed_tensors/wna16_int.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/compressed_tensors/wna16_int.py",
"repo_id": "text-generation-inference",
"token_count": 3314
} |
from typing import TYPE_CHECKING, Optional, List
import torch
import torch.distributed
from torch import nn
from torch.distributed import ProcessGroup
from text_generation_server.utils.sgmv import (
add_lora_a_bgmv,
add_lora_b_bgmv,
has_sgmv,
lora_a_sgmv_cutlass,
lora_b_sgmv_cutlass,
orient_for_rank,
)
if TYPE_CHECKING:
from text_generation_server.adapters import AdapterBatchData
from text_generation_server.adapters.lora import BatchLoraWeights
class LoraLinear(nn.Module):
def __init__(
self, base_layer: nn.Module, layer_id: int, process_group: ProcessGroup
):
super().__init__()
self.base_layer = base_layer
self.layer_id = layer_id
self.process_group = process_group
def forward_layer_type(
self,
result: torch.Tensor,
input: torch.Tensor,
adapter_data: "AdapterBatchData",
layer_type: str,
start_idx: int,
end_idx: int,
) -> torch.Tensor:
if adapter_data is None:
return result
data: Optional["BatchLoraWeights"] = adapter_data.data.get(layer_type)
if has_sgmv() and data is not None and data.can_vectorize(self.process_group):
# In tensor-parallel configurations, each GPU processes a specific segment of the output.
# The 'result' tensor represents the full output, which can vary in size based on
# the layer type (e.g., attention vs. feed-forward layers). We define the current
# segment using start_idx and end_idx. If the segment size doesn't match this GPU's
# slice of 'result', we create a zero tensor of the correct size for LoRA computation.
# This approach ensures accurate LoRA application across various layer sizes and
# configurations, adapting to different model architectures and parallelization strategies.
#
# Example scenarios where this is necessary:
# 1. The adapter's size doesn't evenly divide across GPUs.
# 2. We're processing the last segment which might be smaller.
# 3. Different projection layers (q, k, v) have different sizes.
if end_idx - start_idx != result.shape[1]:
proj = torch.zeros_like(result[:, start_idx:end_idx])
else:
proj = result
for r, rank_segments in data.rank_data.items():
lora_a_ptr = rank_segments.lora_a_ptr
lora_b_ptr = rank_segments.lora_b_ptr
if lora_a_ptr is None or lora_b_ptr is None:
raise ValueError("LoRA data is missing")
if data.use_sgmv:
# Use SGMV for prefill
v = lora_a_sgmv_cutlass(
input,
rank_segments.tmp_shrink,
lora_a_ptr,
rank_segments.segment_starts,
rank_segments.segment_ends,
self.layer_id,
r,
)
if self.process_group.size() > 1:
v = self.collect_lora_a(v)
lora_b_sgmv_cutlass(
proj,
v,
rank_segments.tmp_expand,
lora_b_ptr,
rank_segments.segment_starts,
rank_segments.segment_ends,
self.layer_id,
)
else:
# Use BGMV for decode
v = torch.zeros(
(input.size(0), r), dtype=input.dtype, device=input.device
)
# TODO: error with [-1, 0], but not [0, -1]
add_lora_a_bgmv(
v,
input,
lora_a_ptr,
rank_segments.indices,
self.layer_id,
)
if self.process_group.size() > 1:
v = self.collect_lora_a(v)
add_lora_b_bgmv(
proj,
v,
lora_b_ptr,
rank_segments.indices,
self.layer_id,
)
if end_idx - start_idx != result.shape[1]:
result[:, start_idx:end_idx] += proj
else:
for adapter_index in adapter_data.meta.adapter_set:
if data is not None and data.has_adapter(adapter_index):
adapter_mask = (
(adapter_data.meta.adapter_indices == adapter_index)
.to(input.dtype)
.view(-1, 1)
)
layer_result = self.forward_lora(
input, data, adapter_index, adapter_mask
)
result[:, start_idx:end_idx] += layer_result
return result
def forward_lora(
self,
input: torch.Tensor,
data: "BatchLoraWeights",
adapter_index: int,
adapter_mask: torch.Tensor,
) -> torch.Tensor:
lora_a = data.lora_a[adapter_index][self.layer_id, :, :]
lora_b = data.lora_b[adapter_index][self.layer_id, :, :]
lora_a = orient_for_rank(lora_a, lora_b.size(0))
a_out = input @ lora_a
if self.process_group.size() > 1:
a_out = self.collect_lora_a(a_out)
result = (a_out @ lora_b) * adapter_mask
return result
def collect_lora_a(self, a_out: torch.Tensor) -> torch.Tensor:
raise NotImplementedError("Implemented in subclasses")
class TensorParallelMultiAdapterLinear(LoraLinear):
def __init__(
self,
base_layer: nn.Module,
layer_id: int,
layer_names: List[str],
sizes: List[int],
process_group: ProcessGroup,
):
super().__init__(base_layer, layer_id, process_group)
self.layer_names = layer_names
self.sizes = sizes
@classmethod
def load(
cls,
base_layer: nn.Module,
layer_id: int,
layer_names: List[str],
sizes: List[int],
process_group: ProcessGroup,
):
return TensorParallelMultiAdapterLinear(
base_layer, layer_id, layer_names, sizes, process_group
)
def forward(
self, input: torch.Tensor, adapter_data: "AdapterBatchData"
) -> torch.Tensor:
result = self.base_layer(input)
# noop if no layer names are provided (e.g. for models without adapters)
if self.layer_names is None:
return result
# handle models like Bloom that have inputs of shape
# (batch_size, sequence_length, hidden_size)
# we need to reshape them to (batch_size * sequence_length, hidden_size)
# for the LoRA computation, then reshape back
prev_shape = result.shape
is_3d = len(input.shape) >= 3
if is_3d:
input = input.reshape(-1, input.shape[-1])
result = result.reshape(-1, result.shape[-1])
offset = 0
for i, layer_name in enumerate(self.layer_names):
start_idx = offset // self.process_group.size()
# The 'sizes' parameter is essential in tensor-parallel setups for handling multiple
# projection layers (q_proj, k_proj, v_proj) by defining their output dimensions. It
# ensures correct slicing of the result tensor, accommodating variations like grouped-query
# attention where k_proj and v_proj differ from q_proj. This allows precise application of
# LoRA adapters to each sub-component of the multi-head attention mechanism, managing the
# different projection sizes across layers and model architectures.
if self.sizes is not None:
offset += self.sizes[i]
end_idx = offset // self.process_group.size()
else:
end_idx = result.shape[1]
result = self.forward_layer_type(
result, input, adapter_data, layer_name, start_idx, end_idx
)
if is_3d:
result = result.reshape(prev_shape)
return result
def collect_lora_a(self, a_out: torch.Tensor) -> torch.Tensor:
# Tensor parallel implementation of X @ A@B, where A and B are sharded column-wise.
# We use an all-gather between X@A and (X@A)@B to ensure alignment across ranks.
#
# TODO(travis): this is not very efficient as we do an all-gather for every adapter,
# instead we could pre-allocate a (B, a, r) tensor for all adapters with the same
# rank, compute `a_out` on each, and then slice them into the buffer as shown here:
# https://discuss.pytorch.org/t/concatenate-tensors-without-memory-copying/34609
gathered_tensors = [
torch.empty_like(a_out) for _ in range(self.process_group.size())
]
torch.distributed.all_gather(gathered_tensors, a_out)
return torch.cat(gathered_tensors, dim=1)
class TensorParallelAdapterRowLinear(LoraLinear):
def __init__(self, base_layer, layer_id, layer_name, process_group):
super().__init__(base_layer, layer_id, process_group)
self.layer_name = layer_name
@classmethod
def load(cls, base_layer, layer_id, layer_name, process_group):
return cls(base_layer, layer_id, layer_name, process_group)
def forward(
self, input: torch.Tensor, adapter_data: "AdapterBatchData"
) -> torch.Tensor:
result = self.base_layer(input)
if self.layer_name is None:
return result
# Fused all-gather + all-reduce from S-LoRA paper: https://arxiv.org/abs/2311.03285
stride = result.shape[-1] // self.process_group.size()
start_idx = self.process_group.rank() * stride
end_idx = (self.process_group.rank() + 1) * stride
self.forward_layer_type(
result, input, adapter_data, self.layer_name, start_idx, end_idx
)
return result
def collect_lora_a(self, a_out: torch.Tensor) -> torch.Tensor:
# Tensor parallel implementation of X @ A@B, where A and B are sharded row-wise.
# We use an all-reduce between X@A and (X@A)@B to ensure alignment across ranks.
#
# TODO(travis): this is not very efficient as we do an all-reduce for every adapter,
# instead we could pre-allocate a (B, a, r) tensor for all adapters with the same
# rank, compute `a_out` on each, and then slice them into the buffer as shown here:
# https://discuss.pytorch.org/t/concatenate-tensors-without-memory-copying/34609
torch.distributed.all_reduce(a_out, group=self.process_group)
return a_out
| text-generation-inference/server/text_generation_server/layers/lora.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/layers/lora.py",
"repo_id": "text-generation-inference",
"token_count": 5398
} |
# ruff: noqa: F821
# the above line disables the `undefined-name` rule for the model type variables
from compressed_tensors.compressors.model_compressors.model_compressor import (
QuantizationConfig,
)
from compressed_tensors.quantization import QuantizationType
from pydantic import ValidationError
import torch
import enum
import os
from loguru import logger
from transformers.configuration_utils import PretrainedConfig
from transformers.models.auto import modeling_auto
from huggingface_hub import hf_hub_download, HfApi
from typing import Optional, List, Dict
from pathlib import Path
import transformers
from text_generation_server.utils.speculate import get_speculate, set_speculate
from text_generation_server.models.model import Model
from text_generation_server.models.causal_lm import CausalLM, CausalLMBatchKeysLast
from text_generation_server.models.custom_modeling.opt_modeling import OPTForCausalLM
from text_generation_server.models.custom_modeling.mpt_modeling import (
MPTForCausalLM,
)
from text_generation_server.models.bloom import BloomCausalLMBatch
from text_generation_server.models.custom_modeling.bloom_modeling import (
BloomForCausalLM,
)
from text_generation_server.models.globals import ATTENTION
from text_generation_server.models.seq2seq_lm import Seq2SeqLM
from text_generation_server.models.galactica import GalacticaCausalLMBatch
from text_generation_server.models.custom_modeling.neox_modeling import (
GPTNeoxForCausalLM,
)
from text_generation_server.models.custom_modeling.phi_modeling import (
PhiConfig,
PhiForCausalLM,
)
from text_generation_server.models.custom_modeling.flash_phi_moe_modeling import (
PhiMoEConfig,
)
from text_generation_server.models.custom_modeling.t5_modeling import (
T5ForConditionalGeneration,
)
from text_generation_server.utils.adapter import (
AdapterParameters,
build_layer_weight_lookup,
load_and_merge_adapters,
AdapterInfo,
)
from text_generation_server.adapters.lora import LoraWeights
from text_generation_server.utils.import_utils import SYSTEM
from text_generation_server.utils.log import log_master
# The flag below controls whether to allow TF32 on matmul. This flag defaults to False
# in PyTorch 1.12 and later.
torch.backends.cuda.matmul.allow_tf32 = True
# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
torch.backends.cudnn.allow_tf32 = True
# Disable gradients
torch.set_grad_enabled(False)
__all__ = [
"Model",
"CausalLM",
"Seq2SeqLM",
"get_model_with_lora_adapters",
]
FLASH_ATT_ERROR_MESSAGE = "{} requires Flash Attention enabled models."
FLASH_ATTENTION = True
try:
from text_generation_server.models.flash_causal_lm import FlashCausalLM
from text_generation_server.models.vlm_causal_lm import VlmCausalLM
from text_generation_server.models.mllama_causal_lm import MllamaCausalLM
from text_generation_server.models.custom_modeling.flash_deepseek_v2_modeling import (
FlashDeepseekV2ForCausalLM,
DeepseekV2Config,
)
from text_generation_server.models.custom_modeling.flash_deepseek_v3_modeling import (
FlashDeepseekV3ForCausalLM,
DeepseekV3Config,
)
from text_generation_server.models.custom_modeling.flash_llama_modeling import (
FlashLlamaForCausalLM,
)
from text_generation_server.models.custom_modeling.flash_cohere_modeling import (
FlashCohereForCausalLM,
)
from text_generation_server.models.custom_modeling.flash_gemma_modeling import (
FlashGemmaForCausalLM,
)
from text_generation_server.models.custom_modeling.flash_gemma2_modeling import (
FlashGemma2ForCausalLM,
)
from text_generation_server.models.custom_modeling.flash_dbrx_modeling import (
FlashDbrxForCausalLM,
DbrxConfig,
)
from text_generation_server.models.custom_modeling.flash_rw_modeling import (
RWConfig,
FlashRWForCausalLM,
)
from text_generation_server.models.custom_modeling.flash_neox_modeling import (
FlashGPTNeoXForCausalLM,
)
from text_generation_server.models.pali_gemma import (
PaliGemmaBatch,
)
from text_generation_server.models.custom_modeling.flash_pali_gemma_modeling import (
PaliGemmaForConditionalGeneration,
)
from text_generation_server.models.custom_modeling.flash_phi_modeling import (
FlashPhiForCausalLM,
)
from text_generation_server.models.idefics_causal_lm import IdeficsCausalLM
from text_generation_server.models.mllama_causal_lm import MllamaCausalLMBatch
from text_generation_server.models.custom_modeling.mllama import (
MllamaForConditionalGeneration,
)
from text_generation_server.models.custom_modeling.llava_next import (
LlavaNextForConditionalGeneration,
)
from text_generation_server.models.custom_modeling.flash_santacoder_modeling import (
FlashSantacoderForCausalLM,
)
from text_generation_server.models.custom_modeling.flash_starcoder2_modeling import (
FlashStarcoder2ForCausalLM,
)
from text_generation_server.models.custom_modeling.flash_qwen2_modeling import (
Qwen2ForCausalLM,
)
from text_generation_server.models.custom_modeling.flash_mistral_modeling import (
FlashMistralForCausalLM,
)
from text_generation_server.models.custom_modeling.flash_mixtral_modeling import (
FlashMixtralForCausalLM,
)
from text_generation_server.models.custom_modeling.flash_gpt2_modeling import (
FlashGPT2ForCausalLM,
)
from text_generation_server.models.custom_modeling.flash_gptj_modeling import (
FlashGPTJForCausalLM,
)
from text_generation_server.models.custom_modeling.idefics2 import (
Idefics2ForConditionalGeneration,
)
from text_generation_server.models.custom_modeling.idefics3 import (
Idefics3ForConditionalGeneration,
)
from text_generation_server.models.custom_modeling.qwen2_vl import (
Qwen2VLForConditionalGeneration,
)
from text_generation_server.layers.attention import SUPPORTS_WINDOWING
except ImportError as e:
log_master(logger.warning, f"Could not import Flash Attention enabled models: {e}")
SUPPORTS_WINDOWING = False
FLASH_ATTENTION = False
if FLASH_ATTENTION:
__all__.append(FlashCausalLM)
__all__.append(IdeficsCausalLM)
MAMBA_AVAILABLE = True
try:
from text_generation_server.models.mamba import Mamba
except ImportError as e:
log_master(logger.warning, f"Could not import Mamba: {e}")
MAMBA_AVAILABLE = False
if MAMBA_AVAILABLE:
__all__.append(Mamba)
FLASH_TRANSFORMERS_BACKEND = torch.cuda.is_available()
try:
from text_generation_server.models.transformers_flash_causal_lm import (
TransformersFlashCausalLM,
)
except ImportError:
FLASH_TRANSFORMERS_BACKEND = False
class ModelType(enum.Enum):
DEEPSEEK_V2 = {
"type": "deepseek_v2",
"name": "Deepseek V2",
"url": "https://huggingface.co/deepseek-ai/DeepSeek-V2",
}
DEEPSEEK_V3 = {
"type": "deepseek_v3",
"name": "Deepseek V3",
"url": "https://huggingface.co/deepseek-ai/DeepSeek-V3",
}
IDEFICS2 = {
"type": "idefics2",
"name": "Idefics 2",
"url": "https://huggingface.co/HuggingFaceM4/idefics2-8b",
"multimodal": True,
}
IDEFICS3 = {
"type": "idefics3",
"name": "Idefics 3",
"url": "https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3",
"multimodal": True,
}
LLAVA_NEXT = {
"type": "llava_next",
"name": "Llava Next (1.6)",
"url": "https://huggingface.co/llava-hf/llava-v1.6-vicuna-13b-hf",
"multimodal": True,
}
LLAMA = {
"type": "llama",
"name": "Llama",
"url": "https://huggingface.co/collections/meta-llama/llama-31-669fc079a0c406a149a5738f",
}
PHI3 = {
"type": "phi3",
"name": "Phi 3",
"url": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct",
}
GRANITE = {
"type": "granite",
"name": "Granite",
"url": "https://huggingface.co/ibm-granite/granite-3.0-8b-instruct",
}
GEMMA = {
"type": "gemma",
"name": "Gemma",
"url": "https://huggingface.co/google/gemma-7b",
}
PALIGEMMA = {
"type": "paligemma",
"name": "PaliGemma",
"url": "https://huggingface.co/google/paligemma-3b-pt-224",
}
GEMMA2 = {
"type": "gemma2",
"name": "Gemma2",
"url": "https://huggingface.co/collections/google/gemma-2-release-667d6600fd5220e7b967f315",
}
COHERE = {
"type": "cohere",
"name": "Cohere",
"url": "https://huggingface.co/CohereForAI/c4ai-command-r-plus",
}
DBRX = {
"type": "dbrx",
"name": "Dbrx",
"url": "https://huggingface.co/databricks/dbrx-instruct",
}
MAMBA = {
"type": "mamba",
"name": "Mamba",
"url": "https://huggingface.co/state-spaces/mamba-2.8b-slimpj",
}
MISTRAL = {
"type": "mistral",
"name": "Mistral",
"url": "https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407",
}
MIXTRAL = {
"type": "mixtral",
"name": "Mixtral",
"url": "https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1",
}
GPT_BIGCODE = {
"type": "gpt_bigcode",
"name": "Gpt Bigcode",
"url": "https://huggingface.co/bigcode/gpt_bigcode-santacoder",
}
PHI = {
"type": "phi",
"name": "Phi",
"url": "https://huggingface.co/microsoft/phi-1_5",
}
PHI_MOE = {
"type": "phimoe",
"name": "PhiMoe",
"url": "https://huggingface.co/microsoft/Phi-3.5-MoE-instruct",
}
BAICHUAN = {
"type": "baichuan",
"name": "Baichuan",
"url": "https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat",
}
FALCON = {
"type": "falcon",
"name": "Falcon",
"url": "https://huggingface.co/tiiuae/falcon-7b-instruct",
}
STARCODER2 = {
"type": "starcoder2",
"name": "StarCoder 2",
"url": "https://huggingface.co/bigcode/starcoder2-15b-instruct-v0.1",
}
QWEN2 = {
"type": "qwen2",
"name": "Qwen 2",
"url": "https://huggingface.co/collections/Qwen/qwen2-6659360b33528ced941e557f",
}
QWEN2_VL = {
"type": "qwen2_vl",
"name": "Qwen 2 VL",
"url": "https://huggingface.co/collections/Qwen/qwen2-vl-66cee7455501d7126940800d",
}
OPT = {
"type": "opt",
"name": "Opt",
"url": "https://huggingface.co/facebook/opt-6.7b",
}
T5 = {
"type": "t5",
"name": "T5",
"url": "https://huggingface.co/google/flan-t5-xxl",
}
GALACTICA = {
"type": "galactica",
"name": "Galactica",
"url": "https://huggingface.co/facebook/galactica-120b",
}
SANTACODER = {
"type": "santacoder",
"name": "SantaCoder",
"url": "https://huggingface.co/bigcode/santacoder",
}
BLOOM = {
"type": "bloom",
"name": "Bloom",
"url": "https://huggingface.co/bigscience/bloom-560m",
}
MPT = {
"type": "mpt",
"name": "Mpt",
"url": "https://huggingface.co/mosaicml/mpt-7b-instruct",
}
GPT2 = {
"type": "gpt2",
"name": "Gpt2",
"url": "https://huggingface.co/openai-community/gpt2",
}
GPT_NEOX = {
"type": "gpt_neox",
"name": "Gpt Neox",
"url": "https://huggingface.co/EleutherAI/gpt-neox-20b",
}
GPTJ = {
"type": "gptj",
"name": "Gptj",
"url": "https://huggingface.co/EleutherAI/gpt-j-6b",
}
IDEFICS = {
"type": "idefics",
"name": "Idefics",
"url": "https://huggingface.co/HuggingFaceM4/idefics-9b",
"multimodal": True,
}
MLLAMA = {
"type": "mllama",
"name": "Mllama",
"url": "https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct",
"multimodal": True,
}
__GLOBALS = locals()
for data in ModelType:
__GLOBALS[data.name] = data.value["type"]
def get_model(
model_id: str,
lora_adapter_ids: Optional[List[str]],
revision: Optional[str],
sharded: bool,
quantize: Optional[str],
speculate: Optional[int],
dtype: Optional[str],
kv_cache_dtype: Optional[str],
trust_remote_code: bool,
max_input_tokens: int,
) -> Model:
global FLASH_ATTENTION
config_dict, _ = PretrainedConfig.get_config_dict(
model_id, revision=revision, trust_remote_code=trust_remote_code
)
model_type = config_dict.get("model_type", None)
quantization_config = config_dict.get("quantization_config", None)
if quantization_config is None:
quantization_config = config_dict.get("compression_config", None)
if quantization_config is not None and quantize is None:
method = quantization_config.get("quant_method", None)
if method in {"gptq", "awq", "exl2"}:
log_master(logger.info, f"Auto selecting quantization method {method}")
quantize = method
elif method == "fbgemm_fp8" or method == "fp8":
log_master(logger.info, "Auto selecting quantization method fp8")
quantize = "fp8"
if method == "compressed-tensors":
log_master(
logger.info, "Auto selecting quantization method compressed-tensors"
)
quantize = "compressed-tensors"
else:
log_master(logger.warning, f"Unknown quantization method {method}")
if dtype is None:
if quantize in ["awq", "exl2", "gptq", "marlin"]:
if SYSTEM == "ipex" and not (
hasattr(torch, "xpu") and torch.xpu.is_available()
):
dtype = torch.bfloat16
else:
# These quantizers only work with float16 params.
dtype = torch.float16
else:
# Keep it as default for now and let
# every model resolve their own default dtype.
dtype = None
elif dtype == "float16":
dtype = torch.float16
elif dtype == "bfloat16":
dtype = torch.bfloat16
else:
raise RuntimeError(f"Unknown dtype {dtype}")
compressed_tensors_config = None
if quantize == "compressed-tensors":
try:
compressed_tensors_config = QuantizationConfig.model_validate(
quantization_config
)
except ValidationError as e:
raise ValueError("Cannot parse compressed-tensors configuration") from e
if kv_cache_dtype is None:
kv_cache_scheme = (
compressed_tensors_config.kv_cache_scheme
if isinstance(compressed_tensors_config, QuantizationConfig)
else None
)
if (
kv_cache_scheme is not None
and kv_cache_scheme.type == QuantizationType.FLOAT
and kv_cache_scheme.num_bits == 8
and SYSTEM == "cuda"
and ATTENTION == "flashinfer"
):
kv_cache_dtype = torch.float8_e4m3fn
else:
kv_cache_dtype = dtype
elif kv_cache_dtype == "fp8_e4m3fn":
kv_cache_dtype = torch.float8_e4m3fn
elif kv_cache_dtype == "fp8_e5m2":
kv_cache_dtype = torch.float8_e5m2
else:
raise RuntimeError(f"Unknown kv_cache_dtype: {kv_cache_dtype}")
if speculate is not None:
set_speculate(speculate)
else:
set_speculate(0)
speculator = None
if "medusa_num_heads" in config_dict:
medusa_model_id = model_id
medusa_revision = revision
model_id = config_dict["base_model_name_or_path"]
revision = "main"
speculate_medusa = config_dict["medusa_num_heads"]
if speculate is not None:
if speculate > speculate_medusa:
raise RuntimeError(
f"Speculate is set to `{speculate}` but this medusa models only has `{speculate_medusa}` heads, please make them match"
)
else:
set_speculate(speculate)
else:
set_speculate(speculate_medusa)
config_dict, _ = PretrainedConfig.get_config_dict(
model_id, revision=revision, trust_remote_code=trust_remote_code
)
# Reload model type from parent.
model_type = config_dict.get("model_type", None)
is_local = Path(medusa_model_id).exists()
if not is_local:
medusa_config = hf_hub_download(
medusa_model_id, revision=medusa_revision, filename="config.json"
)
hf_hub_download(
medusa_model_id,
revision=medusa_revision,
filename="medusa_lm_head.safetensors",
)
speculator = {
"path": Path(medusa_config).parent,
"model_paths": ["medusa_lm_head.safetensors"],
}
else:
speculator = {
"path": Path(medusa_model_id),
"model_paths": ["medusa_lm_head.safetensors"],
}
method = "medusa"
elif model_type == "mlp_speculator":
mlp_model_id = model_id
mlp_revision = revision
model_id = config_dict["base_model_name_or_path"]
revision = "main"
speculate_mlp = config_dict["n_predict"]
if speculate is not None:
if speculate > speculate_mlp:
raise RuntimeError(
f"Speculate is set to `{speculate}` but this mlp_speculator models only has `{speculate_mlp}` heads, please make them match"
)
else:
set_speculate(speculate)
else:
set_speculate(speculate_mlp)
config_dict, _ = PretrainedConfig.get_config_dict(
model_id, revision=revision, trust_remote_code=trust_remote_code
)
# Reload model type from parent.
model_type = config_dict.get("model_type", None)
is_local = Path(mlp_model_id).exists()
extension = ".safetensors"
if not is_local:
mlp_speculator_config = hf_hub_download(
mlp_model_id, revision=mlp_revision, filename="config.json"
)
api = HfApi()
info = api.model_info(mlp_model_id, revision=mlp_revision)
filenames = [
s.rfilename
for s in info.siblings
if s.rfilename.endswith(extension)
and len(s.rfilename.split("/")) == 1
and "arguments" not in s.rfilename
and "args" not in s.rfilename
and "training" not in s.rfilename
]
for filename in filenames:
hf_hub_download(
mlp_model_id,
revision=mlp_revision,
filename=filename,
)
speculator_dir_path = Path(mlp_speculator_config).parent
# if these are downloaded, they get converted to safetensors
filenames.extend(
[p for p in os.listdir(speculator_dir_path) if p.endswith(extension)]
)
speculator = {
"path": Path(mlp_speculator_config).parent,
"model_paths": filenames,
}
else:
speculator = Path(mlp_model_id)
filenames = [p for p in os.listdir(speculator) if p.endswith(extension)]
speculator = {"path": speculator, "model_paths": filenames}
method = "mlp_speculator"
else:
method = "n-gram"
speculate = get_speculate()
if speculate > 0:
log_master(
logger.info, f"Using speculation {method} with {speculate} input ids."
)
if model_type is None:
# TODO: fix how we determine model type for Mamba
if "ssm_cfg" in config_dict:
# *only happens in Mamba case
model_type = "mamba"
else:
raise RuntimeError(
f"Could not determine model type for {model_id} revision {revision}"
)
if quantize == "exl2" and sharded:
raise RuntimeError(
"Sharding is currently not supported with `exl2` quantization"
)
sliding_window = (
config_dict.get("sliding_window")
if config_dict.get("sliding_window") is not None
else -1
)
use_sliding_window = sliding_window is not None and sliding_window != -1
needs_sliding_window = (
max_input_tokens is not None and max_input_tokens > sliding_window
)
if use_sliding_window and needs_sliding_window and not SUPPORTS_WINDOWING:
raise ValueError(
f"The backend {SYSTEM} does not support sliding window attention that is used by the model type {model_type}. To use this model nonetheless with the {SYSTEM} backend, please launch TGI with the argument `--max-input-tokens` smaller than sliding_window={sliding_window} (got here max_input_tokens={max_input_tokens})."
)
if model_type == DEEPSEEK_V2:
if FLASH_ATTENTION:
head_size = max(
config_dict.get("qk_nope_dim", 128)
+ config_dict.get("qk_rope_dim", 64),
config_dict.get("v_head_dim", 128),
)
return FlashCausalLM(
model_id=model_id,
model_class=FlashDeepseekV2ForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
default_dtype=torch.bfloat16,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
config_class=DeepseekV2Config,
head_size=head_size,
)
elif sharded:
raise NotImplementedError(
FLASH_ATT_ERROR_MESSAGE.format("Sharded Deepseek V2")
)
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif model_type == DEEPSEEK_V3:
if FLASH_ATTENTION:
head_size = max(
config_dict.get("qk_nope_dim", 128)
+ config_dict.get("qk_rope_dim", 64),
config_dict.get("v_head_dim", 128),
)
return FlashCausalLM(
model_id=model_id,
model_class=FlashDeepseekV3ForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
default_dtype=torch.bfloat16,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
config_class=DeepseekV3Config,
head_size=head_size,
)
elif sharded:
raise NotImplementedError(
FLASH_ATT_ERROR_MESSAGE.format("Sharded Deepseek V3")
)
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif model_type == MAMBA:
return Mamba(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif model_type == "ssm":
raise RuntimeError(
"`ssm` models have been deprecated in favor of `mamba` models, which follow standard HF formats. Check out a list here: https://huggingface.co/models?search=mamba%20-hf"
)
if model_id.startswith("facebook/galactica"):
return CausalLM(
model_id=model_id,
# Yes galactica is just an OPT model.
model_class=OPTForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
batch_class=GalacticaCausalLMBatch,
)
if (
model_type == GPT_BIGCODE
or model_type == GPT2
and model_id.startswith("bigcode/")
):
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=FlashSantacoderForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
aliases={"transformer.wte.weight": ["lm_head.weight"]},
num_kv_heads=1,
)
elif sharded:
raise NotImplementedError(
FLASH_ATT_ERROR_MESSAGE.format("Sharded Santacoder")
)
else:
            return CausalLM.fallback(
model_id=model_id,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if model_type == BLOOM:
return CausalLM(
model_id=model_id,
model_class=BloomForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
batch_class=BloomCausalLMBatch,
)
elif model_type == MPT:
return CausalLM(
model_id=model_id,
model_class=MPTForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
batch_class=CausalLMBatchKeysLast,
)
elif model_type == GPT2:
if FLASH_ATTENTION:
try:
return FlashCausalLM(
model_id=model_id,
model_class=FlashGPT2ForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
except RuntimeError as e:
# Lots of legacy models with various weight names.
log_master(logger.warning, f"Couldn't load flash gpt2 variant: {e}")
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif sharded:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded GPT-2"))
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif model_type == GPTJ:
if FLASH_ATTENTION:
try:
return FlashCausalLM(
model_id=model_id,
model_class=FlashGPTJForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
except RuntimeError as e:
# Lots of legacy models with various weight names.
log_master(logger.warning, f"Couldn't load flash gptj variant: {e}")
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif sharded:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded GPT-J"))
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif model_type == GPT_NEOX:
if FLASH_ATTENTION:
from text_generation_server.models.custom_modeling.flash_neox_modeling import (
GPTNeoXConfig,
)
return FlashCausalLM(
model_id=model_id,
model_class=FlashGPTNeoXForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
config_class=GPTNeoXConfig,
)
elif sharded:
return CausalLM(
model_id=model_id,
model_class=GPTNeoxForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif model_type == PHI:
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=FlashPhiForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
else:
return TransformersFlashCausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif model_type == PHI_MOE:
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=FlashLlamaForCausalLM,
config_class=PhiMoEConfig,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif model_type == "phi-msft":
if FLASH_ATTENTION:
raise NotImplementedError(
"Legacy phi-msft is not supported with Flash Attention"
)
else:
return CausalLM(
model_id=model_id,
model_class=PhiForCausalLM,
config_class=PhiConfig,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif model_type == LLAMA or model_type == PHI3 or model_type == GRANITE:
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=FlashLlamaForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
elif FLASH_TRANSFORMERS_BACKEND:
return TransformersFlashCausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif sharded:
raise NotImplementedError(
FLASH_ATT_ERROR_MESSAGE.format(f"Sharded {model_type}")
)
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif model_type == BAICHUAN:
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=FlashLlamaForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
elif sharded:
raise NotImplementedError(
FLASH_ATT_ERROR_MESSAGE.format(f"Sharded {model_type}")
)
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if model_type == GEMMA:
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=FlashGemmaForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
# Works better for these models
default_dtype=torch.bfloat16,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
elif FLASH_TRANSFORMERS_BACKEND:
return TransformersFlashCausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif sharded:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Gemma"))
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif model_type == GEMMA2:
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=FlashGemma2ForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
# Works better for these models
default_dtype=torch.bfloat16,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
elif sharded:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Gemma2"))
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if model_type == COHERE:
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=FlashCohereForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
elif FLASH_TRANSFORMERS_BACKEND:
return TransformersFlashCausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif sharded:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Cohere"))
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if model_type == DBRX:
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=FlashDbrxForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
# Dbrx works better in bfloat16.
default_dtype=torch.bfloat16,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
config_class=DbrxConfig,
)
elif sharded:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded DBRX"))
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if model_type in ["RefinedWeb", "RefinedWebModel", FALCON]:
if sharded:
if FLASH_ATTENTION:
if config_dict.get("alibi", False):
raise NotImplementedError("sharded is not supported for this model")
return FlashCausalLM(
model_id=model_id,
model_class=FlashRWForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
aliases={
"lm_head.weight": ["transformer.word_embeddings.weight"],
"transformer.word_embeddings.weight": ["lm_head.weight"],
},
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
config_class=RWConfig,
)
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Falcon"))
else:
if FLASH_ATTENTION and not config_dict.get("alibi", False):
return FlashCausalLM(
model_id=model_id,
model_class=FlashRWForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
aliases={
"lm_head.weight": ["transformer.word_embeddings.weight"],
"transformer.word_embeddings.weight": ["lm_head.weight"],
},
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
config_class=RWConfig,
)
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if model_type == MISTRAL:
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=FlashMistralForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
elif FLASH_TRANSFORMERS_BACKEND:
return TransformersFlashCausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif sharded:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Mistral"))
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if model_type == MIXTRAL:
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=FlashMixtralForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
elif FLASH_TRANSFORMERS_BACKEND:
return TransformersFlashCausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif sharded:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Mixtral"))
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if model_type == STARCODER2:
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=FlashStarcoder2ForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
elif FLASH_TRANSFORMERS_BACKEND:
return TransformersFlashCausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif sharded:
raise NotImplementedError(
FLASH_ATT_ERROR_MESSAGE.format("Sharded Starcoder2")
)
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if model_type == QWEN2:
if FLASH_ATTENTION:
return FlashCausalLM(
model_id=model_id,
model_class=Qwen2ForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
elif FLASH_TRANSFORMERS_BACKEND:
return TransformersFlashCausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
elif sharded:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Sharded Qwen2"))
else:
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if model_type == OPT:
return CausalLM(
model_id=model_id,
model_class=OPTForCausalLM,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if model_type == T5:
return Seq2SeqLM(
model_id=model_id,
model_class=T5ForConditionalGeneration,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
aliases={
"shared.weight": [
"encoder.embed_tokens.weight",
"decoder.embed_tokens.weight",
]
},
)
if model_type == IDEFICS:
if FLASH_ATTENTION:
return IdeficsCausalLM(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
else:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics"))
if model_type == QWEN2_VL:
return VlmCausalLM(
model_id=model_id,
model_class=Qwen2VLForConditionalGeneration,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
default_dtype=torch.bfloat16,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
if model_type == MLLAMA:
if FLASH_ATTENTION:
return MllamaCausalLM(
model_id=model_id,
model_class=MllamaForConditionalGeneration,
batch_class=MllamaCausalLMBatch,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
default_dtype=torch.bfloat16,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
)
else:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Mllama"))
if model_type == IDEFICS2:
if FLASH_ATTENTION:
return VlmCausalLM(
model_id=model_id,
model_class=Idefics2ForConditionalGeneration,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
# XXX: Extremely important to cap resolution in order to limit
# VRAM usage.
processor_kwargs={"size": {"longest_edge": 448, "shortest_edge": 378}},
)
else:
            raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics2"))
if model_type == IDEFICS3:
if FLASH_ATTENTION:
return VlmCausalLM(
model_id=model_id,
model_class=Idefics3ForConditionalGeneration,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
default_dtype=torch.bfloat16,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
# XXX: Extremely important to cap resolution in order to limit
# VRAM usage.
processor_kwargs={"size": {"longest_edge": 1456}},
)
else:
            raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("Idefics3"))
if model_type == PALIGEMMA:
if FLASH_ATTENTION:
return VlmCausalLM(
model_id=model_id,
model_class=PaliGemmaForConditionalGeneration,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
# Works better for these models
default_dtype=torch.bfloat16,
trust_remote_code=trust_remote_code,
lora_adapter_ids=lora_adapter_ids,
batch_class=PaliGemmaBatch,
)
else:
            raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("PaliGemma"))
if model_type == LLAVA_NEXT:
if FLASH_ATTENTION:
return VlmCausalLM(
model_class=LlavaNextForConditionalGeneration,
model_id=model_id,
revision=revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
kv_cache_dtype=kv_cache_dtype,
trust_remote_code=trust_remote_code,
)
else:
raise NotImplementedError(FLASH_ATT_ERROR_MESSAGE.format("LlavaNext"))
if quantize == "gptq":
raise NotImplementedError(
"gptq quantization is not supported for AutoModel, you can try to quantize it with `text-generation-server quantize ORIGINAL_MODEL_ID NEW_MODEL_ID`"
)
if quantize == "awq":
raise NotImplementedError("awq quantization is not supported for AutoModel")
elif (quantize == "bitsandbytes-fp4") or (quantize == "bitsandbytes-nf4"):
raise NotImplementedError("4bit quantization is not supported for AutoModel")
elif quantize == "eetq":
raise NotImplementedError("Eetq quantization is not supported for AutoModel")
elif quantize == "exl2":
raise NotImplementedError("exl2 quantization is not supported for AutoModel")
    # Fast transformers (flex-attention) backend, if available
transformers_model_class = getattr(
transformers,
modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.get(model_type, ""),
None,
)
if (
FLASH_TRANSFORMERS_BACKEND
and transformers_model_class is not None
and transformers_model_class._supports_flex_attn
):
return TransformersFlashCausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if sharded:
raise NotImplementedError("sharded is not supported for AutoModel")
if model_type in modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES:
return Seq2SeqLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
auto_map = config_dict.get("auto_map", None)
if trust_remote_code and auto_map is not None:
if "AutoModelForCausalLM" in auto_map.keys():
return CausalLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
if "AutoModelForSeq2SeqLM" in auto_map.keys():
return Seq2SeqLM.fallback(
model_id,
revision,
quantize=quantize,
speculator=speculator,
dtype=dtype,
trust_remote_code=trust_remote_code,
)
raise ValueError(f"Unsupported model type {model_type}")
# get_model_with_lora_adapters wraps the internal get_model function and adds support for loading adapters.
# It acts as a post-loading hook: once the base model has been created, the requested adapters are loaded into it.
def get_model_with_lora_adapters(
model_id: str,
lora_adapters: Optional[List[AdapterInfo]],
revision: Optional[str],
sharded: bool,
quantize: Optional[str],
speculate: Optional[int],
dtype: Optional[str],
kv_cache_dtype: Optional[str],
trust_remote_code: bool,
max_input_tokens: int,
adapter_to_index: Dict[str, int],
):
lora_adapter_ids = [adapter.id for adapter in lora_adapters]
model = get_model(
model_id,
lora_adapter_ids,
revision,
sharded,
quantize,
speculate,
dtype,
kv_cache_dtype,
trust_remote_code,
max_input_tokens,
)
if len(lora_adapters) > 0:
target_to_layer = build_layer_weight_lookup(model.model)
for index, adapter in enumerate(lora_adapters):
# The AdapterParameters object allows for merging multiple adapters into a single adapter.
# At the moment, we only support loading a single adapter into the model, but we keep the
# AdapterParameters object for easier extension in the future.
adapter_parameters = AdapterParameters(
adapter_info=[adapter],
# when merging multiple adapters we can weight them differently
# if this is not set, all adapters will be weighted equally
# see: text_generation_server.utils.merges.strategies for impl
weights=None,
merge_strategy=0,
density=1.0,
majority_sign_method=0,
)
adapter_index = index + 1
adapter_to_index[adapter.id] = adapter_index
logger.info(
f"Loading adapter weights into model: {','.join([adapter.id for adapter in adapter_parameters.adapter_info])}"
)
weight_names = tuple([v[0] for v in target_to_layer.values()])
(
module_map,
adapter_config,
adapter_weight_names,
adapter_tokenizer,
) = load_and_merge_adapters(
model.model_id,
adapter_parameters,
adapter_index,
weight_names,
False,
)
unused_weight_names = adapter_weight_names.copy()
adapter_layers = [
"q_proj",
"k_proj",
"v_proj",
"o_proj",
"gate_proj",
"up_proj",
"down_proj",
"qkv_proj",
# add c_* layers used in starcoder2
"c_proj",
"c_fc",
]
for layer_name in adapter_layers:
nlayers = (
1 if layer_name == "lm_head" else len(model.model.model.layers)
)
adapter_weights = LoraWeights.prepare_weights(
config=adapter_config,
module_map=module_map,
layer_type=layer_name,
unused_weight_names=unused_weight_names,
nlayers=nlayers,
dtype=model.dtype,
world_size=model.world_size,
process_group=model.process_group,
target_to_layer=target_to_layer,
)
if adapter_weights is None:
continue
model.layer_to_adapter_weights[layer_name].add_adapter(
adapter_index, adapter_weights
)
if len(unused_weight_names) > 0:
logger.warning(
f"{','.join([a.id for a in lora_adapters])} unused adapter weights: {unused_weight_names}"
)
if adapter_tokenizer is not None:
model.tokenizers.add_tokenizer(adapter_index, adapter_tokenizer)
model.loaded_adapters.add(adapter_index)
return model
| text-generation-inference/server/text_generation_server/models/__init__.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/__init__.py",
"repo_id": "text-generation-inference",
"token_count": 31346
} |
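
For orientation, here is a minimal sketch of how the dispatch defined in the file above might be invoked. The argument order mirrors the `get_model(...)` call made inside `get_model_with_lora_adapters`; the model id and token budget are placeholder values, not settings taken from this repository.

```py
# Hypothetical invocation of get_model; all values below are illustrative placeholders.
model = get_model(
    "mistralai/Mistral-7B-Instruct-v0.2",  # model_id (placeholder)
    [],      # lora_adapter_ids: no LoRA adapters in this sketch
    None,    # revision
    False,   # sharded
    None,    # quantize
    None,    # speculate
    None,    # dtype (let the server choose its default)
    None,    # kv_cache_dtype
    False,   # trust_remote_code
    4096,    # max_input_tokens (placeholder)
)
```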
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Type
import torch
import torch.distributed
from torch import nn
from transformers.configuration_utils import PretrainedConfig
from text_generation_server.layers import (
FastLinear,
SpeculativeHead,
TensorParallelColumnLinear,
TensorParallelEmbedding,
TensorParallelRowLinear,
get_linear,
)
from text_generation_server.layers.attention import (
Seqlen,
attention,
paged_attention,
)
from text_generation_server.layers.attention.kv_cache import get_kv_scales
from text_generation_server.layers.layernorm import FastRMSNorm
from text_generation_server.layers.moe import DenseMoELayer, MoELayer, SparseMoELayer
from text_generation_server.layers.rotary import PositionRotaryEmbedding
from text_generation_server.utils.weights import UnquantizedWeight
class MixtralConfig(PretrainedConfig):
model_type = "mixtral"
def __init__(
self,
vocab_size=32000,
hidden_size=4096,
intermediate_size=14336,
num_hidden_layers=32,
num_attention_heads=32,
num_key_value_heads=8,
hidden_act="silu",
max_position_embeddings=4096 * 32,
initializer_range=0.02,
rms_norm_eps=1e-05,
use_cache=True,
pad_token_id=None,
bos_token_id=1,
eos_token_id=2,
pretraining_tp=1,
tie_word_embeddings=False,
rope_theta=10000.0,
sliding_window=None,
num_experts_per_tok=2,
num_local_experts=8,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.sliding_window = sliding_window
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.pretraining_tp = pretraining_tp
self.use_cache = use_cache
self.rope_theta = rope_theta
self.num_experts_per_tok = num_experts_per_tok
self.num_local_experts = num_local_experts
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
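# View a 0-dim (scalar) tensor as a 1-element tensor; higher-rank tensors pass through unchanged.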
def promote_scalar(x: torch.Tensor) -> torch.Tensor:
return x.view(1) if len(x.size()) == 0 else x
def load_attention(config, prefix: str, weights):
if config.num_attention_heads != config.num_key_value_heads:
return _load_gqa(config, prefix, weights)
else:
return TensorParallelColumnLinear.load_multi(
config,
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
weights=weights,
bias=False,
)
def _load_gqa(config, prefix: str, weights):
assert config.hidden_size % config.num_attention_heads == 0
assert config.num_attention_heads % weights.process_group.size() == 0
weight = weights.get_multi_weights_col(
prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"],
dim=0,
)
if isinstance(weight, UnquantizedWeight):
weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device)
head_size = config.hidden_size // config.num_attention_heads
num_heads = config.num_attention_heads // weights.process_group.size()
num_key_value_heads = config.num_key_value_heads // weights.process_group.size()
assert list(weight.weight.shape) == [
(num_heads + 2 * num_key_value_heads) * head_size,
config.hidden_size,
], f"{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}"
return TensorParallelColumnLinear(get_linear(weight, bias=None))
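# Load one expert projection ("w1"/"w3"/"w2") for all local experts and shard it across ranks
# along the intermediate dimension (rows for w1/w3, columns for w2, which is transposed),
# stacking the per-expert slices into a single (num_local_experts * block_size, hidden_size) tensor.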
def _load_experts(config, prefix: str, mat, weights):
if config.quantize is not None:
raise NotImplementedError("Mixtral does not support weight quantization yet.")
assert mat in ["w1", "w2", "w3"]
world_size = weights.process_group.size()
rank = weights.process_group.rank()
assert (
config.intermediate_size % world_size == 0
), f"The chosen size {config.intermediate_size} is not compatible with sharding on {world_size} shards"
block_size = config.intermediate_size // world_size
start = rank * block_size
stop = (rank + 1) * block_size
tensor = torch.empty(
(config.num_local_experts * block_size, config.hidden_size),
dtype=weights.dtype,
device=weights.device,
)
for i in range(config.num_local_experts):
slice_ = weights._get_slice(f"{prefix}.{i}.{mat}.weight")
if mat == "w2":
expert_slice = slice_[:, start:stop].t().contiguous()
else:
expert_slice = slice_[start:stop]
tensor[i * block_size : (i + 1) * block_size] = expert_slice.to(
dtype=weights.dtype
).to(device=weights.device)
return tensor
class MixtralAttention(torch.nn.Module):
def __init__(
self,
prefix: str,
config,
weights,
):
super().__init__()
self.max_past = (
config.sliding_window if config.sliding_window is not None else -1
)
self.num_heads = config.num_attention_heads
self.hidden_size = config.hidden_size
self.head_size = self.hidden_size // self.num_heads
self.rotary_emb = PositionRotaryEmbedding.static(
config=config,
dim=self.head_size,
base=config.rope_theta,
device=weights.device,
)
self.softmax_scale = self.head_size**-0.5
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.num_key_value_heads = (
config.num_key_value_heads // weights.process_group.size()
)
self.query_key_value = load_attention(config, prefix, weights)
self.kv_scales = get_kv_scales(weights, f"{prefix}")
self.o_proj = TensorParallelRowLinear.load(
config,
prefix=f"{prefix}.o_proj",
weights=weights,
bias=False,
)
self.num_groups = self.num_heads // self.num_key_value_heads
self.kv_head_mapping = torch.arange(
0, self.num_key_value_heads, dtype=torch.int32, device=weights.device
).repeat_interleave(self.num_groups)
def forward(
self,
hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
):
qkv = self.query_key_value(hidden_states)
query, kv = qkv.split(
[
self.head_size * self.num_heads,
2 * self.head_size * self.num_key_value_heads,
],
dim=1,
)
query = query.view(-1, self.num_heads, self.head_size)
kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size)
self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin)
if prefill_cache_indices is not None:
kv_to_cache = kv[prefill_cache_indices]
else:
kv_to_cache = kv
kv_cache.store(
key=kv_to_cache[:, 0],
value=kv_to_cache[:, 1],
slots=slots,
kv_scales=self.kv_scales,
)
# Prefill
if cu_seqlen_prefill is not None:
# flash attention
attn_output = attention(
query=query,
key=kv_to_cache[:, 0],
value=kv_to_cache[:, 1],
kv_cache=kv_cache,
kv_scales=self.kv_scales,
seqlen=seqlen,
block_tables=block_tables,
softmax_scale=self.softmax_scale,
window_size_left=self.max_past,
)
# Decode
else:
attn_output = paged_attention(
query,
kv_cache,
self.kv_head_mapping,
self.softmax_scale,
block_tables,
seqlen,
max_s,
kv_scales=self.kv_scales,
)
return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size))
@torch.jit.script
def select_experts(gate_logits: torch.Tensor, top_k: int):
# all_probs: (sequence_length, n_experts) and upcast for softmax
all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float)
# weights, selected_experts: (sequence_length, top-k)
weights, selected_experts = torch.topk(all_probs, top_k, dim=-1)
weights /= weights.sum(dim=-1, keepdim=True)
weights = weights.view(-1)
selected_experts = selected_experts.view(-1)
return selected_experts, weights
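# Round each element of `x` up to the nearest multiple of `value` (integer ceiling).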
@torch.jit.script
def round_up(x: torch.Tensor, value: int):
return torch.div(x + (value - 1), value, rounding_mode="trunc") * value
class MixtralMoE(nn.Module):
def __init__(
self, prefix, config: MixtralConfig, moe_layer_cls: Type[MoELayer], weights
):
super().__init__()
# gating
self.gate = FastLinear.load(config, f"{prefix}.gate", weights, bias=False)
self.moe = moe_layer_cls(
n_expert_group=None,
n_experts=config.num_local_experts,
prefix=f"{prefix}.experts",
renormalize=True,
topk=config.num_experts_per_tok,
topk_group=None,
weights=weights,
gate_proj_name="w1",
up_proj_name="w3",
down_proj_name="w2",
)
assert isinstance(self.moe, MoELayer)
self.process_group = weights.process_group
def forward(self, x: torch.Tensor) -> torch.Tensor:
# router_logits: (num_tokens, n_experts)
router_logits = self.gate(x)
out = self.moe(x, gating_output=router_logits)
# Reduce sum
if self.process_group.size() > 1:
torch.distributed.all_reduce(out, group=self.process_group)
return out.view(*x.shape)
class MixtralLayer(nn.Module):
def __init__(self, prefix: str, layer_id, config, weights):
super().__init__()
prefix = f"{prefix}.layers.{layer_id}"
self.self_attn = MixtralAttention(
prefix=f"{prefix}.self_attn", config=config, weights=weights
)
moe_layer_cls = (
SparseMoELayer if SparseMoELayer.is_supported(weights) else DenseMoELayer
)
self.moe = MixtralMoE(
f"{prefix}.block_sparse_moe", config, moe_layer_cls, weights
)
self.input_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps
)
self.post_attention_layernorm = FastRMSNorm.load(
prefix=f"{prefix}.post_attention_layernorm",
weights=weights,
eps=config.rms_norm_eps,
)
def forward(
self,
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
):
normed_hidden_states, res = self.input_layernorm(hidden_states, residual)
# Self Attention
attn_output = self.self_attn(
normed_hidden_states,
cos,
sin,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
)
# faster post attention rms norm
normed_attn_res_output, attn_res = self.post_attention_layernorm(
attn_output, res
)
moe_output = self.moe(normed_attn_res_output)
return moe_output, attn_res
class MixtralModel(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
self.embed_tokens = TensorParallelEmbedding(
prefix=(
"model.embed_tokens" if not prefix else f"{prefix}.model.embed_tokens"
),
weights=weights,
)
self.layers = nn.ModuleList(
[
MixtralLayer(
"model" if not prefix else f"{prefix}.model",
layer_id,
config,
weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
self.norm = FastRMSNorm.load(
prefix="model.norm" if not prefix else f"{prefix}.model.norm",
weights=weights,
eps=config.rms_norm_eps,
)
self.head_size = self.layers[0].self_attn.head_size
self.num_heads = self.layers[0].self_attn.num_heads
self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
true_max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
) -> torch.Tensor:
hidden_states = self.embed_tokens(input_ids)
# Get rotary cos and sin for this forward
        # Avoid indexing in each layer
cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(
position_ids, true_max_s, hidden_states.dtype
)
residual = None
for i, layer in enumerate(self.layers):
hidden_states, residual = layer(
hidden_states,
residual,
cos,
sin,
cu_seqlen_prefill,
kv_cache[i],
block_tables,
slots,
seqlen,
max_s,
prefill_cache_indices,
)
hidden_states, _ = self.norm(hidden_states, residual)
return hidden_states
class FlashMixtralForCausalLM(torch.nn.Module):
def __init__(self, prefix: str, config, weights):
super().__init__()
self.model = MixtralModel(prefix, config, weights)
self.lm_head = SpeculativeHead.load(
config,
prefix="lm_head" if not prefix else f"{prefix}.lm_head",
weights=weights,
)
self.max_past = config.sliding_window
self.max_past_tensor = (
torch.tensor(config.sliding_window, device=weights.device)
if self.max_past is not None
else None
)
def forward(
self,
input_ids: torch.Tensor,
position_ids: torch.Tensor,
cu_seqlen_prefill: Optional[torch.Tensor],
kv_cache: List[Tuple[torch.Tensor, torch.Tensor]],
block_tables: torch.Tensor,
slots: torch.Tensor,
seqlen: Seqlen,
max_s: int,
prefill_cache_indices: Optional[torch.Tensor],
lm_head_indices: Optional[torch.Tensor] = None,
adapter_data: Optional[torch.Tensor] = None,
) -> torch.Tensor:
true_max_s = max_s
if prefill_cache_indices is not None:
# Slots also need to be sliced as it has the same size as the whole kv tensor
slots = slots[prefill_cache_indices]
elif self.max_past is not None:
# Clamp in decode mode as paged attention requires clamped values whereas the flash attention
# kernel requires the true values
seqlen = seqlen.clamp(max=self.max_past_tensor)
hidden_states = self.model(
input_ids,
position_ids,
cu_seqlen_prefill,
kv_cache,
block_tables,
slots,
seqlen,
max_s,
true_max_s,
prefill_cache_indices,
)
if lm_head_indices is not None:
hidden_states = hidden_states[lm_head_indices]
logits = self.lm_head(hidden_states)
return logits
| text-generation-inference/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py",
"repo_id": "text-generation-inference",
"token_count": 8660
} |
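
The `select_experts` helper in the Mixtral modeling file above implements standard top-k routing: softmax over the router logits, keep the top-k probabilities per token, then renormalize so the kept weights sum to one (the original additionally flattens both outputs). A small self-contained sketch of the same computation, with made-up logits:

```py
import torch

# Hypothetical router logits for 2 tokens over 4 experts.
gate_logits = torch.tensor([[2.0, 0.5, 0.1, -1.0],
                            [0.0, 1.0, 3.0, 0.5]])
top_k = 2

all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float)
weights, selected_experts = torch.topk(all_probs, top_k, dim=-1)
weights /= weights.sum(dim=-1, keepdim=True)  # renormalize over the kept experts

# Each row of `selected_experts` names the experts a token is routed to,
# and the matching row of `weights` now sums to 1.
print(selected_experts)  # e.g. tensor([[0, 1], [2, 1]])
print(weights)
```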
# coding=utf-8
# Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch IdeficsVision model: a copy of CLIPVisionModel using a simpler config object"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from transformers.utils import (
ModelOutput,
logging,
)
from text_generation_server.layers import (
TensorParallelColumnLinear,
TensorParallelRowLinear,
TensorParallelEmbedding,
)
logger = logging.get_logger(__name__)
@dataclass
class IdeficsVisionModelOutput(ModelOutput):
"""
    Base class for vision model outputs that also contains image embeddings obtained by pooling the last hidden states.
    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->Idefics
class IdeficsVisionEmbeddings(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(
weights.get_tensor(f"{prefix}.class_embedding")
)
self.patch_embedding = nn.Conv2d.load_no_bias(
prefix=f"{prefix}.patch_embedding",
weights=weights,
in_channels=config.num_channels,
out_channels=self.embed_dim,
kernel_size=self.patch_size,
stride=self.patch_size,
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = TensorParallelEmbedding(
prefix="model.vision_model.embeddings.position_embedding", weights=weights
)
self.position_ids = (
torch.arange(self.num_positions).expand((1, -1)).to(device=weights.device)
)
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
batch_size = pixel_values.shape[0]
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(
pixel_values.to(dtype=target_dtype)
) # shape = [*, width, grid, grid]
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
# Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->IdeficsVision
class IdeficsVisionAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, prefix, config, weights):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
if self.num_heads % weights.process_group.size() != 0:
raise ValueError(
f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} "
f"and `num_shards`: {weights.process_group.size()}"
)
self.num_heads = self.num_heads // weights.process_group.size()
self.embed_dim = self.embed_dim // weights.process_group.size()
self.k_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.k_proj", weights=weights, bias=True
)
self.v_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.v_proj", weights=weights, bias=True
)
self.q_proj = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.q_proj", weights=weights, bias=True
)
self.out_proj = TensorParallelRowLinear.load(
config, prefix=f"{prefix}.out_proj", weights=weights, bias=True
)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return (
tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
.transpose(1, 2)
.contiguous()
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scale
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
# apply the causal_attention_mask first
if causal_attention_mask is not None:
if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
f" {causal_attention_mask.size()}"
)
attn_weights = (
attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ causal_attention_mask
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = (
attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attention_mask
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights has to be reshaped
            # twice and has to be reused in the following
attn_weights_reshaped = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
)
attn_weights = attn_weights_reshaped.view(
bsz * self.num_heads, tgt_len, src_len
)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(
attn_weights, p=self.dropout, training=self.training
)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->IdeficsVision
class IdeficsVisionMLP(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = TensorParallelColumnLinear.load(
config, prefix=f"{prefix}.fc1", weights=weights, bias=True
)
self.fc2 = TensorParallelRowLinear.load(
config, prefix=f"{prefix}.fc2", weights=weights, bias=True
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->IdeficsVision
class IdeficsVisionEncoderLayer(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = IdeficsVisionAttention(
prefix=f"{prefix}.self_attn", config=config, weights=weights
)
self.layer_norm1 = nn.LayerNorm.load(
prefix=f"{prefix}.layer_norm1", weights=weights, eps=config.layer_norm_eps
)
self.mlp = IdeficsVisionMLP(
prefix=f"{prefix}.mlp", config=config, weights=weights
)
self.layer_norm2 = nn.LayerNorm.load(
prefix=f"{prefix}.layer_norm2", weights=weights, eps=config.layer_norm_eps
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
causal_attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
causal_attention_mask=causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->IdeficsVision
class IdeficsVisionEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`IdeficsVisionEncoderLayer`].
Args:
config: IdeficsVisionConfig
"""
def __init__(self, prefix, config, weights):
super().__init__()
self.config = config
self.layers = nn.ModuleList(
[
IdeficsVisionEncoderLayer(
prefix=f"{prefix}.encoder.layers.{layer_id}",
config=config,
weights=weights,
)
for layer_id in range(config.num_hidden_layers)
]
)
# self.gradient_checkpointing = False
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
causal_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Causal mask for the text model. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# if self.gradient_checkpointing and self.training:
# def create_custom_forward(module):
# def custom_forward(*inputs):
# return module(*inputs, output_attentions)
# return custom_forward
# layer_outputs = torch.utils.checkpoint.checkpoint(
# create_custom_forward(encoder_layer),
# hidden_states,
# attention_mask,
# causal_attention_mask,
# )
# else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, encoder_states, all_attentions]
if v is not None
)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_states,
attentions=all_attentions,
)
# Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer
class IdeficsVisionTransformer(nn.Module):
def __init__(self, prefix, config, weights):
super().__init__()
self.config = config
self.embeddings = IdeficsVisionEmbeddings(
prefix=f"{prefix}.embeddings", config=config, weights=weights
)
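        # Note: "pre_layrnorm" (sic) matches the misspelled attribute/weight name used by the
        # upstream CLIP implementation, so the prefix must be kept as-is to load checkpoints.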
self.pre_layrnorm = nn.LayerNorm.load(
prefix=f"{prefix}.pre_layrnorm", weights=weights, eps=config.layer_norm_eps
)
self.encoder = IdeficsVisionEncoder(
prefix=prefix, config=config, weights=weights
)
self.post_layernorm = nn.LayerNorm.load(
prefix=f"{prefix}.post_layernorm",
weights=weights,
eps=config.layer_norm_eps,
)
# copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPooling]:
r"""
Returns:
"""
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.embeddings(pixel_values)
hidden_states = self.pre_layrnorm(hidden_states)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
| text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_vision.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_vision.py",
"repo_id": "text-generation-inference",
"token_count": 9625
} |
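
A quick sanity check on the embedding arithmetic in `IdeficsVisionEmbeddings` from the file above: a square image of side `image_size`, cut into non-overlapping `patch_size` patches, yields `(image_size // patch_size) ** 2` patch tokens plus one class token. The config values below are made up purely to illustrate the counts.

```py
# Hypothetical vision-config values, chosen only to illustrate the arithmetic.
image_size = 224
patch_size = 14

num_patches = (image_size // patch_size) ** 2   # 16 * 16 = 256 patch tokens
num_positions = num_patches + 1                 # +1 for the class embedding

# The position-embedding table needs `num_positions` rows, and the embeddings
# returned by forward() have shape (batch_size, num_positions, hidden_size).
assert (num_patches, num_positions) == (256, 257)
```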
import torch
import torch.distributed
from transformers import AutoTokenizer, PreTrainedTokenizerBase
from typing import Optional, Union
from text_generation_server.models.custom_modeling.mamba_modeling import (
MambaConfig,
)
from loguru import logger
from text_generation_server.pb import generate_pb2
from text_generation_server.utils import (
initialize_torch_distributed,
weight_files,
Weights,
)
from text_generation_server.models.globals import CUDA_GRAPHS, MEM_POOL
import time
from text_generation_server.models.custom_modeling.mamba_modeling import (
MambaModel,
InferenceParams,
)
from text_generation_server.models import Model
from typing import Any, List, Tuple, Type, Dict
from text_generation_server.models.types import (
Batch,
Tokens,
Generation,
GeneratedText,
)
from text_generation_server.utils.chunks import concat_text_chunks
from text_generation_server.utils.quantization import get_loader
from text_generation_server.utils.tokens import batch_top_tokens, Sampling
from dataclasses import dataclass
from text_generation_server.utils import NextTokenChooser, StoppingCriteria
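# Allocate zero-initialized per-layer convolution and SSM state tensors
# (shapes [n_blocks, batch_size, d_inner, d_conv] and [n_blocks, batch_size, d_inner, d_state])
# and wrap them in an InferenceParams object for Mamba decoding.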
def new_inference_params(
n_blocks: int,
batch_size: int,
d_inner: int,
d_conv: int,
d_state: int,
seqlen_offset: int,
dtype: torch.dtype,
device: torch.device,
):
max_seqlen = 0
conv_states = torch.zeros(
(
n_blocks,
batch_size,
d_inner,
d_conv,
),
device=device,
dtype=dtype,
)
ssm_states = torch.zeros(
(
n_blocks,
batch_size,
d_inner,
d_state,
),
device=device,
dtype=dtype,
)
inference_params = InferenceParams(
max_seqlen=max_seqlen,
max_batch_size=batch_size,
seqlen_offset=seqlen_offset,
conv_states=conv_states,
ssm_states=ssm_states,
)
return inference_params
@dataclass
class MambaBatch(Batch):
batch_id: int
requests: List[generate_pb2.Request]
requests_idx_mapping: Dict[int, int]
# Decoder values
input_ids: torch.Tensor
# All tokens
all_input_ids: List[torch.Tensor]
# Lengths of all generations present in the batch
input_lengths: List[int]
prefix_offsets: List[int]
read_offsets: List[int]
# Generation helpers
next_token_choosers: List[NextTokenChooser]
stopping_criterias: List[StoppingCriteria]
top_n_tokens: List[int]
top_n_tokens_tensor: torch.Tensor
# Metadata used for padding
max_input_length: int
padding_right_offset: int
# Maximum number of tokens this batch will grow to
max_tokens: int
# Past metadata
keys_head_dim_last: bool = True
# Inference params
inference_params: Optional[Dict[str, Any]] = None
def to_pb(self) -> generate_pb2.CachedBatch:
return generate_pb2.CachedBatch(
id=self.batch_id,
request_ids=[r.id for r in self.requests],
size=len(self),
max_tokens=self.max_tokens,
current_tokens=len(self),
)
@classmethod
def from_pb(
cls,
pb: generate_pb2.Batch,
tokenizer: PreTrainedTokenizerBase,
dtype: torch.dtype,
device: torch.device,
) -> "MambaBatch":
inputs = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
prefix_offsets = []
read_offsets = []
requests_idx_mapping = {}
# Parse batch
max_truncation = 0
padding_right_offset = 0
max_decode_tokens = 0
for i, r in enumerate(pb.requests):
requests_idx_mapping[r.id] = i
inputs.append(concat_text_chunks(r.input_chunks.chunks))
next_token_choosers.append(
NextTokenChooser.from_pb(r.parameters, device, tokenizer)
)
stopping_criteria = StoppingCriteria.from_pb(
r.stopping_parameters, tokenizer
)
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(r.top_n_tokens)
max_truncation = max(max_truncation, r.truncate)
max_decode_tokens += stopping_criteria.max_new_tokens
padding_right_offset = max(
padding_right_offset, stopping_criteria.max_new_tokens
)
tokenized_inputs = tokenizer(
inputs,
return_tensors="pt",
padding=True,
return_token_type_ids=False,
truncation=True,
max_length=max_truncation,
).to(device)
for _ in pb.requests:
input_len = tokenized_inputs["input_ids"].shape[1]
prefix_offsets.append(input_len - 5)
read_offsets.append(input_len)
input_lengths = tokenized_inputs["attention_mask"].sum(1)
max_input_length = input_lengths.max()
input_ids = tokenized_inputs["input_ids"]
all_input_ids = tokenized_inputs["input_ids"].T.split(1, dim=1)
top_n_tokens_tensor = torch.tensor(
top_n_tokens, device=device, dtype=torch.int64
)
max_tokens = len(inputs) * (max_input_length + max_decode_tokens)
return cls(
batch_id=pb.id,
requests=pb.requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=input_ids,
# past_input_ids=None,
all_input_ids=list(all_input_ids),
input_lengths=input_lengths.tolist(),
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length.item(),
padding_right_offset=padding_right_offset,
max_tokens=max_tokens,
)
def filter(self, request_ids: List[int]) -> Optional["MambaBatch"]:
if len(request_ids) == 0:
raise ValueError("Batch must have at least one request")
if len(request_ids) == len(self):
return self
keep_indices = []
# New values after filtering
requests_idx_mapping = {}
requests = []
input_lengths = []
prefix_offsets = []
read_offsets = []
all_input_ids = []
max_input_length = 0
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
total_remaining_decode_tokens = 0
new_padding_right_offset = 0
indices = []
for i, request_id in enumerate(request_ids):
idx = self.requests_idx_mapping[request_id]
requests_idx_mapping[request_id] = i
keep_indices.append(idx)
requests.append(self.requests[idx])
prefix_offsets.append(self.prefix_offsets[idx])
read_offsets.append(self.read_offsets[idx])
all_input_ids.append(self.all_input_ids[idx])
request_input_length = self.input_lengths[idx]
input_lengths.append(request_input_length)
max_input_length = max(max_input_length, request_input_length)
indices.append(idx)
next_token_choosers.append(self.next_token_choosers[idx])
stopping_criteria = self.stopping_criterias[idx]
stopping_criterias.append(stopping_criteria)
top_n_tokens.append(self.top_n_tokens[idx])
remaining_decode_tokens = (
stopping_criteria.max_new_tokens - stopping_criteria.current_tokens
)
total_remaining_decode_tokens += remaining_decode_tokens
new_padding_right_offset = max(
new_padding_right_offset, remaining_decode_tokens
)
# Apply indices to input_ids, attention mask, past key values and other items that need to be cached
input_ids = self.input_ids[keep_indices]
top_n_tokens_tensor = self.top_n_tokens_tensor[keep_indices]
max_tokens = len(request_ids) * max_input_length + total_remaining_decode_tokens
self.requests = requests
self.requests_idx_mapping = requests_idx_mapping
self.input_ids = input_ids
self.all_input_ids = all_input_ids
self.input_lengths = input_lengths
self.prefix_offsets = prefix_offsets
self.read_offsets = read_offsets
self.next_token_choosers = next_token_choosers
self.stopping_criterias = stopping_criterias
self.top_n_tokens = top_n_tokens
self.top_n_tokens_tensor = top_n_tokens_tensor
self.max_input_length = max_input_length
self.padding_right_offset = new_padding_right_offset
self.max_tokens = max_tokens
        # TODO
        # Kept it simple by just updating the state; updating the other CPU values may also be necessary.
self.inference_params.conv_states = self.inference_params.conv_states[
:, indices
]
self.inference_params.ssm_states = self.inference_params.ssm_states[:, indices]
return self
@classmethod
def concatenate(cls, batches: List["MambaBatch"]) -> "MambaBatch":
# Used for padding
total_batch_size = 0
max_input_length = 0
padding_right_offset = 0
for batch in batches:
total_batch_size += len(batch)
max_input_length = max(max_input_length, batch.max_input_length)
padding_right_offset = max(padding_right_offset, batch.padding_right_offset)
# Batch attributes
requests = []
requests_idx_mapping = {}
input_lengths = []
prefix_offsets = []
read_offsets = []
all_input_ids = []
next_token_choosers = []
stopping_criterias = []
top_n_tokens = []
max_tokens = 0
seqlen_offset = 0
(n_blocks, _, d_inner, d_conv) = batches[0].inference_params.conv_states.shape
(_, _, _, d_state) = batches[0].inference_params.ssm_states.shape
dtype = batches[0].inference_params.conv_states.dtype
device = batches[0].inference_params.conv_states.device
inference_params = new_inference_params(
n_blocks=n_blocks,
batch_size=total_batch_size,
d_state=d_state,
d_conv=d_conv,
d_inner=d_inner,
seqlen_offset=seqlen_offset,
device=device,
dtype=dtype,
)
# Batch tensors
input_ids = None
top_n_tokens_tensor = None
# Used for slicing correctly inside the tensors
# Equivalent to a cumsum on batch sizes
start_index = 0
for i, batch in enumerate(batches):
requests.extend(batch.requests)
input_lengths.extend(batch.input_lengths)
prefix_offsets.extend(batch.prefix_offsets)
read_offsets.extend(batch.read_offsets)
all_input_ids.extend(batch.all_input_ids)
next_token_choosers.extend(batch.next_token_choosers)
stopping_criterias.extend(batch.stopping_criterias)
top_n_tokens.extend(batch.top_n_tokens)
if i == 0:
requests_idx_mapping = batch.requests_idx_mapping
else:
# We need to offset the mapping for each batch by the cumulative batch size
for k, v in batch.requests_idx_mapping.items():
requests_idx_mapping[k] = v + start_index
# Slicing end index for this batch
end_index = start_index + len(batch)
# Create empty tensor
# input_ids is always of shape [batch_size, 1]
# We do not need to pad it
if input_ids is None:
input_ids = batch.input_ids.new_empty((total_batch_size, 1))
# Copy to correct indices
input_ids[start_index:end_index] = batch.input_ids
if top_n_tokens_tensor is None:
top_n_tokens_tensor = batches[0].top_n_tokens_tensor.new_zeros(
total_batch_size,
)
top_n_tokens_tensor[start_index:end_index] = batch.top_n_tokens_tensor
# Add eventual padding tokens that were added while concatenating
max_tokens += batch.max_tokens + (
max_input_length - batch.max_input_length
) * len(batch)
inference_params.max_seqlen = max(
inference_params.max_seqlen, batch.inference_params.max_seqlen
)
assert batch.inference_params.seqlen_offset != 0, "Invalid seqlen offset"
inference_params.seqlen_offset = max(
inference_params.seqlen_offset, batch.inference_params.seqlen_offset
)
inference_params.conv_states[:, start_index:end_index] = (
batch.inference_params.conv_states
)
inference_params.ssm_states[:, start_index:end_index] = (
batch.inference_params.ssm_states
)
start_index = end_index
return cls(
batch_id=batches[0].batch_id,
requests=requests,
requests_idx_mapping=requests_idx_mapping,
input_ids=input_ids,
all_input_ids=all_input_ids,
input_lengths=input_lengths,
prefix_offsets=prefix_offsets,
read_offsets=read_offsets,
next_token_choosers=next_token_choosers,
stopping_criterias=stopping_criterias,
top_n_tokens=top_n_tokens,
top_n_tokens_tensor=top_n_tokens_tensor,
max_input_length=max_input_length,
padding_right_offset=padding_right_offset,
keys_head_dim_last=batches[0].keys_head_dim_last,
max_tokens=max_tokens,
inference_params=inference_params,
)
def __len__(self):
return len(self.requests)
class Mamba(Model):
def __init__(
self,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
speculator: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
):
self.quantize = quantize
self.process_group, _rank, world_size = initialize_torch_distributed()
if world_size > 1:
raise RuntimeError("Mamba does not support Tensor Parallelism (TP)")
self.cuda_graphs = {}
if torch.cuda.is_available():
device = torch.device("cuda")
            # Bf16 is important. In f16, accumulations in the matmul cause
            # differences while the server is under load.
            # This is detectable by the integration load test.
dtype = torch.bfloat16 if dtype is None else dtype
else:
if quantize:
raise ValueError("quantization is not available on CPU")
device = torch.device("cpu")
dtype = torch.float32 if dtype is None else dtype
tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b",
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
config = MambaConfig.from_pretrained(
model_id, revision=revision, trust_remote_code=trust_remote_code
)
tokenizer.bos_token_id = config.bos_token_id
tokenizer.eos_token_id = config.eos_token_id
tokenizer.pad_token = tokenizer.eos_token
config.quantize = quantize
config.speculator = speculator
torch.distributed.barrier(group=self.process_group)
weights_loader = get_loader(
quantize=quantize, model_id=model_id, revision=revision
)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
weights = Weights(
filenames,
device,
dtype,
process_group=self.process_group,
weights_loader=weights_loader,
)
model = MambaModel(config, weights)
torch.distributed.barrier(group=self.process_group)
super(Mamba, self).__init__(
model_id=model_id,
model=model,
tokenizer=tokenizer,
requires_padding=True,
dtype=dtype,
device=device,
)
@property
def batch_type(self) -> Type[MambaBatch]:
return MambaBatch
def warmup(
self, batch, max_input_tokens: Optional[int], max_total_tokens: Optional[int]
    ) -> Tuple[Optional[int], int, int]:
# TODO: implement warmup for Mamba if needed
if CUDA_GRAPHS:
if self.speculate is None or self.speculate == 0:
try:
logger.info(f"Cuda Graphs are enabled for sizes {CUDA_GRAPHS}")
# Warmup cuda graphs
for bs in CUDA_GRAPHS:
self.cuda_graph_warmup(bs)
except Exception:
logger.exception("Decode cuda graph warmup failed")
else:
logger.info(f"Cuda Graphs are disabled (CUDA_GRAPHS={CUDA_GRAPHS}).")
if max_total_tokens is None:
max_total_tokens = min(self.tokenizer.model_max_length, 4096)
if max_input_tokens is None:
max_input_tokens = max_total_tokens - 1
return None, max_input_tokens, max_total_tokens
def cuda_graph_warmup(self, batch_size: int):
input_ids = torch.zeros((batch_size, 1), dtype=torch.int64, device=self.device)
n_blocks = len(self.model.blocks)
d_state = self.model.config.d_state
d_conv = self.model.config.d_conv
        # d_inner already includes the expand multiplication (d_inner = expand * d_model)
d_inner = self.model.config.d_inner
        # A non-zero seqlen_offset is important so the forward pass goes through the state update mechanism
seqlen_offset = 1
inference_params = new_inference_params(
n_blocks=n_blocks,
batch_size=batch_size,
d_state=d_state,
d_conv=d_conv,
d_inner=d_inner,
seqlen_offset=seqlen_offset,
device=self.device,
dtype=self.dtype,
)
graph = torch.cuda.CUDAGraph()
torch.cuda.synchronize()
# Run once outside to warmup
self.model.forward(input_ids=input_ids, inference_params=inference_params)
torch.cuda.synchronize()
with torch.cuda.graph(graph, pool=MEM_POOL):
logits, speculative_logits = self.model.forward(
input_ids=input_ids, inference_params=inference_params
)
torch.cuda.synchronize()
graph_dict = {
"input_ids": input_ids,
"inference_params": inference_params,
"graph": graph,
"logits": logits,
"speculative_logits": speculative_logits,
}
self.cuda_graphs[batch_size] = graph_dict
def tunableop_warmup(self, batch_size: int, seqlen: int):
input_ids = torch.zeros((batch_size, 1), dtype=torch.int64, device=self.device)
n_blocks = len(self.model.blocks)
d_state = self.model.config.d_state
d_conv = self.model.config.d_conv
        # d_inner already includes the expand multiplication (d_inner = expand * d_model)
d_inner = self.model.config.d_inner
        # A non-zero seqlen_offset is important so the forward pass goes through the state update mechanism
seqlen_offset = 1
inference_params = new_inference_params(
n_blocks=n_blocks,
batch_size=seqlen,
d_state=d_state,
d_conv=d_conv,
d_inner=d_inner,
seqlen_offset=seqlen_offset,
device=self.device,
dtype=self.dtype,
)
self.model.forward(input_ids=input_ids, inference_params=inference_params)
def forward(
self, input_ids: torch.Tensor, inference_params: Any
) -> Tuple[torch.Tensor, torch.Tensor]:
bs = input_ids.shape[0]
padded_bs = bs
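        # Cuda graphs are only captured for a fixed set of padded batch sizes
        # (see `cuda_graph_warmup`), so round the runtime batch size up to the
        # nearest captured size before looking up a graph.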
if bs == 3:
padded_bs = 4
elif 3 < bs <= 8:
padded_bs = 8
elif bs > 8:
padded_bs = (bs + 7) // 8 * 8
# Try to find an associated cuda graph
cuda_graph = self.cuda_graphs.get(padded_bs, None)
is_prefill = inference_params is None or inference_params.seqlen_offset == 0
if is_prefill or cuda_graph is None:
return self.model(
input_ids,
inference_params=inference_params,
)
# Copy inputs to the static inputs of the cuda graph
# Static inputs are potentially padded
cuda_graph["input_ids"][:bs] = input_ids
cuda_graph["inference_params"].conv_states[
:, :bs
] = inference_params.conv_states
cuda_graph["inference_params"].ssm_states[:, :bs] = inference_params.ssm_states
# Replay the graph
cuda_graph["graph"].replay()
inference_params.conv_states.copy_(
cuda_graph["inference_params"].conv_states[:, :bs]
)
inference_params.ssm_states.copy_(
cuda_graph["inference_params"].ssm_states[:, :bs]
)
# Slice output to the correct shape
speculative_logits = (
cuda_graph["speculative_logits"][:bs]
if cuda_graph["speculative_logits"] is not None
else None
)
logits = cuda_graph["logits"][:bs]
return logits, speculative_logits
def generate_token(self, batch) -> Tuple[List[Any], Optional[Any], Tuple[int, int]]:
start = time.time_ns()
input_ids = (
batch.input_ids
) # batch.past_input_ids if batch.past_input_ids is not None else batch.input_ids
batch_size, max_seqlen = input_ids.shape
# Inference params
if batch.inference_params is None:
            # seqlen_offset = 0 is important here: it marks this pass as a prefill
seqlen_offset = 0
n_blocks = len(self.model.blocks)
d_state = self.model.config.d_state
d_conv = self.model.config.d_conv
d_inner = self.model.config.d_inner
inference_params = new_inference_params(
n_blocks=n_blocks,
batch_size=batch_size,
d_state=d_state,
d_conv=d_conv,
d_inner=d_inner,
seqlen_offset=seqlen_offset,
device=self.device,
dtype=self.dtype,
)
batch.inference_params = inference_params
# Forward pass
logits, speculative_logits = self.forward(
input_ids, inference_params=batch.inference_params
)
# batch.inference_params = new_inference_params
# Results
generations: List[Generation] = []
stopped = True
# Speculation is not active for causal
accepted_ids = torch.ones_like(batch.input_ids)[:, 0]
batch_top_token_ids, batch_top_token_logprobs = batch_top_tokens(
batch.top_n_tokens,
batch.top_n_tokens_tensor,
torch.log_softmax(logits[:, -1], -1),
accepted_ids,
)
start_decode = time.time_ns()
# Zipped iterator
iterator = zip(
batch.requests,
batch.input_lengths,
batch.prefix_offsets,
batch.read_offsets,
logits,
batch.next_token_choosers,
batch.stopping_criterias,
batch.all_input_ids,
batch.top_n_tokens,
batch_top_token_ids,
batch_top_token_logprobs,
)
# For each member of the batch
for i, (
request,
input_length,
prefix_offset,
read_offset,
logits,
next_token_chooser,
stopping_criteria,
all_input_ids,
top_n_tokens,
top_token_ids,
top_token_logprobs,
) in enumerate(iterator):
# Select next token
next_token_id, logprobs = next_token_chooser(
all_input_ids.view(1, -1), logits[-1:, :]
)
# Append next token to all tokens
all_input_ids = torch.cat([all_input_ids, next_token_id])
new_input_length = input_length + 1
# Generated token
next_token_logprob = logprobs[-1, next_token_id]
next_token_id_squeezed = next_token_id.squeeze()
next_token_text, prefix_offset, read_offset = self.decode_token(
all_input_ids[:, 0], prefix_offset, read_offset
)
# Evaluate stopping criteria
stop, reason = stopping_criteria(
next_token_id_squeezed,
next_token_text,
)
if not stop:
stopped = False
# Shard generations
# All generations will be appended in the rust sharded client
if i % self.world_size == self.rank:
if stop:
# Decode generated tokens
output_text, _, _ = self.decode_token(
all_input_ids[:, 0],
prefix_offset=len(all_input_ids)
- stopping_criteria.current_tokens
- 1,
read_offset=len(all_input_ids)
- stopping_criteria.current_tokens,
skip_special_tokens=True,
)
# Get seed
if isinstance(next_token_chooser.choice, Sampling):
seed = next_token_chooser.choice.seed
else:
seed = None
generated_text = GeneratedText(
output_text, stopping_criteria.current_tokens, reason, seed
)
else:
generated_text = None
if stopping_criteria.current_tokens == 1 and request.prefill_logprobs:
# Remove generated token to only have prefill and add nan for first prompt token
prefill_logprobs = [float("nan")] + torch.log_softmax(
logits, -1
).gather(1, all_input_ids[1:]).squeeze(1)[
-new_input_length:-1
].tolist()
prefill_token_ids = all_input_ids[-new_input_length:-1]
prefill_texts = self.tokenizer.batch_decode(
prefill_token_ids,
clean_up_tokenization_spaces=False,
skip_special_tokens=False,
)
prefill_tokens = Tokens(
prefill_token_ids,
prefill_logprobs,
prefill_texts,
is_special=[],
)
else:
prefill_tokens = None
if top_n_tokens > 0:
toptoken_texts = self.tokenizer.batch_decode(
top_token_ids,
clean_up_tokenization_spaces=False,
skip_special_tokens=False,
)
special_toptokens = [
token_id in self.all_special_ids for token_id in top_token_ids
]
top_tokens = Tokens(
top_token_ids,
top_token_logprobs,
toptoken_texts,
special_toptokens,
)
else:
top_tokens = None
generation = Generation(
request.id,
prefill_tokens,
Tokens(
[next_token_id_squeezed],
[next_token_logprob],
[next_token_text],
[next_token_id_squeezed.item() in self.all_special_ids],
),
generated_text,
top_tokens,
)
generations.append(generation)
# Update values
batch.next_token_choosers[i] = batch.next_token_choosers[
i
].advance_grammar(next_token_id_squeezed.item())
batch.input_ids[i, 0] = next_token_id
batch.all_input_ids[i] = all_input_ids
batch.input_lengths[i] = new_input_length
batch.prefix_offsets[i] = prefix_offset
batch.read_offsets[i] = read_offset
batch.max_input_length = max(batch.max_input_length, new_input_length)
# We finished all generations in the batch; there is no next batch
if stopped:
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, None, (forward_ns, decode_ns)
# Slice unused values from prefill
batch.input_ids = batch.input_ids[:, :1]
forward_ns = start_decode - start
decode_ns = time.time_ns() - start_decode
return generations, batch, (forward_ns, decode_ns)
| text-generation-inference/server/text_generation_server/models/mamba.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/mamba.py",
"repo_id": "text-generation-inference",
"token_count": 15065
} |
import os
import torch
from torch.distributed import ProcessGroup
from datetime import timedelta
from loguru import logger
from text_generation_server.utils.import_utils import SYSTEM
# Tensor Parallelism settings
RANK = int(os.getenv("RANK", "0"))
WORLD_SIZE = int(os.getenv("WORLD_SIZE", "1"))
# CUDA memory fraction
MEMORY_FRACTION = float(os.getenv("CUDA_MEMORY_FRACTION", "1.0"))
class FakeBarrier:
def wait(self):
pass
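# Minimal stand-in for a real process group, returned when WORLD_SIZE == 1 (or DEBUG=1)
# so callers can issue collective ops without initializing torch.distributed.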
class FakeGroup(ProcessGroup):
def __init__(self, rank, size):
self._rank = rank
self._size = size
super().__init__(rank, size)
def allreduce(self, *args, **kwargs):
return FakeBarrier()
def allgather(self, inputs, local_tensor, **kwargs):
assert (
len(inputs[0]) == len(local_tensor) == 1
), f"{len(inputs[0])} != {len(local_tensor)} != 1, and the FakeGroup is supposed to join on simple tensors"
for input_ in inputs:
input_[0].data = local_tensor[0].data
return FakeBarrier()
def barrier(self, *args, **kwargs):
return FakeBarrier()
def size(self):
return self._size
def rank(self):
return self._rank
def initialize_torch_distributed():
if torch.cuda.is_available():
from torch.distributed import ProcessGroupNCCL
# Set the device id.
assert WORLD_SIZE <= torch.cuda.device_count(), "Each process is one gpu"
device = RANK % torch.cuda.device_count()
torch.cuda.set_device(device)
torch.cuda.set_per_process_memory_fraction(MEMORY_FRACTION, device)
backend = "nccl"
options = ProcessGroupNCCL.Options()
options.is_high_priority_stream = True
options._timeout = timedelta(seconds=120)
else:
backend = "gloo"
options = None
if WORLD_SIZE == 1:
return FakeGroup(RANK, WORLD_SIZE), RANK, WORLD_SIZE
else:
if os.getenv("DEBUG", None) == "1":
return FakeGroup(RANK, WORLD_SIZE), RANK, WORLD_SIZE
if not torch.distributed.is_initialized():
# Call the init process.
if SYSTEM == "ipex":
import intel_extension_for_pytorch as ipex
ipex.distributed.init_process_group(
backend="ccl",
world_size=WORLD_SIZE,
rank=RANK,
timeout=timedelta(seconds=120),
pg_options=options,
)
else:
device = torch.device(f"cuda:{RANK}")
torch.distributed.init_process_group(
backend=backend,
world_size=WORLD_SIZE,
rank=RANK,
timeout=timedelta(seconds=120),
pg_options=options,
device_id=device,
)
else:
logger.warning("torch.distributed is already initialized.")
return torch.distributed.group.WORLD, RANK, WORLD_SIZE
| text-generation-inference/server/text_generation_server/utils/dist.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/dist.py",
"repo_id": "text-generation-inference",
"token_count": 1414
} |
[package]
authors = ["Nicolas Patry <[email protected]>"]
edition = "2021"
name = "node"
version = "0.21.0-dev.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
crate-type = ["cdylib"]
[dependencies]
napi = "2"
napi-derive = "2"
serde = { version = "1.0.163", features = ["derive"] }
tokenizers = { path = "../../tokenizers/" }
[build-dependencies]
napi-build = "2"
[profile.release]
lto = true
| tokenizers/bindings/node/Cargo.toml/0 | {
"file_path": "tokenizers/bindings/node/Cargo.toml",
"repo_id": "tokenizers",
"token_count": 200
} |
import { prependNormalizer, stripAccentsNormalizer, stripNormalizer } from '../../'
describe('stripNormalizer', () => {
it('instantiates with no parameters', () => {
const normalizer = stripNormalizer()
expect(normalizer.constructor.name).toEqual('Normalizer')
})
it('accepts `undefined` as first parameter', () => {
expect(stripNormalizer(undefined)).toBeDefined()
})
it('accepts `undefined` as second parameter', () => {
expect(stripNormalizer(false, undefined)).toBeDefined()
})
it('instantiates with one parameter', () => {
const normalizer = stripNormalizer(false)
expect(normalizer.constructor.name).toEqual('Normalizer')
})
it('instantiates with two parameters', () => {
const normalizer = stripNormalizer(false, true)
expect(normalizer.constructor.name).toEqual('Normalizer')
})
it('prepend instantiates with one parameter', () => {
const normalizer = prependNormalizer('_')
expect(normalizer.constructor.name).toEqual('Normalizer')
expect(normalizer.normalizeString('Hello')).toEqual('_Hello')
})
it('can normalize strings', () => {
const normalizer = stripNormalizer()
expect(normalizer.normalizeString(' Hello there ')).toEqual('Hello there')
})
})
describe('stripAccentsNormalizer', () => {
it('initialize', () => {
const normalizer = stripAccentsNormalizer()
expect(normalizer.constructor.name).toEqual('Normalizer')
})
})
| tokenizers/bindings/node/lib/bindings/normalizers.test.ts/0 | {
"file_path": "tokenizers/bindings/node/lib/bindings/normalizers.test.ts",
"repo_id": "tokenizers",
"token_count": 468
} |
{
"name": "tokenizers-linux-arm-gnueabihf",
"version": "0.13.4-rc1",
"os": [
"linux"
],
"cpu": [
"arm"
],
"main": "tokenizers.linux-arm-gnueabihf.node",
"files": [
"tokenizers.linux-arm-gnueabihf.node"
],
"description": "Tokenizers platform specific bindings",
"keywords": [
"napi-rs",
"NAPI",
"N-API",
"Rust",
"node-addon",
"node-addon-api"
],
"license": "MIT",
"engines": {
"node": ">= 10"
},
"publishConfig": {
"registry": "https://registry.npmjs.org/",
"access": "public"
},
"repository": "tokenizers"
} | tokenizers/bindings/node/npm/linux-arm-gnueabihf/package.json/0 | {
"file_path": "tokenizers/bindings/node/npm/linux-arm-gnueabihf/package.json",
"repo_id": "tokenizers",
"token_count": 278
} |
<jupyter_start><jupyter_code>!wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt -O /tmp/bert-base-uncased-vocab.txt
from tokenizers import BertWordPieceTokenizer
from tokenizers.tools import EncodingVisualizer
EncodingVisualizer.unk_token_regex.search("aaa[udsnk]aaa")
text = """Mathias Bynens 'Z͑ͫ̓ͪ̂ͫ̽͏̴̙̤̞͉͚̯̞̠͍A̴̵̜̰͔ͫ͗͢L̠ͨͧͩ͘G̴̻͈͍͔̹̑͗̎̅͛́Ǫ̵̹̻̝̳͂̌̌͘!͖̬̰̙̗̿̋ͥͥ̂ͣ̐́́͜͞': Whenever you’re working on a piece of JavaScript code that deals with strings or regular expressions in some way, just add a unit test that contains a pile of poo (💩) in a string, 💩💩💩💩💩💩💩💩💩💩💩💩 and see if anything breaks. It’s a quick, fun, and easy way to see if your code supports astral symbols. Once you’ve found a Unicode-related bug in your code, all you need to do is apply the techniques discussed in this post to fix it."""
tokenizer = BertWordPieceTokenizer("/tmp/bert-base-uncased-vocab.txt", lowercase=True)
visualizer = EncodingVisualizer(tokenizer=tokenizer)<jupyter_output><empty_output><jupyter_text>Visualizing Tokens With No Annotations<jupyter_code>visualizer(text)<jupyter_output><empty_output><jupyter_text>Visualizing Tokens With Aligned AnnotationsFirst we make some annotations with the Annotation class<jupyter_code>from tokenizers.tools import Annotation
anno1 = Annotation(start=0, end=2, label="foo")
anno2 = Annotation(start=2, end=4, label="bar")
anno3 = Annotation(start=6, end=8, label="poo")
anno4 = Annotation(start=9, end=12, label="shoe")
annotations = [
anno1,
anno2,
anno3,
anno4,
Annotation(start=23, end=30, label="random tandem bandem sandem landem fandom"),
Annotation(start=63, end=70, label="foo"),
Annotation(start=80, end=95, label="bar"),
Annotation(start=120, end=128, label="bar"),
Annotation(start=152, end=155, label="poo"),
]
visualizer(text, annotations=funnyAnnotations)<jupyter_output><empty_output><jupyter_text>Using A Custom Annotation FormatEvery system has its own representation of annotations. That's why we can instantiate the EncodingVisualizer with a conversion function.<jupyter_code>funnyAnnotations = [dict(startPlace=i, endPlace=i + 3, theTag=str(i)) for i in range(0, 20, 4)]
funnyAnnotations
def converter(funny):
return Annotation(start=funny["startPlace"], end=funny["endPlace"], label=funny["theTag"])
visualizer = EncodingVisualizer(tokenizer=tokenizer, default_to_notebook=True, annotation_converter=converter)
visualizer(text, annotations=funnyAnnotations)<jupyter_output><empty_output><jupyter_text>Trying with Roberta<jupyter_code>!wget "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json" -O /tmp/roberta-base-vocab.json
!wget "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt" -O /tmp/roberta-base-merges.txt
from tokenizers import ByteLevelBPETokenizer
roberta_tokenizer = ByteLevelBPETokenizer.from_file("/tmp/roberta-base-vocab.json", "/tmp/roberta-base-merges.txt")
roberta_visualizer = EncodingVisualizer(tokenizer=roberta_tokenizer, default_to_notebook=True)
roberta_visualizer(text, annotations=annotations)<jupyter_output><empty_output> | tokenizers/bindings/python/examples/using_the_visualizer.ipynb/0 | {
"file_path": "tokenizers/bindings/python/examples/using_the_visualizer.ipynb",
"repo_id": "tokenizers",
"token_count": 1222
} |
# Generated content DO NOT EDIT
from .. import pre_tokenizers
PreTokenizer = pre_tokenizers.PreTokenizer
BertPreTokenizer = pre_tokenizers.BertPreTokenizer
ByteLevel = pre_tokenizers.ByteLevel
CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit
Digits = pre_tokenizers.Digits
Metaspace = pre_tokenizers.Metaspace
Punctuation = pre_tokenizers.Punctuation
Sequence = pre_tokenizers.Sequence
Split = pre_tokenizers.Split
UnicodeScripts = pre_tokenizers.UnicodeScripts
Whitespace = pre_tokenizers.Whitespace
WhitespaceSplit = pre_tokenizers.WhitespaceSplit
| tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py",
"repo_id": "tokenizers",
"token_count": 177
} |
use pyo3::exceptions;
use pyo3::prelude::*;
use pyo3::types::*;
use tk::tokenizer::{Offsets, PaddingDirection};
use tk::utils::truncation::TruncationDirection;
use tokenizers as tk;
use crate::error::{deprecation_warning, PyError};
/// The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`.
#[pyclass(dict, module = "tokenizers", name = "Encoding")]
#[repr(transparent)]
pub struct PyEncoding {
pub encoding: tk::tokenizer::Encoding,
}
impl From<tk::tokenizer::Encoding> for PyEncoding {
fn from(v: tk::tokenizer::Encoding) -> Self {
Self { encoding: v }
}
}
#[pymethods]
impl PyEncoding {
#[new]
#[pyo3(text_signature = None)]
fn new() -> Self {
Self {
encoding: tk::tokenizer::Encoding::default(),
}
}
fn __getstate__(&self, py: Python) -> PyResult<PyObject> {
let data = serde_json::to_string(&self.encoding).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to pickle Encoding: {}",
e
))
})?;
Ok(PyBytes::new(py, data.as_bytes()).into())
}
fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> {
match state.extract::<&[u8]>(py) {
Ok(s) => {
self.encoding = serde_json::from_slice(s).map_err(|e| {
exceptions::PyException::new_err(format!(
"Error while attempting to unpickle Encoding: {}",
e
))
})?;
Ok(())
}
Err(e) => Err(e),
}
}
fn __repr__(&self) -> PyResult<String> {
Ok(format!(
"Encoding(num_tokens={}, attributes=[ids, type_ids, tokens, offsets, \
attention_mask, special_tokens_mask, overflowing])",
self.encoding.get_ids().len()
))
}
fn __len__(&self) -> PyResult<usize> {
Ok(self.encoding.len())
}
/// Merge the list of encodings into one final :class:`~tokenizers.Encoding`
///
/// Args:
/// encodings (A :obj:`List` of :class:`~tokenizers.Encoding`):
/// The list of encodings that should be merged in one
///
/// growing_offsets (:obj:`bool`, defaults to :obj:`True`):
/// Whether the offsets should accumulate while merging
///
/// Returns:
/// :class:`~tokenizers.Encoding`: The resulting Encoding
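    ///
    /// Example (illustrative sketch; assumes ``first`` and ``second`` are existing
    /// :class:`~tokenizers.Encoding` objects)::
    ///
    ///     merged = Encoding.merge([first, second], growing_offsets=True)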
#[staticmethod]
#[pyo3(signature = (encodings, growing_offsets = true))]
#[pyo3(text_signature = "(encodings, growing_offsets=True)")]
fn merge(encodings: Vec<PyRef<PyEncoding>>, growing_offsets: bool) -> PyEncoding {
tk::tokenizer::Encoding::merge(
encodings.into_iter().map(|e| e.encoding.clone()),
growing_offsets,
)
.into()
}
/// The number of sequences represented
///
/// Returns:
/// :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding`
#[getter]
fn get_n_sequences(&self) -> usize {
self.encoding.n_sequences()
}
/// Set the given sequence index
///
/// Set the given sequence index for the whole range of tokens contained in this
/// :class:`~tokenizers.Encoding`.
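    ///
    /// Example (illustrative sketch; ``encoding`` is an existing :class:`~tokenizers.Encoding`)::
    ///
    ///     encoding.set_sequence_id(0)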
#[pyo3(text_signature = "(self, sequence_id)")]
fn set_sequence_id(&mut self, sequence_id: usize) {
self.encoding.set_sequence_id(sequence_id);
}
/// The generated IDs
///
/// The IDs are the main input to a Language Model. They are the token indices,
/// the numerical representations that a LM understands.
///
/// Returns:
/// :obj:`List[int]`: The list of IDs
#[getter]
fn get_ids(&self) -> Vec<u32> {
self.encoding.get_ids().to_vec()
}
/// The generated tokens
///
/// They are the string representation of the IDs.
///
/// Returns:
/// :obj:`List[str]`: The list of tokens
#[getter]
fn get_tokens(&self) -> Vec<String> {
self.encoding.get_tokens().to_vec()
}
/// The generated word indices.
///
/// .. warning::
/// This is deprecated and will be removed in a future version.
/// Please use :obj:`~tokenizers.Encoding.word_ids` instead.
///
    /// They represent the index of the word associated with each token.
    /// When the input is pre-tokenized, they correspond to the ID of the given input label,
    /// otherwise they correspond to the word indices as defined by the
/// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
///
/// For special tokens and such (any token that was generated from something that was
/// not part of the input), the output is :obj:`None`
///
/// Returns:
/// A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
#[getter]
fn get_words(&self, py: Python<'_>) -> PyResult<Vec<Option<u32>>> {
deprecation_warning(
py,
"0.9.4",
"Encoding.words is deprecated, please use Encoding.word_ids instead.",
)?;
Ok(self.get_word_ids())
}
/// The generated word indices.
///
    /// They represent the index of the word associated with each token.
    /// When the input is pre-tokenized, they correspond to the ID of the given input label,
    /// otherwise they correspond to the word indices as defined by the
/// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used.
///
/// For special tokens and such (any token that was generated from something that was
/// not part of the input), the output is :obj:`None`
///
/// Returns:
/// A :obj:`List` of :obj:`Optional[int]`: A list of optional word index.
#[getter]
fn get_word_ids(&self) -> Vec<Option<u32>> {
self.encoding.get_word_ids().to_vec()
}
/// The generated sequence indices.
///
/// They represent the index of the input sequence associated to each token.
/// The sequence id can be None if the token is not related to any input sequence,
/// like for example with special tokens.
///
/// Returns:
/// A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index.
#[getter]
fn get_sequence_ids(&self) -> Vec<Option<usize>> {
self.encoding.get_sequence_ids()
}
/// The generated type IDs
///
/// Generally used for tasks like sequence classification or question answering,
    /// these tokens let the LM know which input sequence corresponds to each token.
///
/// Returns:
/// :obj:`List[int]`: The list of type ids
#[getter]
fn get_type_ids(&self) -> Vec<u32> {
self.encoding.get_type_ids().to_vec()
}
    /// The offsets associated with each token
///
    /// These offsets let you slice the input string, and thus retrieve the original
/// part that led to producing the corresponding token.
///
/// Returns:
/// A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets
#[getter]
fn get_offsets(&self) -> Vec<(usize, usize)> {
self.encoding.get_offsets().to_vec()
}
/// The special token mask
///
/// This indicates which tokens are special tokens, and which are not.
///
/// Returns:
/// :obj:`List[int]`: The special tokens mask
#[getter]
fn get_special_tokens_mask(&self) -> Vec<u32> {
self.encoding.get_special_tokens_mask().to_vec()
}
/// The attention mask
///
/// This indicates to the LM which tokens should be attended to, and which should not.
    /// This is especially important when batching sequences, where we need to apply
/// padding.
///
/// Returns:
/// :obj:`List[int]`: The attention mask
#[getter]
fn get_attention_mask(&self) -> Vec<u32> {
self.encoding.get_attention_mask().to_vec()
}
/// A :obj:`List` of overflowing :class:`~tokenizers.Encoding`
///
/// When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting
/// the output into as many pieces as required to match the specified maximum length.
/// This field lets you retrieve all the subsequent pieces.
///
/// When you use pairs of sequences, the overflowing pieces will contain enough
/// variations to cover all the possible combinations, while respecting the provided
/// maximum length.
#[getter]
fn get_overflowing(&self) -> Vec<PyEncoding> {
self.encoding
.get_overflowing()
.clone()
.into_iter()
.map(|e| e.into())
.collect()
}
/// Get the encoded tokens corresponding to the word at the given index
/// in one of the input sequences.
///
/// Args:
/// word_index (:obj:`int`):
/// The index of a word in one of the input sequences.
/// sequence_index (:obj:`int`, defaults to :obj:`0`):
/// The index of the sequence that contains the target word
///
/// Returns:
/// :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)`
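    ///
    /// Example (illustrative sketch; returns :obj:`None` when the word cannot be found)::
    ///
    ///     tokens_range = encoding.word_to_tokens(0, sequence_index=0)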
#[pyo3(signature = (word_index, sequence_index = 0))]
#[pyo3(text_signature = "(self, word_index, sequence_index=0)")]
fn word_to_tokens(&self, word_index: u32, sequence_index: usize) -> Option<(usize, usize)> {
self.encoding.word_to_tokens(word_index, sequence_index)
}
/// Get the offsets of the word at the given index in one of the input sequences.
///
/// Args:
/// word_index (:obj:`int`):
/// The index of a word in one of the input sequences.
/// sequence_index (:obj:`int`, defaults to :obj:`0`):
/// The index of the sequence that contains the target word
///
/// Returns:
/// :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)`
#[pyo3(signature = (word_index, sequence_index = 0))]
#[pyo3(text_signature = "(self, word_index, sequence_index=0)")]
fn word_to_chars(&self, word_index: u32, sequence_index: usize) -> Option<Offsets> {
self.encoding.word_to_chars(word_index, sequence_index)
}
/// Get the index of the sequence represented by the given token.
///
/// In the general use case, this method returns :obj:`0` for a single sequence or
/// the first sequence of a pair, and :obj:`1` for the second sequence of a pair
///
/// Args:
/// token_index (:obj:`int`):
/// The index of a token in the encoded sequence.
///
/// Returns:
/// :obj:`int`: The sequence id of the given token
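    ///
    /// Example (illustrative sketch; returns :obj:`None` for tokens that do not belong to
    /// any input sequence, such as special tokens)::
    ///
    ///     sequence_index = encoding.token_to_sequence(3)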
#[pyo3(text_signature = "(self, token_index)")]
fn token_to_sequence(&self, token_index: usize) -> Option<usize> {
self.encoding.token_to_sequence(token_index)
}
/// Get the offsets of the token at the given index.
///
/// The returned offsets are related to the input sequence that contains the
    /// token. In order to determine which input sequence it belongs to, you
/// must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
///
/// Args:
/// token_index (:obj:`int`):
/// The index of a token in the encoded sequence.
///
/// Returns:
/// :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)`
#[pyo3(text_signature = "(self, token_index)")]
fn token_to_chars(&self, token_index: usize) -> Option<Offsets> {
let (_, offsets) = self.encoding.token_to_chars(token_index)?;
Some(offsets)
}
/// Get the index of the word that contains the token in one of the input sequences.
///
/// The returned word index is related to the input sequence that contains
    /// the token. In order to determine which input sequence it belongs to, you
/// must call :meth:`~tokenizers.Encoding.token_to_sequence()`.
///
/// Args:
/// token_index (:obj:`int`):
/// The index of a token in the encoded sequence.
///
/// Returns:
/// :obj:`int`: The index of the word in the relevant input sequence.
#[pyo3(text_signature = "(self, token_index)")]
fn token_to_word(&self, token_index: usize) -> Option<u32> {
let (_, word_idx) = self.encoding.token_to_word(token_index)?;
Some(word_idx)
}
/// Get the token that contains the char at the given position in the input sequence.
///
/// Args:
/// char_pos (:obj:`int`):
/// The position of a char in the input string
/// sequence_index (:obj:`int`, defaults to :obj:`0`):
/// The index of the sequence that contains the target char
///
/// Returns:
/// :obj:`int`: The index of the token that contains this char in the encoded sequence
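    ///
    /// Example (illustrative sketch; returns :obj:`None` if no token covers this char)::
    ///
    ///     token_index = encoding.char_to_token(5, sequence_index=0)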
#[pyo3(signature = (char_pos, sequence_index = 0))]
#[pyo3(text_signature = "(self, char_pos, sequence_index=0)")]
fn char_to_token(&self, char_pos: usize, sequence_index: usize) -> Option<usize> {
self.encoding.char_to_token(char_pos, sequence_index)
}
/// Get the word that contains the char at the given position in the input sequence.
///
/// Args:
/// char_pos (:obj:`int`):
/// The position of a char in the input string
/// sequence_index (:obj:`int`, defaults to :obj:`0`):
/// The index of the sequence that contains the target char
///
/// Returns:
/// :obj:`int`: The index of the word that contains this char in the input sequence
#[pyo3(signature = (char_pos, sequence_index = 0))]
#[pyo3(text_signature = "(self, char_pos, sequence_index=0)")]
fn char_to_word(&self, char_pos: usize, sequence_index: usize) -> Option<u32> {
self.encoding.char_to_word(char_pos, sequence_index)
}
/// Pad the :class:`~tokenizers.Encoding` at the given length
///
/// Args:
/// length (:obj:`int`):
/// The desired length
///
    ///     direction (:obj:`str`, defaults to :obj:`right`):
/// The expected padding direction. Can be either :obj:`right` or :obj:`left`
///
/// pad_id (:obj:`int`, defaults to :obj:`0`):
/// The ID corresponding to the padding token
///
/// pad_type_id (:obj:`int`, defaults to :obj:`0`):
/// The type ID corresponding to the padding token
///
/// pad_token (:obj:`str`, defaults to `[PAD]`):
/// The pad token to use
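    ///
    /// Example (illustrative sketch; ``encoding`` is an existing :class:`~tokenizers.Encoding`)::
    ///
    ///     encoding.pad(128, direction="right", pad_id=0, pad_token="[PAD]")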
#[pyo3(signature = (length, **kwargs))]
#[pyo3(
text_signature = "(self, length, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]')"
)]
fn pad(&mut self, length: usize, kwargs: Option<&Bound<'_, PyDict>>) -> PyResult<()> {
let mut pad_id = 0;
let mut pad_type_id = 0;
let mut pad_token = "[PAD]".to_string();
let mut direction = PaddingDirection::Right;
if let Some(kwargs) = kwargs {
for (key, value) in kwargs {
let key: String = key.extract()?;
match key.as_ref() {
"direction" => {
let value: String = value.extract()?;
direction = match value.as_ref() {
"left" => Ok(PaddingDirection::Left),
"right" => Ok(PaddingDirection::Right),
other => Err(PyError(format!(
"Unknown `direction`: `{}`. Use \
one of `left` or `right`",
other
))
.into_pyerr::<exceptions::PyValueError>()),
}?;
}
"pad_id" => pad_id = value.extract()?,
"pad_type_id" => pad_type_id = value.extract()?,
"pad_token" => pad_token = value.extract()?,
_ => println!("Ignored unknown kwarg option {}", key),
}
}
}
self.encoding
.pad(length, pad_id, pad_type_id, &pad_token, direction);
Ok(())
}
/// Truncate the :class:`~tokenizers.Encoding` at the given length
///
/// If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating
/// this information is lost. It will be considered as representing a single sequence.
///
/// Args:
/// max_length (:obj:`int`):
/// The desired length
///
/// stride (:obj:`int`, defaults to :obj:`0`):
/// The length of previous content to be included in each overflowing piece
///
/// direction (:obj:`str`, defaults to :obj:`right`):
/// Truncate direction
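    ///
    /// Example (illustrative sketch; ``encoding`` is an existing :class:`~tokenizers.Encoding`)::
    ///
    ///     encoding.truncate(512, stride=0, direction="right")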
#[pyo3(signature = (max_length, stride = 0, direction = "right"))]
#[pyo3(text_signature = "(self, max_length, stride=0, direction='right')")]
fn truncate(&mut self, max_length: usize, stride: usize, direction: &str) -> PyResult<()> {
let tdir = match direction {
"left" => Ok(TruncationDirection::Left),
"right" => Ok(TruncationDirection::Right),
_ => Err(PyError(format!(
"Invalid truncation direction value : {}",
direction
))
.into_pyerr::<exceptions::PyValueError>()),
}?;
self.encoding.truncate(max_length, stride, tdir);
Ok(())
}
}
| tokenizers/bindings/python/src/encoding.rs/0 | {
"file_path": "tokenizers/bindings/python/src/encoding.rs",
"repo_id": "tokenizers",
"token_count": 7409
} |
import argparse
import inspect
import os
from pathlib import Path
INDENT = " " * 4
GENERATED_COMMENT = "# Generated content DO NOT EDIT\n"
def do_indent(text: str, indent: str):
return text.replace("\n", f"\n{indent}")
def function(obj, indent, text_signature=None):
if text_signature is None:
text_signature = obj.__text_signature__
string = ""
string += f"{indent}def {obj.__name__}{text_signature}:\n"
indent += INDENT
string += f'{indent}"""\n'
string += f"{indent}{do_indent(obj.__doc__, indent)}\n"
string += f'{indent}"""\n'
string += f"{indent}pass\n"
string += "\n"
string += "\n"
return string
def member_sort(member):
if inspect.isclass(member):
value = 10 + len(inspect.getmro(member))
else:
value = 1
return value
def fn_predicate(obj):
value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj)
if value:
return obj.__doc__ and obj.__text_signature__ and not obj.__name__.startswith("_")
if inspect.isgetsetdescriptor(obj):
return obj.__doc__ and not obj.__name__.startswith("_")
return False
def get_module_members(module):
members = [
member
for name, member in inspect.getmembers(module)
if not name.startswith("_") and not inspect.ismodule(member)
]
members.sort(key=member_sort)
return members
def pyi_file(obj, indent=""):
string = ""
if inspect.ismodule(obj):
string += GENERATED_COMMENT
members = get_module_members(obj)
for member in members:
string += pyi_file(member, indent)
elif inspect.isclass(obj):
indent += INDENT
mro = inspect.getmro(obj)
if len(mro) > 2:
inherit = f"({mro[1].__name__})"
else:
inherit = ""
string += f"class {obj.__name__}{inherit}:\n"
body = ""
if obj.__doc__:
body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n'
fns = inspect.getmembers(obj, fn_predicate)
# Init
if obj.__text_signature__:
body += f"{indent}def __init__{obj.__text_signature__}:\n"
body += f"{indent+INDENT}pass\n"
body += "\n"
for name, fn in fns:
body += pyi_file(fn, indent=indent)
if not body:
body += f"{indent}pass\n"
string += body
string += "\n\n"
elif inspect.isbuiltin(obj):
string += f"{indent}@staticmethod\n"
string += function(obj, indent)
elif inspect.ismethoddescriptor(obj):
string += function(obj, indent)
elif inspect.isgetsetdescriptor(obj):
# TODO it would be interesting to add the setter maybe ?
string += f"{indent}@property\n"
string += function(obj, indent, text_signature="(self)")
else:
raise Exception(f"Object {obj} is not supported")
return string
def py_file(module, origin):
members = get_module_members(module)
string = GENERATED_COMMENT
string += f"from .. import {origin}\n"
string += "\n"
for member in members:
name = member.__name__
string += f"{name} = {origin}.{name}\n"
return string
import subprocess
from typing import List, Optional, Tuple
def do_ruff(code, is_pyi: bool):
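    # Format the generated code in-memory by piping it through `ruff format` on stdin;
    # for .pyi stubs, `--stdin-filename test.pyi` makes ruff treat the input as a stub file.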
command = ["ruff", "format", "--config", "pyproject.toml", "--silent", "-"]
if is_pyi:
command.extend(["--stdin-filename", "test.pyi"])
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, _ = process.communicate(input=code.encode("utf-8"))
return stdout.decode("utf-8")
def write(module, directory, origin, check=False):
submodules = [(name, member) for name, member in inspect.getmembers(module) if inspect.ismodule(member)]
filename = os.path.join(directory, "__init__.pyi")
pyi_content = pyi_file(module)
pyi_content = do_ruff(pyi_content, is_pyi=True)
os.makedirs(directory, exist_ok=True)
if check:
with open(filename, "r") as f:
data = f.read()
assert data == pyi_content, f"The content of {filename} seems outdated, please run `python stub.py`"
else:
with open(filename, "w") as f:
f.write(pyi_content)
filename = os.path.join(directory, "__init__.py")
py_content = py_file(module, origin)
py_content = do_ruff(py_content, is_pyi=False)
os.makedirs(directory, exist_ok=True)
is_auto = False
if not os.path.exists(filename):
is_auto = True
else:
with open(filename, "r") as f:
line = f.readline()
if line == GENERATED_COMMENT:
is_auto = True
if is_auto:
if check:
with open(filename, "r") as f:
data = f.read()
assert data == py_content, f"The content of {filename} seems outdated, please run `python stub.py`"
else:
with open(filename, "w") as f:
f.write(py_content)
for name, submodule in submodules:
write(submodule, os.path.join(directory, name), f"{name}", check=check)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--check", action="store_true")
args = parser.parse_args()
import tokenizers
write(tokenizers.tokenizers, "py_src/tokenizers/", "tokenizers", check=args.check)
| tokenizers/bindings/python/stub.py/0 | {
"file_path": "tokenizers/bindings/python/stub.py",
"repo_id": "tokenizers",
"token_count": 2392
} |
# Models
<tokenizerslangcontent>
<python>
## BPE
[[autodoc]] tokenizers.models.BPE
## Model
[[autodoc]] tokenizers.models.Model
## Unigram
[[autodoc]] tokenizers.models.Unigram
## WordLevel
[[autodoc]] tokenizers.models.WordLevel
## WordPiece
[[autodoc]] tokenizers.models.WordPiece
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/api/models.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/api/models.mdx",
"repo_id": "tokenizers",
"token_count": 179
} |
#[macro_use]
extern crate criterion;
use criterion::{Criterion, Throughput};
use tokenizers::Tokenizer;
pub fn llama3(c: &mut Criterion) {
let data = std::fs::read_to_string("data/big.txt").unwrap();
let mut group = c.benchmark_group("llama3-encode");
group.throughput(Throughput::Bytes(data.bytes().len() as u64));
group.bench_function("llama3-offsets", |b| {
let tokenizer =
Tokenizer::from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct", None).unwrap();
let data: Vec<_> = data.lines().collect();
let add_special_tokens = false;
b.iter(|| {
tokenizer
.encode_batch_char_offsets(criterion::black_box(data.clone()), add_special_tokens)
.unwrap()
})
});
group.bench_function("llama3-nooffsets", |b| {
let tokenizer =
Tokenizer::from_pretrained("meta-llama/Meta-Llama-3.1-8B-Instruct", None).unwrap();
let data: Vec<_> = data.lines().collect();
let add_special_tokens = false;
b.iter(|| {
tokenizer
.encode_batch(criterion::black_box(data.clone()), add_special_tokens)
.unwrap()
})
});
group.finish();
}
criterion_group! {
name = bert_benches;
config = Criterion::default().sample_size(10);
targets = llama3
}
criterion_main!(bert_benches);
| tokenizers/tokenizers/benches/llama3.rs/0 | {
"file_path": "tokenizers/tokenizers/benches/llama3.rs",
"repo_id": "tokenizers",
"token_count": 645
} |
//! [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model.
use std::{iter, mem};
mod model;
mod serialization;
pub mod trainer;
mod word;
type Pair = (u32, u32);
/// Errors that can be encountered while using or constructing a `BPE` model.
#[derive(thiserror::Error, Debug)]
pub enum Error {
/// An error encountered while reading files mainly.
#[error("IoError: {0}")]
Io(#[from] std::io::Error),
/// An error forwarded from Serde, while parsing JSON
#[error("JsonError: {0}")]
JsonError(#[from] serde_json::Error),
/// When the vocab.json file is in the wrong format
#[error("Bad vocabulary json file")]
BadVocabulary,
/// When the merges.txt file is in the wrong format. This error holds the line
/// number of the line that caused the error.
#[error("Merges text file invalid at line {0}")]
BadMerges(usize),
/// If a token found in merges, is not in the vocab
#[error("Token `{0}` out of vocabulary")]
MergeTokenOutOfVocabulary(String),
/// If the provided unk token is out of vocabulary
#[error("Unk token `{0}` not found in the vocabulary")]
UnkTokenOutOfVocabulary(String),
/// Dropout not between 0 and 1.
#[error("Dropout should be between 0 and 1, inclusive")]
InvalidDropout,
}
/// Provides access to the `FirstLastIterator` for any `Iterator`
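///
/// Illustrative example: `vec![1, 2, 3].into_iter().with_first_and_last()` yields
/// `(true, false, 1)`, `(false, false, 2)` and `(false, true, 3)`.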
pub(crate) trait WithFirstLastIterator: Iterator + Sized {
fn with_first_and_last(self) -> FirstLastIterator<Self>;
}
impl<I> WithFirstLastIterator for I
where
I: Iterator,
{
fn with_first_and_last(self) -> FirstLastIterator<Self> {
FirstLastIterator {
first: true,
iter: self.peekable(),
}
}
}
/// Provides information about whether an item is the first and/or the last of the iterator
pub(crate) struct FirstLastIterator<I>
where
I: Iterator,
{
first: bool,
iter: iter::Peekable<I>,
}
impl<I> Iterator for FirstLastIterator<I>
where
I: Iterator,
{
/// (is_first, is_last, item)
type Item = (bool, bool, I::Item);
fn next(&mut self) -> Option<Self::Item> {
let first = mem::replace(&mut self.first, false);
self.iter
.next()
.map(|e| (first, self.iter.peek().is_none(), e))
}
}
// Re-export
pub use model::*;
pub use trainer::*;
use word::*;
| tokenizers/tokenizers/src/models/bpe/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/bpe/mod.rs",
"repo_id": "tokenizers",
"token_count": 893
} |
use super::{super::OrderedVocabIter, WordPiece, WordPieceBuilder};
use serde::{
de::{MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use std::collections::HashSet;
impl Serialize for WordPiece {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut model = serializer.serialize_struct("WordPiece", 5)?;
// Small fields first
model.serialize_field("type", "WordPiece")?;
model.serialize_field("unk_token", &self.unk_token)?;
model.serialize_field("continuing_subword_prefix", &self.continuing_subword_prefix)?;
model.serialize_field("max_input_chars_per_word", &self.max_input_chars_per_word)?;
// Then large ones
let ordered_vocab = OrderedVocabIter::new(&self.vocab_r);
model.serialize_field("vocab", &ordered_vocab)?;
model.end()
}
}
impl<'de> Deserialize<'de> for WordPiece {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_struct(
"WordPiece",
&[
"type",
"unk_token",
"continuing_subword_prefix",
"max_input_chars_per_word",
"vocab",
],
WordPieceVisitor,
)
}
}
struct WordPieceVisitor;
impl<'de> Visitor<'de> for WordPieceVisitor {
type Value = WordPiece;
fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "struct WordPiece")
}
fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error>
where
V: MapAccess<'de>,
{
let mut builder = WordPieceBuilder::new();
let mut missing_fields = vec![
            // for backward compatibility, the "type" field is not mandatory
"unk_token",
"continuing_subword_prefix",
"max_input_chars_per_word",
"vocab",
]
.into_iter()
.collect::<HashSet<_>>();
while let Some(key) = map.next_key::<String>()? {
match key.as_ref() {
"unk_token" => builder = builder.unk_token(map.next_value()?),
"continuing_subword_prefix" => {
builder = builder.continuing_subword_prefix(map.next_value()?)
}
"max_input_chars_per_word" => {
builder = builder.max_input_chars_per_word(map.next_value()?)
}
"vocab" => builder = builder.vocab(map.next_value()?),
"type" => match map.next_value()? {
"WordPiece" => {}
u => {
return Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(u),
&"WordPiece",
))
}
},
_ => {}
}
missing_fields.remove::<str>(&key);
}
if !missing_fields.is_empty() {
Err(serde::de::Error::missing_field(
missing_fields.iter().next().unwrap(),
))
} else {
Ok(builder.build().map_err(serde::de::Error::custom)?)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn serde() {
let wp = WordPiece::default();
let wp_s = "{\
\"type\":\"WordPiece\",\
\"unk_token\":\"[UNK]\",\
\"continuing_subword_prefix\":\"##\",\
\"max_input_chars_per_word\":100,\
\"vocab\":{}\
}";
assert_eq!(serde_json::to_string(&wp).unwrap(), wp_s);
assert_eq!(serde_json::from_str::<WordPiece>(wp_s).unwrap(), wp);
}
#[test]
fn deserialization_should_fail() {
let missing_unk = "{\
\"type\":\"WordPiece\",\
\"continuing_subword_prefix\":\"##\",\
\"max_input_chars_per_word\":100,\
\"vocab\":{}\
}";
assert!(serde_json::from_str::<WordPiece>(missing_unk)
.unwrap_err()
.to_string()
.starts_with("missing field `unk_token`"));
let wrong_type = "{\
\"type\":\"WordLevel\",\
\"unk_token\":\"[UNK]\",\
\"vocab\":{}\
}";
assert!(serde_json::from_str::<WordPiece>(wrong_type)
.unwrap_err()
.to_string()
.starts_with("invalid value: string \"WordLevel\", expected WordPiece"));
}
}
| tokenizers/tokenizers/src/models/wordpiece/serialization.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/wordpiece/serialization.rs",
"repo_id": "tokenizers",
"token_count": 2453
} |
pub mod bert;
pub mod byte_level;
pub mod delimiter;
pub mod digits;
pub mod metaspace;
pub mod punctuation;
pub mod sequence;
pub mod split;
pub mod unicode_scripts;
pub mod whitespace;
use serde::{Deserialize, Deserializer, Serialize};
use crate::pre_tokenizers::bert::BertPreTokenizer;
use crate::pre_tokenizers::byte_level::ByteLevel;
use crate::pre_tokenizers::delimiter::CharDelimiterSplit;
use crate::pre_tokenizers::digits::Digits;
use crate::pre_tokenizers::metaspace::Metaspace;
use crate::pre_tokenizers::punctuation::Punctuation;
use crate::pre_tokenizers::sequence::Sequence;
use crate::pre_tokenizers::split::Split;
use crate::pre_tokenizers::unicode_scripts::UnicodeScripts;
use crate::pre_tokenizers::whitespace::{Whitespace, WhitespaceSplit};
use crate::{PreTokenizedString, PreTokenizer};
#[derive(Serialize, Clone, Debug, PartialEq)]
#[serde(untagged)]
pub enum PreTokenizerWrapper {
BertPreTokenizer(BertPreTokenizer),
ByteLevel(ByteLevel),
Delimiter(CharDelimiterSplit),
Metaspace(Metaspace),
Whitespace(Whitespace),
Sequence(Sequence),
Split(Split),
Punctuation(Punctuation),
WhitespaceSplit(WhitespaceSplit),
Digits(Digits),
UnicodeScripts(UnicodeScripts),
}
impl PreTokenizer for PreTokenizerWrapper {
fn pre_tokenize(&self, normalized: &mut PreTokenizedString) -> crate::Result<()> {
match self {
Self::BertPreTokenizer(bpt) => bpt.pre_tokenize(normalized),
Self::ByteLevel(bpt) => bpt.pre_tokenize(normalized),
Self::Delimiter(dpt) => dpt.pre_tokenize(normalized),
Self::Metaspace(mspt) => mspt.pre_tokenize(normalized),
Self::Whitespace(wspt) => wspt.pre_tokenize(normalized),
Self::Punctuation(tok) => tok.pre_tokenize(normalized),
Self::Sequence(tok) => tok.pre_tokenize(normalized),
Self::Split(tok) => tok.pre_tokenize(normalized),
Self::WhitespaceSplit(wspt) => wspt.pre_tokenize(normalized),
Self::Digits(wspt) => wspt.pre_tokenize(normalized),
Self::UnicodeScripts(us) => us.pre_tokenize(normalized),
}
}
}
impl<'de> Deserialize<'de> for PreTokenizerWrapper {
fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
pub struct Tagged {
#[serde(rename = "type")]
variant: EnumType,
#[serde(flatten)]
rest: serde_json::Value,
}
#[derive(Deserialize, Serialize)]
pub enum EnumType {
BertPreTokenizer,
ByteLevel,
Delimiter,
Metaspace,
Whitespace,
Sequence,
Split,
Punctuation,
WhitespaceSplit,
Digits,
UnicodeScripts,
}
#[derive(Deserialize)]
#[serde(untagged)]
pub enum PreTokenizerHelper {
Tagged(Tagged),
Legacy(serde_json::Value),
}
#[derive(Deserialize)]
#[serde(untagged)]
pub enum PreTokenizerUntagged {
BertPreTokenizer(BertPreTokenizer),
ByteLevel(ByteLevel),
Delimiter(CharDelimiterSplit),
Metaspace(Metaspace),
Whitespace(Whitespace),
Sequence(Sequence),
Split(Split),
Punctuation(Punctuation),
WhitespaceSplit(WhitespaceSplit),
Digits(Digits),
UnicodeScripts(UnicodeScripts),
}
let helper = PreTokenizerHelper::deserialize(deserializer)?;
Ok(match helper {
PreTokenizerHelper::Tagged(pretok) => {
let mut values: serde_json::Map<String, serde_json::Value> =
serde_json::from_value(pretok.rest).map_err(serde::de::Error::custom)?;
values.insert(
"type".to_string(),
serde_json::to_value(&pretok.variant).map_err(serde::de::Error::custom)?,
);
let values = serde_json::Value::Object(values);
match pretok.variant {
EnumType::BertPreTokenizer => PreTokenizerWrapper::BertPreTokenizer(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::ByteLevel => PreTokenizerWrapper::ByteLevel(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Delimiter => PreTokenizerWrapper::Delimiter(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Metaspace => PreTokenizerWrapper::Metaspace(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Whitespace => PreTokenizerWrapper::Whitespace(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Sequence => PreTokenizerWrapper::Sequence(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Split => PreTokenizerWrapper::Split(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Punctuation => PreTokenizerWrapper::Punctuation(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::WhitespaceSplit => PreTokenizerWrapper::WhitespaceSplit(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::Digits => PreTokenizerWrapper::Digits(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
EnumType::UnicodeScripts => PreTokenizerWrapper::UnicodeScripts(
serde_json::from_value(values).map_err(serde::de::Error::custom)?,
),
}
}
PreTokenizerHelper::Legacy(value) => {
let untagged = serde_json::from_value(value).map_err(serde::de::Error::custom)?;
match untagged {
PreTokenizerUntagged::BertPreTokenizer(bert) => {
PreTokenizerWrapper::BertPreTokenizer(bert)
}
PreTokenizerUntagged::ByteLevel(byte_level) => {
PreTokenizerWrapper::ByteLevel(byte_level)
}
PreTokenizerUntagged::Delimiter(delimiter) => {
PreTokenizerWrapper::Delimiter(delimiter)
}
PreTokenizerUntagged::Metaspace(metaspace) => {
PreTokenizerWrapper::Metaspace(metaspace)
}
PreTokenizerUntagged::Whitespace(whitespace) => {
PreTokenizerWrapper::Whitespace(whitespace)
}
PreTokenizerUntagged::Sequence(sequence) => {
PreTokenizerWrapper::Sequence(sequence)
}
PreTokenizerUntagged::Split(split) => PreTokenizerWrapper::Split(split),
PreTokenizerUntagged::Punctuation(punctuation) => {
PreTokenizerWrapper::Punctuation(punctuation)
}
PreTokenizerUntagged::WhitespaceSplit(whitespace_split) => {
PreTokenizerWrapper::WhitespaceSplit(whitespace_split)
}
PreTokenizerUntagged::Digits(digits) => PreTokenizerWrapper::Digits(digits),
PreTokenizerUntagged::UnicodeScripts(unicode_scripts) => {
PreTokenizerWrapper::UnicodeScripts(unicode_scripts)
}
}
}
})
}
}
impl_enum_from!(BertPreTokenizer, PreTokenizerWrapper, BertPreTokenizer);
impl_enum_from!(ByteLevel, PreTokenizerWrapper, ByteLevel);
impl_enum_from!(CharDelimiterSplit, PreTokenizerWrapper, Delimiter);
impl_enum_from!(Whitespace, PreTokenizerWrapper, Whitespace);
impl_enum_from!(Punctuation, PreTokenizerWrapper, Punctuation);
impl_enum_from!(Sequence, PreTokenizerWrapper, Sequence);
impl_enum_from!(Split, PreTokenizerWrapper, Split);
impl_enum_from!(Metaspace, PreTokenizerWrapper, Metaspace);
impl_enum_from!(WhitespaceSplit, PreTokenizerWrapper, WhitespaceSplit);
impl_enum_from!(Digits, PreTokenizerWrapper, Digits);
impl_enum_from!(UnicodeScripts, PreTokenizerWrapper, UnicodeScripts);
#[cfg(test)]
mod tests {
use super::metaspace::PrependScheme;
use super::*;
#[test]
fn test_deserialize() {
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(r#"{"type":"Sequence","pretokenizers":[{"type":"WhitespaceSplit"},{"type":"Metaspace","replacement":"▁","str_rep":"▁","add_prefix_space":true}]}"#).unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Sequence(Sequence::new(vec![
PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}),
PreTokenizerWrapper::Metaspace(Metaspace::new('▁', PrependScheme::Always, true))
]))
);
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true}"#,
)
.unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Metaspace(Metaspace::new('▁', PrependScheme::Always, true))
);
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(r#"{"type":"Sequence","pretokenizers":[{"type":"WhitespaceSplit"},{"type":"Metaspace","replacement":"▁","add_prefix_space":true}]}"#).unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Sequence(Sequence::new(vec![
PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {}),
PreTokenizerWrapper::Metaspace(Metaspace::new('▁', PrependScheme::Always, true))
]))
);
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true, "prepend_scheme":"first"}"#,
)
.unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Metaspace(Metaspace::new(
'▁',
metaspace::PrependScheme::First,
true
))
);
let pre_tokenizer: PreTokenizerWrapper = serde_json::from_str(
r#"{"type":"Metaspace","replacement":"▁","add_prefix_space":true, "prepend_scheme":"always"}"#,
)
.unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::Metaspace(Metaspace::new(
'▁',
metaspace::PrependScheme::Always,
true
))
);
}
#[test]
fn test_deserialize_whitespace_split() {
let pre_tokenizer: PreTokenizerWrapper =
serde_json::from_str(r#"{"type":"WhitespaceSplit"}"#).unwrap();
assert_eq!(
pre_tokenizer,
PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {})
);
}
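    // A minimal sketch of the `impl_enum_from!` conversions declared above: any concrete
    // pre-tokenizer can be lifted into a `PreTokenizerWrapper` through `From`/`Into`.
    #[test]
    fn test_wrapper_from_concrete() {
        let wrapper: PreTokenizerWrapper = WhitespaceSplit {}.into();
        assert_eq!(
            wrapper,
            PreTokenizerWrapper::WhitespaceSplit(WhitespaceSplit {})
        );
    }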
#[test]
fn pre_tokenizer_deserialization_no_type() {
let json = r#"{"replacement":"▁","add_prefix_space":true, "prepend_scheme":"always"}}"#;
let reconstructed = serde_json::from_str::<PreTokenizerWrapper>(json);
match reconstructed {
Err(err) => assert_eq!(
err.to_string(),
"data did not match any variant of untagged enum PreTokenizerUntagged"
),
_ => panic!("Expected an error here"),
}
let json = r#"{"type":"Metaspace", "replacement":"▁" }"#;
let reconstructed = serde_json::from_str::<PreTokenizerWrapper>(json).unwrap();
assert_eq!(
reconstructed,
PreTokenizerWrapper::Metaspace(Metaspace::default())
);
let json = r#"{"type":"Metaspace", "add_prefix_space":true }"#;
let reconstructed = serde_json::from_str::<PreTokenizerWrapper>(json);
match reconstructed {
Err(err) => assert_eq!(err.to_string(), "missing field `replacement`"),
_ => panic!("Expected an error here"),
}
let json = r#"{"behavior":"default_split"}"#;
let reconstructed = serde_json::from_str::<PreTokenizerWrapper>(json);
match reconstructed {
Err(err) => assert_eq!(
err.to_string(),
"data did not match any variant of untagged enum PreTokenizerUntagged"
),
_ => panic!("Expected an error here"),
}
}
}
| tokenizers/tokenizers/src/pre_tokenizers/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/mod.rs",
"repo_id": "tokenizers",
"token_count": 6537
} |
use crate::pattern::Pattern;
use crate::{Offsets, Result};
use std::ops::{Bound, RangeBounds};
use unicode_normalization_alignments::UnicodeNormalization;
use serde::{Deserialize, Serialize};
/// The possible offset referentials
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OffsetReferential {
Original,
Normalized,
}
/// Represents a Range usable by the NormalizedString to index its content.
/// A Range can use indices relative to either the `Original` or the `Normalized` string
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Range<T: RangeBounds<usize> + Clone> {
Original(T),
Normalized(T),
}
#[allow(clippy::len_without_is_empty)]
impl<T> Range<T>
where
T: RangeBounds<usize> + Clone,
{
/// Unwrap the underlying range
pub fn unwrap(self) -> T {
match self {
Self::Original(r) => r,
Self::Normalized(r) => r,
}
}
    /// Return the length of the current Range, or `None` if the end bound is unbounded
pub fn len(&self) -> Option<usize> {
let range = self.clone().unwrap();
let end = match range.end_bound() {
Bound::Unbounded => None,
Bound::Included(i) => Some(*i + 1),
Bound::Excluded(i) => Some(*i),
}?;
match range.start_bound() {
Bound::Unbounded => Some(end),
Bound::Included(i) => Some(end - *i),
Bound::Excluded(i) => Some(end - (*i + 1)),
}
}
    /// Converts the current Range to a `std::ops::Range<usize>`. This requires the `max_len`
    /// of the represented string (expressed in the same unit as the range indices) in order
    /// to cover the case where the originally provided range was unbounded
pub fn into_full_range(self, max_len: usize) -> std::ops::Range<usize> {
let range = self.unwrap();
let start = match range.start_bound() {
Bound::Unbounded => 0,
Bound::Included(i) => *i,
Bound::Excluded(i) => *i + 1,
};
let end = match range.end_bound() {
Bound::Unbounded => max_len,
Bound::Included(i) => *i + 1,
Bound::Excluded(i) => *i,
};
start..end
}
}
/// Defines the expected behavior for the delimiter of a Split Pattern
/// When splitting on `'-'` for example, with input `the-final--countdown`:
/// - Removed => `[ "the", "final", "countdown" ]`
/// - Isolated => `[ "the", "-", "final", "-", "-", "countdown" ]`
/// - MergedWithPrevious => `[ "the-", "final-", "-", "countdown" ]`
/// - MergedWithNext => `[ "the", "-final", "-", "-countdown" ]`
/// - Contiguous => `[ "the", "-", "final", "--", "countdown" ]`
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq)]
pub enum SplitDelimiterBehavior {
Removed,
Isolated,
MergedWithPrevious,
MergedWithNext,
Contiguous,
}
impl std::fmt::Display for SplitDelimiterBehavior {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.serialize(f)
}
}
/// A `NormalizedString` takes care of processing an "original" string to modify
/// it and obtain a "normalized" string. It keeps both versions of the string,
/// along with alignment information between the two, and provides an interface to
/// retrieve ranges of each string, using offsets from either of them.
///
/// It is possible to retrieve a part of the original string by indexing it with
/// offsets from the normalized one, and the other way around as well. It is also
/// easy to convert offsets from one referential to the other.
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub struct NormalizedString {
/// The original version of the string, before any modification
original: String,
/// The normalized version of the string, after all modifications
normalized: String,
/// Mapping from normalized string to original one: (start, end) for each
/// byte of the normalized string
alignments: Vec<(usize, usize)>,
    /// If this NormalizedString is a slice of a bigger one, we keep track
    /// of the missing part, so that we can still give offsets relative to the
    /// full original string.
original_shift: usize,
}
impl NormalizedString {
#[cfg(test)]
pub(crate) fn new(
original: String,
normalized: String,
alignments: Vec<(usize, usize)>,
original_shift: usize,
) -> Self {
Self {
original,
normalized,
alignments,
original_shift,
}
}
/// Return the normalized string
pub fn get(&self) -> &str {
&self.normalized
}
/// Return the original string
pub fn get_original(&self) -> &str {
&self.original
}
/// Return the original offsets
pub fn offsets_original(&self) -> Offsets {
(
self.original_shift,
self.original_shift + self.len_original(),
)
}
    /// Convert the given offsets range from one referential to the other:
    /// `Original => Normalized` or `Normalized => Original`
    ///
    /// Returns `None` when the target is out of range
pub fn convert_offsets<T>(&self, range: Range<T>) -> Option<std::ops::Range<usize>>
where
T: RangeBounds<usize> + Clone,
{
let len_original = self.len_original();
let len_normalized = self.len();
let (target, original) = match range {
Range::Original(_) => (range.into_full_range(len_original), true),
Range::Normalized(_) => (range.into_full_range(len_normalized), false),
};
// If we target an empty range, let's return the same
if target.start == target.end {
return Some(target);
}
// If the target goes reverse, return None
if target.start > target.end {
return None;
}
// If we target 0..0 on an empty string, we want to expand to the entire equivalent
if original && self.original.is_empty() && target == (0..0) {
return Some(0..len_normalized);
}
if !original && self.normalized.is_empty() && target == (0..0) {
return Some(0..len_original);
}
if original {
let (mut start, mut end) = (None, None);
self.alignments
.iter()
.enumerate()
.take_while(|(_, alignment)| target.end >= alignment.1)
.for_each(|(i, alignment)| {
if start.is_none() && target.start <= alignment.0 {
// For now, don't update if width == 0
if alignment.0 != alignment.1 {
start = Some(i);
}
}
if target.end >= alignment.1 {
end = Some(i + 1);
}
});
match (start, end) {
                // Targeting a nonexistent beginning
(Some(s), None) => Some(s..s),
                // Targeting a nonexistent end
(None, Some(e)) => Some(e..e),
// Found the range
(Some(s), Some(e)) => Some(s..e),
_ => None,
}
} else {
self.alignments.get(target).and_then(expand_alignments)
}
}
/// Return a range of the normalized string
pub fn get_range<T>(&self, range: Range<T>) -> Option<&str>
where
T: RangeBounds<usize> + Clone,
{
match range {
Range::Original(_) => self.normalized.get(self.convert_offsets(range)?),
Range::Normalized(_) => self.normalized.get(range.into_full_range(self.len())),
}
}
/// Return a range of the original string
pub fn get_range_original<T>(&self, range: Range<T>) -> Option<&str>
where
T: RangeBounds<usize> + Clone,
{
match range {
Range::Original(_) => self
.original
.get(range.into_full_range(self.len_original())),
Range::Normalized(_) => self.original.get(self.convert_offsets(range)?),
}
}
/// Validate the given range, to make sure it is on char boundaries
fn validate_range<T: RangeBounds<usize> + Clone>(
&self,
range: Range<T>,
) -> Option<Range<std::ops::Range<usize>>> {
match range {
Range::Original(_) => {
let r = range.into_full_range(self.original.len());
if !(self.original.is_char_boundary(r.start)
&& self.original.is_char_boundary(r.end))
{
None
} else {
Some(Range::Original(r))
}
}
Range::Normalized(_) => {
let r = range.into_full_range(self.normalized.len());
if !(self.normalized.is_char_boundary(r.start)
&& self.normalized.is_char_boundary(r.end))
{
None
} else {
Some(Range::Normalized(r))
}
}
}
}
/// Return a slice of the current NormalizedString
/// If the range is not on char boundaries, return None
pub fn slice<T>(&self, range: Range<T>) -> Option<NormalizedString>
where
T: RangeBounds<usize> + Clone,
{
let full_range = self.validate_range(range)?;
let (normalized_range, original_range) = match full_range {
Range::Original(_) => (
self.convert_offsets(full_range.clone())?,
full_range.clone().unwrap(),
),
Range::Normalized(_) => (
full_range.clone().unwrap(),
self.convert_offsets(full_range.clone())?,
),
};
let n_shift = original_range.start;
Some(Self {
original: self
.get_range_original(full_range.clone())
.unwrap_or_default()
.into(),
normalized: self.get_range(full_range).unwrap_or_default().into(),
alignments: self
.alignments
.get(normalized_range)?
.to_vec()
.iter()
.map(|(start, end)| (start - n_shift, end - n_shift))
.collect(),
original_shift: self.original_shift + original_range.start,
})
}
/// Applies transformations to the current normalized version of the string,
/// while updating the alignments.
    /// This method expects an Iterator yielding each char of the new normalized string
    /// with a `change` isize equal to:
/// - `1` if this is a new char
/// - `-N` if the char is right before N removed chars
/// - `0` if the char is replacing the existing one
///
/// Since it is possible that the normalized string doesn't include some of the characters at
/// the beginning of the original one, we need an `initial_offset` which represents the number
/// of removed chars at the very beginning.
pub fn transform_range<T, I>(&mut self, range: Range<T>, dest: I, initial_offset: usize)
where
T: RangeBounds<usize> + Clone,
I: IntoIterator<Item = (char, isize)>,
{
let n_range = match range {
Range::Normalized(_) => range.into_full_range(self.len()),
Range::Original(_) => match self.convert_offsets(range) {
Some(range) => range,
None => return,
},
};
trace!(
"===== transform_range call with {:?} (initial_offset: {}) =====",
n_range,
initial_offset
);
        // Retrieve the original characters that are being replaced. This lets us
// compute the change in byte sizes along the way.
let mut replaced_normalized = self.normalized[n_range.clone()]
.chars()
.collect::<Vec<_>>()
.into_iter();
let initial_removed: usize = (&mut replaced_normalized)
.take(initial_offset)
.map(|c| c.len_utf8())
.sum();
let mut offset = (initial_removed + n_range.start) as isize;
let mut alignments = Vec::with_capacity(n_range.len());
trace!("=> Applying transformations");
let normalized = dest
.into_iter()
.map(|(c, changes)| {
trace!(
"### {:?} with size {}: {} with offset {} ###",
c,
c.len_utf8(),
match changes {
0 => "Replacing".into(),
ch if ch > 0 => "Adding".into(),
                        ch if ch < 0 => format!("Replacing + removing {} following chars", -ch),
_ => "Undefined".into(),
},
offset
);
let idx = offset as usize;
let align = if changes.is_positive() {
if idx < 1 {
(0, 0)
} else {
// This is a newly inserted character, so it shares the same alignment
                        // as the previous one
self.alignments[idx - 1]
}
} else {
self.alignments[idx]
};
// If we are replacing a character, find it and compute the change in size
let replaced_char = if !changes.is_positive() {
replaced_normalized.next()
} else {
None
};
let replaced_char_size = replaced_char.map_or(0, |c| c.len_utf8());
let replaced_char_size_change = c.len_utf8() as isize - replaced_char_size as isize;
if let Some(ref replaced_char) = replaced_char {
trace!(
"Replacing char {:?} - with a change in size: {}",
replaced_char,
replaced_char_size_change
);
}
// If we are removing some characters, find them too
let total_bytes_to_remove = if changes.is_negative() {
(&mut replaced_normalized)
.take(-changes as usize)
.map(|c| c.len_utf8())
.sum()
} else {
0
};
trace!("Total bytes to remove: {}", total_bytes_to_remove);
// Keep track of the changes for next offsets
offset += replaced_char_size as isize;
offset += total_bytes_to_remove as isize;
trace!("New offset: {}", offset);
trace!("New normalized alignment: {}x {:?}", c.len_utf8(), align);
alignments.extend((0..c.len_utf8()).map(|_| align));
// Then we keep only the char for string reconstruction
c
})
.collect::<String>();
self.alignments.splice(n_range.clone(), alignments);
// This bounds check already happens above (`self.normalized[n_range.clone()]`), but future
// code could change to mutate `self` or `self.normalized` in the interim.
// Perform it again and hope the optimizer collapses it.
assert!(self.normalized.get(n_range.clone()).is_some());
unsafe {
self.normalized
// Safety: This is safe as long as we do not splice across a
// UTF-8 character, and we only add UTF-8 text. `normalized` is a String
// so the latter is trivially true, and we assert for the former above.
.as_mut_vec()
.splice(n_range, normalized.bytes());
}
}
/// Applies transformations to the current normalized version of the string,
/// while updating the alignments.
    /// This method expects an Iterator yielding each char of the new normalized string
    /// with a `change` isize equal to:
/// - `1` if this is a new char
/// - `-N` if the char is right before N removed chars
/// - `0` if the char is replacing the existing one
///
/// Since it is possible that the normalized string doesn't include some of the characters at
/// the beginning of the original one, we need an `initial_offset` which represents the number
/// of removed chars at the very beginning.
pub fn transform<I>(&mut self, dest: I, initial_offset: usize)
where
I: IntoIterator<Item = (char, isize)>,
{
self.transform_range(Range::Original(..), dest, initial_offset)
}
/// Applies NFD normalization
pub fn nfd(&mut self) -> &mut Self {
self.transform(self.get().to_owned().nfd(), 0);
self
}
/// Applies NFKD normalization
pub fn nfkd(&mut self) -> &mut Self {
self.transform(self.get().to_owned().nfkd(), 0);
self
}
/// Applies NFC normalization
pub fn nfc(&mut self) -> &mut Self {
self.transform(self.get().to_owned().nfc(), 0);
self
}
/// Applies NFKC normalization
pub fn nfkc(&mut self) -> &mut Self {
self.transform(self.get().to_owned().nfkc(), 0);
self
}
/// Applies filtering over our characters
pub fn filter<F: Fn(char) -> bool>(&mut self, keep: F) -> &mut Self {
let mut removed: isize = 0;
let mut removed_start: usize = 0;
let mut transforms = Vec::with_capacity(self.normalized.len());
let mut last_c = None;
for c in self.normalized.chars() {
if keep(c) {
match last_c {
Some(lc) => {
transforms.push((lc, -removed));
}
None => {
removed_start = removed as usize;
}
}
last_c = Some(c);
removed = 0;
} else {
removed += 1;
}
}
if let Some(lc) = last_c {
transforms.push((lc, -removed));
}
self.transform(transforms, removed_start);
self
}
/// Prepend the given string to ourself
pub fn prepend(&mut self, s: &str) -> &mut Self {
if let Some(next) = self.normalized.chars().next() {
let transformations = s
.chars()
.enumerate()
.map(|(i, c)| (c, isize::from(i != 0)))
.chain(std::iter::once((next, 1)));
self.transform_range(Range::Normalized(0..next.len_utf8()), transformations, 0);
}
self
}
/// Append the given string to ourself
pub fn append(&mut self, s: &str) -> &mut Self {
if let Some((b, prev)) = self.normalized.char_indices().last() {
let transformations = std::iter::once((prev, 0)).chain(s.chars().map(|c| (c, 1)));
self.transform_range(Range::Normalized(b..), transformations, 0);
} else {
let transformations = s.chars().map(|c| (c, 1));
self.transform_range(Range::Normalized(..), transformations, 0);
}
self
}
/// Map our characters
pub fn map<F: Fn(char) -> char>(&mut self, map: F) -> &mut Self {
let transformations = self
.normalized
.chars()
.map(|c| (map(c), 0))
.collect::<Vec<_>>();
self.transform(transformations, 0);
self
}
    /// Calls the given function for each character
pub fn for_each<F: FnMut(char)>(&self, foreach: F) -> &Self {
self.normalized.chars().for_each(foreach);
self
}
/// Lowercase
pub fn lowercase(&mut self) -> &mut Self {
let mut new_chars: Vec<(char, isize)> = vec![];
self.for_each(|c| {
c.to_lowercase().enumerate().for_each(|(index, c)| {
new_chars.push((c, isize::from(index > 0)));
})
});
self.transform(new_chars, 0);
self
}
/// Uppercase
pub fn uppercase(&mut self) -> &mut Self {
let mut new_chars: Vec<(char, isize)> = vec![];
self.for_each(|c| {
c.to_uppercase().enumerate().for_each(|(index, c)| {
new_chars.push((c, isize::from(index > 0)));
})
});
self.transform(new_chars, 0);
self
}
/// Replace anything that matches the pattern with the given content.
pub fn replace<P: Pattern>(&mut self, pattern: P, content: &str) -> Result<()> {
let mut new_normalized = String::with_capacity(self.normalized.len()); // Initially allocate for the input size
let mut new_alignments: Vec<(usize, usize)> = Vec::with_capacity(self.alignments.len());
let mut last_end = 0; // Keep track of the last end position
pattern
.find_matches(&self.normalized)?
.into_iter()
.for_each(|((start, end), is_match)| {
if is_match {
let range = start..end;
let mut new_len = 0;
let removed_chars = self.normalized[range.clone()].chars().count();
/* The following code is equivalent to this call, but computationally much more efficient
self.transform_range(
Range::Normalized(range),
content.chars().map(|c| {
new_len += c.len_utf8();
(c, 1)
}),
removed_chars,
); */
// Copy the part of the string that is before the match
new_normalized.push_str(&self.normalized[last_end..start]);
new_alignments.extend(self.alignments[last_end..start].iter().cloned());
let n_range = Range::Normalized(range).into_full_range(self.len());
                    // Retrieve the original characters that are being replaced. This lets us
// compute the change in byte sizes along the way.
let mut replaced_normalized = self.normalized[n_range.clone()]
.chars()
.collect::<Vec<_>>()
.into_iter();
let initial_removed: usize = (&mut replaced_normalized)
.take(removed_chars)
.map(|c| c.len_utf8())
.sum();
let dest = content.chars().map(|c| {
new_len += c.len_utf8();
(c, 1)
});
let mut offset = (initial_removed + n_range.start) as isize;
let normalized = dest
.into_iter()
.map(|(c, changes): (char, i32)| {
let idx = offset as usize;
let align = if changes.is_positive() {
if idx < 1 {
(0, 0)
} else {
// This is a newly inserted character, so it shares the same alignment
                                // as the previous one
self.alignments[idx - 1]
}
} else {
self.alignments[idx]
};
// If we are replacing a character, find it and compute the change in size
let replaced_char = if !changes.is_positive() {
replaced_normalized.next()
} else {
None
};
let replaced_char_size = replaced_char.map_or(0, |c| c.len_utf8());
// If we are removing some characters, find them too
let total_bytes_to_remove = if changes.is_negative() {
(&mut replaced_normalized)
.take(-changes as usize)
.map(|c| c.len_utf8())
.sum()
} else {
0
};
// Keep track of the changes for next offsets
offset += replaced_char_size as isize;
offset += total_bytes_to_remove as isize;
new_alignments.extend((0..c.len_utf8()).map(|_| align));
// Then we keep only the char for string reconstruction
c
})
.collect::<String>();
new_normalized.push_str(&normalized);
last_end = end;
}
});
// Copy the remaining part of the input
new_normalized.push_str(&self.normalized[last_end..]);
new_alignments.extend(&self.alignments[last_end..]);
self.normalized = new_normalized;
self.alignments = new_alignments;
Ok(())
}
/// Clear the normalized part of the string
pub fn clear(&mut self) -> usize {
let len = self.len();
self.transform(std::iter::empty(), len);
len
}
    /// Split the current string into many subparts. Specify what to do with the
    /// delimiter.
    ///
    /// ## Splitting Behavior for the delimiter
    ///
    /// The behavior can be one of the following.
    /// When splitting on `'-'` for example, with input `the-final--countdown`:
    /// - Removed => `[ "the", "final", "countdown" ]`
    /// - Isolated => `[ "the", "-", "final", "-", "-", "countdown" ]`
    /// - MergedWithPrevious => `[ "the-", "final-", "-", "countdown" ]`
    /// - MergedWithNext => `[ "the", "-final", "-", "-countdown" ]`
    /// - Contiguous => `[ "the", "-", "final", "--", "countdown" ]`
pub fn split<P: Pattern>(
&self,
pattern: P,
behavior: SplitDelimiterBehavior,
) -> Result<Vec<NormalizedString>> {
let matches = pattern.find_matches(&self.normalized)?;
// Process the matches according to the selected behavior: Vec<(Offsets, should_remove)>
use SplitDelimiterBehavior::*;
let splits = match behavior {
Isolated => matches
.into_iter()
.map(|(offsets, _)| (offsets, false))
.collect(),
Removed => matches,
Contiguous => {
let mut previous_match = false;
matches
.into_iter()
.fold(vec![], |mut acc, (offsets, is_match)| {
if is_match == previous_match {
if let Some(((_, end), _)) = acc.last_mut() {
*end = offsets.1;
} else {
acc.push((offsets, false));
}
} else {
acc.push((offsets, false));
}
previous_match = is_match;
acc
})
}
MergedWithPrevious => {
let mut previous_match = false;
matches
.into_iter()
.fold(vec![], |mut acc, (offsets, is_match)| {
if is_match && !previous_match {
if let Some(((_, end), _)) = acc.last_mut() {
*end = offsets.1;
} else {
acc.push((offsets, false));
}
} else {
acc.push((offsets, false));
}
previous_match = is_match;
acc
})
}
MergedWithNext => {
let mut previous_match = false;
let mut matches =
matches
.into_iter()
.rev()
.fold(vec![], |mut acc, (offsets, is_match)| {
if is_match && !previous_match {
if let Some(((start, _), _)) = acc.last_mut() {
*start = offsets.0;
} else {
acc.push((offsets, false));
}
} else {
acc.push((offsets, false));
}
previous_match = is_match;
acc
});
matches.reverse();
matches
}
};
// Then we split according to the computed splits
Ok(splits
.into_iter()
.filter_map(|(offsets, remove)| {
if !remove {
Some(
self.slice(Range::Normalized(offsets.0..offsets.1))
.expect("NormalizedString bad split"),
)
} else {
None
}
})
.collect())
}
    /// Remove any leading whitespace from the normalized string
pub fn lstrip(&mut self) -> &mut Self {
self.lrstrip(true, false)
}
    /// Remove any trailing whitespace from the normalized string
pub fn rstrip(&mut self) -> &mut Self {
self.lrstrip(false, true)
}
    /// Remove any leading and trailing whitespace from the normalized string
pub fn strip(&mut self) -> &mut Self {
self.lrstrip(true, true)
}
fn lrstrip(&mut self, left: bool, right: bool) -> &mut Self {
let leading_spaces = if left {
self.get().chars().take_while(|c| c.is_whitespace()).count()
} else {
0
};
let trailing_spaces = if right {
self.get()
.chars()
.rev()
.take_while(|c| c.is_whitespace())
.count()
} else {
0
};
if leading_spaces > 0 || trailing_spaces > 0 {
let count = self.get().chars().count();
let transformation = self
.normalized
.chars()
.enumerate()
.filter_map(|(i, c)| {
if i < leading_spaces || i >= count - trailing_spaces {
None
} else if i == self.len() - trailing_spaces - 1 {
Some((c, -(trailing_spaces as isize)))
} else {
Some((c, 0))
}
})
.collect::<Vec<_>>();
self.transform(transformation, leading_spaces);
}
self
}
    /// Returns the length of the normalized string (in bytes, not chars)
pub fn len(&self) -> usize {
self.normalized.len()
}
    /// Returns the length of the original string (in bytes, not chars)
pub fn len_original(&self) -> usize {
self.original.len()
}
    /// Whether the normalized string is empty
pub fn is_empty(&self) -> bool {
self.normalized.is_empty()
}
/// Recalculate original alignments
#[allow(dead_code)]
pub(crate) fn alignments_original(&self) -> Vec<(usize, usize)> {
// Start, end are in alignments
// offset, length are in alignments_original
let mut alignments_original = Vec::with_capacity(self.original.len());
        // Possible gap before the first group
let start = self.alignments[0].0;
if start != 0 {
alignments_original.extend(vec![(0, 0); start]);
}
let mut last = (&self.alignments[0].0, &self.alignments[0].1);
let mut offset = 0;
let mut length = 0;
for (start, end) in &self.alignments {
if last == (start, end) {
// This is the same group
length += 1;
} else {
// This is a new group
if start < last.1 {
panic!("We can't have overlapping ranges.");
}
// Add the old group
alignments_original.extend(vec![(offset, offset + length); last.1 - last.0]);
offset += length;
length = 1;
                // Possible gap between the two groups
alignments_original.extend(vec![(offset, offset); start - last.1]);
}
last = (start, end);
}
// Add the last group
alignments_original.extend(vec![(offset, offset + length); last.1 - last.0]);
        // Add a possible trailing gap
offset += length;
alignments_original.extend(vec![
(offset, offset);
self.original.len() - alignments_original.len()
]);
// assert_eq!(alignments_original.len(), self.original.len());
alignments_original
}
}
/// Returns the range covered by a slice of alignments
fn expand_alignments(alignments: &[(usize, usize)]) -> Option<std::ops::Range<usize>> {
if alignments.is_empty() {
None
} else {
let start = alignments[0].0;
let end = alignments[alignments.len() - 1].1;
Some(start..end)
}
}
/// Returns a range of the given string slice, by indexing chars instead of bytes
pub fn get_range_of<T: RangeBounds<usize>>(s: &str, range: T) -> Option<&str> {
let len = s.chars().count();
let start = match range.start_bound() {
Bound::Unbounded => 0,
Bound::Included(i) => *i,
Bound::Excluded(i) => *i + 1,
};
let end = match range.end_bound() {
Bound::Unbounded => len,
Bound::Included(i) => *i + 1,
Bound::Excluded(i) => *i,
};
if start == 0 && end == 0 {
Some(&s[0..0])
} else if start >= len || end > len || start >= end {
None
} else {
let start_b = s.char_indices().map(|(i, _)| i).nth(start).unwrap_or(0);
let end_b = s.char_indices().map(|(i, _)| i).nth(end).unwrap_or(s.len());
Some(&s[start_b..end_b])
}
}
/// Convert the given range from byte indices to char indices
pub fn bytes_to_char(s: &str, range: std::ops::Range<usize>) -> Option<std::ops::Range<usize>> {
let (mut start, mut end) = if range == (0..0) {
(Some(0), Some(0))
} else {
(None, None)
};
s.char_indices()
.enumerate()
.take_while(|(_, (b, _))| *b <= range.end)
.filter(|(_, (b, _))| *b >= range.start)
.for_each(|(i, (b, c))| {
if b == range.start {
start = Some(i);
}
if b == range.end {
end = Some(i);
}
if b + c.len_utf8() == range.end {
end = Some(i + 1);
}
});
Some(start?..end?)
}
/// Convert the given range from char indices to byte indices
pub fn char_to_bytes(s: &str, range: std::ops::Range<usize>) -> Option<std::ops::Range<usize>> {
let (mut start, mut end) = if range == (0..0) {
(Some(0), Some(0))
} else {
(None, None)
};
if range.start == range.end {
s.char_indices()
.skip(range.start)
.take(1)
.for_each(|(b, _)| {
start = Some(b);
end = Some(b);
});
} else {
s.char_indices()
.skip(range.start)
.take(range.end - range.start)
.for_each(|(b, c)| {
if start.is_none() {
start = Some(b);
}
end = Some(b + c.len_utf8());
});
}
Some(start?..end?)
}
impl From<String> for NormalizedString {
fn from(s: String) -> Self {
let alignments = s
.char_indices()
.flat_map(|(b, c)| {
let len = c.len_utf8();
(0..len).map(move |_| (b, b + len))
})
.collect::<Vec<_>>();
Self {
original: s.clone(),
normalized: s,
alignments,
original_shift: 0,
}
}
}
impl From<&str> for NormalizedString {
fn from(s: &str) -> Self {
Self::from(s.to_owned())
}
}
#[cfg(test)]
mod tests {
use super::*;
use regex::Regex;
use unicode_categories::UnicodeCategories;
#[test]
fn test_len_range_inclusive() {
let range = Range::Original(3..=7);
let len = range.len();
assert_eq!(len, Some(5)); // 7 - 3 + 1 = 5
}
#[test]
fn test_len_range_exclusive() {
let range = Range::Original(3..7);
let len = range.len();
assert_eq!(len, Some(4)); // 7 - 3 = 4
}
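    // A small sketch of `into_full_range`: unbounded or inclusive bounds are resolved into a
    // plain `std::ops::Range`, with `max_len` filling in a missing end bound.
    #[test]
    fn test_into_full_range() {
        assert_eq!(Range::Normalized(..).into_full_range(7), 0..7);
        assert_eq!(Range::Original(3..).into_full_range(7), 3..7);
        assert_eq!(Range::Original(..=4).into_full_range(7), 0..5);
    }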
#[test]
fn nfd_adds_new_chars() {
let mut n = NormalizedString::from("élégant");
n.nfd();
assert_eq!(
&n.alignments,
&[
(0, 2),
(0, 2),
(0, 2),
(2, 3),
(3, 5),
(3, 5),
(3, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9)
]
);
assert_eq!(
n.alignments_original(),
vec![
(0, 3),
(0, 3),
(3, 4),
(4, 7),
(4, 7),
(7, 8),
(8, 9),
(9, 10),
(10, 11)
]
);
}
#[test]
fn remove_chars_added_by_nfd() {
let mut n = NormalizedString::from("élégant");
n.nfd().filter(|c| !c.is_mark_nonspacing());
assert_eq!(n.get(), "elegant");
assert_eq!(
&n.alignments,
&[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (7, 8), (8, 9)]
);
assert_eq!(
n.alignments_original(),
vec![
(0, 1),
(0, 1),
(1, 2),
(2, 3),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7)
]
);
}
#[test]
fn remove_chars() {
let mut n = NormalizedString::from("élégant");
n.filter(|c| c != 'n');
assert_eq!(n.get(), "élégat");
assert_eq!(
&n.alignments,
&[
(0, 2),
(0, 2),
(2, 3),
(3, 5),
(3, 5),
(5, 6),
(6, 7),
// Skipped range
(8, 9)
]
);
assert_eq!(
n.alignments_original(),
vec![
(0, 2),
(0, 2),
(2, 3),
(3, 5),
(3, 5),
(5, 6),
(6, 7),
(7, 7), // Eaten n
(7, 8)
]
);
}
#[test]
fn mixed_addition_and_removal() {
let mut n = NormalizedString::from("élégant");
n.nfd().filter(|c| !c.is_mark_nonspacing() && c != 'n');
assert_eq!(n.get(), "elegat");
assert_eq!(
&n.alignments,
&[(0, 2), (2, 3), (3, 5), (5, 6), (6, 7), (8, 9)]
);
assert_eq!(
n.alignments_original(),
vec![
(0, 1),
(0, 1),
(1, 2),
(2, 3),
(2, 3),
(3, 4), // g
(4, 5), // a
(5, 5), // Eaten n
(5, 6)
]
);
}
#[test]
fn range_conversion() {
let mut n = NormalizedString::from(" __Hello__ ");
n.filter(|c| !c.is_whitespace()).lowercase();
let hello_n = n.convert_offsets(Range::Original(6..11));
assert_eq!(hello_n, Some(2..7));
assert_eq!(
n.get_range(Range::Normalized(hello_n.clone().unwrap())),
Some("hello")
);
assert_eq!(
n.get_range_original(Range::Normalized(hello_n.unwrap())),
Some("Hello")
);
assert_eq!(n.get_range(Range::Original(6..11)), Some("hello"));
assert_eq!(n.get_range_original(Range::Original(6..11)), Some("Hello"));
// Make sure we get None only in specific cases
assert_eq!(n.convert_offsets(Range::Original(0..0)), Some(0..0));
assert_eq!(n.convert_offsets(Range::Original(3..3)), Some(3..3));
assert_eq!(n.convert_offsets(Range::Original(15..)), Some(9..9));
assert_eq!(n.convert_offsets(Range::Original(16..)), Some(16..16));
assert_eq!(n.convert_offsets(Range::Original(17..)), None);
assert_eq!(n.convert_offsets(Range::Normalized(0..0)), Some(0..0));
assert_eq!(n.convert_offsets(Range::Normalized(3..3)), Some(3..3));
assert_eq!(n.convert_offsets(Range::Normalized(9..)), Some(9..9));
assert_eq!(n.convert_offsets(Range::Normalized(10..)), None);
}
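    // A sketch of a one-to-many casing expansion, assuming Rust's Unicode tables map 'ß' to
    // "SS": both uppercase chars share the alignment of the single original char.
    #[test]
    fn uppercase_expands_char() {
        let mut n = NormalizedString::from("ß");
        n.uppercase();
        assert_eq!(n.get(), "SS");
        assert_eq!(n.get_range_original(Range::Normalized(0..2)), Some("ß"));
    }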
#[test]
fn original_range() {
let mut n = NormalizedString::from("Hello_______ World!");
n.filter(|c| c != '_').lowercase();
let world_n = n.get_range(Range::Normalized(6..11)).unwrap();
let world_o = n.get_range_original(Range::Normalized(6..11)).unwrap();
assert_eq!(world_n, "world");
assert_eq!(world_o, "World");
let original_range = Range::Original(n.convert_offsets(Range::Normalized(6..11)).unwrap());
assert_eq!(n.get_range(original_range.clone()).unwrap(), "world");
assert_eq!(
n.get_range_original(original_range.clone()).unwrap(),
"World"
);
assert_eq!(original_range.into_full_range(n.len_original()), 13..18);
}
#[test]
fn added_around_edges() {
let mut n = NormalizedString::from("Hello");
n.transform(
vec![
(' ', 1),
('H', 0),
('e', 0),
('l', 0),
('l', 0),
('o', 0),
(' ', 1),
],
0,
);
assert_eq!(&n.normalized, " Hello ");
assert_eq!(
n.get_range_original(Range::Normalized(1..n.normalized.len() - 1)),
Some("Hello")
);
}
#[test]
fn added_characters_alignment() {
let mut n = NormalizedString::from("野口 No");
n.transform(
n.get().to_owned().chars().flat_map(|c| {
if (c as usize) > 0x4E00 {
vec![(' ', 0), (c, 1), (' ', 1)]
} else {
vec![(c, 0)]
}
}),
0,
);
assert_eq!(
n,
NormalizedString {
original: "野口 No".into(),
normalized: " 野 口 No".into(),
alignments: vec![
(0, 3),
(0, 3),
(0, 3),
(0, 3),
(0, 3),
(3, 6),
(3, 6),
(3, 6),
(3, 6),
(3, 6),
(6, 7),
(7, 8),
(8, 9)
],
original_shift: 0
}
);
assert_eq!(
n.alignments_original(),
vec![
(0, 5),
(0, 5),
(0, 5),
(5, 10),
(5, 10),
(5, 10),
(10, 11),
(11, 12),
(12, 13)
]
);
}
#[test]
fn remove_at_beginning() {
let mut n = NormalizedString::from(" Hello");
n.filter(|c| !c.is_whitespace());
assert_eq!(
n.get_range_original(Range::Normalized(1.."Hello".len())),
Some("ello")
);
assert_eq!(
n.get_range_original(Range::Normalized(0..n.normalized.len())),
Some("Hello")
);
}
#[test]
fn remove_at_end() {
let mut n = NormalizedString::from("Hello ");
n.filter(|c| !c.is_whitespace());
assert_eq!(n.get_range_original(Range::Normalized(0..4)), Some("Hell"));
assert_eq!(
n.get_range_original(Range::Normalized(0..n.normalized.len())),
Some("Hello")
);
}
#[test]
fn removed_around_both_edges() {
let mut n = NormalizedString::from(" Hello ");
n.filter(|c| !c.is_whitespace());
assert_eq!(&n.normalized, "Hello");
assert_eq!(
n.get_range_original(Range::Normalized(0.."Hello".len())),
Some("Hello")
);
assert_eq!(
n.get_range_original(Range::Normalized(1.."Hell".len())),
Some("ell")
);
}
#[test]
fn lstrip() {
let mut n = NormalizedString::from(" This is an example ");
n.lstrip();
assert_eq!(&n.normalized, "This is an example ");
assert_eq!(
n.get_range_original(Range::Normalized(0..n.normalized.len())),
Some("This is an example ")
);
}
#[test]
fn rstrip() {
let mut n = NormalizedString::from(" This is an example ");
n.rstrip();
assert_eq!(&n.normalized, " This is an example");
assert_eq!(
n.get_range_original(Range::Normalized(0..n.normalized.len())),
Some(" This is an example")
);
}
#[test]
fn strip() {
let mut n = NormalizedString::from(" This is an example ");
n.strip();
assert_eq!(&n.normalized, "This is an example");
assert_eq!(
n.get_range_original(Range::Normalized(0..n.normalized.len())),
Some("This is an example")
);
}
#[test]
fn strip_unicode() {
let mut n = NormalizedString::from(" 你好asa \n");
n.strip();
assert_eq!(&n.normalized, "你好asa");
assert_eq!(
n.get_range_original(Range::Normalized(0..n.normalized.len())),
Some("你好asa")
);
}
#[test]
fn prepend() {
let mut n = NormalizedString::from("there");
n.prepend("Hey ");
assert_eq!(&n.normalized, "Hey there");
assert_eq!(
n.alignments,
vec![
(0, 1),
(0, 1),
(0, 1),
(0, 1),
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5)
]
);
assert_eq!(n.convert_offsets(Range::Normalized(0..4)), Some(0..1));
}
#[test]
fn append() {
let mut n = NormalizedString::from("Hey");
n.append(" there");
assert_eq!(&n.normalized, "Hey there");
assert_eq!(
n.alignments,
vec![
(0, 1),
(1, 2),
(2, 3),
(2, 3),
(2, 3),
(2, 3),
(2, 3),
(2, 3),
(2, 3)
]
);
assert_eq!(
n.convert_offsets(Range::Normalized(3.." there".len())),
Some(2..3)
);
}
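    // A small sketch of an edge case visible in `prepend`/`append` above: `prepend` anchors its
    // transformations to the first existing char, so it is a no-op on an empty string, while
    // `append` handles the empty case explicitly.
    #[test]
    fn prepend_and_append_on_empty() {
        let mut n = NormalizedString::from("");
        n.prepend("▁");
        assert_eq!(n.get(), "");
        n.append("▁");
        assert_eq!(n.get(), "▁");
    }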
#[test]
fn get_range() {
let s = String::from("Hello my name is John 👋");
assert_eq!(get_range_of(&s, ..), Some(&s[..]));
assert_eq!(get_range_of(&s, 17..), Some("John 👋"));
}
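    // A short sketch of the byte<->char helpers on a string with multi-byte chars:
    // in "été", 'é' takes two bytes, so char index 1 starts at byte 2, and a byte range that
    // cuts through a char maps to no char range at all.
    #[test]
    fn byte_char_conversion() {
        let s = "été";
        assert_eq!(char_to_bytes(s, 0..1), Some(0..2));
        assert_eq!(char_to_bytes(s, 1..3), Some(2..5));
        assert_eq!(bytes_to_char(s, 2..5), Some(1..3));
        assert_eq!(bytes_to_char(s, 1..3), None);
    }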
#[test]
fn slice() {
let mut s = NormalizedString::from("𝔾𝕠𝕠𝕕 𝕞𝕠𝕣𝕟𝕚𝕟𝕘");
s.nfkc();
let original_slice = s.slice(Range::Original(0..4)).unwrap();
assert_eq!(original_slice.get(), "G");
assert_eq!(original_slice.get_original(), "𝔾");
let normalized_slice = s.slice(Range::Normalized(0..4)).unwrap();
assert_eq!(normalized_slice.get(), "Good");
assert_eq!(normalized_slice.get_original(), "𝔾𝕠𝕠𝕕");
// Make sure the sliced NormalizedString is still aligned as expected
let mut s = NormalizedString::from(" Good Morning! ");
s.strip();
// If we keep the whole slice
let slice = s.slice(Range::Original(..)).unwrap();
assert_eq!(
slice.get_range_original(Range::Normalized(0..4)),
Some("Good")
);
let slice = s.slice(Range::Normalized(..)).unwrap();
assert_eq!(
slice.get_range_original(Range::Normalized(0..4)),
Some("Good")
);
// If we keep after the modified piece
let slice = s.slice(Range::Original(4..15)).unwrap();
assert_eq!(
slice.get_range_original(Range::Normalized(0..3)),
Some("ood")
);
// If we keep only the modified piece
let slice = s.slice(Range::Original(3..16)).unwrap();
assert_eq!(
slice.get_range_original(Range::Normalized(0..4)),
Some("Good")
);
}
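    // A sketch of `offsets_original` on a slice, assuming no normalization was applied:
    // the slice records its shift into the parent, so offsets stay in the parent referential.
    #[test]
    fn offsets_original_of_slice() {
        let s = NormalizedString::from("Hello friend");
        let slice = s.slice(Range::Original(6..12)).unwrap();
        assert_eq!(slice.get(), "friend");
        assert_eq!(slice.get_original(), "friend");
        assert_eq!(slice.offsets_original(), (6, 12));
    }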
#[test]
fn replace() {
// Simple
let mut s = NormalizedString::from(" Hello friend ");
s.replace(' ', "_").unwrap();
assert_eq!(s.get(), "_Hello___friend_");
let mut s = NormalizedString::from("aaaab");
s.replace('a', "b").unwrap();
assert_eq!(s.get(), "bbbbb");
// Overlapping
let mut s = NormalizedString::from("aaaab");
s.replace("aaa", "b").unwrap();
assert_eq!(s.get(), "bab");
// Regex
let mut s = NormalizedString::from(" Hello friend ");
let re = Regex::new(r"\s+").unwrap();
s.replace(&re, "_").unwrap();
assert_eq!(s.get(), "_Hello_friend_");
}
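    // A hedged sketch: after `replace`, offsets of the rewritten normalized string can still
    // be mapped back to the original string through the updated alignments.
    #[test]
    fn replace_keeps_alignments() {
        let mut s = NormalizedString::from("Hello friend");
        s.replace(' ', "_").unwrap();
        assert_eq!(s.get(), "Hello_friend");
        assert_eq!(s.get_range_original(Range::Normalized(6..12)), Some("friend"));
        assert_eq!(s.get_range_original(Range::Normalized(5..6)), Some(" "));
    }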
#[test]
fn split() {
use SplitDelimiterBehavior::*;
let s = NormalizedString::from("The-final--countdown");
let test = |behavior: SplitDelimiterBehavior, result: Vec<&str>| {
let splits = s.split('-', behavior).unwrap();
assert_eq!(splits.iter().map(|n| n.get()).collect::<Vec<_>>(), result);
};
test(Removed, vec!["The", "final", "countdown"]);
test(Isolated, vec!["The", "-", "final", "-", "-", "countdown"]);
test(MergedWithPrevious, vec!["The-", "final-", "-", "countdown"]);
test(MergedWithNext, vec!["The", "-final", "-", "-countdown"]);
test(Contiguous, vec!["The", "-", "final", "--", "countdown"]);
}
#[test]
fn transform_range_single_bytes() {
let s = NormalizedString::from("Hello friend");
// Removing at the beginning
let mut current = s.clone();
current.transform_range(Range::Original(0..4), vec![('Y', 0)], 3);
assert_eq!(
current,
NormalizedString {
original: "Hello friend".into(),
normalized: "Yo friend".into(),
alignments: vec![
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(9, 10),
(10, 11),
(11, 12)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(0, 0),
(0, 0),
(0, 0),
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9)
]
);
// Removing in the middle
let mut current = s.clone();
current.transform_range(
Range::Original(3..10),
vec![('_', 0), ('F', 0), ('R', -2)],
2,
);
assert_eq!(
current,
NormalizedString {
original: "Hello friend".into(),
normalized: "Hel_FRnd".into(),
alignments: vec![
(0, 1),
(1, 2),
(2, 3),
(5, 6),
(6, 7),
(7, 8),
(10, 11),
(11, 12)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(0, 1),
(1, 2),
(2, 3),
(3, 3),
(3, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 6),
(6, 6),
(6, 7),
(7, 8)
]
);
// Removing at the end
let mut current = s.clone();
current.transform_range(Range::Original(5..), vec![('_', 0), ('F', -5)], 0);
assert_eq!(
current,
NormalizedString {
original: "Hello friend".into(),
normalized: "Hello_F".into(),
alignments: vec![(0, 1), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 7),
(7, 7),
(7, 7),
(7, 7),
(7, 7)
]
);
// Adding at the beginning
let mut current = s.clone();
current.transform_range(Range::Original(0..1), vec![('H', 1), ('H', 0)], 0);
assert_eq!(
current,
NormalizedString {
original: "Hello friend".into(),
normalized: "HHello friend".into(),
alignments: vec![
(0, 0),
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(9, 10),
(10, 11),
(11, 12)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(9, 10),
(10, 11),
(11, 12),
(12, 13)
]
);
// Equivalent to the previous one
let mut current = s.clone();
current.transform_range(Range::Original(0..0), vec![('H', 1)], 0);
assert_eq!(
current,
NormalizedString {
original: "Hello friend".into(),
normalized: "HHello friend".into(),
alignments: vec![
(0, 0),
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(9, 10),
(10, 11),
(11, 12)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(9, 10),
(10, 11),
(11, 12),
(12, 13)
]
);
// Adding as part of the first character
let mut current = s.clone();
current.transform_range(Range::Original(0..1), vec![('H', 0), ('H', 1)], 0);
assert_eq!(
current,
NormalizedString {
original: "Hello friend".into(),
normalized: "HHello friend".into(),
alignments: vec![
(0, 1),
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(9, 10),
(10, 11),
(11, 12)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(0, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(9, 10),
(10, 11),
(11, 12),
(12, 13)
]
);
// Adding in the middle
let mut current = s.clone();
current.transform_range(
Range::Original(5..6),
vec![('_', 0), ('m', 1), ('y', 1), ('_', 1)],
0,
);
assert_eq!(
current,
NormalizedString {
original: "Hello friend".into(),
normalized: "Hello_my_friend".into(),
alignments: vec![
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(5, 6),
(5, 6),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(9, 10),
(10, 11),
(11, 12)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 9),
(9, 10),
(10, 11),
(11, 12),
(12, 13),
(13, 14),
(14, 15)
]
);
// Adding at the end
let mut current = s;
current.transform_range(Range::Original(11..), vec![('d', 0), ('_', 1), ('!', 1)], 0);
assert_eq!(
current,
NormalizedString {
original: "Hello friend".into(),
normalized: "Hello friend_!".into(),
alignments: vec![
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(9, 10),
(10, 11),
(11, 12),
(11, 12),
(11, 12)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(4, 5),
(5, 6),
(6, 7),
(7, 8),
(8, 9),
(9, 10),
(10, 11),
(11, 14)
]
);
}
#[test]
fn transform_range_multiple_bytes() {
let s = NormalizedString::from("𝔾𝕠𝕠𝕕");
// Removing at the beginning
let mut current = s.clone();
current.transform_range(Range::Original(0..8), vec![('G', -1)], 0);
assert_eq!(
current,
NormalizedString {
original: "𝔾𝕠𝕠𝕕".into(),
normalized: "G𝕠𝕕".into(),
alignments: vec![
(0, 4),
(8, 12),
(8, 12),
(8, 12),
(8, 12),
(12, 16),
(12, 16),
(12, 16),
(12, 16)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(0, 1),
(0, 1),
(0, 1),
(0, 1),
(1, 1),
(1, 1),
(1, 1),
(1, 1),
(1, 5),
(1, 5),
(1, 5),
(1, 5),
(5, 9),
(5, 9),
(5, 9),
(5, 9)
]
);
assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "G");
assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "G");
assert_eq!(
current.get_range_original(Range::Original(0..4)).unwrap(),
"𝔾"
);
assert_eq!(
current.get_range_original(Range::Original(0..8)).unwrap(),
"𝔾𝕠"
);
// Removing in the middle
let mut current = s.clone();
current.transform_range(Range::Original(4..12), vec![('o', -1)], 0);
assert_eq!(
current,
NormalizedString {
original: "𝔾𝕠𝕠𝕕".into(),
normalized: "𝔾o𝕕".into(),
alignments: vec![
(0, 4),
(0, 4),
(0, 4),
(0, 4),
(4, 8),
(12, 16),
(12, 16),
(12, 16),
(12, 16)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(0, 4),
(0, 4),
(0, 4),
(0, 4),
(4, 5),
(4, 5),
(4, 5),
(4, 5),
(5, 5),
(5, 5),
(5, 5),
(5, 5),
(5, 9),
(5, 9),
(5, 9),
(5, 9)
]
);
// Removing at the end
let mut current = s.clone();
current.transform_range(Range::Original(12..), vec![('d', 0), ('!', 1)], 0);
assert_eq!(
current,
NormalizedString {
original: "𝔾𝕠𝕠𝕕".into(),
normalized: "𝔾𝕠𝕠d!".into(),
alignments: vec![
(0, 4),
(0, 4),
(0, 4),
(0, 4),
(4, 8),
(4, 8),
(4, 8),
(4, 8),
(8, 12),
(8, 12),
(8, 12),
(8, 12),
(12, 16),
(12, 16)
],
original_shift: 0,
}
);
// Adding at the beginning
let mut current = s.clone();
current.transform_range(Range::Original(0..4), vec![('_', 1), ('𝔾', 0)], 0);
assert_eq!(
current,
NormalizedString {
original: "𝔾𝕠𝕠𝕕".into(),
normalized: "_𝔾𝕠𝕠𝕕".into(),
alignments: vec![
(0, 0),
(0, 4),
(0, 4),
(0, 4),
(0, 4),
(4, 8),
(4, 8),
(4, 8),
(4, 8),
(8, 12),
(8, 12),
(8, 12),
(8, 12),
(12, 16),
(12, 16),
(12, 16),
(12, 16)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(1, 5),
(1, 5),
(1, 5),
(1, 5),
(5, 9),
(5, 9),
(5, 9),
(5, 9),
(9, 13),
(9, 13),
(9, 13),
(9, 13),
(13, 17),
(13, 17),
(13, 17),
(13, 17)
]
);
assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "𝔾𝕠");
assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "𝔾");
assert_eq!(
current.get_range_original(Range::Original(0..4)).unwrap(),
"𝔾"
);
assert_eq!(
current.get_range_original(Range::Original(0..8)).unwrap(),
"𝔾𝕠"
);
// Equivalent to the previous one
let mut current = s.clone();
current.transform_range(Range::Original(0..0), vec![('_', 1)], 0);
assert_eq!(
current,
NormalizedString {
original: "𝔾𝕠𝕠𝕕".into(),
normalized: "_𝔾𝕠𝕠𝕕".into(),
alignments: vec![
(0, 0),
(0, 4),
(0, 4),
(0, 4),
(0, 4),
(4, 8),
(4, 8),
(4, 8),
(4, 8),
(8, 12),
(8, 12),
(8, 12),
(8, 12),
(12, 16),
(12, 16),
(12, 16),
(12, 16)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(1, 5),
(1, 5),
(1, 5),
(1, 5),
(5, 9),
(5, 9),
(5, 9),
(5, 9),
(9, 13),
(9, 13),
(9, 13),
(9, 13),
(13, 17),
(13, 17),
(13, 17),
(13, 17)
]
);
assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "𝔾𝕠");
assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "𝔾");
assert_eq!(
current.get_range_original(Range::Original(0..4)).unwrap(),
"𝔾"
);
assert_eq!(
current.get_range_original(Range::Original(0..8)).unwrap(),
"𝔾𝕠"
);
// Adding as part of the first character
let mut current = s.clone();
current.transform_range(Range::Original(0..4), vec![('𝔾', 0), ('o', 1)], 0);
assert_eq!(
current,
NormalizedString {
original: "𝔾𝕠𝕠𝕕".into(),
normalized: "𝔾o𝕠𝕠𝕕".into(),
alignments: vec![
(0, 4),
(0, 4),
(0, 4),
(0, 4),
(0, 4),
(4, 8),
(4, 8),
(4, 8),
(4, 8),
(8, 12),
(8, 12),
(8, 12),
(8, 12),
(12, 16),
(12, 16),
(12, 16),
(12, 16)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(0, 5),
(0, 5),
(0, 5),
(0, 5),
(5, 9),
(5, 9),
(5, 9),
(5, 9),
(9, 13),
(9, 13),
(9, 13),
(9, 13),
(13, 17),
(13, 17),
(13, 17),
(13, 17)
]
);
assert_eq!(current.get_range(Range::Original(0..8)).unwrap(), "𝔾o𝕠");
assert_eq!(current.get_range(Range::Original(0..4)).unwrap(), "𝔾o");
assert_eq!(
current.get_range_original(Range::Original(0..4)).unwrap(),
"𝔾"
);
assert_eq!(
current.get_range_original(Range::Original(0..8)).unwrap(),
"𝔾𝕠"
);
// Adding in the middle
let mut current = s.clone();
current.transform_range(
Range::Original(4..8),
vec![('𝕠', 0), ('o', 1), ('o', 1), ('o', 1)],
0,
);
assert_eq!(
current,
NormalizedString {
original: "𝔾𝕠𝕠𝕕".into(),
normalized: "𝔾𝕠ooo𝕠𝕕".into(),
alignments: vec![
(0, 4),
(0, 4),
(0, 4),
(0, 4),
(4, 8),
(4, 8),
(4, 8),
(4, 8),
(4, 8),
(4, 8),
(4, 8),
(8, 12),
(8, 12),
(8, 12),
(8, 12),
(12, 16),
(12, 16),
(12, 16),
(12, 16)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(0, 4),
(0, 4),
(0, 4),
(0, 4),
(4, 11),
(4, 11),
(4, 11),
(4, 11),
(11, 15),
(11, 15),
(11, 15),
(11, 15),
(15, 19),
(15, 19),
(15, 19),
(15, 19)
]
);
// Adding at the end
let mut current = s;
current.transform_range(Range::Original(16..), vec![('!', 1)], 0);
assert_eq!(
current,
NormalizedString {
original: "𝔾𝕠𝕠𝕕".into(),
normalized: "𝔾𝕠𝕠𝕕!".into(),
alignments: vec![
(0, 4),
(0, 4),
(0, 4),
(0, 4),
(4, 8),
(4, 8),
(4, 8),
(4, 8),
(8, 12),
(8, 12),
(8, 12),
(8, 12),
(12, 16),
(12, 16),
(12, 16),
(12, 16),
(12, 16)
],
original_shift: 0,
}
);
assert_eq!(
current.alignments_original(),
vec![
(0, 4),
(0, 4),
(0, 4),
(0, 4),
(4, 8),
(4, 8),
(4, 8),
(4, 8),
(8, 12),
(8, 12),
(8, 12),
(8, 12),
(12, 17),
(12, 17),
(12, 17),
(12, 17)
]
);
}
#[test]
fn transform_check() {
let mut s = NormalizedString::from("abc…");
s.nfkd();
let transforms = vec![('a', -2), ('.', 0), ('.', 0), ('.', 0)];
s.transform(transforms, 0);
s.lowercase();
assert_eq!(s.get(), "a...");
}
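    // A sketch of the `change` convention taken by `transform`: a char with change `-N`
    // replaces the current char and swallows the N chars that followed it in the
    // normalized string, while the original string is left untouched.
    #[test]
    fn transform_removes_following_chars() {
        let mut n = NormalizedString::from("Hey!!!");
        n.transform(vec![('H', 0), ('e', 0), ('y', -3)], 0);
        assert_eq!(n.get(), "Hey");
        assert_eq!(n.get_original(), "Hey!!!");
        assert_eq!(n.alignments, vec![(0, 1), (1, 2), (2, 3)]);
    }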
#[test]
fn test_append_after_clear() {
let mut n = NormalizedString::from("Hello");
assert_eq!(n.get(), "Hello");
n.clear();
assert_eq!(n.get(), "");
n.append(" World");
assert_eq!(n.get(), " World");
assert_eq!(n.len_original(), 5);
assert_eq!(n.len(), 6);
assert_eq!(n.get_range_original(Range::Original(0..5)), Some("Hello"));
assert_eq!(n.get_range_original(Range::Normalized(0..6)), Some(""));
assert_eq!(n.get_range(Range::Normalized(0..6)), Some(" World"));
}
}
| tokenizers/tokenizers/src/tokenizer/normalizer.rs/0 | {
"file_path": "tokenizers/tokenizers/src/tokenizer/normalizer.rs",
"repo_id": "tokenizers",
"token_count": 43220
} |
use std::collections::HashMap;
use std::iter::FromIterator;
use tokenizers::decoders::byte_fallback::ByteFallback;
use tokenizers::models::bpe::{BpeTrainerBuilder, BPE};
use tokenizers::normalizers::{Sequence, Strip, NFC};
use tokenizers::pre_tokenizers::byte_level::ByteLevel;
use tokenizers::{AddedToken, TokenizerBuilder};
use tokenizers::{DecoderWrapper, NormalizerWrapper, PostProcessorWrapper, PreTokenizerWrapper};
use tokenizers::{Tokenizer, TokenizerImpl};
#[test]
fn train_tokenizer() {
let vocab_size: usize = 100;
let mut tokenizer = TokenizerBuilder::new()
.with_model(BPE::default())
.with_normalizer(Some(Sequence::new(vec![
Strip::new(true, true).into(),
NFC.into(),
])))
.with_pre_tokenizer(Some(ByteLevel::default()))
.with_post_processor(Some(ByteLevel::default()))
.with_decoder(Some(ByteLevel::default()))
.build()
.unwrap();
let mut trainer = BpeTrainerBuilder::new()
.show_progress(false)
.vocab_size(vocab_size)
.min_frequency(0)
.special_tokens(vec![
AddedToken::from(String::from("<s>"), true),
AddedToken::from(String::from("<pad>"), true),
AddedToken::from(String::from("</s>"), true),
AddedToken::from(String::from("<unk>"), true),
AddedToken::from(String::from("<mask>"), true),
])
.build();
let pretty = true;
tokenizer
.train_from_files(&mut trainer, vec!["data/small.txt".to_string()])
.unwrap()
.save("data/tokenizer.json", pretty)
.unwrap();
}
#[test]
fn load_tokenizer() {
let tokenizer = Tokenizer::from_file("data/roberta.json").unwrap();
let example = "This is an example";
let ids = vec![713, 16, 41, 1246];
let tokens = vec!["This", "Ġis", "Ġan", "Ġexample"];
let encodings = tokenizer.encode(example, false).unwrap();
assert_eq!(encodings.get_ids(), ids);
assert_eq!(encodings.get_tokens(), tokens);
let decoded = tokenizer.decode(&ids, false).unwrap();
assert_eq!(decoded, example);
}
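// A hedged sketch building on `load_tokenizer` above, assuming the same "data/roberta.json"
// fixture: `encode_batch` runs the pipeline over several inputs at once.
#[test]
fn encode_batch_tokenizer() {
    let tokenizer = Tokenizer::from_file("data/roberta.json").unwrap();
    let encodings = tokenizer
        .encode_batch(vec!["This is an example", "This is an example"], false)
        .unwrap();
    assert_eq!(encodings.len(), 2);
    assert_eq!(encodings[0].get_ids(), [713, 16, 41, 1246]);
    assert_eq!(encodings[1].get_ids(), [713, 16, 41, 1246]);
}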
#[test]
fn streaming_tokenizer() {
let tokenizer = Tokenizer::from_file("data/roberta.json").unwrap();
let mut decode_stream = tokenizer.decode_stream(false);
assert_eq!(decode_stream.step(713).unwrap(), Some("This".to_string()));
assert_eq!(decode_stream.step(16).unwrap(), Some(" is".to_string()));
assert_eq!(decode_stream.step(41).unwrap(), Some(" an".to_string()));
assert_eq!(
decode_stream.step(1246).unwrap(),
Some(" example".to_string())
);
let tokenizer = Tokenizer::from_file("data/albert-base-v1-tokenizer.json").unwrap();
let encoded = tokenizer.encode("This is an example", false).unwrap();
assert_eq!(encoded.get_ids(), &[48, 25, 40, 823]);
let mut decode_stream = tokenizer.decode_stream(false);
// No space anymore
assert_eq!(decode_stream.step(25).unwrap(), Some("is".to_string()));
let mut decode_stream = tokenizer.decode_stream(false);
assert_eq!(decode_stream.step(48).unwrap(), Some("this".to_string()));
assert_eq!(decode_stream.step(25).unwrap(), Some(" is".to_string()));
assert_eq!(decode_stream.step(40).unwrap(), Some(" an".to_string()));
assert_eq!(
decode_stream.step(823).unwrap(),
Some(" example".to_string())
);
// None example
let vocab = HashMap::from_iter([
("<0x20>".to_string(), 0),
("<0xC3>".to_string(), 1),
("<0xA9>".to_string(), 2),
(" This".to_string(), 3),
]);
let merges = vec![];
let bpe = BPE::builder()
.vocab_and_merges(vocab, merges)
.byte_fallback(true)
.build()
.unwrap();
let tokenizer = TokenizerBuilder::new()
.with_model(bpe)
.with_normalizer(Some(Sequence::new(vec![
Strip::new(true, true).into(),
NFC.into(),
])))
.with_pre_tokenizer(Some(ByteLevel::default()))
.with_post_processor(Some(ByteLevel::default()))
.with_decoder(Some(ByteFallback::default()))
.build()
.unwrap();
let mut decode_stream = tokenizer.decode_stream(false);
assert_eq!(decode_stream.step(0).unwrap(), Some(" ".to_string()));
assert_eq!(decode_stream.step(1).unwrap(), None);
assert_eq!(decode_stream.step(2).unwrap(), Some("é".to_string()));
assert_eq!(decode_stream.step(2).unwrap(), None);
}
#[test]
#[ignore]
fn quicktour_slow_train() -> tokenizers::Result<()> {
// START quicktour_init_tokenizer
use tokenizers::models::bpe::BPE;
let mut tokenizer: TokenizerImpl<
BPE,
NormalizerWrapper,
PreTokenizerWrapper,
PostProcessorWrapper,
DecoderWrapper,
> = TokenizerImpl::new(
BPE::builder()
.unk_token("[UNK]".to_string())
.build()
.unwrap(),
);
// END quicktour_init_tokenizer
// START quicktour_init_trainer
use tokenizers::models::bpe::BpeTrainer;
let mut trainer = BpeTrainer::builder()
.special_tokens(vec![
AddedToken::from("[UNK]", true),
AddedToken::from("[CLS]", true),
AddedToken::from("[SEP]", true),
AddedToken::from("[PAD]", true),
AddedToken::from("[MASK]", true),
])
.build();
// END quicktour_init_trainer
// START quicktour_init_pretok
use tokenizers::pre_tokenizers::whitespace::Whitespace;
tokenizer.with_pre_tokenizer(Some(Whitespace {}));
// END quicktour_init_pretok
// START quicktour_train
let files = vec![
"data/wikitext-103-raw/wiki.train.raw".into(),
"data/wikitext-103-raw/wiki.test.raw".into(),
"data/wikitext-103-raw/wiki.valid.raw".into(),
];
tokenizer.train_from_files(&mut trainer, files)?;
// END quicktour_train
// START quicktour_save
tokenizer.save("data/tokenizer-wiki.json", false)?;
// END quicktour_save
Ok(())
}
#[test]
fn quicktour() -> tokenizers::Result<()> {
// START quicktour_reload_tokenizer
let mut tokenizer = Tokenizer::from_file("data/tokenizer-wiki.json")?;
// END quicktour_reload_tokenizer
// START quicktour_encode
let output = tokenizer.encode("Hello, y'all! How are you 😁 ?", true)?;
// END quicktour_encode
// START quicktour_print_tokens
println!("{:?}", output.get_tokens());
// ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?",]
// END quicktour_print_tokens
assert_eq!(
output.get_tokens(),
["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?",]
);
// START quicktour_print_ids
println!("{:?}", output.get_ids());
// [27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
// END quicktour_print_ids
assert_eq!(
output.get_ids(),
[27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35]
);
// START quicktour_print_offsets
println!("{:?}", output.get_offsets()[9]);
// (26, 30)
// END quicktour_print_offsets
assert_eq!(output.get_offsets()[9], (26, 30));
// START quicktour_use_offsets
let sentence = "Hello, y'all! How are you 😁 ?";
println!("{}", &sentence[26..30]);
// "😁"
// END quicktour_use_offsets
// START quicktour_check_sep
println!("{}", tokenizer.token_to_id("[SEP]").unwrap());
// 2
// END quicktour_check_sep
assert_eq!(tokenizer.token_to_id("[SEP]"), Some(2));
// START quicktour_init_template_processing
use tokenizers::processors::template::TemplateProcessing;
let special_tokens = vec![
("[CLS]", tokenizer.token_to_id("[CLS]").unwrap()),
("[SEP]", tokenizer.token_to_id("[SEP]").unwrap()),
];
tokenizer.with_post_processor(Some(
TemplateProcessing::builder()
.try_single("[CLS] $A [SEP]")
.unwrap()
.try_pair("[CLS] $A [SEP] $B:1 [SEP]:1")
.unwrap()
.special_tokens(special_tokens)
.build()?,
));
// END quicktour_init_template_processing
// START quicktour_print_special_tokens
let output = tokenizer.encode("Hello, y'all! How are you 😁 ?", true)?;
println!("{:?}", output.get_tokens());
// ["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"]
// END quicktour_print_special_tokens
assert_eq!(
output.get_tokens(),
["[CLS]", "Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?", "[SEP]"]
);
// START quicktour_print_special_tokens_pair
let output = tokenizer.encode(("Hello, y'all!", "How are you 😁 ?"), true)?;
println!("{:?}", output.get_tokens());
// ["[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]", "?", "[SEP]"]
// END quicktour_print_special_tokens_pair
assert_eq!(
output.get_tokens(),
[
"[CLS]", "Hello", ",", "y", "'", "all", "!", "[SEP]", "How", "are", "you", "[UNK]",
"?", "[SEP]"
]
);
// START quicktour_print_type_ids
println!("{:?}", output.get_type_ids());
// [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
// END quicktour_print_type_ids
assert_eq!(
output.get_type_ids(),
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
);
// START quicktour_encode_batch
let output = tokenizer.encode_batch(vec!["Hello, y'all!", "How are you 😁 ?"], true)?;
// END quicktour_encode_batch
println!("{output:?}");
// START quicktour_encode_batch_pair
let output = tokenizer.encode_batch(
vec![
("Hello, y'all!", "How are you 😁 ?"),
("Hello to you too!", "I'm fine, thank you!"),
],
true,
)?;
// END quicktour_encode_batch_pair
println!("{output:?}");
// START quicktour_enable_padding
use tokenizers::PaddingParams;
tokenizer.with_padding(Some(PaddingParams {
pad_id: 3,
pad_token: "[PAD]".to_string(),
..PaddingParams::default()
}));
// END quicktour_enable_padding
// START quicktour_print_batch_tokens
let output = tokenizer.encode_batch(vec!["Hello, y'all!", "How are you 😁 ?"], true)?;
println!("{:?}", output[1].get_tokens());
// ["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
// END quicktour_print_batch_tokens
assert_eq!(
output[1].get_tokens(),
["[CLS]", "How", "are", "you", "[UNK]", "?", "[SEP]", "[PAD]"]
);
// START quicktour_print_attention_mask
println!("{:?}", output[1].get_attention_mask());
// [1, 1, 1, 1, 1, 1, 1, 0]
// END quicktour_print_attention_mask
assert_eq!(output[1].get_attention_mask(), [1, 1, 1, 1, 1, 1, 1, 0]);
Ok(())
}
#[test]
fn pipeline() -> tokenizers::Result<()> {
// START pipeline_reload_tokenizer
use tokenizers::Tokenizer;
let mut tokenizer = Tokenizer::from_file("data/tokenizer-wiki.json")?;
// END pipeline_reload_tokenizer
// START pipeline_setup_normalizer
use tokenizers::normalizers::{
strip::StripAccents, unicode::NFD, utils::Sequence as NormalizerSequence,
};
let normalizer = NormalizerSequence::new(vec![NFD.into(), StripAccents.into()]);
// END pipeline_setup_normalizer
// START pipeline_test_normalizer
use tokenizers::{NormalizedString, Normalizer};
let mut normalized = NormalizedString::from("Héllò hôw are ü?");
normalizer.normalize(&mut normalized)?;
println!("{}", normalized.get());
// "Hello how are u?"
// END pipeline_test_normalizer
assert_eq!(normalized.get(), "Hello how are u?");
// START pipeline_replace_normalizer
tokenizer.with_normalizer(Some(normalizer));
// END pipeline_replace_normalizer
// START pipeline_setup_pre_tokenizer
use tokenizers::pre_tokenizers::whitespace::Whitespace;
use tokenizers::{OffsetReferential, OffsetType, PreTokenizedString, PreTokenizer};
let pre_tokenizer = Whitespace {};
let mut pre_tokenized = PreTokenizedString::from("Hello! How are you? I'm fine, thank you.");
pre_tokenizer.pre_tokenize(&mut pre_tokenized)?;
println!(
"{:?}",
pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte)
);
// [("Hello", (0, 5), None), ("!", (5, 6), None), ("How", (7, 10), None),
// ("are", (11, 14), None), ("you", (15, 18), None), ("?", (18, 19), None),
// ("I", (20, 21), None), ("\'", (21, 22), None), ("m", (22, 23), None),
// ("fine", (24, 28), None), (",", (28, 29), None), ("thank", (30, 35), None),
// ("you", (36, 39), None), (".", (39, 40), None)]
// END pipeline_setup_pre_tokenizer
assert_eq!(
pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte),
vec![
("Hello", (0, 5), &None),
("!", (5, 6), &None),
("How", (7, 10), &None),
("are", (11, 14), &None),
("you", (15, 18), &None),
("?", (18, 19), &None),
("I", (20, 21), &None),
("\'", (21, 22), &None),
("m", (22, 23), &None),
("fine", (24, 28), &None),
(",", (28, 29), &None),
("thank", (30, 35), &None),
("you", (36, 39), &None),
(".", (39, 40), &None)
]
);
// START pipeline_combine_pre_tokenizer
use tokenizers::pre_tokenizers::{digits::Digits, sequence::Sequence};
let pre_tokenizer = Sequence::new(vec![Whitespace {}.into(), Digits::new(true).into()]);
let mut pre_tokenized = PreTokenizedString::from("Call 911!");
pre_tokenizer.pre_tokenize(&mut pre_tokenized)?;
println!(
"{:?}",
pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte)
);
// END pipeline_combine_pre_tokenizer
assert_eq!(
pre_tokenized.get_splits(OffsetReferential::Original, OffsetType::Byte),
vec![
("Call", (0, 4), &None),
("9", (5, 6), &None),
("1", (6, 7), &None),
("1", (7, 8), &None),
("!", (8, 9), &None)
]
);
// START pipeline_replace_pre_tokenizer
tokenizer.with_pre_tokenizer(Some(pre_tokenizer));
// END pipeline_replace_pre_tokenizer
// START pipeline_setup_processor
use tokenizers::processors::template::TemplateProcessing;
tokenizer.with_post_processor(Some(
TemplateProcessing::builder()
.try_single("[CLS] $A [SEP]")
.unwrap()
.try_pair("[CLS] $A [SEP] $B:1 [SEP]:1")
.unwrap()
.special_tokens(vec![("[CLS]", 1), ("[SEP]", 2)])
.build()
.unwrap(),
));
// END pipeline_setup_processor
// START pipeline_test_decoding
let output = tokenizer.encode("Hello, y'all! How are you 😁 ?", true)?;
println!("{:?}", output.get_ids());
// [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]
let decoded = tokenizer.decode(
&[1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2],
true,
)?;
println!("{decoded}");
// "Hello , y ' all ! How are you ?"
// END pipeline_test_decoding
Ok(())
}
#[test]
#[ignore]
fn train_pipeline_bert() -> tokenizers::Result<()> {
// START bert_setup_tokenizer
use tokenizers::models::wordpiece::WordPiece;
use tokenizers::Tokenizer;
let mut bert_tokenizer = Tokenizer::new(
WordPiece::builder()
.unk_token("[UNK]".to_string())
.build()
.unwrap(),
);
// END bert_setup_tokenizer
// START bert_setup_normalizer
use tokenizers::normalizers::utils::Sequence as NormalizerSequence;
use tokenizers::normalizers::{strip::StripAccents, unicode::NFD, utils::Lowercase};
bert_tokenizer.with_normalizer(Some(NormalizerSequence::new(vec![
NFD.into(),
Lowercase.into(),
StripAccents.into(),
])));
// END bert_setup_normalizer
// START bert_setup_pre_tokenizer
use tokenizers::pre_tokenizers::whitespace::Whitespace;
bert_tokenizer.with_pre_tokenizer(Some(Whitespace {}));
// END bert_setup_pre_tokenizer
// START bert_setup_processor
use tokenizers::processors::template::TemplateProcessing;
bert_tokenizer.with_post_processor(Some(
TemplateProcessing::builder()
.try_single("[CLS] $A [SEP]")
.unwrap()
.try_pair("[CLS] $A [SEP] $B:1 [SEP]:1")
.unwrap()
.special_tokens(vec![("[CLS]", 1), ("[SEP]", 2)])
.build()
.unwrap(),
));
// END bert_setup_processor
// START bert_train_tokenizer
use tokenizers::models::{wordpiece::WordPieceTrainer, TrainerWrapper};
let mut trainer: TrainerWrapper = WordPieceTrainer::builder()
.vocab_size(30_522)
.special_tokens(vec![
AddedToken::from("[UNK]", true),
AddedToken::from("[CLS]", true),
AddedToken::from("[SEP]", true),
AddedToken::from("[PAD]", true),
AddedToken::from("[MASK]", true),
])
.build()
.into();
let files = vec![
"data/wikitext-103-raw/wiki.train.raw".into(),
"data/wikitext-103-raw/wiki.test.raw".into(),
"data/wikitext-103-raw/wiki.valid.raw".into(),
];
bert_tokenizer.train_from_files(&mut trainer, files)?;
bert_tokenizer.save("data/bert-wiki.json", false)?;
// END bert_train_tokenizer
Ok(())
}
#[test]
fn pipeline_bert() -> tokenizers::Result<()> {
let mut bert_tokenizer = Tokenizer::from_file("data/bert-wiki.json")?;
// START bert_test_decoding
let output = bert_tokenizer.encode("Welcome to the 🤗 Tokenizers library.", true)?;
println!("{:?}", output.get_tokens());
// ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"]
let decoded = bert_tokenizer.decode(output.get_ids(), true)?;
println!("{decoded}");
// "welcome to the tok ##eni ##zer ##s library ."
// END bert_test_decoding
assert_eq!(
output.get_tokens(),
&[
"[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library",
".", "[SEP]"
]
);
assert_eq!(decoded, "welcome to the tok ##eni ##zer ##s library .");
// START bert_proper_decoding
use tokenizers::decoders::wordpiece::WordPiece as WordPieceDecoder;
bert_tokenizer.with_decoder(Some(WordPieceDecoder::default()));
let decoded = bert_tokenizer.decode(output.get_ids(), true)?;
// "welcome to the tokenizers library."
// END bert_proper_decoding
assert_eq!(decoded, "welcome to the tokenizers library.");
Ok(())
}
| tokenizers/tokenizers/tests/documentation.rs/0 | {
"file_path": "tokenizers/tokenizers/tests/documentation.rs",
"repo_id": "tokenizers",
"token_count": 8476
} |
- local: index
title: 🤗 Transformers.js
- sections:
- local: installation
title: Installation
- local: pipelines
title: The pipeline API
- local: custom_usage
title: Custom usage
title: Get started
- sections:
- local: tutorials/vanilla-js
title: Building a Vanilla JS Application
- local: tutorials/react
title: Building a React Application
- local: tutorials/next
title: Building a Next.js Application
- local: tutorials/browser-extension
title: Building a Browser Extension
- local: tutorials/electron
title: Building an Electron Application
- local: tutorials/node
title: Server-side Inference in Node.js
title: Tutorials
- sections:
- local: guides/webgpu
title: Running models on WebGPU
- local: guides/dtypes
title: Using quantized models (dtypes)
- local: guides/private
title: Accessing Private/Gated Models
- local: guides/node-audio-processing
title: Server-side Audio Processing
title: Developer Guides
- sections:
- local: api/transformers
title: Index
- local: api/pipelines
title: Pipelines
- local: api/models
title: Models
- local: api/tokenizers
title: Tokenizers
- local: api/processors
title: Processors
- local: api/configs
title: Configs
- local: api/env
title: Environment variables
- sections:
- local: api/backends/onnx
title: ONNX
title: Backends
isExpanded: false
- sections:
- local: api/generation/parameters
title: Parameters
- local: api/generation/configuration_utils
title: Configuration
- local: api/generation/logits_process
title: Logits Processors
- local: api/generation/logits_sampler
title: Logits Samplers
- local: api/generation/stopping_criteria
title: Stopping Criteria
- local: api/generation/streamers
title: Streamers
title: Generation
isExpanded: false
- sections:
- local: api/utils/core
title: Core
- local: api/utils/hub
title: Hub
- local: api/utils/image
title: Image
- local: api/utils/audio
title: Audio
- local: api/utils/tensor
title: Tensor
- local: api/utils/maths
title: Maths
- local: api/utils/data-structures
title: Data Structures
title: Utilities
isExpanded: false
title: API Reference
| transformers.js/docs/source/_toctree.yml/0 | {
"file_path": "transformers.js/docs/source/_toctree.yml",
"repo_id": "transformers.js",
"token_count": 825
} |
// This file (model.js) contains all the logic for loading the model and running predictions.
class MyClassificationPipeline {
// NOTE: Replace this with your own task and model
static task = 'text-classification';
static model = 'Xenova/distilbert-base-uncased-finetuned-sst-2-english';
static instance = null;
static async getInstance(progress_callback = null) {
if (this.instance === null) {
// Dynamically import the Transformers.js library
let { pipeline, env } = await import('@xenova/transformers');
// NOTE: Uncomment this to change the cache directory
// env.cacheDir = './.cache';
this.instance = pipeline(this.task, this.model, { progress_callback });
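// Note: `pipeline()` returns a promise, which is cached directly so that
// concurrent callers of `getInstance()` all await the same model load.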
}
return this.instance;
}
}
// The run function is used by the `transformers:run` event handler.
async function run(event, text) {
const classifier = await MyClassificationPipeline.getInstance();
return await classifier(text);
}
module.exports = {
run
}
| transformers.js/examples/electron/src/model.js/0 | {
"file_path": "transformers.js/examples/electron/src/model.js",
"repo_id": "transformers.js",
"token_count": 366
} |
{
"name": "musicgen-web",
"private": true,
"version": "0.0.0",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"lint": "eslint . --ext js,jsx --report-unused-disable-directives --max-warnings 0",
"preview": "vite preview"
},
"dependencies": {
"@xenova/transformers": "github:xenova/transformers.js#v3",
"react": "^18.2.0",
"react-dom": "^18.2.0"
},
"devDependencies": {
"@types/react": "^18.2.66",
"@types/react-dom": "^18.2.22",
"@vitejs/plugin-react": "^4.2.1",
"autoprefixer": "^10.4.19",
"eslint": "^8.57.0",
"eslint-plugin-react": "^7.34.1",
"eslint-plugin-react-hooks": "^4.6.0",
"eslint-plugin-react-refresh": "^0.4.6",
"postcss": "^8.4.38",
"tailwindcss": "^3.4.3",
"vite": "^5.2.0"
}
}
| transformers.js/examples/musicgen-web/package.json/0 | {
"file_path": "transformers.js/examples/musicgen-web/package.json",
"repo_id": "transformers.js",
"token_count": 415
} |
{
"name": "esm",
"version": "1.0.0",
"description": "Server-side inference with Transformers.js (ESM)",
"type": "module",
"main": "app.js",
"keywords": [],
"author": "Xenova",
"license": "ISC",
"dependencies": {
"@xenova/transformers": "^2.0.0"
}
}
| transformers.js/examples/node/esm/package.json/0 | {
"file_path": "transformers.js/examples/node/esm/package.json",
"repo_id": "transformers.js",
"token_count": 116
} |
@import url('https://fonts.googleapis.com/css2?family=Montserrat&display=swap');
* {
box-sizing: border-box;
padding: 0;
margin: 0;
font-family: 'Montserrat', sans-serif;
}
html {
background: radial-gradient(ellipse at center, #1b2735 0%, #090a0f 100%);
height: 100%;
width: 100%;
}
body {
overflow: hidden;
display: flex;
justify-content: center;
}
#deepscatter {
position: absolute;
left: 0;
top: 0;
}
#container-for-webgl-canvas {
width: 100vw;
display: flex;
justify-content: center;
}
.tooltip {
width: 400px;
margin-top: -10px;
}
.tooltip>audio {
margin-top: 10px;
width: 100%;
}
#search {
background: url('https://upload.wikimedia.org/wikipedia/commons/0/0b/Search_Icon.svg') no-repeat center right;
background-size: 50px 50px;
width: 56px;
border: none;
cursor: pointer;
}
#header {
padding: 20px 0 50px 0;
color: white;
text-align: center;
background: linear-gradient(to bottom, rgba(0, 0, 0, 1), rgba(0, 0, 0, 0));
width: 100vw;
}
#title {
font-size: 30px;
font-weight: 600;
}
#header,
#search-bar {
position: absolute;
justify-content: center;
z-index: 1;
}
#search-bar {
padding: 0 20px;
max-width: 90vw;
overflow: hidden;
border-radius: 20px 20px 0px 0px;
bottom: 0;
height: 60px;
width: 500px;
background-color: white;
display: flex;
}
#overlay {
position: absolute;
width: 100vw;
height: 100vh;
z-index: 999999999;
background-color: rgba(0, 0, 0, 0.8);
pointer-events: none;
display: none;
color: white;
justify-content: center;
align-items: center;
font-size: 20px;
}
#query {
width: 100%;
height: 100%;
font-size: 1.5em;
border: none;
outline: none;
}
a {
color: white;
} | transformers.js/examples/semantic-audio-search/style.css/0 | {
"file_path": "transformers.js/examples/semantic-audio-search/style.css",
"repo_id": "transformers.js",
"token_count": 707
} |
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
// Since we will download the model from the Hugging Face Hub, we can skip the local model check
env.allowLocalModels = false;
// Reference the elements that we will need
const status = document.getElementById('status');
const fileUpload = document.getElementById('file-upload');
const imageContainer = document.getElementById('image-container');
// Create a new object detection pipeline
status.textContent = 'Loading model...';
const detector = await pipeline('object-detection', 'Xenova/detr-resnet-50');
status.textContent = 'Ready';
fileUpload.addEventListener('change', function (e) {
const file = e.target.files[0];
if (!file) {
return;
}
const reader = new FileReader();
// Set up a callback when the file is loaded
reader.onload = function (e2) {
imageContainer.innerHTML = '';
const image = document.createElement('img');
image.src = e2.target.result;
imageContainer.appendChild(image);
detect(image);
};
reader.readAsDataURL(file);
});
// Detect objects in the image
async function detect(img) {
status.textContent = 'Analysing...';
const output = await detector(img.src, {
threshold: 0.5,
percentage: true,
});
status.textContent = '';
output.forEach(renderBox);
}
// Render a bounding box and label on the image
function renderBox({ box, label }) {
const { xmax, xmin, ymax, ymin } = box;
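// `detect` requests `percentage: true`, so box coordinates arrive as fractions of the
// image size and can be positioned with CSS percentages (hence the `100 *` factors below).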
// Generate a random color for the box
const color = '#' + Math.floor(Math.random() * 0xFFFFFF).toString(16).padStart(6, 0);
// Draw the box
const boxElement = document.createElement('div');
boxElement.className = 'bounding-box';
Object.assign(boxElement.style, {
borderColor: color,
left: 100 * xmin + '%',
top: 100 * ymin + '%',
width: 100 * (xmax - xmin) + '%',
height: 100 * (ymax - ymin) + '%',
})
// Draw label
const labelElement = document.createElement('span');
labelElement.textContent = label;
labelElement.className = 'bounding-box-label';
labelElement.style.backgroundColor = color;
boxElement.appendChild(labelElement);
imageContainer.appendChild(boxElement);
}
| transformers.js/examples/vanilla-js/index.js/0 | {
"file_path": "transformers.js/examples/vanilla-js/index.js",
"repo_id": "transformers.js",
"token_count": 817
} |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Transformers.js | Real-time depth estimation</title>
</head>
<body>
<h1>
Real-time depth estimation w/
<a href="https://huggingface.co/onnx-community/depth-anything-v2-small" target="_blank">Depth Anything V2</a>
</h1>
<h3>
Runs locally in your browser, powered by
<a href="https://github.com/huggingface/transformers.js" target="_blank">🤗 Transformers.js</a>
</h3>
<div id="container">
<video id="video" autoplay muted playsinline></video>
<canvas id="output-canvas"></canvas>
</div>
<div id="controls">
<div title="Read frames from your webcam and process them at a lower size (lower = faster)">
<label>Stream scale</label>
(<label id="scale-value">0.4</label>)
<br>
<input id="scale" type="range" min="0.1" max="1" step="0.1" value="0.4" disabled>
</div>
<div title="The width of the image (lower = faster)">
<label>Image size</label>
(<label id="size-value">504</label>px)
<br>
<input id="size" type="range" min="140" max="840" step="14" value="504" disabled>
</div>
</div>
<label id="status">Loading model...</label>
<script type="module" src="/main.js"></script>
</body>
</html> | transformers.js/examples/webgpu-video-depth-estimation/index.html/0 | {
"file_path": "transformers.js/examples/webgpu-video-depth-estimation/index.html",
"repo_id": "transformers.js",
"token_count": 534
} |
function formatBytes(size) {
const i = size == 0 ? 0 : Math.floor(Math.log(size) / Math.log(1024));
return +((size / Math.pow(1024, i)).toFixed(2)) * 1 + ['B', 'kB', 'MB', 'GB', 'TB'][i];
}
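// Example: formatBytes(1536) === '1.5kB', formatBytes(0) === '0B'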
export default function Progress({ text, percentage, total }) {
percentage ??= 0;
return (
<div className="w-full bg-gray-100 dark:bg-gray-700 text-left rounded-lg overflow-hidden mb-0.5">
<div className="bg-blue-400 whitespace-nowrap px-1 text-sm" style={{ width: `${percentage}%` }}>
{text} ({percentage.toFixed(2)}%{isNaN(total) ? '' : ` of ${formatBytes(total)}`})
</div>
</div>
);
}
| transformers.js/examples/webgpu-vlm/src/components/Progress.jsx/0 | {
"file_path": "transformers.js/examples/webgpu-vlm/src/components/Progress.jsx",
"repo_id": "transformers.js",
"token_count": 290
} |
from transformers.convert_slow_tokenizer import Converter
from tokenizers import Tokenizer, pre_tokenizers, processors
from tokenizers.models import WordPiece
class EsmConverter(Converter):
def converted(self) -> Tokenizer:
vocab = self.original_tokenizer.vocab
tokenizer = Tokenizer(WordPiece(vocab, continuing_subword_prefix='', max_input_chars_per_word=int(
1e10), unk_token=str(self.original_tokenizer.unk_token)))
tokenizer.pre_tokenizer = pre_tokenizers.WhitespaceSplit()
cls = str(self.original_tokenizer.cls_token)
cls_token_id = self.original_tokenizer.cls_token_id
    # ESM has no dedicated sep token, so the eos token is used instead
sep = str(self.original_tokenizer.eos_token)
sep_token_id = self.original_tokenizer.eos_token_id
if sep_token_id is None:
tokenizer.post_processor = processors.TemplateProcessing(
single=f"{cls}:0 $A:0",
special_tokens=[
(cls, cls_token_id),
],
)
else:
tokenizer.post_processor = processors.TemplateProcessing(
single=f"{cls}:0 $A:0 {sep}:0",
pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1",
special_tokens=[
(cls, cls_token_id),
(sep, sep_token_id),
],
)
# For some reason, all tokens are added: none of them are special, but they all need special splitting.
# See https://github.com/huggingface/transformers/blob/df5c5c62ae253055336f5bb0828ca8e3e15ab6bd/src/transformers/models/esm/tokenization_esm.py#L79-L80
special_tokens = []
other_tokens = []
for token, token_id in vocab.items():
if token[0] == '<' and token[-1] == '>' and token_id <= 3:
special_tokens.append(token)
else:
other_tokens.append(token)
tokenizer.add_special_tokens(special_tokens)
tokenizer.add_tokens(other_tokens)
return tokenizer
def generate_fast_tokenizer(tokenizer):
tokenizer.vocab = tokenizer._token_to_id
return EsmConverter(tokenizer).converted()
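# Example usage (a sketch, assuming the slow EsmTokenizer from `transformers` is
# available and exposes `_token_to_id`; the checkpoint below is only illustrative):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    slow_tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
    fast_tokenizer = generate_fast_tokenizer(slow_tokenizer)
    fast_tokenizer.save("esm_tokenizer.json", pretty=True)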
| transformers.js/scripts/extra/esm.py/0 | {
"file_path": "transformers.js/scripts/extra/esm.py",
"repo_id": "transformers.js",
"token_count": 1055
} |
/**
* @module generation/configuration_utils
*/
import { pick } from "../utils/core.js";
/**
* Class that holds a configuration for a generation task.
*/
export class GenerationConfig {
// Parameters that control the length of the output
/**
* The maximum length the generated tokens can have.
* Corresponds to the length of the input prompt + `max_new_tokens`.
* Its effect is overridden by `max_new_tokens`, if also set.
* @type {number}
* @default 20
*/
max_length = 20;
/**
* The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.
* @type {number}
* @default null
*/
max_new_tokens = null;
/**
* The minimum length of the sequence to be generated.
* Corresponds to the length of the input prompt + `min_new_tokens`.
* Its effect is overridden by `min_new_tokens`, if also set.
* @type {number}
* @default 0
*/
min_length = 0;
/**
* The minimum numbers of tokens to generate, ignoring the number of tokens in the prompt.
* @type {number}
* @default null
*/
min_new_tokens = null;
/**
* Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
* - `true`, where the generation stops as soon as there are `num_beams` complete candidates;
* - `false`, where a heuristic is applied and the generation stops when it is very unlikely that better candidates will be found;
* - `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical beam search algorithm).
* @type {boolean|"never"}
* @default false
*/
early_stopping = false;
/**
* The maximum amount of time you allow the computation to run for in seconds.
* Generation will still finish the current pass after the allocated time has passed.
* @type {number}
* @default null
*/
max_time = null;
// Parameters that control the generation strategy used
/**
* Whether or not to use sampling; use greedy decoding otherwise.
* @type {boolean}
* @default false
*/
do_sample = false;
/**
* Number of beams for beam search. 1 means no beam search.
* @type {number}
* @default 1
*/
num_beams = 1;
/**
* Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
* See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
* @type {number}
* @default 1
*/
num_beam_groups = 1;
/**
* This value balances the model confidence and the degeneration penalty in contrastive search decoding.
* @type {number}
* @default null
*/
penalty_alpha = null;
/**
* Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding.
* @type {boolean}
* @default true
*/
use_cache = true;
// Parameters for manipulation of the model output logits
/**
* The value used to modulate the next token probabilities.
* @type {number}
* @default 1.0
*/
temperature = 1.0;
/**
* The number of highest probability vocabulary tokens to keep for top-k-filtering.
* @type {number}
* @default 50
*/
top_k = 50;
/**
* If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.
* @type {number}
* @default 1.0
*/
top_p = 1.0;
/**
* Local typicality measures how similar the conditional probability of predicting a target token next is to the expected conditional probability of predicting a random token next, given the partial text already generated.
* If set to float < 1, the smallest set of the most locally typical tokens with probabilities that add up to `typical_p` or higher are kept for generation.
* See [this paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.
* @type {number}
* @default 1.0
*/
typical_p = 1.0;
/**
* If set to float strictly between 0 and 1, only tokens with a conditional probability greater than `epsilon_cutoff` will be sampled.
* In the paper, suggested values range from 3e-4 to 9e-4, depending on the size of the model.
* See [Truncation Sampling as Language Model Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
* @type {number}
* @default 0.0
*/
epsilon_cutoff = 0.0;
/**
* Eta sampling is a hybrid of locally typical sampling and epsilon sampling.
* If set to float strictly between 0 and 1, a token is only considered if its probability is greater than either `eta_cutoff` or `sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits)))`.
* The latter term is intuitively the expected next token probability, scaled by `sqrt(eta_cutoff)`. In the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
* See [Truncation Sampling as Language Model Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
* @type {number}
* @default 0.0
*/
eta_cutoff = 0.0;
/**
* This value is subtracted from a beam's score if it generates a token same as any beam from other group at a particular time.
* Note that `diversity_penalty` is only effective if `group beam search` is enabled.
* @type {number}
* @default 0.0
*/
diversity_penalty = 0.0;
/**
* The parameter for repetition penalty. 1.0 means no penalty.
* See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
* @type {number}
* @default 1.0
*/
repetition_penalty = 1.0;
/**
* The parameter for encoder_repetition_penalty.
* An exponential penalty on sequences that are not in the original input.
* 1.0 means no penalty.
* @type {number}
* @default 1.0
*/
encoder_repetition_penalty = 1.0;
/**
* Exponential penalty to the length that is used with beam-based generation.
* It is applied as an exponent to the sequence length, which in turn is used to divide the score of the sequence.
* Since the score is the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while `length_penalty` < 0.0 encourages shorter sequences.
* @type {number}
* @default 1.0
*/
length_penalty = 1.0;
/**
* If set to int > 0, all ngrams of that size can only occur once.
* @type {number}
* @default 0
*/
no_repeat_ngram_size = 0;
/**
* List of token ids that are not allowed to be generated.
* In order to get the token ids of the words that should not appear in the generated text, use
* `tokenizer(bad_words, { add_prefix_space: true, add_special_tokens: false }).input_ids`.
* @type {number[][]}
* @default null
*/
bad_words_ids = null;
/**
* List of token ids that must be generated.
* If given a `number[][]`, this is treated as a simple list of words that must be included, the opposite of `bad_words_ids`.
* If given `number[][][]`, this triggers a [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one can allow different forms of each word.
* @type {number[][]|number[][][]}
* @default null
*/
force_words_ids = null;
/**
* Whether to renormalize the logits after applying all the logits processors or warpers (including the custom ones).
* It's highly recommended to set this flag to `true`, as the search algorithms assume the score logits are normalized, but some logit processors or warpers break the normalization.
* @type {boolean}
* @default false
*/
renormalize_logits = false;
/**
* Custom constraints that can be added to the generation to ensure that the output will contain certain tokens, as defined by `Constraint` objects, in the most sensible way possible.
* @type {Object[]}
* @default null
*/
constraints = null;
/**
* The id of the token to force as the first generated token after the `decoder_start_token_id`.
* Useful for multilingual models like mBART where the first generated token needs to be the target language token.
* @type {number}
* @default null
*/
forced_bos_token_id = null;
/**
* The id of the token to force as the last generated token when `max_length` is reached.
* Optionally, use a list to set multiple *end-of-sequence* tokens.
* @type {number|number[]}
* @default null
*/
forced_eos_token_id = null;
/**
* Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method from crashing. Note that using `remove_invalid_values` can slow down generation.
* @type {boolean}
*/
remove_invalid_values = false;
/**
* This tuple adds an exponentially increasing length penalty after a certain number of tokens have been generated.
* The tuple shall consist of: `(start_index, decay_factor)` where `start_index` indicates where penalty starts and `decay_factor` represents the factor of exponential decay.
* @type {[number, number]}
* @default null
*/
exponential_decay_length_penalty = null;
/**
* A list of tokens that will be suppressed at generation.
* The `SuppressTokens` logit processor will set their log probs to `-inf` so that they are not sampled.
* @type {number[]}
* @default null
*/
suppress_tokens = null;
/**
* A streamer that will be used to stream the generation.
* @type {import('./streamers.js').TextStreamer}
* @default null
*/
streamer = null;
/**
* A list of tokens that will be suppressed at the beginning of the generation.
* The `SuppressBeginTokens` logit processor will set their log probs to `-inf` so that they are not sampled.
* @type {number[]}
* @default null
*/
begin_suppress_tokens = null;
/**
* A list of pairs of integers which indicates a mapping from generation indices to token indices that will be forced before sampling.
* For example, `[[1, 123]]` means the second generated token will always be a token of index 123.
* @type {[number, number][]}
* @default null
*/
forced_decoder_ids = null;
/**
* The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`.
* Higher guidance scale encourages the model to generate samples that are more closely linked to the input
* prompt, usually at the expense of poorer quality.
* @type {number}
* @default null
*/
guidance_scale = null;
// Parameters that define the output variables of `generate`
/**
* The number of independently computed returned sequences for each element in the batch.
* @type {number}
* @default 1
*/
num_return_sequences = 1;
/**
* Whether or not to return the attentions tensors of all attention layers.
* See `attentions` under returned tensors for more details.
* @type {boolean}
* @default false
*/
output_attentions = false;
/**
* Whether or not to return the hidden states of all layers.
* See `hidden_states` under returned tensors for more details.
* @type {boolean}
* @default false
*/
output_hidden_states = false;
/**
* Whether or not to return the prediction scores.
* See `scores` under returned tensors for more details.
* @type {boolean}
* @default false
*/
output_scores = false;
/**
* Whether or not to return a `ModelOutput` instead of a plain tuple.
* @type {boolean}
* @default false
*/
return_dict_in_generate = false;
// Special tokens that can be used at generation time
/**
* The id of the *padding* token.
* @type {number}
* @default null
*/
pad_token_id = null;
/**
* The id of the *beginning-of-sequence* token.
* @type {number}
* @default null
*/
bos_token_id = null;
/**
* The id of the *end-of-sequence* token.
* Optionally, use a list to set multiple *end-of-sequence* tokens.
* @type {number|number[]}
* @default null
*/
eos_token_id = null;
// Generation parameters exclusive to encoder-decoder models
/**
* If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`.
* @type {number}
* @default 0
*/
encoder_no_repeat_ngram_size = 0;
/**
* If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token.
* @type {number}
* @default null
*/
decoder_start_token_id = null;
// Wild card
/**
* Additional generation kwargs will be forwarded to the `generate` function of the model.
* Kwargs that are not present in `generate`'s signature will be used in the model forward pass.
* @type {Object}
* @default {}
*/
generation_kwargs = {};
/**
*
* @param {GenerationConfig|import('../configs.js').PretrainedConfig} config
*/
constructor(config) {
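// Copy only the fields that are declared on this class; any unrelated entries
// on `config` (e.g. other model config values) are ignored.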
Object.assign(this, pick(config, Object.getOwnPropertyNames(this)));
}
}
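// Example (sketch): only the recognized generation fields above are picked up.
// const generation_config = new GenerationConfig({ max_new_tokens: 50, do_sample: true, top_k: 10 });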
| transformers.js/src/generation/configuration_utils.js/0 | {
"file_path": "transformers.js/src/generation/configuration_utils.js",
"repo_id": "transformers.js",
"token_count": 4707
} |
import {
ImageProcessor,
} from "../../base/image_processors_utils.js";
export class ConvNextImageProcessor extends ImageProcessor {
constructor(config) {
super(config);
/**
* Percentage of the image to crop. Only has an effect if this.size < 384.
*/
// @ts-expect-error TS2339
this.crop_pct = this.config.crop_pct ?? (224 / 256);
}
async resize(image) {
const shortest_edge = this.size?.shortest_edge;
if (shortest_edge === undefined) {
throw new Error(`Size dictionary must contain 'shortest_edge' key.`);
}
if (shortest_edge < 384) {
// maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
const resize_shortest_edge = Math.floor(shortest_edge / this.crop_pct);
const [newWidth, newHeight] = this.get_resize_output_image_size(image, {
shortest_edge: resize_shortest_edge,
});
image = await image.resize(newWidth, newHeight, {
resample: this.resample,
});
// then crop to (shortest_edge, shortest_edge)
image = await image.center_crop(shortest_edge, shortest_edge);
} else {
// warping (no cropping) when evaluated at 384 or larger
image = await image.resize(shortest_edge, shortest_edge, {
resample: this.resample,
});
}
return image;
}
}
export class ConvNextFeatureExtractor extends ConvNextImageProcessor { }
| transformers.js/src/models/convnext/image_processing_convnext.js/0 | {
"file_path": "transformers.js/src/models/convnext/image_processing_convnext.js",
"repo_id": "transformers.js",
"token_count": 683
} |
import {
ImageProcessor,
} from "../../base/image_processors_utils.js";
export class JinaCLIPImageProcessor extends ImageProcessor {
constructor(config) {
// JinaCLIPImageProcessor uses a custom preprocessor_config.json, so we configure it here
const { resize_mode, fill_color, interpolation, size, ...other } = config;
const new_size = resize_mode === 'squash'
? { width: size, height: size }
: resize_mode === 'shortest'
? { shortest_edge: size }
: { longest_edge: size };
const resample = interpolation === 'bicubic' ? 3 : 2;
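// PIL-style resampling codes: 3 = bicubic, 2 = bilinear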
super({
...other,
size: new_size,
resample,
do_center_crop: true,
crop_size: size,
do_normalize: true,
});
}
}
| transformers.js/src/models/jina_clip/image_processing_jina_clip.js/0 | {
"file_path": "transformers.js/src/models/jina_clip/image_processing_jina_clip.js",
"repo_id": "transformers.js",
"token_count": 382
} |
import { createInferenceSession, isONNXProxy } from "../backends/onnx.js";
import { Tensor } from "../utils/tensor.js";
import { apis } from "../env.js";
const IS_WEB_ENV = apis.IS_BROWSER_ENV || apis.IS_WEBWORKER_ENV;
/**
* Asynchronously creates a wrapper function for running an ONNX inference session.
*
* @param {number[]} session_bytes The session data in bytes.
* @param {import('onnxruntime-common').InferenceSession.SessionOptions} session_options The options for the ONNX session.
* @template {string | [string] | string[]} T
* @param {T} names The name(s) of the output tensor(s).
*
* @returns {Promise<function(Record<string, Tensor>): Promise<T extends string ? Tensor : T extends string[] ? { [K in keyof T]: Tensor } : never>>}
* The wrapper function for running the ONNX inference session.
*/
const wrap = async (session_bytes, session_options, names) => {
const session = await createInferenceSession(
new Uint8Array(session_bytes), session_options,
);
/** @type {Promise<any>} */
let chain = Promise.resolve();
return /** @type {any} */(async (/** @type {Record<string, Tensor>} */ inputs) => {
const proxied = isONNXProxy();
const ortFeed = Object.fromEntries(Object.entries(inputs).map(([k, v]) => [k, (proxied ? v.clone() : v).ort_tensor]));
// When running in-browser via WASM, we need to chain calls to session.run to avoid "Error: Session already started"
const outputs = await (chain = IS_WEB_ENV ? chain.then(() => session.run(ortFeed)) : session.run(ortFeed));
if (Array.isArray(names)) {
return names.map((n) => new Tensor(outputs[n]));
} else {
return new Tensor(outputs[/** @type {string} */(names)]);
}
})
}
// In-memory registry of initialized ONNX operators
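// Each getter below lazily wraps a tiny pre-serialized ONNX graph (the raw protobuf
// bytes in the arrays) exposing a single operator, so these ops can be executed via
// onnxruntime without shipping separate model files.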
export class TensorOpRegistry {
static session_options = {
// TODO: Allow for multiple execution providers
// executionProviders: ['webgpu'],
};
static get nearest_interpolate_4d() {
if (!this._nearest_interpolate_4d) {
this._nearest_interpolate_4d = wrap(
[8, 10, 18, 0, 58, 129, 1, 10, 41, 10, 1, 120, 10, 0, 10, 0, 10, 1, 115, 18, 1, 121, 34, 6, 82, 101, 115, 105, 122, 101, 42, 18, 10, 4, 109, 111, 100, 101, 34, 7, 110, 101, 97, 114, 101, 115, 116, 160, 1, 3, 18, 1, 114, 90, 31, 10, 1, 120, 18, 26, 10, 24, 8, 1, 18, 20, 10, 3, 18, 1, 98, 10, 3, 18, 1, 99, 10, 3, 18, 1, 104, 10, 3, 18, 1, 119, 90, 15, 10, 1, 115, 18, 10, 10, 8, 8, 7, 18, 4, 10, 2, 8, 4, 98, 31, 10, 1, 121, 18, 26, 10, 24, 8, 1, 18, 20, 10, 3, 18, 1, 98, 10, 3, 18, 1, 99, 10, 3, 18, 1, 104, 10, 3, 18, 1, 119, 66, 2, 16, 21],
this.session_options,
'y',
);
}
return this._nearest_interpolate_4d;
}
static get bilinear_interpolate_4d() {
if (!this._bilinear_interpolate_4d) {
this._bilinear_interpolate_4d = wrap(
[8, 9, 18, 0, 58, 128, 1, 10, 40, 10, 1, 120, 10, 0, 10, 0, 10, 1, 115, 18, 1, 121, 34, 6, 82, 101, 115, 105, 122, 101, 42, 17, 10, 4, 109, 111, 100, 101, 34, 6, 108, 105, 110, 101, 97, 114, 160, 1, 3, 18, 1, 114, 90, 31, 10, 1, 120, 18, 26, 10, 24, 8, 1, 18, 20, 10, 3, 18, 1, 98, 10, 3, 18, 1, 99, 10, 3, 18, 1, 104, 10, 3, 18, 1, 119, 90, 15, 10, 1, 115, 18, 10, 10, 8, 8, 7, 18, 4, 10, 2, 8, 4, 98, 31, 10, 1, 121, 18, 26, 10, 24, 8, 1, 18, 20, 10, 3, 18, 1, 98, 10, 3, 18, 1, 99, 10, 3, 18, 1, 104, 10, 3, 18, 1, 119, 66, 2, 16, 20],
this.session_options,
'y',
);
}
return this._bilinear_interpolate_4d;
}
static get bicubic_interpolate_4d() {
if (!this._bicubic_interpolate_4d) {
this._bicubic_interpolate_4d = wrap(
[8, 9, 18, 0, 58, 127, 10, 39, 10, 1, 120, 10, 0, 10, 0, 10, 1, 115, 18, 1, 121, 34, 6, 82, 101, 115, 105, 122, 101, 42, 16, 10, 4, 109, 111, 100, 101, 34, 5, 99, 117, 98, 105, 99, 160, 1, 3, 18, 1, 114, 90, 31, 10, 1, 120, 18, 26, 10, 24, 8, 1, 18, 20, 10, 3, 18, 1, 98, 10, 3, 18, 1, 99, 10, 3, 18, 1, 104, 10, 3, 18, 1, 119, 90, 15, 10, 1, 115, 18, 10, 10, 8, 8, 7, 18, 4, 10, 2, 8, 4, 98, 31, 10, 1, 121, 18, 26, 10, 24, 8, 1, 18, 20, 10, 3, 18, 1, 98, 10, 3, 18, 1, 99, 10, 3, 18, 1, 104, 10, 3, 18, 1, 119, 66, 2, 16, 20],
this.session_options,
'y',
);
}
return this._bicubic_interpolate_4d;
}
static get matmul() {
if (!this._matmul) {
this._matmul = wrap(
[8, 9, 18, 0, 58, 55, 10, 17, 10, 1, 97, 10, 1, 98, 18, 1, 99, 34, 6, 77, 97, 116, 77, 117, 108, 18, 1, 114, 90, 9, 10, 1, 97, 18, 4, 10, 2, 8, 1, 90, 9, 10, 1, 98, 18, 4, 10, 2, 8, 1, 98, 9, 10, 1, 99, 18, 4, 10, 2, 8, 1, 66, 2, 16, 20],
this.session_options,
'c',
);
}
return this._matmul;
}
static get stft() {
if (!this._stft) {
this._stft = wrap(
[8, 7, 18, 0, 58, 148, 1, 10, 38, 10, 1, 115, 10, 1, 106, 10, 1, 119, 10, 1, 108, 18, 1, 111, 34, 4, 83, 84, 70, 84, 42, 15, 10, 8, 111, 110, 101, 115, 105, 100, 101, 100, 24, 1, 160, 1, 2, 18, 1, 115, 90, 26, 10, 1, 115, 18, 21, 10, 19, 8, 1, 18, 15, 10, 3, 18, 1, 98, 10, 3, 18, 1, 115, 10, 3, 18, 1, 99, 90, 11, 10, 1, 106, 18, 6, 10, 4, 8, 7, 18, 0, 90, 16, 10, 1, 119, 18, 11, 10, 9, 8, 1, 18, 5, 10, 3, 18, 1, 119, 90, 11, 10, 1, 108, 18, 6, 10, 4, 8, 7, 18, 0, 98, 31, 10, 1, 111, 18, 26, 10, 24, 8, 1, 18, 20, 10, 3, 18, 1, 98, 10, 3, 18, 1, 102, 10, 3, 18, 1, 100, 10, 3, 18, 1, 99, 66, 2, 16, 17],
this.session_options,
'o',
)
}
return this._stft;
}
static get rfft() {
if (!this._rfft) {
this._rfft = wrap(
[8, 9, 18, 0, 58, 97, 10, 33, 10, 1, 120, 10, 0, 10, 1, 97, 18, 1, 121, 34, 3, 68, 70, 84, 42, 15, 10, 8, 111, 110, 101, 115, 105, 100, 101, 100, 24, 1, 160, 1, 2, 18, 1, 100, 90, 21, 10, 1, 120, 18, 16, 10, 14, 8, 1, 18, 10, 10, 3, 18, 1, 115, 10, 3, 18, 1, 99, 90, 11, 10, 1, 97, 18, 6, 10, 4, 8, 7, 18, 0, 98, 21, 10, 1, 121, 18, 16, 10, 14, 8, 1, 18, 10, 10, 3, 18, 1, 115, 10, 3, 18, 1, 99, 66, 2, 16, 20],
this.session_options,
'y',
)
}
return this._rfft;
}
static get top_k() {
if (!this._top_k) {
this._top_k = wrap(
[8, 10, 18, 0, 58, 73, 10, 18, 10, 1, 120, 10, 1, 107, 18, 1, 118, 18, 1, 105, 34, 4, 84, 111, 112, 75, 18, 1, 116, 90, 9, 10, 1, 120, 18, 4, 10, 2, 8, 1, 90, 15, 10, 1, 107, 18, 10, 10, 8, 8, 7, 18, 4, 10, 2, 8, 1, 98, 9, 10, 1, 118, 18, 4, 10, 2, 8, 1, 98, 9, 10, 1, 105, 18, 4, 10, 2, 8, 7, 66, 2, 16, 21],
this.session_options,
[ /* Values */ 'v', /* Indices */ 'i']
)
}
return this._top_k;
}
static get slice() {
if (!this._slice) {
this._slice = wrap(
[8, 7, 18, 0, 58, 96, 10, 25, 10, 1, 120, 10, 1, 115, 10, 1, 101, 10, 1, 97, 10, 1, 116, 18, 1, 121, 34, 5, 83, 108, 105, 99, 101, 18, 1, 114, 90, 9, 10, 1, 120, 18, 4, 10, 2, 8, 1, 90, 9, 10, 1, 115, 18, 4, 10, 2, 8, 7, 90, 9, 10, 1, 101, 18, 4, 10, 2, 8, 7, 90, 9, 10, 1, 97, 18, 4, 10, 2, 8, 7, 90, 9, 10, 1, 116, 18, 4, 10, 2, 8, 7, 98, 9, 10, 1, 121, 18, 4, 10, 2, 8, 1, 66, 2, 16, 13],
this.session_options,
'y',
)
}
return this._slice;
}
}
| transformers.js/src/ops/registry.js/0 | {
"file_path": "transformers.js/src/ops/registry.js",
"repo_id": "transformers.js",
"token_count": 3856
} |
import { spawnSync } from "child_process";
const MODULE_NAME = "@huggingface/transformers";
const CODE_BODY = `
const model_id = "hf-internal-testing/tiny-random-LlamaForCausalLM";
const generator = await pipeline("text-generation", model_id, { dtype: "fp32" });
const result = await generator("hello", { max_new_tokens: 3, return_full_text: false });
process.stdout.write(result[0].generated_text);
`;
const TARGET_OUTPUT = "erdingsAndroid Load";
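// Expected output of the tiny random model above; the text is meaningless by design
// and only serves to check that the bundle runs and produces deterministic results.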
const wrap_async_iife = (code) => `(async function() { ${code} })();`;
const check = (code, module = false) => {
const args = ["-e", code];
if (module) args.push("--input-type=module");
const { status, stdout, stderr } = spawnSync("node", args);
expect(stderr.toString()).toBe(""); // No warnings or errors are printed
expect(stdout.toString()).toBe(TARGET_OUTPUT); // The output should match
expect(status).toBe(0); // The process should exit cleanly
};
describe("Testing the bundle", () => {
it("ECMAScript Module (ESM)", () => {
check(`import { pipeline } from "${MODULE_NAME}";${CODE_BODY}`, true);
});
it("CommonJS (CJS) with require", () => {
check(`const { pipeline } = require("${MODULE_NAME}");${wrap_async_iife(CODE_BODY)}`);
});
it("CommonJS (CJS) with dynamic import", () => {
check(`${wrap_async_iife(`const { pipeline } = await import("${MODULE_NAME}");${CODE_BODY}`)}`);
});
});
| transformers.js/tests/bundles.test.js/0 | {
"file_path": "transformers.js/tests/bundles.test.js",
"repo_id": "transformers.js",
"token_count": 494
} |
import { AutoImageProcessor, CLIPFeatureExtractor } from "../../../src/transformers.js";
import { load_cached_image } from "../../asset_cache.js";
import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js";
export default () => {
// CLIPFeatureExtractor
// - tests center crop (do_center_crop=true, crop_size=224)
describe("CLIPFeatureExtractor", () => {
const model_id = "Xenova/clip-vit-base-patch16";
/** @type {CLIPFeatureExtractor} */
let processor;
beforeAll(async () => {
processor = await AutoImageProcessor.from_pretrained(model_id);
}, MAX_PROCESSOR_LOAD_TIME);
it(
"default",
async () => {
const image = await load_cached_image("tiger");
const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image);
expect(pixel_values.dims).toEqual([1, 3, 224, 224]);
expect(pixel_values.mean().item()).toBeCloseTo(-0.06678297738282096, 6);
expect(original_sizes).toEqual([[408, 612]]);
expect(reshaped_input_sizes).toEqual([[224, 224]]);
},
MAX_TEST_EXECUTION_TIME,
);
});
};
| transformers.js/tests/models/clip/test_image_processing_clip.js/0 | {
"file_path": "transformers.js/tests/models/clip/test_image_processing_clip.js",
"repo_id": "transformers.js",
"token_count": 464
} |
import { AutoImageProcessor, Idefics3ImageProcessor } from "../../../src/transformers.js";
import { load_cached_image } from "../../asset_cache.js";
import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js";
export default () => {
// Idefics3ImageProcessor
// - custom image processing (patching)
describe("Idefics3ImageProcessor", () => {
const model_id = "hf-internal-testing/tiny-random-Idefics3ForConditionalGeneration";
/** @type {Record<string, import('../../../src/utils/image.js').RawImage>} */
const images = {};
/** @type {Idefics3ImageProcessor} */
let processor;
beforeAll(async () => {
processor = await AutoImageProcessor.from_pretrained(model_id);
// Load images
const image = await load_cached_image("gradient_1280x640");
const white_image = await load_cached_image("white_image");
images.image = image;
images.image_1 = await image.resize(1600, 1067);
images.image_2 = await image.resize(224, 224);
images.white_image = white_image;
images.white_image_1 = await white_image.resize(1600, 1067);
images.white_image_2 = await white_image.resize(224, 224);
}, MAX_PROCESSOR_LOAD_TIME);
it(
"no image splitting",
async () => {
const { pixel_values, rows, cols } = await processor(images.image, { do_image_splitting: false, return_row_col_info: true });
expect(pixel_values.dims).toEqual([1, 1, 3, 364, 364]);
expect(pixel_values.mean().item()).toBeCloseTo(-0.001035306602716446, 2);
expect(rows).toEqual([[0]]);
expect(cols).toEqual([[0]]);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"batched no image splitting",
async () => {
const { pixel_values, pixel_attention_mask, rows, cols } = await processor([[images.white_image_1], [images.white_image_2], [images.white_image_1, images.white_image_2]], { do_image_splitting: false, return_row_col_info: true });
expect(pixel_values.dims).toEqual([3, 2, 3, 364, 364]);
expect(pixel_values.mean().item()).toBeCloseTo(2 / 3, 2);
expect(pixel_attention_mask.dims).toEqual([3, 2, 364, 364]);
expect(pixel_attention_mask.to("float32").mean().item()).toBeCloseTo(2 / 3, 3);
expect(rows).toEqual([[0], [0], [0, 0]]);
expect(cols).toEqual([[0], [0], [0, 0]]);
// Test that the order of the pixel attention mask matches the python implementation
expect(pixel_attention_mask.data.reduce((a, b, i) => a + i * b, 0)).toEqual(228217205216);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"correct patching",
async () => {
const { pixel_values, rows, cols } = await processor(images.image, { return_row_col_info: true });
expect(pixel_values.dims).toEqual([1, 9, 3, 364, 364]);
expect(pixel_values.flatten(2).mean(2).tolist()).toBeCloseToNested([[-0.7012196183204651, -0.30104631185531616, 0.09912905097007751, 0.49929487705230713, -0.5011996626853943, -0.10103467106819153, 0.2991456389427185, 0.6993265151977539, -0.0010353063698858023]], 1);
expect(rows).toEqual([[2]]);
expect(cols).toEqual([[4]]);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"unbatched, single image",
async () => {
const { pixel_values, rows, cols } = await processor(images.image_1, { return_row_col_info: true });
expect(pixel_values.dims).toEqual([1, 13, 3, 364, 364]);
expect(rows).toEqual([[3]]);
expect(cols).toEqual([[4]]);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"unbatched, multiple images",
async () => {
const { pixel_values, rows, cols } = await processor([images.image_1, images.image_2], { return_row_col_info: true });
expect(pixel_values.dims).toEqual([1, 30, 3, 364, 364]);
expect(rows).toEqual([[3, 4]]);
expect(cols).toEqual([[4, 4]]);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"batched, multiple images",
async () => {
const { pixel_values, rows, cols } = await processor([[images.image_1], [images.image_1, images.image_2]], { return_row_col_info: true });
expect(pixel_values.dims).toEqual([2, 30, 3, 364, 364]);
expect(rows).toEqual([[3], [3, 4]]);
expect(cols).toEqual([[4], [4, 4]]);
},
MAX_TEST_EXECUTION_TIME,
);
});
};
| transformers.js/tests/models/idefics3/test_image_processing_idefics3.js/0 | {
"file_path": "transformers.js/tests/models/idefics3/test_image_processing_idefics3.js",
"repo_id": "transformers.js",
"token_count": 1918
} |
import { Wav2Vec2Processor, MoonshineForConditionalGeneration, full, ones } from "../../../src/transformers.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";
export default () => {
describe("MoonshineForConditionalGeneration", () => {
const model_id = "hf-internal-testing/tiny-random-MoonshineForConditionalGeneration";
/** @type {MoonshineForConditionalGeneration} */
let model;
/** @type {Wav2Vec2Processor} */
let processor;
beforeAll(async () => {
model = await MoonshineForConditionalGeneration.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
processor = await Wav2Vec2Processor.from_pretrained(model_id);
}, MAX_MODEL_LOAD_TIME);
const input_values = new Float32Array(16000);
it(
"forward",
async () => {
const inputs = await processor(input_values);
const { logits } = await model({
...inputs,
decoder_input_ids: ones([1, 1]),
});
expect(logits.dims).toEqual([1, 1, 32768]);
expect(logits.mean().item()).toBeCloseTo(0.016709428280591965, 6);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"batch_size=1",
async () => {
const inputs = await processor(input_values);
const generate_ids = await model.generate({ ...inputs, max_new_tokens: 3 });
const new_tokens = generate_ids;
expect(new_tokens.tolist()).toEqual([[/* Decoder start token */ 1n, /* Generated */ 6891n, 21892n, 14850n]]);
},
MAX_TEST_EXECUTION_TIME,
);
afterAll(async () => {
await model?.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
};
| transformers.js/tests/models/moonshine/test_modeling_moonshine.js/0 | {
"file_path": "transformers.js/tests/models/moonshine/test_modeling_moonshine.js",
"repo_id": "transformers.js",
"token_count": 724
} |
import { AutoProcessor, AutoModelForAudioFrameClassification } from "../../../src/transformers.js";
import { MAX_TEST_EXECUTION_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";
import { compare } from "../../test_utils.js";
export default () => {
const models_to_test = ["onnx-community/pyannote-segmentation-3.0"];
let audio;
beforeAll(async () => {
const url = "https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/mlk.npy";
const buffer = await (await fetch(url)).arrayBuffer();
audio = Float32Array.from(new Float64Array(buffer));
});
it(
`PyAnnoteForAudioFrameClassification`,
async () => {
const model_id = models_to_test[0];
// Load model and processor
const model = await AutoModelForAudioFrameClassification.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
const processor = await AutoProcessor.from_pretrained(model_id);
// Check processor config
expect(processor.sampling_rate).toEqual(16000);
// Preprocess audio
const inputs = await processor(audio);
// Run model with inputs
const { logits } = await model(inputs);
compare(logits.dims, [1, 767, 7]);
compare(logits.mean().item(), -4.822614669799805, 6);
const result = processor.post_process_speaker_diarization(logits, audio.length);
const target = [
[
{ id: 0, start: 0, end: 1.0512535626298245, confidence: 0.7898106738171984 },
{ id: 2, start: 1.0512535626298245, end: 2.373798367228636, confidence: 0.8923380609065887 },
{ id: 0, start: 2.373798367228636, end: 3.5776532534660155, confidence: 0.6920057005438546 },
{ id: 2, start: 3.5776532534660155, end: 4.578039708226655, confidence: 0.8169249580865657 },
{ id: 3, start: 4.578039708226655, end: 6.2396985652867, confidence: 0.6921662061495533 },
{ id: 2, start: 6.2396985652867, end: 8.664364040384521, confidence: 0.705263573835628 },
{ id: 0, start: 8.664364040384521, end: 10.071687358098641, confidence: 0.6650650397924295 },
{ id: 2, start: 10.071687358098641, end: 12.598087048934833, confidence: 0.8999033333468749 },
{ id: 0, start: 12.598087048934833, end: 13.005023911888312, confidence: 0.37838892004965197 },
],
];
compare(result, target);
await model.dispose();
},
MAX_TEST_EXECUTION_TIME,
);
};
| transformers.js/tests/models/pyannote/test_modeling_pyannote.js/0 | {
"file_path": "transformers.js/tests/models/pyannote/test_modeling_pyannote.js",
"repo_id": "transformers.js",
"token_count": 1018
} |
import { AutoImageProcessor, rand, Tensor, VitPoseImageProcessor } from "../../../src/transformers.js";
import { load_cached_image } from "../../asset_cache.js";
import { MAX_PROCESSOR_LOAD_TIME, MAX_TEST_EXECUTION_TIME } from "../../init.js";
export default () => {
describe("VitPoseImageProcessor", () => {
const model_id = "onnx-community/vitpose-base-simple";
/** @type {VitPoseImageProcessor} */
let processor;
beforeAll(async () => {
processor = await AutoImageProcessor.from_pretrained(model_id);
}, MAX_PROCESSOR_LOAD_TIME);
it(
"default",
async () => {
const image = await load_cached_image("tiger");
const { pixel_values, original_sizes, reshaped_input_sizes } = await processor(image);
expect(pixel_values.dims).toEqual([1, 3, 256, 192]);
expect(pixel_values.mean().item()).toBeCloseTo(-0.2771204710006714, 6);
expect(original_sizes).toEqual([[408, 612]]);
expect(reshaped_input_sizes).toEqual([[256, 192]]);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"post_process_pose_estimation",
async () => {
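        // Use random heatmaps and a single bounding box to exercise keypoint post-processing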
const num_classes = 17;
const size = [0, 0, 1000, 1500];
const heatmaps = rand([1, num_classes, 64, 48]);
const boxes = [[size]];
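        // Map heatmaps to per-keypoint coordinates and scores for each box; threshold: null disables score filtering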
const { bbox, scores, labels, keypoints } = processor.post_process_pose_estimation(heatmaps, boxes, { threshold: null })[0][0];
expect(bbox).toEqual(size);
expect(scores).toHaveLength(num_classes);
expect(labels).toHaveLength(num_classes);
expect(keypoints).toHaveLength(num_classes);
expect(keypoints[0]).toHaveLength(2);
},
MAX_TEST_EXECUTION_TIME,
);
});
};
| transformers.js/tests/models/vitpose/test_image_processing_vitpose.js/0 | {
"file_path": "transformers.js/tests/models/vitpose/test_image_processing_vitpose.js",
"repo_id": "transformers.js",
"token_count": 725
} |
import { pipeline, ImageClassificationPipeline } from "../../src/transformers.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js";
import { load_cached_image } from "../asset_cache.js";
const PIPELINE_ID = "image-classification";
export default () => {
describe("Image Classification", () => {
const model_id = "hf-internal-testing/tiny-random-vit";
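    // The tiny random ViT exposes two labels (LABEL_0 / LABEL_1), keeping outputs easy to check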
/** @type {ImageClassificationPipeline} */
let pipe;
let images;
beforeAll(async () => {
pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
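      // Solid-color test images cover both single-image and batched inputs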
images = await Promise.all([load_cached_image("white_image"), load_cached_image("blue_image")]);
}, MAX_MODEL_LOAD_TIME);
it("should be an instance of ImageClassificationPipeline", () => {
expect(pipe).toBeInstanceOf(ImageClassificationPipeline);
});
describe("batch_size=1", () => {
it(
"default (top_k=5)",
async () => {
const output = await pipe(images[0]);
const target = [
{ label: "LABEL_1", score: 0.5020533800125122 },
{ label: "LABEL_0", score: 0.4979466497898102 },
];
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"custom (top_k=1)",
async () => {
const output = await pipe(images[0], { top_k: 1 });
const target = [{ label: "LABEL_1", score: 0.5020533800125122 }];
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
});
describe("batch_size>1", () => {
it(
"default (top_k=5)",
async () => {
const output = await pipe(images);
const target = [
[
{ label: "LABEL_1", score: 0.5020533800125122 },
{ label: "LABEL_0", score: 0.4979466497898102 },
],
[
{ label: "LABEL_1", score: 0.519227921962738 },
{ label: "LABEL_0", score: 0.4807720482349396 },
],
];
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"custom (top_k=1)",
async () => {
const output = await pipe(images, { top_k: 1 });
const target = [[{ label: "LABEL_1", score: 0.5020533800125122 }], [{ label: "LABEL_1", score: 0.519227921962738 }]];
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
});
afterAll(async () => {
await pipe.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
};
| transformers.js/tests/pipelines/test_pipelines_image_classification.js/0 | {
"file_path": "transformers.js/tests/pipelines/test_pipelines_image_classification.js",
"repo_id": "transformers.js",
"token_count": 1283
} |
import { pipeline, ZeroShotImageClassificationPipeline } from "../../src/transformers.js";
import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../init.js";
import { load_cached_image } from "../asset_cache.js";
const PIPELINE_ID = "zero-shot-image-classification";
export default () => {
describe("Zero-shot Image Classification", () => {
const model_id = "hf-internal-testing/tiny-random-GroupViTModel";
// Example adapted from https://huggingface.co/docs/transformers/en/model_doc/groupvit
const labels = ["cat", "dog"];
const hypothesis_template = "a photo of a {}";
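    // The optional hypothesis_template wraps each candidate label (e.g. "a photo of a cat") before it is scored against the image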
/** @type {ZeroShotImageClassificationPipeline} */
let pipe;
let images;
beforeAll(async () => {
pipe = await pipeline(PIPELINE_ID, model_id, DEFAULT_MODEL_OPTIONS);
images = await Promise.all([load_cached_image("white_image"), load_cached_image("blue_image")]);
}, MAX_MODEL_LOAD_TIME);
it("should be an instance of ZeroShotImageClassificationPipeline", () => {
expect(pipe).toBeInstanceOf(ZeroShotImageClassificationPipeline);
});
describe("batch_size=1", () => {
it(
"default",
async () => {
const output = await pipe(images[0], labels);
const target = [
{ score: 0.5990662574768066, label: "cat" },
{ score: 0.40093377232551575, label: "dog" },
];
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"custom (w/ hypothesis_template)",
async () => {
const output = await pipe(images[0], labels, { hypothesis_template });
const target = [
{ score: 0.5527022480964661, label: "cat" },
{ score: 0.44729775190353394, label: "dog" },
];
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
});
describe("batch_size>1", () => {
it(
"default",
async () => {
const output = await pipe(images, labels);
const target = [
[
{ score: 0.5990662574768066, label: "cat" },
{ score: 0.40093377232551575, label: "dog" },
],
[
{ score: 0.5006340146064758, label: "dog" },
{ score: 0.49936598539352417, label: "cat" },
],
];
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
it(
"custom (w/ hypothesis_template)",
async () => {
const output = await pipe(images, labels, { hypothesis_template });
const target = [
[
{ score: 0.5527022480964661, label: "cat" },
{ score: 0.44729775190353394, label: "dog" },
],
[
{ score: 0.5395973324775696, label: "cat" },
{ score: 0.46040263772010803, label: "dog" },
],
];
expect(output).toBeCloseToNested(target, 5);
},
MAX_TEST_EXECUTION_TIME,
);
});
afterAll(async () => {
await pipe.dispose();
}, MAX_MODEL_DISPOSE_TIME);
});
};
| transformers.js/tests/pipelines/test_pipelines_zero_shot_image_classification.js/0 | {
"file_path": "transformers.js/tests/pipelines/test_pipelines_zero_shot_image_classification.js",
"repo_id": "transformers.js",
"token_count": 1549
} |
cff-version: "1.2.0"
date-released: 2020-10
message: "If you use this software, please cite it using these metadata."
title: "Transformers: State-of-the-Art Natural Language Processing"
url: "https://github.com/huggingface/transformers"
authors:
- family-names: Wolf
given-names: Thomas
- family-names: Debut
given-names: Lysandre
- family-names: Sanh
given-names: Victor
- family-names: Chaumond
given-names: Julien
- family-names: Delangue
given-names: Clement
- family-names: Moi
given-names: Anthony
- family-names: Cistac
  given-names: Pierric
- family-names: Ma
given-names: Clara
- family-names: Jernite
given-names: Yacine
- family-names: Plu
given-names: Julien
- family-names: Xu
given-names: Canwen
- family-names: "Le Scao"
given-names: Teven
- family-names: Gugger
given-names: Sylvain
- family-names: Drame
given-names: Mariama
- family-names: Lhoest
given-names: Quentin
- family-names: Rush
given-names: "Alexander M."
preferred-citation:
type: conference-paper
authors:
- family-names: Wolf
given-names: Thomas
- family-names: Debut
given-names: Lysandre
- family-names: Sanh
given-names: Victor
- family-names: Chaumond
given-names: Julien
- family-names: Delangue
given-names: Clement
- family-names: Moi
given-names: Anthony
- family-names: Cistac
    given-names: Pierric
- family-names: Ma
given-names: Clara
- family-names: Jernite
given-names: Yacine
- family-names: Plu
given-names: Julien
- family-names: Xu
given-names: Canwen
- family-names: "Le Scao"
given-names: Teven
- family-names: Gugger
given-names: Sylvain
- family-names: Drame
given-names: Mariama
- family-names: Lhoest
given-names: Quentin
- family-names: Rush
given-names: "Alexander M."
booktitle: "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations"
month: 10
start: 38
end: 45
title: "Transformers: State-of-the-Art Natural Language Processing"
year: 2020
publisher: "Association for Computational Linguistics"
url: "https://www.aclweb.org/anthology/2020.emnlp-demos.6"
address: "Online"
| transformers/CITATION.cff/0 | {
"file_path": "transformers/CITATION.cff",
"repo_id": "transformers",
"token_count": 824
} |